Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/amt.c4
-rw-r--r--drivers/net/bareudp.c19
-rw-r--r--drivers/net/bonding/bond_alb.c31
-rw-r--r--drivers/net/bonding/bond_main.c324
-rw-r--r--drivers/net/bonding/bond_netlink.c59
-rw-r--r--drivers/net/bonding/bond_options.c74
-rw-r--r--drivers/net/bonding/bond_procfs.c1
-rw-r--r--drivers/net/bonding/bond_sysfs_slave.c8
-rw-r--r--drivers/net/caif/caif_serial.c2
-rw-r--r--drivers/net/can/c_can/c_can_ethtool.c9
-rw-r--r--drivers/net/can/dev/bittiming.c20
-rw-r--r--drivers/net/can/dev/dev.c2
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c353
-rw-r--r--drivers/net/can/slcan.c2
-rw-r--r--drivers/net/can/softing/softing_main.c5
-rw-r--r--drivers/net/can/spi/hi311x.c6
-rw-r--r--drivers/net/can/spi/mcp251x.c4
-rw-r--r--drivers/net/can/spi/mcp251xfd/Makefile2
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c4
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c349
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c4
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c143
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c153
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.h62
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c24
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c417
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c22
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c6
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd.h96
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_fd.c6
-rw-r--r--drivers/net/can/usb/gs_usb.c446
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c4
-rw-r--r--drivers/net/can/usb/ucan.c4
-rw-r--r--drivers/net/can/vcan.c2
-rw-r--r--drivers/net/can/vxcan.c21
-rw-r--r--drivers/net/can/xilinx_can.c9
-rw-r--r--drivers/net/dsa/Kconfig12
-rw-r--r--drivers/net/dsa/Makefile3
-rw-r--r--drivers/net/dsa/b53/b53_common.c87
-rw-r--r--drivers/net/dsa/b53/b53_priv.h25
-rw-r--r--drivers/net/dsa/b53/b53_serdes.c19
-rw-r--r--drivers/net/dsa/b53/b53_serdes.h5
-rw-r--r--drivers/net/dsa/b53/b53_srab.c35
-rw-r--r--drivers/net/dsa/bcm_sf2.c54
-rw-r--r--drivers/net/dsa/dsa_loop.c3
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.c9
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c2
-rw-r--r--drivers/net/dsa/lan9303-core.c16
-rw-r--r--drivers/net/dsa/lantiq_gswip.c62
-rw-r--r--drivers/net/dsa/microchip/ksz8.h1
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c92
-rw-r--r--drivers/net/dsa/microchip/ksz8795_reg.h4
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c156
-rw-r--r--drivers/net/dsa/microchip/ksz9477_i2c.c1
-rw-r--r--drivers/net/dsa/microchip/ksz9477_reg.h3
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c21
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h15
-rw-r--r--drivers/net/dsa/mt7530.c19
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c925
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h51
-rw-r--r--drivers/net/dsa/mv88e6xxx/devlink.c94
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h11
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_vtu.c316
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h3
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2_scratch.c28
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c41
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h16
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c81
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.h5
-rw-r--r--drivers/net/dsa/mv88e6xxx/smi.c35
-rw-r--r--drivers/net/dsa/ocelot/felix.c843
-rw-r--r--drivers/net/dsa/ocelot/felix.h9
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c48
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c48
-rw-r--r--drivers/net/dsa/qca/ar9331.c45
-rw-r--r--drivers/net/dsa/qca8k.c1582
-rw-r--r--drivers/net/dsa/qca8k.h54
-rw-r--r--drivers/net/dsa/realtek-smi-core.c523
-rw-r--r--drivers/net/dsa/realtek/Kconfig40
-rw-r--r--drivers/net/dsa/realtek/Makefile6
-rw-r--r--drivers/net/dsa/realtek/realtek-mdio.c290
-rw-r--r--drivers/net/dsa/realtek/realtek-smi.c581
-rw-r--r--drivers/net/dsa/realtek/realtek.h (renamed from drivers/net/dsa/realtek-smi-core.h)91
-rw-r--r--drivers/net/dsa/realtek/rtl8365mb.c (renamed from drivers/net/dsa/rtl8365mb.c)734
-rw-r--r--drivers/net/dsa/realtek/rtl8366-core.c (renamed from drivers/net/dsa/rtl8366.c)164
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb.c (renamed from drivers/net/dsa/rtl8366rb.c)460
-rw-r--r--drivers/net/dsa/sja1105/sja1105_flower.c47
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c196
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.c2
-rw-r--r--drivers/net/dsa/sja1105/sja1105_vl.c16
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x.c32
-rw-r--r--drivers/net/ethernet/3com/typhoon.c24
-rw-r--r--drivers/net/ethernet/8390/mcf8390.c4
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/agere/et131x.c14
-rw-r--r--drivers/net/ethernet/altera/altera_sgdma.c2
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c8
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c5
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c12
-rw-r--r--drivers/net/ethernet/asix/ax88796c_main.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c16
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c83
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h19
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c44
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c56
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h499
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c152
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c22
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c6
-rw-r--r--drivers/net/ethernet/cadence/macb.h4
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c63
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c59
-rw-r--r--drivers/net/ethernet/cortina/gemini.c8
-rw-r--r--drivers/net/ethernet/davicom/Kconfig31
-rw-r--r--drivers/net/ethernet/davicom/Makefile1
-rw-r--r--drivers/net/ethernet/davicom/dm9051.c1260
-rw-r--r--drivers/net/ethernet/davicom/dm9051.h162
-rw-r--r--drivers/net/ethernet/dec/tulip/pnic.c2
-rw-r--r--drivers/net/ethernet/dlink/sundance.c60
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c1
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c437
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h32
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c171
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h8
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c5
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h12
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpmac.c54
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpmac.h5
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h6
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni.h6
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h38
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_cbdr.c41
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_mdio.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c14
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c150
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c1
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c93
-rw-r--r--drivers/net/ethernet/fungible/Kconfig28
-rw-r--r--drivers/net/ethernet/fungible/Makefile7
-rw-r--r--drivers/net/ethernet/fungible/funcore/Makefile5
-rw-r--r--drivers/net/ethernet/fungible/funcore/fun_dev.c843
-rw-r--r--drivers/net/ethernet/fungible/funcore/fun_dev.h150
-rw-r--r--drivers/net/ethernet/fungible/funcore/fun_hci.h1202
-rw-r--r--drivers/net/ethernet/fungible/funcore/fun_queue.c601
-rw-r--r--drivers/net/ethernet/fungible/funcore/fun_queue.h175
-rw-r--r--drivers/net/ethernet/fungible/funeth/Kconfig17
-rw-r--r--drivers/net/ethernet/fungible/funeth/Makefile10
-rw-r--r--drivers/net/ethernet/fungible/funeth/fun_port.h97
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth.h171
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_devlink.c40
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_devlink.h13
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_ethtool.c1162
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_ktls.c155
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_ktls.h30
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_main.c2091
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_rx.c826
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_trace.h117
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_tx.c763
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_txrx.h264
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c6
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c79
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h13
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h8
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.h2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c98
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h7
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c22
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c92
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c155
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c52
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h25
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c36
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c23
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h22
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_common.c4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c311
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_status.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c62
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c194
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile15
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h37
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h276
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c21
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c102
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c170
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c29
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c345
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_type.h46
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.c1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.c37
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.c376
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.h50
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc.c14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc_int.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c596
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c466
-rw-r--r--drivers/net/ethernet/intel/ice/ice_osdep.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.c38
-rw-r--r--drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_protocol_type.h21
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c31
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c111
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c2185
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.h163
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c994
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h33
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c152
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_trace.h24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c58
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h30
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h20
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c1029
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h290
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib_private.h40
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_mbx.c532
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_mbx.h52
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c211
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c)3976
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.h82
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h343
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vlan.h18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vlan_mode.c439
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vlan_mode.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c707
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h32
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c103
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h29
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c396
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h28
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c38
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c6
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c22
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c35
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c19
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c36
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c21
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c63
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c207
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c27
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c33
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c42
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h1
-rw-r--r--drivers/net/ethernet/jme.c3
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c14
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c331
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c247
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h13
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h19
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.c131
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c224
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.h30
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c117
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c79
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h21
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c170
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c30
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c50
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c75
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h15
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c48
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c55
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera.h5
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_acl.c124
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_acl.h30
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flow.c5
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flow.h3
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flower.c87
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flower.h1
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.c55
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.h6
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c13
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router.c412
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router_hw.c132
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router_hw.h44
-rw-r--r--drivers/net/ethernet/mediatek/mtk_star_emac.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c64
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c355
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c106
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/selq.c231
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/selq.h51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c80
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c64
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c79
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c372
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c89
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c209
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c75
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c212
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c167
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c223
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c844
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c128
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c180
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c142
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mcg.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pd.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rl.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c129
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c253
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c231
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c143
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c159
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c79
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h76
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c178
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h32
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c91
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c305
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c6
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c2
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c9
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c380
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c276
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h221
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c566
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.h10
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Kconfig1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Makefile3
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c34
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c148
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h56
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c45
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c9
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c618
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_regs.h121
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c85
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Makefile3
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c34
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c44
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c26
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h92
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h335
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c42
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c37
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c60
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c10
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c685
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c251
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c4
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana.h15
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c70
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c35
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c4
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c662
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h18
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c38
-rw-r--r--drivers/net/ethernet/mscc/ocelot_io.c13
-rw-r--r--drivers/net/ethernet/mscc/ocelot_mrp.c64
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c251
-rw-r--r--drivers/net/ethernet/mscc/ocelot_police.c41
-rw-r--r--drivers/net/ethernet/mscc/ocelot_police.h5
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.c66
-rw-r--r--drivers/net/ethernet/netronome/nfp/Makefile8
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c58
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h7
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h49
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c16
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/qos_conf.c470
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c12
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c1350
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h106
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/rings.c275
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/xsk.c408
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c1524
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h129
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/rings.c195
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h12
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_devlink.c58
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c43
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.h8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h204
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c2132
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h87
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c66
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_dp.c442
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_dp.h215
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c18
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c51
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c170
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_xsk.h41
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c32
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.c17
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.h5
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c29
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c49
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c2
-rw-r--r--drivers/net/ethernet/ni/nixge.c5
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c17
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c164
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c206
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c125
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c37
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.c1
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c67
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c90
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h38
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c29
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c3
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c2
-rw-r--r--drivers/net/ethernet/qualcomm/qca_uart.c2
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c2
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c94
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c71
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c15
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c18
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c26
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c9
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c63
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/nic_common.h5
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c18
-rw-r--r--drivers/net/ethernet/sfc/rx_common.h6
-rw-r--r--drivers/net/ethernet/sfc/siena.c8
-rw-r--r--drivers/net/ethernet/socionext/netsec.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c388
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c37
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c30
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c154
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c8
-rw-r--r--drivers/net/ethernet/sun/cassini.c23
-rw-r--r--drivers/net/ethernet/sun/niu.c2
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c56
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c228
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h5
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-switchdev.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c6
-rw-r--r--drivers/net/ethernet/ti/cpsw_switchdev.c4
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c25
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c2
-rw-r--r--drivers/net/ethernet/vertexcom/mse102x.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c2
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac.h4
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c7
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h20
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c608
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c2
-rw-r--r--drivers/net/fjes/fjes_main.c5
-rw-r--r--drivers/net/geneve.c89
-rw-r--r--drivers/net/gtp.c567
-rw-r--r--drivers/net/hamradio/baycom_epp.c4
-rw-r--r--drivers/net/hamradio/dmascc.c7
-rw-r--r--drivers/net/hyperv/netvsc.c25
-rw-r--r--drivers/net/ieee802154/atusb.c186
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c2
-rw-r--r--drivers/net/ipa/gsi_trans.c11
-rw-r--r--drivers/net/ipa/gsi_trans.h10
-rw-r--r--drivers/net/ipa/ipa_data-v3.1.c2
-rw-r--r--drivers/net/ipa/ipa_data-v3.5.1.c2
-rw-r--r--drivers/net/ipa/ipa_data-v4.11.c2
-rw-r--r--drivers/net/ipa/ipa_data-v4.2.c2
-rw-r--r--drivers/net/ipa/ipa_data-v4.5.c2
-rw-r--r--drivers/net/ipa/ipa_data-v4.9.c2
-rw-r--r--drivers/net/ipa/ipa_data.h2
-rw-r--r--drivers/net/ipa/ipa_endpoint.c217
-rw-r--r--drivers/net/ipa/ipa_endpoint.h8
-rw-r--r--drivers/net/ipa/ipa_power.c178
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c2
-rw-r--r--drivers/net/loopback.c6
-rw-r--r--drivers/net/macsec.c6
-rw-r--r--drivers/net/macvlan.c22
-rw-r--r--drivers/net/macvtap.c6
-rw-r--r--drivers/net/mctp/Kconfig12
-rw-r--r--drivers/net/mctp/Makefile1
-rw-r--r--drivers/net/mctp/mctp-i2c.c1082
-rw-r--r--drivers/net/mctp/mctp-serial.c2
-rw-r--r--drivers/net/mdio/mdio-mscc-miim.c67
-rw-r--r--drivers/net/mdio/mdio-mux.c4
-rw-r--r--drivers/net/mdio/mdio-xgene.c3
-rw-r--r--drivers/net/mhi_net.c2
-rw-r--r--drivers/net/net_failover.c2
-rw-r--r--drivers/net/netdevsim/Makefile2
-rw-r--r--drivers/net/netdevsim/dev.c102
-rw-r--r--drivers/net/netdevsim/hwstats.c486
-rw-r--r--drivers/net/netdevsim/netdevsim.h25
-rw-r--r--drivers/net/ntb_netdev.c2
-rw-r--r--drivers/net/pcs/pcs-xpcs.c41
-rw-r--r--drivers/net/phy/Kconfig1
-rw-r--r--drivers/net/phy/aquantia_main.c4
-rw-r--r--drivers/net/phy/at803x.c146
-rw-r--r--drivers/net/phy/dp83640.c19
-rw-r--r--drivers/net/phy/micrel.c1103
-rw-r--r--drivers/net/phy/microchip_t1.c359
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.c2
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c2
-rw-r--r--drivers/net/phy/phy-core.c22
-rw-r--r--drivers/net/phy/phy_device.c19
-rw-r--r--drivers/net/phy/phylink.c90
-rw-r--r--drivers/net/phy/sfp-bus.c6
-rw-r--r--drivers/net/phy/sfp.c48
-rw-r--r--drivers/net/plip/plip.c2
-rw-r--r--drivers/net/rionet.c2
-rw-r--r--drivers/net/sb1000.c2
-rw-r--r--drivers/net/slip/slip.c2
-rw-r--r--drivers/net/tap.c38
-rw-r--r--drivers/net/team/team.c5
-rw-r--r--drivers/net/tun.c102
-rw-r--r--drivers/net/usb/Kconfig1
-rw-r--r--drivers/net/usb/asix.h10
-rw-r--r--drivers/net/usb/asix_common.c81
-rw-r--r--drivers/net/usb/asix_devices.c104
-rw-r--r--drivers/net/usb/cdc_mbim.c1
-rw-r--r--drivers/net/usb/gl620a.c2
-rw-r--r--drivers/net/usb/hso.c2
-rw-r--r--drivers/net/usb/smsc95xx.c25
-rw-r--r--drivers/net/veth.c194
-rw-r--r--drivers/net/virtio_net.c3
-rw-r--r--drivers/net/vrf.c9
-rw-r--r--drivers/net/vxlan/Makefile7
-rw-r--r--drivers/net/vxlan/vxlan_core.c (renamed from drivers/net/vxlan.c)495
-rw-r--r--drivers/net/vxlan/vxlan_multicast.c272
-rw-r--r--drivers/net/vxlan/vxlan_private.h162
-rw-r--r--drivers/net/vxlan/vxlan_vnifilter.c999
-rw-r--r--drivers/net/wan/lmc/lmc_main.c3
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c16
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c153
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h296
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c331
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c36
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c15
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h27
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h40
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c17
-rw-r--r--drivers/net/wireless/ath/ath10k/swap.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.c7
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c6
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c15
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h12
-rw-r--r--drivers/net/wireless/ath/ath11k/dbring.c19
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c515
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.h180
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h13
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c357
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c35
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_desc.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c471
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.h143
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.c23
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c116
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c5
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c10
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c40
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c15
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c25
-rw-r--r--drivers/net/wireless/ath/ath11k/rx_desc.h6
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c300
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h132
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h4
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c23
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c1
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c22
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h38
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/rng.c72
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h1
-rw-r--r--drivers/net/wireless/ath/carl9170/fwdesc.h2
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c61
-rw-r--r--drivers/net/wireless/ath/carl9170/wlan.h2
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.c6
-rw-r--r--drivers/net/wireless/ath/regd.c10
-rw-r--r--drivers/net/wireless/ath/spectral_common.h4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c107
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c36
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h14
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c35
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c18
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c34
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h28
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c20
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c78
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h2
-rw-r--r--drivers/net/wireless/cisco/airo.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c229
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/config.h33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h148
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h127
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h52
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/txq.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c331
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/paging.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c72
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c181
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/main.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/net.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c405
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c50
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c361
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/offloading.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/quota.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rfi.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c294
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c313
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c112
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.c101
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.h21
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c410
-rw-r--r--drivers/net/wireless/marvell/libertas/rx.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_cmd.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_txrx.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c63
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h36
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c194
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c36
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c236
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c91
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c36
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h76
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c422
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h118
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Kconfig13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Makefile1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c225
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c466
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c188
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h54
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c310
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c777
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.h15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c152
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c1533
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h63
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c691
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h130
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c259
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h893
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/soc.c1212
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.c106
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Kconfig11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c65
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/dma.c121
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c70
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c209
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c70
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c313
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h63
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c126
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c48
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/regs.h64
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c22
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c89
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c40
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c306
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c252
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio_txrx.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c125
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c5
-rw-r--r--drivers/net/wireless/ray_cs.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/cam.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c32
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c6
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c298
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.h5
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c6
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c59
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h9
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c13
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c127
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h52
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c5
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c47
-rw-r--r--drivers/net/wireless/realtek/rtw88/sar.c8
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/Kconfig4
-rw-r--r--drivers/net/wireless/realtek/rtw89/Makefile13
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c40
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.h5
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c41
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c679
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h291
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c93
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse.c160
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c686
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h491
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c646
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h84
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c147
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c361
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h81
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c521
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h75
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h217
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c79
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c86
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c2744
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h49
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c46
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c529
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.h76
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c43
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h3
-rw-r--r--drivers/net/wireless/st/cw1200/queue.c3
-rw-r--r--drivers/net/wireless/st/cw1200/wsm.c2
-rw-r--r--drivers/net/wireless/zydas/zd1201.c3
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_debugfs.c5
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.c54
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.h7
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mmio.c6
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mmio.h6
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux.c21
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux.h133
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux_codec.c742
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux_codec.h142
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pcie.c1
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pcie.h1
-rw-r--r--drivers/net/wwan/qcom_bam_dmux.c2
-rw-r--r--drivers/net/wwan/wwan_core.c36
1025 files changed, 79168 insertions, 24552 deletions
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 50b23e71065f..3f1192d3c52d 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -31,7 +31,7 @@ obj-$(CONFIG_TUN) += tun.o
obj-$(CONFIG_TAP) += tap.o
obj-$(CONFIG_VETH) += veth.o
obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
-obj-$(CONFIG_VXLAN) += vxlan.o
+obj-$(CONFIG_VXLAN) += vxlan/
obj-$(CONFIG_GENEVE) += geneve.o
obj-$(CONFIG_BAREUDP) += bareudp.o
obj-$(CONFIG_GTP) += gtp.o
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index f1a36d7e2151..10455c9b9da0 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -2373,7 +2373,7 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
skb->pkt_type = PACKET_MULTICAST;
skb->ip_summed = CHECKSUM_NONE;
len = skb->len;
- if (netif_rx(skb) == NET_RX_SUCCESS) {
+ if (__netif_rx(skb) == NET_RX_SUCCESS) {
amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true);
dev_sw_netstats_rx_add(amt->dev, len);
} else {
@@ -2470,7 +2470,7 @@ report:
skb->pkt_type = PACKET_MULTICAST;
skb->ip_summed = CHECKSUM_NONE;
len = skb->len;
- if (netif_rx(skb) == NET_RX_SUCCESS) {
+ if (__netif_rx(skb) == NET_RX_SUCCESS) {
amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_UPDATE,
true);
dev_sw_netstats_rx_add(amt->dev, len);
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index ba587e5fc24f..683203f87ae2 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -148,14 +148,14 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
skb_reset_network_header(skb);
skb_reset_mac_header(skb);
- if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
+ if (!ipv6_mod_enabled() || family == AF_INET)
err = IP_ECN_decapsulate(oiph, skb);
else
err = IP6_ECN_decapsulate(oiph, skb);
if (unlikely(err)) {
if (log_ecn_error) {
- if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
+ if (!ipv6_mod_enabled() || family == AF_INET)
net_info_ratelimited("non-ECT from %pI4 "
"with TOS=%#x\n",
&((struct iphdr *)oiph)->saddr,
@@ -221,11 +221,12 @@ static struct socket *bareudp_create_sock(struct net *net, __be16 port)
int err;
memset(&udp_conf, 0, sizeof(udp_conf));
-#if IS_ENABLED(CONFIG_IPV6)
- udp_conf.family = AF_INET6;
-#else
- udp_conf.family = AF_INET;
-#endif
+
+ if (ipv6_mod_enabled())
+ udp_conf.family = AF_INET6;
+ else
+ udp_conf.family = AF_INET;
+
udp_conf.local_udp_port = port;
/* Open UDP socket */
err = udp_sock_create(net, &udp_conf, &sock);
@@ -448,7 +449,7 @@ static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
}
rcu_read_lock();
- if (IS_ENABLED(CONFIG_IPV6) && info->mode & IP_TUNNEL_INFO_IPV6)
+ if (ipv6_mod_enabled() && info->mode & IP_TUNNEL_INFO_IPV6)
err = bareudp6_xmit_skb(skb, dev, bareudp, info);
else
err = bareudp_xmit_skb(skb, dev, bareudp, info);
@@ -478,7 +479,7 @@ static int bareudp_fill_metadata_dst(struct net_device *dev,
use_cache = ip_tunnel_dst_cache_usable(skb, info);
- if (!IS_ENABLED(CONFIG_IPV6) || ip_tunnel_info_af(info) == AF_INET) {
+ if (!ipv6_mod_enabled() || ip_tunnel_info_af(info) == AF_INET) {
struct rtable *rt;
__be32 saddr;
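
Note on the bareudp.c hunks above: they swap the compile-time IS_ENABLED(CONFIG_IPV6) checks for the runtime ipv6_mod_enabled() test, so a kernel whose IPv6 support is built as a module but not loaded now falls back to AF_INET instead of misbehaving. Below is a minimal userspace sketch of the same idea, under the assumption that "can I open an AF_INET6 socket?" is an acceptable stand-in for the kernel helper; it is illustrative only and not part of the patch.

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>

/* Illustrative stand-in for a runtime "is IPv6 usable?" probe:
 * try to open an IPv6 datagram socket and report the result. */
static int ipv6_usable(void)
{
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);

	if (fd < 0)
		return 0;
	close(fd);
	return 1;
}

int main(void)
{
	/* Choose the socket family at runtime, as the patched
	 * bareudp_create_sock() now does, rather than at build time. */
	int family = ipv6_usable() ? AF_INET6 : AF_INET;

	printf("using %s\n", family == AF_INET6 ? "AF_INET6" : "AF_INET");
	return 0;
}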
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 533e476988f2..303c8d32d451 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -19,6 +19,7 @@
#include <linux/in.h>
#include <net/arp.h>
#include <net/ipv6.h>
+#include <net/ndisc.h>
#include <asm/byteorder.h>
#include <net/bonding.h>
#include <net/bond_alb.h>
@@ -1269,6 +1270,27 @@ unwind:
return res;
}
+/* determine if the packet is NA or NS */
+static bool alb_determine_nd(struct sk_buff *skb, struct bonding *bond)
+{
+ struct ipv6hdr *ip6hdr;
+ struct icmp6hdr *hdr;
+
+ if (!pskb_network_may_pull(skb, sizeof(*ip6hdr)))
+ return true;
+
+ ip6hdr = ipv6_hdr(skb);
+ if (ip6hdr->nexthdr != IPPROTO_ICMPV6)
+ return false;
+
+ if (!pskb_network_may_pull(skb, sizeof(*ip6hdr) + sizeof(*hdr)))
+ return true;
+
+ hdr = icmp6_hdr(skb);
+ return hdr->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT ||
+ hdr->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION;
+}
+
/************************ exported alb functions ************************/
int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
@@ -1348,8 +1370,11 @@ struct slave *bond_xmit_tlb_slave_get(struct bonding *bond,
/* Do not TX balance any multicast or broadcast */
if (!is_multicast_ether_addr(eth_data->h_dest)) {
switch (skb->protocol) {
- case htons(ETH_P_IP):
case htons(ETH_P_IPV6):
+ if (alb_determine_nd(skb, bond))
+ break;
+ fallthrough;
+ case htons(ETH_P_IP):
hash_index = bond_xmit_hash(bond, skb);
if (bond->params.tlb_dynamic_lb) {
tx_slave = tlb_choose_channel(bond,
@@ -1432,10 +1457,12 @@ struct slave *bond_xmit_alb_slave_get(struct bonding *bond,
break;
}
- if (!pskb_network_may_pull(skb, sizeof(*ip6hdr))) {
+ if (alb_determine_nd(skb, bond)) {
do_tx_balance = false;
break;
}
+
+ /* The IPv6 header is pulled by alb_determine_nd */
/* Additionally, DAD probes should not be tx-balanced as that
* will lead to false positives for duplicate addresses and
* prevent address configuration from working.
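
Note on the bond_alb.c hunks above: the new alb_determine_nd() keeps IPv6 neighbour discovery (NS/NA) out of TLB/ALB transmit balancing by peeking at the ICMPv6 type, and deliberately reports "is ND" when the packet is too short to inspect, matching the pskb_network_may_pull() failure paths shown above. A small userspace sketch of the same classification over a flat packet buffer, using the standard netinet headers (ND_NEIGHBOR_SOLICIT / ND_NEIGHBOR_ADVERT); like the kernel code it does not walk IPv6 extension headers, and it is illustrative only.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include <netinet/in.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>

/* Return true for an ICMPv6 neighbour solicitation or advertisement;
 * "too short to tell" also returns true so the caller errs on the side
 * of not load-balancing the frame. */
static bool is_ipv6_nd(const unsigned char *pkt, size_t len)
{
	struct ip6_hdr ip6;
	struct icmp6_hdr icmp6;

	if (len < sizeof(ip6))
		return true;
	memcpy(&ip6, pkt, sizeof(ip6));

	if (ip6.ip6_nxt != IPPROTO_ICMPV6)
		return false;

	if (len < sizeof(ip6) + sizeof(icmp6))
		return true;
	memcpy(&icmp6, pkt + sizeof(ip6), sizeof(icmp6));

	return icmp6.icmp6_type == ND_NEIGHBOR_SOLICIT ||
	       icmp6.icmp6_type == ND_NEIGHBOR_ADVERT;
}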
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index aebeb46e6fa6..15eddca7b4b6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -88,6 +88,7 @@
#if IS_ENABLED(CONFIG_TLS_DEVICE)
#include <net/tls.h>
#endif
+#include <net/ip6_route.h>
#include "bonding_priv.h"
@@ -2793,31 +2794,15 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
return ret;
}
-/* We go to the (large) trouble of VLAN tagging ARP frames because
- * switches in VLAN mode (especially if ports are configured as
- * "native" to a VLAN) might not pass non-tagged frames.
- */
-static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
- __be32 src_ip, struct bond_vlan_tag *tags)
+static bool bond_handle_vlan(struct slave *slave, struct bond_vlan_tag *tags,
+ struct sk_buff *skb)
{
- struct sk_buff *skb;
- struct bond_vlan_tag *outer_tag = tags;
- struct net_device *slave_dev = slave->dev;
struct net_device *bond_dev = slave->bond->dev;
-
- slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n",
- arp_op, &dest_ip, &src_ip);
-
- skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
- NULL, slave_dev->dev_addr, NULL);
-
- if (!skb) {
- net_err_ratelimited("ARP packet allocation failed\n");
- return;
- }
+ struct net_device *slave_dev = slave->dev;
+ struct bond_vlan_tag *outer_tag = tags;
if (!tags || tags->vlan_proto == VLAN_N_VID)
- goto xmit;
+ return true;
tags++;
@@ -2834,7 +2819,7 @@ static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
tags->vlan_id);
if (!skb) {
net_err_ratelimited("failed to insert inner VLAN tag\n");
- return;
+ return false;
}
tags++;
@@ -2847,8 +2832,34 @@ static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
outer_tag->vlan_id);
}
-xmit:
- arp_xmit(skb);
+ return true;
+}
+
+/* We go to the (large) trouble of VLAN tagging ARP frames because
+ * switches in VLAN mode (especially if ports are configured as
+ * "native" to a VLAN) might not pass non-tagged frames.
+ */
+static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
+ __be32 src_ip, struct bond_vlan_tag *tags)
+{
+ struct net_device *bond_dev = slave->bond->dev;
+ struct net_device *slave_dev = slave->dev;
+ struct sk_buff *skb;
+
+ slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n",
+ arp_op, &dest_ip, &src_ip);
+
+ skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
+ NULL, slave_dev->dev_addr, NULL);
+
+ if (!skb) {
+ net_err_ratelimited("ARP packet allocation failed\n");
+ return;
+ }
+
+ if (bond_handle_vlan(slave, tags, skb))
+ arp_xmit(skb);
+ return;
}
/* Validate the device path between the @start_dev and the @end_dev.
@@ -2965,30 +2976,17 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
slave->target_last_arp_rx[i] = jiffies;
}
-int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
- struct slave *slave)
+static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
{
struct arphdr *arp = (struct arphdr *)skb->data;
struct slave *curr_active_slave, *curr_arp_slave;
unsigned char *arp_ptr;
__be32 sip, tip;
- int is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
unsigned int alen;
- if (!slave_do_arp_validate(bond, slave)) {
- if ((slave_do_arp_validate_only(bond) && is_arp) ||
- !slave_do_arp_validate_only(bond))
- slave->last_rx = jiffies;
- return RX_HANDLER_ANOTHER;
- } else if (!is_arp) {
- return RX_HANDLER_ANOTHER;
- }
-
alen = arp_hdr_len(bond->dev);
- slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n",
- __func__, skb->dev->name);
-
if (alen > skb_headlen(skb)) {
arp = kmalloc(alen, GFP_ATOMIC);
if (!arp)
@@ -3059,6 +3057,216 @@ out_unlock:
return RX_HANDLER_ANOTHER;
}
+#if IS_ENABLED(CONFIG_IPV6)
+static void bond_ns_send(struct slave *slave, const struct in6_addr *daddr,
+ const struct in6_addr *saddr, struct bond_vlan_tag *tags)
+{
+ struct net_device *bond_dev = slave->bond->dev;
+ struct net_device *slave_dev = slave->dev;
+ struct in6_addr mcaddr;
+ struct sk_buff *skb;
+
+ slave_dbg(bond_dev, slave_dev, "NS on slave: dst %pI6c src %pI6c\n",
+ daddr, saddr);
+
+ skb = ndisc_ns_create(slave_dev, daddr, saddr, 0);
+ if (!skb) {
+ net_err_ratelimited("NS packet allocation failed\n");
+ return;
+ }
+
+ addrconf_addr_solict_mult(daddr, &mcaddr);
+ if (bond_handle_vlan(slave, tags, skb))
+ ndisc_send_skb(skb, &mcaddr, saddr);
+}
+
+static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
+{
+ struct in6_addr *targets = bond->params.ns_targets;
+ struct bond_vlan_tag *tags;
+ struct dst_entry *dst;
+ struct in6_addr saddr;
+ struct flowi6 fl6;
+ int i;
+
+ for (i = 0; i < BOND_MAX_NS_TARGETS && !ipv6_addr_any(&targets[i]); i++) {
+ slave_dbg(bond->dev, slave->dev, "%s: target %pI6c\n",
+ __func__, &targets[i]);
+ tags = NULL;
+
+ /* Find out through which dev should the packet go */
+ memset(&fl6, 0, sizeof(struct flowi6));
+ fl6.daddr = targets[i];
+ fl6.flowi6_oif = bond->dev->ifindex;
+
+ dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6);
+ if (dst->error) {
+ dst_release(dst);
+ /* there's no route to target - try to send arp
+ * probe to generate any traffic (arp_validate=0)
+ */
+ if (bond->params.arp_validate)
+ pr_warn_once("%s: no route to ns_ip6_target %pI6c and arp_validate is set\n",
+ bond->dev->name,
+ &targets[i]);
+ bond_ns_send(slave, &targets[i], &in6addr_any, tags);
+ continue;
+ }
+
+ /* bond device itself */
+ if (dst->dev == bond->dev)
+ goto found;
+
+ rcu_read_lock();
+ tags = bond_verify_device_path(bond->dev, dst->dev, 0);
+ rcu_read_unlock();
+
+ if (!IS_ERR_OR_NULL(tags))
+ goto found;
+
+ /* Not our device - skip */
+ slave_dbg(bond->dev, slave->dev, "no path to ns_ip6_target %pI6c via dst->dev %s\n",
+ &targets[i], dst->dev ? dst->dev->name : "NULL");
+
+ dst_release(dst);
+ continue;
+
+found:
+ if (!ipv6_dev_get_saddr(dev_net(dst->dev), dst->dev, &targets[i], 0, &saddr))
+ bond_ns_send(slave, &targets[i], &saddr, tags);
+ dst_release(dst);
+ kfree(tags);
+ }
+}
+
+static int bond_confirm_addr6(struct net_device *dev,
+ struct netdev_nested_priv *priv)
+{
+ struct in6_addr *addr = (struct in6_addr *)priv->data;
+
+ return ipv6_chk_addr(dev_net(dev), addr, dev, 0);
+}
+
+static bool bond_has_this_ip6(struct bonding *bond, struct in6_addr *addr)
+{
+ struct netdev_nested_priv priv = {
+ .data = addr,
+ };
+ int ret = false;
+
+ if (bond_confirm_addr6(bond->dev, &priv))
+ return true;
+
+ rcu_read_lock();
+ if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_confirm_addr6, &priv))
+ ret = true;
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static void bond_validate_ns(struct bonding *bond, struct slave *slave,
+ struct in6_addr *saddr, struct in6_addr *daddr)
+{
+ int i;
+
+ if (ipv6_addr_any(saddr) || !bond_has_this_ip6(bond, daddr)) {
+ slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n",
+ __func__, saddr, daddr);
+ return;
+ }
+
+ i = bond_get_targets_ip6(bond->params.ns_targets, saddr);
+ if (i == -1) {
+ slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c not found in targets\n",
+ __func__, saddr);
+ return;
+ }
+ slave->last_rx = jiffies;
+ slave->target_last_arp_rx[i] = jiffies;
+}
+
+static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
+{
+ struct slave *curr_active_slave, *curr_arp_slave;
+ struct icmp6hdr *hdr = icmp6_hdr(skb);
+ struct in6_addr *saddr, *daddr;
+
+ if (skb->pkt_type == PACKET_OTHERHOST ||
+ skb->pkt_type == PACKET_LOOPBACK ||
+ hdr->icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT)
+ goto out;
+
+ saddr = &ipv6_hdr(skb)->saddr;
+ daddr = &ipv6_hdr(skb)->daddr;
+
+ slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI6c tip %pI6c\n",
+ __func__, slave->dev->name, bond_slave_state(slave),
+ bond->params.arp_validate, slave_do_arp_validate(bond, slave),
+ saddr, daddr);
+
+ curr_active_slave = rcu_dereference(bond->curr_active_slave);
+ curr_arp_slave = rcu_dereference(bond->current_arp_slave);
+
+ /* We 'trust' the received ARP enough to validate it if:
+ * see bond_arp_rcv().
+ */
+ if (bond_is_active_slave(slave))
+ bond_validate_ns(bond, slave, saddr, daddr);
+ else if (curr_active_slave &&
+ time_after(slave_last_rx(bond, curr_active_slave),
+ curr_active_slave->last_link_up))
+ bond_validate_ns(bond, slave, saddr, daddr);
+ else if (curr_arp_slave &&
+ bond_time_in_interval(bond,
+ dev_trans_start(curr_arp_slave->dev), 1))
+ bond_validate_ns(bond, slave, saddr, daddr);
+
+out:
+ return RX_HANDLER_ANOTHER;
+}
+#endif
+
+int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ bool is_ipv6 = skb->protocol == __cpu_to_be16(ETH_P_IPV6);
+#endif
+ bool is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
+
+ slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n",
+ __func__, skb->dev->name);
+
+ /* Use arp validate logic for both ARP and NS */
+ if (!slave_do_arp_validate(bond, slave)) {
+ if ((slave_do_arp_validate_only(bond) && is_arp) ||
+#if IS_ENABLED(CONFIG_IPV6)
+ (slave_do_arp_validate_only(bond) && is_ipv6) ||
+#endif
+ !slave_do_arp_validate_only(bond))
+ slave->last_rx = jiffies;
+ return RX_HANDLER_ANOTHER;
+ } else if (is_arp) {
+ return bond_arp_rcv(skb, bond, slave);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (is_ipv6) {
+ return bond_na_rcv(skb, bond, slave);
+#endif
+ } else {
+ return RX_HANDLER_ANOTHER;
+ }
+}
+
+static void bond_send_validate(struct bonding *bond, struct slave *slave)
+{
+ bond_arp_send_all(bond, slave);
+#if IS_ENABLED(CONFIG_IPV6)
+ bond_ns_send_all(bond, slave);
+#endif
+}
+
/* function to verify if we're in the arp_interval timeslice, returns true if
* (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
* arp_interval/2) . the arp_interval/2 is needed for really fast networks.
@@ -3154,7 +3362,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
* to be unstable during low/no traffic periods
*/
if (bond_slave_is_up(slave))
- bond_arp_send_all(bond, slave);
+ bond_send_validate(bond, slave);
}
rcu_read_unlock();
@@ -3368,7 +3576,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
curr_active_slave->dev->name);
if (curr_active_slave) {
- bond_arp_send_all(bond, curr_active_slave);
+ bond_send_validate(bond, curr_active_slave);
return should_notify_rtnl;
}
@@ -3420,7 +3628,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
bond_set_slave_link_state(new_slave, BOND_LINK_BACK,
BOND_SLAVE_NOTIFY_LATER);
bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
- bond_arp_send_all(bond, new_slave);
+ bond_send_validate(bond, new_slave);
new_slave->last_link_up = jiffies;
rcu_assign_pointer(bond->current_arp_slave, new_slave);
@@ -3956,7 +4164,7 @@ static int bond_open(struct net_device *bond_dev)
if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
queue_delayed_work(bond->wq, &bond->arp_work, 0);
- bond->recv_probe = bond_arp_rcv;
+ bond->recv_probe = bond_rcv_validate;
}
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
@@ -4912,7 +5120,7 @@ static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
if (xmit_suc)
return NETDEV_TX_OK;
- atomic_long_inc(&bond_dev->tx_dropped);
+ dev_core_stats_tx_dropped_inc(bond_dev);
return NET_XMIT_DROP;
}
@@ -5937,6 +6145,7 @@ static int bond_check_params(struct bond_params *params)
strscpy_pad(params->primary, primary, sizeof(params->primary));
memcpy(params->arp_targets, arp_target, sizeof(arp_target));
+ memset(params->ns_targets, 0, sizeof(struct in6_addr) * BOND_MAX_NS_TARGETS);
return 0;
}
@@ -6047,27 +6256,38 @@ static int __net_init bond_net_init(struct net *net)
return 0;
}
-static void __net_exit bond_net_exit(struct net *net)
+static void __net_exit bond_net_exit_batch(struct list_head *net_list)
{
- struct bond_net *bn = net_generic(net, bond_net_id);
- struct bonding *bond, *tmp_bond;
+ struct bond_net *bn;
+ struct net *net;
LIST_HEAD(list);
- bond_destroy_sysfs(bn);
+ list_for_each_entry(net, net_list, exit_list) {
+ bn = net_generic(net, bond_net_id);
+ bond_destroy_sysfs(bn);
+ }
/* Kill off any bonds created after unregistering bond rtnl ops */
rtnl_lock();
- list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
- unregister_netdevice_queue(bond->dev, &list);
+ list_for_each_entry(net, net_list, exit_list) {
+ struct bonding *bond, *tmp_bond;
+
+ bn = net_generic(net, bond_net_id);
+ list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
+ unregister_netdevice_queue(bond->dev, &list);
+ }
unregister_netdevice_many(&list);
rtnl_unlock();
- bond_destroy_proc_dir(bn);
+ list_for_each_entry(net, net_list, exit_list) {
+ bn = net_generic(net, bond_net_id);
+ bond_destroy_proc_dir(bn);
+ }
}
static struct pernet_operations bond_net_ops = {
.init = bond_net_init,
- .exit = bond_net_exit,
+ .exit_batch = bond_net_exit_batch,
.id = &bond_net_id,
.size = sizeof(struct bond_net),
};
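
Note on the bond_main.c additions above: bond_ns_send() builds a neighbour solicitation and, via addrconf_addr_solict_mult(), addresses it to the target's solicited-node multicast group before handing it to ndisc_send_skb(). The derivation of that group is plain RFC 4291 arithmetic; the following is a standalone userspace illustration of it (not kernel code), using only inet_pton()/inet_ntop() and an arbitrary example address.

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>

/* Solicited-node multicast address for a target: ff02::1:ffXX:XXXX,
 * where XX:XXXX are the low 24 bits of the target (RFC 4291). */
static void solicited_node_mcast(const struct in6_addr *target,
				 struct in6_addr *mcast)
{
	memset(mcast, 0, sizeof(*mcast));
	mcast->s6_addr[0]  = 0xff;
	mcast->s6_addr[1]  = 0x02;
	mcast->s6_addr[11] = 0x01;
	mcast->s6_addr[12] = 0xff;
	mcast->s6_addr[13] = target->s6_addr[13];
	mcast->s6_addr[14] = target->s6_addr[14];
	mcast->s6_addr[15] = target->s6_addr[15];
}

int main(void)
{
	struct in6_addr target, mcast;
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET6, "2001:db8::1234:5678", &target);
	solicited_node_mcast(&target, &mcast);
	printf("%s\n", inet_ntop(AF_INET6, &mcast, buf, sizeof(buf)));
	/* prints ff02::1:ff34:5678 */
	return 0;
}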
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 1007bf6d385d..f427fa1737c7 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -14,6 +14,7 @@
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include <net/bonding.h>
+#include <net/ipv6.h>
static size_t bond_get_slave_size(const struct net_device *bond_dev,
const struct net_device *slave_dev)
@@ -111,6 +112,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_TLB_DYNAMIC_LB] = { .type = NLA_U8 },
[IFLA_BOND_PEER_NOTIF_DELAY] = { .type = NLA_U32 },
[IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 },
+ [IFLA_BOND_NS_IP6_TARGET] = { .type = NLA_NESTED },
};
static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
@@ -272,6 +274,40 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
if (err)
return err;
}
+#if IS_ENABLED(CONFIG_IPV6)
+ if (data[IFLA_BOND_NS_IP6_TARGET]) {
+ struct nlattr *attr;
+ int i = 0, rem;
+
+ bond_option_ns_ip6_targets_clear(bond);
+ nla_for_each_nested(attr, data[IFLA_BOND_NS_IP6_TARGET], rem) {
+ struct in6_addr addr6;
+
+ if (nla_len(attr) < sizeof(addr6)) {
+ NL_SET_ERR_MSG(extack, "Invalid IPv6 address");
+ return -EINVAL;
+ }
+
+ addr6 = nla_get_in6_addr(attr);
+
+ if (ipv6_addr_type(&addr6) & IPV6_ADDR_LINKLOCAL) {
+ NL_SET_ERR_MSG(extack, "Invalid IPv6 addr6");
+ return -EINVAL;
+ }
+
+ bond_opt_initextra(&newval, &addr6, sizeof(addr6));
+ err = __bond_opt_set(bond, BOND_OPT_NS_TARGETS,
+ &newval);
+ if (err)
+ break;
+ i++;
+ }
+ if (i == 0 && bond->params.arp_interval)
+ netdev_warn(bond->dev, "Removing last ns target with arp_interval on\n");
+ if (err)
+ return err;
+ }
+#endif
if (data[IFLA_BOND_ARP_VALIDATE]) {
int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
@@ -526,6 +562,9 @@ static size_t bond_get_size(const struct net_device *bond_dev)
nla_total_size(sizeof(u8)) + /* IFLA_BOND_TLB_DYNAMIC_LB */
nla_total_size(sizeof(u32)) + /* IFLA_BOND_PEER_NOTIF_DELAY */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_MISSED_MAX */
+ /* IFLA_BOND_NS_IP6_TARGET */
+ nla_total_size(sizeof(struct nlattr)) +
+ nla_total_size(sizeof(struct in6_addr)) * BOND_MAX_NS_TARGETS +
0;
}
@@ -603,6 +642,26 @@ static int bond_fill_info(struct sk_buff *skb,
bond->params.arp_all_targets))
goto nla_put_failure;
+#if IS_ENABLED(CONFIG_IPV6)
+ targets = nla_nest_start(skb, IFLA_BOND_NS_IP6_TARGET);
+ if (!targets)
+ goto nla_put_failure;
+
+ targets_added = 0;
+ for (i = 0; i < BOND_MAX_NS_TARGETS; i++) {
+ if (!ipv6_addr_any(&bond->params.ns_targets[i])) {
+ if (nla_put_in6_addr(skb, i, &bond->params.ns_targets[i]))
+ goto nla_put_failure;
+ targets_added = 1;
+ }
+ }
+
+ if (targets_added)
+ nla_nest_end(skb, targets);
+ else
+ nla_nest_cancel(skb, targets);
+#endif
+
primary = rtnl_dereference(bond->primary_slave);
if (primary &&
nla_put_u32(skb, IFLA_BOND_PRIMARY, primary->dev->ifindex))
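
Note on the bond_netlink.c hunks above: each IFLA_BOND_NS_IP6_TARGET entry is validated twice, once for length (a full in6_addr) and once to reject link-local addresses. The same sanity check is easy to express in userspace; a minimal sketch assuming glibc's inet_pton() and IN6_IS_ADDR_LINKLOCAL() (the kernel itself uses ipv6_addr_type()), illustrative only.

#include <stdio.h>
#include <arpa/inet.h>
#include <netinet/in.h>

/* Parse a candidate ns_ip6_target and apply the same two checks the
 * changelink handler applies: well-formed address, not link-local. */
static int ns_target_ok(const char *str, struct in6_addr *out)
{
	if (inet_pton(AF_INET6, str, out) != 1)
		return 0;			/* malformed */
	if (IN6_IS_ADDR_LINKLOCAL(out))
		return 0;			/* rejected, as in the patch */
	return 1;
}

int main(void)
{
	const char *candidates[] = { "2001:db8::1", "fe80::1", "bogus" };
	struct in6_addr addr;
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("%-12s %s\n", candidates[i],
		       ns_target_ok(candidates[i], &addr) ? "ok" : "rejected");
	return 0;
}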
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 2e8484a91a0e..64f7db2627ce 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -34,6 +34,10 @@ static int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target);
static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target);
static int bond_option_arp_ip_targets_set(struct bonding *bond,
const struct bond_opt_value *newval);
+#if IS_ENABLED(CONFIG_IPV6)
+static int bond_option_ns_ip6_targets_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+#endif
static int bond_option_arp_validate_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_arp_all_targets_set(struct bonding *bond,
@@ -295,6 +299,15 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.flags = BOND_OPTFLAG_RAWVAL,
.set = bond_option_arp_ip_targets_set
},
+#if IS_ENABLED(CONFIG_IPV6)
+ [BOND_OPT_NS_TARGETS] = {
+ .id = BOND_OPT_NS_TARGETS,
+ .name = "ns_ip6_target",
+ .desc = "NS targets in ffff:ffff::ffff:ffff form",
+ .flags = BOND_OPTFLAG_RAWVAL,
+ .set = bond_option_ns_ip6_targets_set
+ },
+#endif
[BOND_OPT_DOWNDELAY] = {
.id = BOND_OPT_DOWNDELAY,
.name = "downdelay",
@@ -1052,7 +1065,7 @@ static int bond_option_arp_interval_set(struct bonding *bond,
cancel_delayed_work_sync(&bond->arp_work);
} else {
/* arp_validate can be set only in active-backup mode */
- bond->recv_probe = bond_arp_rcv;
+ bond->recv_probe = bond_rcv_validate;
cancel_delayed_work_sync(&bond->mii_work);
queue_delayed_work(bond->wq, &bond->arp_work, 0);
}
@@ -1184,6 +1197,65 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond,
return ret;
}
+#if IS_ENABLED(CONFIG_IPV6)
+static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot,
+ struct in6_addr *target,
+ unsigned long last_rx)
+{
+ struct in6_addr *targets = bond->params.ns_targets;
+ struct list_head *iter;
+ struct slave *slave;
+
+ if (slot >= 0 && slot < BOND_MAX_NS_TARGETS) {
+ bond_for_each_slave(bond, slave, iter)
+ slave->target_last_arp_rx[slot] = last_rx;
+ targets[slot] = *target;
+ }
+}
+
+void bond_option_ns_ip6_targets_clear(struct bonding *bond)
+{
+ struct in6_addr addr_any = in6addr_any;
+ int i;
+
+ for (i = 0; i < BOND_MAX_NS_TARGETS; i++)
+ _bond_options_ns_ip6_target_set(bond, i, &addr_any, 0);
+}
+
+static int bond_option_ns_ip6_targets_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ struct in6_addr *target = (struct in6_addr *)newval->extra;
+ struct in6_addr *targets = bond->params.ns_targets;
+ struct in6_addr addr_any = in6addr_any;
+ int index;
+
+ if (!bond_is_ip6_target_ok(target)) {
+ netdev_err(bond->dev, "invalid NS target %pI6c specified for addition\n",
+ target);
+ return -EINVAL;
+ }
+
+ if (bond_get_targets_ip6(targets, target) != -1) { /* dup */
+ netdev_err(bond->dev, "NS target %pI6c is already present\n",
+ target);
+ return -EINVAL;
+ }
+
+ index = bond_get_targets_ip6(targets, &addr_any); /* first free slot */
+ if (index == -1) {
+ netdev_err(bond->dev, "NS target table is full!\n");
+ return -EINVAL;
+ }
+
+ netdev_dbg(bond->dev, "Adding NS target %pI6c\n", target);
+
+ _bond_options_ns_ip6_target_set(bond, index, target, jiffies);
+
+ return 0;
+}
+#endif
+
static int bond_option_arp_validate_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
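
Note on the bond_options.c hunks above: bond_option_ns_ip6_targets_set() manages the fixed ns_targets table by rejecting duplicates and then claiming the first free slot, where a free slot is simply in6addr_any. A compact userspace sketch of that slot discipline follows; the table size and helper names here are illustrative and are not the bonding driver's.

#include <string.h>
#include <netinet/in.h>

#define MAX_NS_TARGETS 16		/* illustrative size only */

/* Return the slot holding @addr, or -1. An all-zeros entry (::) marks a
 * free slot, matching the role in6addr_any plays in the patch. */
static int find_slot(const struct in6_addr *tbl, const struct in6_addr *addr)
{
	int i;

	for (i = 0; i < MAX_NS_TARGETS; i++)
		if (!memcmp(&tbl[i], addr, sizeof(*addr)))
			return i;
	return -1;
}

/* Add a target: refuse duplicates, then take the first free slot;
 * return the slot used or -1 when the table is full. */
static int add_target(struct in6_addr *tbl, const struct in6_addr *addr)
{
	const struct in6_addr any = IN6ADDR_ANY_INIT;
	int slot;

	if (find_slot(tbl, addr) != -1)
		return -1;
	slot = find_slot(tbl, &any);
	if (slot == -1)
		return -1;
	tbl[slot] = *addr;
	return slot;
}

Clearing a target, as bond_option_ns_ip6_targets_clear() does, then amounts to writing the all-zeros address back into each slot.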
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 46b150e6289e..cfe37be42be4 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -307,7 +307,6 @@ void __net_init bond_create_proc_dir(struct bond_net *bn)
}
/* Destroy the bonding directory under /proc/net, if empty.
- * Caller must hold rtnl_lock.
*/
void __net_exit bond_destroy_proc_dir(struct bond_net *bn)
{
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 6a6cdd0bb258..69b0a3751dff 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -15,14 +15,8 @@ struct slave_attribute {
ssize_t (*show)(struct slave *, char *);
};
-#define SLAVE_ATTR(_name, _mode, _show) \
-const struct slave_attribute slave_attr_##_name = { \
- .attr = {.name = __stringify(_name), \
- .mode = _mode }, \
- .show = _show, \
-};
#define SLAVE_ATTR_RO(_name) \
- SLAVE_ATTR(_name, 0444, _name##_show)
+const struct slave_attribute slave_attr_##_name = __ATTR_RO(_name)
static ssize_t state_show(struct slave *slave, char *buf)
{
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 2a7af611d43a..688075859ae4 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -196,7 +196,7 @@ static void ldisc_receive(struct tty_struct *tty, const u8 *data,
skb_reset_mac_header(skb);
debugfs_rx(ser, data, count);
/* Push received packet up the stack. */
- ret = netif_rx_ni(skb);
+ ret = netif_rx(skb);
if (!ret) {
ser->dev->stats.rx_packets++;
ser->dev->stats.rx_bytes += count;
diff --git a/drivers/net/can/c_can/c_can_ethtool.c b/drivers/net/can/c_can/c_can_ethtool.c
index 6655146294fc..8a826a6813bd 100644
--- a/drivers/net/can/c_can/c_can_ethtool.c
+++ b/drivers/net/can/c_can/c_can_ethtool.c
@@ -11,14 +11,6 @@
#include "c_can.h"
-static void c_can_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *info)
-{
- struct c_can_priv *priv = netdev_priv(netdev);
- strscpy(info->driver, "c_can", sizeof(info->driver));
- strscpy(info->bus_info, dev_name(priv->device), sizeof(info->bus_info));
-}
-
static void c_can_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
@@ -33,7 +25,6 @@ static void c_can_get_ringparam(struct net_device *netdev,
}
static const struct ethtool_ops c_can_ethtool_ops = {
- .get_drvinfo = c_can_get_drvinfo,
.get_ringparam = c_can_get_ringparam,
};
diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c
index d5fca3bfaf9a..2103bcca9012 100644
--- a/drivers/net/can/dev/bittiming.c
+++ b/drivers/net/can/dev/bittiming.c
@@ -24,7 +24,7 @@
*/
static int
can_update_sample_point(const struct can_bittiming_const *btc,
- unsigned int sample_point_nominal, unsigned int tseg,
+ const unsigned int sample_point_nominal, const unsigned int tseg,
unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
unsigned int *sample_point_error_ptr)
{
@@ -63,7 +63,7 @@ can_update_sample_point(const struct can_bittiming_const *btc,
return best_sample_point;
}
-int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
+int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc)
{
struct can_priv *priv = netdev_priv(dev);
@@ -208,10 +208,10 @@ void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
* prescaler value brp. You can find more information in the header
* file linux/can/netlink.h.
*/
-static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
+static int can_fixup_bittiming(const struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc)
{
- struct can_priv *priv = netdev_priv(dev);
+ const struct can_priv *priv = netdev_priv(dev);
unsigned int tseg1, alltseg;
u64 brp64;
@@ -244,25 +244,21 @@ static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
/* Checks the validity of predefined bitrate settings */
static int
-can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt,
+can_validate_bitrate(const struct net_device *dev, const struct can_bittiming *bt,
const u32 *bitrate_const,
const unsigned int bitrate_const_cnt)
{
- struct can_priv *priv = netdev_priv(dev);
unsigned int i;
for (i = 0; i < bitrate_const_cnt; i++) {
if (bt->bitrate == bitrate_const[i])
- break;
+ return 0;
}
- if (i >= priv->bitrate_const_cnt)
- return -EINVAL;
-
- return 0;
+ return -EINVAL;
}
-int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
+int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc,
const u32 *bitrate_const,
const unsigned int bitrate_const_cnt)
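
Note on the bittiming.c hunks above: they constify the helpers and simplify can_validate_bitrate(), but the quantity the file revolves around is the sample point that can_update_sample_point() steers toward its nominal value. For reference, a tiny standalone computation of a CAN sample point in tenths of a percent from the time-segment lengths (standard CAN bit layout: one sync quantum, then prop_seg, phase_seg1 and phase_seg2, sampled at the end of phase_seg1); the example values are arbitrary, not taken from the driver.

#include <stdio.h>

/* Sample point in tenths of a percent for a bit made of
 * 1 (sync) + prop_seg + phase_seg1 + phase_seg2 time quanta,
 * sampled at the end of phase_seg1. */
static unsigned int sample_point_tenths(unsigned int prop_seg,
					unsigned int phase_seg1,
					unsigned int phase_seg2)
{
	unsigned int total_tq = 1 + prop_seg + phase_seg1 + phase_seg2;

	return (1 + prop_seg + phase_seg1) * 1000 / total_tq;
}

int main(void)
{
	/* 1 + 6 + 7 + 2 = 16 tq, sampled after 14 tq -> 875, i.e. 87.5% */
	printf("%u\n", sample_point_tenths(6, 7, 2));
	return 0;
}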
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index c192f25f9695..e7ab45f1c43b 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -154,7 +154,7 @@ static void can_restart(struct net_device *dev)
cf->can_id |= CAN_ERR_RESTARTED;
- netif_rx_ni(skb);
+ netif_rx(skb);
restart:
netdev_dbg(dev, "restarted\n");
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index acd74725831f..1e121e04208c 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -44,6 +44,7 @@
enum rcanfd_chip_id {
RENESAS_RCAR_GEN3 = 0,
RENESAS_RZG2L,
+ RENESAS_R8A779A0,
};
/* Global register bits */
@@ -79,6 +80,7 @@ enum rcanfd_chip_id {
#define RCANFD_GSTS_GNOPM (BIT(0) | BIT(1) | BIT(2) | BIT(3))
/* RSCFDnCFDGERFL / RSCFDnGERFL */
+#define RCANFD_GERFL_EEF0_7 GENMASK(23, 16)
#define RCANFD_GERFL_EEF1 BIT(17)
#define RCANFD_GERFL_EEF0 BIT(16)
#define RCANFD_GERFL_CMPOF BIT(3) /* CAN FD only */
@@ -86,20 +88,26 @@ enum rcanfd_chip_id {
#define RCANFD_GERFL_MES BIT(1)
#define RCANFD_GERFL_DEF BIT(0)
-#define RCANFD_GERFL_ERR(gpriv, x) ((x) & (RCANFD_GERFL_EEF1 |\
- RCANFD_GERFL_EEF0 | RCANFD_GERFL_MES |\
- (gpriv->fdmode ?\
- RCANFD_GERFL_CMPOF : 0)))
+#define RCANFD_GERFL_ERR(gpriv, x) \
+ ((x) & (reg_v3u(gpriv, RCANFD_GERFL_EEF0_7, \
+ RCANFD_GERFL_EEF0 | RCANFD_GERFL_EEF1) | \
+ RCANFD_GERFL_MES | \
+ ((gpriv)->fdmode ? RCANFD_GERFL_CMPOF : 0)))
/* AFL Rx rules registers */
/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
-#define RCANFD_GAFLCFG_SETRNC(n, x) (((x) & 0xff) << (24 - n * 8))
-#define RCANFD_GAFLCFG_GETRNC(n, x) (((x) >> (24 - n * 8)) & 0xff)
+#define RCANFD_GAFLCFG_SETRNC(gpriv, n, x) \
+ (((x) & reg_v3u(gpriv, 0x1ff, 0xff)) << \
+ (reg_v3u(gpriv, 16, 24) - (n) * reg_v3u(gpriv, 16, 8)))
+
+#define RCANFD_GAFLCFG_GETRNC(gpriv, n, x) \
+ (((x) >> (reg_v3u(gpriv, 16, 24) - (n) * reg_v3u(gpriv, 16, 8))) & \
+ reg_v3u(gpriv, 0x1ff, 0xff))
/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
#define RCANFD_GAFLECTR_AFLDAE BIT(8)
-#define RCANFD_GAFLECTR_AFLPN(x) ((x) & 0x1f)
+#define RCANFD_GAFLECTR_AFLPN(gpriv, x) ((x) & reg_v3u(gpriv, 0x7f, 0x1f))
/* RSCFDnCFDGAFLIDj / RSCFDnGAFLIDj */
#define RCANFD_GAFLID_GAFLLB BIT(29)
@@ -116,9 +124,15 @@ enum rcanfd_chip_id {
#define RCANFD_CFG_BRP(x) (((x) & 0x3ff) << 0)
/* RSCFDnCFDCmNCFG - CAN FD only */
-#define RCANFD_NCFG_NTSEG2(x) (((x) & 0x1f) << 24)
-#define RCANFD_NCFG_NTSEG1(x) (((x) & 0x7f) << 16)
-#define RCANFD_NCFG_NSJW(x) (((x) & 0x1f) << 11)
+#define RCANFD_NCFG_NTSEG2(gpriv, x) \
+ (((x) & reg_v3u(gpriv, 0x7f, 0x1f)) << reg_v3u(gpriv, 25, 24))
+
+#define RCANFD_NCFG_NTSEG1(gpriv, x) \
+ (((x) & reg_v3u(gpriv, 0xff, 0x7f)) << reg_v3u(gpriv, 17, 16))
+
+#define RCANFD_NCFG_NSJW(gpriv, x) \
+ (((x) & reg_v3u(gpriv, 0x7f, 0x1f)) << reg_v3u(gpriv, 10, 11))
+
#define RCANFD_NCFG_NBRP(x) (((x) & 0x3ff) << 0)
/* RSCFDnCFDCmCTR / RSCFDnCmCTR */
@@ -180,11 +194,18 @@ enum rcanfd_chip_id {
/* RSCFDnCFDCmDCFG */
#define RCANFD_DCFG_DSJW(x) (((x) & 0x7) << 24)
-#define RCANFD_DCFG_DTSEG2(x) (((x) & 0x7) << 20)
-#define RCANFD_DCFG_DTSEG1(x) (((x) & 0xf) << 16)
+
+#define RCANFD_DCFG_DTSEG2(gpriv, x) \
+ (((x) & reg_v3u(gpriv, 0x0f, 0x7)) << reg_v3u(gpriv, 16, 20))
+
+#define RCANFD_DCFG_DTSEG1(gpriv, x) \
+ (((x) & reg_v3u(gpriv, 0x1f, 0xf)) << reg_v3u(gpriv, 8, 16))
+
#define RCANFD_DCFG_DBRP(x) (((x) & 0xff) << 0)
/* RSCFDnCFDCmFDCFG */
+#define RCANFD_FDCFG_CLOE BIT(30)
+#define RCANFD_FDCFG_FDOE BIT(28)
#define RCANFD_FDCFG_TDCE BIT(9)
#define RCANFD_FDCFG_TDCOC BIT(8)
#define RCANFD_FDCFG_TDCO(x) (((x) & 0x7f) >> 16)
@@ -219,10 +240,10 @@ enum rcanfd_chip_id {
/* Common FIFO bits */
/* RSCFDnCFDCFCCk */
-#define RCANFD_CFCC_CFTML(x) (((x) & 0xf) << 20)
-#define RCANFD_CFCC_CFM(x) (((x) & 0x3) << 16)
+#define RCANFD_CFCC_CFTML(gpriv, x) (((x) & 0xf) << reg_v3u(gpriv, 16, 20))
+#define RCANFD_CFCC_CFM(gpriv, x) (((x) & 0x3) << reg_v3u(gpriv, 8, 16))
#define RCANFD_CFCC_CFIM BIT(12)
-#define RCANFD_CFCC_CFDC(x) (((x) & 0x7) << 8)
+#define RCANFD_CFCC_CFDC(gpriv, x) (((x) & 0x7) << reg_v3u(gpriv, 21, 8))
#define RCANFD_CFCC_CFPLS(x) (((x) & 0x7) << 4)
#define RCANFD_CFCC_CFTXIE BIT(2)
#define RCANFD_CFCC_CFE BIT(0)
@@ -282,33 +303,31 @@ enum rcanfd_chip_id {
#define RCANFD_GTSC (0x0094)
/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
#define RCANFD_GAFLECTR (0x0098)
-/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
-#define RCANFD_GAFLCFG0 (0x009c)
-/* RSCFDnCFDGAFLCFG1 / RSCFDnGAFLCFG1 */
-#define RCANFD_GAFLCFG1 (0x00a0)
+/* RSCFDnCFDGAFLCFG / RSCFDnGAFLCFG */
+#define RCANFD_GAFLCFG(ch) (0x009c + (0x04 * ((ch) / 2)))
/* RSCFDnCFDRMNB / RSCFDnRMNB */
#define RCANFD_RMNB (0x00a4)
/* RSCFDnCFDRMND / RSCFDnRMND */
#define RCANFD_RMND(y) (0x00a8 + (0x04 * (y)))
/* RSCFDnCFDRFCCx / RSCFDnRFCCx */
-#define RCANFD_RFCC(x) (0x00b8 + (0x04 * (x)))
+#define RCANFD_RFCC(gpriv, x) (reg_v3u(gpriv, 0x00c0, 0x00b8) + (0x04 * (x)))
/* RSCFDnCFDRFSTSx / RSCFDnRFSTSx */
-#define RCANFD_RFSTS(x) (0x00d8 + (0x04 * (x)))
+#define RCANFD_RFSTS(gpriv, x) (RCANFD_RFCC(gpriv, x) + 0x20)
/* RSCFDnCFDRFPCTRx / RSCFDnRFPCTRx */
-#define RCANFD_RFPCTR(x) (0x00f8 + (0x04 * (x)))
+#define RCANFD_RFPCTR(gpriv, x) (RCANFD_RFCC(gpriv, x) + 0x40)
/* Common FIFO Control registers */
/* RSCFDnCFDCFCCx / RSCFDnCFCCx */
-#define RCANFD_CFCC(ch, idx) (0x0118 + (0x0c * (ch)) + \
- (0x04 * (idx)))
+#define RCANFD_CFCC(gpriv, ch, idx) \
+ (reg_v3u(gpriv, 0x0120, 0x0118) + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDCFSTSx / RSCFDnCFSTSx */
-#define RCANFD_CFSTS(ch, idx) (0x0178 + (0x0c * (ch)) + \
- (0x04 * (idx)))
+#define RCANFD_CFSTS(gpriv, ch, idx) \
+ (reg_v3u(gpriv, 0x01e0, 0x0178) + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDCFPCTRx / RSCFDnCFPCTRx */
-#define RCANFD_CFPCTR(ch, idx) (0x01d8 + (0x0c * (ch)) + \
- (0x04 * (idx)))
+#define RCANFD_CFPCTR(gpriv, ch, idx) \
+ (reg_v3u(gpriv, 0x0240, 0x01d8) + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDFESTS / RSCFDnFESTS */
#define RCANFD_FESTS (0x0238)
@@ -387,22 +406,23 @@ enum rcanfd_chip_id {
#define RCANFD_C_RMDF1(q) (0x060c + (0x10 * (q)))
/* RSCFDnRFXXx -> RCANFD_C_RFXX(x) */
-#define RCANFD_C_RFOFFSET (0x0e00)
-#define RCANFD_C_RFID(x) (RCANFD_C_RFOFFSET + (0x10 * (x)))
-#define RCANFD_C_RFPTR(x) (RCANFD_C_RFOFFSET + 0x04 + \
- (0x10 * (x)))
-#define RCANFD_C_RFDF(x, df) (RCANFD_C_RFOFFSET + 0x08 + \
- (0x10 * (x)) + (0x04 * (df)))
+#define RCANFD_C_RFOFFSET (0x0e00)
+#define RCANFD_C_RFID(x) (RCANFD_C_RFOFFSET + (0x10 * (x)))
+#define RCANFD_C_RFPTR(x) (RCANFD_C_RFOFFSET + 0x04 + (0x10 * (x)))
+#define RCANFD_C_RFDF(x, df) \
+ (RCANFD_C_RFOFFSET + 0x08 + (0x10 * (x)) + (0x04 * (df)))
/* RSCFDnCFXXk -> RCANFD_C_CFXX(ch, k) */
#define RCANFD_C_CFOFFSET (0x0e80)
-#define RCANFD_C_CFID(ch, idx) (RCANFD_C_CFOFFSET + (0x30 * (ch)) + \
- (0x10 * (idx)))
-#define RCANFD_C_CFPTR(ch, idx) (RCANFD_C_CFOFFSET + 0x04 + \
- (0x30 * (ch)) + (0x10 * (idx)))
-#define RCANFD_C_CFDF(ch, idx, df) (RCANFD_C_CFOFFSET + 0x08 + \
- (0x30 * (ch)) + (0x10 * (idx)) + \
- (0x04 * (df)))
+
+#define RCANFD_C_CFID(ch, idx) \
+ (RCANFD_C_CFOFFSET + (0x30 * (ch)) + (0x10 * (idx)))
+
+#define RCANFD_C_CFPTR(ch, idx) \
+ (RCANFD_C_CFOFFSET + 0x04 + (0x30 * (ch)) + (0x10 * (idx)))
+
+#define RCANFD_C_CFDF(ch, idx, df) \
+ (RCANFD_C_CFOFFSET + 0x08 + (0x30 * (ch)) + (0x10 * (idx)) + (0x04 * (df)))
/* RSCFDnTMXXp -> RCANFD_C_TMXX(p) */
#define RCANFD_C_TMID(p) (0x1000 + (0x10 * (p)))
@@ -415,6 +435,12 @@ enum rcanfd_chip_id {
/* RSCFDnRPGACCr */
#define RCANFD_C_RPGACC(r) (0x1900 + (0x04 * (r)))
+/* R-Car V3U Classical and CAN FD mode specific register map */
+#define RCANFD_V3U_CFDCFG (0x1314)
+#define RCANFD_V3U_DCFG(m) (0x1400 + (0x20 * (m)))
+
+#define RCANFD_V3U_GAFL_OFFSET (0x1800)
+
/* CAN FD mode specific register map */
/* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */
@@ -434,26 +460,28 @@ enum rcanfd_chip_id {
#define RCANFD_F_RMDF(q, b) (0x200c + (0x04 * (b)) + (0x20 * (q)))
/* RSCFDnCFDRFXXx -> RCANFD_F_RFXX(x) */
-#define RCANFD_F_RFOFFSET (0x3000)
-#define RCANFD_F_RFID(x) (RCANFD_F_RFOFFSET + (0x80 * (x)))
-#define RCANFD_F_RFPTR(x) (RCANFD_F_RFOFFSET + 0x04 + \
- (0x80 * (x)))
-#define RCANFD_F_RFFDSTS(x) (RCANFD_F_RFOFFSET + 0x08 + \
- (0x80 * (x)))
-#define RCANFD_F_RFDF(x, df) (RCANFD_F_RFOFFSET + 0x0c + \
- (0x80 * (x)) + (0x04 * (df)))
+#define RCANFD_F_RFOFFSET(gpriv) reg_v3u(gpriv, 0x6000, 0x3000)
+#define RCANFD_F_RFID(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + (0x80 * (x)))
+#define RCANFD_F_RFPTR(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x04 + (0x80 * (x)))
+#define RCANFD_F_RFFDSTS(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x08 + (0x80 * (x)))
+#define RCANFD_F_RFDF(gpriv, x, df) \
+ (RCANFD_F_RFOFFSET(gpriv) + 0x0c + (0x80 * (x)) + (0x04 * (df)))
/* RSCFDnCFDCFXXk -> RCANFD_F_CFXX(ch, k) */
-#define RCANFD_F_CFOFFSET (0x3400)
-#define RCANFD_F_CFID(ch, idx) (RCANFD_F_CFOFFSET + (0x180 * (ch)) + \
- (0x80 * (idx)))
-#define RCANFD_F_CFPTR(ch, idx) (RCANFD_F_CFOFFSET + 0x04 + \
- (0x180 * (ch)) + (0x80 * (idx)))
-#define RCANFD_F_CFFDCSTS(ch, idx) (RCANFD_F_CFOFFSET + 0x08 + \
- (0x180 * (ch)) + (0x80 * (idx)))
-#define RCANFD_F_CFDF(ch, idx, df) (RCANFD_F_CFOFFSET + 0x0c + \
- (0x180 * (ch)) + (0x80 * (idx)) + \
- (0x04 * (df)))
+#define RCANFD_F_CFOFFSET(gpriv) reg_v3u(gpriv, 0x6400, 0x3400)
+
+#define RCANFD_F_CFID(gpriv, ch, idx) \
+ (RCANFD_F_CFOFFSET(gpriv) + (0x180 * (ch)) + (0x80 * (idx)))
+
+#define RCANFD_F_CFPTR(gpriv, ch, idx) \
+ (RCANFD_F_CFOFFSET(gpriv) + 0x04 + (0x180 * (ch)) + (0x80 * (idx)))
+
+#define RCANFD_F_CFFDCSTS(gpriv, ch, idx) \
+ (RCANFD_F_CFOFFSET(gpriv) + 0x08 + (0x180 * (ch)) + (0x80 * (idx)))
+
+#define RCANFD_F_CFDF(gpriv, ch, idx, df) \
+ (RCANFD_F_CFOFFSET(gpriv) + 0x0c + (0x180 * (ch)) + (0x80 * (idx)) + \
+ (0x04 * (df)))
/* RSCFDnCFDTMXXp -> RCANFD_F_TMXX(p) */
#define RCANFD_F_TMID(p) (0x4000 + (0x20 * (p)))
@@ -470,7 +498,7 @@ enum rcanfd_chip_id {
#define RCANFD_FIFO_DEPTH 8 /* Tx FIFO depth */
#define RCANFD_NAPI_WEIGHT 8 /* Rx poll quota */
-#define RCANFD_NUM_CHANNELS 2 /* Two channels max */
+#define RCANFD_NUM_CHANNELS 8 /* Eight channels max */
#define RCANFD_CHANNELS_MASK BIT((RCANFD_NUM_CHANNELS) - 1)
#define RCANFD_GAFL_PAGENUM(entry) ((entry) / 16)
@@ -521,6 +549,7 @@ struct rcar_canfd_global {
struct reset_control *rstc1;
struct reset_control *rstc2;
enum rcanfd_chip_id chip_id;
+ u32 max_channels;
};
/* CAN FD mode nominal rate constants */
@@ -563,6 +592,17 @@ static const struct can_bittiming_const rcar_canfd_bittiming_const = {
};
/* Helper functions */
+static inline bool is_v3u(struct rcar_canfd_global *gpriv)
+{
+ return gpriv->chip_id == RENESAS_R8A779A0;
+}
+
+static inline u32 reg_v3u(struct rcar_canfd_global *gpriv,
+ u32 v3u, u32 not_v3u)
+{
+ return is_v3u(gpriv) ? v3u : not_v3u;
+}
+
static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg)
{
u32 data = readl(reg);
@@ -628,6 +668,25 @@ static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
can_free_echo_skb(ndev, i, NULL);
}
+static void rcar_canfd_set_mode(struct rcar_canfd_global *gpriv)
+{
+ if (is_v3u(gpriv)) {
+ if (gpriv->fdmode)
+ rcar_canfd_set_bit(gpriv->base, RCANFD_V3U_CFDCFG,
+ RCANFD_FDCFG_FDOE);
+ else
+ rcar_canfd_set_bit(gpriv->base, RCANFD_V3U_CFDCFG,
+ RCANFD_FDCFG_CLOE);
+ } else {
+ if (gpriv->fdmode)
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
+ RCANFD_GRMCFG_RCMC);
+ else
+ rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG,
+ RCANFD_GRMCFG_RCMC);
+ }
+}
+
static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
{
u32 sts, ch;
@@ -660,15 +719,10 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0x0);
/* Set the controller into appropriate mode */
- if (gpriv->fdmode)
- rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
- RCANFD_GRMCFG_RCMC);
- else
- rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG,
- RCANFD_GRMCFG_RCMC);
+ rcar_canfd_set_mode(gpriv);
/* Transition all Channels to reset mode */
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) {
rcar_canfd_clear_bit(gpriv->base,
RCANFD_CCTR(ch), RCANFD_CCTR_CSLPR);
@@ -709,7 +763,7 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv)
rcar_canfd_set_bit(gpriv->base, RCANFD_GCFG, cfg);
/* Channel configuration settings */
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) {
rcar_canfd_set_bit(gpriv->base, RCANFD_CCTR(ch),
RCANFD_CCTR_ERRD);
rcar_canfd_update_bit(gpriv->base, RCANFD_CCTR(ch),
@@ -729,20 +783,22 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
start = 0; /* Channel 0 always starts from 0th rule */
} else {
/* Get number of Channel 0 rules and adjust */
- cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG0);
- start = RCANFD_GAFLCFG_GETRNC(0, cfg);
+ cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG(ch));
+ start = RCANFD_GAFLCFG_GETRNC(gpriv, 0, cfg);
}
/* Enable write access to entry */
page = RCANFD_GAFL_PAGENUM(start);
rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLECTR,
- (RCANFD_GAFLECTR_AFLPN(page) |
+ (RCANFD_GAFLECTR_AFLPN(gpriv, page) |
RCANFD_GAFLECTR_AFLDAE));
/* Write number of rules for channel */
- rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG0,
- RCANFD_GAFLCFG_SETRNC(ch, num_rules));
- if (gpriv->fdmode)
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG(ch),
+ RCANFD_GAFLCFG_SETRNC(gpriv, ch, num_rules));
+ if (is_v3u(gpriv))
+ offset = RCANFD_V3U_GAFL_OFFSET;
+ else if (gpriv->fdmode)
offset = RCANFD_F_GAFL_OFFSET;
else
offset = RCANFD_C_GAFL_OFFSET;
@@ -754,8 +810,8 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
/* Any data length accepted */
rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, start), 0);
/* Place the msg in corresponding Rx FIFO entry */
- rcar_canfd_write(gpriv->base, RCANFD_GAFLP1(offset, start),
- RCANFD_GAFLP1_GAFLFDP(ridx));
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, start),
+ RCANFD_GAFLP1_GAFLFDP(ridx));
/* Disable write access to page */
rcar_canfd_clear_bit(gpriv->base,
@@ -779,7 +835,7 @@ static void rcar_canfd_configure_rx(struct rcar_canfd_global *gpriv, u32 ch)
cfg = (RCANFD_RFCC_RFIM | RCANFD_RFCC_RFDC(rfdc) |
RCANFD_RFCC_RFPLS(rfpls) | RCANFD_RFCC_RFIE);
- rcar_canfd_write(gpriv->base, RCANFD_RFCC(ridx), cfg);
+ rcar_canfd_write(gpriv->base, RCANFD_RFCC(gpriv, ridx), cfg);
}
static void rcar_canfd_configure_tx(struct rcar_canfd_global *gpriv, u32 ch)
@@ -801,15 +857,15 @@ static void rcar_canfd_configure_tx(struct rcar_canfd_global *gpriv, u32 ch)
else
cfpls = 0; /* b000 - Max 8 bytes payload */
- cfg = (RCANFD_CFCC_CFTML(cftml) | RCANFD_CFCC_CFM(cfm) |
- RCANFD_CFCC_CFIM | RCANFD_CFCC_CFDC(cfdc) |
+ cfg = (RCANFD_CFCC_CFTML(gpriv, cftml) | RCANFD_CFCC_CFM(gpriv, cfm) |
+ RCANFD_CFCC_CFIM | RCANFD_CFCC_CFDC(gpriv, cfdc) |
RCANFD_CFCC_CFPLS(cfpls) | RCANFD_CFCC_CFTXIE);
- rcar_canfd_write(gpriv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX), cfg);
+ rcar_canfd_write(gpriv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX), cfg);
if (gpriv->fdmode)
/* Clear FD mode specific control/status register */
rcar_canfd_write(gpriv->base,
- RCANFD_F_CFFDCSTS(ch, RCANFD_CFFIFO_IDX), 0);
+ RCANFD_F_CFFDCSTS(gpriv, ch, RCANFD_CFFIFO_IDX), 0);
}
static void rcar_canfd_enable_global_interrupts(struct rcar_canfd_global *gpriv)
@@ -890,20 +946,20 @@ static void rcar_canfd_global_error(struct net_device *ndev)
}
if (gerfl & RCANFD_GERFL_MES) {
sts = rcar_canfd_read(priv->base,
- RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
+ RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX));
if (sts & RCANFD_CFSTS_CFMLT) {
netdev_dbg(ndev, "Tx Message Lost flag\n");
stats->tx_dropped++;
rcar_canfd_write(priv->base,
- RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX),
+ RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX),
sts & ~RCANFD_CFSTS_CFMLT);
}
- sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
+ sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx));
if (sts & RCANFD_RFSTS_RFMLT) {
netdev_dbg(ndev, "Rx Message Lost flag\n");
stats->rx_dropped++;
- rcar_canfd_write(priv->base, RCANFD_RFSTS(ridx),
+ rcar_canfd_write(priv->base, RCANFD_RFSTS(gpriv, ridx),
sts & ~RCANFD_RFSTS_RFMLT);
}
}
@@ -1038,6 +1094,7 @@ static void rcar_canfd_error(struct net_device *ndev, u32 cerfl,
static void rcar_canfd_tx_done(struct net_device *ndev)
{
struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
struct net_device_stats *stats = &ndev->stats;
u32 sts;
unsigned long flags;
@@ -1053,7 +1110,7 @@ static void rcar_canfd_tx_done(struct net_device *ndev)
spin_lock_irqsave(&priv->tx_lock, flags);
priv->tx_tail++;
sts = rcar_canfd_read(priv->base,
- RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
+ RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX));
unsent = RCANFD_CFSTS_CFMC(sts);
/* Wake producer only when there is room */
@@ -1069,7 +1126,7 @@ static void rcar_canfd_tx_done(struct net_device *ndev)
} while (1);
/* Clear interrupt */
- rcar_canfd_write(priv->base, RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX),
+ rcar_canfd_write(priv->base, RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX),
sts & ~RCANFD_CFSTS_CFTXIF);
can_led_event(ndev, CAN_LED_EVENT_TX);
}
@@ -1091,7 +1148,7 @@ static irqreturn_t rcar_canfd_global_err_interrupt(int irq, void *dev_id)
struct rcar_canfd_global *gpriv = dev_id;
u32 ch;
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels)
rcar_canfd_handle_global_err(gpriv, ch);
return IRQ_HANDLED;
@@ -1104,12 +1161,12 @@ static void rcar_canfd_handle_global_receive(struct rcar_canfd_global *gpriv, u3
u32 sts;
/* Handle Rx interrupts */
- sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
+ sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx));
if (likely(sts & RCANFD_RFSTS_RFIF)) {
if (napi_schedule_prep(&priv->napi)) {
/* Disable Rx FIFO interrupts */
rcar_canfd_clear_bit(priv->base,
- RCANFD_RFCC(ridx),
+ RCANFD_RFCC(gpriv, ridx),
RCANFD_RFCC_RFIE);
__napi_schedule(&priv->napi);
}
@@ -1121,7 +1178,7 @@ static irqreturn_t rcar_canfd_global_receive_fifo_interrupt(int irq, void *dev_i
struct rcar_canfd_global *gpriv = dev_id;
u32 ch;
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels)
rcar_canfd_handle_global_receive(gpriv, ch);
return IRQ_HANDLED;
@@ -1135,7 +1192,7 @@ static irqreturn_t rcar_canfd_global_interrupt(int irq, void *dev_id)
/* Global error interrupts still indicate a condition specific
* to a channel. RxFIFO interrupt is a global interrupt.
*/
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) {
rcar_canfd_handle_global_err(gpriv, ch);
rcar_canfd_handle_global_receive(gpriv, ch);
}
@@ -1181,7 +1238,7 @@ static void rcar_canfd_handle_channel_tx(struct rcar_canfd_global *gpriv, u32 ch
/* Handle Tx interrupts */
sts = rcar_canfd_read(priv->base,
- RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
+ RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX));
if (likely(sts & RCANFD_CFSTS_CFTXIF))
rcar_canfd_tx_done(ndev);
}
@@ -1191,7 +1248,7 @@ static irqreturn_t rcar_canfd_channel_tx_interrupt(int irq, void *dev_id)
struct rcar_canfd_global *gpriv = dev_id;
u32 ch;
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels)
rcar_canfd_handle_channel_tx(gpriv, ch);
return IRQ_HANDLED;
@@ -1223,7 +1280,7 @@ static irqreturn_t rcar_canfd_channel_err_interrupt(int irq, void *dev_id)
struct rcar_canfd_global *gpriv = dev_id;
u32 ch;
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels)
rcar_canfd_handle_channel_err(gpriv, ch);
return IRQ_HANDLED;
@@ -1235,7 +1292,7 @@ static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id)
u32 ch;
/* Common FIFO is a per channel resource */
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) {
rcar_canfd_handle_channel_err(gpriv, ch);
rcar_canfd_handle_channel_tx(gpriv, ch);
}
@@ -1246,6 +1303,7 @@ static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id)
static void rcar_canfd_set_bittiming(struct net_device *dev)
{
struct rcar_canfd_channel *priv = netdev_priv(dev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
const struct can_bittiming *bt = &priv->can.bittiming;
const struct can_bittiming *dbt = &priv->can.data_bittiming;
u16 brp, sjw, tseg1, tseg2;
@@ -1260,8 +1318,8 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
/* CAN FD only mode */
- cfg = (RCANFD_NCFG_NTSEG1(tseg1) | RCANFD_NCFG_NBRP(brp) |
- RCANFD_NCFG_NSJW(sjw) | RCANFD_NCFG_NTSEG2(tseg2));
+ cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) | RCANFD_NCFG_NBRP(brp) |
+ RCANFD_NCFG_NSJW(gpriv, sjw) | RCANFD_NCFG_NTSEG2(gpriv, tseg2));
rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
netdev_dbg(priv->ndev, "nrate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
@@ -1273,16 +1331,25 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
tseg2 = dbt->phase_seg2 - 1;
- cfg = (RCANFD_DCFG_DTSEG1(tseg1) | RCANFD_DCFG_DBRP(brp) |
- RCANFD_DCFG_DSJW(sjw) | RCANFD_DCFG_DTSEG2(tseg2));
+ cfg = (RCANFD_DCFG_DTSEG1(gpriv, tseg1) | RCANFD_DCFG_DBRP(brp) |
+ RCANFD_DCFG_DSJW(sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2));
rcar_canfd_write(priv->base, RCANFD_F_DCFG(ch), cfg);
netdev_dbg(priv->ndev, "drate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
brp, sjw, tseg1, tseg2);
} else {
/* Classical CAN only mode */
- cfg = (RCANFD_CFG_TSEG1(tseg1) | RCANFD_CFG_BRP(brp) |
- RCANFD_CFG_SJW(sjw) | RCANFD_CFG_TSEG2(tseg2));
+ if (is_v3u(gpriv)) {
+ cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) |
+ RCANFD_NCFG_NBRP(brp) |
+ RCANFD_NCFG_NSJW(gpriv, sjw) |
+ RCANFD_NCFG_NTSEG2(gpriv, tseg2));
+ } else {
+ cfg = (RCANFD_CFG_TSEG1(tseg1) |
+ RCANFD_CFG_BRP(brp) |
+ RCANFD_CFG_SJW(sjw) |
+ RCANFD_CFG_TSEG2(tseg2));
+ }
rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
netdev_dbg(priv->ndev,
@@ -1294,6 +1361,7 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
static int rcar_canfd_start(struct net_device *ndev)
{
struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
int err = -EOPNOTSUPP;
u32 sts, ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
@@ -1315,9 +1383,9 @@ static int rcar_canfd_start(struct net_device *ndev)
}
/* Enable Common & Rx FIFO */
- rcar_canfd_set_bit(priv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX),
+ rcar_canfd_set_bit(priv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX),
RCANFD_CFCC_CFE);
- rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFE);
+ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(gpriv, ridx), RCANFD_RFCC_RFE);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
return 0;
@@ -1365,6 +1433,7 @@ out_clock:
static void rcar_canfd_stop(struct net_device *ndev)
{
struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
int err;
u32 sts, ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
@@ -1382,9 +1451,9 @@ static void rcar_canfd_stop(struct net_device *ndev)
rcar_canfd_disable_channel_interrupts(priv);
/* Disable Common & Rx FIFO */
- rcar_canfd_clear_bit(priv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX),
+ rcar_canfd_clear_bit(priv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX),
RCANFD_CFCC_CFE);
- rcar_canfd_clear_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFE);
+ rcar_canfd_clear_bit(priv->base, RCANFD_RFCC(gpriv, ridx), RCANFD_RFCC_RFE);
/* Set the state as STOPPED */
priv->can.state = CAN_STATE_STOPPED;
@@ -1408,6 +1477,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
struct canfd_frame *cf = (struct canfd_frame *)skb->data;
u32 sts = 0, id, dlc;
unsigned long flags;
@@ -1428,11 +1498,11 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
dlc = RCANFD_CFPTR_CFDLC(can_fd_len2dlc(cf->len));
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_v3u(gpriv)) {
rcar_canfd_write(priv->base,
- RCANFD_F_CFID(ch, RCANFD_CFFIFO_IDX), id);
+ RCANFD_F_CFID(gpriv, ch, RCANFD_CFFIFO_IDX), id);
rcar_canfd_write(priv->base,
- RCANFD_F_CFPTR(ch, RCANFD_CFFIFO_IDX), dlc);
+ RCANFD_F_CFPTR(gpriv, ch, RCANFD_CFFIFO_IDX), dlc);
if (can_is_canfd_skb(skb)) {
/* CAN FD frame format */
@@ -1445,10 +1515,10 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
}
rcar_canfd_write(priv->base,
- RCANFD_F_CFFDCSTS(ch, RCANFD_CFFIFO_IDX), sts);
+ RCANFD_F_CFFDCSTS(gpriv, ch, RCANFD_CFFIFO_IDX), sts);
rcar_canfd_put_data(priv, cf,
- RCANFD_F_CFDF(ch, RCANFD_CFFIFO_IDX, 0));
+ RCANFD_F_CFDF(gpriv, ch, RCANFD_CFFIFO_IDX, 0));
} else {
rcar_canfd_write(priv->base,
RCANFD_C_CFID(ch, RCANFD_CFFIFO_IDX), id);
@@ -1471,7 +1541,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
* pointer for the Common FIFO
*/
rcar_canfd_write(priv->base,
- RCANFD_CFPCTR(ch, RCANFD_CFFIFO_IDX), 0xff);
+ RCANFD_CFPCTR(gpriv, ch, RCANFD_CFFIFO_IDX), 0xff);
spin_unlock_irqrestore(&priv->tx_lock, flags);
return NETDEV_TX_OK;
@@ -1480,18 +1550,21 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
{
struct net_device_stats *stats = &priv->ndev->stats;
+ struct rcar_canfd_global *gpriv = priv->gpriv;
struct canfd_frame *cf;
struct sk_buff *skb;
u32 sts = 0, id, dlc;
u32 ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
- id = rcar_canfd_read(priv->base, RCANFD_F_RFID(ridx));
- dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(ridx));
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_v3u(gpriv)) {
+ id = rcar_canfd_read(priv->base, RCANFD_F_RFID(gpriv, ridx));
+ dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(gpriv, ridx));
- sts = rcar_canfd_read(priv->base, RCANFD_F_RFFDSTS(ridx));
- if (sts & RCANFD_RFFDSTS_RFFDF)
+ sts = rcar_canfd_read(priv->base, RCANFD_F_RFFDSTS(gpriv, ridx));
+
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) &&
+ sts & RCANFD_RFFDSTS_RFFDF)
skb = alloc_canfd_skb(priv->ndev, &cf);
else
skb = alloc_can_skb(priv->ndev,
@@ -1529,12 +1602,14 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
if (sts & RCANFD_RFFDSTS_RFBRS)
cf->flags |= CANFD_BRS;
- rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(ridx, 0));
+ rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(gpriv, ridx, 0));
}
} else {
cf->len = can_cc_dlc2len(RCANFD_RFPTR_RFDLC(dlc));
if (id & RCANFD_RFID_RFRTR)
cf->can_id |= CAN_RTR_FLAG;
+ else if (is_v3u(gpriv))
+ rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(gpriv, ridx, 0));
else
rcar_canfd_get_data(priv, cf, RCANFD_C_RFDF(ridx, 0));
}
@@ -1542,7 +1617,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
/* Write 0xff to RFPC to increment the CPU-side
* pointer of the Rx FIFO
*/
- rcar_canfd_write(priv->base, RCANFD_RFPCTR(ridx), 0xff);
+ rcar_canfd_write(priv->base, RCANFD_RFPCTR(gpriv, ridx), 0xff);
can_led_event(priv->ndev, CAN_LED_EVENT_RX);
@@ -1556,13 +1631,14 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
{
struct rcar_canfd_channel *priv =
container_of(napi, struct rcar_canfd_channel, napi);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
int num_pkts;
u32 sts;
u32 ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
for (num_pkts = 0; num_pkts < quota; num_pkts++) {
- sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
+ sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx));
/* Check FIFO empty condition */
if (sts & RCANFD_RFSTS_RFEMP)
break;
@@ -1571,7 +1647,7 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
/* Clear interrupt bit */
if (sts & RCANFD_RFSTS_RFIF)
- rcar_canfd_write(priv->base, RCANFD_RFSTS(ridx),
+ rcar_canfd_write(priv->base, RCANFD_RFSTS(gpriv, ridx),
sts & ~RCANFD_RFSTS_RFIF);
}
@@ -1579,7 +1655,7 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
if (num_pkts < quota) {
if (napi_complete_done(napi, num_pkts)) {
/* Enable Rx FIFO interrupts */
- rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
+ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(gpriv, ridx),
RCANFD_RFCC_RFIE);
}
}
@@ -1756,21 +1832,24 @@ static int rcar_canfd_probe(struct platform_device *pdev)
int g_err_irq, g_recc_irq;
bool fdmode = true; /* CAN FD only mode - default */
enum rcanfd_chip_id chip_id;
+ int max_channels;
+ char name[9] = "channelX";
+ int i;
chip_id = (uintptr_t)of_device_get_match_data(&pdev->dev);
+ max_channels = chip_id == RENESAS_R8A779A0 ? 8 : 2;
if (of_property_read_bool(pdev->dev.of_node, "renesas,no-can-fd"))
fdmode = false; /* Classical CAN only mode */
- of_child = of_get_child_by_name(pdev->dev.of_node, "channel0");
- if (of_child && of_device_is_available(of_child))
- channels_mask |= BIT(0); /* Channel 0 */
-
- of_child = of_get_child_by_name(pdev->dev.of_node, "channel1");
- if (of_child && of_device_is_available(of_child))
- channels_mask |= BIT(1); /* Channel 1 */
+ for (i = 0; i < max_channels; ++i) {
+ name[7] = '0' + i;
+ of_child = of_get_child_by_name(pdev->dev.of_node, name);
+ if (of_child && of_device_is_available(of_child))
+ channels_mask |= BIT(i);
+ }
- if (chip_id == RENESAS_RCAR_GEN3) {
+ if (chip_id != RENESAS_RZG2L) {
ch_irq = platform_get_irq_byname_optional(pdev, "ch_int");
if (ch_irq < 0) {
/* For backward compatibility get irq by index */
@@ -1806,6 +1885,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
gpriv->channels_mask = channels_mask;
gpriv->fdmode = fdmode;
gpriv->chip_id = chip_id;
+ gpriv->max_channels = max_channels;
if (gpriv->chip_id == RENESAS_RZG2L) {
gpriv->rstc1 = devm_reset_control_get_exclusive(&pdev->dev, "rstp_n");
@@ -1847,7 +1927,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
}
fcan_freq = clk_get_rate(gpriv->can_clk);
- if (gpriv->fcan == RCANFD_CANFDCLK && gpriv->chip_id == RENESAS_RCAR_GEN3)
+ if (gpriv->fcan == RCANFD_CANFDCLK && gpriv->chip_id != RENESAS_RZG2L)
/* CANFD clock is further divided by (1/2) within the IP */
fcan_freq /= 2;
@@ -1859,7 +1939,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
gpriv->base = addr;
/* Request IRQ that's common for both channels */
- if (gpriv->chip_id == RENESAS_RCAR_GEN3) {
+ if (gpriv->chip_id != RENESAS_RZG2L) {
err = devm_request_irq(&pdev->dev, ch_irq,
rcar_canfd_channel_interrupt, 0,
"canfd.ch_int", gpriv);
@@ -1925,7 +2005,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
rcar_canfd_configure_controller(gpriv);
/* Configure per channel attributes */
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, max_channels) {
/* Configure Channel's Rx fifo */
rcar_canfd_configure_rx(gpriv, ch);
@@ -1951,7 +2031,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
goto fail_mode;
}
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, max_channels) {
err = rcar_canfd_channel_probe(gpriv, ch, fcan_freq);
if (err)
goto fail_channel;
@@ -1963,7 +2043,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
return 0;
fail_channel:
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+ for_each_set_bit(ch, &gpriv->channels_mask, max_channels)
rcar_canfd_channel_remove(gpriv, ch);
fail_mode:
rcar_canfd_disable_global_interrupts(gpriv);
@@ -1984,7 +2064,7 @@ static int rcar_canfd_remove(struct platform_device *pdev)
rcar_canfd_reset_controller(gpriv);
rcar_canfd_disable_global_interrupts(gpriv);
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) {
rcar_canfd_disable_channel_interrupts(gpriv->ch[ch]);
rcar_canfd_channel_remove(gpriv, ch);
}
@@ -2014,6 +2094,7 @@ static SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend,
static const __maybe_unused struct of_device_id rcar_canfd_of_table[] = {
{ .compatible = "renesas,rcar-gen3-canfd", .data = (void *)RENESAS_RCAR_GEN3 },
{ .compatible = "renesas,rzg2l-canfd", .data = (void *)RENESAS_RZG2L },
+ { .compatible = "renesas,r8a779a0-canfd", .data = (void *)RENESAS_R8A779A0 },
{ }
};
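
The probe rework above replaces the hard-coded "channel0"/"channel1" lookups with a loop over up to max_channels child nodes. A minimal userspace sketch of the name/mask construction (channel_node_available() is a hypothetical stand-in for the of_get_child_by_name()/of_device_is_available() calls, not part of the patch):

#include <stdio.h>

/* Hypothetical stand-in for of_get_child_by_name()/of_device_is_available():
 * pretend only the "channel0" and "channel5" nodes exist and are enabled.
 */
static int channel_node_available(int i)
{
	return i == 0 || i == 5;
}

int main(void)
{
	unsigned long channels_mask = 0;
	char name[9] = "channelX";		/* 7 letters + digit + '\0' */
	int max_channels = 8;			/* 8 on R8A779A0, 2 elsewhere */
	int i;

	for (i = 0; i < max_channels; ++i) {
		name[7] = '0' + i;		/* "channel0" ... "channel7" */
		if (channel_node_available(i))
			channels_mask |= 1UL << i;
		printf("%s -> %s\n", name,
		       channel_node_available(i) ? "enabled" : "absent");
	}

	printf("channels_mask = 0x%02lx\n", channels_mask);
	return 0;
}
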
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 27783fbf011f..ec294d0c5722 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -221,7 +221,7 @@ static void slc_bump(struct slcan *sl)
if (!(cf.can_id & CAN_RTR_FLAG))
sl->dev->stats.rx_bytes += cf.len;
- netif_rx_ni(skb);
+ netif_rx(skb);
}
/* parse tty input stream */
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index d74e895bddf7..8d27ac66ca7f 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -392,13 +392,10 @@ static int softing_netdev_open(struct net_device *ndev)
static int softing_netdev_stop(struct net_device *ndev)
{
- int ret;
-
netif_stop_queue(ndev);
/* softing cycle does close_candev() */
- ret = softing_startstop(ndev, 0);
- return ret;
+ return softing_startstop(ndev, 0);
}
static int softing_candev_set_mode(struct net_device *ndev, enum can_mode mode)
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index 664b8f14d7b0..a5b2952b8d0f 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -356,7 +356,7 @@ static void hi3110_hw_rx(struct spi_device *spi)
can_led_event(priv->net, CAN_LED_EVENT_RX);
- netif_rx_ni(skb);
+ netif_rx(skb);
}
static void hi3110_hw_sleep(struct spi_device *spi)
@@ -677,7 +677,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
tx_state = txerr >= rxerr ? new_state : 0;
rx_state = txerr <= rxerr ? new_state : 0;
can_change_state(net, cf, tx_state, rx_state);
- netif_rx_ni(skb);
+ netif_rx(skb);
if (new_state == CAN_STATE_BUS_OFF) {
can_bus_off(net);
@@ -718,7 +718,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
cf->data[6] = hi3110_read(spi, HI3110_READ_TEC);
cf->data[7] = hi3110_read(spi, HI3110_READ_REC);
netdev_dbg(priv->net, "Bus Error\n");
- netif_rx_ni(skb);
+ netif_rx(skb);
}
}
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index d23edaf22420..fc747bff5eeb 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -740,7 +740,7 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
can_led_event(priv->net, CAN_LED_EVENT_RX);
- netif_rx_ni(skb);
+ netif_rx(skb);
}
static void mcp251x_hw_sleep(struct spi_device *spi)
@@ -987,7 +987,7 @@ static void mcp251x_error_skb(struct net_device *net, int can_id, int data1)
if (skb) {
frame->can_id |= can_id;
frame->data[1] = data1;
- netif_rx_ni(skb);
+ netif_rx(skb);
} else {
netdev_err(net, "cannot allocate error skb\n");
}
diff --git a/drivers/net/can/spi/mcp251xfd/Makefile b/drivers/net/can/spi/mcp251xfd/Makefile
index a83d685d64e0..94d7de954294 100644
--- a/drivers/net/can/spi/mcp251xfd/Makefile
+++ b/drivers/net/can/spi/mcp251xfd/Makefile
@@ -6,6 +6,8 @@ mcp251xfd-objs :=
mcp251xfd-objs += mcp251xfd-chip-fifo.o
mcp251xfd-objs += mcp251xfd-core.o
mcp251xfd-objs += mcp251xfd-crc16.o
+mcp251xfd-objs += mcp251xfd-ethtool.o
+mcp251xfd-objs += mcp251xfd-ram.o
mcp251xfd-objs += mcp251xfd-regmap.o
mcp251xfd-objs += mcp251xfd-ring.o
mcp251xfd-objs += mcp251xfd-rx.o
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c
index 2f9a623d381d..0d96097a2547 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c
@@ -78,7 +78,7 @@ int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv)
if (err)
return err;
- /* FIFO 1 - TX */
+ /* TX FIFO */
val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
tx_ring->obj_num - 1) |
MCP251XFD_REG_FIFOCON_TXEN |
@@ -99,7 +99,7 @@ int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv)
MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED);
err = regmap_write(priv->map_reg,
- MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO),
+ MCP251XFD_REG_FIFOCON(priv->tx->fifo_nr),
val);
if (err)
return err;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 65c9b31666a6..325024be7b04 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -112,6 +112,22 @@ static const char *mcp251xfd_get_mode_str(const u8 mode)
return "<unknown>";
}
+static const char *
+mcp251xfd_get_osc_str(const u32 osc, const u32 osc_reference)
+{
+ switch (~osc & osc_reference &
+ (MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY)) {
+ case MCP251XFD_REG_OSC_PLLRDY:
+ return "PLL";
+ case MCP251XFD_REG_OSC_OSCRDY:
+ return "Oscillator";
+ case MCP251XFD_REG_OSC_PLLRDY | MCP251XFD_REG_OSC_OSCRDY:
+ return "Oscillator/PLL";
+ }
+
+ return "<unknown>";
+}
+
static inline int mcp251xfd_vdd_enable(const struct mcp251xfd_priv *priv)
{
if (!priv->reg_vdd)
@@ -178,6 +194,11 @@ static int mcp251xfd_clks_and_vdd_disable(const struct mcp251xfd_priv *priv)
return 0;
}
+static inline bool mcp251xfd_reg_invalid(u32 reg)
+{
+ return reg == 0x0 || reg == 0xffffffff;
+}
+
static inline int
mcp251xfd_chip_get_mode(const struct mcp251xfd_priv *priv, u8 *mode)
{
@@ -197,34 +218,55 @@ static int
__mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
const u8 mode_req, bool nowait)
{
- u32 con, con_reqop;
+ u32 con = 0, con_reqop, osc = 0;
+ u8 mode;
int err;
con_reqop = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, mode_req);
err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CON,
MCP251XFD_REG_CON_REQOP_MASK, con_reqop);
- if (err)
+ if (err == -EBADMSG) {
+ netdev_err(priv->ndev,
+ "Failed to set Requested Operation Mode.\n");
+
+ return -ENODEV;
+ } else if (err) {
return err;
+ }
if (mode_req == MCP251XFD_REG_CON_MODE_SLEEP || nowait)
return 0;
err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_CON, con,
+ !mcp251xfd_reg_invalid(con) &&
FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK,
con) == mode_req,
MCP251XFD_POLL_SLEEP_US,
MCP251XFD_POLL_TIMEOUT_US);
- if (err) {
- u8 mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con);
+ if (err != -ETIMEDOUT && err != -EBADMSG)
+ return err;
+
+ /* Ignore the return value. Print the error messages below,
+ * even if this read fails.
+ */
+ regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
+ if (mcp251xfd_reg_invalid(con)) {
netdev_err(priv->ndev,
- "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u).\n",
- mcp251xfd_get_mode_str(mode_req), mode_req,
- mcp251xfd_get_mode_str(mode), mode);
- return err;
+ "Failed to read CAN Control Register (con=0x%08x, osc=0x%08x).\n",
+ con, osc);
+
+ return -ENODEV;
}
- return 0;
+ mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con);
+ netdev_err(priv->ndev,
+ "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u) (con=0x%08x, osc=0x%08x).\n",
+ mcp251xfd_get_mode_str(mode_req), mode_req,
+ mcp251xfd_get_mode_str(mode), mode,
+ con, osc);
+
+ return -ETIMEDOUT;
}
static inline int
@@ -241,27 +283,58 @@ mcp251xfd_chip_set_mode_nowait(const struct mcp251xfd_priv *priv,
return __mcp251xfd_chip_set_mode(priv, mode_req, true);
}
-static inline bool mcp251xfd_osc_invalid(u32 reg)
+static int
+mcp251xfd_chip_wait_for_osc_ready(const struct mcp251xfd_priv *priv,
+ u32 osc_reference, u32 osc_mask)
{
- return reg == 0x0 || reg == 0xffffffff;
+ u32 osc;
+ int err;
+
+ err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc,
+ !mcp251xfd_reg_invalid(osc) &&
+ (osc & osc_mask) == osc_reference,
+ MCP251XFD_OSC_STAB_SLEEP_US,
+ MCP251XFD_OSC_STAB_TIMEOUT_US);
+ if (err != -ETIMEDOUT)
+ return err;
+
+ if (mcp251xfd_reg_invalid(osc)) {
+ netdev_err(priv->ndev,
+ "Failed to read Oscillator Configuration Register (osc=0x%08x).\n",
+ osc);
+ return -ENODEV;
+ }
+
+ netdev_err(priv->ndev,
+ "Timeout waiting for %s ready (osc=0x%08x, osc_reference=0x%08x, osc_mask=0x%08x).\n",
+ mcp251xfd_get_osc_str(osc, osc_reference),
+ osc, osc_reference, osc_mask);
+
+ return -ETIMEDOUT;
}
-static int mcp251xfd_chip_clock_enable(const struct mcp251xfd_priv *priv)
+static int mcp251xfd_chip_wake(const struct mcp251xfd_priv *priv)
{
u32 osc, osc_reference, osc_mask;
int err;
- /* Set Power On Defaults for "Clock Output Divisor" and remove
- * "Oscillator Disable" bit.
+ /* For normal sleep on MCP2517FD and MCP2518FD, clearing
+ * "Oscillator Disable" will wake the chip. For low power mode
+ * on MCP2518FD, asserting the chip select will wake the
+ * chip. Writing to the Oscillator register will wake it in
+ * both cases.
*/
osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
MCP251XFD_REG_OSC_CLKODIV_10);
+
+ /* We cannot check for the PLL ready bit (either set or
+ * unset), as the PLL might be enabled. This can happen if the
+ * system reboots, while the mcp251xfd stays powered.
+ */
osc_reference = MCP251XFD_REG_OSC_OSCRDY;
- osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY;
+ osc_mask = MCP251XFD_REG_OSC_OSCRDY;
- /* Note:
- *
- * If the controller is in Sleep Mode the following write only
+ /* If the controller is in Sleep Mode the following write only
* removes the "Oscillator Disable" bit and powers it up. All
* other bits are unaffected.
*/
@@ -269,24 +342,31 @@ static int mcp251xfd_chip_clock_enable(const struct mcp251xfd_priv *priv)
if (err)
return err;
- /* Wait for "Oscillator Ready" bit */
- err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc,
- (osc & osc_mask) == osc_reference,
- MCP251XFD_OSC_STAB_SLEEP_US,
- MCP251XFD_OSC_STAB_TIMEOUT_US);
- if (mcp251xfd_osc_invalid(osc)) {
- netdev_err(priv->ndev,
- "Failed to detect %s (osc=0x%08x).\n",
- mcp251xfd_get_model_str(priv), osc);
- return -ENODEV;
- } else if (err == -ETIMEDOUT) {
- netdev_err(priv->ndev,
- "Timeout waiting for Oscillator Ready (osc=0x%08x, osc_reference=0x%08x)\n",
- osc, osc_reference);
- return -ETIMEDOUT;
+ /* Sometimes the PLL is stuck enabled, the controller never
+ * sets the OSC Ready bit, and we get an -ETIMEDOUT. Our
+ * caller takes care of retry.
+ */
+ return mcp251xfd_chip_wait_for_osc_ready(priv, osc_reference, osc_mask);
+}
+
+static inline int mcp251xfd_chip_sleep(const struct mcp251xfd_priv *priv)
+{
+ if (priv->pll_enable) {
+ u32 osc;
+ int err;
+
+ /* Turn off PLL */
+ osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
+ MCP251XFD_REG_OSC_CLKODIV_10);
+ err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
+ if (err)
+ netdev_err(priv->ndev,
+ "Failed to disable PLL.\n");
+
+ priv->spi->max_speed_hz = priv->spi_max_speed_hz_slow;
}
- return err;
+ return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
}
static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
@@ -294,10 +374,10 @@ static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
const __be16 cmd = mcp251xfd_cmd_reset();
int err;
- /* The Set Mode and SPI Reset command only seems to works if
- * the controller is not in Sleep Mode.
+ /* The Set Mode and SPI Reset command only works if the
+ * controller is not in Sleep Mode.
*/
- err = mcp251xfd_chip_clock_enable(priv);
+ err = mcp251xfd_chip_wake(priv);
if (err)
return err;
@@ -311,34 +391,29 @@ static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
static int mcp251xfd_chip_softreset_check(const struct mcp251xfd_priv *priv)
{
- u32 osc, osc_reference;
+ u32 osc_reference, osc_mask;
u8 mode;
int err;
- err = mcp251xfd_chip_get_mode(priv, &mode);
- if (err)
- return err;
-
- if (mode != MCP251XFD_REG_CON_MODE_CONFIG) {
- netdev_info(priv->ndev,
- "Controller not in Config Mode after reset, but in %s Mode (%u).\n",
- mcp251xfd_get_mode_str(mode), mode);
- return -ETIMEDOUT;
- }
-
+ /* Check for reset defaults of OSC reg.
+ * This will take care of stabilization period.
+ */
osc_reference = MCP251XFD_REG_OSC_OSCRDY |
FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
MCP251XFD_REG_OSC_CLKODIV_10);
+ osc_mask = osc_reference | MCP251XFD_REG_OSC_PLLRDY;
+ err = mcp251xfd_chip_wait_for_osc_ready(priv, osc_reference, osc_mask);
+ if (err)
+ return err;
- /* check reset defaults of OSC reg */
- err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
+ err = mcp251xfd_chip_get_mode(priv, &mode);
if (err)
return err;
- if (osc != osc_reference) {
+ if (mode != MCP251XFD_REG_CON_MODE_CONFIG) {
netdev_info(priv->ndev,
- "Controller failed to reset. osc=0x%08x, reference value=0x%08x.\n",
- osc, osc_reference);
+ "Controller not in Config Mode after reset, but in %s Mode (%u).\n",
+ mcp251xfd_get_mode_str(mode), mode);
return -ETIMEDOUT;
}
@@ -374,7 +449,7 @@ static int mcp251xfd_chip_softreset(const struct mcp251xfd_priv *priv)
static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv)
{
- u32 osc;
+ u32 osc, osc_reference, osc_mask;
int err;
/* Activate Low Power Mode on Oscillator Disable. This only
@@ -384,10 +459,29 @@ static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv)
osc = MCP251XFD_REG_OSC_LPMEN |
FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
MCP251XFD_REG_OSC_CLKODIV_10);
+ osc_reference = MCP251XFD_REG_OSC_OSCRDY;
+ osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY;
+
+ if (priv->pll_enable) {
+ osc |= MCP251XFD_REG_OSC_PLLEN;
+ osc_reference |= MCP251XFD_REG_OSC_PLLRDY;
+ }
+
err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
if (err)
return err;
+ err = mcp251xfd_chip_wait_for_osc_ready(priv, osc_reference, osc_mask);
+ if (err)
+ return err;
+
+ priv->spi->max_speed_hz = priv->spi_max_speed_hz_fast;
+
+ return 0;
+}
+
+static int mcp251xfd_chip_timestamp_init(const struct mcp251xfd_priv *priv)
+{
/* Set Time Base Counter Prescaler to 1.
*
* This means an overflow of the 32 bit Time Base Counter
@@ -628,14 +722,14 @@ static int mcp251xfd_chip_interrupts_disable(const struct mcp251xfd_priv *priv)
return regmap_write(priv->map_reg, MCP251XFD_REG_CRC, 0);
}
-static int mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
- const enum can_state state)
+static void mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
+ const enum can_state state)
{
priv->can.state = state;
mcp251xfd_chip_interrupts_disable(priv);
mcp251xfd_chip_rx_int_disable(priv);
- return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
+ mcp251xfd_chip_sleep(priv);
}
static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
@@ -650,6 +744,10 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
if (err)
goto out_chip_stop;
+ err = mcp251xfd_chip_timestamp_init(priv);
+ if (err)
+ goto out_chip_stop;
+
err = mcp251xfd_set_bittiming(priv);
if (err)
goto out_chip_stop;
@@ -662,7 +760,9 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
if (err)
goto out_chip_stop;
- mcp251xfd_ring_init(priv);
+ err = mcp251xfd_ring_init(priv);
+ if (err)
+ goto out_chip_stop;
err = mcp251xfd_chip_fifo_init(priv);
if (err)
@@ -1284,6 +1384,20 @@ static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv)
return 0;
}
+static int mcp251xfd_read_regs_status(struct mcp251xfd_priv *priv)
+{
+ const int val_bytes = regmap_get_val_bytes(priv->map_reg);
+ size_t len;
+
+ if (priv->rx_ring_num == 1)
+ len = sizeof(priv->regs_status.intf);
+ else
+ len = sizeof(priv->regs_status);
+
+ return regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT,
+ &priv->regs_status, len / val_bytes);
+}
+
#define mcp251xfd_handle(priv, irq, ...) \
({ \
struct mcp251xfd_priv *_priv = (priv); \
@@ -1300,7 +1414,6 @@ static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv)
static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
{
struct mcp251xfd_priv *priv = dev_id;
- const int val_bytes = regmap_get_val_bytes(priv->map_reg);
irqreturn_t handled = IRQ_NONE;
int err;
@@ -1312,21 +1425,28 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
if (!rx_pending)
break;
+ /* Assume the 1st RX-FIFO is pending; if other FIFOs
+ * are pending, the main IRQ handler will take care
+ * of them.
+ */
+ priv->regs_status.rxif = BIT(priv->rx[0]->fifo_nr);
err = mcp251xfd_handle(priv, rxif);
if (err)
goto out_fail;
handled = IRQ_HANDLED;
- } while (1);
+
+ /* We don't know which RX-FIFO is pending, but we only
+ * handle the 1st RX-FIFO here. Leave the loop if we have
+ * more than 1 RX-FIFO, to avoid starvation.
+ */
+ } while (priv->rx_ring_num == 1);
do {
u32 intf_pending, intf_pending_clearable;
bool set_normal_mode = false;
- err = regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT,
- &priv->regs_status,
- sizeof(priv->regs_status) /
- val_bytes);
+ err = mcp251xfd_read_regs_status(priv);
if (err)
goto out_fail;
@@ -1478,6 +1598,7 @@ static int mcp251xfd_open(struct net_device *ndev)
goto out_transceiver_disable;
mcp251xfd_timestamp_init(priv);
+ clear_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
can_rx_offload_enable(&priv->offload);
err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
@@ -1498,6 +1619,7 @@ static int mcp251xfd_open(struct net_device *ndev)
free_irq(spi->irq, priv);
out_can_rx_offload_disable:
can_rx_offload_disable(&priv->offload);
+ set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
mcp251xfd_timestamp_stop(priv);
out_transceiver_disable:
mcp251xfd_transceiver_disable(priv);
@@ -1517,6 +1639,8 @@ static int mcp251xfd_stop(struct net_device *ndev)
struct mcp251xfd_priv *priv = netdev_priv(ndev);
netif_stop_queue(ndev);
+ set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
+ hrtimer_cancel(&priv->rx_irq_timer);
mcp251xfd_chip_interrupts_disable(priv);
free_irq(ndev->irq, priv);
can_rx_offload_disable(&priv->offload);
@@ -1621,8 +1745,9 @@ static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv)
}
static int
-mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
- u32 *dev_id, u32 *effective_speed_hz)
+mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id,
+ u32 *effective_speed_hz_slow,
+ u32 *effective_speed_hz_fast)
{
struct mcp251xfd_map_buf_nocrc *buf_rx;
struct mcp251xfd_map_buf_nocrc *buf_tx;
@@ -1641,16 +1766,20 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
xfer[0].tx_buf = buf_tx;
xfer[0].len = sizeof(buf_tx->cmd);
+ xfer[0].speed_hz = priv->spi_max_speed_hz_slow;
xfer[1].rx_buf = buf_rx->data;
xfer[1].len = sizeof(dev_id);
+ xfer[1].speed_hz = priv->spi_max_speed_hz_fast;
mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, MCP251XFD_REG_DEVID);
+
err = spi_sync_transfer(priv->spi, xfer, ARRAY_SIZE(xfer));
if (err)
goto out_kfree_buf_tx;
*dev_id = be32_to_cpup((__be32 *)buf_rx->data);
- *effective_speed_hz = xfer->effective_speed_hz;
+ *effective_speed_hz_slow = xfer[0].effective_speed_hz;
+ *effective_speed_hz_fast = xfer[1].effective_speed_hz;
out_kfree_buf_tx:
kfree(buf_tx);
@@ -1666,34 +1795,45 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
static int
mcp251xfd_register_done(const struct mcp251xfd_priv *priv)
{
- u32 dev_id, effective_speed_hz;
+ u32 dev_id, effective_speed_hz_slow, effective_speed_hz_fast;
+ unsigned long clk_rate;
int err;
err = mcp251xfd_register_get_dev_id(priv, &dev_id,
- &effective_speed_hz);
+ &effective_speed_hz_slow,
+ &effective_speed_hz_fast);
if (err)
return err;
+ clk_rate = clk_get_rate(priv->clk);
+
netdev_info(priv->ndev,
- "%s rev%lu.%lu (%cRX_INT %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD c:%u.%02uMHz m:%u.%02uMHz r:%u.%02uMHz e:%u.%02uMHz) successfully initialized.\n",
+ "%s rev%lu.%lu (%cRX_INT %cPLL %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD o:%lu.%02luMHz c:%u.%02uMHz m:%u.%02uMHz rs:%u.%02uMHz es:%u.%02uMHz rf:%u.%02uMHz ef:%u.%02uMHz) successfully initialized.\n",
mcp251xfd_get_model_str(priv),
FIELD_GET(MCP251XFD_REG_DEVID_ID_MASK, dev_id),
FIELD_GET(MCP251XFD_REG_DEVID_REV_MASK, dev_id),
priv->rx_int ? '+' : '-',
+ priv->pll_enable ? '+' : '-',
MCP251XFD_QUIRK_ACTIVE(MAB_NO_WARN),
MCP251XFD_QUIRK_ACTIVE(CRC_REG),
MCP251XFD_QUIRK_ACTIVE(CRC_RX),
MCP251XFD_QUIRK_ACTIVE(CRC_TX),
MCP251XFD_QUIRK_ACTIVE(ECC),
MCP251XFD_QUIRK_ACTIVE(HALF_DUPLEX),
+ clk_rate / 1000000,
+ clk_rate % 1000000 / 1000 / 10,
priv->can.clock.freq / 1000000,
priv->can.clock.freq % 1000000 / 1000 / 10,
priv->spi_max_speed_hz_orig / 1000000,
priv->spi_max_speed_hz_orig % 1000000 / 1000 / 10,
- priv->spi->max_speed_hz / 1000000,
- priv->spi->max_speed_hz % 1000000 / 1000 / 10,
- effective_speed_hz / 1000000,
- effective_speed_hz % 1000000 / 1000 / 10);
+ priv->spi_max_speed_hz_slow / 1000000,
+ priv->spi_max_speed_hz_slow % 1000000 / 1000 / 10,
+ effective_speed_hz_slow / 1000000,
+ effective_speed_hz_slow % 1000000 / 1000 / 10,
+ priv->spi_max_speed_hz_fast / 1000000,
+ priv->spi_max_speed_hz_fast % 1000000 / 1000 / 10,
+ effective_speed_hz_fast / 1000000,
+ effective_speed_hz_fast % 1000000 / 1000 / 10);
return 0;
}
@@ -1719,19 +1859,27 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv)
if (err == -ENODEV)
goto out_runtime_disable;
if (err)
- goto out_chip_set_mode_sleep;
+ goto out_chip_sleep;
+
+ err = mcp251xfd_chip_clock_init(priv);
+ if (err == -ENODEV)
+ goto out_runtime_disable;
+ if (err)
+ goto out_chip_sleep;
err = mcp251xfd_register_chip_detect(priv);
if (err)
- goto out_chip_set_mode_sleep;
+ goto out_chip_sleep;
err = mcp251xfd_register_check_rx_int(priv);
if (err)
- goto out_chip_set_mode_sleep;
+ goto out_chip_sleep;
+
+ mcp251xfd_ethtool_init(priv);
err = register_candev(ndev);
if (err)
- goto out_chip_set_mode_sleep;
+ goto out_chip_sleep;
err = mcp251xfd_register_done(priv);
if (err)
@@ -1741,7 +1889,7 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv)
* disable the clocks and vdd. If CONFIG_PM is not enabled,
* the clocks and vdd will stay powered.
*/
- err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
+ err = mcp251xfd_chip_sleep(priv);
if (err)
goto out_unregister_candev;
@@ -1751,8 +1899,8 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv)
out_unregister_candev:
unregister_candev(ndev);
- out_chip_set_mode_sleep:
- mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
+ out_chip_sleep:
+ mcp251xfd_chip_sleep(priv);
out_runtime_disable:
pm_runtime_disable(ndev->dev.parent);
out_runtime_put_noidle:
@@ -1768,10 +1916,10 @@ static inline void mcp251xfd_unregister(struct mcp251xfd_priv *priv)
unregister_candev(ndev);
- pm_runtime_get_sync(ndev->dev.parent);
- pm_runtime_put_noidle(ndev->dev.parent);
- mcp251xfd_clks_and_vdd_disable(priv);
- pm_runtime_disable(ndev->dev.parent);
+ if (pm_runtime_enabled(ndev->dev.parent))
+ pm_runtime_disable(ndev->dev.parent);
+ else
+ mcp251xfd_clks_and_vdd_disable(priv);
}
static const struct of_device_id mcp251xfd_of_match[] = {
@@ -1814,6 +1962,7 @@ static int mcp251xfd_probe(struct spi_device *spi)
struct gpio_desc *rx_int;
struct regulator *reg_vdd, *reg_xceiver;
struct clk *clk;
+ bool pll_enable = false;
u32 freq = 0;
int err;
@@ -1864,12 +2013,8 @@ static int mcp251xfd_probe(struct spi_device *spi)
return -ERANGE;
}
- if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / MCP251XFD_OSC_PLL_MULTIPLIER) {
- dev_err(&spi->dev,
- "Oscillator frequency (%u Hz) is too low and PLL is not supported.\n",
- freq);
- return -ERANGE;
- }
+ if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / MCP251XFD_OSC_PLL_MULTIPLIER)
+ pll_enable = true;
ndev = alloc_candev(sizeof(struct mcp251xfd_priv),
MCP251XFD_TX_OBJ_NUM_MAX);
@@ -1885,6 +2030,8 @@ static int mcp251xfd_probe(struct spi_device *spi)
priv = netdev_priv(ndev);
spi_set_drvdata(spi, priv);
priv->can.clock.freq = freq;
+ if (pll_enable)
+ priv->can.clock.freq *= MCP251XFD_OSC_PLL_MULTIPLIER;
priv->can.do_set_mode = mcp251xfd_set_mode;
priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
priv->can.bittiming_const = &mcp251xfd_bittiming_const;
@@ -1893,10 +2040,12 @@ static int mcp251xfd_probe(struct spi_device *spi)
CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
CAN_CTRLMODE_CC_LEN8_DLC;
+ set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
priv->ndev = ndev;
priv->spi = spi;
priv->rx_int = rx_int;
priv->clk = clk;
+ priv->pll_enable = pll_enable;
priv->reg_vdd = reg_vdd;
priv->reg_xceiver = reg_xceiver;
@@ -1934,7 +2083,16 @@ static int mcp251xfd_probe(struct spi_device *spi)
*
*/
priv->spi_max_speed_hz_orig = spi->max_speed_hz;
- spi->max_speed_hz = min(spi->max_speed_hz, freq / 2 / 1000 * 850);
+ priv->spi_max_speed_hz_slow = min(spi->max_speed_hz,
+ freq / 2 / 1000 * 850);
+ if (priv->pll_enable)
+ priv->spi_max_speed_hz_fast = min(spi->max_speed_hz,
+ freq *
+ MCP251XFD_OSC_PLL_MULTIPLIER /
+ 2 / 1000 * 850);
+ else
+ priv->spi_max_speed_hz_fast = priv->spi_max_speed_hz_slow;
+ spi->max_speed_hz = priv->spi_max_speed_hz_slow;
spi->bits_per_word = 8;
spi->rt = true;
err = spi_setup(spi);
@@ -1951,8 +2109,11 @@ static int mcp251xfd_probe(struct spi_device *spi)
goto out_free_candev;
err = mcp251xfd_register(priv);
- if (err)
+ if (err) {
+ dev_err_probe(&spi->dev, err, "Failed to detect %s.\n",
+ mcp251xfd_get_model_str(priv));
goto out_can_rx_offload_del;
+ }
return 0;
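
With the PLL handling above, low oscillator frequencies are no longer rejected: the PLL is enabled instead, the CAN clock becomes freq * MCP251XFD_OSC_PLL_MULTIPLIER, and SPI accesses issued before the PLL is locked still have to respect the oscillator-derived limit. A rough worked example of the resulting slow/fast SPI speed split; the 4 MHz oscillator, 10x PLL multiplier and 20 MHz SPI controller limit are assumptions for illustration, while the "/ 2 / 1000 * 850" (85% of SYSCLK/2) factor is taken from the code above:

#include <stdio.h>

/* Worked example of the slow/fast SPI clock split. The oscillator
 * frequency, PLL multiplier and controller limit below are assumed
 * values; the driver takes the real ones from the clock and the
 * MCP251XFD_* constants.
 */
static unsigned int spi_limit(unsigned int sysclk_hz)
{
	return sysclk_hz / 2 / 1000 * 850;	/* 85% of SYSCLK/2 */
}

int main(void)
{
	unsigned int osc_hz = 4000000;		/* external oscillator */
	unsigned int pll_mult = 10;		/* assumed PLL multiplier */
	unsigned int spi_controller_max = 20000000;
	unsigned int slow, fast;

	slow = spi_limit(osc_hz);		/* before the PLL is locked */
	fast = spi_limit(osc_hz * pll_mult);	/* once the PLL is ready */

	if (slow > spi_controller_max)
		slow = spi_controller_max;
	if (fast > spi_controller_max)
		fast = spi_controller_max;

	printf("slow: %u Hz, fast: %u Hz\n", slow, fast);	/* 1700000 / 17000000 */
	return 0;
}
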
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
index ffae8fdd3af0..c991b30bc9f0 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
@@ -207,10 +207,10 @@ static void mcp251xfd_dump_tx_ring(const struct mcp251xfd_priv *priv,
.val = tx->base,
}, {
.key = MCP251XFD_DUMP_OBJECT_RING_KEY_NR,
- .val = 0,
+ .val = tx->nr,
}, {
.key = MCP251XFD_DUMP_OBJECT_RING_KEY_FIFO_NR,
- .val = MCP251XFD_TX_FIFO,
+ .val = tx->fifo_nr,
}, {
.key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_NUM,
.val = tx->obj_num,
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
new file mode 100644
index 000000000000..6c7a57f16cc6
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2021, 2022 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include <linux/ethtool.h>
+
+#include "mcp251xfd.h"
+#include "mcp251xfd-ram.h"
+
+static void
+mcp251xfd_ring_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ const struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ const bool fd_mode = mcp251xfd_is_fd_mode(priv);
+ struct can_ram_layout layout;
+
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, fd_mode);
+ ring->rx_max_pending = layout.max_rx;
+ ring->tx_max_pending = layout.max_tx;
+
+ ring->rx_pending = priv->rx_obj_num;
+ ring->tx_pending = priv->tx->obj_num;
+}
+
+static int
+mcp251xfd_ring_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ const bool fd_mode = mcp251xfd_is_fd_mode(priv);
+ struct can_ram_layout layout;
+
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, ring, NULL, fd_mode);
+ if ((layout.cur_rx != priv->rx_obj_num ||
+ layout.cur_tx != priv->tx->obj_num) &&
+ netif_running(ndev))
+ return -EBUSY;
+
+ priv->rx_obj_num = layout.cur_rx;
+ priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;
+ priv->tx->obj_num = layout.cur_tx;
+
+ return 0;
+}
+
+static int mcp251xfd_ring_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kec,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ u32 rx_max_frames, tx_max_frames;
+
+ /* The ethtool doc says:
+ * To disable coalescing, set usecs = 0 and max_frames = 1.
+ */
+ if (priv->rx_obj_num_coalesce_irq == 0)
+ rx_max_frames = 1;
+ else
+ rx_max_frames = priv->rx_obj_num_coalesce_irq;
+
+ ec->rx_max_coalesced_frames_irq = rx_max_frames;
+ ec->rx_coalesce_usecs_irq = priv->rx_coalesce_usecs_irq;
+
+ if (priv->tx_obj_num_coalesce_irq == 0)
+ tx_max_frames = 1;
+ else
+ tx_max_frames = priv->tx_obj_num_coalesce_irq;
+
+ ec->tx_max_coalesced_frames_irq = tx_max_frames;
+ ec->tx_coalesce_usecs_irq = priv->tx_coalesce_usecs_irq;
+
+ return 0;
+}
+
+static int mcp251xfd_ring_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kec,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ const bool fd_mode = mcp251xfd_is_fd_mode(priv);
+ const struct ethtool_ringparam ring = {
+ .rx_pending = priv->rx_obj_num,
+ .tx_pending = priv->tx->obj_num,
+ };
+ struct can_ram_layout layout;
+
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, &ring, ec, fd_mode);
+
+ if ((layout.rx_coalesce != priv->rx_obj_num_coalesce_irq ||
+ ec->rx_coalesce_usecs_irq != priv->rx_coalesce_usecs_irq ||
+ layout.tx_coalesce != priv->tx_obj_num_coalesce_irq ||
+ ec->tx_coalesce_usecs_irq != priv->tx_coalesce_usecs_irq) &&
+ netif_running(ndev))
+ return -EBUSY;
+
+ priv->rx_obj_num = layout.cur_rx;
+ priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;
+ priv->rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
+
+ priv->tx->obj_num = layout.cur_tx;
+ priv->tx_obj_num_coalesce_irq = layout.tx_coalesce;
+ priv->tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
+
+ return 0;
+}
+
+static const struct ethtool_ops mcp251xfd_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS_IRQ |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_TX_USECS_IRQ |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
+ .get_ringparam = mcp251xfd_ring_get_ringparam,
+ .set_ringparam = mcp251xfd_ring_set_ringparam,
+ .get_coalesce = mcp251xfd_ring_get_coalesce,
+ .set_coalesce = mcp251xfd_ring_set_coalesce,
+};
+
+void mcp251xfd_ethtool_init(struct mcp251xfd_priv *priv)
+{
+ struct can_ram_layout layout;
+
+ priv->ndev->ethtool_ops = &mcp251xfd_ethtool_ops;
+
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, false);
+ priv->rx_obj_num = layout.default_rx;
+ priv->tx->obj_num = layout.default_tx;
+
+ priv->rx_obj_num_coalesce_irq = 0;
+ priv->tx_obj_num_coalesce_irq = 0;
+ priv->rx_coalesce_usecs_irq = 0;
+ priv->tx_coalesce_usecs_irq = 0;
+}
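
The coalescing callbacks above follow the documented ethtool convention that "usecs = 0 and max_frames = 1" means coalescing is disabled, so an internal count of 0 coalesced objects is reported back to ethtool as max_frames = 1. A tiny stand-alone sketch of that mapping (illustration only, not driver code):

#include <stdbool.h>
#include <stdio.h>

/* Internal count of 0 objects == coalescing off == max_frames 1. */
static unsigned int report_max_frames(unsigned int obj_num_coalesce_irq)
{
	return obj_num_coalesce_irq ? obj_num_coalesce_irq : 1;
}

/* The inverse direction used on the set path. */
static bool coalescing_requested(unsigned int usecs, unsigned int max_frames)
{
	return !(usecs == 0 && max_frames == 1);
}

int main(void)
{
	printf("off -> max_frames=%u\n", report_max_frames(0));
	printf("usecs=0 max_frames=1 -> %s\n",
	       coalescing_requested(0, 1) ? "coalesce" : "disabled");
	return 0;
}
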
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
new file mode 100644
index 000000000000..9e8e82cdba46
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2021, 2022 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include "mcp251xfd-ram.h"
+
+static inline u8 can_ram_clamp(const struct can_ram_config *config,
+ const struct can_ram_obj_config *obj,
+ u8 val)
+{
+ u8 max;
+
+ max = min_t(u8, obj->max, obj->fifo_num * config->fifo_depth);
+ return clamp(val, obj->min, max);
+}
+
+static u8
+can_ram_rounddown_pow_of_two(const struct can_ram_config *config,
+ const struct can_ram_obj_config *obj,
+ const u8 coalesce, u8 val)
+{
+ u8 fifo_num = obj->fifo_num;
+ u8 ret = 0, i;
+
+ val = can_ram_clamp(config, obj, val);
+
+ if (coalesce) {
+ /* Use 1st FIFO for coalescing, if requested.
+ *
+ * Either use complete FIFO (and FIFO Full IRQ) for
+ * coalescing or only half of FIFO (FIFO Half Full
+ * IRQ) and use remaining half for normal objects.
+ */
+ ret = min_t(u8, coalesce * 2, config->fifo_depth);
+ val -= ret;
+ fifo_num--;
+ }
+
+ for (i = 0; i < fifo_num && val; i++) {
+ u8 n;
+
+ n = min_t(u8, rounddown_pow_of_two(val),
+ config->fifo_depth);
+
+ /* skip small FIFOs */
+ if (n < obj->fifo_depth_min)
+ return ret;
+
+ ret += n;
+ val -= n;
+ }
+
+ return ret;
+}
+
+void can_ram_get_layout(struct can_ram_layout *layout,
+ const struct can_ram_config *config,
+ const struct ethtool_ringparam *ring,
+ const struct ethtool_coalesce *ec,
+ const bool fd_mode)
+{
+ u8 num_rx, num_tx;
+ u16 ram_free;
+
+ /* default CAN */
+
+ num_tx = config->tx.def[fd_mode];
+ num_tx = can_ram_rounddown_pow_of_two(config, &config->tx, 0, num_tx);
+
+ ram_free = config->size;
+ ram_free -= config->tx.size[fd_mode] * num_tx;
+
+ num_rx = ram_free / config->rx.size[fd_mode];
+
+ layout->default_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx);
+ layout->default_tx = num_tx;
+
+ /* MAX CAN */
+
+ ram_free = config->size;
+ ram_free -= config->tx.size[fd_mode] * config->tx.min;
+ num_rx = ram_free / config->rx.size[fd_mode];
+
+ ram_free = config->size;
+ ram_free -= config->rx.size[fd_mode] * config->rx.min;
+ num_tx = ram_free / config->tx.size[fd_mode];
+
+ layout->max_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx);
+ layout->max_tx = can_ram_rounddown_pow_of_two(config, &config->tx, 0, num_tx);
+
+ /* cur CAN */
+
+ if (ring) {
+ u8 num_rx_coalesce = 0, num_tx_coalesce = 0;
+
+ num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, ring->rx_pending);
+
+ /* The ethtool doc says:
+ * To disable coalescing, set usecs = 0 and max_frames = 1.
+ */
+ if (ec && !(ec->rx_coalesce_usecs_irq == 0 &&
+ ec->rx_max_coalesced_frames_irq == 1)) {
+ u8 max;
+
+ /* use only max half of available objects for coalescing */
+ max = min_t(u8, num_rx / 2, config->fifo_depth);
+ num_rx_coalesce = clamp(ec->rx_max_coalesced_frames_irq,
+ (u32)config->rx.fifo_depth_coalesce_min,
+ (u32)max);
+ num_rx_coalesce = rounddown_pow_of_two(num_rx_coalesce);
+
+ num_rx = can_ram_rounddown_pow_of_two(config, &config->rx,
+ num_rx_coalesce, num_rx);
+ }
+
+ ram_free = config->size - config->rx.size[fd_mode] * num_rx;
+ num_tx = ram_free / config->tx.size[fd_mode];
+ num_tx = min_t(u8, ring->tx_pending, num_tx);
+ num_tx = can_ram_rounddown_pow_of_two(config, &config->tx, 0, num_tx);
+
+ /* The ethtool doc says:
+ * To disable coalescing, set usecs = 0 and max_frames = 1.
+ */
+ if (ec && !(ec->tx_coalesce_usecs_irq == 0 &&
+ ec->tx_max_coalesced_frames_irq == 1)) {
+ u8 max;
+
+ /* use only max half of available objects for coalescing */
+ max = min_t(u8, num_tx / 2, config->fifo_depth);
+ num_tx_coalesce = clamp(ec->tx_max_coalesced_frames_irq,
+ (u32)config->tx.fifo_depth_coalesce_min,
+ (u32)max);
+ num_tx_coalesce = rounddown_pow_of_two(num_tx_coalesce);
+
+ num_tx = can_ram_rounddown_pow_of_two(config, &config->tx,
+ num_tx_coalesce, num_tx);
+ }
+
+ layout->cur_rx = num_rx;
+ layout->cur_tx = num_tx;
+ layout->rx_coalesce = num_rx_coalesce;
+ layout->tx_coalesce = num_tx_coalesce;
+ } else {
+ layout->cur_rx = layout->default_rx;
+ layout->cur_tx = layout->default_tx;
+ layout->rx_coalesce = 0;
+ layout->tx_coalesce = 0;
+ }
+}
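
can_ram_rounddown_pow_of_two() above distributes a requested number of objects over several hardware FIFOs: the first FIFO is reserved for IRQ coalescing (either full or half full, hence "coalesce * 2"), the remaining FIFOs are filled in power-of-two chunks, and FIFOs below the minimum depth are skipped. A stand-alone model of that distribution with hypothetical limits (FIFO depth 32, 3 FIFOs, minimum depth 4; the initial clamp step is omitted for brevity):

#include <stdio.h>

static unsigned int rounddown_pow_of_two(unsigned int v)
{
	unsigned int r = 1;

	while (r * 2 <= v)
		r *= 2;
	return r;
}

static unsigned int distribute(unsigned int fifo_num, unsigned int fifo_depth,
			       unsigned int depth_min, unsigned int coalesce,
			       unsigned int val)
{
	unsigned int ret = 0, i;

	if (coalesce) {
		/* 1st FIFO: full or half-full IRQ used for coalescing */
		ret = coalesce * 2 < fifo_depth ? coalesce * 2 : fifo_depth;
		val -= ret;
		fifo_num--;
	}

	for (i = 0; i < fifo_num && val; i++) {
		unsigned int n = rounddown_pow_of_two(val);

		if (n > fifo_depth)
			n = fifo_depth;
		if (n < depth_min)	/* skip small FIFOs */
			break;
		ret += n;
		val -= n;
	}

	return ret;
}

int main(void)
{
	/* 40 requested RX objects with 8 frames of IRQ coalescing:
	 * FIFO 1 gets 16 (half-full IRQ at 8), FIFO 2 gets 16, FIFO 3 gets 8.
	 */
	printf("%u objects allocated\n", distribute(3, 32, 4, 8, 40));
	return 0;
}
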
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.h
new file mode 100644
index 000000000000..7558c1510cbf
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+ *
+ * Copyright (c) 2021, 2022 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
+ */
+
+#ifndef _MCP251XFD_RAM_H
+#define _MCP251XFD_RAM_H
+
+#include <linux/ethtool.h>
+
+#define CAN_RAM_NUM_MAX (-1)
+
+enum can_ram_mode {
+ CAN_RAM_MODE_CAN,
+ CAN_RAM_MODE_CANFD,
+ __CAN_RAM_MODE_MAX
+};
+
+struct can_ram_obj_config {
+ u8 size[__CAN_RAM_MODE_MAX];
+
+ u8 def[__CAN_RAM_MODE_MAX];
+ u8 min;
+ u8 max;
+
+ u8 fifo_num;
+ u8 fifo_depth_min;
+ u8 fifo_depth_coalesce_min;
+};
+
+struct can_ram_config {
+ const struct can_ram_obj_config rx;
+ const struct can_ram_obj_config tx;
+
+ u16 size;
+ u8 fifo_depth;
+};
+
+struct can_ram_layout {
+ u8 default_rx;
+ u8 default_tx;
+
+ u8 max_rx;
+ u8 max_tx;
+
+ u8 cur_rx;
+ u8 cur_tx;
+
+ u8 rx_coalesce;
+ u8 tx_coalesce;
+};
+
+void can_ram_get_layout(struct can_ram_layout *layout,
+ const struct can_ram_config *config,
+ const struct ethtool_ringparam *ring,
+ const struct ethtool_coalesce *ec,
+ const bool fd_mode);
+
+#endif
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
index 7b120c716228..217510c12af5 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
@@ -2,8 +2,8 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
-// Copyright (c) 2019, 2020 Pengutronix,
-// Marc Kleine-Budde <kernel@pengutronix.de>
+// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
//
#include "mcp251xfd.h"
@@ -47,22 +47,32 @@ mcp251xfd_regmap_nocrc_gather_write(void *context,
return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
}
-static inline bool mcp251xfd_update_bits_read_reg(unsigned int reg)
+static inline bool
+mcp251xfd_update_bits_read_reg(const struct mcp251xfd_priv *priv,
+ unsigned int reg)
{
+ struct mcp251xfd_rx_ring *ring;
+ int n;
+
switch (reg) {
case MCP251XFD_REG_INT:
case MCP251XFD_REG_TEFCON:
- case MCP251XFD_REG_FIFOCON(MCP251XFD_RX_FIFO(0)):
case MCP251XFD_REG_FLTCON(0):
case MCP251XFD_REG_ECCSTAT:
case MCP251XFD_REG_CRC:
return false;
case MCP251XFD_REG_CON:
- case MCP251XFD_REG_FIFOSTA(MCP251XFD_RX_FIFO(0)):
case MCP251XFD_REG_OSC:
case MCP251XFD_REG_ECCCON:
return true;
default:
+ mcp251xfd_for_each_rx_ring(priv, ring, n) {
+ if (reg == MCP251XFD_REG_FIFOCON(ring->fifo_nr))
+ return false;
+ if (reg == MCP251XFD_REG_FIFOSTA(ring->fifo_nr))
+ return true;
+ }
+
WARN(1, "Status of reg 0x%04x unknown.\n", reg);
}
@@ -92,7 +102,7 @@ mcp251xfd_regmap_nocrc_update_bits(void *context, unsigned int reg,
last_byte = mcp251xfd_last_byte_set(mask);
len = last_byte - first_byte + 1;
- if (mcp251xfd_update_bits_read_reg(reg)) {
+ if (mcp251xfd_update_bits_read_reg(priv, reg)) {
struct spi_transfer xfer[2] = { };
struct spi_message msg;
@@ -368,7 +378,7 @@ mcp251xfd_regmap_crc_read(void *context,
* to the caller. It will take care of both cases.
*
*/
- if (reg == MCP251XFD_REG_OSC) {
+ if (reg == MCP251XFD_REG_OSC && val_len == sizeof(__le32)) {
err = 0;
goto out;
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
index 92f9e9b01289..bf3f0f150199 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
@@ -15,6 +15,7 @@
#include <asm/unaligned.h>
#include "mcp251xfd.h"
+#include "mcp251xfd-ram.h"
static inline u8
mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
@@ -53,6 +54,72 @@ mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
}
static void
+mcp251xfd_ring_init_tef(struct mcp251xfd_priv *priv, u16 *base)
+{
+ struct mcp251xfd_tef_ring *tef_ring;
+ struct spi_transfer *xfer;
+ u32 val;
+ u16 addr;
+ u8 len;
+ int i;
+
+ /* TEF */
+ tef_ring = priv->tef;
+ tef_ring->head = 0;
+ tef_ring->tail = 0;
+
+ /* TEF- and TX-FIFO have same number of objects */
+ *base = mcp251xfd_get_tef_obj_addr(priv->tx->obj_num);
+
+ /* FIFO IRQ enable */
+ addr = MCP251XFD_REG_TEFCON;
+ val = MCP251XFD_REG_TEFCON_TEFOVIE | MCP251XFD_REG_TEFCON_TEFNEIE;
+
+ len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->irq_enable_buf,
+ addr, val, val);
+ tef_ring->irq_enable_xfer.tx_buf = &tef_ring->irq_enable_buf;
+ tef_ring->irq_enable_xfer.len = len;
+ spi_message_init_with_transfers(&tef_ring->irq_enable_msg,
+ &tef_ring->irq_enable_xfer, 1);
+
+ /* FIFO increment TEF tail pointer */
+ addr = MCP251XFD_REG_TEFCON;
+ val = MCP251XFD_REG_TEFCON_UINC;
+ len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf,
+ addr, val, val);
+
+ for (i = 0; i < ARRAY_SIZE(tef_ring->uinc_xfer); i++) {
+ xfer = &tef_ring->uinc_xfer[i];
+ xfer->tx_buf = &tef_ring->uinc_buf;
+ xfer->len = len;
+ xfer->cs_change = 1;
+ xfer->cs_change_delay.value = 0;
+ xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+ }
+
+ /* "cs_change == 1" on the last transfer results in an active
+ * chip select after the complete SPI message. This causes the
+ * controller to interpret the next register access as
+ * data. Set "cs_change" of the last transfer to "0" to
+ * properly deactivate the chip select at the end of the
+ * message.
+ */
+ xfer->cs_change = 0;
+
+ if (priv->tx_coalesce_usecs_irq || priv->tx_obj_num_coalesce_irq) {
+ val = MCP251XFD_REG_TEFCON_UINC |
+ MCP251XFD_REG_TEFCON_TEFOVIE |
+ MCP251XFD_REG_TEFCON_TEFHIE;
+
+ len = mcp251xfd_cmd_prepare_write_reg(priv,
+ &tef_ring->uinc_irq_disable_buf,
+ addr, val, val);
+ xfer->tx_buf = &tef_ring->uinc_irq_disable_buf;
+ xfer->len = len;
+ }
+}
+
+static void
mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
const struct mcp251xfd_tx_ring *ring,
struct mcp251xfd_tx_obj *tx_obj,
@@ -88,84 +155,68 @@ mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
ARRAY_SIZE(tx_obj->xfer));
}
-void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
+static void
+mcp251xfd_ring_init_tx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
{
- struct mcp251xfd_tef_ring *tef_ring;
struct mcp251xfd_tx_ring *tx_ring;
- struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL;
struct mcp251xfd_tx_obj *tx_obj;
- struct spi_transfer *xfer;
u32 val;
u16 addr;
u8 len;
- int i, j;
-
- netdev_reset_queue(priv->ndev);
-
- /* TEF */
- tef_ring = priv->tef;
- tef_ring->head = 0;
- tef_ring->tail = 0;
-
- /* FIFO increment TEF tail pointer */
- addr = MCP251XFD_REG_TEFCON;
- val = MCP251XFD_REG_TEFCON_UINC;
- len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf,
- addr, val, val);
-
- for (j = 0; j < ARRAY_SIZE(tef_ring->uinc_xfer); j++) {
- xfer = &tef_ring->uinc_xfer[j];
- xfer->tx_buf = &tef_ring->uinc_buf;
- xfer->len = len;
- xfer->cs_change = 1;
- xfer->cs_change_delay.value = 0;
- xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
- }
-
- /* "cs_change == 1" on the last transfer results in an active
- * chip select after the complete SPI message. This causes the
- * controller to interpret the next register access as
- * data. Set "cs_change" of the last transfer to "0" to
- * properly deactivate the chip select at the end of the
- * message.
- */
- xfer->cs_change = 0;
+ int i;
- /* TX */
tx_ring = priv->tx;
tx_ring->head = 0;
tx_ring->tail = 0;
- tx_ring->base = mcp251xfd_get_tef_obj_addr(tx_ring->obj_num);
+ tx_ring->base = *base;
+ tx_ring->nr = 0;
+ tx_ring->fifo_nr = *fifo_nr;
+
+ *base = mcp251xfd_get_tx_obj_addr(tx_ring, tx_ring->obj_num);
+ *fifo_nr += 1;
/* FIFO request to send */
- addr = MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO);
+ addr = MCP251XFD_REG_FIFOCON(tx_ring->fifo_nr);
val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC;
len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf,
addr, val, val);
mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i)
mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i);
+}
+
+static void
+mcp251xfd_ring_init_rx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
+{
+ struct mcp251xfd_rx_ring *rx_ring;
+ struct spi_transfer *xfer;
+ u32 val;
+ u16 addr;
+ u8 len;
+ int i, j;
- /* RX */
mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
rx_ring->head = 0;
rx_ring->tail = 0;
+ rx_ring->base = *base;
rx_ring->nr = i;
- rx_ring->fifo_nr = MCP251XFD_RX_FIFO(i);
+ rx_ring->fifo_nr = *fifo_nr;
- if (!prev_rx_ring)
- rx_ring->base =
- mcp251xfd_get_tx_obj_addr(tx_ring,
- tx_ring->obj_num);
- else
- rx_ring->base = prev_rx_ring->base +
- prev_rx_ring->obj_size *
- prev_rx_ring->obj_num;
+ *base = mcp251xfd_get_rx_obj_addr(rx_ring, rx_ring->obj_num);
+ *fifo_nr += 1;
- prev_rx_ring = rx_ring;
+ /* FIFO IRQ enable */
+ addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);
+ val = MCP251XFD_REG_FIFOCON_RXOVIE |
+ MCP251XFD_REG_FIFOCON_TFNRFNIE;
+ len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->irq_enable_buf,
+ addr, val, val);
+ rx_ring->irq_enable_xfer.tx_buf = &rx_ring->irq_enable_buf;
+ rx_ring->irq_enable_xfer.len = len;
+ spi_message_init_with_transfers(&rx_ring->irq_enable_msg,
+ &rx_ring->irq_enable_xfer, 1);
/* FIFO increment RX tail pointer */
- addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);
val = MCP251XFD_REG_FIFOCON_UINC;
len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf,
addr, val, val);
@@ -187,9 +238,149 @@ void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
* the chip select at the end of the message.
*/
xfer->cs_change = 0;
+
+ /* Use 1st RX-FIFO for IRQ coalescing. If enabled
+ * (rx_coalesce_usecs_irq or rx_max_coalesce_frames_irq
+ * is activated), use the last transfer to disable:
+ *
+ * - TFNRFNIE (Receive FIFO Not Empty Interrupt)
+ *
+ * and enable:
+ *
+ * - TFHRFHIE (Receive FIFO Half Full Interrupt)
+ * - or -
+ * - TFERFFIE (Receive FIFO Full Interrupt)
+ *
+ * depending on rx_max_coalesce_frames_irq.
+ *
+ * The RXOVIE (Overflow Interrupt) is always enabled.
+ */
+ if (rx_ring->nr == 0 && (priv->rx_coalesce_usecs_irq ||
+ priv->rx_obj_num_coalesce_irq)) {
+ val = MCP251XFD_REG_FIFOCON_UINC |
+ MCP251XFD_REG_FIFOCON_RXOVIE;
+
+ if (priv->rx_obj_num_coalesce_irq == rx_ring->obj_num)
+ val |= MCP251XFD_REG_FIFOCON_TFERFFIE;
+ else if (priv->rx_obj_num_coalesce_irq)
+ val |= MCP251XFD_REG_FIFOCON_TFHRFHIE;
+
+ len = mcp251xfd_cmd_prepare_write_reg(priv,
+ &rx_ring->uinc_irq_disable_buf,
+ addr, val, val);
+ xfer->tx_buf = &rx_ring->uinc_irq_disable_buf;
+ xfer->len = len;
+ }
}
}
+int mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
+{
+ const struct mcp251xfd_rx_ring *rx_ring;
+ u16 base = 0, ram_used;
+ u8 fifo_nr = 1;
+ int i;
+
+ netdev_reset_queue(priv->ndev);
+
+ mcp251xfd_ring_init_tef(priv, &base);
+ mcp251xfd_ring_init_rx(priv, &base, &fifo_nr);
+ mcp251xfd_ring_init_tx(priv, &base, &fifo_nr);
+
+ /* mcp251xfd_handle_rxif() will iterate over all RX rings.
+ * Rings with their corresponding bit set in
+ * priv->regs_status.rxif are read out.
+ *
+ * If the chip is configured for only 1 RX-FIFO, and if there
+ * is an RX interrupt pending (RXIF in INT register is set),
+ * it must be the 1st RX-FIFO.
+ *
+ * We mark the RXIF of the 1st FIFO as pending here, so that
+ * we can skip the read of the RXIF register in
+ * mcp251xfd_read_regs_status() for the 1 RX-FIFO only case.
+ *
+ * If we use more than 1 RX-FIFO, this value gets overwritten
+ * in mcp251xfd_read_regs_status(), so set it unconditionally
+ * here.
+ */
+ priv->regs_status.rxif = BIT(priv->rx[0]->fifo_nr);
+
+ if (priv->tx_obj_num_coalesce_irq) {
+ netdev_dbg(priv->ndev,
+ "FIFO setup: TEF: 0x%03x: %2d*%zu bytes = %4zu bytes (coalesce)\n",
+ mcp251xfd_get_tef_obj_addr(0),
+ priv->tx_obj_num_coalesce_irq,
+ sizeof(struct mcp251xfd_hw_tef_obj),
+ priv->tx_obj_num_coalesce_irq *
+ sizeof(struct mcp251xfd_hw_tef_obj));
+
+ netdev_dbg(priv->ndev,
+ " 0x%03x: %2d*%zu bytes = %4zu bytes\n",
+ mcp251xfd_get_tef_obj_addr(priv->tx_obj_num_coalesce_irq),
+ priv->tx->obj_num - priv->tx_obj_num_coalesce_irq,
+ sizeof(struct mcp251xfd_hw_tef_obj),
+ (priv->tx->obj_num - priv->tx_obj_num_coalesce_irq) *
+ sizeof(struct mcp251xfd_hw_tef_obj));
+ } else {
+ netdev_dbg(priv->ndev,
+ "FIFO setup: TEF: 0x%03x: %2d*%zu bytes = %4zu bytes\n",
+ mcp251xfd_get_tef_obj_addr(0),
+ priv->tx->obj_num, sizeof(struct mcp251xfd_hw_tef_obj),
+ priv->tx->obj_num * sizeof(struct mcp251xfd_hw_tef_obj));
+ }
+
+ mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
+ if (rx_ring->nr == 0 && priv->rx_obj_num_coalesce_irq) {
+ netdev_dbg(priv->ndev,
+ "FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes (coalesce)\n",
+ rx_ring->nr, rx_ring->fifo_nr,
+ mcp251xfd_get_rx_obj_addr(rx_ring, 0),
+ priv->rx_obj_num_coalesce_irq, rx_ring->obj_size,
+ priv->rx_obj_num_coalesce_irq * rx_ring->obj_size);
+
+ if (priv->rx_obj_num_coalesce_irq == MCP251XFD_FIFO_DEPTH)
+ continue;
+
+ netdev_dbg(priv->ndev,
+ " 0x%03x: %2u*%u bytes = %4u bytes\n",
+ mcp251xfd_get_rx_obj_addr(rx_ring,
+ priv->rx_obj_num_coalesce_irq),
+ rx_ring->obj_num - priv->rx_obj_num_coalesce_irq,
+ rx_ring->obj_size,
+ (rx_ring->obj_num - priv->rx_obj_num_coalesce_irq) *
+ rx_ring->obj_size);
+ } else {
+ netdev_dbg(priv->ndev,
+ "FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n",
+ rx_ring->nr, rx_ring->fifo_nr,
+ mcp251xfd_get_rx_obj_addr(rx_ring, 0),
+ rx_ring->obj_num, rx_ring->obj_size,
+ rx_ring->obj_num * rx_ring->obj_size);
+ }
+ }
+
+ netdev_dbg(priv->ndev,
+ "FIFO setup: TX: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n",
+ priv->tx->fifo_nr,
+ mcp251xfd_get_tx_obj_addr(priv->tx, 0),
+ priv->tx->obj_num, priv->tx->obj_size,
+ priv->tx->obj_num * priv->tx->obj_size);
+
+ netdev_dbg(priv->ndev,
+ "FIFO setup: free: %4d bytes\n",
+ MCP251XFD_RAM_SIZE - (base - MCP251XFD_RAM_START));
+
+ ram_used = base - MCP251XFD_RAM_START;
+ if (ram_used > MCP251XFD_RAM_SIZE) {
+ netdev_err(priv->ndev,
+ "Error during ring configuration, using more RAM (%u bytes) than available (%u bytes).\n",
+ ram_used, MCP251XFD_RAM_SIZE);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
{
int i;
@@ -200,40 +391,103 @@ void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
}
}
+static enum hrtimer_restart mcp251xfd_rx_irq_timer(struct hrtimer *t)
+{
+ struct mcp251xfd_priv *priv = container_of(t, struct mcp251xfd_priv,
+ rx_irq_timer);
+ struct mcp251xfd_rx_ring *ring = priv->rx[0];
+
+ if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags))
+ return HRTIMER_NORESTART;
+
+ spi_async(priv->spi, &ring->irq_enable_msg);
+
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart mcp251xfd_tx_irq_timer(struct hrtimer *t)
+{
+ struct mcp251xfd_priv *priv = container_of(t, struct mcp251xfd_priv,
+ tx_irq_timer);
+ struct mcp251xfd_tef_ring *ring = priv->tef;
+
+ if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags))
+ return HRTIMER_NORESTART;
+
+ spi_async(priv->spi, &ring->irq_enable_msg);
+
+ return HRTIMER_NORESTART;
+}
+
+const struct can_ram_config mcp251xfd_ram_config = {
+ .rx = {
+ .size[CAN_RAM_MODE_CAN] = sizeof(struct mcp251xfd_hw_rx_obj_can),
+ .size[CAN_RAM_MODE_CANFD] = sizeof(struct mcp251xfd_hw_rx_obj_canfd),
+ .min = MCP251XFD_RX_OBJ_NUM_MIN,
+ .max = MCP251XFD_RX_OBJ_NUM_MAX,
+ .def[CAN_RAM_MODE_CAN] = CAN_RAM_NUM_MAX,
+ .def[CAN_RAM_MODE_CANFD] = CAN_RAM_NUM_MAX,
+ .fifo_num = MCP251XFD_FIFO_RX_NUM,
+ .fifo_depth_min = MCP251XFD_RX_FIFO_DEPTH_MIN,
+ .fifo_depth_coalesce_min = MCP251XFD_RX_FIFO_DEPTH_COALESCE_MIN,
+ },
+ .tx = {
+ .size[CAN_RAM_MODE_CAN] = sizeof(struct mcp251xfd_hw_tef_obj) +
+ sizeof(struct mcp251xfd_hw_tx_obj_can),
+ .size[CAN_RAM_MODE_CANFD] = sizeof(struct mcp251xfd_hw_tef_obj) +
+ sizeof(struct mcp251xfd_hw_tx_obj_canfd),
+ .min = MCP251XFD_TX_OBJ_NUM_MIN,
+ .max = MCP251XFD_TX_OBJ_NUM_MAX,
+ .def[CAN_RAM_MODE_CAN] = MCP251XFD_TX_OBJ_NUM_CAN_DEFAULT,
+ .def[CAN_RAM_MODE_CANFD] = MCP251XFD_TX_OBJ_NUM_CANFD_DEFAULT,
+ .fifo_num = MCP251XFD_FIFO_TX_NUM,
+ .fifo_depth_min = MCP251XFD_TX_FIFO_DEPTH_MIN,
+ .fifo_depth_coalesce_min = MCP251XFD_TX_FIFO_DEPTH_COALESCE_MIN,
+ },
+ .size = MCP251XFD_RAM_SIZE,
+ .fifo_depth = MCP251XFD_FIFO_DEPTH,
+};
+
int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
{
- struct mcp251xfd_tx_ring *tx_ring;
+ const bool fd_mode = mcp251xfd_is_fd_mode(priv);
+ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
struct mcp251xfd_rx_ring *rx_ring;
- int tef_obj_size, tx_obj_size, rx_obj_size;
- int tx_obj_num;
- int ram_free, i;
+ u8 tx_obj_size, rx_obj_size;
+ u8 rem, i;
- tef_obj_size = sizeof(struct mcp251xfd_hw_tef_obj);
- if (mcp251xfd_is_fd_mode(priv)) {
- tx_obj_num = MCP251XFD_TX_OBJ_NUM_CANFD;
+ /* switching from CAN-2.0 to CAN-FD mode or vice versa */
+ if (fd_mode != test_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags)) {
+ struct can_ram_layout layout;
+
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, fd_mode);
+ priv->rx_obj_num = layout.default_rx;
+ tx_ring->obj_num = layout.default_tx;
+ }
+
+ if (fd_mode) {
tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd);
rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd);
+ set_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
} else {
- tx_obj_num = MCP251XFD_TX_OBJ_NUM_CAN;
tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can);
rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can);
+ clear_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
}
- tx_ring = priv->tx;
- tx_ring->obj_num = tx_obj_num;
tx_ring->obj_size = tx_obj_size;
- ram_free = MCP251XFD_RAM_SIZE - tx_obj_num *
- (tef_obj_size + tx_obj_size);
-
- for (i = 0;
- i < ARRAY_SIZE(priv->rx) && ram_free >= rx_obj_size;
- i++) {
- int rx_obj_num;
+ rem = priv->rx_obj_num;
+ for (i = 0; i < ARRAY_SIZE(priv->rx) && rem; i++) {
+ u8 rx_obj_num;
- rx_obj_num = ram_free / rx_obj_size;
- rx_obj_num = min(1 << (fls(rx_obj_num) - 1),
- MCP251XFD_RX_OBJ_NUM_MAX);
+ if (i == 0 && priv->rx_obj_num_coalesce_irq)
+ rx_obj_num = min_t(u8, priv->rx_obj_num_coalesce_irq * 2,
+ MCP251XFD_FIFO_DEPTH);
+ else
+ rx_obj_num = min_t(u8, rounddown_pow_of_two(rem),
+ MCP251XFD_FIFO_DEPTH);
+ rem -= rx_obj_num;
rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
GFP_KERNEL);
@@ -241,29 +495,18 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
mcp251xfd_ring_free(priv);
return -ENOMEM;
}
+
rx_ring->obj_num = rx_obj_num;
rx_ring->obj_size = rx_obj_size;
priv->rx[i] = rx_ring;
-
- ram_free -= rx_ring->obj_num * rx_ring->obj_size;
}
priv->rx_ring_num = i;
- netdev_dbg(priv->ndev,
- "FIFO setup: TEF: %d*%d bytes = %d bytes, TX: %d*%d bytes = %d bytes\n",
- tx_obj_num, tef_obj_size, tef_obj_size * tx_obj_num,
- tx_obj_num, tx_obj_size, tx_obj_size * tx_obj_num);
+ hrtimer_init(&priv->rx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ priv->rx_irq_timer.function = mcp251xfd_rx_irq_timer;
- mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
- netdev_dbg(priv->ndev,
- "FIFO setup: RX-%d: %d*%d bytes = %d bytes\n",
- i, rx_ring->obj_num, rx_ring->obj_size,
- rx_ring->obj_size * rx_ring->obj_num);
- }
-
- netdev_dbg(priv->ndev,
- "FIFO setup: free: %d bytes\n",
- ram_free);
+ hrtimer_init(&priv->tx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ priv->tx_irq_timer.function = mcp251xfd_tx_irq_timer;
return 0;
}
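The loop above distributes priv->rx_obj_num over the up to three RX rings: ring 0 may be sized after the RX IRQ coalescing setting (twice the coalesce count), and the remainder is handed out in power-of-two chunks capped at the 32-object hardware FIFO depth. The following standalone sketch replays that split for one set of example values (48 RX objects, coalescing of 8); it is not driver code.

#include <stdio.h>

#define FIFO_DEPTH	32u
#define NUM_RX_RINGS	3u

static unsigned int rounddown_pow_of_two(unsigned int n)
{
	unsigned int r = 1;

	while (r * 2 <= n)
		r *= 2;

	return r;
}

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int rem = 48, coalesce = 8;	/* example values */
	unsigned int i, n;

	for (i = 0; i < NUM_RX_RINGS && rem; i++) {
		if (i == 0 && coalesce)
			n = min_u(coalesce * 2, FIFO_DEPTH);
		else
			n = min_u(rounddown_pow_of_two(rem), FIFO_DEPTH);

		rem -= n;
		printf("RX ring %u: %u objects\n", i, n);
	}

	return 0;
}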
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
index 63f2526464b3..d09f7fbf2ba7 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
@@ -19,7 +19,7 @@
static inline int
mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
const struct mcp251xfd_rx_ring *ring,
- u8 *rx_head)
+ u8 *rx_head, bool *fifo_empty)
{
u32 fifo_sta;
int err;
@@ -30,6 +30,7 @@ mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
return err;
*rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
+ *fifo_empty = !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF);
return 0;
}
@@ -84,10 +85,12 @@ mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
{
u32 new_head;
u8 chip_rx_head;
+ bool fifo_empty;
int err;
- err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head);
- if (err)
+ err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head,
+ &fifo_empty);
+ if (err || fifo_empty)
return err;
/* chip_rx_head is the next RX-Object filled by the HW.
@@ -251,10 +254,23 @@ int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv)
int err, n;
mcp251xfd_for_each_rx_ring(priv, ring, n) {
+ /* - if RX IRQ coalescing is active always handle ring 0
+ * - only handle rings if RX IRQ is active
+ */
+ if ((ring->nr > 0 || !priv->rx_obj_num_coalesce_irq) &&
+ !(priv->regs_status.rxif & BIT(ring->fifo_nr)))
+ continue;
+
err = mcp251xfd_handle_rxif_ring(priv, ring);
if (err)
return err;
}
+ if (priv->rx_coalesce_usecs_irq)
+ hrtimer_start(&priv->rx_irq_timer,
+ ns_to_ktime(priv->rx_coalesce_usecs_irq *
+ NSEC_PER_USEC),
+ HRTIMER_MODE_REL);
+
return 0;
}
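The skip condition at the top of the loop is fairly dense; spelled out, a ring is handled either because RX IRQ coalescing is active and it is ring 0, or because its bit in the cached RXIF register is pending. A hypothetical helper with the equivalent logic (the example_ name is not part of the driver):

static bool example_ring_needs_handling(const struct mcp251xfd_priv *priv,
					const struct mcp251xfd_rx_ring *ring)
{
	/* with RX IRQ coalescing active, ring 0 is always handled */
	if (priv->rx_obj_num_coalesce_irq && ring->nr == 0)
		return true;

	/* otherwise only handle rings whose RXIF bit is pending */
	return !!(priv->regs_status.rxif & BIT(ring->fifo_nr));
}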
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
index 406166005b99..237617b0c125 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
@@ -256,5 +256,11 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
netif_wake_queue(priv->ndev);
}
+ if (priv->tx_coalesce_usecs_irq)
+ hrtimer_start(&priv->tx_irq_timer,
+ ns_to_ktime(priv->tx_coalesce_usecs_irq *
+ NSEC_PER_USEC),
+ HRTIMER_MODE_REL);
+
return 0;
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
index f551c900803e..9cb6b5ad8dda 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -2,8 +2,8 @@
*
* mcp251xfd - Microchip MCP251xFD Family CAN controller driver
*
- * Copyright (c) 2019 Pengutronix,
- * Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (c) 2019, 2020, 2021 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
* Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
*/
@@ -367,25 +367,6 @@
#define MCP251XFD_REG_DEVID_ID_MASK GENMASK(7, 4)
#define MCP251XFD_REG_DEVID_REV_MASK GENMASK(3, 0)
-/* number of TX FIFO objects, depending on CAN mode
- *
- * FIFO setup: tef: 8*12 bytes = 96 bytes, tx: 8*16 bytes = 128 bytes
- * FIFO setup: tef: 4*12 bytes = 48 bytes, tx: 4*72 bytes = 288 bytes
- */
-#define MCP251XFD_RX_OBJ_NUM_MAX 32
-#define MCP251XFD_TX_OBJ_NUM_CAN 8
-#define MCP251XFD_TX_OBJ_NUM_CANFD 4
-
-#if MCP251XFD_TX_OBJ_NUM_CAN > MCP251XFD_TX_OBJ_NUM_CANFD
-#define MCP251XFD_TX_OBJ_NUM_MAX MCP251XFD_TX_OBJ_NUM_CAN
-#else
-#define MCP251XFD_TX_OBJ_NUM_MAX MCP251XFD_TX_OBJ_NUM_CANFD
-#endif
-
-#define MCP251XFD_NAPI_WEIGHT 32
-#define MCP251XFD_TX_FIFO 1
-#define MCP251XFD_RX_FIFO(x) (MCP251XFD_TX_FIFO + 1 + (x))
-
/* SPI commands */
#define MCP251XFD_SPI_INSTRUCTION_RESET 0x0000
#define MCP251XFD_SPI_INSTRUCTION_WRITE 0x2000
@@ -406,12 +387,38 @@ static_assert(MCP251XFD_TIMESTAMP_WORK_DELAY_SEC <
#define MCP251XFD_OSC_STAB_TIMEOUT_US (10 * MCP251XFD_OSC_STAB_SLEEP_US)
#define MCP251XFD_POLL_SLEEP_US (10)
#define MCP251XFD_POLL_TIMEOUT_US (USEC_PER_MSEC)
+
+/* Misc */
+#define MCP251XFD_NAPI_WEIGHT 32
#define MCP251XFD_SOFTRESET_RETRIES_MAX 3
#define MCP251XFD_READ_CRC_RETRIES_MAX 3
#define MCP251XFD_ECC_CNT_MAX 2
#define MCP251XFD_SANITIZE_SPI 1
#define MCP251XFD_SANITIZE_CAN 1
+/* FIFO and Ring */
+#define MCP251XFD_FIFO_TEF_NUM 1U
+#define MCP251XFD_FIFO_RX_NUM 3U
+#define MCP251XFD_FIFO_TX_NUM 1U
+
+#define MCP251XFD_FIFO_DEPTH 32U
+
+#define MCP251XFD_RX_OBJ_NUM_MIN 16U
+#define MCP251XFD_RX_OBJ_NUM_MAX (MCP251XFD_FIFO_RX_NUM * MCP251XFD_FIFO_DEPTH)
+#define MCP251XFD_RX_FIFO_DEPTH_MIN 4U
+#define MCP251XFD_RX_FIFO_DEPTH_COALESCE_MIN 8U
+
+#define MCP251XFD_TX_OBJ_NUM_MIN 2U
+#define MCP251XFD_TX_OBJ_NUM_MAX 16U
+#define MCP251XFD_TX_OBJ_NUM_CAN_DEFAULT 8U
+#define MCP251XFD_TX_OBJ_NUM_CANFD_DEFAULT 4U
+#define MCP251XFD_TX_FIFO_DEPTH_MIN 2U
+#define MCP251XFD_TX_FIFO_DEPTH_COALESCE_MIN 2U
+
+static_assert(MCP251XFD_FIFO_TEF_NUM == 1U);
+static_assert(MCP251XFD_FIFO_TEF_NUM == MCP251XFD_FIFO_TX_NUM);
+static_assert(MCP251XFD_FIFO_RX_NUM <= 4U);
+
/* Silence TX MAB overflow warnings */
#define MCP251XFD_QUIRK_MAB_NO_WARN BIT(0)
/* Use CRC to access registers */
@@ -512,7 +519,12 @@ struct mcp251xfd_tef_ring {
/* u8 obj_num equals tx_ring->obj_num */
/* u8 obj_size equals sizeof(struct mcp251xfd_hw_tef_obj) */
+ union mcp251xfd_write_reg_buf irq_enable_buf;
+ struct spi_transfer irq_enable_xfer;
+ struct spi_message irq_enable_msg;
+
union mcp251xfd_write_reg_buf uinc_buf;
+ union mcp251xfd_write_reg_buf uinc_irq_disable_buf;
struct spi_transfer uinc_xfer[MCP251XFD_TX_OBJ_NUM_MAX];
};
@@ -521,6 +533,8 @@ struct mcp251xfd_tx_ring {
unsigned int tail;
u16 base;
+ u8 nr;
+ u8 fifo_nr;
u8 obj_num;
u8 obj_size;
@@ -538,8 +552,13 @@ struct mcp251xfd_rx_ring {
u8 obj_num;
u8 obj_size;
+ union mcp251xfd_write_reg_buf irq_enable_buf;
+ struct spi_transfer irq_enable_xfer;
+ struct spi_message irq_enable_msg;
+
union mcp251xfd_write_reg_buf uinc_buf;
- struct spi_transfer uinc_xfer[MCP251XFD_RX_OBJ_NUM_MAX];
+ union mcp251xfd_write_reg_buf uinc_irq_disable_buf;
+ struct spi_transfer uinc_xfer[MCP251XFD_FIFO_DEPTH];
struct mcp251xfd_hw_rx_obj_canfd obj[];
};
@@ -561,6 +580,7 @@ struct mcp251xfd_ecc {
struct mcp251xfd_regs_status {
u32 intf;
+ u32 rxif;
};
enum mcp251xfd_model {
@@ -574,6 +594,13 @@ struct mcp251xfd_devtype_data {
u32 quirks;
};
+enum mcp251xfd_flags {
+ MCP251XFD_FLAGS_DOWN,
+ MCP251XFD_FLAGS_FD_MODE,
+
+ __MCP251XFD_FLAGS_SIZE__
+};
+
struct mcp251xfd_priv {
struct can_priv can;
struct can_rx_offload offload;
@@ -592,12 +619,24 @@ struct mcp251xfd_priv {
struct spi_device *spi;
u32 spi_max_speed_hz_orig;
+ u32 spi_max_speed_hz_fast;
+ u32 spi_max_speed_hz_slow;
+
+ struct mcp251xfd_tef_ring tef[MCP251XFD_FIFO_TEF_NUM];
+ struct mcp251xfd_rx_ring *rx[MCP251XFD_FIFO_RX_NUM];
+ struct mcp251xfd_tx_ring tx[MCP251XFD_FIFO_TX_NUM];
- struct mcp251xfd_tef_ring tef[1];
- struct mcp251xfd_tx_ring tx[1];
- struct mcp251xfd_rx_ring *rx[1];
+ DECLARE_BITMAP(flags, __MCP251XFD_FLAGS_SIZE__);
u8 rx_ring_num;
+ u8 rx_obj_num;
+ u8 rx_obj_num_coalesce_irq;
+ u8 tx_obj_num_coalesce_irq;
+
+ u32 rx_coalesce_usecs_irq;
+ u32 tx_coalesce_usecs_irq;
+ struct hrtimer rx_irq_timer;
+ struct hrtimer tx_irq_timer;
struct mcp251xfd_ecc ecc;
struct mcp251xfd_regs_status regs_status;
@@ -608,6 +647,7 @@ struct mcp251xfd_priv {
struct gpio_desc *rx_int;
struct clk *clk;
+ bool pll_enable;
struct regulator *reg_vdd;
struct regulator *reg_xceiver;
@@ -776,7 +816,7 @@ mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
int err;
err = regmap_read(priv->map_reg,
- MCP251XFD_REG_FIFOSTA(MCP251XFD_TX_FIFO),
+ MCP251XFD_REG_FIFOSTA(priv->tx->fifo_nr),
&fifo_sta);
if (err)
return err;
@@ -878,8 +918,10 @@ int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv);
u16 mcp251xfd_crc16_compute2(const void *cmd, size_t cmd_size,
const void *data, size_t data_size);
u16 mcp251xfd_crc16_compute(const void *data, size_t data_size);
+void mcp251xfd_ethtool_init(struct mcp251xfd_priv *priv);
int mcp251xfd_regmap_init(struct mcp251xfd_priv *priv);
-void mcp251xfd_ring_init(struct mcp251xfd_priv *priv);
+extern const struct can_ram_config mcp251xfd_ram_config;
+int mcp251xfd_ring_init(struct mcp251xfd_priv *priv);
void mcp251xfd_ring_free(struct mcp251xfd_priv *priv);
int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv);
int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv);
diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c
index ec87126e1a7d..c97ffa71fd75 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_fd.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c
@@ -69,7 +69,8 @@ static int es58x_fd_echo_msg(struct net_device *netdev,
int i, num_element;
u32 rcv_packet_idx;
- const u32 mask = GENMASK(31, sizeof(echo_msg->packet_idx) * 8);
+ const u32 mask = GENMASK(BITS_PER_TYPE(mask) - 1,
+ BITS_PER_TYPE(echo_msg->packet_idx));
num_element = es58x_msg_num_element(es58x_dev->dev,
es58x_fd_urb_cmd->echo_msg,
@@ -172,12 +173,11 @@ static int es58x_fd_rx_event_msg(struct net_device *netdev,
const struct es58x_fd_rx_event_msg *rx_event_msg;
int ret;
+ rx_event_msg = &es58x_fd_urb_cmd->rx_event_msg;
ret = es58x_check_msg_len(es58x_dev->dev, *rx_event_msg, msg_len);
if (ret)
return ret;
- rx_event_msg = &es58x_fd_urb_cmd->rx_event_msg;
-
return es58x_rx_err_msg(netdev, rx_event_msg->error_code,
rx_event_msg->event_code,
get_unaligned_le64(&rx_event_msg->timestamp));
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index d35749fad1ef..67408e316062 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -9,11 +9,12 @@
* Many thanks to all socketcan devs!
*/
+#include <linux/bitfield.h>
#include <linux/ethtool.h>
#include <linux/init.h>
-#include <linux/signal.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/signal.h>
#include <linux/usb.h>
#include <linux/can.h>
@@ -21,14 +22,20 @@
#include <linux/can/error.h>
/* Device specific constants */
-#define USB_GSUSB_1_VENDOR_ID 0x1d50
-#define USB_GSUSB_1_PRODUCT_ID 0x606f
+#define USB_GSUSB_1_VENDOR_ID 0x1d50
+#define USB_GSUSB_1_PRODUCT_ID 0x606f
-#define USB_CANDLELIGHT_VENDOR_ID 0x1209
+#define USB_CANDLELIGHT_VENDOR_ID 0x1209
#define USB_CANDLELIGHT_PRODUCT_ID 0x2323
-#define GSUSB_ENDPOINT_IN 1
-#define GSUSB_ENDPOINT_OUT 2
+#define USB_CES_CANEXT_FD_VENDOR_ID 0x1cd2
+#define USB_CES_CANEXT_FD_PRODUCT_ID 0x606f
+
+#define USB_ABE_CANDEBUGGER_FD_VENDOR_ID 0x16d0
+#define USB_ABE_CANDEBUGGER_FD_PRODUCT_ID 0x10b8
+
+#define GSUSB_ENDPOINT_IN 1
+#define GSUSB_ENDPOINT_OUT 2
/* Device specific constants */
enum gs_usb_breq {
@@ -40,6 +47,11 @@ enum gs_usb_breq {
GS_USB_BREQ_DEVICE_CONFIG,
GS_USB_BREQ_TIMESTAMP,
GS_USB_BREQ_IDENTIFY,
+ GS_USB_BREQ_GET_USER_ID,
+ GS_USB_BREQ_QUIRK_CANTACT_PRO_DATA_BITTIMING = GS_USB_BREQ_GET_USER_ID,
+ GS_USB_BREQ_SET_USER_ID,
+ GS_USB_BREQ_DATA_BITTIMING,
+ GS_USB_BREQ_BT_CONST_EXT,
};
enum gs_can_mode {
@@ -87,11 +99,18 @@ struct gs_device_config {
__le32 hw_version;
} __packed;
-#define GS_CAN_MODE_NORMAL 0
-#define GS_CAN_MODE_LISTEN_ONLY BIT(0)
-#define GS_CAN_MODE_LOOP_BACK BIT(1)
-#define GS_CAN_MODE_TRIPLE_SAMPLE BIT(2)
-#define GS_CAN_MODE_ONE_SHOT BIT(3)
+#define GS_CAN_MODE_NORMAL 0
+#define GS_CAN_MODE_LISTEN_ONLY BIT(0)
+#define GS_CAN_MODE_LOOP_BACK BIT(1)
+#define GS_CAN_MODE_TRIPLE_SAMPLE BIT(2)
+#define GS_CAN_MODE_ONE_SHOT BIT(3)
+#define GS_CAN_MODE_HW_TIMESTAMP BIT(4)
+/* GS_CAN_FEATURE_IDENTIFY BIT(5) */
+/* GS_CAN_FEATURE_USER_ID BIT(6) */
+#define GS_CAN_MODE_PAD_PKTS_TO_MAX_PKT_SIZE BIT(7)
+#define GS_CAN_MODE_FD BIT(8)
+/* GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX BIT(9) */
+/* GS_CAN_FEATURE_BT_CONST_EXT BIT(10) */
struct gs_device_mode {
__le32 mode;
@@ -116,12 +135,25 @@ struct gs_identify_mode {
__le32 mode;
} __packed;
-#define GS_CAN_FEATURE_LISTEN_ONLY BIT(0)
-#define GS_CAN_FEATURE_LOOP_BACK BIT(1)
-#define GS_CAN_FEATURE_TRIPLE_SAMPLE BIT(2)
-#define GS_CAN_FEATURE_ONE_SHOT BIT(3)
-#define GS_CAN_FEATURE_HW_TIMESTAMP BIT(4)
-#define GS_CAN_FEATURE_IDENTIFY BIT(5)
+#define GS_CAN_FEATURE_LISTEN_ONLY BIT(0)
+#define GS_CAN_FEATURE_LOOP_BACK BIT(1)
+#define GS_CAN_FEATURE_TRIPLE_SAMPLE BIT(2)
+#define GS_CAN_FEATURE_ONE_SHOT BIT(3)
+#define GS_CAN_FEATURE_HW_TIMESTAMP BIT(4)
+#define GS_CAN_FEATURE_IDENTIFY BIT(5)
+#define GS_CAN_FEATURE_USER_ID BIT(6)
+#define GS_CAN_FEATURE_PAD_PKTS_TO_MAX_PKT_SIZE BIT(7)
+#define GS_CAN_FEATURE_FD BIT(8)
+#define GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX BIT(9)
+#define GS_CAN_FEATURE_BT_CONST_EXT BIT(10)
+#define GS_CAN_FEATURE_MASK GENMASK(10, 0)
+
+/* internal quirks - keep in GS_CAN_FEATURE space for now */
+
+/* CANtact Pro original firmware:
+ * BREQ DATA_BITTIMING overlaps with GET_USER_ID
+ */
+#define GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO BIT(31)
struct gs_device_bt_const {
__le32 feature;
@@ -136,7 +168,50 @@ struct gs_device_bt_const {
__le32 brp_inc;
} __packed;
-#define GS_CAN_FLAG_OVERFLOW 1
+struct gs_device_bt_const_extended {
+ __le32 feature;
+ __le32 fclk_can;
+ __le32 tseg1_min;
+ __le32 tseg1_max;
+ __le32 tseg2_min;
+ __le32 tseg2_max;
+ __le32 sjw_max;
+ __le32 brp_min;
+ __le32 brp_max;
+ __le32 brp_inc;
+
+ __le32 dtseg1_min;
+ __le32 dtseg1_max;
+ __le32 dtseg2_min;
+ __le32 dtseg2_max;
+ __le32 dsjw_max;
+ __le32 dbrp_min;
+ __le32 dbrp_max;
+ __le32 dbrp_inc;
+} __packed;
+
+#define GS_CAN_FLAG_OVERFLOW BIT(0)
+#define GS_CAN_FLAG_FD BIT(1)
+#define GS_CAN_FLAG_BRS BIT(2)
+#define GS_CAN_FLAG_ESI BIT(3)
+
+struct classic_can {
+ u8 data[8];
+} __packed;
+
+struct classic_can_quirk {
+ u8 data[8];
+ u8 quirk;
+} __packed;
+
+struct canfd {
+ u8 data[64];
+} __packed;
+
+struct canfd_quirk {
+ u8 data[64];
+ u8 quirk;
+} __packed;
struct gs_host_frame {
u32 echo_id;
@@ -147,7 +222,12 @@ struct gs_host_frame {
u8 flags;
u8 reserved;
- u8 data[8];
+ union {
+ DECLARE_FLEX_ARRAY(struct classic_can, classic_can);
+ DECLARE_FLEX_ARRAY(struct classic_can_quirk, classic_can_quirk);
+ DECLARE_FLEX_ARRAY(struct canfd, canfd);
+ DECLARE_FLEX_ARRAY(struct canfd_quirk, canfd_quirk);
+ };
} __packed;
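The union replaces the fixed 8-byte payload with a mode-dependent one; the driver later sizes its USB buffers with struct_size() so that only the selected member is accounted for (see the hf_size_tx/hf_size_rx assignments in gs_can_open() and gs_usb_probe() below). A hedged sketch of that selection, folded into one hypothetical helper:

static size_t example_hf_size(bool fd, bool lpc546xx_quirk)
{
	struct gs_host_frame *hf;

	if (fd)
		return lpc546xx_quirk ? struct_size(hf, canfd_quirk, 1) :
					struct_size(hf, canfd, 1);

	return lpc546xx_quirk ? struct_size(hf, classic_can_quirk, 1) :
				struct_size(hf, classic_can, 1);
}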
/* The GS USB devices make use of the same flags and masks as in
* linux/can.h and linux/can/error.h, and no additional mapping is necessary.
@@ -158,9 +238,9 @@ struct gs_host_frame {
/* Only launch a max of GS_MAX_RX_URBS usb requests at a time. */
#define GS_MAX_RX_URBS 30
/* Maximum number of interfaces the driver supports per device.
- * Current hardware only supports 2 interfaces. The future may vary.
+ * Current hardware only supports 3 interfaces. The future may vary.
*/
-#define GS_MAX_INTF 2
+#define GS_MAX_INTF 3
struct gs_tx_context {
struct gs_can *dev;
@@ -176,9 +256,12 @@ struct gs_can {
struct usb_device *udev;
struct usb_interface *iface;
- struct can_bittiming_const bt_const;
+ struct can_bittiming_const bt_const, data_bt_const;
unsigned int channel; /* channel number */
+ u32 feature;
+ unsigned int hf_size_tx;
+
/* This lock prevents a race condition between xmit and receive. */
spinlock_t tx_ctx_lock;
struct gs_tx_context tx_context[GS_MAX_TX_URBS];
@@ -192,6 +275,7 @@ struct gs_usb {
struct gs_can *canch[GS_MAX_INTF];
struct usb_anchor rx_submitted;
struct usb_device *udev;
+ unsigned int hf_size_rx;
u8 active_channels;
};
@@ -258,11 +342,7 @@ static int gs_cmd_reset(struct gs_can *gsdev)
usb_sndctrlpipe(interface_to_usbdev(intf), 0),
GS_USB_BREQ_MODE,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- gsdev->channel,
- 0,
- dm,
- sizeof(*dm),
- 1000);
+ gsdev->channel, 0, dm, sizeof(*dm), 1000);
kfree(dm);
@@ -304,6 +384,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
struct gs_host_frame *hf = urb->transfer_buffer;
struct gs_tx_context *txc;
struct can_frame *cf;
+ struct canfd_frame *cfd;
struct sk_buff *skb;
BUG_ON(!usbcan);
@@ -332,18 +413,33 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
return;
if (hf->echo_id == -1) { /* normal rx */
- skb = alloc_can_skb(dev->netdev, &cf);
- if (!skb)
- return;
+ if (hf->flags & GS_CAN_FLAG_FD) {
+ skb = alloc_canfd_skb(dev->netdev, &cfd);
+ if (!skb)
+ return;
+
+ cfd->can_id = le32_to_cpu(hf->can_id);
+ cfd->len = can_fd_dlc2len(hf->can_dlc);
+ if (hf->flags & GS_CAN_FLAG_BRS)
+ cfd->flags |= CANFD_BRS;
+ if (hf->flags & GS_CAN_FLAG_ESI)
+ cfd->flags |= CANFD_ESI;
+
+ memcpy(cfd->data, hf->canfd->data, cfd->len);
+ } else {
+ skb = alloc_can_skb(dev->netdev, &cf);
+ if (!skb)
+ return;
- cf->can_id = le32_to_cpu(hf->can_id);
+ cf->can_id = le32_to_cpu(hf->can_id);
+ can_frame_set_cc_len(cf, hf->can_dlc, dev->can.ctrlmode);
- can_frame_set_cc_len(cf, hf->can_dlc, dev->can.ctrlmode);
- memcpy(cf->data, hf->data, 8);
+ memcpy(cf->data, hf->classic_can->data, 8);
- /* ERROR frames tell us information about the controller */
- if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG)
- gs_update_state(dev, cf);
+ /* ERROR frames tell us information about the controller */
+ if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG)
+ gs_update_state(dev, cf);
+ }
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += hf->can_dlc;
@@ -392,14 +488,10 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
}
resubmit_urb:
- usb_fill_bulk_urb(urb,
- usbcan->udev,
+ usb_fill_bulk_urb(urb, usbcan->udev,
usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN),
- hf,
- sizeof(struct gs_host_frame),
- gs_usb_receive_bulk_callback,
- usbcan
- );
+ hf, dev->parent->hf_size_rx,
+ gs_usb_receive_bulk_callback, usbcan);
rc = usb_submit_urb(urb, GFP_ATOMIC);
@@ -436,11 +528,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
usb_sndctrlpipe(interface_to_usbdev(intf), 0),
GS_USB_BREQ_BITTIMING,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- dev->channel,
- 0,
- dbt,
- sizeof(*dbt),
- 1000);
+ dev->channel, 0, dbt, sizeof(*dbt), 1000);
kfree(dbt);
@@ -451,6 +539,44 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
return (rc > 0) ? 0 : rc;
}
+static int gs_usb_set_data_bittiming(struct net_device *netdev)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct can_bittiming *bt = &dev->can.data_bittiming;
+ struct usb_interface *intf = dev->iface;
+ struct gs_device_bittiming *dbt;
+ u8 request = GS_USB_BREQ_DATA_BITTIMING;
+ int rc;
+
+ dbt = kmalloc(sizeof(*dbt), GFP_KERNEL);
+ if (!dbt)
+ return -ENOMEM;
+
+ dbt->prop_seg = cpu_to_le32(bt->prop_seg);
+ dbt->phase_seg1 = cpu_to_le32(bt->phase_seg1);
+ dbt->phase_seg2 = cpu_to_le32(bt->phase_seg2);
+ dbt->sjw = cpu_to_le32(bt->sjw);
+ dbt->brp = cpu_to_le32(bt->brp);
+
+ if (dev->feature & GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO)
+ request = GS_USB_BREQ_QUIRK_CANTACT_PRO_DATA_BITTIMING;
+
+ /* request bit timings */
+ rc = usb_control_msg(interface_to_usbdev(intf),
+ usb_sndctrlpipe(interface_to_usbdev(intf), 0),
+ request,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, dbt, sizeof(*dbt), 1000);
+
+ kfree(dbt);
+
+ if (rc < 0)
+ dev_err(netdev->dev.parent,
+ "Couldn't set data bittimings (err=%d)", rc);
+
+ return (rc > 0) ? 0 : rc;
+}
+
static void gs_usb_xmit_callback(struct urb *urb)
{
struct gs_tx_context *txc = urb->context;
@@ -460,10 +586,8 @@ static void gs_usb_xmit_callback(struct urb *urb)
if (urb->status)
netdev_info(netdev, "usb xmit fail %u\n", txc->echo_id);
- usb_free_coherent(urb->dev,
- urb->transfer_buffer_length,
- urb->transfer_buffer,
- urb->transfer_dma);
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
}
static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
@@ -474,6 +598,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
struct urb *urb;
struct gs_host_frame *hf;
struct can_frame *cf;
+ struct canfd_frame *cfd;
int rc;
unsigned int idx;
struct gs_tx_context *txc;
@@ -491,7 +616,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
if (!urb)
goto nomem_urb;
- hf = usb_alloc_coherent(dev->udev, sizeof(*hf), GFP_ATOMIC,
+ hf = usb_alloc_coherent(dev->udev, dev->hf_size_tx, GFP_ATOMIC,
&urb->transfer_dma);
if (!hf) {
netdev_err(netdev, "No memory left for USB buffer\n");
@@ -510,19 +635,31 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
hf->flags = 0;
hf->reserved = 0;
- cf = (struct can_frame *)skb->data;
+ if (can_is_canfd_skb(skb)) {
+ cfd = (struct canfd_frame *)skb->data;
- hf->can_id = cpu_to_le32(cf->can_id);
- hf->can_dlc = can_get_cc_dlc(cf, dev->can.ctrlmode);
+ hf->can_id = cpu_to_le32(cfd->can_id);
+ hf->can_dlc = can_fd_len2dlc(cfd->len);
+ hf->flags |= GS_CAN_FLAG_FD;
+ if (cfd->flags & CANFD_BRS)
+ hf->flags |= GS_CAN_FLAG_BRS;
+ if (cfd->flags & CANFD_ESI)
+ hf->flags |= GS_CAN_FLAG_ESI;
- memcpy(hf->data, cf->data, cf->len);
+ memcpy(hf->canfd->data, cfd->data, cfd->len);
+ } else {
+ cf = (struct can_frame *)skb->data;
+
+ hf->can_id = cpu_to_le32(cf->can_id);
+ hf->can_dlc = can_get_cc_dlc(cf, dev->can.ctrlmode);
+
+ memcpy(hf->classic_can->data, cf->data, cf->len);
+ }
usb_fill_bulk_urb(urb, dev->udev,
usb_sndbulkpipe(dev->udev, GSUSB_ENDPOINT_OUT),
- hf,
- sizeof(*hf),
- gs_usb_xmit_callback,
- txc);
+ hf, dev->hf_size_tx,
+ gs_usb_xmit_callback, txc);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &dev->tx_submitted);
@@ -539,10 +676,8 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
gs_free_tx_context(txc);
usb_unanchor_urb(urb);
- usb_free_coherent(dev->udev,
- sizeof(*hf),
- hf,
- urb->transfer_dma);
+ usb_free_coherent(dev->udev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
if (rc == -ENODEV) {
netif_device_detach(netdev);
@@ -562,10 +697,8 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
badidx:
- usb_free_coherent(dev->udev,
- sizeof(*hf),
- hf,
- urb->transfer_dma);
+ usb_free_coherent(dev->udev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
nomem_hf:
usb_free_urb(urb);
@@ -582,6 +715,7 @@ static int gs_can_open(struct net_device *netdev)
struct gs_usb *parent = dev->parent;
int rc, i;
struct gs_device_mode *dm;
+ struct gs_host_frame *hf;
u32 ctrlmode;
u32 flags = 0;
@@ -589,6 +723,21 @@ static int gs_can_open(struct net_device *netdev)
if (rc)
return rc;
+ ctrlmode = dev->can.ctrlmode;
+ if (ctrlmode & CAN_CTRLMODE_FD) {
+ flags |= GS_CAN_MODE_FD;
+
+ if (dev->feature & GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX)
+ dev->hf_size_tx = struct_size(hf, canfd_quirk, 1);
+ else
+ dev->hf_size_tx = struct_size(hf, canfd, 1);
+ } else {
+ if (dev->feature & GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX)
+ dev->hf_size_tx = struct_size(hf, classic_can_quirk, 1);
+ else
+ dev->hf_size_tx = struct_size(hf, classic_can, 1);
+ }
+
if (!parent->active_channels) {
for (i = 0; i < GS_MAX_RX_URBS; i++) {
struct urb *urb;
@@ -601,7 +750,7 @@ static int gs_can_open(struct net_device *netdev)
/* alloc rx buffer */
buf = usb_alloc_coherent(dev->udev,
- sizeof(struct gs_host_frame),
+ dev->parent->hf_size_rx,
GFP_KERNEL,
&urb->transfer_dma);
if (!buf) {
@@ -617,9 +766,8 @@ static int gs_can_open(struct net_device *netdev)
usb_rcvbulkpipe(dev->udev,
GSUSB_ENDPOINT_IN),
buf,
- sizeof(struct gs_host_frame),
- gs_usb_receive_bulk_callback,
- parent);
+ dev->parent->hf_size_rx,
+ gs_usb_receive_bulk_callback, parent);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &parent->rx_submitted);
@@ -630,8 +778,7 @@ static int gs_can_open(struct net_device *netdev)
netif_device_detach(dev->netdev);
netdev_err(netdev,
- "usb_submit failed (err=%d)\n",
- rc);
+ "usb_submit failed (err=%d)\n", rc);
usb_unanchor_urb(urb);
usb_free_urb(urb);
@@ -650,8 +797,6 @@ static int gs_can_open(struct net_device *netdev)
return -ENOMEM;
/* flags */
- ctrlmode = dev->can.ctrlmode;
-
if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
flags |= GS_CAN_MODE_LOOP_BACK;
else if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
@@ -672,13 +817,8 @@ static int gs_can_open(struct net_device *netdev)
rc = usb_control_msg(interface_to_usbdev(dev->iface),
usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
GS_USB_BREQ_MODE,
- USB_DIR_OUT | USB_TYPE_VENDOR |
- USB_RECIP_INTERFACE,
- dev->channel,
- 0,
- dm,
- sizeof(*dm),
- 1000);
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, dm, sizeof(*dm), 1000);
if (rc < 0) {
netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
@@ -755,16 +895,10 @@ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF);
rc = usb_control_msg(interface_to_usbdev(dev->iface),
- usb_sndctrlpipe(interface_to_usbdev(dev->iface),
- 0),
+ usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
GS_USB_BREQ_IDENTIFY,
- USB_DIR_OUT | USB_TYPE_VENDOR |
- USB_RECIP_INTERFACE,
- dev->channel,
- 0,
- imode,
- sizeof(*imode),
- 100);
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, imode, sizeof(*imode), 100);
kfree(imode);
@@ -803,6 +937,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
struct net_device *netdev;
int rc;
struct gs_device_bt_const *bt_const;
+ struct gs_device_bt_const_extended *bt_const_extended;
u32 feature;
bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL);
@@ -814,11 +949,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
GS_USB_BREQ_BT_CONST,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- channel,
- 0,
- bt_const,
- sizeof(*bt_const),
- 1000);
+ channel, 0, bt_const, sizeof(*bt_const), 1000);
if (rc < 0) {
dev_err(&intf->dev,
@@ -875,6 +1006,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
dev->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC;
feature = le32_to_cpu(bt_const->feature);
+ dev->feature = FIELD_GET(GS_CAN_FEATURE_MASK, feature);
if (feature & GS_CAN_FEATURE_LISTEN_ONLY)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
@@ -887,7 +1019,37 @@ static struct gs_can *gs_make_candev(unsigned int channel,
if (feature & GS_CAN_FEATURE_ONE_SHOT)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
- SET_NETDEV_DEV(netdev, &intf->dev);
+ if (feature & GS_CAN_FEATURE_FD) {
+ dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
+ /* The data bit timing will be overwritten if
+ * GS_CAN_FEATURE_BT_CONST_EXT is set.
+ */
+ dev->can.data_bittiming_const = &dev->bt_const;
+ dev->can.do_set_data_bittiming = gs_usb_set_data_bittiming;
+ }
+
+ /* The CANtact Pro from LinkLayer Labs is based on the
+ * LPC54616 µC, which is affected by the NXP LPC USB transfer
+ * erratum. However, the current firmware (version 2) doesn't
+ * set the GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX bit. Set the
+ * feature GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX to work around
+ * this issue.
+ *
+ * For the GS_USB_BREQ_DATA_BITTIMING USB control message the
+ * CANtact Pro firmware uses a request value, which is already
+ * used by the candleLight firmware for a different purpose
+ * (GS_USB_BREQ_GET_USER_ID). Set the feature
+ * GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO to work around this
+ * issue.
+ */
+ if (dev->udev->descriptor.idVendor == cpu_to_le16(USB_GSUSB_1_VENDOR_ID) &&
+ dev->udev->descriptor.idProduct == cpu_to_le16(USB_GSUSB_1_PRODUCT_ID) &&
+ dev->udev->manufacturer && dev->udev->product &&
+ !strcmp(dev->udev->manufacturer, "LinkLayer Labs") &&
+ !strcmp(dev->udev->product, "CANtact Pro") &&
+ (le32_to_cpu(dconf->sw_version) <= 2))
+ dev->feature |= GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX |
+ GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO;
if (le32_to_cpu(dconf->sw_version) > 1)
if (feature & GS_CAN_FEATURE_IDENTIFY)
@@ -895,6 +1057,45 @@ static struct gs_can *gs_make_candev(unsigned int channel,
kfree(bt_const);
+ /* fetch extended bit timing constants if device has feature
+ * GS_CAN_FEATURE_FD and GS_CAN_FEATURE_BT_CONST_EXT
+ */
+ if (feature & GS_CAN_FEATURE_FD &&
+ feature & GS_CAN_FEATURE_BT_CONST_EXT) {
+ bt_const_extended = kmalloc(sizeof(*bt_const_extended), GFP_KERNEL);
+ if (!bt_const_extended)
+ return ERR_PTR(-ENOMEM);
+
+ rc = usb_control_msg(interface_to_usbdev(intf),
+ usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
+ GS_USB_BREQ_BT_CONST_EXT,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ channel, 0, bt_const_extended,
+ sizeof(*bt_const_extended),
+ 1000);
+ if (rc < 0) {
+ dev_err(&intf->dev,
+ "Couldn't get extended bit timing const for channel (err=%d)\n",
+ rc);
+ kfree(bt_const_extended);
+ return ERR_PTR(rc);
+ }
+
+ strcpy(dev->data_bt_const.name, "gs_usb");
+ dev->data_bt_const.tseg1_min = le32_to_cpu(bt_const_extended->dtseg1_min);
+ dev->data_bt_const.tseg1_max = le32_to_cpu(bt_const_extended->dtseg1_max);
+ dev->data_bt_const.tseg2_min = le32_to_cpu(bt_const_extended->dtseg2_min);
+ dev->data_bt_const.tseg2_max = le32_to_cpu(bt_const_extended->dtseg2_max);
+ dev->data_bt_const.sjw_max = le32_to_cpu(bt_const_extended->dsjw_max);
+ dev->data_bt_const.brp_min = le32_to_cpu(bt_const_extended->dbrp_min);
+ dev->data_bt_const.brp_max = le32_to_cpu(bt_const_extended->dbrp_max);
+ dev->data_bt_const.brp_inc = le32_to_cpu(bt_const_extended->dbrp_inc);
+
+ dev->can.data_bittiming_const = &dev->data_bt_const;
+ }
+
+ SET_NETDEV_DEV(netdev, &intf->dev);
+
rc = register_candev(dev->netdev);
if (rc) {
free_candev(dev->netdev);
@@ -915,6 +1116,8 @@ static void gs_destroy_candev(struct gs_can *dev)
static int gs_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct gs_host_frame *hf;
struct gs_usb *dev;
int rc = -ENOMEM;
unsigned int icount, i;
@@ -928,21 +1131,16 @@ static int gs_usb_probe(struct usb_interface *intf,
hconf->byte_order = cpu_to_le32(0x0000beef);
/* send host config */
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_sndctrlpipe(interface_to_usbdev(intf), 0),
+ rc = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
GS_USB_BREQ_HOST_FORMAT,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- 1,
- intf->cur_altsetting->desc.bInterfaceNumber,
- hconf,
- sizeof(*hconf),
- 1000);
+ 1, intf->cur_altsetting->desc.bInterfaceNumber,
+ hconf, sizeof(*hconf), 1000);
kfree(hconf);
if (rc < 0) {
- dev_err(&intf->dev, "Couldn't send data format (err=%d)\n",
- rc);
+ dev_err(&intf->dev, "Couldn't send data format (err=%d)\n", rc);
return rc;
}
@@ -951,15 +1149,11 @@ static int gs_usb_probe(struct usb_interface *intf,
return -ENOMEM;
/* read device config */
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
+ rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
GS_USB_BREQ_DEVICE_CONFIG,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- 1,
- intf->cur_altsetting->desc.bInterfaceNumber,
- dconf,
- sizeof(*dconf),
- 1000);
+ 1, intf->cur_altsetting->desc.bInterfaceNumber,
+ dconf, sizeof(*dconf), 1000);
if (rc < 0) {
dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n",
rc);
@@ -985,9 +1179,13 @@ static int gs_usb_probe(struct usb_interface *intf,
}
init_usb_anchor(&dev->rx_submitted);
+ /* default to classic CAN, switch to CAN-FD if at least one of
+ * our channels supports CAN-FD.
+ */
+ dev->hf_size_rx = struct_size(hf, classic_can, 1);
usb_set_intfdata(intf, dev);
- dev->udev = interface_to_usbdev(intf);
+ dev->udev = udev;
for (i = 0; i < icount; i++) {
dev->canch[i] = gs_make_candev(i, intf, dconf);
@@ -1006,6 +1204,9 @@ static int gs_usb_probe(struct usb_interface *intf,
return rc;
}
dev->canch[i]->parent = dev;
+
+ if (dev->canch[i]->can.ctrlmode_supported & CAN_CTRLMODE_FD)
+ dev->hf_size_rx = struct_size(hf, canfd, 1);
}
kfree(dconf);
@@ -1015,8 +1216,9 @@ static int gs_usb_probe(struct usb_interface *intf,
static void gs_usb_disconnect(struct usb_interface *intf)
{
- unsigned i;
struct gs_usb *dev = usb_get_intfdata(intf);
+ unsigned int i;
+
usb_set_intfdata(intf, NULL);
if (!dev) {
@@ -1037,16 +1239,20 @@ static const struct usb_device_id gs_usb_table[] = {
USB_GSUSB_1_PRODUCT_ID, 0) },
{ USB_DEVICE_INTERFACE_NUMBER(USB_CANDLELIGHT_VENDOR_ID,
USB_CANDLELIGHT_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_CES_CANEXT_FD_VENDOR_ID,
+ USB_CES_CANEXT_FD_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_ABE_CANDEBUGGER_FD_VENDOR_ID,
+ USB_ABE_CANDEBUGGER_FD_PRODUCT_ID, 0) },
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, gs_usb_table);
static struct usb_driver gs_usb_driver = {
- .name = "gs_usb",
- .probe = gs_usb_probe,
+ .name = "gs_usb",
+ .probe = gs_usb_probe,
.disconnect = gs_usb_disconnect,
- .id_table = gs_usb_table,
+ .id_table = gs_usb_table,
};
module_usb_driver(gs_usb_driver);
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index c4b4d3d0a387..e67658b53d02 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -205,12 +205,10 @@ MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
int kvaser_usb_send_cmd(const struct kvaser_usb *dev, void *cmd, int len)
{
- int actual_len; /* Not used */
-
return usb_bulk_msg(dev->udev,
usb_sndbulkpipe(dev->udev,
dev->bulk_out->bEndpointAddress),
- cmd, len, &actual_len, KVASER_USB_TIMEOUT);
+ cmd, len, NULL, KVASER_USB_TIMEOUT);
}
int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len,
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index c7c41d1fd038..5ae0d7c017cc 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -1392,7 +1392,7 @@ static int ucan_probe(struct usb_interface *intf,
* Stage 3 for the final driver initialisation.
*/
- /* Prepare Memory for control transferes */
+ /* Prepare Memory for control transfers */
ctl_msg_buffer = devm_kzalloc(&udev->dev,
sizeof(union ucan_ctl_payload),
GFP_KERNEL);
@@ -1526,7 +1526,7 @@ static int ucan_probe(struct usb_interface *intf,
ret = ucan_device_request_in(up, UCAN_DEVICE_GET_FW_STRING, 0,
sizeof(union ucan_ctl_payload));
if (ret > 0) {
- /* copy string while ensuring zero terminiation */
+ /* copy string while ensuring zero termination */
strncpy(firmware_str, up->ctl_msg_buffer->raw,
sizeof(union ucan_ctl_payload));
firmware_str[sizeof(union ucan_ctl_payload)] = '\0';
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index c42f18845b02..a15619d883ec 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -80,7 +80,7 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
skb->dev = dev;
skb->ip_summed = CHECKSUM_UNNECESSARY;
- netif_rx_ni(skb);
+ netif_rx(skb);
}
static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 47ccc15a3486..577a80300514 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -33,28 +33,33 @@ struct vxcan_priv {
struct net_device __rcu *peer;
};
-static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t vxcan_xmit(struct sk_buff *oskb, struct net_device *dev)
{
struct vxcan_priv *priv = netdev_priv(dev);
struct net_device *peer;
- struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ struct canfd_frame *cfd = (struct canfd_frame *)oskb->data;
struct net_device_stats *peerstats, *srcstats = &dev->stats;
+ struct sk_buff *skb;
u8 len;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dropped_invalid_skb(dev, oskb))
return NETDEV_TX_OK;
rcu_read_lock();
peer = rcu_dereference(priv->peer);
if (unlikely(!peer)) {
- kfree_skb(skb);
+ kfree_skb(oskb);
dev->stats.tx_dropped++;
goto out_unlock;
}
- skb = can_create_echo_skb(skb);
- if (!skb)
+ skb = skb_clone(oskb, GFP_ATOMIC);
+ if (skb) {
+ consume_skb(oskb);
+ } else {
+ kfree_skb(oskb);
goto out_unlock;
+ }
/* reset CAN GW hop counter */
skb->csum_start = 0;
@@ -63,7 +68,7 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
skb->ip_summed = CHECKSUM_UNNECESSARY;
len = cfd->can_id & CAN_RTR_FLAG ? 0 : cfd->len;
- if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
+ if (netif_rx(skb) == NET_RX_SUCCESS) {
srcstats->tx_packets++;
srcstats->tx_bytes += len;
peerstats = &peer->stats;
@@ -148,7 +153,7 @@ static void vxcan_setup(struct net_device *dev)
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 0;
- dev->flags = (IFF_NOARP|IFF_ECHO);
+ dev->flags = IFF_NOARP;
dev->netdev_ops = &vxcan_netdev_ops;
dev->needs_free_netdev = true;
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 1674b561c9a2..e562c5ab1149 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -1215,10 +1215,11 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
}
if (work_done < quota) {
- napi_complete_done(napi, work_done);
- ier = priv->read_reg(priv, XCAN_IER_OFFSET);
- ier |= xcan_rx_int_mask(priv);
- priv->write_reg(priv, XCAN_IER_OFFSET, ier);
+ if (napi_complete_done(napi, work_done)) {
+ ier = priv->read_reg(priv, XCAN_IER_OFFSET);
+ ier |= xcan_rx_int_mask(priv);
+ priv->write_reg(priv, XCAN_IER_OFFSET, ier);
+ }
}
return work_done;
}
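The re-enabling of the RX interrupts is now conditional on napi_complete_done(), which returns false when polling must continue (for instance when NAPI was re-armed by busy polling); unmasking the interrupt in that case would let the IRQ handler race with the still-scheduled poll. The generic shape of the pattern, as a sketch with hypothetical example_ names:

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int work_done;

	work_done = example_clean_rx(priv, budget);	/* hypothetical RX work */

	/* only unmask the device IRQ if NAPI really completed */
	if (work_done < budget && napi_complete_done(napi, work_done))
		example_irq_enable(priv);		/* hypothetical */

	return work_done;
}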
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 0029d279616f..37a3dabdce31 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -68,17 +68,7 @@ config NET_DSA_QCA8K
This enables support for the Qualcomm Atheros QCA8K Ethernet
switch chips.
-config NET_DSA_REALTEK_SMI
- tristate "Realtek SMI Ethernet switch family support"
- select NET_DSA_TAG_RTL4_A
- select NET_DSA_TAG_RTL8_4
- select FIXED_PHY
- select IRQ_DOMAIN
- select REALTEK_PHY
- select REGMAP
- help
- This enables support for the Realtek SMI-based switch
- chips, currently only RTL8366RB.
+source "drivers/net/dsa/realtek/Kconfig"
config NET_DSA_SMSC_LAN9303
tristate
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 8da1569a34e6..e73838c12256 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -9,8 +9,6 @@ obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o
obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
-obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o
-realtek-smi-objs := realtek-smi-core.o rtl8366.o rtl8366rb.o rtl8365mb.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o
@@ -23,5 +21,6 @@ obj-y += microchip/
obj-y += mv88e6xxx/
obj-y += ocelot/
obj-y += qca/
+obj-y += realtek/
obj-y += sja1105/
obj-y += xrs700x/
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 3867f3d4545f..77501f9c5915 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1309,46 +1309,50 @@ void b53_port_event(struct dsa_switch *ds, int port)
}
EXPORT_SYMBOL(b53_port_event);
-void b53_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void b53_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
struct b53_device *dev = ds->priv;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- if (dev->ops->serdes_phylink_validate)
- dev->ops->serdes_phylink_validate(dev, port, mask, state);
+ /* Internal ports need GMII for PHYLIB */
+ __set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces);
+
+ /* These switches appear to support MII and RevMII too, but beyond
+ * this, the code gives very few clues. FIXME: We probably need more
+ * interface modes here.
+ *
+ * According to b53_srab_mux_init(), ports 3..5 can support:
+ * SGMII, MII, GMII, RGMII or INTERNAL depending on the MUX setting.
+ * However, the interface mode read from the MUX configuration is
+ * not passed back to DSA, so phylink uses NA.
+ * DT can specify RGMII for ports 0, 1.
+ * For MDIO, port 8 can be RGMII_TXID.
+ */
+ __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_REVMII, config->supported_interfaces);
- /* Allow all the expected bits */
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100;
- /* With the exclusion of 5325/5365, MII, Reverse MII and 802.3z, we
- * support Gigabit, including Half duplex.
+ /* 5325/5365 are not capable of gigabit speeds, everything else is.
+ * Note: the original code also excluded Gigabit for MII, RevMII
+ * and 802.3z modes. MII and RevMII are not able to work above 100M,
+ * so will be excluded by the generic validator implementation.
+ * However, the exclusion of Gigabit for 802.3z just seems wrong.
*/
- if (state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII &&
- !phy_interface_mode_is_8023z(state->interface) &&
- !(is5325(dev) || is5365(dev))) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
- }
+ if (!(is5325(dev) || is5365(dev)))
+ config->mac_capabilities |= MAC_1000;
- if (!phy_interface_mode_is_8023z(state->interface)) {
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- }
+ /* Get the implementation specific capabilities */
+ if (dev->ops->phylink_get_caps)
+ dev->ops->phylink_get_caps(dev, port, config);
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- phylink_helper_basex_speed(state);
+ /* This driver does not make use of the speed, duplex, pause or the
+ * advertisement in its mac_config, so it is safe to mark this driver
+ * as non-legacy.
+ */
+ config->legacy_pre_march2020 = false;
}
-EXPORT_SYMBOL(b53_phylink_validate);
int b53_phylink_mac_link_state(struct dsa_switch *ds, int port,
struct phylink_link_state *state)
@@ -1704,7 +1708,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
}
int b53_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct b53_device *priv = ds->priv;
int ret;
@@ -1724,7 +1729,8 @@ int b53_fdb_add(struct dsa_switch *ds, int port,
EXPORT_SYMBOL(b53_fdb_add);
int b53_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct b53_device *priv = ds->priv;
int ret;
@@ -1825,7 +1831,8 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
EXPORT_SYMBOL(b53_fdb_dump);
int b53_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct b53_device *priv = ds->priv;
int ret;
@@ -1845,7 +1852,8 @@ int b53_mdb_add(struct dsa_switch *ds, int port,
EXPORT_SYMBOL(b53_mdb_add);
int b53_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct b53_device *priv = ds->priv;
int ret;
@@ -1861,7 +1869,7 @@ int b53_mdb_del(struct dsa_switch *ds, int port,
EXPORT_SYMBOL(b53_mdb_del);
int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload, struct netlink_ext_ack *extack)
{
struct b53_device *dev = ds->priv;
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
@@ -2102,7 +2110,8 @@ out:
EXPORT_SYMBOL(b53_get_tag_protocol);
int b53_mirror_add(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror, bool ingress)
+ struct dsa_mall_mirror_tc_entry *mirror, bool ingress,
+ struct netlink_ext_ack *extack)
{
struct b53_device *dev = ds->priv;
u16 reg, loc;
@@ -2186,7 +2195,7 @@ int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
{
int ret;
- ret = phy_init_eee(phy, 0);
+ ret = phy_init_eee(phy, false);
if (ret)
return 0;
@@ -2259,7 +2268,7 @@ static const struct dsa_switch_ops b53_switch_ops = {
.phy_read = b53_phy_read16,
.phy_write = b53_phy_write16,
.adjust_link = b53_adjust_link,
- .phylink_validate = b53_phylink_validate,
+ .phylink_get_caps = b53_phylink_get_caps,
.phylink_mac_link_state = b53_phylink_mac_link_state,
.phylink_mac_config = b53_phylink_mac_config,
.phylink_mac_an_restart = b53_phylink_mac_an_restart,
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index b41dc8ac2ca8..3085b6cc7d40 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -46,6 +46,8 @@ struct b53_io_ops {
int (*phy_write16)(struct b53_device *dev, int addr, int reg, u16 value);
int (*irq_enable)(struct b53_device *dev, int port);
void (*irq_disable)(struct b53_device *dev, int port);
+ void (*phylink_get_caps)(struct b53_device *dev, int port,
+ struct phylink_config *config);
u8 (*serdes_map_lane)(struct b53_device *dev, int port);
int (*serdes_link_state)(struct b53_device *dev, int port,
struct phylink_link_state *state);
@@ -56,9 +58,6 @@ struct b53_io_ops {
void (*serdes_link_set)(struct b53_device *dev, int port,
unsigned int mode, phy_interface_t interface,
bool link_up);
- void (*serdes_phylink_validate)(struct b53_device *dev, int port,
- unsigned long *supported,
- struct phylink_link_state *state);
};
#define B53_INVALID_LANE 0xff
@@ -325,7 +324,7 @@ void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
int b53_get_sset_count(struct dsa_switch *ds, int port, int sset);
void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data);
int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
- bool *tx_fwd_offload);
+ bool *tx_fwd_offload, struct netlink_ext_ack *extack);
void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge);
void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
void b53_br_fast_age(struct dsa_switch *ds, int port);
@@ -337,9 +336,6 @@ int b53_br_flags(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack);
int b53_setup_devlink_resources(struct dsa_switch *ds);
void b53_port_event(struct dsa_switch *ds, int port);
-void b53_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state);
int b53_phylink_mac_link_state(struct dsa_switch *ds, int port,
struct phylink_link_state *state);
void b53_phylink_mac_config(struct dsa_switch *ds, int port,
@@ -363,17 +359,22 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
int b53_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
int b53_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid);
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
int b53_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid);
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
int b53_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data);
int b53_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
int b53_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
int b53_mirror_add(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror, bool ingress);
+ struct dsa_mall_mirror_tc_entry *mirror, bool ingress,
+ struct netlink_ext_ack *extack);
enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mprot);
void b53_mirror_del(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/b53/b53_serdes.c b/drivers/net/dsa/b53/b53_serdes.c
index 5ae3d9783b68..555e5b372321 100644
--- a/drivers/net/dsa/b53/b53_serdes.c
+++ b/drivers/net/dsa/b53/b53_serdes.c
@@ -158,9 +158,8 @@ void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode,
}
EXPORT_SYMBOL(b53_serdes_link_set);
-void b53_serdes_phylink_validate(struct b53_device *dev, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+void b53_serdes_phylink_get_caps(struct b53_device *dev, int port,
+ struct phylink_config *config)
{
u8 lane = b53_serdes_map_lane(dev, port);
@@ -169,16 +168,24 @@ void b53_serdes_phylink_validate(struct b53_device *dev, int port,
switch (lane) {
case 0:
- phylink_set(supported, 2500baseX_Full);
+ /* It appears lane 0 supports 2500base-X and 1000base-X */
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ config->supported_interfaces);
+ config->mac_capabilities |= MAC_2500FD;
fallthrough;
case 1:
- phylink_set(supported, 1000baseX_Full);
+ /* It appears lane 1 only supports 1000base-X and SGMII */
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ config->mac_capabilities |= MAC_1000FD;
break;
default:
break;
}
}
-EXPORT_SYMBOL(b53_serdes_phylink_validate);
+EXPORT_SYMBOL(b53_serdes_phylink_get_caps);
int b53_serdes_init(struct b53_device *dev, int port)
{
diff --git a/drivers/net/dsa/b53/b53_serdes.h b/drivers/net/dsa/b53/b53_serdes.h
index 55d280fe38e4..f47d5caa7557 100644
--- a/drivers/net/dsa/b53/b53_serdes.h
+++ b/drivers/net/dsa/b53/b53_serdes.h
@@ -115,9 +115,8 @@ void b53_serdes_config(struct b53_device *dev, int port, unsigned int mode,
void b53_serdes_an_restart(struct b53_device *dev, int port);
void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode,
phy_interface_t interface, bool link_up);
-void b53_serdes_phylink_validate(struct b53_device *dev, int port,
- unsigned long *supported,
- struct phylink_link_state *state);
+void b53_serdes_phylink_get_caps(struct b53_device *dev, int port,
+ struct phylink_config *config);
#if IS_ENABLED(CONFIG_B53_SERDES)
int b53_serdes_init(struct b53_device *dev, int port);
#else
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index 4591bb1c05d2..c51b716657db 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -443,6 +443,39 @@ static void b53_srab_irq_disable(struct b53_device *dev, int port)
}
}
+static void b53_srab_phylink_get_caps(struct b53_device *dev, int port,
+ struct phylink_config *config)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ struct b53_srab_port_priv *p = &priv->port_intrs[port];
+
+ switch (p->mode) {
+ case PHY_INTERFACE_MODE_SGMII:
+#if IS_ENABLED(CONFIG_B53_SERDES)
+ /* If p->mode indicates SGMII mode, that essentially means we
+ * are using a serdes. Ask the serdes for the capabilities.
+ */
+ b53_serdes_phylink_get_caps(dev, port, config);
+#endif
+ break;
+
+ case PHY_INTERFACE_MODE_NA:
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ /* If we support RGMII, support all RGMII modes, since
+ * that dictates the PHY delay settings.
+ */
+ phy_interface_set_rgmii(config->supported_interfaces);
+ break;
+
+ default:
+ /* Some other mode (e.g. MII, GMII etc) */
+ __set_bit(p->mode, config->supported_interfaces);
+ break;
+ }
+}
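phy_interface_set_rgmii() used above marks all four RGMII delay variants as supported, which lines up with the comment that the chosen RGMII mode dictates the PHY delay settings. To the best of my understanding it is roughly equivalent to:

static void example_set_rgmii(unsigned long *interfaces)
{
	__set_bit(PHY_INTERFACE_MODE_RGMII, interfaces);
	__set_bit(PHY_INTERFACE_MODE_RGMII_ID, interfaces);
	__set_bit(PHY_INTERFACE_MODE_RGMII_RXID, interfaces);
	__set_bit(PHY_INTERFACE_MODE_RGMII_TXID, interfaces);
}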
+
static const struct b53_io_ops b53_srab_ops = {
.read8 = b53_srab_read8,
.read16 = b53_srab_read16,
@@ -456,13 +489,13 @@ static const struct b53_io_ops b53_srab_ops = {
.write64 = b53_srab_write64,
.irq_enable = b53_srab_irq_enable,
.irq_disable = b53_srab_irq_disable,
+ .phylink_get_caps = b53_srab_phylink_get_caps,
#if IS_ENABLED(CONFIG_B53_SERDES)
.serdes_map_lane = b53_srab_serdes_map_lane,
.serdes_link_state = b53_serdes_link_state,
.serdes_config = b53_serdes_config,
.serdes_an_restart = b53_serdes_an_restart,
.serdes_link_set = b53_serdes_link_set,
- .serdes_phylink_validate = b53_serdes_phylink_validate,
#endif
};
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 6afb5db8244c..cf82b1fa9725 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -712,49 +712,25 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
PHY_BRCM_IDDQ_SUSPEND;
}
-static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void bcm_sf2_sw_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
+ unsigned long *interfaces = config->supported_interfaces;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- if (!phy_interface_mode_is_rgmii(state->interface) &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- state->interface != PHY_INTERFACE_MODE_INTERNAL &&
- state->interface != PHY_INTERFACE_MODE_MOCA) {
- linkmode_zero(supported);
- if (port != core_readl(priv, CORE_IMP0_PRT_ID))
- dev_err(ds->dev,
- "Unsupported interface: %d for port %d\n",
- state->interface, port);
- return;
- }
-
- /* Allow all the expected bits */
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- /* With the exclusion of MII and Reverse MII, we support Gigabit,
- * including Half duplex
- */
- if (state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
+ if (priv->int_phy_mask & BIT(port)) {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL, interfaces);
+ } else if (priv->moca_port == port) {
+ __set_bit(PHY_INTERFACE_MODE_MOCA, interfaces);
+ } else {
+ __set_bit(PHY_INTERFACE_MODE_MII, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_REVMII, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII, interfaces);
+ phy_interface_set_rgmii(interfaces);
}
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
}
static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
@@ -1221,7 +1197,7 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.get_sset_count = bcm_sf2_sw_get_sset_count,
.get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
.get_phy_flags = bcm_sf2_sw_get_phy_flags,
- .phylink_validate = bcm_sf2_sw_validate,
+ .phylink_get_caps = bcm_sf2_sw_get_caps,
.phylink_mac_config = bcm_sf2_sw_mac_config,
.phylink_mac_link_down = bcm_sf2_sw_mac_link_down,
.phylink_mac_link_up = bcm_sf2_sw_mac_link_up,
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index 33daaf10c488..263e41191c29 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -168,7 +168,8 @@ static int dsa_loop_phy_write(struct dsa_switch *ds, int port,
static int dsa_loop_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
dev_dbg(ds->dev, "%s: port: %d, bridge: %s\n",
__func__, port, bridge.dev->name);
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index 726f267cb228..ac1f3b3a7040 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -675,7 +675,8 @@ static int hellcreek_bridge_flags(struct dsa_switch *ds, int port,
static int hellcreek_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct hellcreek *hellcreek = ds->priv;
@@ -827,7 +828,8 @@ static int hellcreek_fdb_get(struct hellcreek *hellcreek,
}
static int hellcreek_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct hellcreek_fdb_entry entry = { 0 };
struct hellcreek *hellcreek = ds->priv;
@@ -872,7 +874,8 @@ out:
}
static int hellcreek_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct hellcreek_fdb_entry entry = { 0 };
struct hellcreek *hellcreek = ds->priv;
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
index b3bc948d6145..ffd06cf8c44f 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
@@ -331,7 +331,7 @@ static void hellcreek_get_rxts(struct hellcreek *hellcreek,
shwt = skb_hwtstamps(skb);
memset(shwt, 0, sizeof(*shwt));
shwt->hwtstamp = ns_to_ktime(ns);
- netif_rx_ni(skb);
+ netif_rx(skb);
}
}
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index 3969d89fa4db..e03ff1f267bb 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -1111,7 +1111,8 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port)
static int lan9303_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct lan9303 *chip = ds->priv;
@@ -1188,7 +1189,8 @@ static void lan9303_port_fast_age(struct dsa_switch *ds, int port)
}
static int lan9303_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct lan9303 *chip = ds->priv;
@@ -1200,8 +1202,8 @@ static int lan9303_port_fdb_add(struct dsa_switch *ds, int port,
}
static int lan9303_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
-
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct lan9303 *chip = ds->priv;
@@ -1245,7 +1247,8 @@ static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
}
static int lan9303_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct lan9303 *chip = ds->priv;
int err;
@@ -1260,7 +1263,8 @@ static int lan9303_port_mdb_add(struct dsa_switch *ds, int port,
}
static int lan9303_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct lan9303 *chip = ds->priv;
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 8a7a8093a156..a416240d001b 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -213,6 +213,7 @@
#define GSWIP_MAC_CTRL_0_GMII_MII 0x0001
#define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002
#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
+#define GSWIP_MAC_CTRL_2_LCHKL BIT(2) /* Frame Length Check Long Enable */
#define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Length */
/* Ethernet Switch Fetch DMA Port Control Register */
@@ -239,6 +240,15 @@
#define XRX200_GPHY_FW_ALIGN (16 * 1024)
+/* Maximum packet size supported by the switch. In theory this should be 10240,
+ * but long packets currently cause lock-ups with an MTU of over 2526. Medium
+ * packets are sometimes dropped (e.g. TCP over 2477, UDP over 2516-2519, ICMP
+ * over 2526), hence an MTU value of 2400 seems safe. This issue only affects
+ * packet reception. This is probably caused by the PPA engine, which is on the
+ * RX part of the device. Packet transmission works properly up to 10240.
+ */
+#define GSWIP_MAX_PACKET_LENGTH 2400
+
struct gswip_hw_info {
int max_ports;
int cpu_port;
@@ -863,10 +873,6 @@ static int gswip_setup(struct dsa_switch *ds)
gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
GSWIP_PCE_PCTRL_0p(cpu_port));
- gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
- GSWIP_MAC_CTRL_2p(cpu_port));
- gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8 + ETH_FCS_LEN,
- GSWIP_MAC_FLEN);
gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
GSWIP_BM_QUEUE_GCTRL);
@@ -883,6 +889,8 @@ static int gswip_setup(struct dsa_switch *ds)
return err;
}
+ ds->mtu_enforcement_ingress = true;
+
gswip_port_enable(ds, cpu_port, NULL);
ds->configure_vlan_while_not_filtering = false;
@@ -1152,7 +1160,8 @@ static int gswip_vlan_remove(struct gswip_priv *priv,
static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct net_device *br = bridge.dev;
struct gswip_priv *priv = ds->priv;
@@ -1389,13 +1398,15 @@ static int gswip_port_fdb(struct dsa_switch *ds, int port,
}
static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
return gswip_port_fdb(ds, port, addr, vid, true);
}
static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
return gswip_port_fdb(ds, port, addr, vid, false);
}
@@ -1446,6 +1457,39 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
return 0;
}
+static int gswip_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ /* Includes 8 bytes for special header. */
+ return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN;
+}
+
+static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct gswip_priv *priv = ds->priv;
+ int cpu_port = priv->hw_info->cpu_port;
+
+ /* The CPU port is always set to the maximum MTU of the user ports, so
+ * use it to set the switch frame size, including the 8-byte special
+ * header.
+ */
+ if (port == cpu_port) {
+ new_mtu += 8;
+ gswip_switch_w(priv, VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN,
+ GSWIP_MAC_FLEN);
+ }
+
+ /* Enable MLEN for ports with non-standard MTUs, including the special
+ * header on the CPU port added above.
+ */
+ if (new_mtu != ETH_DATA_LEN)
+ gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
+ GSWIP_MAC_CTRL_2p(port));
+ else
+ gswip_switch_mask(priv, GSWIP_MAC_CTRL_2_MLEN, 0,
+ GSWIP_MAC_CTRL_2p(port));
+
+ return 0;
+}
+
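
To make the frame-size arithmetic above concrete, here is a minimal userspace sketch (not part of the patch) that recomputes the maximum user-port MTU and the GSWIP_MAC_FLEN value written for the CPU port at the default MTU, assuming the usual VLAN_ETH_HLEN = 18 and ETH_FCS_LEN = 4 plus the 8-byte special header mentioned above:

/* Illustrative sketch only; constants mirror the ones used in this patch. */
#include <stdio.h>

#define GSWIP_MAX_PACKET_LENGTH 2400
#define VLAN_ETH_HLEN 18
#define ETH_FCS_LEN 4
#define GSWIP_TAG_LEN 8 /* special header added on the CPU port */

int main(void)
{
	int max_mtu = GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN;
	int default_mtu = 1500;
	/* Value written to GSWIP_MAC_FLEN when the CPU port runs the
	 * default MTU: VLAN header + MTU + special tag + FCS.
	 */
	int flen = VLAN_ETH_HLEN + default_mtu + GSWIP_TAG_LEN + ETH_FCS_LEN;

	printf("max user-port MTU: %d\n", max_mtu);        /* 2378 */
	printf("GSWIP_MAC_FLEN at MTU 1500: %d\n", flen);  /* 1530 */
	return 0;
}
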
static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
@@ -1791,6 +1835,8 @@ static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
.port_fdb_add = gswip_port_fdb_add,
.port_fdb_del = gswip_port_fdb_del,
.port_fdb_dump = gswip_port_fdb_dump,
+ .port_change_mtu = gswip_port_change_mtu,
+ .port_max_mtu = gswip_port_max_mtu,
.phylink_get_caps = gswip_xrx200_phylink_get_caps,
.phylink_mac_config = gswip_phylink_mac_config,
.phylink_mac_link_down = gswip_phylink_mac_link_down,
@@ -1815,6 +1861,8 @@ static const struct dsa_switch_ops gswip_xrx300_switch_ops = {
.port_fdb_add = gswip_port_fdb_add,
.port_fdb_del = gswip_port_fdb_del,
.port_fdb_dump = gswip_port_fdb_dump,
+ .port_change_mtu = gswip_port_change_mtu,
+ .port_max_mtu = gswip_port_max_mtu,
.phylink_get_caps = gswip_xrx300_phylink_get_caps,
.phylink_mac_config = gswip_phylink_mac_config,
.phylink_mac_link_down = gswip_phylink_mac_link_down,
diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h
index 9d611895d3cf..03da369675c6 100644
--- a/drivers/net/dsa/microchip/ksz8.h
+++ b/drivers/net/dsa/microchip/ksz8.h
@@ -16,6 +16,7 @@ enum ksz_regs {
REG_IND_DATA_HI,
REG_IND_DATA_LO,
REG_IND_MIB_CHECK,
+ REG_IND_BYTE,
P_FORCE_CTRL,
P_LINK_STATUS,
P_LOCAL_CTRL,
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index 991b9c6b6ce7..b2752978cb09 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -33,6 +33,7 @@ static const u8 ksz8795_regs[] = {
[REG_IND_DATA_HI] = 0x71,
[REG_IND_DATA_LO] = 0x75,
[REG_IND_MIB_CHECK] = 0x74,
+ [REG_IND_BYTE] = 0xA0,
[P_FORCE_CTRL] = 0x0C,
[P_LINK_STATUS] = 0x0E,
[P_LOCAL_CTRL] = 0x07,
@@ -222,6 +223,25 @@ static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
bits, set ? bits : 0);
}
+static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
+{
+ struct ksz8 *ksz8 = dev->priv;
+ const u8 *regs = ksz8->regs;
+ u16 ctrl_addr;
+ int ret = 0;
+
+ mutex_lock(&dev->alu_mutex);
+
+ ctrl_addr = IND_ACC_TABLE(table) | addr;
+ ret = ksz_write8(dev, regs[REG_IND_BYTE], data);
+ if (!ret)
+ ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
static int ksz8_reset_switch(struct ksz_device *dev)
{
if (ksz_is_ksz88x3(dev)) {
@@ -1213,7 +1233,7 @@ static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
static int ksz8_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+ bool ingress, struct netlink_ext_ack *extack)
{
struct ksz_device *dev = ds->priv;
@@ -1391,6 +1411,23 @@ static void ksz8_config_cpu_port(struct dsa_switch *ds)
}
}
+static int ksz8_handle_global_errata(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret = 0;
+
+ /* KSZ87xx Errata DS80000687C.
+ * Module 2: Link drops with some EEE link partners.
+ * An issue with the EEE next page exchange between the
+ * KSZ879x/KSZ877x/KSZ876x and some EEE link partners may result in
+ * the link dropping.
+ */
+ if (dev->ksz87xx_eee_link_erratum)
+ ret = ksz8_ind_write8(dev, TABLE_EEE, REG_IND_EEE_GLOB2_HI, 0);
+
+ return ret;
+}
+
static int ksz8_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
@@ -1458,30 +1495,25 @@ static int ksz8_setup(struct dsa_switch *ds)
ds->configure_vlan_while_not_filtering = false;
- return 0;
+ return ksz8_handle_global_errata(ds);
}
-static void ksz8_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void ksz8_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct ksz_device *dev = ds->priv;
if (port == dev->cpu_port) {
- if (state->interface != PHY_INTERFACE_MODE_RMII &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_NA)
- goto unsupported;
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ config->supported_interfaces);
} else {
- if (state->interface != PHY_INTERFACE_MODE_INTERNAL &&
- state->interface != PHY_INTERFACE_MODE_NA)
- goto unsupported;
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
}
- /* Allow all the expected bits */
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
+ config->mac_capabilities = MAC_10 | MAC_100;
/* Silicon Errata Sheet (DS80000830A):
* "Port 1 does not respond to received flow control PAUSE frames"
@@ -1489,27 +1521,11 @@ static void ksz8_validate(struct dsa_switch *ds, int port,
* switches.
*/
if (!ksz_is_ksz88x3(dev) || port)
- phylink_set(mask, Pause);
+ config->mac_capabilities |= MAC_SYM_PAUSE;
/* Asym pause is not supported on KSZ8863 and KSZ8873 */
if (!ksz_is_ksz88x3(dev))
- phylink_set(mask, Asym_Pause);
-
- /* 10M and 100M are only supported */
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- return;
-
-unsupported:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported interface: %s, port: %d\n",
- phy_modes(state->interface), port);
+ config->mac_capabilities |= MAC_ASYM_PAUSE;
}
static const struct dsa_switch_ops ksz8_switch_ops = {
@@ -1518,7 +1534,7 @@ static const struct dsa_switch_ops ksz8_switch_ops = {
.setup = ksz8_setup,
.phy_read = ksz_phy_read16,
.phy_write = ksz_phy_write16,
- .phylink_validate = ksz8_validate,
+ .phylink_get_caps = ksz8_get_caps,
.phylink_mac_link_down = ksz_mac_link_down,
.port_enable = ksz_enable_port,
.get_strings = ksz8_get_strings,
@@ -1596,6 +1612,7 @@ struct ksz_chip_data {
int num_statics;
int cpu_ports;
int port_cnt;
+ bool ksz87xx_eee_link_erratum;
};
static const struct ksz_chip_data ksz8_switch_chips[] = {
@@ -1607,6 +1624,7 @@ static const struct ksz_chip_data ksz8_switch_chips[] = {
.num_statics = 8,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
+ .ksz87xx_eee_link_erratum = true,
},
{
/*
@@ -1630,6 +1648,7 @@ static const struct ksz_chip_data ksz8_switch_chips[] = {
.num_statics = 8,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 4, /* total cpu and user ports */
+ .ksz87xx_eee_link_erratum = true,
},
{
.chip_id = 0x8765,
@@ -1639,6 +1658,7 @@ static const struct ksz_chip_data ksz8_switch_chips[] = {
.num_statics = 8,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
+ .ksz87xx_eee_link_erratum = true,
},
{
.chip_id = 0x8830,
@@ -1673,6 +1693,8 @@ static int ksz8_switch_init(struct ksz_device *dev)
dev->host_mask = chip->cpu_ports;
dev->port_mask = (BIT(dev->phy_port_cnt) - 1) |
chip->cpu_ports;
+ dev->ksz87xx_eee_link_erratum =
+ chip->ksz87xx_eee_link_erratum;
break;
}
}
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
index 6b40bc25f7ff..d74defcd86b4 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8795_reg.h
@@ -812,6 +812,10 @@
#define IND_ACC_TABLE(table) ((table) << 8)
+/* EEE Global Register 2 (indirect access) */
+#define REG_IND_EEE_GLOB2_LO 0x34
+#define REG_IND_EEE_GLOB2_HI 0x35
+
/* Driver set switch broadcast storm protection at 10% rate. */
#define BROADCAST_STORM_PROT_RATE 10
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 353b5f981740..8222c8a6c5ec 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -11,6 +11,7 @@
#include <linux/platform_data/microchip-ksz.h>
#include <linux/phy.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <net/dsa.h>
#include <net/switchdev.h>
@@ -64,6 +65,100 @@ static const struct {
{ 0x83, "tx_discards" },
};
+struct ksz9477_stats_raw {
+ u64 rx_hi;
+ u64 rx_undersize;
+ u64 rx_fragments;
+ u64 rx_oversize;
+ u64 rx_jabbers;
+ u64 rx_symbol_err;
+ u64 rx_crc_err;
+ u64 rx_align_err;
+ u64 rx_mac_ctrl;
+ u64 rx_pause;
+ u64 rx_bcast;
+ u64 rx_mcast;
+ u64 rx_ucast;
+ u64 rx_64_or_less;
+ u64 rx_65_127;
+ u64 rx_128_255;
+ u64 rx_256_511;
+ u64 rx_512_1023;
+ u64 rx_1024_1522;
+ u64 rx_1523_2000;
+ u64 rx_2001;
+ u64 tx_hi;
+ u64 tx_late_col;
+ u64 tx_pause;
+ u64 tx_bcast;
+ u64 tx_mcast;
+ u64 tx_ucast;
+ u64 tx_deferred;
+ u64 tx_total_col;
+ u64 tx_exc_col;
+ u64 tx_single_col;
+ u64 tx_mult_col;
+ u64 rx_total;
+ u64 tx_total;
+ u64 rx_discards;
+ u64 tx_discards;
+};
+
+static void ksz9477_r_mib_stats64(struct ksz_device *dev, int port)
+{
+ struct rtnl_link_stats64 *stats;
+ struct ksz9477_stats_raw *raw;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+ stats = &mib->stats64;
+ raw = (struct ksz9477_stats_raw *)mib->counters;
+
+ spin_lock(&mib->stats64_lock);
+
+ stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast;
+ stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast;
+
+ /* HW counters are counting bytes + FCS which is not acceptable
+ * for rtnl_link_stats64 interface
+ */
+ stats->rx_bytes = raw->rx_total - stats->rx_packets * ETH_FCS_LEN;
+ stats->tx_bytes = raw->tx_total - stats->tx_packets * ETH_FCS_LEN;
+
+ stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments +
+ raw->rx_oversize;
+
+ stats->rx_crc_errors = raw->rx_crc_err;
+ stats->rx_frame_errors = raw->rx_align_err;
+ stats->rx_dropped = raw->rx_discards;
+ stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
+ stats->rx_frame_errors + stats->rx_dropped;
+
+ stats->tx_window_errors = raw->tx_late_col;
+ stats->tx_fifo_errors = raw->tx_discards;
+ stats->tx_aborted_errors = raw->tx_exc_col;
+ stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors +
+ stats->tx_aborted_errors;
+
+ stats->multicast = raw->rx_mcast;
+ stats->collisions = raw->tx_total_col;
+
+ spin_unlock(&mib->stats64_lock);
+}
+
+static void ksz9477_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+
+ spin_lock(&mib->stats64_lock);
+ memcpy(s, &mib->stats64, sizeof(*s));
+ spin_unlock(&mib->stats64_lock);
+}
+
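
The raw MIB byte counters include the FCS of every frame, which is why rx_bytes/tx_bytes above subtract ETH_FCS_LEN per packet before reporting them through rtnl_link_stats64. A tiny illustrative sketch of that correction (made-up numbers, not kernel code):

#include <stdio.h>
#include <stdint.h>

#define ETH_FCS_LEN 4

int main(void)
{
	uint64_t rx_total = 1000000;	/* bytes counted by HW, incl. FCS */
	uint64_t rx_packets = 1500;

	uint64_t rx_bytes = rx_total - rx_packets * ETH_FCS_LEN;

	printf("rx_bytes reported to rtnl: %llu\n",
	       (unsigned long long)rx_bytes);	/* 994000 */
	return 0;
}
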
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
@@ -88,6 +183,29 @@ static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
bits, set ? bits : 0);
}
+static int ksz9477_change_mtu(struct dsa_switch *ds, int port, int mtu)
+{
+ struct ksz_device *dev = ds->priv;
+ u16 frame_size, max_frame = 0;
+ int i;
+
+ frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ /* Cache the per-port MTU setting */
+ dev->ports[port].max_frame = frame_size;
+
+ for (i = 0; i < dev->port_cnt; i++)
+ max_frame = max(max_frame, dev->ports[i].max_frame);
+
+ return regmap_update_bits(dev->regmap[1], REG_SW_MTU__2,
+ REG_SW_MTU_MASK, max_frame);
+}
+
+static int ksz9477_max_mtu(struct dsa_switch *ds, int port)
+{
+ return KSZ9477_MAX_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
+}
+
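
Since REG_SW_MTU__2 is shared by the whole switch, ksz9477_change_mtu() above programs it with the largest cached per-port frame size. A hedged standalone sketch of that reduction, assuming KSZ9477_MAX_FRAME_SIZE = 9000, VLAN_ETH_HLEN = 18 and ETH_FCS_LEN = 4 (so the largest user MTU is 8978):

/* Illustrative sketch only; mirrors the max() loop in ksz9477_change_mtu(). */
#include <stdio.h>

#define VLAN_ETH_HLEN 18
#define ETH_FCS_LEN   4

static unsigned short frame_size_for_mtu(int mtu)
{
	return mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
}

int main(void)
{
	/* hypothetical per-port MTUs: one port raised to its maximum */
	int mtus[] = { 1500, 1500, 8978, 1500, 1500 };
	unsigned short max_frame = 0;
	size_t i;

	for (i = 0; i < sizeof(mtus) / sizeof(mtus[0]); i++) {
		unsigned short fs = frame_size_for_mtu(mtus[i]);

		if (fs > max_frame)
			max_frame = fs;
	}

	printf("REG_SW_MTU__2 value: %u\n", max_frame);	/* 9000 */
	return 0;
}
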
static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
{
unsigned int val;
@@ -222,9 +340,12 @@ static int ksz9477_reset_switch(struct ksz_device *dev)
(BROADCAST_STORM_VALUE *
BROADCAST_STORM_PROT_RATE) / 100);
- if (dev->synclko_125)
- ksz_write8(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1,
- SW_ENABLE_REFCLKO | SW_REFCLKO_IS_125MHZ);
+ data8 = SW_ENABLE_REFCLKO;
+ if (dev->synclko_disable)
+ data8 = 0;
+ else if (dev->synclko_125)
+ data8 = SW_ENABLE_REFCLKO | SW_REFCLKO_IS_125MHZ;
+ ksz_write8(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1, data8);
return 0;
}
@@ -543,7 +664,8 @@ static int ksz9477_port_vlan_del(struct dsa_switch *ds, int port,
}
static int ksz9477_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct ksz_device *dev = ds->priv;
u32 alu_table[4];
@@ -600,7 +722,8 @@ exit:
}
static int ksz9477_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct ksz_device *dev = ds->priv;
u32 alu_table[4];
@@ -742,7 +865,8 @@ exit:
}
static int ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct ksz_device *dev = ds->priv;
u32 static_table[4];
@@ -817,7 +941,8 @@ exit:
}
static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct ksz_device *dev = ds->priv;
u32 static_table[4];
@@ -893,7 +1018,7 @@ exit:
static int ksz9477_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+ bool ingress, struct netlink_ext_ack *extack)
{
struct ksz_device *dev = ds->priv;
@@ -1315,8 +1440,14 @@ static int ksz9477_setup(struct dsa_switch *ds)
/* Do not work correctly with tail tagging. */
ksz_cfg(dev, REG_SW_MAC_CTRL_0, SW_CHECK_LENGTH, false);
- /* accept packet up to 2000bytes */
- ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_LEGAL_PACKET_DISABLE, true);
+ /* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
+ ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);
+
+ /* Now we can configure default MTU value */
+ ret = regmap_update_bits(dev->regmap[1], REG_SW_MTU__2, REG_SW_MTU_MASK,
+ VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
+ if (ret)
+ return ret;
ksz9477_config_cpu_port(ds);
@@ -1362,6 +1493,9 @@ static const struct dsa_switch_ops ksz9477_switch_ops = {
.port_mdb_del = ksz9477_port_mdb_del,
.port_mirror_add = ksz9477_port_mirror_add,
.port_mirror_del = ksz9477_port_mirror_del,
+ .get_stats64 = ksz9477_get_stats64,
+ .port_change_mtu = ksz9477_change_mtu,
+ .port_max_mtu = ksz9477_max_mtu,
};
static u32 ksz9477_get_port_addr(int port, int offset)
@@ -1521,6 +1655,7 @@ static int ksz9477_switch_init(struct ksz_device *dev)
if (!dev->ports)
return -ENOMEM;
for (i = 0; i < dev->port_cnt; i++) {
+ spin_lock_init(&dev->ports[i].mib.stats64_lock);
mutex_init(&dev->ports[i].mib.cnt_mutex);
dev->ports[i].mib.counters =
devm_kzalloc(dev->dev,
@@ -1549,6 +1684,7 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.port_setup = ksz9477_port_setup,
.r_mib_cnt = ksz9477_r_mib_cnt,
.r_mib_pkt = ksz9477_r_mib_pkt,
+ .r_mib_stat64 = ksz9477_r_mib_stats64,
.freeze_mib = ksz9477_freeze_mib,
.port_init_cnt = ksz9477_port_init_cnt,
.shutdown = ksz9477_reset_switch,
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index f3afb8b8c4cc..cbc0b20e7e1b 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -92,6 +92,7 @@ static const struct of_device_id ksz9477_dt_ids[] = {
{ .compatible = "microchip,ksz9893" },
{ .compatible = "microchip,ksz9563" },
{ .compatible = "microchip,ksz9567" },
+ { .compatible = "microchip,ksz8563" },
{},
};
MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index 16939f29faa5..0bd58467181f 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -176,6 +176,7 @@
#define REG_SW_MAC_ADDR_5 0x0307
#define REG_SW_MTU__2 0x0308
+#define REG_SW_MTU_MASK GENMASK(13, 0)
#define REG_SW_ISP_TPID__2 0x030A
@@ -1662,4 +1663,6 @@
/* 148,800 frames * 67 ms / 100 */
#define BROADCAST_STORM_VALUE 9969
+#define KSZ9477_MAX_FRAME_SIZE 9000
+
#endif /* KSZ9477_REGS_H */
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 243f8ad6d06e..8014b18d9391 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -130,6 +130,10 @@ static void ksz_mib_read_work(struct work_struct *work)
}
port_r_cnt(dev, i);
p->read = false;
+
+ if (dev->dev_ops->r_mib_stat64)
+ dev->dev_ops->r_mib_stat64(dev, i);
+
mutex_unlock(&mib->cnt_mutex);
}
@@ -213,7 +217,8 @@ EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats);
int ksz_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
/* port_stp_state_set() will be called after to put the port in
* appropriate state so there is no need to do anything.
@@ -272,7 +277,8 @@ int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
EXPORT_SYMBOL_GPL(ksz_port_fdb_dump);
int ksz_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct ksz_device *dev = ds->priv;
struct alu_struct alu;
@@ -317,7 +323,8 @@ int ksz_port_mdb_add(struct dsa_switch *ds, int port,
EXPORT_SYMBOL_GPL(ksz_port_mdb_add);
int ksz_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct ksz_device *dev = ds->priv;
struct alu_struct alu;
@@ -454,6 +461,12 @@ int ksz_switch_register(struct ksz_device *dev,
}
dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
"microchip,synclko-125");
+ dev->synclko_disable = of_property_read_bool(dev->dev->of_node,
+ "microchip,synclko-disable");
+ if (dev->synclko_125 && dev->synclko_disable) {
+ dev_err(dev->dev, "inconsistent synclko settings\n");
+ return -EINVAL;
+ }
}
ret = dsa_register_switch(dev->ds);
@@ -463,7 +476,7 @@ int ksz_switch_register(struct ksz_device *dev,
}
/* Read MIB counters every 30 seconds to avoid overflow. */
- dev->mib_read_interval = msecs_to_jiffies(30000);
+ dev->mib_read_interval = msecs_to_jiffies(5000);
/* Start the MIB timer. */
schedule_delayed_work(&dev->mib_read, 0);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index df8ae59c8525..485d4a948c38 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -22,6 +22,8 @@ struct ksz_port_mib {
struct mutex cnt_mutex; /* structure access */
u8 cnt_ptr;
u64 *counters;
+ struct rtnl_link_stats64 stats64;
+ struct spinlock stats64_lock;
};
struct ksz_port {
@@ -39,6 +41,7 @@ struct ksz_port {
struct ksz_port_mib mib;
phy_interface_t interface;
+ u16 max_frame;
};
struct ksz_device {
@@ -74,7 +77,9 @@ struct ksz_device {
phy_interface_t compat_interface;
u32 regs_size;
bool phy_errata_9477;
+ bool ksz87xx_eee_link_erratum;
bool synclko_125;
+ bool synclko_disable;
struct vlan_table *vlan_cache;
@@ -127,6 +132,7 @@ struct ksz_dev_ops {
u64 *cnt);
void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt);
+ void (*r_mib_stat64)(struct ksz_device *dev, int port);
void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze);
void (*port_init_cnt)(struct ksz_device *dev, int port);
int (*shutdown)(struct ksz_device *dev);
@@ -155,16 +161,19 @@ void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
int ksz_sset_count(struct dsa_switch *ds, int port, int sset);
void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf);
int ksz_port_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge, bool *tx_fwd_offload);
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack);
void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge);
void ksz_port_fast_age(struct dsa_switch *ds, int port);
int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
void *data);
int ksz_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
int ksz_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
/* Common register access functions */
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index a251bc55727f..19f0035d4410 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1186,7 +1186,8 @@ mt7530_port_bridge_flags(struct dsa_switch *ds, int port,
static int
mt7530_port_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge, bool *tx_fwd_offload)
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
u32 port_bitmap = BIT(MT7530_CPU_PORT);
@@ -1349,7 +1350,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
static int
mt7530_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct mt7530_priv *priv = ds->priv;
int ret;
@@ -1365,7 +1367,8 @@ mt7530_port_fdb_add(struct dsa_switch *ds, int port,
static int
mt7530_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct mt7530_priv *priv = ds->priv;
int ret;
@@ -1416,7 +1419,8 @@ err:
static int
mt7530_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct mt7530_priv *priv = ds->priv;
const u8 *addr = mdb->addr;
@@ -1442,7 +1446,8 @@ mt7530_port_mdb_add(struct dsa_switch *ds, int port,
static int
mt7530_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct mt7530_priv *priv = ds->priv;
const u8 *addr = mdb->addr;
@@ -1709,7 +1714,7 @@ static int mt753x_mirror_port_set(unsigned int id, u32 val)
static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+ bool ingress, struct netlink_ext_ack *extack)
{
struct mt7530_priv *priv = ds->priv;
int monitor_port;
@@ -2846,7 +2851,7 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
mcr |= PMCR_RX_FC_EN;
}
- if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, 0) >= 0) {
+ if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, false) >= 0) {
switch (speed) {
case SPEED_1000:
mcr |= PMCR_FORCE_EEE1G;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index ab1676553714..64f4fdd02902 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -86,12 +86,16 @@ int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val)
int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
u16 mask, u16 val)
{
+ const unsigned long timeout = jiffies + msecs_to_jiffies(50);
u16 data;
int err;
int i;
- /* There's no bus specific operation to wait for a mask */
- for (i = 0; i < 16; i++) {
+ /* There's no bus specific operation to wait for a mask. Even
+ * if the initial poll takes longer than 50ms, always do at
+ * least one more attempt.
+ */
+ for (i = 0; time_before(jiffies, timeout) || (i < 2); i++) {
err = mv88e6xxx_read(chip, addr, reg, &data);
if (err)
return err;
@@ -99,7 +103,10 @@ int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
if ((data & mask) == val)
return 0;
- usleep_range(1000, 2000);
+ if (i < 2)
+ cpu_relax();
+ else
+ usleep_range(1000, 2000);
}
dev_err(chip->dev, "Timeout while waiting for switch\n");
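
The rewritten poll above bounds the wait by wall-clock time instead of a fixed iteration count, while still guaranteeing at least one retry even if the first read overruns the deadline. The same pattern, sketched as plain userspace C with CLOCK_MONOTONIC standing in for jiffies and a hypothetical read_status() in place of the MDIO read:

/* Illustrative sketch only; not the mv88e6xxx code. */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static bool read_status(unsigned int *val)
{
	*val = 0;		/* pretend the bit never sets */
	return true;
}

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

int main(void)
{
	const long long deadline = now_ms() + 50;	/* 50 ms budget */
	unsigned int val;
	int i;

	for (i = 0; now_ms() < deadline || i < 2; i++) {
		if (!read_status(&val))
			return 1;
		if (val & 0x1)
			return 0;
		if (i >= 2)
			usleep(1000);	/* back off after the first spins */
	}

	fprintf(stderr, "timeout waiting for status bit\n");
	return 1;
}
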
@@ -563,133 +570,268 @@ static int mv88e6xxx_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
return 0;
}
-static void mv88e6065_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static const u8 mv88e6185_phy_interface_modes[] = {
+ [MV88E6185_PORT_STS_CMODE_GMII_FD] = PHY_INTERFACE_MODE_GMII,
+ [MV88E6185_PORT_STS_CMODE_MII_100_FD_PS] = PHY_INTERFACE_MODE_MII,
+ [MV88E6185_PORT_STS_CMODE_MII_100] = PHY_INTERFACE_MODE_MII,
+ [MV88E6185_PORT_STS_CMODE_MII_10] = PHY_INTERFACE_MODE_MII,
+ [MV88E6185_PORT_STS_CMODE_SERDES] = PHY_INTERFACE_MODE_1000BASEX,
+ [MV88E6185_PORT_STS_CMODE_1000BASE_X] = PHY_INTERFACE_MODE_1000BASEX,
+ [MV88E6185_PORT_STS_CMODE_PHY] = PHY_INTERFACE_MODE_SGMII,
+};
+
+static void mv88e6095_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- if (!phy_interface_mode_is_8023z(state->interface)) {
- /* 10M and 100M are only supported in non-802.3z mode */
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
+ u8 cmode = chip->ports[port].cmode;
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
+
+ if (mv88e6xxx_phy_is_internal(chip->ds, port)) {
+ __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
+ } else {
+ if (cmode < ARRAY_SIZE(mv88e6185_phy_interface_modes) &&
+ mv88e6185_phy_interface_modes[cmode])
+ __set_bit(mv88e6185_phy_interface_modes[cmode],
+ config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_1000FD;
}
}
-static void mv88e6185_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static void mv88e6185_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- /* FIXME: if the port is in 1000Base-X mode, then it only supports
- * 1000M FD speeds. In this case, CMODE will indicate 5.
+ u8 cmode = chip->ports[port].cmode;
+
+ if (cmode < ARRAY_SIZE(mv88e6185_phy_interface_modes) &&
+ mv88e6185_phy_interface_modes[cmode])
+ __set_bit(mv88e6185_phy_interface_modes[cmode],
+ config->supported_interfaces);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+}
+
+static const u8 mv88e6xxx_phy_interface_modes[] = {
+ [MV88E6XXX_PORT_STS_CMODE_MII_PHY] = PHY_INTERFACE_MODE_MII,
+ [MV88E6XXX_PORT_STS_CMODE_MII] = PHY_INTERFACE_MODE_MII,
+ [MV88E6XXX_PORT_STS_CMODE_GMII] = PHY_INTERFACE_MODE_GMII,
+ [MV88E6XXX_PORT_STS_CMODE_RMII_PHY] = PHY_INTERFACE_MODE_RMII,
+ [MV88E6XXX_PORT_STS_CMODE_RMII] = PHY_INTERFACE_MODE_RMII,
+ [MV88E6XXX_PORT_STS_CMODE_100BASEX] = PHY_INTERFACE_MODE_100BASEX,
+ [MV88E6XXX_PORT_STS_CMODE_1000BASEX] = PHY_INTERFACE_MODE_1000BASEX,
+ [MV88E6XXX_PORT_STS_CMODE_SGMII] = PHY_INTERFACE_MODE_SGMII,
+ /* higher interface modes are not needed here, since ports supporting
+ * them are writable, and so the supported interfaces are filled in the
+ * corresponding .phylink_get_caps() implementation below
*/
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+};
- mv88e6065_phylink_validate(chip, port, mask, state);
+static void mv88e6xxx_translate_cmode(u8 cmode, unsigned long *supported)
+{
+ if (cmode < ARRAY_SIZE(mv88e6xxx_phy_interface_modes) &&
+ mv88e6xxx_phy_interface_modes[cmode])
+ __set_bit(mv88e6xxx_phy_interface_modes[cmode], supported);
+ else if (cmode == MV88E6XXX_PORT_STS_CMODE_RGMII)
+ phy_interface_set_rgmii(supported);
}
-static void mv88e6341_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- if (port >= 5)
- phylink_set(mask, 2500baseX_Full);
+ unsigned long *supported = config->supported_interfaces;
- /* No ethtool bits for 200Mbps */
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
- mv88e6065_phylink_validate(chip, port, mask, state);
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
}
-static void mv88e6352_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static int mv88e6352_get_port4_serdes_cmode(struct mv88e6xxx_chip *chip)
{
- /* No ethtool bits for 200Mbps */
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+ u16 reg, val;
+ int err;
- mv88e6065_phylink_validate(chip, port, mask, state);
+ err = mv88e6xxx_port_read(chip, 4, MV88E6XXX_PORT_STS, &reg);
+ if (err)
+ return err;
+
+ /* If PHY_DETECT is zero, then we are not in auto-media mode */
+ if (!(reg & MV88E6XXX_PORT_STS_PHY_DETECT))
+ return 0xf;
+
+ val = reg & ~MV88E6XXX_PORT_STS_PHY_DETECT;
+ err = mv88e6xxx_port_write(chip, 4, MV88E6XXX_PORT_STS, val);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_read(chip, 4, MV88E6XXX_PORT_STS, &val);
+ if (err)
+ return err;
+
+ /* Restore PHY_DETECT value */
+ err = mv88e6xxx_port_write(chip, 4, MV88E6XXX_PORT_STS, reg);
+ if (err)
+ return err;
+
+ return val & MV88E6XXX_PORT_STS_CMODE_MASK;
}
-static void mv88e6390_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static void mv88e6352_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- if (port >= 9) {
- phylink_set(mask, 2500baseX_Full);
- phylink_set(mask, 2500baseT_Full);
+ unsigned long *supported = config->supported_interfaces;
+ int err, cmode;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+
+ /* Port 4 supports automedia if the serdes is associated with it. */
+ if (port == 4) {
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err < 0)
+ dev_err(chip->dev, "p%d: failed to read scratch\n",
+ port);
+ if (err <= 0)
+ goto unlock;
+
+ cmode = mv88e6352_get_port4_serdes_cmode(chip);
+ if (cmode < 0)
+ dev_err(chip->dev, "p%d: failed to read serdes cmode\n",
+ port);
+ else
+ mv88e6xxx_translate_cmode(cmode, supported);
+unlock:
+ mv88e6xxx_reg_unlock(chip);
}
+}
+
+static void mv88e6341_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ unsigned long *supported = config->supported_interfaces;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
/* No ethtool bits for 200Mbps */
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
- mv88e6065_phylink_validate(chip, port, mask, state);
+ /* The C_Mode field is programmable on port 5 */
+ if (port == 5) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
+
+ config->mac_capabilities |= MAC_2500FD;
+ }
}
-static void mv88e6390x_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static void mv88e6390_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- if (port >= 9) {
- phylink_set(mask, 10000baseT_Full);
- phylink_set(mask, 10000baseKR_Full);
+ unsigned long *supported = config->supported_interfaces;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+
+ /* No ethtool bits for 200Mbps */
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+
+ /* The C_Mode field is programmable on ports 9 and 10 */
+ if (port == 9 || port == 10) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
+
+ config->mac_capabilities |= MAC_2500FD;
}
+}
- mv88e6390_phylink_validate(chip, port, mask, state);
+static void mv88e6390x_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ unsigned long *supported = config->supported_interfaces;
+
+ mv88e6390_phylink_get_caps(chip, port, config);
+
+ /* For the 6x90X, ports 2-7 can be in automedia mode.
+ * (Note that 6x90 doesn't support RXAUI nor XAUI).
+ *
+ * Port 2 can also support 1000BASE-X in automedia mode if port 9 is
+ * configured for 1000BASE-X, SGMII or 2500BASE-X.
+ * Port 3-4 can also support 1000BASE-X in automedia mode if port 9 is
+ * configured for RXAUI, 1000BASE-X, SGMII or 2500BASE-X.
+ *
+ * Port 5 can also support 1000BASE-X in automedia mode if port 10 is
+ * configured for 1000BASE-X, SGMII or 2500BASE-X.
+ * Port 6-7 can also support 1000BASE-X in automedia mode if port 10 is
+ * configured for RXAUI, 1000BASE-X, SGMII or 2500BASE-X.
+ *
+ * For now, be permissive (as the old code was) and allow 1000BASE-X
+ * on ports 2..7.
+ */
+ if (port >= 2 && port <= 7)
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
+
+ /* The C_Mode field can also be programmed for 10G speeds */
+ if (port == 9 || port == 10) {
+ __set_bit(PHY_INTERFACE_MODE_XAUI, supported);
+ __set_bit(PHY_INTERFACE_MODE_RXAUI, supported);
+
+ config->mac_capabilities |= MAC_10000FD;
+ }
}
-static void mv88e6393x_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static void mv88e6393x_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
+ unsigned long *supported = config->supported_interfaces;
bool is_6191x =
chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6191X;
- if (((port == 0 || port == 9) && !is_6191x) || port == 10) {
- phylink_set(mask, 10000baseT_Full);
- phylink_set(mask, 10000baseKR_Full);
- phylink_set(mask, 10000baseCR_Full);
- phylink_set(mask, 10000baseSR_Full);
- phylink_set(mask, 10000baseLR_Full);
- phylink_set(mask, 10000baseLRM_Full);
- phylink_set(mask, 10000baseER_Full);
- phylink_set(mask, 5000baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- phylink_set(mask, 2500baseT_Full);
- }
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+
+ /* The C_Mode field can be programmed for ports 0, 9 and 10 */
+ if (port == 0 || port == 9 || port == 10) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+ /* 6191X supports >1G modes only on port 10 */
+ if (!is_6191x || port == 10) {
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
+ __set_bit(PHY_INTERFACE_MODE_5GBASER, supported);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, supported);
+ /* FIXME: USXGMII is not supported yet */
+ /* __set_bit(PHY_INTERFACE_MODE_USXGMII, supported); */
- mv88e6065_phylink_validate(chip, port, mask, state);
+ config->mac_capabilities |= MAC_2500FD | MAC_5000FD |
+ MAC_10000FD;
+ }
+ }
}
-static void mv88e6xxx_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void mv88e6xxx_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct mv88e6xxx_chip *chip = ds->priv;
- /* Allow all the expected bits */
- phylink_set(mask, Autoneg);
- phylink_set(mask, Pause);
- phylink_set_port_modes(mask);
-
- if (chip->info->ops->phylink_validate)
- chip->info->ops->phylink_validate(chip, port, mask, state);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ chip->info->ops->phylink_get_caps(chip, port, config);
- /* We can only operate at 2500BaseX or 1000BaseX. If requested
- * to advertise both, only report advertising at 2500BaseX.
- */
- phylink_helper_basex_speed(state);
+ /* Internal ports need GMII for PHYLIB */
+ if (mv88e6xxx_phy_is_internal(ds, port))
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
}
static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
@@ -1283,8 +1425,15 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
pvlan = 0;
- /* Frames from user ports can egress any local DSA links and CPU ports,
- * as well as any local member of their bridge group.
+ /* Frames from standalone user ports can only egress on the
+ * upstream port.
+ */
+ if (!dsa_port_bridge_dev_get(dp))
+ return BIT(dsa_switch_upstream_port(ds));
+
+ /* Frames from bridged user ports can egress any local DSA
+ * links and CPU ports, as well as any local member of their
+ * bridge group.
*/
dsa_switch_for_each_port(other_dp, ds)
if (other_dp->type == DSA_PORT_TYPE_CPU ||
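
The comment above describes two egress policies: standalone user ports may only reach the upstream port, while bridged user ports may reach CPU/DSA ports and members of their own bridge. A small illustrative sketch of that policy (port roles and bridge membership are hypothetical, not taken from the driver):

#include <stdio.h>

enum role { USER, CPU, DSA };

struct port {
	enum role role;
	int bridge;		/* 0 = standalone, otherwise bridge id */
};

static unsigned int egress_mask(const struct port *ports, int nports,
				int port, int upstream)
{
	unsigned int mask = 0;
	int i;

	/* Standalone user ports may only talk to the upstream port. */
	if (ports[port].role == USER && !ports[port].bridge)
		return 1u << upstream;

	/* Bridged user ports may egress on CPU/DSA ports and on other
	 * members of the same bridge.
	 */
	for (i = 0; i < nports; i++)
		if (ports[i].role != USER ||
		    (ports[i].bridge && ports[i].bridge == ports[port].bridge))
			mask |= 1u << i;

	return mask;
}

int main(void)
{
	struct port ports[] = {
		{ USER, 1 }, { USER, 1 }, { USER, 0 }, { USER, 0 }, { CPU, 0 },
	};
	int upstream = 4;

	printf("bridged port 0: 0x%x\n",
	       egress_mask(ports, 5, 0, upstream));	/* 0x13: ports 0, 1, 4 */
	printf("standalone port 2: 0x%x\n",
	       egress_mask(ports, 5, 2, upstream));	/* 0x10: port 4 only */
	return 0;
}
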
@@ -1476,15 +1625,16 @@ static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port)
ds = dsa_switch_find(dst->index, dev);
dp = ds ? dsa_to_port(ds, port) : NULL;
- if (dp && dp->lag_dev) {
+ if (dp && dp->lag) {
/* As the PVT is used to limit flooding of
* FORWARD frames, which use the LAG ID as the
* source port, we must translate dev/port to
* the special "LAG device" in the PVT, using
- * the LAG ID as the port number.
+ * the LAG ID (one-based) as the port number
+ * (zero-based).
*/
dev = MV88E6XXX_G2_PVT_ADDR_DEV_TRUNK;
- port = dsa_lag_id(dst, dp->lag_dev);
+ port = dsa_port_lag_id_get(dp) - 1;
}
}
@@ -1517,24 +1667,31 @@ static int mv88e6xxx_pvt_setup(struct mv88e6xxx_chip *chip)
return 0;
}
-static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
+static int mv88e6xxx_port_fast_age_fid(struct mv88e6xxx_chip *chip, int port,
+ u16 fid)
{
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- if (dsa_to_port(ds, port)->lag_dev)
+ if (dsa_to_port(chip->ds, port)->lag)
/* Hardware is incapable of fast-aging a LAG through a
* regular ATU move operation. Until we have something
* more fancy in place this is a no-op.
*/
- return;
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_g1_atu_remove(chip, fid, port, false);
+}
+
+static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_g1_atu_remove(chip, 0, port, false);
+ err = mv88e6xxx_port_fast_age_fid(chip, port, 0);
mv88e6xxx_reg_unlock(chip);
if (err)
- dev_err(ds->dev, "p%d: failed to flush ATU\n", port);
+ dev_err(chip->ds->dev, "p%d: failed to flush ATU: %d\n",
+ port, err);
}
static int mv88e6xxx_vtu_setup(struct mv88e6xxx_chip *chip)
@@ -1616,21 +1773,11 @@ static int mv88e6xxx_fid_map_vlan(struct mv88e6xxx_chip *chip,
int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *fid_bitmap)
{
- int i, err;
- u16 fid;
-
bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
- /* Set every FID bit used by the (un)bridged ports */
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- err = mv88e6xxx_port_get_fid(chip, i, &fid);
- if (err)
- return err;
-
- set_bit(fid, fid_bitmap);
- }
-
- /* Set every FID bit used by the VLAN entries */
+ /* Every FID has an associated VID, so walking the VTU
+ * will discover the full set of FIDs in use.
+ */
return mv88e6xxx_vtu_walk(chip, mv88e6xxx_fid_map_vlan, fid_bitmap);
}
@@ -1643,10 +1790,7 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
if (err)
return err;
- /* The reset value 0x000 is used to indicate that multiple address
- * databases are not needed. Return the next positive available.
- */
- *fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1);
+ *fid = find_first_zero_bit(fid_bitmap, MV88E6XXX_N_FID);
if (unlikely(*fid >= mv88e6xxx_num_databases(chip)))
return -ENOSPC;
@@ -1654,6 +1798,187 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
return mv88e6xxx_g1_atu_flush(chip, *fid, true);
}
+static int mv88e6xxx_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ if (!chip->info->ops->stu_loadpurge)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->stu_loadpurge(chip, entry);
+}
+
+static int mv88e6xxx_stu_setup(struct mv88e6xxx_chip *chip)
+{
+ struct mv88e6xxx_stu_entry stu = {
+ .valid = true,
+ .sid = 0
+ };
+
+ if (!mv88e6xxx_has_stu(chip))
+ return 0;
+
+ /* Make sure that SID 0 is always valid. This is used by VTU
+ * entries that do not make use of the STU, e.g. when creating
+ * a VLAN upper on a port that is also part of a VLAN
+ * filtering bridge.
+ */
+ return mv88e6xxx_stu_loadpurge(chip, &stu);
+}
+
+static int mv88e6xxx_sid_get(struct mv88e6xxx_chip *chip, u8 *sid)
+{
+ DECLARE_BITMAP(busy, MV88E6XXX_N_SID) = { 0 };
+ struct mv88e6xxx_mst *mst;
+
+ __set_bit(0, busy);
+
+ list_for_each_entry(mst, &chip->msts, node)
+ __set_bit(mst->stu.sid, busy);
+
+ *sid = find_first_zero_bit(busy, MV88E6XXX_N_SID);
+
+ return (*sid >= mv88e6xxx_max_sid(chip)) ? -ENOSPC : 0;
+}
+
+static int mv88e6xxx_mst_put(struct mv88e6xxx_chip *chip, u8 sid)
+{
+ struct mv88e6xxx_mst *mst, *tmp;
+ int err;
+
+ if (!sid)
+ return 0;
+
+ list_for_each_entry_safe(mst, tmp, &chip->msts, node) {
+ if (mst->stu.sid != sid)
+ continue;
+
+ if (!refcount_dec_and_test(&mst->refcnt))
+ return 0;
+
+ mst->stu.valid = false;
+ err = mv88e6xxx_stu_loadpurge(chip, &mst->stu);
+ if (err) {
+ refcount_set(&mst->refcnt, 1);
+ return err;
+ }
+
+ list_del(&mst->node);
+ kfree(mst);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int mv88e6xxx_mst_get(struct mv88e6xxx_chip *chip, struct net_device *br,
+ u16 msti, u8 *sid)
+{
+ struct mv88e6xxx_mst *mst;
+ int err, i;
+
+ if (!mv88e6xxx_has_stu(chip)) {
+ err = -EOPNOTSUPP;
+ goto err;
+ }
+
+ if (!msti) {
+ *sid = 0;
+ return 0;
+ }
+
+ list_for_each_entry(mst, &chip->msts, node) {
+ if (mst->br == br && mst->msti == msti) {
+ refcount_inc(&mst->refcnt);
+ *sid = mst->stu.sid;
+ return 0;
+ }
+ }
+
+ err = mv88e6xxx_sid_get(chip, sid);
+ if (err)
+ goto err;
+
+ mst = kzalloc(sizeof(*mst), GFP_KERNEL);
+ if (!mst) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ INIT_LIST_HEAD(&mst->node);
+ refcount_set(&mst->refcnt, 1);
+ mst->br = br;
+ mst->msti = msti;
+ mst->stu.valid = true;
+ mst->stu.sid = *sid;
+
+ /* The bridge starts out all ports in the disabled state. But
+ * a STU state of disabled means to go by the port-global
+ * state. So we set all user ports' initial state to blocking,
+ * to match the bridge's behavior.
+ */
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
+ mst->stu.state[i] = dsa_is_user_port(chip->ds, i) ?
+ MV88E6XXX_PORT_CTL0_STATE_BLOCKING :
+ MV88E6XXX_PORT_CTL0_STATE_DISABLED;
+
+ err = mv88e6xxx_stu_loadpurge(chip, &mst->stu);
+ if (err)
+ goto err_free;
+
+ list_add_tail(&mst->node, &chip->msts);
+ return 0;
+
+err_free:
+ kfree(mst);
+err:
+ return err;
+}
+
+static int mv88e6xxx_port_mst_state_set(struct dsa_switch *ds, int port,
+ const struct switchdev_mst_state *st)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_mst *mst;
+ u8 state;
+ int err;
+
+ if (!mv88e6xxx_has_stu(chip))
+ return -EOPNOTSUPP;
+
+ switch (st->state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ state = MV88E6XXX_PORT_CTL0_STATE_BLOCKING;
+ break;
+ case BR_STATE_LEARNING:
+ state = MV88E6XXX_PORT_CTL0_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ state = MV88E6XXX_PORT_CTL0_STATE_FORWARDING;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ list_for_each_entry(mst, &chip->msts, node) {
+ if (mst->br == dsa_port_bridge_dev_get(dp) &&
+ mst->msti == st->msti) {
+ if (mst->stu.state[port] == state)
+ return 0;
+
+ mst->stu.state[port] = state;
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_stu_loadpurge(chip, &mst->stu);
+ mv88e6xxx_reg_unlock(chip);
+ return err;
+ }
+ }
+
+ return -ENOENT;
+}
+
static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
u16 vid)
{
@@ -2138,6 +2463,9 @@ static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port,
if (!vlan.valid) {
memset(&vlan, 0, sizeof(vlan));
+ if (vid == MV88E6XXX_VID_STANDALONE)
+ vlan.policy = true;
+
err = mv88e6xxx_atu_new(chip, &vlan.fid);
if (err)
return err;
@@ -2270,6 +2598,12 @@ static int mv88e6xxx_port_vlan_leave(struct mv88e6xxx_chip *chip,
if (err)
return err;
+ if (!vlan.valid) {
+ err = mv88e6xxx_mst_put(chip, vlan.sid);
+ if (err)
+ return err;
+ }
+
return mv88e6xxx_g1_atu_remove(chip, vlan.fid, port, false);
}
@@ -2315,8 +2649,75 @@ unlock:
return err;
}
+static int mv88e6xxx_port_vlan_fast_age(struct dsa_switch *ds, int port, u16 vid)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_vtu_entry vlan;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_vtu_get(chip, vid, &vlan);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_port_fast_age_fid(chip, port, vlan.fid);
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_vlan_msti_set(struct dsa_switch *ds,
+ struct dsa_bridge bridge,
+ const struct switchdev_vlan_msti *msti)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_vtu_entry vlan;
+ u8 old_sid, new_sid;
+ int err;
+
+ if (!mv88e6xxx_has_stu(chip))
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_vtu_get(chip, msti->vid, &vlan);
+ if (err)
+ goto unlock;
+
+ if (!vlan.valid) {
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ old_sid = vlan.sid;
+
+ err = mv88e6xxx_mst_get(chip, bridge.dev, msti->msti, &new_sid);
+ if (err)
+ goto unlock;
+
+ if (new_sid != old_sid) {
+ vlan.sid = new_sid;
+
+ err = mv88e6xxx_vtu_loadpurge(chip, &vlan);
+ if (err) {
+ mv88e6xxx_mst_put(chip, new_sid);
+ goto unlock;
+ }
+ }
+
+ err = mv88e6xxx_mst_put(chip, old_sid);
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+ return err;
+}
+
static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -2330,7 +2731,8 @@ static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -2476,7 +2878,8 @@ static int mv88e6xxx_map_virtual_bridge_to_pvt(struct dsa_switch *ds,
static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -2487,6 +2890,10 @@ static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
if (err)
goto unlock;
+ err = mv88e6xxx_port_set_map_da(chip, port, true);
+ if (err)
+ goto unlock;
+
err = mv88e6xxx_port_commit_pvid(chip, port);
if (err)
goto unlock;
@@ -2521,6 +2928,12 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
mv88e6xxx_port_vlan_map(chip, port))
dev_err(ds->dev, "failed to remap in-chip Port VLAN\n");
+ err = mv88e6xxx_port_set_map_da(chip, port, false);
+ if (err)
+ dev_err(ds->dev,
+ "port %d failed to restore map-DA: %pe\n",
+ port, ERR_PTR(err));
+
err = mv88e6xxx_port_commit_pvid(chip, port);
if (err)
dev_err(ds->dev,
@@ -2532,7 +2945,8 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds,
int tree_index, int sw_index,
- int port, struct dsa_bridge bridge)
+ int port, struct dsa_bridge bridge,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -2864,7 +3278,10 @@ static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port)
static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
{
+ struct device_node *phy_handle = NULL;
struct dsa_switch *ds = chip->ds;
+ struct dsa_port *dp;
+ int tx_amp;
int err;
u16 reg;
@@ -2918,12 +3335,13 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
return err;
/* Port Control 2: don't force a good FCS, set the MTU size to
- * 10222 bytes, disable 802.1q tags checking, don't discard tagged or
- * untagged frames on this port, do a destination address lookup on all
- * received packets as usual, disable ARP mirroring and don't send a
- * copy of all transmitted/received frames on this port to the CPU.
+ * 10222 bytes, disable 802.1q tags checking, don't discard
+ * tagged or untagged frames on this port, skip destination
+ * address lookup on user ports, disable ARP mirroring and don't
+ * send a copy of all transmitted/received frames on this port
+ * to the CPU.
*/
- err = mv88e6xxx_port_set_map_da(chip, port);
+ err = mv88e6xxx_port_set_map_da(chip, port, !dsa_is_user_port(ds, port));
if (err)
return err;
@@ -2931,8 +3349,44 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
if (err)
return err;
+ /* On chips that support it, set all downstream DSA ports'
+ * VLAN policy to TRAP. In combination with loading
+ * MV88E6XXX_VID_STANDALONE as a policy entry in the VTU, this
+ * provides a better isolation barrier between standalone
+ * ports, as the ATU is bypassed on any intermediate switches
+ * between the incoming port and the CPU.
+ */
+ if (dsa_is_downstream_port(ds, port) &&
+ chip->info->ops->port_set_policy) {
+ err = chip->info->ops->port_set_policy(chip, port,
+ MV88E6XXX_POLICY_MAPPING_VTU,
+ MV88E6XXX_POLICY_ACTION_TRAP);
+ if (err)
+ return err;
+ }
+
+ /* User ports start out in standalone mode and 802.1Q is
+ * therefore disabled. On DSA ports, all valid VIDs are always
+ * loaded in the VTU - therefore, enable 802.1Q in order to take
+ * advantage of VLAN policy on chips that support it.
+ */
err = mv88e6xxx_port_set_8021q_mode(chip, port,
- MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED);
+ dsa_is_user_port(ds, port) ?
+ MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED :
+ MV88E6XXX_PORT_CTL2_8021Q_MODE_SECURE);
+ if (err)
+ return err;
+
+ /* Bind MV88E6XXX_VID_STANDALONE to MV88E6XXX_FID_STANDALONE by
+ * virtue of the fact that mv88e6xxx_atu_new() will pick it as
+ * the first free FID. This will be used as the private PVID for
+ * unbridged ports. Shared (DSA and CPU) ports must also be
+ * members of this VID, in order to trap all frames assigned to
+ * it to the CPU.
+ */
+ err = mv88e6xxx_port_vlan_join(chip, port, MV88E6XXX_VID_STANDALONE,
+ MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNMODIFIED,
+ false);
if (err)
return err;
@@ -2945,7 +3399,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
* relying on their port default FID.
*/
err = mv88e6xxx_port_vlan_join(chip, port, MV88E6XXX_VID_BRIDGED,
- MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNTAGGED,
+ MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNMODIFIED,
false);
if (err)
return err;
@@ -3018,6 +3472,23 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
return err;
}
+ if (chip->info->ops->serdes_set_tx_amplitude) {
+ dp = dsa_to_port(ds, port);
+ if (dp)
+ phy_handle = of_parse_phandle(dp->dn, "phy-handle", 0);
+
+ if (phy_handle && !of_property_read_u32(phy_handle,
+ "tx-p2p-microvolt",
+ &tx_amp))
+ err = chip->info->ops->serdes_set_tx_amplitude(chip,
+ port, tx_amp);
+ if (phy_handle) {
+ of_node_put(phy_handle);
+ if (err)
+ return err;
+ }
+ }
+
/* Port based VLAN map: give each port the same default address
* database, and allow bidirectional communication between the
* CPU and DSA port(s), and the other ports.
@@ -3216,6 +3687,13 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
if (err)
goto unlock;
+ /* Must be called after mv88e6xxx_vtu_setup (which flushes the
+ * VTU, thereby also flushing the STU).
+ */
+ err = mv88e6xxx_stu_setup(chip);
+ if (err)
+ goto unlock;
+
/* Setup Switch Port Registers */
for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
if (dsa_is_unused_port(ds, i))
@@ -3589,7 +4067,9 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.rmu_disable = mv88e6085_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -3623,7 +4103,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6095_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -3639,6 +4119,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.port_sync_link = mv88e6185_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -3669,7 +4150,9 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.rmu_disable = mv88e6085_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6095_phylink_get_caps,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -3706,7 +4189,9 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -3747,7 +4232,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6141_ops = {
@@ -3795,6 +4280,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6341_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -3811,7 +4298,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
- .phylink_validate = mv88e6341_phylink_validate,
+ .phylink_get_caps = mv88e6341_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6161_ops = {
@@ -3851,9 +4338,11 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.avb_ops = &mv88e6165_avb_ops,
.ptp_ops = &mv88e6165_ptp_ops,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -3887,9 +4376,11 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.avb_ops = &mv88e6165_avb_ops,
.ptp_ops = &mv88e6165_ptp_ops,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6171_ops = {
@@ -3931,7 +4422,9 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6172_ops = {
@@ -3977,6 +4470,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
.serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
.serdes_pcs_config = mv88e6352_serdes_pcs_config,
@@ -3986,7 +4481,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
- .phylink_validate = mv88e6352_phylink_validate,
+ .phylink_get_caps = mv88e6352_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6175_ops = {
@@ -4028,7 +4523,9 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6176_ops = {
@@ -4074,6 +4571,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
.serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
.serdes_pcs_config = mv88e6352_serdes_pcs_config,
@@ -4085,8 +4584,9 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.serdes_irq_status = mv88e6352_serdes_irq_status,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
+ .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
.gpio_ops = &mv88e6352_gpio_ops,
- .phylink_validate = mv88e6352_phylink_validate,
+ .phylink_get_caps = mv88e6352_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6185_ops = {
@@ -4125,7 +4625,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -4172,6 +4672,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -4187,7 +4689,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
- .phylink_validate = mv88e6390_phylink_validate,
+ .phylink_get_caps = mv88e6390_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6190x_ops = {
@@ -4233,6 +4735,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390x_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -4248,7 +4752,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
- .phylink_validate = mv88e6390x_phylink_validate,
+ .phylink_get_caps = mv88e6390x_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6191_ops = {
@@ -4292,6 +4796,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -4308,7 +4814,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.serdes_get_regs = mv88e6390_serdes_get_regs,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6390_phylink_validate,
+ .phylink_get_caps = mv88e6390_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6240_ops = {
@@ -4354,6 +4860,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
.serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
.serdes_pcs_config = mv88e6352_serdes_pcs_config,
@@ -4365,10 +4873,11 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.serdes_irq_status = mv88e6352_serdes_irq_status,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
+ .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6352_phylink_validate,
+ .phylink_get_caps = mv88e6352_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6250_ops = {
@@ -4408,7 +4917,7 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6250_ptp_ops,
- .phylink_validate = mv88e6065_phylink_validate,
+ .phylink_get_caps = mv88e6250_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6290_ops = {
@@ -4453,6 +4962,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -4470,7 +4981,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6390_phylink_validate,
+ .phylink_get_caps = mv88e6390_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6320_ops = {
@@ -4514,7 +5025,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6321_ops = {
@@ -4556,7 +5067,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6341_ops = {
@@ -4604,6 +5115,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6341_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -4622,7 +5135,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
- .phylink_validate = mv88e6341_phylink_validate,
+ .phylink_get_caps = mv88e6341_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6350_ops = {
@@ -4664,7 +5177,9 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6351_ops = {
@@ -4706,9 +5221,11 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6352_ops = {
@@ -4754,6 +5271,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
.serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
.serdes_pcs_config = mv88e6352_serdes_pcs_config,
@@ -4771,7 +5290,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.serdes_get_stats = mv88e6352_serdes_get_stats,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
- .phylink_validate = mv88e6352_phylink_validate,
+ .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
+ .phylink_get_caps = mv88e6352_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6390_ops = {
@@ -4818,6 +5338,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -4836,7 +5358,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
- .phylink_validate = mv88e6390_phylink_validate,
+ .phylink_get_caps = mv88e6390_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6390x_ops = {
@@ -4883,6 +5405,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390x_serdes_get_lane,
.serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
@@ -4900,7 +5424,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6390x_phylink_validate,
+ .phylink_get_caps = mv88e6390x_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6393x_ops = {
@@ -4951,6 +5475,8 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6393x_serdes_power,
.serdes_get_lane = mv88e6393x_serdes_get_lane,
.serdes_pcs_get_state = mv88e6393x_serdes_pcs_get_state,
@@ -4964,7 +5490,7 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6393x_phylink_validate,
+ .phylink_get_caps = mv88e6393x_phylink_get_caps,
};
static const struct mv88e6xxx_info mv88e6xxx_table[] = {
@@ -4977,6 +5503,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 10,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5019,6 +5546,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11,
.num_internal_phys = 8,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5042,6 +5570,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 3,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5086,6 +5615,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.num_gpio = 11,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x10,
.global1_addr = 0x1b,
@@ -5109,6 +5639,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 6,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5133,6 +5664,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 6,
.num_internal_phys = 0,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5156,6 +5688,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5180,6 +5713,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5203,6 +5737,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5227,6 +5762,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5272,6 +5808,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5295,6 +5832,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5317,6 +5855,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5339,6 +5878,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5361,6 +5901,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5411,6 +5952,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5456,6 +5998,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5529,6 +6072,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 6,
.num_gpio = 11,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x10,
.global1_addr = 0x1b,
@@ -5553,6 +6097,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5576,6 +6121,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5600,6 +6146,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5624,6 +6171,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5648,6 +6196,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5671,6 +6220,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5739,6 +6289,7 @@ static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
mutex_init(&chip->reg_lock);
INIT_LIST_HEAD(&chip->mdios);
idr_init(&chip->policies);
+ INIT_LIST_HEAD(&chip->msts);
return chip;
}
@@ -5791,7 +6342,8 @@ static int mv88e6xxx_change_tag_protocol(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -5805,7 +6357,8 @@ static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -5819,7 +6372,8 @@ static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
static int mv88e6xxx_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+ bool ingress,
+ struct netlink_ext_ack *extack)
{
enum mv88e6xxx_egress_direction direction = ingress ?
MV88E6XXX_EGRESS_DIR_INGRESS :
@@ -5893,7 +6447,7 @@ static int mv88e6xxx_port_pre_bridge_flags(struct dsa_switch *ds, int port,
const struct mv88e6xxx_ops *ops;
if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
- BR_BCAST_FLOOD))
+ BR_BCAST_FLOOD | BR_PORT_LOCKED))
return -EINVAL;
ops = chip->info->ops;
@@ -5951,6 +6505,13 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port,
goto out;
}
+ if (flags.mask & BR_PORT_LOCKED) {
+ bool locked = !!(flags.val & BR_PORT_LOCKED);
+
+ err = mv88e6xxx_port_set_lock(chip, port, locked);
+ if (err)
+ goto out;
+ }
out:
mv88e6xxx_reg_unlock(chip);
@@ -5958,21 +6519,20 @@ out:
}
static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
- struct net_device *lag,
+ struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct dsa_port *dp;
- int id, members = 0;
+ int members = 0;
if (!mv88e6xxx_has_lag(chip))
return false;
- id = dsa_lag_id(ds->dst, lag);
- if (id < 0 || id >= ds->num_lag_ids)
+ if (!lag.id)
return false;
- dsa_lag_foreach_port(dp, ds->dst, lag)
+ dsa_lag_foreach_port(dp, ds->dst, &lag)
/* Includes the port joining the LAG */
members++;
@@ -5992,20 +6552,21 @@ static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
return true;
}
-static int mv88e6xxx_lag_sync_map(struct dsa_switch *ds, struct net_device *lag)
+static int mv88e6xxx_lag_sync_map(struct dsa_switch *ds, struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct dsa_port *dp;
u16 map = 0;
int id;
- id = dsa_lag_id(ds->dst, lag);
+ /* DSA LAG IDs are one-based, hardware is zero-based */
+ id = lag.id - 1;
/* Build the map of all ports to distribute flows destined for
* this LAG. This can be either a local user port, or a DSA
* port if the LAG port is on a remote chip.
*/
- dsa_lag_foreach_port(dp, ds->dst, lag)
+ dsa_lag_foreach_port(dp, ds->dst, &lag)
map |= BIT(dsa_towards_port(ds, dp->ds->index, dp->index));
return mv88e6xxx_g2_trunk_mapping_write(chip, id, map);
@@ -6050,8 +6611,8 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
{
struct mv88e6xxx_chip *chip = ds->priv;
unsigned int id, num_tx;
- struct net_device *lag;
struct dsa_port *dp;
+ struct dsa_lag *lag;
int i, err, nth;
u16 mask[8];
u16 ivec;
@@ -6060,8 +6621,8 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
ivec = BIT(mv88e6xxx_num_ports(chip)) - 1;
/* Disable all masks for ports that _are_ members of a LAG. */
- list_for_each_entry(dp, &ds->dst->ports, list) {
- if (!dp->lag_dev || dp->ds != ds)
+ dsa_switch_for_each_port(dp, ds) {
+ if (!dp->lag)
continue;
ivec &= ~BIT(dp->index);
@@ -6074,7 +6635,7 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
* are in the Tx set.
*/
dsa_lags_foreach_id(id, ds->dst) {
- lag = dsa_lag_dev(ds->dst, id);
+ lag = dsa_lag_by_id(ds->dst, id);
if (!lag)
continue;
@@ -6110,7 +6671,7 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
}
static int mv88e6xxx_lag_sync_masks_map(struct dsa_switch *ds,
- struct net_device *lag)
+ struct dsa_lag lag)
{
int err;
@@ -6134,7 +6695,7 @@ static int mv88e6xxx_port_lag_change(struct dsa_switch *ds, int port)
}
static int mv88e6xxx_port_lag_join(struct dsa_switch *ds, int port,
- struct net_device *lag,
+ struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct mv88e6xxx_chip *chip = ds->priv;
@@ -6143,7 +6704,8 @@ static int mv88e6xxx_port_lag_join(struct dsa_switch *ds, int port,
if (!mv88e6xxx_lag_can_offload(ds, lag, info))
return -EOPNOTSUPP;
- id = dsa_lag_id(ds->dst, lag);
+ /* DSA LAG IDs are one-based */
+ id = lag.id - 1;
mv88e6xxx_reg_lock(chip);
@@ -6166,7 +6728,7 @@ err_unlock:
}
static int mv88e6xxx_port_lag_leave(struct dsa_switch *ds, int port,
- struct net_device *lag)
+ struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err_sync, err_trunk;
@@ -6191,7 +6753,7 @@ static int mv88e6xxx_crosschip_lag_change(struct dsa_switch *ds, int sw_index,
}
static int mv88e6xxx_crosschip_lag_join(struct dsa_switch *ds, int sw_index,
- int port, struct net_device *lag,
+ int port, struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct mv88e6xxx_chip *chip = ds->priv;
@@ -6214,7 +6776,7 @@ unlock:
}
static int mv88e6xxx_crosschip_lag_leave(struct dsa_switch *ds, int sw_index,
- int port, struct net_device *lag)
+ int port, struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err_sync, err_pvt;
@@ -6233,7 +6795,7 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.teardown = mv88e6xxx_teardown,
.port_setup = mv88e6xxx_port_setup,
.port_teardown = mv88e6xxx_port_teardown,
- .phylink_validate = mv88e6xxx_validate,
+ .phylink_get_caps = mv88e6xxx_get_caps,
.phylink_mac_link_state = mv88e6xxx_serdes_pcs_get_state,
.phylink_mac_config = mv88e6xxx_mac_config,
.phylink_mac_an_restart = mv88e6xxx_serdes_pcs_an_restart,
@@ -6261,10 +6823,13 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.port_pre_bridge_flags = mv88e6xxx_port_pre_bridge_flags,
.port_bridge_flags = mv88e6xxx_port_bridge_flags,
.port_stp_state_set = mv88e6xxx_port_stp_state_set,
+ .port_mst_state_set = mv88e6xxx_port_mst_state_set,
.port_fast_age = mv88e6xxx_port_fast_age,
+ .port_vlan_fast_age = mv88e6xxx_port_vlan_fast_age,
.port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
.port_vlan_add = mv88e6xxx_port_vlan_add,
.port_vlan_del = mv88e6xxx_port_vlan_del,
+ .vlan_msti_set = mv88e6xxx_vlan_msti_set,
.port_fdb_add = mv88e6xxx_port_fdb_add,
.port_fdb_del = mv88e6xxx_port_fdb_del,
.port_fdb_dump = mv88e6xxx_port_fdb_dump,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 8271b8aa7b71..5e03cfe50156 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -20,6 +20,7 @@
#define EDSA_HLEN 8
#define MV88E6XXX_N_FID 4096
+#define MV88E6XXX_N_SID 64
#define MV88E6XXX_FID_STANDALONE 0
#define MV88E6XXX_FID_BRIDGED 1
@@ -130,6 +131,7 @@ struct mv88e6xxx_info {
unsigned int num_internal_phys;
unsigned int num_gpio;
unsigned int max_vid;
+ unsigned int max_sid;
unsigned int port_base_addr;
unsigned int phy_base_addr;
unsigned int global1_addr;
@@ -179,7 +181,14 @@ struct mv88e6xxx_vtu_entry {
u16 fid;
u8 sid;
bool valid;
+ bool policy;
u8 member[DSA_MAX_PORTS];
+ u8 state[DSA_MAX_PORTS]; /* Older silicon has no STU */
+};
+
+struct mv88e6xxx_stu_entry {
+ u8 sid;
+ bool valid;
u8 state[DSA_MAX_PORTS];
};
@@ -278,6 +287,7 @@ enum mv88e6xxx_region_id {
MV88E6XXX_REGION_GLOBAL2,
MV88E6XXX_REGION_ATU,
MV88E6XXX_REGION_VTU,
+ MV88E6XXX_REGION_STU,
MV88E6XXX_REGION_PVT,
_MV88E6XXX_REGION_MAX,
@@ -287,6 +297,16 @@ struct mv88e6xxx_region_priv {
enum mv88e6xxx_region_id id;
};
+struct mv88e6xxx_mst {
+ struct list_head node;
+
+ refcount_t refcnt;
+ struct net_device *br;
+ u16 msti;
+
+ struct mv88e6xxx_stu_entry stu;
+};
+
struct mv88e6xxx_chip {
const struct mv88e6xxx_info *info;
@@ -387,11 +407,15 @@ struct mv88e6xxx_chip {
/* devlink regions */
struct devlink_region *regions[_MV88E6XXX_REGION_MAX];
+
+ /* Bridge MST to SID mappings */
+ struct list_head msts;
};
struct mv88e6xxx_bus_ops {
int (*read)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
int (*write)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
+ int (*init)(struct mv88e6xxx_chip *chip);
};
struct mv88e6xxx_mdio_bus {
@@ -586,6 +610,10 @@ struct mv88e6xxx_ops {
void (*serdes_get_regs)(struct mv88e6xxx_chip *chip, int port,
void *_p);
+ /* SERDES SGMII/Fiber Output Amplitude */
+ int (*serdes_set_tx_amplitude)(struct mv88e6xxx_chip *chip, int port,
+ int val);
+
/* Address Translation Unit operations */
int (*atu_get_hash)(struct mv88e6xxx_chip *chip, u8 *hash);
int (*atu_set_hash)(struct mv88e6xxx_chip *chip, u8 hash);
@@ -596,6 +624,12 @@ struct mv88e6xxx_ops {
int (*vtu_loadpurge)(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
+ /* Spanning Tree Unit operations */
+ int (*stu_getnext)(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+ int (*stu_loadpurge)(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+
/* GPIO operations */
const struct mv88e6xxx_gpio_ops *gpio_ops;
@@ -609,9 +643,8 @@ struct mv88e6xxx_ops {
const struct mv88e6xxx_ptp_ops *ptp_ops;
/* Phylink */
- void (*phylink_validate)(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state);
+ void (*phylink_get_caps)(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config);
/* Max Frame Size */
int (*set_max_frame_size)(struct mv88e6xxx_chip *chip, int mtu);
@@ -695,6 +728,13 @@ struct mv88e6xxx_hw_stat {
int type;
};
+static inline bool mv88e6xxx_has_stu(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->max_sid > 0 &&
+ chip->info->ops->stu_loadpurge &&
+ chip->info->ops->stu_getnext;
+}
+
static inline bool mv88e6xxx_has_pvt(struct mv88e6xxx_chip *chip)
{
return chip->info->pvt;
@@ -725,6 +765,11 @@ static inline unsigned int mv88e6xxx_max_vid(struct mv88e6xxx_chip *chip)
return chip->info->max_vid;
}
+static inline unsigned int mv88e6xxx_max_sid(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->max_sid;
+}
+
static inline u16 mv88e6xxx_port_mask(struct mv88e6xxx_chip *chip)
{
return GENMASK((s32)mv88e6xxx_num_ports(chip) - 1, 0);
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
index 381068395c63..1266eabee086 100644
--- a/drivers/net/dsa/mv88e6xxx/devlink.c
+++ b/drivers/net/dsa/mv88e6xxx/devlink.c
@@ -503,6 +503,85 @@ static int mv88e6xxx_region_vtu_snapshot(struct devlink *dl,
return 0;
}
+/**
+ * struct mv88e6xxx_devlink_stu_entry - Devlink STU entry
+ * @sid: Global1/3: SID, unknown filters and learning.
+ * @vid: Global1/6: Valid bit.
+ * @data: Global1/7-9: Membership data and priority override.
+ * @resvd: Reserved. In case we forgot something.
+ *
+ * The STU entry format varies between chipset generations. Peridot
+ * and Amethyst packs the STU data into Global1/7-8. Older silicon
+ * spreads the information across all three VTU data registers -
+ * inheriting the layout of even older hardware that had no STU at
+ * all. Since this is a low-level debug interface, copy all data
+ * verbatim and defer parsing to the consumer.
+ */
+struct mv88e6xxx_devlink_stu_entry {
+ u16 sid;
+ u16 vid;
+ u16 data[3];
+ u16 resvd;
+};
+
+static int mv88e6xxx_region_stu_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct mv88e6xxx_devlink_stu_entry *table, *entry;
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_stu_entry stu;
+ int err;
+
+ table = kcalloc(mv88e6xxx_max_sid(chip) + 1,
+ sizeof(struct mv88e6xxx_devlink_stu_entry),
+ GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ entry = table;
+ stu.sid = mv88e6xxx_max_sid(chip);
+ stu.valid = false;
+
+ mv88e6xxx_reg_lock(chip);
+
+ do {
+ err = mv88e6xxx_g1_stu_getnext(chip, &stu);
+ if (err)
+ break;
+
+ if (!stu.valid)
+ break;
+
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6352_G1_VTU_SID,
+ &entry->sid);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_VID,
+ &entry->vid);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA1,
+ &entry->data[0]);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA2,
+ &entry->data[1]);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA3,
+ &entry->data[2]);
+ if (err)
+ break;
+
+ entry++;
+ } while (stu.sid < mv88e6xxx_max_sid(chip));
+
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err) {
+ kfree(table);
+ return err;
+ }
+
+ *data = (u8 *)table;
+ return 0;
+}
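+
+/* Usage sketch (illustrative; the device handle is board-specific and
+ * shown only as a placeholder): the region added below is read through
+ * the standard devlink flow, e.g.
+ *
+ *	devlink region new <dev>/stu snapshot id 1
+ *	devlink region dump <dev>/stu snapshot 1
+ */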
+
static int mv88e6xxx_region_pvt_snapshot(struct devlink *dl,
const struct devlink_region_ops *ops,
struct netlink_ext_ack *extack,
@@ -605,6 +684,12 @@ static struct devlink_region_ops mv88e6xxx_region_vtu_ops = {
.destructor = kfree,
};
+static struct devlink_region_ops mv88e6xxx_region_stu_ops = {
+ .name = "stu",
+ .snapshot = mv88e6xxx_region_stu_snapshot,
+ .destructor = kfree,
+};
+
static struct devlink_region_ops mv88e6xxx_region_pvt_ops = {
.name = "pvt",
.snapshot = mv88e6xxx_region_pvt_snapshot,
@@ -640,6 +725,11 @@ static struct mv88e6xxx_region mv88e6xxx_regions[] = {
.ops = &mv88e6xxx_region_vtu_ops
/* calculated at runtime */
},
+ [MV88E6XXX_REGION_STU] = {
+ .ops = &mv88e6xxx_region_stu_ops,
+ .cond = mv88e6xxx_has_stu,
+ /* calculated at runtime */
+ },
[MV88E6XXX_REGION_PVT] = {
.ops = &mv88e6xxx_region_pvt_ops,
.size = MV88E6XXX_MAX_PVT_ENTRIES * sizeof(u16),
@@ -706,6 +796,10 @@ int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds)
size = (mv88e6xxx_max_vid(chip) + 1) *
sizeof(struct mv88e6xxx_devlink_vtu_entry);
break;
+ case MV88E6XXX_REGION_STU:
+ size = (mv88e6xxx_max_sid(chip) + 1) *
+ sizeof(struct mv88e6xxx_devlink_stu_entry);
+ break;
}
region = dsa_devlink_region_create(ds, ops, 1, size);
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 4f3dbb015f77..65958b2a0d3a 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -46,6 +46,7 @@
/* Offset 0x02: VTU FID Register */
#define MV88E6352_G1_VTU_FID 0x02
+#define MV88E6352_G1_VTU_FID_VID_POLICY 0x1000
#define MV88E6352_G1_VTU_FID_MASK 0x0fff
/* Offset 0x03: VTU SID Register */
@@ -347,6 +348,16 @@ int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+int mv88e6352_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+int mv88e6352_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+int mv88e6390_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+int mv88e6390_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
int mv88e6xxx_g1_vtu_prob_irq_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_g1_vtu_prob_irq_free(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g1_atu_get_next(struct mv88e6xxx_chip *chip, u16 fid);
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index ae12c981923e..38e18f5811bf 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -27,7 +27,7 @@ static int mv88e6xxx_g1_vtu_fid_read(struct mv88e6xxx_chip *chip,
return err;
entry->fid = val & MV88E6352_G1_VTU_FID_MASK;
-
+ entry->policy = !!(val & MV88E6352_G1_VTU_FID_VID_POLICY);
return 0;
}
@@ -36,13 +36,15 @@ static int mv88e6xxx_g1_vtu_fid_write(struct mv88e6xxx_chip *chip,
{
u16 val = entry->fid & MV88E6352_G1_VTU_FID_MASK;
+ if (entry->policy)
+ val |= MV88E6352_G1_VTU_FID_VID_POLICY;
+
return mv88e6xxx_g1_write(chip, MV88E6352_G1_VTU_FID, val);
}
/* Offset 0x03: VTU SID Register */
-static int mv88e6xxx_g1_vtu_sid_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+static int mv88e6xxx_g1_vtu_sid_read(struct mv88e6xxx_chip *chip, u8 *sid)
{
u16 val;
int err;
@@ -51,15 +53,14 @@ static int mv88e6xxx_g1_vtu_sid_read(struct mv88e6xxx_chip *chip,
if (err)
return err;
- entry->sid = val & MV88E6352_G1_VTU_SID_MASK;
+ *sid = val & MV88E6352_G1_VTU_SID_MASK;
return 0;
}
-static int mv88e6xxx_g1_vtu_sid_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+static int mv88e6xxx_g1_vtu_sid_write(struct mv88e6xxx_chip *chip, u8 sid)
{
- u16 val = entry->sid & MV88E6352_G1_VTU_SID_MASK;
+ u16 val = sid & MV88E6352_G1_VTU_SID_MASK;
return mv88e6xxx_g1_write(chip, MV88E6352_G1_VTU_SID, val);
}
@@ -88,7 +89,7 @@ static int mv88e6xxx_g1_vtu_op(struct mv88e6xxx_chip *chip, u16 op)
/* Offset 0x06: VTU VID Register */
static int mv88e6xxx_g1_vtu_vid_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+ bool *valid, u16 *vid)
{
u16 val;
int err;
@@ -97,25 +98,28 @@ static int mv88e6xxx_g1_vtu_vid_read(struct mv88e6xxx_chip *chip,
if (err)
return err;
- entry->vid = val & 0xfff;
+ if (vid) {
+ *vid = val & 0xfff;
- if (val & MV88E6390_G1_VTU_VID_PAGE)
- entry->vid |= 0x1000;
+ if (val & MV88E6390_G1_VTU_VID_PAGE)
+ *vid |= 0x1000;
+ }
- entry->valid = !!(val & MV88E6XXX_G1_VTU_VID_VALID);
+ if (valid)
+ *valid = !!(val & MV88E6XXX_G1_VTU_VID_VALID);
return 0;
}
static int mv88e6xxx_g1_vtu_vid_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+ bool valid, u16 vid)
{
- u16 val = entry->vid & 0xfff;
+ u16 val = vid & 0xfff;
- if (entry->vid & 0x1000)
+ if (vid & 0x1000)
val |= MV88E6390_G1_VTU_VID_PAGE;
- if (entry->valid)
+ if (valid)
val |= MV88E6XXX_G1_VTU_VID_VALID;
return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_VTU_VID, val);
@@ -144,7 +148,7 @@ static int mv88e6185_g1_vtu_stu_data_read(struct mv88e6xxx_chip *chip,
}
static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+ u8 *member, u8 *state)
{
u16 regs[3];
int err;
@@ -157,36 +161,20 @@ static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
/* Extract MemberTag data */
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
unsigned int member_offset = (i % 4) * 4;
+ unsigned int state_offset = member_offset + 2;
- entry->member[i] = (regs[i / 4] >> member_offset) & 0x3;
- }
-
- return 0;
-}
-
-static int mv88e6185_g1_stu_data_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
-{
- u16 regs[3];
- int err;
- int i;
-
- err = mv88e6185_g1_vtu_stu_data_read(chip, regs);
- if (err)
- return err;
-
- /* Extract PortState data */
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- unsigned int state_offset = (i % 4) * 4 + 2;
+ if (member)
+ member[i] = (regs[i / 4] >> member_offset) & 0x3;
- entry->state[i] = (regs[i / 4] >> state_offset) & 0x3;
+ if (state)
+ state[i] = (regs[i / 4] >> state_offset) & 0x3;
}
return 0;
}
static int mv88e6185_g1_vtu_data_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+ u8 *member, u8 *state)
{
u16 regs[3] = { 0 };
int i;
@@ -196,8 +184,11 @@ static int mv88e6185_g1_vtu_data_write(struct mv88e6xxx_chip *chip,
unsigned int member_offset = (i % 4) * 4;
unsigned int state_offset = member_offset + 2;
- regs[i / 4] |= (entry->member[i] & 0x3) << member_offset;
- regs[i / 4] |= (entry->state[i] & 0x3) << state_offset;
+ if (member)
+ regs[i / 4] |= (member[i] & 0x3) << member_offset;
+
+ if (state)
+ regs[i / 4] |= (state[i] & 0x3) << state_offset;
}
/* Write all 3 VTU/STU Data registers */
@@ -265,48 +256,6 @@ static int mv88e6390_g1_vtu_data_write(struct mv88e6xxx_chip *chip, u8 *data)
/* VLAN Translation Unit Operations */
-static int mv88e6xxx_g1_vtu_stu_getnext(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
-{
- int err;
-
- err = mv88e6xxx_g1_vtu_sid_write(chip, entry);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_GET_NEXT);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_vtu_sid_read(chip, entry);
- if (err)
- return err;
-
- return mv88e6xxx_g1_vtu_vid_read(chip, entry);
-}
-
-static int mv88e6xxx_g1_vtu_stu_get(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *vtu)
-{
- struct mv88e6xxx_vtu_entry stu;
- int err;
-
- err = mv88e6xxx_g1_vtu_sid_read(chip, vtu);
- if (err)
- return err;
-
- stu.sid = vtu->sid - 1;
-
- err = mv88e6xxx_g1_vtu_stu_getnext(chip, &stu);
- if (err)
- return err;
-
- if (stu.sid != vtu->sid || !stu.valid)
- return -EINVAL;
-
- return 0;
-}
-
int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry)
{
@@ -324,7 +273,7 @@ int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
* write the VID only once, when the entry is given as invalid.
*/
if (!entry->valid) {
- err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_vid_write(chip, false, entry->vid);
if (err)
return err;
}
@@ -333,7 +282,7 @@ int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
if (err)
return err;
- return mv88e6xxx_g1_vtu_vid_read(chip, entry);
+ return mv88e6xxx_g1_vtu_vid_read(chip, &entry->valid, &entry->vid);
}
int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
@@ -347,11 +296,7 @@ int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
return err;
if (entry->valid) {
- err = mv88e6185_g1_vtu_data_read(chip, entry);
- if (err)
- return err;
-
- err = mv88e6185_g1_stu_data_read(chip, entry);
+ err = mv88e6185_g1_vtu_data_read(chip, entry->member, entry->state);
if (err)
return err;
@@ -381,7 +326,7 @@ int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
return err;
if (entry->valid) {
- err = mv88e6185_g1_vtu_data_read(chip, entry);
+ err = mv88e6185_g1_vtu_data_read(chip, entry->member, NULL);
if (err)
return err;
@@ -389,12 +334,7 @@ int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
if (err)
return err;
- /* Fetch VLAN PortState data from the STU */
- err = mv88e6xxx_g1_vtu_stu_get(chip, entry);
- if (err)
- return err;
-
- err = mv88e6185_g1_stu_data_read(chip, entry);
+ err = mv88e6xxx_g1_vtu_sid_read(chip, &entry->sid);
if (err)
return err;
}
@@ -417,16 +357,11 @@ int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
if (err)
return err;
- /* Fetch VLAN PortState data from the STU */
- err = mv88e6xxx_g1_vtu_stu_get(chip, entry);
- if (err)
- return err;
-
- err = mv88e6390_g1_vtu_data_read(chip, entry->state);
+ err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
if (err)
return err;
- err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
+ err = mv88e6xxx_g1_vtu_sid_read(chip, &entry->sid);
if (err)
return err;
}
@@ -444,12 +379,12 @@ int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
if (err)
return err;
- err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, entry->vid);
if (err)
return err;
if (entry->valid) {
- err = mv88e6185_g1_vtu_data_write(chip, entry);
+ err = mv88e6185_g1_vtu_data_write(chip, entry->member, entry->state);
if (err)
return err;
@@ -476,27 +411,21 @@ int mv88e6352_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
if (err)
return err;
- err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, entry->vid);
if (err)
return err;
if (entry->valid) {
- /* Write MemberTag and PortState data */
- err = mv88e6185_g1_vtu_data_write(chip, entry);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_vtu_sid_write(chip, entry);
+ /* Write MemberTag data */
+ err = mv88e6185_g1_vtu_data_write(chip, entry->member, NULL);
if (err)
return err;
- /* Load STU entry */
- err = mv88e6xxx_g1_vtu_op(chip,
- MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
+ err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
if (err)
return err;
- err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
if (err)
return err;
}
@@ -514,41 +443,113 @@ int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
if (err)
return err;
- err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, entry->vid);
if (err)
return err;
if (entry->valid) {
- /* Write PortState data */
- err = mv88e6390_g1_vtu_data_write(chip, entry->state);
+ /* Write MemberTag data */
+ err = mv88e6390_g1_vtu_data_write(chip, entry->member);
if (err)
return err;
- err = mv88e6xxx_g1_vtu_sid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
if (err)
return err;
- /* Load STU entry */
- err = mv88e6xxx_g1_vtu_op(chip,
- MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
if (err)
return err;
+ }
- /* Write MemberTag data */
- err = mv88e6390_g1_vtu_data_write(chip, entry->member);
+ /* Load/Purge VTU entry */
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE);
+}
+
+int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_FLUSH_ALL);
+}
+
+/* Spanning Tree Unit Operations */
+
+int mv88e6xxx_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ /* To get the next higher active SID, the STU GetNext operation can be
+ * started again without setting the SID register, since it already
+ * contains the last SID.
+ *
+ * To save a few hardware accesses and abstract this to the caller,
+ * write the SID only once, when the entry is given as invalid.
+ */
+ if (!entry->valid) {
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
if (err)
return err;
+ }
- err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_GET_NEXT);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_vid_read(chip, &entry->valid, NULL);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6xxx_g1_vtu_sid_read(chip, &entry->sid);
if (err)
return err;
}
- /* Load/Purge VTU entry */
- return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE);
+ return 0;
}
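+
+/* Usage sketch (illustrative, mirroring the devlink STU snapshot in this
+ * series): a full walk starts from an invalid entry whose sid is
+ * mv88e6xxx_max_sid(chip) and stops once getnext wraps around:
+ *
+ *	stu.sid = mv88e6xxx_max_sid(chip);
+ *	stu.valid = false;
+ *	do {
+ *		err = mv88e6xxx_g1_stu_getnext(chip, &stu);
+ *		if (err || !stu.valid)
+ *			break;
+ *		...
+ *	} while (stu.sid < mv88e6xxx_max_sid(chip));
+ */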
-int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip)
+int mv88e6352_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_stu_getnext(chip, entry);
+ if (err)
+ return err;
+
+ if (!entry->valid)
+ return 0;
+
+ return mv88e6185_g1_vtu_data_read(chip, NULL, entry->state);
+}
+
+int mv88e6390_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_stu_getnext(chip, entry);
+ if (err)
+ return err;
+
+ if (!entry->valid)
+ return 0;
+
+ return mv88e6390_g1_vtu_data_read(chip, entry->state);
+}
+
+int mv88e6352_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
{
int err;
@@ -556,16 +557,59 @@ int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip)
if (err)
return err;
- return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_FLUSH_ALL);
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, 0);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6185_g1_vtu_data_write(chip, NULL, entry->state);
+ if (err)
+ return err;
+ }
+
+ /* Load/Purge STU entry */
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
}
+int mv88e6390_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, 0);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6390_g1_vtu_data_write(chip, entry->state);
+ if (err)
+ return err;
+ }
+
+ /* Load/Purge STU entry */
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
+}
+
+/* VTU Violation Management */
+
static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id)
{
struct mv88e6xxx_chip *chip = dev_id;
- struct mv88e6xxx_vtu_entry entry;
+ u16 val, vid;
int spid;
int err;
- u16 val;
mv88e6xxx_reg_lock(chip);
@@ -577,7 +621,7 @@ static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id)
if (err)
goto out;
- err = mv88e6xxx_g1_vtu_vid_read(chip, &entry);
+ err = mv88e6xxx_g1_vtu_vid_read(chip, NULL, &vid);
if (err)
goto out;
@@ -585,13 +629,13 @@ static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id)
if (val & MV88E6XXX_G1_VTU_OP_MEMBER_VIOLATION) {
dev_err_ratelimited(chip->dev, "VTU member violation for vid %d, source port %d\n",
- entry.vid, spid);
+ vid, spid);
chip->ports[spid].vtu_member_violation++;
}
if (val & MV88E6XXX_G1_VTU_OP_MISS_VIOLATION) {
dev_dbg_ratelimited(chip->dev, "VTU miss violation for vid %d, source port %d\n",
- entry.vid, spid);
+ vid, spid);
chip->ports[spid].vtu_miss_violation++;
}
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index f3e27573a386..807aeaad9830 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -299,6 +299,8 @@
#define MV88E6352_G2_SCRATCH_CONFIG_DATA1_NO_CPU BIT(2)
#define MV88E6352_G2_SCRATCH_CONFIG_DATA2 0x72
#define MV88E6352_G2_SCRATCH_CONFIG_DATA2_P0_MODE_MASK 0x3
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA3 0x73
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA3_S_SEL BIT(1)
#define MV88E6352_G2_SCRATCH_GPIO_PCTL_GPIO 0
#define MV88E6352_G2_SCRATCH_GPIO_PCTL_TRIG 1
@@ -370,6 +372,7 @@ extern const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops;
int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
bool external);
+int mv88e6352_g2_scratch_port_has_serdes(struct mv88e6xxx_chip *chip, int port);
int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, u16 kind, u16 bin);
int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip, u16 *stats);
diff --git a/drivers/net/dsa/mv88e6xxx/global2_scratch.c b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
index eda710062933..a9d6e40321a2 100644
--- a/drivers/net/dsa/mv88e6xxx/global2_scratch.c
+++ b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
@@ -289,3 +289,31 @@ int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
return mv88e6xxx_g2_scratch_write(chip, misc_cfg, val);
}
+
+/**
+ * mv88e6352_g2_scratch_port_has_serdes - indicate if a port can have a serdes
+ * @chip: chip private data
+ * @port: port number to check for serdes
+ *
+ * Indicates whether the port may have a serdes attached according to the
+ * pin strapping. Returns negative error number, 0 if the port is not
+ * configured to have a serdes, and 1 if the port is configured to have a
+ * serdes attached.
+ */
+int mv88e6352_g2_scratch_port_has_serdes(struct mv88e6xxx_chip *chip, int port)
+{
+ u8 config3, p;
+ int err;
+
+ err = mv88e6xxx_g2_scratch_read(chip, MV88E6352_G2_SCRATCH_CONFIG_DATA3,
+ &config3);
+ if (err)
+ return err;
+
+ if (config3 & MV88E6352_G2_SCRATCH_CONFIG_DATA3_S_SEL)
+ p = 5;
+ else
+ p = 4;
+
+ return port == p;
+}
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
index 389f8a6ec0ab..331b4ca089ff 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -301,7 +301,7 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
shwt->hwtstamp = ns_to_ktime(ns);
status &= ~MV88E6XXX_PTP_TS_VALID;
}
- netif_rx_ni(skb);
+ netif_rx(skb);
}
}
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index ab41619a809b..795b3128768f 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -550,6 +550,9 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
mode = PHY_INTERFACE_MODE_1000BASEX;
switch (mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ cmode = MV88E6XXX_PORT_STS_CMODE_RMII;
+ break;
case PHY_INTERFACE_MODE_1000BASEX:
cmode = MV88E6XXX_PORT_STS_CMODE_1000BASEX;
break;
@@ -610,6 +613,8 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
chip->ports[port].cmode = cmode;
lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane == -ENODEV)
+ return 0;
if (lane < 0)
return lane;
@@ -1234,6 +1239,35 @@ int mv88e6xxx_port_set_mirror(struct mv88e6xxx_chip *chip, int port,
return err;
}
+int mv88e6xxx_port_set_lock(struct mv88e6xxx_chip *chip, int port,
+ bool locked)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
+ if (err)
+ return err;
+
+ reg &= ~MV88E6XXX_PORT_CTL0_SA_FILT_MASK;
+ if (locked)
+ reg |= MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_LOCK;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR, &reg);
+ if (err)
+ return err;
+
+ reg &= ~MV88E6XXX_PORT_ASSOC_VECTOR_LOCKED_PORT;
+ if (locked)
+ reg |= MV88E6XXX_PORT_ASSOC_VECTOR_LOCKED_PORT;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR, reg);
+}
+
int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
u16 mode)
{
@@ -1278,7 +1312,7 @@ int mv88e6xxx_port_drop_untagged(struct mv88e6xxx_chip *chip, int port,
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, new);
}
-int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port)
+int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port, bool map)
{
u16 reg;
int err;
@@ -1287,7 +1321,10 @@ int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port)
if (err)
return err;
- reg |= MV88E6XXX_PORT_CTL2_MAP_DA;
+ if (map)
+ reg |= MV88E6XXX_PORT_CTL2_MAP_DA;
+ else
+ reg &= ~MV88E6XXX_PORT_CTL2_MAP_DA;
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
}
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 03382b66f800..e0a705d82019 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -42,6 +42,11 @@
#define MV88E6XXX_PORT_STS_TX_PAUSED 0x0020
#define MV88E6XXX_PORT_STS_FLOW_CTL 0x0010
#define MV88E6XXX_PORT_STS_CMODE_MASK 0x000f
+#define MV88E6XXX_PORT_STS_CMODE_MII_PHY 0x0001
+#define MV88E6XXX_PORT_STS_CMODE_MII 0x0002
+#define MV88E6XXX_PORT_STS_CMODE_GMII 0x0003
+#define MV88E6XXX_PORT_STS_CMODE_RMII_PHY 0x0004
+#define MV88E6XXX_PORT_STS_CMODE_RMII 0x0005
#define MV88E6XXX_PORT_STS_CMODE_RGMII 0x0007
#define MV88E6XXX_PORT_STS_CMODE_100BASEX 0x0008
#define MV88E6XXX_PORT_STS_CMODE_1000BASEX 0x0009
@@ -142,7 +147,11 @@
/* Offset 0x04: Port Control Register */
#define MV88E6XXX_PORT_CTL0 0x04
#define MV88E6XXX_PORT_CTL0_USE_CORE_TAG 0x8000
-#define MV88E6XXX_PORT_CTL0_DROP_ON_LOCK 0x4000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_MASK 0xc000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_DISABLED 0x0000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_LOCK 0x4000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_UNLOCK 0x8000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_CPU 0xc000
#define MV88E6XXX_PORT_CTL0_EGRESS_MODE_MASK 0x3000
#define MV88E6XXX_PORT_CTL0_EGRESS_MODE_UNMODIFIED 0x0000
#define MV88E6XXX_PORT_CTL0_EGRESS_MODE_UNTAGGED 0x1000
@@ -365,6 +374,9 @@ int mv88e6xxx_port_set_fid(struct mv88e6xxx_chip *chip, int port, u16 fid);
int mv88e6xxx_port_get_pvid(struct mv88e6xxx_chip *chip, int port, u16 *pvid);
int mv88e6xxx_port_set_pvid(struct mv88e6xxx_chip *chip, int port, u16 pvid);
+int mv88e6xxx_port_set_lock(struct mv88e6xxx_chip *chip, int port,
+ bool locked);
+
int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
u16 mode);
int mv88e6095_port_tag_remap(struct mv88e6xxx_chip *chip, int port);
@@ -425,7 +437,7 @@ int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
int mv88e6xxx_port_drop_untagged(struct mv88e6xxx_chip *chip, int port,
bool drop_untagged);
-int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
+int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port, bool map);
int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
int upstream_port);
int mv88e6xxx_port_set_mirror(struct mv88e6xxx_chip *chip, int port,
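As a side note, the old single DROP_ON_LOCK bit becomes a two-bit SA filtering field here, and locking a port now touches both PORT_CTL0 and the port association vector. A minimal userspace sketch of the CTL0 half, reusing the mask values defined above (the LOCKED_PORT write to the association vector is omitted; not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define SA_FILT_MASK            0xc000
#define SA_FILT_DROP_ON_LOCK    0x4000

/* Read-modify-write of the 2-bit SA filtering field: clear the whole field
 * first, then select "drop on lock" while the port is locked.
 */
static uint16_t ctl0_set_locked(uint16_t ctl0, int locked)
{
        ctl0 &= ~SA_FILT_MASK;
        if (locked)
                ctl0 |= SA_FILT_DROP_ON_LOCK;

        return ctl0;
}

int main(void)
{
        printf("locked:   0x%04x\n", ctl0_set_locked(0x007f, 1));       /* 0x407f */
        printf("unlocked: 0x%04x\n", ctl0_set_locked(0x407f, 0));       /* 0x007f */
        return 0;
}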
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 2b05ead515cd..7b37d45bc9fb 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -272,14 +272,6 @@ int mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
return lane;
}
-static bool mv88e6352_port_has_serdes(struct mv88e6xxx_chip *chip, int port)
-{
- if (mv88e6xxx_serdes_get_lane(chip, port) >= 0)
- return true;
-
- return false;
-}
-
struct mv88e6352_serdes_hw_stat {
char string[ETH_GSTRING_LEN];
int sizeof_stat;
@@ -293,20 +285,24 @@ static struct mv88e6352_serdes_hw_stat mv88e6352_serdes_hw_stats[] = {
int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port)
{
- if (mv88e6352_port_has_serdes(chip, port))
- return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
+ int err;
- return 0;
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
+ return err;
+
+ return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
}
int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
int port, uint8_t *data)
{
struct mv88e6352_serdes_hw_stat *stat;
- int i;
+ int err, i;
- if (!mv88e6352_port_has_serdes(chip, port))
- return 0;
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
+ return err;
for (i = 0; i < ARRAY_SIZE(mv88e6352_serdes_hw_stats); i++) {
stat = &mv88e6352_serdes_hw_stats[i];
@@ -348,11 +344,12 @@ int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
{
struct mv88e6xxx_port *mv88e6xxx_port = &chip->ports[port];
struct mv88e6352_serdes_hw_stat *stat;
+ int i, err;
u64 value;
- int i;
- if (!mv88e6352_port_has_serdes(chip, port))
- return 0;
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
+ return err;
BUILD_BUG_ON(ARRAY_SIZE(mv88e6352_serdes_hw_stats) >
ARRAY_SIZE(mv88e6xxx_port->serdes_stats));
@@ -419,8 +416,13 @@ unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port)
int mv88e6352_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port)
{
- if (!mv88e6352_port_has_serdes(chip, port))
- return 0;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ mv88e6xxx_reg_unlock(chip);
+ if (err <= 0)
+ return err;
return 32 * sizeof(u16);
}
@@ -432,7 +434,8 @@ void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
int err;
int i;
- if (!mv88e6352_port_has_serdes(chip, port))
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
return;
for (i = 0 ; i < 32; i++) {
@@ -1310,6 +1313,44 @@ void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
}
}
+static const int mv88e6352_serdes_p2p_to_reg[] = {
+ /* Index of value in microvolts corresponds to the register value */
+ 14000, 112000, 210000, 308000, 406000, 504000, 602000, 700000,
+};
+
+int mv88e6352_serdes_set_tx_amplitude(struct mv88e6xxx_chip *chip, int port,
+ int val)
+{
+ bool found = false;
+ u16 ctrl, reg;
+ int err;
+ int i;
+
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6352_serdes_p2p_to_reg); ++i) {
+ if (mv88e6352_serdes_p2p_to_reg[i] == val) {
+ reg = i;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ err = mv88e6352_serdes_read(chip, MV88E6352_SERDES_SPEC_CTRL2, &ctrl);
+ if (err)
+ return err;
+
+ ctrl &= ~MV88E6352_SERDES_OUT_AMP_MASK;
+ ctrl |= reg;
+
+ return mv88e6352_serdes_write(chip, MV88E6352_SERDES_SPEC_CTRL2, ctrl);
+}
+
static int mv88e6393x_serdes_power_lane(struct mv88e6xxx_chip *chip, int lane,
bool on)
{
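The amplitude table above maps microvolt values to register codes purely by position, and values not in the table are rejected rather than rounded. A self-contained sketch of that exact-match lookup, reusing the eight values from the patch, with userspace errno constants standing in for the kernel's (illustration only):

#include <errno.h>
#include <stdio.h>

static const int p2p_uV[] = {
        14000, 112000, 210000, 308000, 406000, 504000, 602000, 700000,
};

/* The register value is simply the index of the requested amplitude in the
 * table; anything not listed is refused.
 */
static int amplitude_to_reg(int uV)
{
        unsigned int i;

        for (i = 0; i < sizeof(p2p_uV) / sizeof(p2p_uV[0]); i++)
                if (p2p_uV[i] == uV)
                        return i;

        return -EINVAL;
}

int main(void)
{
        printf("210000 uV -> %d\n", amplitude_to_reg(210000));  /* 2 */
        printf("200000 uV -> %d\n", amplitude_to_reg(200000));  /* -EINVAL */
        return 0;
}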
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index 8dd8ed225b45..29bb4e91e2f6 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -27,6 +27,8 @@
#define MV88E6352_SERDES_INT_FIBRE_ENERGY BIT(4)
#define MV88E6352_SERDES_INT_STATUS 0x13
+#define MV88E6352_SERDES_SPEC_CTRL2 0x1a
+#define MV88E6352_SERDES_OUT_AMP_MASK 0x0007
#define MV88E6341_PORT5_LANE 0x15
@@ -176,6 +178,9 @@ void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p);
int mv88e6390_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port);
void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p);
+int mv88e6352_serdes_set_tx_amplitude(struct mv88e6xxx_chip *chip, int port,
+ int val);
+
/* Return the (first) SERDES lane address a port is using, -errno otherwise. */
static inline int mv88e6xxx_serdes_get_lane(struct mv88e6xxx_chip *chip,
int port)
diff --git a/drivers/net/dsa/mv88e6xxx/smi.c b/drivers/net/dsa/mv88e6xxx/smi.c
index 282fe08db050..a990271b7482 100644
--- a/drivers/net/dsa/mv88e6xxx/smi.c
+++ b/drivers/net/dsa/mv88e6xxx/smi.c
@@ -55,11 +55,15 @@ static int mv88e6xxx_smi_direct_write(struct mv88e6xxx_chip *chip,
static int mv88e6xxx_smi_direct_wait(struct mv88e6xxx_chip *chip,
int dev, int reg, int bit, int val)
{
+ const unsigned long timeout = jiffies + msecs_to_jiffies(50);
u16 data;
int err;
int i;
- for (i = 0; i < 16; i++) {
+ /* Even if the initial poll takes longer than 50ms, always do
+ * at least one more attempt.
+ */
+ for (i = 0; time_before(jiffies, timeout) || (i < 2); i++) {
err = mv88e6xxx_smi_direct_read(chip, dev, reg, &data);
if (err)
return err;
@@ -67,7 +71,10 @@ static int mv88e6xxx_smi_direct_wait(struct mv88e6xxx_chip *chip,
if (!!(data & BIT(bit)) == !!val)
return 0;
- usleep_range(1000, 2000);
+ if (i < 2)
+ cpu_relax();
+ else
+ usleep_range(1000, 2000);
}
return -ETIMEDOUT;
@@ -104,11 +111,6 @@ static int mv88e6xxx_smi_indirect_read(struct mv88e6xxx_chip *chip,
{
int err;
- err = mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
- MV88E6XXX_SMI_CMD, 15, 0);
- if (err)
- return err;
-
err = mv88e6xxx_smi_direct_write(chip, chip->sw_addr,
MV88E6XXX_SMI_CMD,
MV88E6XXX_SMI_CMD_BUSY |
@@ -132,11 +134,6 @@ static int mv88e6xxx_smi_indirect_write(struct mv88e6xxx_chip *chip,
{
int err;
- err = mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
- MV88E6XXX_SMI_CMD, 15, 0);
- if (err)
- return err;
-
err = mv88e6xxx_smi_direct_write(chip, chip->sw_addr,
MV88E6XXX_SMI_DATA, data);
if (err)
@@ -155,9 +152,20 @@ static int mv88e6xxx_smi_indirect_write(struct mv88e6xxx_chip *chip,
MV88E6XXX_SMI_CMD, 15, 0);
}
+static int mv88e6xxx_smi_indirect_init(struct mv88e6xxx_chip *chip)
+{
+ /* Ensure that the chip starts out in the ready state. As both
+ * reads and writes always ensure this on return, they can
+ * safely depend on the chip not being busy on entry.
+ */
+ return mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
+ MV88E6XXX_SMI_CMD, 15, 0);
+}
+
static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_indirect_ops = {
.read = mv88e6xxx_smi_indirect_read,
.write = mv88e6xxx_smi_indirect_write,
+ .init = mv88e6xxx_smi_indirect_init,
};
int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
@@ -175,5 +183,8 @@ int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
chip->bus = bus;
chip->sw_addr = sw_addr;
+ if (chip->smi_ops->init)
+ return chip->smi_ops->init(chip);
+
return 0;
}
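The reworked wait loop above bounds the poll by elapsed time instead of a fixed iteration count, while still guaranteeing at least two attempts so a scheduling delay before the first poll cannot fake a timeout. A userspace approximation of the same pattern, with the BUSY-bit read stubbed to always fail and nanosleep standing in for usleep_range (a sketch, not driver code):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool poll_ready(void)
{
        return false;   /* stand-in for reading the BUSY bit of SMI_CMD */
}

static long elapsed_ms(const struct timespec *start)
{
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return (now.tv_sec - start->tv_sec) * 1000 +
               (now.tv_nsec - start->tv_nsec) / 1000000;
}

static int wait_ready(long timeout_ms)
{
        struct timespec ts = { 0, 1000000 };    /* ~1ms back-off between later polls */
        struct timespec start;
        int i;

        clock_gettime(CLOCK_MONOTONIC, &start);
        /* Keep polling while time remains, but always make at least two attempts. */
        for (i = 0; elapsed_ms(&start) < timeout_ms || i < 2; i++) {
                if (poll_ready())
                        return 0;
                if (i >= 2)
                        nanosleep(&ts, NULL);   /* the first two tries are busy polls */
        }

        return -1;      /* the driver returns -ETIMEDOUT here */
}

int main(void)
{
        printf("wait_ready: %d\n", wait_ready(50));
        return 0;
}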
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 9957772201d5..413b0006e9a2 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -25,21 +25,151 @@
#include <net/dsa.h>
#include "felix.h"
-static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
- bool pvid, bool untagged)
+/* Translate the DSA database API into the ocelot switch library API,
+ * which uses VID 0 for all ports that aren't part of a bridge,
+ * and expects the bridge_dev to be NULL in that case.
+ */
+static struct net_device *felix_classify_db(struct dsa_db db)
+{
+ switch (db.type) {
+ case DSA_DB_PORT:
+ case DSA_DB_LAG:
+ return NULL;
+ case DSA_DB_BRIDGE:
+ return db.bridge.dev;
+ default:
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+}
+
+/* We are called before felix_npi_port_init(), so ocelot->npi is -1. */
+static int felix_migrate_fdbs_to_npi_port(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct ocelot *ocelot = ds->priv;
+ int cpu = ocelot->num_phys_ports;
+ int err;
+
+ err = ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev);
+ if (err)
+ return err;
+
+ return ocelot_fdb_add(ocelot, cpu, addr, vid, bridge_dev);
+}
+
+static int felix_migrate_mdbs_to_npi_port(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct switchdev_obj_port_mdb mdb;
+ struct ocelot *ocelot = ds->priv;
+ int cpu = ocelot->num_phys_ports;
+ int err;
+
+ memset(&mdb, 0, sizeof(mdb));
+ ether_addr_copy(mdb.addr, addr);
+ mdb.vid = vid;
+
+ err = ocelot_port_mdb_del(ocelot, port, &mdb, bridge_dev);
+ if (err)
+ return err;
+
+ return ocelot_port_mdb_add(ocelot, cpu, &mdb, bridge_dev);
+}
+
+static void felix_migrate_pgid_bit(struct dsa_switch *ds, int from, int to,
+ int pgid)
+{
+ struct ocelot *ocelot = ds->priv;
+ bool on;
+ u32 val;
+
+ val = ocelot_read_rix(ocelot, ANA_PGID_PGID, pgid);
+ on = !!(val & BIT(from));
+ val &= ~BIT(from);
+ if (on)
+ val |= BIT(to);
+ else
+ val &= ~BIT(to);
+
+ ocelot_write_rix(ocelot, val, ANA_PGID_PGID, pgid);
+}
+
+static void felix_migrate_flood_to_npi_port(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ felix_migrate_pgid_bit(ds, port, ocelot->num_phys_ports, PGID_UC);
+ felix_migrate_pgid_bit(ds, port, ocelot->num_phys_ports, PGID_MC);
+ felix_migrate_pgid_bit(ds, port, ocelot->num_phys_ports, PGID_BC);
+}
+
+static void
+felix_migrate_flood_to_tag_8021q_port(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ felix_migrate_pgid_bit(ds, ocelot->num_phys_ports, port, PGID_UC);
+ felix_migrate_pgid_bit(ds, ocelot->num_phys_ports, port, PGID_MC);
+ felix_migrate_pgid_bit(ds, ocelot->num_phys_ports, port, PGID_BC);
+}
+
+/* ocelot->npi was already set to -1 by felix_npi_port_deinit, so
+ * ocelot_fdb_add() will not redirect FDB entries towards the
+ * CPU port module here, which is what we want.
+ */
+static int
+felix_migrate_fdbs_to_tag_8021q_port(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct ocelot *ocelot = ds->priv;
+ int cpu = ocelot->num_phys_ports;
+ int err;
+
+ err = ocelot_fdb_del(ocelot, cpu, addr, vid, bridge_dev);
+ if (err)
+ return err;
+
+ return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev);
+}
+
+static int
+felix_migrate_mdbs_to_tag_8021q_port(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct switchdev_obj_port_mdb mdb;
+ struct ocelot *ocelot = ds->priv;
+ int cpu = ocelot->num_phys_ports;
+ int err;
+
+ memset(&mdb, 0, sizeof(mdb));
+ ether_addr_copy(mdb.addr, addr);
+ mdb.vid = vid;
+
+ err = ocelot_port_mdb_del(ocelot, cpu, &mdb, bridge_dev);
+ if (err)
+ return err;
+
+ return ocelot_port_mdb_add(ocelot, port, &mdb, bridge_dev);
+}
+
+/* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that
+ * the tagger can perform RX source port identification.
+ */
+static int felix_tag_8021q_vlan_add_rx(struct felix *felix, int port, u16 vid)
{
struct ocelot_vcap_filter *outer_tagging_rule;
struct ocelot *ocelot = &felix->ocelot;
struct dsa_switch *ds = felix->ds;
int key_length, upstream, err;
- /* We don't need to install the rxvlan into the other ports' filtering
- * tables, because we're just pushing the rxvlan when sending towards
- * the CPU
- */
- if (!pvid)
- return 0;
-
key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length;
upstream = dsa_upstream_port(ds, port);
@@ -50,7 +180,7 @@ static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
outer_tagging_rule->prio = 1;
- outer_tagging_rule->id.cookie = port;
+ outer_tagging_rule->id.cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port);
outer_tagging_rule->id.tc_offload = false;
outer_tagging_rule->block_id = VCAP_ES0;
outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -71,21 +201,32 @@ static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
return err;
}
-static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
- bool pvid, bool untagged)
+static int felix_tag_8021q_vlan_del_rx(struct felix *felix, int port, u16 vid)
+{
+ struct ocelot_vcap_filter *outer_tagging_rule;
+ struct ocelot_vcap_block *block_vcap_es0;
+ struct ocelot *ocelot = &felix->ocelot;
+
+ block_vcap_es0 = &ocelot->block[VCAP_ES0];
+
+ outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
+ port, false);
+ if (!outer_tagging_rule)
+ return -ENOENT;
+
+ return ocelot_vcap_filter_del(ocelot, outer_tagging_rule);
+}
+
+/* Set up VCAP IS1 rules for stripping the tag_8021q VLAN on TX and VCAP IS2
+ * rules for steering those tagged packets towards the correct destination port
+ */
+static int felix_tag_8021q_vlan_add_tx(struct felix *felix, int port, u16 vid)
{
struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
struct ocelot *ocelot = &felix->ocelot;
struct dsa_switch *ds = felix->ds;
int upstream, err;
- /* tag_8021q.c assumes we are implementing this via port VLAN
- * membership, which we aren't. So we don't need to add any VCAP filter
- * for the CPU port.
- */
- if (ocelot->ports[port]->is_dsa_8021q_cpu)
- return 0;
-
untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
if (!untagging_rule)
return -ENOMEM;
@@ -103,7 +244,7 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
untagging_rule->vlan.vid.value = vid;
untagging_rule->vlan.vid.mask = VLAN_VID_MASK;
untagging_rule->prio = 1;
- untagging_rule->id.cookie = port;
+ untagging_rule->id.cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port);
untagging_rule->id.tc_offload = false;
untagging_rule->block_id = VCAP_IS1;
untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -124,7 +265,7 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
redirect_rule->ingress_port_mask = BIT(upstream);
redirect_rule->pag = port;
redirect_rule->prio = 1;
- redirect_rule->id.cookie = port;
+ redirect_rule->id.cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port);
redirect_rule->id.tc_offload = false;
redirect_rule->block_id = VCAP_IS2;
redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -142,49 +283,7 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
return 0;
}
-static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
- u16 flags)
-{
- bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
- bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
- struct ocelot *ocelot = ds->priv;
-
- if (vid_is_dsa_8021q_rxvlan(vid))
- return felix_tag_8021q_rxvlan_add(ocelot_to_felix(ocelot),
- port, vid, pvid, untagged);
-
- if (vid_is_dsa_8021q_txvlan(vid))
- return felix_tag_8021q_txvlan_add(ocelot_to_felix(ocelot),
- port, vid, pvid, untagged);
-
- return 0;
-}
-
-static int felix_tag_8021q_rxvlan_del(struct felix *felix, int port, u16 vid)
-{
- struct ocelot_vcap_filter *outer_tagging_rule;
- struct ocelot_vcap_block *block_vcap_es0;
- struct ocelot *ocelot = &felix->ocelot;
-
- block_vcap_es0 = &ocelot->block[VCAP_ES0];
-
- outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
- port, false);
- /* In rxvlan_add, we had the "if (!pvid) return 0" logic to avoid
- * installing outer tagging ES0 rules where they weren't needed.
- * But in rxvlan_del, the API doesn't give us the "flags" anymore,
- * so that forces us to be slightly sloppy here, and just assume that
- * if we didn't find an outer_tagging_rule it means that there was
- * none in the first place, i.e. rxvlan_del is called on a non-pvid
- * port. This is most probably true though.
- */
- if (!outer_tagging_rule)
- return 0;
-
- return ocelot_vcap_filter_del(ocelot, outer_tagging_rule);
-}
-
-static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid)
+static int felix_tag_8021q_vlan_del_tx(struct felix *felix, int port, u16 vid)
{
struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
struct ocelot_vcap_block *block_vcap_is1;
@@ -192,16 +291,13 @@ static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid)
struct ocelot *ocelot = &felix->ocelot;
int err;
- if (ocelot->ports[port]->is_dsa_8021q_cpu)
- return 0;
-
block_vcap_is1 = &ocelot->block[VCAP_IS1];
block_vcap_is2 = &ocelot->block[VCAP_IS2];
untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
port, false);
if (!untagging_rule)
- return 0;
+ return -ENOENT;
err = ocelot_vcap_filter_del(ocelot, untagging_rule);
if (err)
@@ -210,22 +306,54 @@ static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid)
redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
port, false);
if (!redirect_rule)
- return 0;
+ return -ENOENT;
return ocelot_vcap_filter_del(ocelot, redirect_rule);
}
+static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
+ u16 flags)
+{
+ struct ocelot *ocelot = ds->priv;
+ int err;
+
+ /* tag_8021q.c assumes we are implementing this via port VLAN
+ * membership, which we aren't. So we don't need to add any VCAP filter
+ * for the CPU port.
+ */
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
+ err = felix_tag_8021q_vlan_add_rx(ocelot_to_felix(ocelot), port, vid);
+ if (err)
+ return err;
+
+ err = felix_tag_8021q_vlan_add_tx(ocelot_to_felix(ocelot), port, vid);
+ if (err) {
+ felix_tag_8021q_vlan_del_rx(ocelot_to_felix(ocelot), port, vid);
+ return err;
+ }
+
+ return 0;
+}
+
static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
struct ocelot *ocelot = ds->priv;
+ int err;
- if (vid_is_dsa_8021q_rxvlan(vid))
- return felix_tag_8021q_rxvlan_del(ocelot_to_felix(ocelot),
- port, vid);
+ if (!dsa_is_user_port(ds, port))
+ return 0;
- if (vid_is_dsa_8021q_txvlan(vid))
- return felix_tag_8021q_txvlan_del(ocelot_to_felix(ocelot),
- port, vid);
+ err = felix_tag_8021q_vlan_del_rx(ocelot_to_felix(ocelot), port, vid);
+ if (err)
+ return err;
+
+ err = felix_tag_8021q_vlan_del_tx(ocelot_to_felix(ocelot), port, vid);
+ if (err) {
+ felix_tag_8021q_vlan_add_rx(ocelot_to_felix(ocelot), port, vid);
+ return err;
+ }
return 0;
}
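felix_tag_8021q_vlan_add() and _del() now pair the RX and TX rule updates and unwind the first step when the second fails. The error-rollback shape, reduced to a standalone sketch with stubbed rule helpers and the TX step forced to fail (illustration only, not the driver's code path):

#include <errno.h>
#include <stdio.h>

static int add_rx_rule(int port, int vid)  { (void)port; (void)vid; return 0; }
static int del_rx_rule(int port, int vid)  { (void)port; (void)vid; return 0; }
static int add_tx_rules(int port, int vid) { (void)port; (void)vid; return -ENOMEM; }

static int vlan_add(int port, int vid)
{
        int err;

        err = add_rx_rule(port, vid);
        if (err)
                return err;

        err = add_tx_rules(port, vid);
        if (err) {
                del_rx_rule(port, vid); /* undo the first step on failure */
                return err;
        }

        return 0;
}

int main(void)
{
        printf("vlan_add: %d\n", vlan_add(1, 3072));
        return 0;
}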
@@ -241,8 +369,7 @@ static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port)
{
mutex_lock(&ocelot->fwd_domain_lock);
- ocelot->ports[port]->is_dsa_8021q_cpu = true;
- ocelot->npi = -1;
+ ocelot_port_set_dsa_8021q_cpu(ocelot, port);
/* Overwrite PGID_CPU with the non-tagging port */
ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU);
@@ -256,7 +383,7 @@ static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
{
mutex_lock(&ocelot->fwd_domain_lock);
- ocelot->ports[port]->is_dsa_8021q_cpu = false;
+ ocelot_port_unset_dsa_8021q_cpu(ocelot, port);
/* Restore PGID_CPU */
ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID,
@@ -267,148 +394,81 @@ static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
mutex_unlock(&ocelot->fwd_domain_lock);
}
-/* Set up a VCAP IS2 rule for delivering PTP frames to the CPU port module.
- * If the quirk_no_xtr_irq is in place, then also copy those PTP frames to the
- * tag_8021q CPU port.
+/* On switches with no extraction IRQ wired, trapped packets need to be
+ * replicated over Ethernet as well, otherwise we'd get no notification of
+ * their arrival when using the ocelot-8021q tagging protocol.
*/
-static int felix_setup_mmio_filtering(struct felix *felix)
+static int felix_update_trapping_destinations(struct dsa_switch *ds,
+ bool using_tag_8021q)
{
- unsigned long user_ports = dsa_user_ports(felix->ds);
- struct ocelot_vcap_filter *redirect_rule;
- struct ocelot_vcap_filter *tagging_rule;
- struct ocelot *ocelot = &felix->ocelot;
- struct dsa_switch *ds = felix->ds;
- int cpu = -1, port, ret;
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct ocelot_vcap_filter *trap;
+ enum ocelot_mask_mode mask_mode;
+ unsigned long port_mask;
+ struct dsa_port *dp;
+ bool cpu_copy_ena;
+ int cpu = -1, err;
- tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
- if (!tagging_rule)
- return -ENOMEM;
+ if (!felix->info->quirk_no_xtr_irq)
+ return 0;
- redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
- if (!redirect_rule) {
- kfree(tagging_rule);
- return -ENOMEM;
+ /* Figure out the current CPU port */
+ dsa_switch_for_each_cpu_port(dp, ds) {
+ cpu = dp->index;
+ break;
}
- for (port = 0; port < ocelot->num_phys_ports; port++) {
- if (dsa_is_cpu_port(ds, port)) {
- cpu = port;
- break;
- }
- }
+ /* We are sure that "cpu" was found, otherwise
+ * dsa_tree_setup_default_cpu() would have failed earlier.
+ */
- if (cpu < 0) {
- kfree(tagging_rule);
- kfree(redirect_rule);
- return -EINVAL;
- }
+ /* Make sure all traps are set up for that destination */
+ list_for_each_entry(trap, &ocelot->traps, trap_list) {
+ /* Figure out the current trapping destination */
+ if (using_tag_8021q) {
+ /* Redirect to the tag_8021q CPU port. If timestamps
+ * are necessary, also copy trapped packets to the CPU
+ * port module.
+ */
+ mask_mode = OCELOT_MASK_MODE_REDIRECT;
+ port_mask = BIT(cpu);
+ cpu_copy_ena = !!trap->take_ts;
+ } else {
+ /* Trap packets only to the CPU port module, which is
+ * redirected to the NPI port (the DSA CPU port)
+ */
+ mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
+ port_mask = 0;
+ cpu_copy_ena = true;
+ }
- tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
- *(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
- *(__be16 *)tagging_rule->key.etype.etype.mask = htons(0xffff);
- tagging_rule->ingress_port_mask = user_ports;
- tagging_rule->prio = 1;
- tagging_rule->id.cookie = ocelot->num_phys_ports;
- tagging_rule->id.tc_offload = false;
- tagging_rule->block_id = VCAP_IS1;
- tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
- tagging_rule->lookup = 0;
- tagging_rule->action.pag_override_mask = 0xff;
- tagging_rule->action.pag_val = ocelot->num_phys_ports;
-
- ret = ocelot_vcap_filter_add(ocelot, tagging_rule, NULL);
- if (ret) {
- kfree(tagging_rule);
- kfree(redirect_rule);
- return ret;
- }
+ if (trap->action.mask_mode == mask_mode &&
+ trap->action.port_mask == port_mask &&
+ trap->action.cpu_copy_ena == cpu_copy_ena)
+ continue;
- redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
- redirect_rule->ingress_port_mask = user_ports;
- redirect_rule->pag = ocelot->num_phys_ports;
- redirect_rule->prio = 1;
- redirect_rule->id.cookie = ocelot->num_phys_ports;
- redirect_rule->id.tc_offload = false;
- redirect_rule->block_id = VCAP_IS2;
- redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
- redirect_rule->lookup = 0;
- redirect_rule->action.cpu_copy_ena = true;
- if (felix->info->quirk_no_xtr_irq) {
- /* Redirect to the tag_8021q CPU but also copy PTP packets to
- * the CPU port module
- */
- redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
- redirect_rule->action.port_mask = BIT(cpu);
- } else {
- /* Trap PTP packets only to the CPU port module (which is
- * redirected to the NPI port)
- */
- redirect_rule->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
- redirect_rule->action.port_mask = 0;
- }
+ trap->action.mask_mode = mask_mode;
+ trap->action.port_mask = port_mask;
+ trap->action.cpu_copy_ena = cpu_copy_ena;
- ret = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
- if (ret) {
- ocelot_vcap_filter_del(ocelot, tagging_rule);
- kfree(redirect_rule);
- return ret;
+ err = ocelot_vcap_filter_replace(ocelot, trap);
+ if (err)
+ return err;
}
- /* The ownership of the CPU port module's queues might have just been
- * transferred to the tag_8021q tagger from the NPI-based tagger.
- * So there might still be all sorts of crap in the queues. On the
- * other hand, the MMIO-based matching of PTP frames is very brittle,
- * so we need to be careful that there are no extra frames to be
- * dequeued over MMIO, since we would never know to discard them.
- */
- ocelot_drain_cpu_queue(ocelot, 0);
-
return 0;
}
-static int felix_teardown_mmio_filtering(struct felix *felix)
-{
- struct ocelot_vcap_filter *tagging_rule, *redirect_rule;
- struct ocelot_vcap_block *block_vcap_is1;
- struct ocelot_vcap_block *block_vcap_is2;
- struct ocelot *ocelot = &felix->ocelot;
- int err;
-
- block_vcap_is1 = &ocelot->block[VCAP_IS1];
- block_vcap_is2 = &ocelot->block[VCAP_IS2];
-
- tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
- ocelot->num_phys_ports,
- false);
- if (!tagging_rule)
- return -ENOENT;
-
- err = ocelot_vcap_filter_del(ocelot, tagging_rule);
- if (err)
- return err;
-
- redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
- ocelot->num_phys_ports,
- false);
- if (!redirect_rule)
- return -ENOENT;
-
- return ocelot_vcap_filter_del(ocelot, redirect_rule);
-}
-
static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
{
struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
- unsigned long cpu_flood;
- int port, err;
+ struct dsa_port *dp;
+ int err;
felix_8021q_cpu_port_init(ocelot, cpu);
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
-
+ dsa_switch_for_each_available_port(dp, ds) {
/* This overwrites ocelot_init():
* Do not forward BPDU frames to the CPU port module,
* for 2 reasons:
@@ -421,28 +481,43 @@ static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
*/
ocelot_write_gix(ocelot,
ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
- ANA_PORT_CPU_FWD_BPDU_CFG, port);
+ ANA_PORT_CPU_FWD_BPDU_CFG, dp->index);
}
- /* In tag_8021q mode, the CPU port module is unused, except for PTP
- * frames. So we want to disable flooding of any kind to the CPU port
- * module, since packets going there will end in a black hole.
- */
- cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
- ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC);
- ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC);
- ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_BC);
-
err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD));
if (err)
return err;
- err = felix_setup_mmio_filtering(felix);
+ err = dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_tag_8021q_port);
if (err)
goto out_tag_8021q_unregister;
+ err = dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_tag_8021q_port);
+ if (err)
+ goto out_migrate_fdbs;
+
+ felix_migrate_flood_to_tag_8021q_port(ds, cpu);
+
+ err = felix_update_trapping_destinations(ds, true);
+ if (err)
+ goto out_migrate_flood;
+
+ /* The ownership of the CPU port module's queues might have just been
+ * transferred to the tag_8021q tagger from the NPI-based tagger.
+ * So there might still be all sorts of crap in the queues. On the
+ * other hand, the MMIO-based matching of PTP frames is very brittle,
+ * so we need to be careful that there are no extra frames to be
+ * dequeued over MMIO, since we would never know to discard them.
+ */
+ ocelot_drain_cpu_queue(ocelot, 0);
+
return 0;
+out_migrate_flood:
+ felix_migrate_flood_to_npi_port(ds, cpu);
+ dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_npi_port);
+out_migrate_fdbs:
+ dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_npi_port);
out_tag_8021q_unregister:
dsa_tag_8021q_unregister(ds);
return err;
@@ -451,27 +526,24 @@ out_tag_8021q_unregister:
static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu)
{
struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
- int err, port;
+ struct dsa_port *dp;
+ int err;
- err = felix_teardown_mmio_filtering(felix);
+ err = felix_update_trapping_destinations(ds, false);
if (err)
dev_err(ds->dev, "felix_teardown_mmio_filtering returned %d",
err);
dsa_tag_8021q_unregister(ds);
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
-
+ dsa_switch_for_each_available_port(dp, ds) {
/* Restore the logic from ocelot_init:
* do not forward BPDU frames to the front ports.
*/
ocelot_write_gix(ocelot,
ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
ANA_PORT_CPU_FWD_BPDU_CFG,
- port);
+ dp->index);
}
felix_8021q_cpu_port_deinit(ocelot, cpu);
@@ -523,27 +595,26 @@ static void felix_npi_port_deinit(struct ocelot *ocelot, int port)
static int felix_setup_tag_npi(struct dsa_switch *ds, int cpu)
{
struct ocelot *ocelot = ds->priv;
- unsigned long cpu_flood;
+ int err;
- felix_npi_port_init(ocelot, cpu);
+ err = dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_npi_port);
+ if (err)
+ return err;
- /* Include the CPU port module (and indirectly, the NPI port)
- * in the forwarding mask for unknown unicast - the hardware
- * default value for ANA_FLOODING_FLD_UNICAST excludes
- * BIT(ocelot->num_phys_ports), and so does ocelot_init,
- * since Ocelot relies on whitelisting MAC addresses towards
- * PGID_CPU.
- * We do this because DSA does not yet perform RX filtering,
- * and the NPI port does not perform source address learning,
- * so traffic sent to Linux is effectively unknown from the
- * switch's perspective.
- */
- cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
- ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_UC);
- ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_MC);
- ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_BC);
+ err = dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_npi_port);
+ if (err)
+ goto out_migrate_fdbs;
+
+ felix_migrate_flood_to_npi_port(ds, cpu);
+
+ felix_npi_port_init(ocelot, cpu);
return 0;
+
+out_migrate_fdbs:
+ dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_tag_8021q_port);
+
+ return err;
}
static void felix_teardown_tag_npi(struct dsa_switch *ds, int cpu)
@@ -659,35 +730,97 @@ static int felix_fdb_dump(struct dsa_switch *ds, int port,
}
static int felix_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
+ struct net_device *bridge_dev = felix_classify_db(db);
struct ocelot *ocelot = ds->priv;
- return ocelot_fdb_add(ocelot, port, addr, vid);
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
+ dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
+ return 0;
+
+ return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev);
}
static int felix_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
+ struct net_device *bridge_dev = felix_classify_db(db);
struct ocelot *ocelot = ds->priv;
- return ocelot_fdb_del(ocelot, port, addr, vid);
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
+ dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
+ return 0;
+
+ return ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev);
+}
+
+static int felix_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag lag,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct ocelot *ocelot = ds->priv;
+
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ return ocelot_lag_fdb_add(ocelot, lag.dev, addr, vid, bridge_dev);
+}
+
+static int felix_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag lag,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct ocelot *ocelot = ds->priv;
+
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ return ocelot_lag_fdb_del(ocelot, lag.dev, addr, vid, bridge_dev);
}
static int felix_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
+ struct net_device *bridge_dev = felix_classify_db(db);
struct ocelot *ocelot = ds->priv;
- return ocelot_port_mdb_add(ocelot, port, mdb);
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
+ dsa_mdb_present_in_other_db(ds, port, mdb, db))
+ return 0;
+
+ return ocelot_port_mdb_add(ocelot, port, mdb, bridge_dev);
}
static int felix_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
+ struct net_device *bridge_dev = felix_classify_db(db);
struct ocelot *ocelot = ds->priv;
- return ocelot_port_mdb_del(ocelot, port, mdb);
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
+ dsa_mdb_present_in_other_db(ds, port, mdb, db))
+ return 0;
+
+ return ocelot_port_mdb_del(ocelot, port, mdb, bridge_dev);
}
static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port,
@@ -719,13 +852,13 @@ static int felix_bridge_flags(struct dsa_switch *ds, int port,
}
static int felix_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge, bool *tx_fwd_offload)
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
- ocelot_port_bridge_join(ocelot, port, bridge.dev);
-
- return 0;
+ return ocelot_port_bridge_join(ocelot, port, bridge.dev, bridge.num,
+ extack);
}
static void felix_bridge_leave(struct dsa_switch *ds, int port,
@@ -737,20 +870,20 @@ static void felix_bridge_leave(struct dsa_switch *ds, int port,
}
static int felix_lag_join(struct dsa_switch *ds, int port,
- struct net_device *bond,
+ struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct ocelot *ocelot = ds->priv;
- return ocelot_port_lag_join(ocelot, port, bond, info);
+ return ocelot_port_lag_join(ocelot, port, lag.dev, info);
}
static int felix_lag_leave(struct dsa_switch *ds, int port,
- struct net_device *bond)
+ struct dsa_lag lag)
{
struct ocelot *ocelot = ds->priv;
- ocelot_port_lag_leave(ocelot, port, bond);
+ ocelot_port_lag_leave(ocelot, port, lag.dev);
return 0;
}
@@ -822,6 +955,21 @@ static int felix_vlan_del(struct dsa_switch *ds, int port,
return ocelot_vlan_del(ocelot, port, vlan->vid);
}
+static void felix_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ /* This driver does not make use of the speed, duplex, pause or the
+ * advertisement in its mac_config, so it is safe to mark this driver
+ * as non-legacy.
+ */
+ config->legacy_pre_march2020 = false;
+
+ __set_bit(ocelot->ports[port]->phy_mode,
+ config->supported_interfaces);
+}
+
static void felix_phylink_validate(struct dsa_switch *ds, int port,
unsigned long *supported,
struct phylink_link_state *state)
@@ -833,16 +981,18 @@ static void felix_phylink_validate(struct dsa_switch *ds, int port,
felix->info->phylink_validate(ocelot, port, supported, state);
}
-static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
- unsigned int link_an_mode,
- const struct phylink_link_state *state)
+static struct phylink_pcs *felix_phylink_mac_select_pcs(struct dsa_switch *ds,
+ int port,
+ phy_interface_t iface)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
- struct dsa_port *dp = dsa_to_port(ds, port);
+ struct phylink_pcs *pcs = NULL;
if (felix->pcs && felix->pcs[port])
- phylink_set_pcs(dp->pl, felix->pcs[port]);
+ pcs = felix->pcs[port];
+
+ return pcs;
}
static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
@@ -924,11 +1074,28 @@ static int felix_get_ts_info(struct dsa_switch *ds, int port,
return ocelot_get_ts_info(ocelot, port, info);
}
+static const u32 felix_phy_match_table[PHY_INTERFACE_MODE_MAX] = {
+ [PHY_INTERFACE_MODE_INTERNAL] = OCELOT_PORT_MODE_INTERNAL,
+ [PHY_INTERFACE_MODE_SGMII] = OCELOT_PORT_MODE_SGMII,
+ [PHY_INTERFACE_MODE_QSGMII] = OCELOT_PORT_MODE_QSGMII,
+ [PHY_INTERFACE_MODE_USXGMII] = OCELOT_PORT_MODE_USXGMII,
+ [PHY_INTERFACE_MODE_2500BASEX] = OCELOT_PORT_MODE_2500BASEX,
+};
+
+static int felix_validate_phy_mode(struct felix *felix, int port,
+ phy_interface_t phy_mode)
+{
+ u32 modes = felix->info->port_modes[port];
+
+ if (felix_phy_match_table[phy_mode] & modes)
+ return 0;
+ return -EOPNOTSUPP;
+}
+
static int felix_parse_ports_node(struct felix *felix,
struct device_node *ports_node,
phy_interface_t *port_phy_modes)
{
- struct ocelot *ocelot = &felix->ocelot;
struct device *dev = felix->ocelot.dev;
struct device_node *child;
@@ -955,7 +1122,7 @@ static int felix_parse_ports_node(struct felix *felix,
return -ENODEV;
}
- err = felix->info->prevalidate_phy_mode(ocelot, port, phy_mode);
+ err = felix_validate_phy_mode(felix, port, phy_mode);
if (err < 0) {
dev_err(dev, "Unsupported PHY mode %s on port %d\n",
phy_modes(phy_mode), port);
@@ -1192,7 +1359,9 @@ static int felix_setup(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
- int port, err;
+ unsigned long cpu_flood;
+ struct dsa_port *dp;
+ int err;
err = felix_init_structs(felix, ds->num_ports);
if (err)
@@ -1211,45 +1380,45 @@ static int felix_setup(struct dsa_switch *ds)
}
}
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
-
- ocelot_init_port(ocelot, port);
+ dsa_switch_for_each_available_port(dp, ds) {
+ ocelot_init_port(ocelot, dp->index);
/* Set the default QoS Classification based on PCP and DEI
* bits of vlan tag.
*/
- felix_port_qos_map_init(ocelot, port);
+ felix_port_qos_map_init(ocelot, dp->index);
}
err = ocelot_devlink_sb_register(ocelot);
if (err)
goto out_deinit_ports;
- for (port = 0; port < ds->num_ports; port++) {
- if (!dsa_is_cpu_port(ds, port))
- continue;
-
+ dsa_switch_for_each_cpu_port(dp, ds) {
/* The initial tag protocol is NPI which always returns 0, so
* there's no real point in checking for errors.
*/
- felix_set_tag_protocol(ds, port, felix->tag_proto);
+ felix_set_tag_protocol(ds, dp->index, felix->tag_proto);
+
+ /* Start off with flooding disabled towards the NPI port
+ * (actually CPU port module).
+ */
+ cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
+ ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC);
+ ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC);
+
break;
}
ds->mtu_enforcement_ingress = true;
ds->assisted_learning_on_cpu_port = true;
+ ds->fdb_isolation = true;
+ ds->max_num_bridges = ds->num_ports;
return 0;
out_deinit_ports:
- for (port = 0; port < ocelot->num_phys_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
-
- ocelot_deinit_port(ocelot, port);
- }
+ dsa_switch_for_each_available_port(dp, ds)
+ ocelot_deinit_port(ocelot, dp->index);
ocelot_deinit_timestamp(ocelot);
ocelot_deinit(ocelot);
@@ -1265,22 +1434,15 @@ static void felix_teardown(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
- int port;
-
- for (port = 0; port < ds->num_ports; port++) {
- if (!dsa_is_cpu_port(ds, port))
- continue;
+ struct dsa_port *dp;
- felix_del_tag_protocol(ds, port, felix->tag_proto);
+ dsa_switch_for_each_cpu_port(dp, ds) {
+ felix_del_tag_protocol(ds, dp->index, felix->tag_proto);
break;
}
- for (port = 0; port < ocelot->num_phys_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
-
- ocelot_deinit_port(ocelot, port);
- }
+ dsa_switch_for_each_available_port(dp, ds)
+ ocelot_deinit_port(ocelot, dp->index);
ocelot_devlink_sb_unregister(ocelot);
ocelot_deinit_timestamp(ocelot);
@@ -1302,14 +1464,23 @@ static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
struct ifreq *ifr)
{
struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ bool using_tag_8021q;
+ int err;
- return ocelot_hwstamp_set(ocelot, port, ifr);
+ err = ocelot_hwstamp_set(ocelot, port, ifr);
+ if (err)
+ return err;
+
+ using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
+
+ return felix_update_trapping_destinations(ds, using_tag_8021q);
}
-static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
+static bool felix_check_xtr_pkt(struct ocelot *ocelot)
{
struct felix *felix = ocelot_to_felix(ocelot);
- int err, grp = 0;
+ int err = 0, grp = 0;
if (felix->tag_proto != DSA_TAG_PROTO_OCELOT_8021Q)
return false;
@@ -1317,9 +1488,6 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
if (!felix->info->quirk_no_xtr_irq)
return false;
- if (ptp_type == PTP_CLASS_NONE)
- return false;
-
while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
struct sk_buff *skb;
unsigned int type;
@@ -1349,8 +1517,12 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
}
out:
- if (err < 0)
+ if (err < 0) {
+ dev_err_ratelimited(ocelot->dev,
+ "Error during packet extraction: %pe\n",
+ ERR_PTR(err));
ocelot_drain_cpu_queue(ocelot, 0);
+ }
return true;
}
@@ -1370,7 +1542,7 @@ static bool felix_rxtstamp(struct dsa_switch *ds, int port,
* MMIO in the CPU port module, and inject that into the stack from
* ocelot_xtr_poll().
*/
- if (felix_check_xtr_pkt(ocelot, type)) {
+ if (felix_check_xtr_pkt(ocelot)) {
kfree_skb(skb);
return true;
}
@@ -1430,8 +1602,17 @@ static int felix_cls_flower_add(struct dsa_switch *ds, int port,
struct flow_cls_offload *cls, bool ingress)
{
struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ bool using_tag_8021q;
+ int err;
+
+ err = ocelot_cls_flower_replace(ocelot, port, cls, ingress);
+ if (err)
+ return err;
+
+ using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
- return ocelot_cls_flower_replace(ocelot, port, cls, ingress);
+ return felix_update_trapping_destinations(ds, using_tag_8021q);
}
static int felix_cls_flower_del(struct dsa_switch *ds, int port,
@@ -1469,6 +1650,24 @@ static void felix_port_policer_del(struct dsa_switch *ds, int port)
ocelot_port_policer_del(ocelot, port);
}
+static int felix_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_mirror_add(ocelot, port, mirror->to_local_port,
+ ingress, extack);
+}
+
+static void felix_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_mirror_del(ocelot, port, mirror->ingress);
+}
+
static int felix_port_setup_tc(struct dsa_switch *ds, int port,
enum tc_setup_type type,
void *type_data)
@@ -1618,6 +1817,44 @@ felix_mrp_del_ring_role(struct dsa_switch *ds, int port,
return ocelot_mrp_del_ring_role(ocelot, port, mrp);
}
+static int felix_port_get_default_prio(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_get_default_prio(ocelot, port);
+}
+
+static int felix_port_set_default_prio(struct dsa_switch *ds, int port,
+ u8 prio)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_set_default_prio(ocelot, port, prio);
+}
+
+static int felix_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_get_dscp_prio(ocelot, port, dscp);
+}
+
+static int felix_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
+ u8 prio)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_add_dscp_prio(ocelot, port, dscp, prio);
+}
+
+static int felix_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
+ u8 prio)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_del_dscp_prio(ocelot, port, dscp, prio);
+}
+
const struct dsa_switch_ops felix_switch_ops = {
.get_tag_protocol = felix_get_tag_protocol,
.change_tag_protocol = felix_change_tag_protocol,
@@ -1629,14 +1866,17 @@ const struct dsa_switch_ops felix_switch_ops = {
.get_ethtool_stats = felix_get_ethtool_stats,
.get_sset_count = felix_get_sset_count,
.get_ts_info = felix_get_ts_info,
+ .phylink_get_caps = felix_phylink_get_caps,
.phylink_validate = felix_phylink_validate,
- .phylink_mac_config = felix_phylink_mac_config,
+ .phylink_mac_select_pcs = felix_phylink_mac_select_pcs,
.phylink_mac_link_down = felix_phylink_mac_link_down,
.phylink_mac_link_up = felix_phylink_mac_link_up,
.port_fast_age = felix_port_fast_age,
.port_fdb_dump = felix_fdb_dump,
.port_fdb_add = felix_fdb_add,
.port_fdb_del = felix_fdb_del,
+ .lag_fdb_add = felix_lag_fdb_add,
+ .lag_fdb_del = felix_lag_fdb_del,
.port_mdb_add = felix_mdb_add,
.port_mdb_del = felix_mdb_del,
.port_pre_bridge_flags = felix_pre_bridge_flags,
@@ -1658,6 +1898,8 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_max_mtu = felix_get_max_mtu,
.port_policer_add = felix_port_policer_add,
.port_policer_del = felix_port_policer_del,
+ .port_mirror_add = felix_port_mirror_add,
+ .port_mirror_del = felix_port_mirror_del,
.cls_flower_add = felix_cls_flower_add,
.cls_flower_del = felix_cls_flower_del,
.cls_flower_stats = felix_cls_flower_stats,
@@ -1678,6 +1920,11 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_mrp_del_ring_role = felix_mrp_del_ring_role,
.tag_8021q_vlan_add = felix_tag_8021q_vlan_add,
.tag_8021q_vlan_del = felix_tag_8021q_vlan_del,
+ .port_get_default_prio = felix_port_get_default_prio,
+ .port_set_default_prio = felix_port_set_default_prio,
+ .port_get_dscp_prio = felix_port_get_dscp_prio,
+ .port_add_dscp_prio = felix_port_add_dscp_prio,
+ .port_del_dscp_prio = felix_port_del_dscp_prio,
};
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
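felix_migrate_pgid_bit() above transplants a port's membership in a flood mask from one bit position to another, preserving whether it was set. The same operation on a plain 32-bit mask, as a standalone sketch with an invented example mask (not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Move a port's membership in a PGID flood mask from bit 'from' to bit 'to'. */
static uint32_t migrate_pgid_bit(uint32_t mask, int from, int to)
{
        int on = !!(mask & (1u << from));

        mask &= ~(1u << from);
        if (on)
                mask |= 1u << to;
        else
                mask &= ~(1u << to);

        return mask;
}

int main(void)
{
        /* Ports 0-3 flood to port 5 (the old CPU); hand that role to port 6. */
        uint32_t pgid_uc = 0x2f;

        pgid_uc = migrate_pgid_bit(pgid_uc, 5, 6);
        printf("PGID_UC: 0x%02x\n", pgid_uc);   /* 0x4f */
        return 0;
}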
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 9395ac119d33..f083b06fdfe9 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -7,6 +7,12 @@
#define ocelot_to_felix(o) container_of((o), struct felix, ocelot)
#define FELIX_MAC_QUIRKS OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION
+#define OCELOT_PORT_MODE_INTERNAL BIT(0)
+#define OCELOT_PORT_MODE_SGMII BIT(1)
+#define OCELOT_PORT_MODE_QSGMII BIT(2)
+#define OCELOT_PORT_MODE_2500BASEX BIT(3)
+#define OCELOT_PORT_MODE_USXGMII BIT(4)
+
/* Platform-specific information */
struct felix_info {
const struct resource *target_io_res;
@@ -15,6 +21,7 @@ struct felix_info {
const struct reg_field *regfields;
const u32 *const *map;
const struct ocelot_ops *ops;
+ const u32 *port_modes;
int num_mact_rows;
const struct ocelot_stat_layout *stats_layout;
unsigned int num_stats;
@@ -44,8 +51,6 @@ struct felix_info {
void (*phylink_validate)(struct ocelot *ocelot, int port,
unsigned long *supported,
struct phylink_link_state *state);
- int (*prevalidate_phy_mode)(struct ocelot *ocelot, int port,
- phy_interface_t phy_mode);
int (*port_setup_tc)(struct dsa_switch *ds, int port,
enum tc_setup_type type, void *type_data);
void (*port_sched_speed_set)(struct ocelot *ocelot, int port,
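The new port_modes table replaces the per-SoC prevalidate_phy_mode() callbacks with a per-port capability mask that felix_validate_phy_mode() simply tests against. A self-contained sketch of that check, with an invented four-port table and mode bits (the real tables live in the VSC9959/VSC9953 drivers shown below):

#include <errno.h>
#include <stdio.h>

#define MODE_INTERNAL   (1u << 0)
#define MODE_SGMII      (1u << 1)
#define MODE_QSGMII     (1u << 2)

/* One capability word per port, indexed by port number. */
static const unsigned int port_modes[4] = {
        MODE_SGMII | MODE_QSGMII,
        MODE_SGMII | MODE_QSGMII,
        MODE_INTERNAL,
        MODE_INTERNAL,
};

static int validate_phy_mode(unsigned int port, unsigned int requested)
{
        if (port >= sizeof(port_modes) / sizeof(port_modes[0]))
                return -EINVAL;
        if (port_modes[port] & requested)
                return 0;

        return -EOPNOTSUPP;
}

int main(void)
{
        printf("port 0 SGMII:    %d\n", validate_phy_mode(0, MODE_SGMII));
        printf("port 2 SGMII:    %d\n", validate_phy_mode(2, MODE_SGMII));
        printf("port 3 INTERNAL: %d\n", validate_phy_mode(3, MODE_INTERNAL));
        return 0;
}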
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 33f0ceae381d..62d52e0874e9 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -18,12 +18,28 @@
#include <linux/pci.h>
#include "felix.h"
+#define VSC9959_NUM_PORTS 6
+
#define VSC9959_TAS_GCL_ENTRY_MAX 63
#define VSC9959_VCAP_POLICER_BASE 63
#define VSC9959_VCAP_POLICER_MAX 383
#define VSC9959_SWITCH_PCI_BAR 4
#define VSC9959_IMDIO_PCI_BAR 0
+#define VSC9959_PORT_MODE_SERDES (OCELOT_PORT_MODE_SGMII | \
+ OCELOT_PORT_MODE_QSGMII | \
+ OCELOT_PORT_MODE_2500BASEX | \
+ OCELOT_PORT_MODE_USXGMII)
+
+static const u32 vsc9959_port_modes[VSC9959_NUM_PORTS] = {
+ VSC9959_PORT_MODE_SERDES,
+ VSC9959_PORT_MODE_SERDES,
+ VSC9959_PORT_MODE_SERDES,
+ VSC9959_PORT_MODE_SERDES,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+};
+
static const u32 vsc9959_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x0089a0),
REG(ANA_VLANMASK, 0x0089a4),
@@ -944,15 +960,8 @@ static void vsc9959_phylink_validate(struct ocelot *ocelot, int port,
unsigned long *supported,
struct phylink_link_state *state)
{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != ocelot_port->phy_mode) {
- linkmode_zero(supported);
- return;
- }
-
phylink_set_port_modes(mask);
phylink_set(mask, Autoneg);
phylink_set(mask, Pause);
@@ -975,27 +984,6 @@ static void vsc9959_phylink_validate(struct ocelot *ocelot, int port,
linkmode_and(state->advertising, state->advertising, mask);
}
-static int vsc9959_prevalidate_phy_mode(struct ocelot *ocelot, int port,
- phy_interface_t phy_mode)
-{
- switch (phy_mode) {
- case PHY_INTERFACE_MODE_INTERNAL:
- if (port != 4 && port != 5)
- return -ENOTSUPP;
- return 0;
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_USXGMII:
- case PHY_INTERFACE_MODE_2500BASEX:
- /* Not supported on internal to-CPU ports */
- if (port == 4 || port == 5)
- return -ENOTSUPP;
- return 0;
- default:
- return -ENOTSUPP;
- }
-}
-
/* Watermark encode
* Bit 8: Unit; 0:1, 1:16
* Bit 7-0: Value to be multiplied with unit
@@ -2231,14 +2219,14 @@ static const struct felix_info felix_info_vsc9959 = {
.vcap_pol_base2 = 0,
.vcap_pol_max2 = 0,
.num_mact_rows = 2048,
- .num_ports = 6,
+ .num_ports = VSC9959_NUM_PORTS,
.num_tx_queues = OCELOT_NUM_TC,
.quirk_no_xtr_irq = true,
.ptp_caps = &vsc9959_ptp_caps,
.mdio_bus_alloc = vsc9959_mdio_bus_alloc,
.mdio_bus_free = vsc9959_mdio_bus_free,
.phylink_validate = vsc9959_phylink_validate,
- .prevalidate_phy_mode = vsc9959_prevalidate_phy_mode,
+ .port_modes = vsc9959_port_modes,
.port_setup_tc = vsc9959_port_setup_tc,
.port_sched_speed_set = vsc9959_sched_speed_set,
.init_regmap = ocelot_regmap_init,
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index f2f1608a476c..68ef8f111bbe 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -14,11 +14,29 @@
#include <linux/iopoll.h>
#include "felix.h"
+#define VSC9953_NUM_PORTS 10
+
#define VSC9953_VCAP_POLICER_BASE 11
#define VSC9953_VCAP_POLICER_MAX 31
#define VSC9953_VCAP_POLICER_BASE2 120
#define VSC9953_VCAP_POLICER_MAX2 161
+#define VSC9953_PORT_MODE_SERDES (OCELOT_PORT_MODE_SGMII | \
+ OCELOT_PORT_MODE_QSGMII)
+
+static const u32 vsc9953_port_modes[VSC9953_NUM_PORTS] = {
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+};
+
static const u32 vsc9953_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x00b500),
REG(ANA_VLANMASK, 0x00b504),
@@ -917,15 +935,8 @@ static void vsc9953_phylink_validate(struct ocelot *ocelot, int port,
unsigned long *supported,
struct phylink_link_state *state)
{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != ocelot_port->phy_mode) {
- linkmode_zero(supported);
- return;
- }
-
phylink_set_port_modes(mask);
phylink_set(mask, Autoneg);
phylink_set(mask, Pause);
@@ -945,25 +956,6 @@ static void vsc9953_phylink_validate(struct ocelot *ocelot, int port,
linkmode_and(state->advertising, state->advertising, mask);
}
-static int vsc9953_prevalidate_phy_mode(struct ocelot *ocelot, int port,
- phy_interface_t phy_mode)
-{
- switch (phy_mode) {
- case PHY_INTERFACE_MODE_INTERNAL:
- if (port != 8 && port != 9)
- return -ENOTSUPP;
- return 0;
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- /* Not supported on internal to-CPU ports */
- if (port == 8 || port == 9)
- return -ENOTSUPP;
- return 0;
- default:
- return -ENOTSUPP;
- }
-}
-
/* Watermark encode
* Bit 9: Unit; 0:1, 1:16
* Bit 8-0: Value to be multiplied with unit
@@ -1101,12 +1093,12 @@ static const struct felix_info seville_info_vsc9953 = {
.vcap_pol_base2 = VSC9953_VCAP_POLICER_BASE2,
.vcap_pol_max2 = VSC9953_VCAP_POLICER_MAX2,
.num_mact_rows = 2048,
- .num_ports = 10,
+ .num_ports = VSC9953_NUM_PORTS,
.num_tx_queues = OCELOT_NUM_TC,
.mdio_bus_alloc = vsc9953_mdio_bus_alloc,
.mdio_bus_free = vsc9953_mdio_bus_free,
.phylink_validate = vsc9953_phylink_validate,
- .prevalidate_phy_mode = vsc9953_prevalidate_phy_mode,
+ .port_modes = vsc9953_port_modes,
.init_regmap = ocelot_regmap_init,
};
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index c39de2a4c1fe..e5098cfe44bc 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -499,52 +499,27 @@ static enum dsa_tag_protocol ar9331_sw_get_tag_protocol(struct dsa_switch *ds,
return DSA_TAG_PROTO_AR9331;
}
-static void ar9331_sw_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void ar9331_sw_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100;
switch (port) {
case 0:
- if (state->interface != PHY_INTERFACE_MODE_GMII)
- goto unsupported;
-
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ config->mac_capabilities |= MAC_1000;
break;
case 1:
case 2:
case 3:
case 4:
case 5:
- if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
- goto unsupported;
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
break;
- default:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported port: %i\n", port);
- return;
}
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- return;
-
-unsupported:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported interface: %d, port: %d\n",
- state->interface, port);
}
static void ar9331_sw_phylink_mac_config(struct dsa_switch *ds, int port,
@@ -697,7 +672,7 @@ static const struct dsa_switch_ops ar9331_sw_ops = {
.get_tag_protocol = ar9331_sw_get_tag_protocol,
.setup = ar9331_sw_setup,
.port_disable = ar9331_sw_port_disable,
- .phylink_validate = ar9331_sw_phylink_validate,
+ .phylink_get_caps = ar9331_sw_phylink_get_caps,
.phylink_mac_config = ar9331_sw_phylink_mac_config,
.phylink_mac_link_down = ar9331_sw_phylink_mac_link_down,
.phylink_mac_link_up = ar9331_sw_phylink_mac_link_up,
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 039694518788..d3ed0a7f8077 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -20,6 +20,7 @@
#include <linux/phylink.h>
#include <linux/gpio/consumer.h>
#include <linux/etherdevice.h>
+#include <linux/dsa/tag_qca.h>
#include "qca8k.h"
@@ -74,12 +75,6 @@ static const struct qca8k_mib_desc ar8327_mib[] = {
MIB_DESC(1, 0xac, "TXUnicast"),
};
-/* The 32bit switch registers are accessed indirectly. To achieve this we need
- * to set the page of the register. Track the last page that was set to reduce
- * mdio writes
- */
-static u16 qca8k_current_page = 0xffff;
-
static void
qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
{
@@ -94,6 +89,44 @@ qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
}
static int
+qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo)
+{
+ u16 *cached_lo = &priv->mdio_cache.lo;
+ struct mii_bus *bus = priv->bus;
+ int ret;
+
+ if (lo == *cached_lo)
+ return 0;
+
+ ret = bus->write(bus, phy_id, regnum, lo);
+ if (ret < 0)
+ dev_err_ratelimited(&bus->dev,
+ "failed to write qca8k 32bit lo register\n");
+
+ *cached_lo = lo;
+ return 0;
+}
+
+static int
+qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi)
+{
+ u16 *cached_hi = &priv->mdio_cache.hi;
+ struct mii_bus *bus = priv->bus;
+ int ret;
+
+ if (hi == *cached_hi)
+ return 0;
+
+ ret = bus->write(bus, phy_id, regnum, hi);
+ if (ret < 0)
+ dev_err_ratelimited(&bus->dev,
+ "failed to write qca8k 32bit hi register\n");
+
+ *cached_hi = hi;
+ return 0;
+}
+
+static int
qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
{
int ret;
@@ -116,7 +149,7 @@ qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
}
static void
-qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
+qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val)
{
u16 lo, hi;
int ret;
@@ -124,20 +157,19 @@ qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
lo = val & 0xffff;
hi = (u16)(val >> 16);
- ret = bus->write(bus, phy_id, regnum, lo);
+ ret = qca8k_set_lo(priv, phy_id, regnum, lo);
if (ret >= 0)
- ret = bus->write(bus, phy_id, regnum + 1, hi);
- if (ret < 0)
- dev_err_ratelimited(&bus->dev,
- "failed to write qca8k 32bit register\n");
+ ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi);
}
static int
-qca8k_set_page(struct mii_bus *bus, u16 page)
+qca8k_set_page(struct qca8k_priv *priv, u16 page)
{
+ u16 *cached_page = &priv->mdio_cache.page;
+ struct mii_bus *bus = priv->bus;
int ret;
- if (page == qca8k_current_page)
+ if (page == *cached_page)
return 0;
ret = bus->write(bus, 0x18, 0, page);
@@ -147,7 +179,7 @@ qca8k_set_page(struct mii_bus *bus, u16 page)
return ret;
}
- qca8k_current_page = page;
+ *cached_page = page;
usleep_range(1000, 2000);
return 0;
}
@@ -170,6 +202,252 @@ qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
return regmap_update_bits(priv->regmap, reg, mask, write_val);
}
+static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
+{
+ struct qca8k_mgmt_eth_data *mgmt_eth_data;
+ struct qca8k_priv *priv = ds->priv;
+ struct qca_mgmt_ethhdr *mgmt_ethhdr;
+ u8 len, cmd;
+
+ mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
+ mgmt_eth_data = &priv->mgmt_eth_data;
+
+ cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command);
+ len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command);
+
+	/* Make sure the seq matches the requested packet */
+ if (mgmt_ethhdr->seq == mgmt_eth_data->seq)
+ mgmt_eth_data->ack = true;
+
+ if (cmd == MDIO_READ) {
+ mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data;
+
+		/* Get the remaining 12 bytes of data.
+		 * The read/write function will extract the requested data.
+		 */
+ if (len > QCA_HDR_MGMT_DATA1_LEN)
+ memcpy(mgmt_eth_data->data + 1, skb->data,
+ QCA_HDR_MGMT_DATA2_LEN);
+ }
+
+ complete(&mgmt_eth_data->rw_done);
+}
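This handler is not called directly by the driver: it is registered with the qca tag driver through qca8k_connect_tag_protocol() later in this patch, and the tagger invokes it when a management frame comes back from the switch. A rough sketch of the expected dispatch on the tagger side, with the wrapper function name being an assumption:

/* Hypothetical receive-path dispatch in tag_qca: hand management frames
 * back to the switch driver through the registered handler.
 */
static void example_mgmt_rx_dispatch(struct dsa_switch *ds, struct sk_buff *skb)
{
	struct qca_tagger_data *tagger_data = ds->tagger_data;

	if (tagger_data && tagger_data->rw_reg_ack_handler)
		tagger_data->rw_reg_ack_handler(ds, skb);
}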
+
+static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
+ int priority, unsigned int len)
+{
+ struct qca_mgmt_ethhdr *mgmt_ethhdr;
+ unsigned int real_len;
+ struct sk_buff *skb;
+ u32 *data2;
+ u16 hdr;
+
+ skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
+ if (!skb)
+ return NULL;
+
+	/* Max value for the len reg is 15 (0xf) but the switch actually returns
+	 * 16 bytes. For some reason the steps are:
+	 * 0: nothing
+	 * 1-4: first 4 bytes
+	 * 5-6: first 12 bytes
+	 * 7-15: all 16 bytes
+	 */
+ if (len == 16)
+ real_len = 15;
+ else
+ real_len = len;
+
+ skb_reset_mac_header(skb);
+ skb_set_network_header(skb, skb->len);
+
+ mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);
+
+ hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
+ hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
+ hdr |= QCA_HDR_XMIT_FROM_CPU;
+ hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
+ hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);
+
+ mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
+ mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
+ mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
+ mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
+ QCA_HDR_MGMT_CHECK_CODE_VAL);
+
+ if (cmd == MDIO_WRITE)
+ mgmt_ethhdr->mdio_data = *val;
+
+ mgmt_ethhdr->hdr = htons(hdr);
+
+ data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
+ if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN)
+ memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN);
+
+ return skb;
+}
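The length quirk described in the comment above (a length field that tops out at 15 while the switch actually transfers 16 bytes for that value) boils down to a one-line mapping; the helper below is only an illustration, the patch performs the same mapping inline through real_len.

/* Illustration only: a 16-byte request must be encoded as 15, every other
 * length is written as-is.
 */
static unsigned int qca8k_mgmt_len_encode_example(unsigned int len)
{
	return len == 16 ? 15 : len;
}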
+
+static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
+{
+ struct qca_mgmt_ethhdr *mgmt_ethhdr;
+
+ mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
+ mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
+}
+
+static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+ struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
+ struct sk_buff *skb;
+ bool ack;
+ int ret;
+
+ skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
+ QCA8K_ETHERNET_MDIO_PRIORITY, len);
+ if (!skb)
+ return -ENOMEM;
+
+ mutex_lock(&mgmt_eth_data->mutex);
+
+	/* Check if the mgmt_master is operational */
+ if (!priv->mgmt_master) {
+ kfree_skb(skb);
+ mutex_unlock(&mgmt_eth_data->mutex);
+ return -EINVAL;
+ }
+
+ skb->dev = priv->mgmt_master;
+
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the mdio pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+
+ *val = mgmt_eth_data->data[0];
+ if (len > QCA_HDR_MGMT_DATA1_LEN)
+ memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);
+
+ ack = mgmt_eth_data->ack;
+
+ mutex_unlock(&mgmt_eth_data->mutex);
+
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ if (!ack)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+ struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
+ struct sk_buff *skb;
+ bool ack;
+ int ret;
+
+ skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
+ QCA8K_ETHERNET_MDIO_PRIORITY, len);
+ if (!skb)
+ return -ENOMEM;
+
+ mutex_lock(&mgmt_eth_data->mutex);
+
+	/* Check if the mgmt_master is operational */
+ if (!priv->mgmt_master) {
+ kfree_skb(skb);
+ mutex_unlock(&mgmt_eth_data->mutex);
+ return -EINVAL;
+ }
+
+ skb->dev = priv->mgmt_master;
+
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the mdio pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+
+ ack = mgmt_eth_data->ack;
+
+ mutex_unlock(&mgmt_eth_data->mutex);
+
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ if (!ack)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
+{
+ u32 val = 0;
+ int ret;
+
+ ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
+ if (ret)
+ return ret;
+
+ val &= ~mask;
+ val |= write_val;
+
+ return qca8k_write_eth(priv, reg, &val, sizeof(val));
+}
+
+static int
+qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+ int i, count = len / sizeof(u32), ret;
+
+ if (priv->mgmt_master && !qca8k_read_eth(priv, reg, val, len))
+ return 0;
+
+ for (i = 0; i < count; i++) {
+ ret = regmap_read(priv->regmap, reg + (i * 4), val + i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+ int i, count = len / sizeof(u32), ret;
+ u32 tmp;
+
+ if (priv->mgmt_master && !qca8k_write_eth(priv, reg, val, len))
+ return 0;
+
+ for (i = 0; i < count; i++) {
+ tmp = val[i];
+
+ ret = regmap_write(priv->regmap, reg + (i * 4), tmp);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
static int
qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
{
@@ -178,11 +456,14 @@ qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
u16 r1, r2, page;
int ret;
+ if (!qca8k_read_eth(priv, reg, val, sizeof(*val)))
+ return 0;
+
qca8k_split_addr(reg, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
- ret = qca8k_set_page(bus, page);
+ ret = qca8k_set_page(priv, page);
if (ret < 0)
goto exit;
@@ -201,15 +482,18 @@ qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
u16 r1, r2, page;
int ret;
+ if (!qca8k_write_eth(priv, reg, &val, sizeof(val)))
+ return 0;
+
qca8k_split_addr(reg, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
- ret = qca8k_set_page(bus, page);
+ ret = qca8k_set_page(priv, page);
if (ret < 0)
goto exit;
- qca8k_mii_write32(bus, 0x10 | r2, r1, val);
+ qca8k_mii_write32(priv, 0x10 | r2, r1, val);
exit:
mutex_unlock(&bus->mdio_lock);
@@ -225,11 +509,14 @@ qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_
u32 val;
int ret;
+ if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
+ return 0;
+
qca8k_split_addr(reg, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
- ret = qca8k_set_page(bus, page);
+ ret = qca8k_set_page(priv, page);
if (ret < 0)
goto exit;
@@ -239,7 +526,7 @@ qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_
val &= ~mask;
val |= write_val;
- qca8k_mii_write32(bus, 0x10 | r2, r1, val);
+ qca8k_mii_write32(priv, 0x10 | r2, r1, val);
exit:
mutex_unlock(&bus->mdio_lock);
@@ -296,17 +583,13 @@ qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
static int
qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
{
- u32 reg[4], val;
- int i, ret;
+ u32 reg[3];
+ int ret;
/* load the ARL table into an array */
- for (i = 0; i < 4; i++) {
- ret = qca8k_read(priv, QCA8K_REG_ATU_DATA0 + (i * 4), &val);
- if (ret < 0)
- return ret;
-
- reg[i] = val;
- }
+ ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
+ if (ret)
+ return ret;
/* vid - 83:72 */
fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
@@ -330,7 +613,6 @@ qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
u8 aging)
{
u32 reg[3] = { 0 };
- int i;
/* vid - 83:72 */
reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
@@ -347,8 +629,7 @@ qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
/* load the array into the ARL table */
- for (i = 0; i < 3; i++)
- qca8k_write(priv, QCA8K_REG_ATU_DATA0 + (i * 4), reg[i]);
+ qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
}
static int
@@ -632,7 +913,10 @@ qca8k_mib_init(struct qca8k_priv *priv)
int ret;
mutex_lock(&priv->reg_mutex);
- ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY);
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
+ QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
+ FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
+ QCA8K_MIB_BUSY);
if (ret)
goto exit;
@@ -666,6 +950,199 @@ qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
}
+static int
+qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
+ struct sk_buff *read_skb, u32 *val)
+{
+ struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
+ bool ack;
+ int ret;
+
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the copy pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
+
+ ack = mgmt_eth_data->ack;
+
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ if (!ack)
+ return -EINVAL;
+
+ *val = mgmt_eth_data->data[0];
+
+ return 0;
+}
+
+static int
+qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
+ int regnum, u16 data)
+{
+ struct sk_buff *write_skb, *clear_skb, *read_skb;
+ struct qca8k_mgmt_eth_data *mgmt_eth_data;
+ u32 write_val, clear_val = 0, val;
+ struct net_device *mgmt_master;
+ int ret, ret1;
+ bool ack;
+
+ if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
+ return -EINVAL;
+
+ mgmt_eth_data = &priv->mgmt_eth_data;
+
+ write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
+ QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
+ QCA8K_MDIO_MASTER_REG_ADDR(regnum);
+
+ if (read) {
+ write_val |= QCA8K_MDIO_MASTER_READ;
+ } else {
+ write_val |= QCA8K_MDIO_MASTER_WRITE;
+ write_val |= QCA8K_MDIO_MASTER_DATA(data);
+ }
+
+	/* Preallocate all the needed skbs before taking the lock */
+ write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
+ if (!write_skb)
+ return -ENOMEM;
+
+ clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
+ if (!clear_skb) {
+ ret = -ENOMEM;
+ goto err_clear_skb;
+ }
+
+ read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
+ if (!read_skb) {
+ ret = -ENOMEM;
+ goto err_read_skb;
+ }
+
+	/* Actually start the request:
+	 * 1. Send the mdio master packet
+	 * 2. Busy-wait for the mdio master command to complete
+	 * 3. Get the data if we are reading
+	 * 4. Reset the mdio master (even on error)
+	 */
+ mutex_lock(&mgmt_eth_data->mutex);
+
+ /* Check if mgmt_master is operational */
+ mgmt_master = priv->mgmt_master;
+ if (!mgmt_master) {
+ mutex_unlock(&mgmt_eth_data->mutex);
+ ret = -EINVAL;
+ goto err_mgmt_master;
+ }
+
+ read_skb->dev = mgmt_master;
+ clear_skb->dev = mgmt_master;
+ write_skb->dev = mgmt_master;
+
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the write pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(write_skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
+
+ ack = mgmt_eth_data->ack;
+
+ if (ret <= 0) {
+ ret = -ETIMEDOUT;
+ kfree_skb(read_skb);
+ goto exit;
+ }
+
+ if (!ack) {
+ ret = -EINVAL;
+ kfree_skb(read_skb);
+ goto exit;
+ }
+
+ ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
+ !(val & QCA8K_MDIO_MASTER_BUSY), 0,
+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
+ mgmt_eth_data, read_skb, &val);
+
+ if (ret < 0 && ret1 < 0) {
+ ret = ret1;
+ goto exit;
+ }
+
+ if (read) {
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the read pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(read_skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
+
+ ack = mgmt_eth_data->ack;
+
+ if (ret <= 0) {
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+
+ if (!ack) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
+ } else {
+ kfree_skb(read_skb);
+ }
+exit:
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the clear pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(clear_skb);
+
+ wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
+
+ mutex_unlock(&mgmt_eth_data->mutex);
+
+ return ret;
+
+ /* Error handling before lock */
+err_mgmt_master:
+ kfree_skb(read_skb);
+err_read_skb:
+ kfree_skb(clear_skb);
+err_clear_skb:
+ kfree_skb(write_skb);
+
+ return ret;
+}
+
static u32
qca8k_port_to_phy(int port)
{
@@ -704,8 +1181,9 @@ qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
}
static int
-qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data)
+qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
{
+ struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
int ret;
@@ -722,18 +1200,18 @@ qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data)
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
- ret = qca8k_set_page(bus, page);
+ ret = qca8k_set_page(priv, page);
if (ret)
goto exit;
- qca8k_mii_write32(bus, 0x10 | r2, r1, val);
+ qca8k_mii_write32(priv, 0x10 | r2, r1, val);
ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
QCA8K_MDIO_MASTER_BUSY);
exit:
/* even if the busy_wait timeouts try to clear the MASTER_EN */
- qca8k_mii_write32(bus, 0x10 | r2, r1, 0);
+ qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
mutex_unlock(&bus->mdio_lock);
@@ -741,8 +1219,9 @@ exit:
}
static int
-qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum)
+qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
{
+ struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
int ret;
@@ -758,11 +1237,11 @@ qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum)
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
- ret = qca8k_set_page(bus, page);
+ ret = qca8k_set_page(priv, page);
if (ret)
goto exit;
- qca8k_mii_write32(bus, 0x10 | r2, r1, val);
+ qca8k_mii_write32(priv, 0x10 | r2, r1, val);
ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
QCA8K_MDIO_MASTER_BUSY);
@@ -773,7 +1252,7 @@ qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum)
exit:
/* even if the busy_wait timeouts try to clear the MASTER_EN */
- qca8k_mii_write32(bus, 0x10 | r2, r1, 0);
+ qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
mutex_unlock(&bus->mdio_lock);
@@ -787,24 +1266,35 @@ static int
qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
{
struct qca8k_priv *priv = slave_bus->priv;
- struct mii_bus *bus = priv->bus;
+ int ret;
- return qca8k_mdio_write(bus, phy, regnum, data);
+	/* Use mdio Ethernet when available, fall back to the legacy one on error */
+ ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
+ if (!ret)
+ return 0;
+
+ return qca8k_mdio_write(priv, phy, regnum, data);
}
static int
qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
{
struct qca8k_priv *priv = slave_bus->priv;
- struct mii_bus *bus = priv->bus;
+ int ret;
+
+	/* Use mdio Ethernet when available, fall back to the legacy one on error */
+ ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
+ if (ret >= 0)
+ return ret;
- return qca8k_mdio_read(bus, phy, regnum);
+ return qca8k_mdio_read(priv, phy, regnum);
}
static int
qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
{
struct qca8k_priv *priv = ds->priv;
+ int ret;
/* Check if the legacy mapping should be used and the
* port is not correctly mapped to the right PHY in the
@@ -813,7 +1303,12 @@ qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
if (priv->legacy_phy_port_mapping)
port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
- return qca8k_mdio_write(priv->bus, port, regnum, data);
+	/* Use mdio Ethernet when available, fall back to the legacy one on error */
+ ret = qca8k_phy_eth_command(priv, false, port, regnum, 0);
+ if (!ret)
+ return ret;
+
+ return qca8k_mdio_write(priv, port, regnum, data);
}
static int
@@ -829,7 +1324,12 @@ qca8k_phy_read(struct dsa_switch *ds, int port, int regnum)
if (priv->legacy_phy_port_mapping)
port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
- ret = qca8k_mdio_read(priv->bus, port, regnum);
+	/* Use mdio Ethernet when available, fall back to the legacy one on error */
+ ret = qca8k_phy_eth_command(priv, true, port, regnum, 0);
+ if (ret >= 0)
+ return ret;
+
+ ret = qca8k_mdio_read(priv, port, regnum);
if (ret < 0)
return 0xffff;
@@ -1132,220 +1632,6 @@ qca8k_parse_port_config(struct qca8k_priv *priv)
return 0;
}
-static int
-qca8k_setup(struct dsa_switch *ds)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int cpu_port, ret, i;
- u32 mask;
-
- cpu_port = qca8k_find_cpu_port(ds);
- if (cpu_port < 0) {
- dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
- return cpu_port;
- }
-
- /* Parse CPU port config to be later used in phy_link mac_config */
- ret = qca8k_parse_port_config(priv);
- if (ret)
- return ret;
-
- ret = qca8k_setup_mdio_bus(priv);
- if (ret)
- return ret;
-
- ret = qca8k_setup_of_pws_reg(priv);
- if (ret)
- return ret;
-
- ret = qca8k_setup_mac_pwr_sel(priv);
- if (ret)
- return ret;
-
- /* Make sure MAC06 is disabled */
- ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
- QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
- if (ret) {
- dev_err(priv->dev, "failed disabling MAC06 exchange");
- return ret;
- }
-
- /* Enable CPU Port */
- ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
- QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
- if (ret) {
- dev_err(priv->dev, "failed enabling CPU port");
- return ret;
- }
-
- /* Enable MIB counters */
- ret = qca8k_mib_init(priv);
- if (ret)
- dev_warn(priv->dev, "mib init failed");
-
- /* Initial setup of all ports */
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- /* Disable forwarding by default on all ports */
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_MEMBER, 0);
- if (ret)
- return ret;
-
- /* Enable QCA header mode on all cpu ports */
- if (dsa_is_cpu_port(ds, i)) {
- ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
- FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
- FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
- if (ret) {
- dev_err(priv->dev, "failed enabling QCA header mode");
- return ret;
- }
- }
-
- /* Disable MAC by default on all user ports */
- if (dsa_is_user_port(ds, i))
- qca8k_port_set_status(priv, i, 0);
- }
-
- /* Forward all unknown frames to CPU port for Linux processing
- * Notice that in multi-cpu config only one port should be set
- * for igmp, unknown, multicast and broadcast packet
- */
- ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
- FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
- FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
- FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
- FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
- if (ret)
- return ret;
-
- /* Setup connection between CPU port & user ports
- * Configure specific switch configuration for ports
- */
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- /* CPU port gets connected to all user ports of the switch */
- if (dsa_is_cpu_port(ds, i)) {
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
- if (ret)
- return ret;
- }
-
- /* Individual user ports get connected to CPU port only */
- if (dsa_is_user_port(ds, i)) {
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_MEMBER,
- BIT(cpu_port));
- if (ret)
- return ret;
-
- /* Enable ARP Auto-learning by default */
- ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_LEARN);
- if (ret)
- return ret;
-
- /* For port based vlans to work we need to set the
- * default egress vid
- */
- ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
- QCA8K_EGREES_VLAN_PORT_MASK(i),
- QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
- if (ret)
- return ret;
-
- ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
- QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
- QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
- if (ret)
- return ret;
- }
-
- /* The port 5 of the qca8337 have some problem in flood condition. The
- * original legacy driver had some specific buffer and priority settings
- * for the different port suggested by the QCA switch team. Add this
- * missing settings to improve switch stability under load condition.
- * This problem is limited to qca8337 and other qca8k switch are not affected.
- */
- if (priv->switch_id == QCA8K_ID_QCA8337) {
- switch (i) {
- /* The 2 CPU port and port 5 requires some different
- * priority than any other ports.
- */
- case 0:
- case 5:
- case 6:
- mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
- QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
- break;
- default:
- mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
- QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
- }
- qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);
-
- mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
- QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
- QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
- QCA8K_PORT_HOL_CTRL1_WRED_EN;
- qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
- QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
- QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
- QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
- QCA8K_PORT_HOL_CTRL1_WRED_EN,
- mask);
- }
-
- /* Set initial MTU for every port.
- * We have only have a general MTU setting. So track
- * every port and set the max across all port.
- * Set per port MTU to 1500 as the MTU change function
- * will add the overhead and if its set to 1518 then it
- * will apply the overhead again and we will end up with
- * MTU of 1536 instead of 1518
- */
- priv->port_mtu[i] = ETH_DATA_LEN;
- }
-
- /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */
- if (priv->switch_id == QCA8K_ID_QCA8327) {
- mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
- QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
- qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
- QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
- QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
- mask);
- }
-
- /* Setup our port MTUs to match power on defaults */
- ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
- if (ret)
- dev_warn(priv->dev, "failed setting MTU settings");
-
- /* Flush the FDB table */
- qca8k_fdb_flush(priv);
-
- /* We don't have interrupts for link changes, so we need to poll */
- ds->pcs_poll = true;
-
- /* Set min a max ageing value supported */
- ds->ageing_time_min = 7000;
- ds->ageing_time_max = 458745000;
-
- /* Set max number of LAGs supported */
- ds->num_lag_ids = QCA8K_NUM_LAGS;
-
- return 0;
-}
-
static void
qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
u32 reg)
@@ -1387,13 +1673,41 @@ qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_inde
cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
}
+static struct phylink_pcs *
+qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
+ phy_interface_t interface)
+{
+ struct qca8k_priv *priv = ds->priv;
+ struct phylink_pcs *pcs = NULL;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ switch (port) {
+ case 0:
+ pcs = &priv->pcs_port_0.pcs;
+ break;
+
+ case 6:
+ pcs = &priv->pcs_port_6.pcs;
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return pcs;
+}
+
static void
qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
const struct phylink_link_state *state)
{
struct qca8k_priv *priv = ds->priv;
- int cpu_port_index, ret;
- u32 reg, val;
+ int cpu_port_index;
+ u32 reg;
switch (port) {
case 0: /* 1st CPU port */
@@ -1459,70 +1773,6 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
case PHY_INTERFACE_MODE_1000BASEX:
/* Enable SGMII on the port */
qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
-
- /* Enable/disable SerDes auto-negotiation as necessary */
- ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
- if (ret)
- return;
- if (phylink_autoneg_inband(mode))
- val &= ~QCA8K_PWS_SERDES_AEN_DIS;
- else
- val |= QCA8K_PWS_SERDES_AEN_DIS;
- qca8k_write(priv, QCA8K_REG_PWS, val);
-
- /* Configure the SGMII parameters */
- ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
- if (ret)
- return;
-
- val |= QCA8K_SGMII_EN_SD;
-
- if (priv->ports_config.sgmii_enable_pll)
- val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
- QCA8K_SGMII_EN_TX;
-
- if (dsa_is_cpu_port(ds, port)) {
- /* CPU port, we're talking to the CPU MAC, be a PHY */
- val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
- val |= QCA8K_SGMII_MODE_CTRL_PHY;
- } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
- val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
- val |= QCA8K_SGMII_MODE_CTRL_MAC;
- } else if (state->interface == PHY_INTERFACE_MODE_1000BASEX) {
- val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
- val |= QCA8K_SGMII_MODE_CTRL_BASEX;
- }
-
- qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
-
- /* From original code is reported port instability as SGMII also
- * require delay set. Apply advised values here or take them from DT.
- */
- if (state->interface == PHY_INTERFACE_MODE_SGMII)
- qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
-
- /* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and
- * falling edge is set writing in the PORT0 PAD reg
- */
- if (priv->switch_id == QCA8K_ID_QCA8327 ||
- priv->switch_id == QCA8K_ID_QCA8337)
- reg = QCA8K_REG_PORT0_PAD_CTRL;
-
- val = 0;
-
- /* SGMII Clock phase configuration */
- if (priv->ports_config.sgmii_rx_clk_falling_edge)
- val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
-
- if (priv->ports_config.sgmii_tx_clk_falling_edge)
- val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
-
- if (val)
- ret = qca8k_rmw(priv, reg,
- QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
- QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
- val);
-
break;
default:
dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
@@ -1531,109 +1781,41 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
}
}
-static void
-qca8k_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
switch (port) {
case 0: /* 1st CPU port */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
- state->interface != PHY_INTERFACE_MODE_SGMII)
- goto unsupported;
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
break;
+
case 1:
case 2:
case 3:
case 4:
case 5:
/* Internal PHY */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- state->interface != PHY_INTERFACE_MODE_INTERNAL)
- goto unsupported;
- break;
- case 6: /* 2nd CPU port / external PHY */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
- state->interface != PHY_INTERFACE_MODE_SGMII &&
- state->interface != PHY_INTERFACE_MODE_1000BASEX)
- goto unsupported;
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
break;
- default:
-unsupported:
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
-
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
- phylink_set(mask, 1000baseX_Full);
-
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
-static int
-qca8k_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
-{
- struct qca8k_priv *priv = ds->priv;
- u32 reg;
- int ret;
-
- ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
- if (ret < 0)
- return ret;
-
- state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
- state->an_complete = state->link;
- state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
- state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
- DUPLEX_HALF;
-
- switch (reg & QCA8K_PORT_STATUS_SPEED) {
- case QCA8K_PORT_STATUS_SPEED_10:
- state->speed = SPEED_10;
- break;
- case QCA8K_PORT_STATUS_SPEED_100:
- state->speed = SPEED_100;
- break;
- case QCA8K_PORT_STATUS_SPEED_1000:
- state->speed = SPEED_1000;
- break;
- default:
- state->speed = SPEED_UNKNOWN;
+ case 6: /* 2nd CPU port / external PHY */
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ config->supported_interfaces);
break;
}
- state->pause = MLO_PAUSE_NONE;
- if (reg & QCA8K_PORT_STATUS_RXFLOW)
- state->pause |= MLO_PAUSE_RX;
- if (reg & QCA8K_PORT_STATUS_TXFLOW)
- state->pause |= MLO_PAUSE_TX;
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD;
- return 1;
+ config->legacy_pre_march2020 = false;
}
static void
@@ -1686,6 +1868,164 @@ qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
}
+static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct qca8k_pcs, pcs);
+}
+
+static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
+ int port = pcs_to_qca8k_pcs(pcs)->port;
+ u32 reg;
+ int ret;
+
+ ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
+ if (ret < 0) {
+ state->link = false;
+ return;
+ }
+
+ state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
+ state->an_complete = state->link;
+ state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
+ state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
+ DUPLEX_HALF;
+
+ switch (reg & QCA8K_PORT_STATUS_SPEED) {
+ case QCA8K_PORT_STATUS_SPEED_10:
+ state->speed = SPEED_10;
+ break;
+ case QCA8K_PORT_STATUS_SPEED_100:
+ state->speed = SPEED_100;
+ break;
+ case QCA8K_PORT_STATUS_SPEED_1000:
+ state->speed = SPEED_1000;
+ break;
+ default:
+ state->speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ if (reg & QCA8K_PORT_STATUS_RXFLOW)
+ state->pause |= MLO_PAUSE_RX;
+ if (reg & QCA8K_PORT_STATUS_TXFLOW)
+ state->pause |= MLO_PAUSE_TX;
+}
+
+static int qca8k_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
+ int cpu_port_index, ret, port;
+ u32 reg, val;
+
+ port = pcs_to_qca8k_pcs(pcs)->port;
+ switch (port) {
+ case 0:
+ reg = QCA8K_REG_PORT0_PAD_CTRL;
+ cpu_port_index = QCA8K_CPU_PORT0;
+ break;
+
+ case 6:
+ reg = QCA8K_REG_PORT6_PAD_CTRL;
+ cpu_port_index = QCA8K_CPU_PORT6;
+ break;
+
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ /* Enable/disable SerDes auto-negotiation as necessary */
+ ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
+ if (ret)
+ return ret;
+ if (phylink_autoneg_inband(mode))
+ val &= ~QCA8K_PWS_SERDES_AEN_DIS;
+ else
+ val |= QCA8K_PWS_SERDES_AEN_DIS;
+ qca8k_write(priv, QCA8K_REG_PWS, val);
+
+ /* Configure the SGMII parameters */
+ ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
+ if (ret)
+ return ret;
+
+ val |= QCA8K_SGMII_EN_SD;
+
+ if (priv->ports_config.sgmii_enable_pll)
+ val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
+ QCA8K_SGMII_EN_TX;
+
+ if (dsa_is_cpu_port(priv->ds, port)) {
+ /* CPU port, we're talking to the CPU MAC, be a PHY */
+ val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
+ val |= QCA8K_SGMII_MODE_CTRL_PHY;
+ } else if (interface == PHY_INTERFACE_MODE_SGMII) {
+ val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
+ val |= QCA8K_SGMII_MODE_CTRL_MAC;
+ } else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
+ val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
+ val |= QCA8K_SGMII_MODE_CTRL_BASEX;
+ }
+
+ qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
+
+	/* The original code reports port instability, as SGMII also requires
+	 * the delay to be set. Apply the advised values here or take them from DT.
+	 */
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
+	/* For qca8327/qca8328/qca8334/qca8338 the sgmii is unique and the
+	 * falling edge is set by writing to the PORT0 PAD reg
+	 */
+ if (priv->switch_id == QCA8K_ID_QCA8327 ||
+ priv->switch_id == QCA8K_ID_QCA8337)
+ reg = QCA8K_REG_PORT0_PAD_CTRL;
+
+ val = 0;
+
+ /* SGMII Clock phase configuration */
+ if (priv->ports_config.sgmii_rx_clk_falling_edge)
+ val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
+
+ if (priv->ports_config.sgmii_tx_clk_falling_edge)
+ val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
+
+ if (val)
+ ret = qca8k_rmw(priv, reg,
+ QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
+ QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
+ val);
+
+ return 0;
+}
+
+static void qca8k_pcs_an_restart(struct phylink_pcs *pcs)
+{
+}
+
+static const struct phylink_pcs_ops qca8k_pcs_ops = {
+ .pcs_get_state = qca8k_pcs_get_state,
+ .pcs_config = qca8k_pcs_config,
+ .pcs_an_restart = qca8k_pcs_an_restart,
+};
+
+static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
+ int port)
+{
+ qpcs->pcs.ops = &qca8k_pcs_ops;
+
+ /* We don't have interrupts for link changes, so we need to poll */
+ qpcs->pcs.poll = true;
+ qpcs->priv = priv;
+ qpcs->port = port;
+}
+
static void
qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
{
@@ -1703,6 +2043,97 @@ qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
ETH_GSTRING_LEN);
}
+static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
+{
+ const struct qca8k_match_data *match_data;
+ struct qca8k_mib_eth_data *mib_eth_data;
+ struct qca8k_priv *priv = ds->priv;
+ const struct qca8k_mib_desc *mib;
+ struct mib_ethhdr *mib_ethhdr;
+ int i, mib_len, offset = 0;
+ u64 *data;
+ u8 port;
+
+ mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
+ mib_eth_data = &priv->mib_eth_data;
+
+	/* The switch autocasts MIB data for every port. Ignore packets for
+	 * other ports and parse only the requested one.
+	 */
+ port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
+ if (port != mib_eth_data->req_port)
+ goto exit;
+
+ match_data = device_get_match_data(priv->dev);
+ data = mib_eth_data->data;
+
+ for (i = 0; i < match_data->mib_count; i++) {
+ mib = &ar8327_mib[i];
+
+		/* The first 3 MIB counters are present in the skb head */
+ if (i < 3) {
+ data[i] = mib_ethhdr->data[i];
+ continue;
+ }
+
+ mib_len = sizeof(uint32_t);
+
+		/* Some MIB counters are 64 bits wide */
+ if (mib->size == 2)
+ mib_len = sizeof(uint64_t);
+
+		/* Copy the mib value from the packet to the data array */
+ memcpy(data + i, skb->data + offset, mib_len);
+
+ /* Set the offset for the next mib */
+ offset += mib_len;
+ }
+
+exit:
+	/* Complete once the MIB packets for all ports have been parsed */
+ if (refcount_dec_and_test(&mib_eth_data->port_parsed))
+ complete(&mib_eth_data->rw_done);
+}
+
+static int
+qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct qca8k_mib_eth_data *mib_eth_data;
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ mib_eth_data = &priv->mib_eth_data;
+
+ mutex_lock(&mib_eth_data->mutex);
+
+ reinit_completion(&mib_eth_data->rw_done);
+
+ mib_eth_data->req_port = dp->index;
+ mib_eth_data->data = data;
+ refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
+
+ mutex_lock(&priv->reg_mutex);
+
+ /* Send mib autocast request */
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
+ QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
+ FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
+ QCA8K_MIB_BUSY);
+
+ mutex_unlock(&priv->reg_mutex);
+
+ if (ret)
+ goto exit;
+
+ ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
+
+exit:
+ mutex_unlock(&mib_eth_data->mutex);
+
+ return ret;
+}
+
static void
qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data)
@@ -1714,6 +2145,10 @@ qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
u32 hi = 0;
int ret;
+ if (priv->mgmt_master &&
+ qca8k_get_ethtool_stats_eth(ds, port, data) > 0)
+ return;
+
match_data = of_device_get_match_data(priv->dev);
for (i = 0; i < match_data->mib_count; i++) {
@@ -1812,7 +2247,8 @@ qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
static int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
int port_mask, cpu_port;
@@ -1963,7 +2399,8 @@ qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
static int
qca8k_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
u16 port_mask = BIT(port);
@@ -1973,7 +2410,8 @@ qca8k_port_fdb_add(struct dsa_switch *ds, int port,
static int
qca8k_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
u16 port_mask = BIT(port);
@@ -2010,7 +2448,8 @@ qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
static int
qca8k_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct qca8k_priv *priv = ds->priv;
const u8 *addr = mdb->addr;
@@ -2021,7 +2460,8 @@ qca8k_port_mdb_add(struct dsa_switch *ds, int port,
static int
qca8k_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct qca8k_priv *priv = ds->priv;
const u8 *addr = mdb->addr;
@@ -2033,7 +2473,7 @@ qca8k_port_mdb_del(struct dsa_switch *ds, int port,
static int
qca8k_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+ bool ingress, struct netlink_ext_ack *extack)
{
struct qca8k_priv *priv = ds->priv;
int monitor_port, ret;
@@ -2212,18 +2652,16 @@ qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
}
static bool
-qca8k_lag_can_offload(struct dsa_switch *ds,
- struct net_device *lag,
+qca8k_lag_can_offload(struct dsa_switch *ds, struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct dsa_port *dp;
- int id, members = 0;
+ int members = 0;
- id = dsa_lag_id(ds->dst, lag);
- if (id < 0 || id >= ds->num_lag_ids)
+ if (!lag.id)
return false;
- dsa_lag_foreach_port(dp, ds->dst, lag)
+ dsa_lag_foreach_port(dp, ds->dst, &lag)
/* Includes the port joining the LAG */
members++;
@@ -2241,16 +2679,14 @@ qca8k_lag_can_offload(struct dsa_switch *ds,
}
static int
-qca8k_lag_setup_hash(struct dsa_switch *ds,
- struct net_device *lag,
+qca8k_lag_setup_hash(struct dsa_switch *ds, struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
+ struct net_device *lag_dev = lag.dev;
struct qca8k_priv *priv = ds->priv;
bool unique_lag = true;
+ unsigned int i;
u32 hash = 0;
- int i, id;
-
- id = dsa_lag_id(ds->dst, lag);
switch (info->hash_type) {
case NETDEV_LAG_HASH_L23:
@@ -2267,7 +2703,7 @@ qca8k_lag_setup_hash(struct dsa_switch *ds,
/* Check if we are the unique configured LAG */
dsa_lags_foreach_id(i, ds->dst)
- if (i != id && dsa_lag_dev(ds->dst, i)) {
+ if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
unique_lag = false;
break;
}
@@ -2282,7 +2718,7 @@ qca8k_lag_setup_hash(struct dsa_switch *ds,
if (unique_lag) {
priv->lag_hash_mode = hash;
} else if (priv->lag_hash_mode != hash) {
- netdev_err(lag, "Error: Mismatched Hash Mode across different lag is not supported\n");
+ netdev_err(lag_dev, "Error: Mismatched Hash Mode across different lag is not supported\n");
return -EOPNOTSUPP;
}
@@ -2292,13 +2728,14 @@ qca8k_lag_setup_hash(struct dsa_switch *ds,
static int
qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
- struct net_device *lag, bool delete)
+ struct dsa_lag lag, bool delete)
{
struct qca8k_priv *priv = ds->priv;
int ret, id, i;
u32 val;
- id = dsa_lag_id(ds->dst, lag);
+ /* DSA LAG IDs are one-based, hardware is zero-based */
+ id = lag.id - 1;
/* Read current port member */
ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
@@ -2360,8 +2797,7 @@ qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
}
static int
-qca8k_port_lag_join(struct dsa_switch *ds, int port,
- struct net_device *lag,
+qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
int ret;
@@ -2378,11 +2814,265 @@ qca8k_port_lag_join(struct dsa_switch *ds, int port,
static int
qca8k_port_lag_leave(struct dsa_switch *ds, int port,
- struct net_device *lag)
+ struct dsa_lag lag)
{
return qca8k_lag_refresh_portmap(ds, port, lag, true);
}
+static void
+qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
+ bool operational)
+{
+ struct dsa_port *dp = master->dsa_ptr;
+ struct qca8k_priv *priv = ds->priv;
+
+ /* Ethernet MIB/MDIO is only supported for CPU port 0 */
+ if (dp->index != 0)
+ return;
+
+ mutex_lock(&priv->mgmt_eth_data.mutex);
+ mutex_lock(&priv->mib_eth_data.mutex);
+
+ priv->mgmt_master = operational ? (struct net_device *)master : NULL;
+
+ mutex_unlock(&priv->mib_eth_data.mutex);
+ mutex_unlock(&priv->mgmt_eth_data.mutex);
+}
+
+static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto)
+{
+ struct qca_tagger_data *tagger_data;
+
+ switch (proto) {
+ case DSA_TAG_PROTO_QCA:
+ tagger_data = ds->tagger_data;
+
+ tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
+ tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+qca8k_setup(struct dsa_switch *ds)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ int cpu_port, ret, i;
+ u32 mask;
+
+ cpu_port = qca8k_find_cpu_port(ds);
+ if (cpu_port < 0) {
+ dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
+ return cpu_port;
+ }
+
+ /* Parse CPU port config to be later used in phy_link mac_config */
+ ret = qca8k_parse_port_config(priv);
+ if (ret)
+ return ret;
+
+ ret = qca8k_setup_mdio_bus(priv);
+ if (ret)
+ return ret;
+
+ ret = qca8k_setup_of_pws_reg(priv);
+ if (ret)
+ return ret;
+
+ ret = qca8k_setup_mac_pwr_sel(priv);
+ if (ret)
+ return ret;
+
+ qca8k_setup_pcs(priv, &priv->pcs_port_0, 0);
+ qca8k_setup_pcs(priv, &priv->pcs_port_6, 6);
+
+ /* Make sure MAC06 is disabled */
+ ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
+ QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
+ if (ret) {
+ dev_err(priv->dev, "failed disabling MAC06 exchange");
+ return ret;
+ }
+
+ /* Enable CPU Port */
+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
+ if (ret) {
+ dev_err(priv->dev, "failed enabling CPU port");
+ return ret;
+ }
+
+ /* Enable MIB counters */
+ ret = qca8k_mib_init(priv);
+ if (ret)
+ dev_warn(priv->dev, "mib init failed");
+
+ /* Initial setup of all ports */
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ /* Disable forwarding by default on all ports */
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_MEMBER, 0);
+ if (ret)
+ return ret;
+
+ /* Enable QCA header mode on all cpu ports */
+ if (dsa_is_cpu_port(ds, i)) {
+ ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
+ FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
+ FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
+ if (ret) {
+ dev_err(priv->dev, "failed enabling QCA header mode");
+ return ret;
+ }
+ }
+
+ /* Disable MAC by default on all user ports */
+ if (dsa_is_user_port(ds, i))
+ qca8k_port_set_status(priv, i, 0);
+ }
+
+	/* Forward all unknown frames to the CPU port for Linux processing.
+	 * Notice that in a multi-cpu config only one port should be set
+	 * for igmp, unknown, multicast and broadcast packets.
+	 */
+ ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
+ if (ret)
+ return ret;
+
+ /* Setup connection between CPU port & user ports
+ * Configure specific switch configuration for ports
+ */
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ /* CPU port gets connected to all user ports of the switch */
+ if (dsa_is_cpu_port(ds, i)) {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
+ if (ret)
+ return ret;
+ }
+
+ /* Individual user ports get connected to CPU port only */
+ if (dsa_is_user_port(ds, i)) {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_MEMBER,
+ BIT(cpu_port));
+ if (ret)
+ return ret;
+
+ /* Enable ARP Auto-learning by default */
+ ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_LEARN);
+ if (ret)
+ return ret;
+
+ /* For port based vlans to work we need to set the
+ * default egress vid
+ */
+ ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
+ QCA8K_EGREES_VLAN_PORT_MASK(i),
+ QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
+ if (ret)
+ return ret;
+
+ ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
+ QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
+ QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
+ if (ret)
+ return ret;
+ }
+
+		/* Port 5 of the qca8337 has some problems in flood conditions. The
+		 * original legacy driver had some specific buffer and priority settings
+		 * for the different ports, suggested by the QCA switch team. Add these
+		 * missing settings to improve switch stability under load.
+		 * This problem is limited to the qca8337; other qca8k switches are not affected.
+		 */
+ if (priv->switch_id == QCA8K_ID_QCA8337) {
+ switch (i) {
+			/* The 2 CPU ports and port 5 require a different
+			 * priority than the other ports.
+			 */
+ case 0:
+ case 5:
+ case 6:
+ mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
+ QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
+ break;
+ default:
+ mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
+ QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
+ }
+ qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);
+
+ mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
+ QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_WRED_EN;
+ qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
+ QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
+ QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_WRED_EN,
+ mask);
+ }
+
+		/* Set the initial MTU for every port.
+		 * We only have a general MTU setting, so track
+		 * every port and set the max across all ports.
+		 * Set the per-port MTU to 1500 as the MTU change function
+		 * will add the overhead; if it is set to 1518 the
+		 * overhead would be applied again and we would end up with
+		 * an MTU of 1536 instead of 1518.
+		 */
+ priv->port_mtu[i] = ETH_DATA_LEN;
+ }
+
+	/* Special GLOBAL_FC_THRESH values are needed for the ar8327 switch */
+ if (priv->switch_id == QCA8K_ID_QCA8327) {
+ mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
+ QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
+ qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
+ QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
+ QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
+ mask);
+ }
+
+	/* Set up our port MTUs to match power-on defaults */
+ ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
+ if (ret)
+ dev_warn(priv->dev, "failed setting MTU settings");
+
+ /* Flush the FDB table */
+ qca8k_fdb_flush(priv);
+
+	/* Set the min and max ageing values supported */
+ ds->ageing_time_min = 7000;
+ ds->ageing_time_max = 458745000;
+
+ /* Set max number of LAGs supported */
+ ds->num_lag_ids = QCA8K_NUM_LAGS;
+
+ return 0;
+}
+
static const struct dsa_switch_ops qca8k_switch_ops = {
.get_tag_protocol = qca8k_get_tag_protocol,
.setup = qca8k_setup,
@@ -2410,14 +3100,16 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.port_vlan_filtering = qca8k_port_vlan_filtering,
.port_vlan_add = qca8k_port_vlan_add,
.port_vlan_del = qca8k_port_vlan_del,
- .phylink_validate = qca8k_phylink_validate,
- .phylink_mac_link_state = qca8k_phylink_mac_link_state,
+ .phylink_get_caps = qca8k_phylink_get_caps,
+ .phylink_mac_select_pcs = qca8k_phylink_mac_select_pcs,
.phylink_mac_config = qca8k_phylink_mac_config,
.phylink_mac_link_down = qca8k_phylink_mac_link_down,
.phylink_mac_link_up = qca8k_phylink_mac_link_up,
.get_phy_flags = qca8k_get_phy_flags,
.port_lag_join = qca8k_port_lag_join,
.port_lag_leave = qca8k_port_lag_leave,
+ .master_state_change = qca8k_master_change,
+ .connect_tag_protocol = qca8k_connect_tag_protocol,
};
static int qca8k_read_switch_id(struct qca8k_priv *priv)
@@ -2488,6 +3180,10 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
return PTR_ERR(priv->regmap);
}
+ priv->mdio_cache.page = 0xffff;
+ priv->mdio_cache.lo = 0xffff;
+ priv->mdio_cache.hi = 0xffff;
+
/* Check the detected switch id */
ret = qca8k_read_switch_id(priv);
if (ret)
@@ -2497,6 +3193,12 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
if (!priv->ds)
return -ENOMEM;
+ mutex_init(&priv->mgmt_eth_data.mutex);
+ init_completion(&priv->mgmt_eth_data.rw_done);
+
+ mutex_init(&priv->mib_eth_data.mutex);
+ init_completion(&priv->mib_eth_data.rw_done);
+
priv->ds->dev = &mdiodev->dev;
priv->ds->num_ports = QCA8K_NUM_PORTS;
priv->ds->priv = priv;
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index ab4a417b25a9..f375627174c8 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -11,6 +11,11 @@
#include <linux/delay.h>
#include <linux/regmap.h>
#include <linux/gpio.h>
+#include <linux/dsa/tag_qca.h>
+
+#define QCA8K_ETHERNET_MDIO_PRIORITY 7
+#define QCA8K_ETHERNET_PHY_PRIORITY 6
+#define QCA8K_ETHERNET_TIMEOUT 100
#define QCA8K_NUM_PORTS 7
#define QCA8K_NUM_CPU_PORTS 2
@@ -63,7 +68,7 @@
#define QCA8K_REG_MODULE_EN 0x030
#define QCA8K_MODULE_EN_MIB BIT(0)
#define QCA8K_REG_MIB 0x034
-#define QCA8K_MIB_FLUSH BIT(24)
+#define QCA8K_MIB_FUNC GENMASK(26, 24)
#define QCA8K_MIB_CPU_KEEP BIT(20)
#define QCA8K_MIB_BUSY BIT(17)
#define QCA8K_MDIO_MASTER_CTRL 0x3c
@@ -313,6 +318,12 @@ enum qca8k_vlan_cmd {
QCA8K_VLAN_READ = 6,
};
+enum qca8k_mid_cmd {
+ QCA8K_MIB_FLUSH = 1,
+ QCA8K_MIB_FLUSH_PORT = 2,
+ QCA8K_MIB_CAST = 3,
+};
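QCA8K_MIB_FUNC is the three-bit field (bits 26:24) that carries one of these commands when the MIB register is written. A hedged sketch of a flush request using kernel field helpers; the exact sequence the driver uses may differ:

	/* Illustrative: request a MIB flush and poll until the busy bit clears */
	u32 val = QCA8K_MIB_BUSY | QCA8K_MIB_CPU_KEEP |
		  FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH);

	qca8k_write(priv, QCA8K_REG_MIB, val);
	regmap_read_poll_timeout(priv->regmap, QCA8K_REG_MIB, val,
				 !(val & QCA8K_MIB_BUSY), 1000, 100000);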
+
struct ar8xxx_port_status {
int enabled;
};
@@ -328,6 +339,22 @@ enum {
QCA8K_CPU_PORT6,
};
+struct qca8k_mgmt_eth_data {
+ struct completion rw_done;
+ struct mutex mutex; /* Enforce one mdio read/write at a time */
+ bool ack;
+ u32 seq;
+ u32 data[4];
+};
+
+struct qca8k_mib_eth_data {
+ struct completion rw_done;
+ struct mutex mutex; /* Process one command at a time */
+ refcount_t port_parsed; /* Counter to track parsed ports */
+ u8 req_port;
+ u64 *data; /* pointer to ethtool data */
+};
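Both structures back the same request/response pattern over the tagger's Ethernet management frames: the caller serialises on the mutex, re-arms the completion, transmits a frame through the DSA master and sleeps until the tag_qca receive path fills in the data and signals completion. A rough sketch of the read side, where qca8k_build_mgmt_frame() is a hypothetical helper used only for illustration:

	/* Rough sketch of the Ethernet-based register read path. The
	 * frame-building helper is hypothetical; the completion is signalled
	 * from the tag_qca receive handler once the switch echoes the
	 * request back.
	 */
	static int qca8k_read_eth_sketch(struct qca8k_priv *priv, u32 reg, u32 *val)
	{
		struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
		struct sk_buff *skb;
		int ret = 0;

		mutex_lock(&mgmt_eth_data->mutex);
		reinit_completion(&mgmt_eth_data->rw_done);
		mgmt_eth_data->ack = false;
		mgmt_eth_data->seq++;

		skb = qca8k_build_mgmt_frame(priv, reg, mgmt_eth_data->seq); /* hypothetical */
		dev_queue_xmit(skb);

		if (!wait_for_completion_timeout(&mgmt_eth_data->rw_done,
						 msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT)) ||
		    !mgmt_eth_data->ack)
			ret = -ETIMEDOUT;	/* no reply from the switch */
		else
			*val = mgmt_eth_data->data[0];

		mutex_unlock(&mgmt_eth_data->mutex);
		return ret;
	}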
+
struct qca8k_ports_config {
bool sgmii_rx_clk_falling_edge;
bool sgmii_tx_clk_falling_edge;
@@ -336,6 +363,25 @@ struct qca8k_ports_config {
u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
};
+struct qca8k_mdio_cache {
+/* The 32-bit switch registers are accessed indirectly. To achieve this we need
+ * to set the page of the register. Track the last page that was set to reduce
+ * mdio writes.
+ */
+ u16 page;
+/* lo and hi can also be cached and, per the documentation, we can skip one
+ * extra mdio write if lo or hi didn't change.
+ */
+ u16 lo;
+ u16 hi;
+};
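Before each indirect access the cache is consulted so that the page, lo and hi writes are only issued when the new value actually differs from what the switch already holds. A minimal sketch of the check for the low word (the same pattern applies to hi and to the page register); the bus/phy variable names are illustrative:

	if (lo != priv->mdio_cache.lo) {
		bus->write(bus, phy_id, regnum, lo);	/* only when it changed */
		priv->mdio_cache.lo = lo;
	}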
+
+struct qca8k_pcs {
+ struct phylink_pcs pcs;
+ struct qca8k_priv *priv;
+ int port;
+};
+
struct qca8k_priv {
u8 switch_id;
u8 switch_revision;
@@ -353,6 +399,12 @@ struct qca8k_priv {
struct dsa_switch_ops ops;
struct gpio_desc *reset_gpio;
unsigned int port_mtu[QCA8K_NUM_PORTS];
+ struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */
+ struct qca8k_mgmt_eth_data mgmt_eth_data;
+ struct qca8k_mib_eth_data mib_eth_data;
+ struct qca8k_mdio_cache mdio_cache;
+ struct qca8k_pcs pcs_port_0;
+ struct qca8k_pcs pcs_port_6;
};
struct qca8k_mib_desc {
diff --git a/drivers/net/dsa/realtek-smi-core.c b/drivers/net/dsa/realtek-smi-core.c
deleted file mode 100644
index aae46ada8d83..000000000000
--- a/drivers/net/dsa/realtek-smi-core.c
+++ /dev/null
@@ -1,523 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/* Realtek Simple Management Interface (SMI) driver
- * It can be discussed how "simple" this interface is.
- *
- * The SMI protocol piggy-backs the MDIO MDC and MDIO signals levels
- * but the protocol is not MDIO at all. Instead it is a Realtek
- * pecularity that need to bit-bang the lines in a special way to
- * communicate with the switch.
- *
- * ASICs we intend to support with this driver:
- *
- * RTL8366 - The original version, apparently
- * RTL8369 - Similar enough to have the same datsheet as RTL8366
- * RTL8366RB - Probably reads out "RTL8366 revision B", has a quite
- * different register layout from the other two
- * RTL8366S - Is this "RTL8366 super"?
- * RTL8367 - Has an OpenWRT driver as well
- * RTL8368S - Seems to be an alternative name for RTL8366RB
- * RTL8370 - Also uses SMI
- *
- * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
- * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
- * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
- * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
- * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/spinlock.h>
-#include <linux/skbuff.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_mdio.h>
-#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include <linux/bitops.h>
-#include <linux/if_bridge.h>
-
-#include "realtek-smi-core.h"
-
-#define REALTEK_SMI_ACK_RETRY_COUNT 5
-#define REALTEK_SMI_HW_STOP_DELAY 25 /* msecs */
-#define REALTEK_SMI_HW_START_DELAY 100 /* msecs */
-
-static inline void realtek_smi_clk_delay(struct realtek_smi *smi)
-{
- ndelay(smi->clk_delay);
-}
-
-static void realtek_smi_start(struct realtek_smi *smi)
-{
- /* Set GPIO pins to output mode, with initial state:
- * SCK = 0, SDA = 1
- */
- gpiod_direction_output(smi->mdc, 0);
- gpiod_direction_output(smi->mdio, 1);
- realtek_smi_clk_delay(smi);
-
- /* CLK 1: 0 -> 1, 1 -> 0 */
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 0);
- realtek_smi_clk_delay(smi);
-
- /* CLK 2: */
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdio, 0);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 0);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdio, 1);
-}
-
-static void realtek_smi_stop(struct realtek_smi *smi)
-{
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdio, 0);
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdio, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 0);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 1);
-
- /* Add a click */
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 0);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 1);
-
- /* Set GPIO pins to input mode */
- gpiod_direction_input(smi->mdio);
- gpiod_direction_input(smi->mdc);
-}
-
-static void realtek_smi_write_bits(struct realtek_smi *smi, u32 data, u32 len)
-{
- for (; len > 0; len--) {
- realtek_smi_clk_delay(smi);
-
- /* Prepare data */
- gpiod_set_value(smi->mdio, !!(data & (1 << (len - 1))));
- realtek_smi_clk_delay(smi);
-
- /* Clocking */
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 0);
- }
-}
-
-static void realtek_smi_read_bits(struct realtek_smi *smi, u32 len, u32 *data)
-{
- gpiod_direction_input(smi->mdio);
-
- for (*data = 0; len > 0; len--) {
- u32 u;
-
- realtek_smi_clk_delay(smi);
-
- /* Clocking */
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- u = !!gpiod_get_value(smi->mdio);
- gpiod_set_value(smi->mdc, 0);
-
- *data |= (u << (len - 1));
- }
-
- gpiod_direction_output(smi->mdio, 0);
-}
-
-static int realtek_smi_wait_for_ack(struct realtek_smi *smi)
-{
- int retry_cnt;
-
- retry_cnt = 0;
- do {
- u32 ack;
-
- realtek_smi_read_bits(smi, 1, &ack);
- if (ack == 0)
- break;
-
- if (++retry_cnt > REALTEK_SMI_ACK_RETRY_COUNT) {
- dev_err(smi->dev, "ACK timeout\n");
- return -ETIMEDOUT;
- }
- } while (1);
-
- return 0;
-}
-
-static int realtek_smi_write_byte(struct realtek_smi *smi, u8 data)
-{
- realtek_smi_write_bits(smi, data, 8);
- return realtek_smi_wait_for_ack(smi);
-}
-
-static int realtek_smi_write_byte_noack(struct realtek_smi *smi, u8 data)
-{
- realtek_smi_write_bits(smi, data, 8);
- return 0;
-}
-
-static int realtek_smi_read_byte0(struct realtek_smi *smi, u8 *data)
-{
- u32 t;
-
- /* Read data */
- realtek_smi_read_bits(smi, 8, &t);
- *data = (t & 0xff);
-
- /* Send an ACK */
- realtek_smi_write_bits(smi, 0x00, 1);
-
- return 0;
-}
-
-static int realtek_smi_read_byte1(struct realtek_smi *smi, u8 *data)
-{
- u32 t;
-
- /* Read data */
- realtek_smi_read_bits(smi, 8, &t);
- *data = (t & 0xff);
-
- /* Send an ACK */
- realtek_smi_write_bits(smi, 0x01, 1);
-
- return 0;
-}
-
-static int realtek_smi_read_reg(struct realtek_smi *smi, u32 addr, u32 *data)
-{
- unsigned long flags;
- u8 lo = 0;
- u8 hi = 0;
- int ret;
-
- spin_lock_irqsave(&smi->lock, flags);
-
- realtek_smi_start(smi);
-
- /* Send READ command */
- ret = realtek_smi_write_byte(smi, smi->cmd_read);
- if (ret)
- goto out;
-
- /* Set ADDR[7:0] */
- ret = realtek_smi_write_byte(smi, addr & 0xff);
- if (ret)
- goto out;
-
- /* Set ADDR[15:8] */
- ret = realtek_smi_write_byte(smi, addr >> 8);
- if (ret)
- goto out;
-
- /* Read DATA[7:0] */
- realtek_smi_read_byte0(smi, &lo);
- /* Read DATA[15:8] */
- realtek_smi_read_byte1(smi, &hi);
-
- *data = ((u32)lo) | (((u32)hi) << 8);
-
- ret = 0;
-
- out:
- realtek_smi_stop(smi);
- spin_unlock_irqrestore(&smi->lock, flags);
-
- return ret;
-}
-
-static int realtek_smi_write_reg(struct realtek_smi *smi,
- u32 addr, u32 data, bool ack)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&smi->lock, flags);
-
- realtek_smi_start(smi);
-
- /* Send WRITE command */
- ret = realtek_smi_write_byte(smi, smi->cmd_write);
- if (ret)
- goto out;
-
- /* Set ADDR[7:0] */
- ret = realtek_smi_write_byte(smi, addr & 0xff);
- if (ret)
- goto out;
-
- /* Set ADDR[15:8] */
- ret = realtek_smi_write_byte(smi, addr >> 8);
- if (ret)
- goto out;
-
- /* Write DATA[7:0] */
- ret = realtek_smi_write_byte(smi, data & 0xff);
- if (ret)
- goto out;
-
- /* Write DATA[15:8] */
- if (ack)
- ret = realtek_smi_write_byte(smi, data >> 8);
- else
- ret = realtek_smi_write_byte_noack(smi, data >> 8);
- if (ret)
- goto out;
-
- ret = 0;
-
- out:
- realtek_smi_stop(smi);
- spin_unlock_irqrestore(&smi->lock, flags);
-
- return ret;
-}
-
-/* There is one single case when we need to use this accessor and that
- * is when issueing soft reset. Since the device reset as soon as we write
- * that bit, no ACK will come back for natural reasons.
- */
-int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr,
- u32 data)
-{
- return realtek_smi_write_reg(smi, addr, data, false);
-}
-EXPORT_SYMBOL_GPL(realtek_smi_write_reg_noack);
-
-/* Regmap accessors */
-
-static int realtek_smi_write(void *ctx, u32 reg, u32 val)
-{
- struct realtek_smi *smi = ctx;
-
- return realtek_smi_write_reg(smi, reg, val, true);
-}
-
-static int realtek_smi_read(void *ctx, u32 reg, u32 *val)
-{
- struct realtek_smi *smi = ctx;
-
- return realtek_smi_read_reg(smi, reg, val);
-}
-
-static const struct regmap_config realtek_smi_mdio_regmap_config = {
- .reg_bits = 10, /* A4..A0 R4..R0 */
- .val_bits = 16,
- .reg_stride = 1,
- /* PHY regs are at 0x8000 */
- .max_register = 0xffff,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
- .reg_read = realtek_smi_read,
- .reg_write = realtek_smi_write,
- .cache_type = REGCACHE_NONE,
-};
-
-static int realtek_smi_mdio_read(struct mii_bus *bus, int addr, int regnum)
-{
- struct realtek_smi *smi = bus->priv;
-
- return smi->ops->phy_read(smi, addr, regnum);
-}
-
-static int realtek_smi_mdio_write(struct mii_bus *bus, int addr, int regnum,
- u16 val)
-{
- struct realtek_smi *smi = bus->priv;
-
- return smi->ops->phy_write(smi, addr, regnum, val);
-}
-
-int realtek_smi_setup_mdio(struct realtek_smi *smi)
-{
- struct device_node *mdio_np;
- int ret;
-
- mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio");
- if (!mdio_np) {
- dev_err(smi->dev, "no MDIO bus node\n");
- return -ENODEV;
- }
-
- smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev);
- if (!smi->slave_mii_bus) {
- ret = -ENOMEM;
- goto err_put_node;
- }
- smi->slave_mii_bus->priv = smi;
- smi->slave_mii_bus->name = "SMI slave MII";
- smi->slave_mii_bus->read = realtek_smi_mdio_read;
- smi->slave_mii_bus->write = realtek_smi_mdio_write;
- snprintf(smi->slave_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d",
- smi->ds->index);
- smi->slave_mii_bus->dev.of_node = mdio_np;
- smi->slave_mii_bus->parent = smi->dev;
- smi->ds->slave_mii_bus = smi->slave_mii_bus;
-
- ret = devm_of_mdiobus_register(smi->dev, smi->slave_mii_bus, mdio_np);
- if (ret) {
- dev_err(smi->dev, "unable to register MDIO bus %s\n",
- smi->slave_mii_bus->id);
- goto err_put_node;
- }
-
- return 0;
-
-err_put_node:
- of_node_put(mdio_np);
-
- return ret;
-}
-
-static int realtek_smi_probe(struct platform_device *pdev)
-{
- const struct realtek_smi_variant *var;
- struct device *dev = &pdev->dev;
- struct realtek_smi *smi;
- struct device_node *np;
- int ret;
-
- var = of_device_get_match_data(dev);
- np = dev->of_node;
-
- smi = devm_kzalloc(dev, sizeof(*smi) + var->chip_data_sz, GFP_KERNEL);
- if (!smi)
- return -ENOMEM;
- smi->chip_data = (void *)smi + sizeof(*smi);
- smi->map = devm_regmap_init(dev, NULL, smi,
- &realtek_smi_mdio_regmap_config);
- if (IS_ERR(smi->map)) {
- ret = PTR_ERR(smi->map);
- dev_err(dev, "regmap init failed: %d\n", ret);
- return ret;
- }
-
- /* Link forward and backward */
- smi->dev = dev;
- smi->clk_delay = var->clk_delay;
- smi->cmd_read = var->cmd_read;
- smi->cmd_write = var->cmd_write;
- smi->ops = var->ops;
-
- dev_set_drvdata(dev, smi);
- spin_lock_init(&smi->lock);
-
- /* TODO: if power is software controlled, set up any regulators here */
-
- /* Assert then deassert RESET */
- smi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(smi->reset)) {
- dev_err(dev, "failed to get RESET GPIO\n");
- return PTR_ERR(smi->reset);
- }
- msleep(REALTEK_SMI_HW_STOP_DELAY);
- gpiod_set_value(smi->reset, 0);
- msleep(REALTEK_SMI_HW_START_DELAY);
- dev_info(dev, "deasserted RESET\n");
-
- /* Fetch MDIO pins */
- smi->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW);
- if (IS_ERR(smi->mdc))
- return PTR_ERR(smi->mdc);
- smi->mdio = devm_gpiod_get_optional(dev, "mdio", GPIOD_OUT_LOW);
- if (IS_ERR(smi->mdio))
- return PTR_ERR(smi->mdio);
-
- smi->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
-
- ret = smi->ops->detect(smi);
- if (ret) {
- dev_err(dev, "unable to detect switch\n");
- return ret;
- }
-
- smi->ds = devm_kzalloc(dev, sizeof(*smi->ds), GFP_KERNEL);
- if (!smi->ds)
- return -ENOMEM;
-
- smi->ds->dev = dev;
- smi->ds->num_ports = smi->num_ports;
- smi->ds->priv = smi;
-
- smi->ds->ops = var->ds_ops;
- ret = dsa_register_switch(smi->ds);
- if (ret) {
- dev_err_probe(dev, ret, "unable to register switch\n");
- return ret;
- }
- return 0;
-}
-
-static int realtek_smi_remove(struct platform_device *pdev)
-{
- struct realtek_smi *smi = platform_get_drvdata(pdev);
-
- if (!smi)
- return 0;
-
- dsa_unregister_switch(smi->ds);
- if (smi->slave_mii_bus)
- of_node_put(smi->slave_mii_bus->dev.of_node);
- gpiod_set_value(smi->reset, 1);
-
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
-static void realtek_smi_shutdown(struct platform_device *pdev)
-{
- struct realtek_smi *smi = platform_get_drvdata(pdev);
-
- if (!smi)
- return;
-
- dsa_switch_shutdown(smi->ds);
-
- platform_set_drvdata(pdev, NULL);
-}
-
-static const struct of_device_id realtek_smi_of_match[] = {
- {
- .compatible = "realtek,rtl8366rb",
- .data = &rtl8366rb_variant,
- },
- {
- /* FIXME: add support for RTL8366S and more */
- .compatible = "realtek,rtl8366s",
- .data = NULL,
- },
- {
- .compatible = "realtek,rtl8365mb",
- .data = &rtl8365mb_variant,
- },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
-
-static struct platform_driver realtek_smi_driver = {
- .driver = {
- .name = "realtek-smi",
- .of_match_table = of_match_ptr(realtek_smi_of_match),
- },
- .probe = realtek_smi_probe,
- .remove = realtek_smi_remove,
- .shutdown = realtek_smi_shutdown,
-};
-module_platform_driver(realtek_smi_driver);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig
new file mode 100644
index 000000000000..b7427a8292b2
--- /dev/null
+++ b/drivers/net/dsa/realtek/Kconfig
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig NET_DSA_REALTEK
+ tristate "Realtek Ethernet switch family support"
+ depends on NET_DSA
+ select FIXED_PHY
+ select IRQ_DOMAIN
+ select REALTEK_PHY
+ select REGMAP
+ help
+ Select to enable support for Realtek Ethernet switch chips.
+
+config NET_DSA_REALTEK_MDIO
+ tristate "Realtek MDIO connected switch driver"
+ depends on NET_DSA_REALTEK
+ help
+ Select to enable support for registering switches configured
+ through MDIO.
+
+config NET_DSA_REALTEK_SMI
+ tristate "Realtek SMI connected switch driver"
+ depends on NET_DSA_REALTEK
+ help
+ Select to enable support for registering switches connected
+ through SMI.
+
+config NET_DSA_REALTEK_RTL8365MB
+ tristate "Realtek RTL8365MB switch subdriver"
+ depends on NET_DSA_REALTEK
+ depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
+ select NET_DSA_TAG_RTL8_4
+ help
+ Select to enable support for Realtek RTL8365MB-VC and RTL8367S.
+
+config NET_DSA_REALTEK_RTL8366RB
+ tristate "Realtek RTL8366RB switch subdriver"
+ depends on NET_DSA_REALTEK
+ depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
+ select NET_DSA_TAG_RTL4_A
+ help
+ Select to enable support for Realtek RTL8366RB.
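Taken together, a board with an SMI-connected RTL8366RB would typically enable the core symbol plus one interface driver and one subdriver, e.g.:

	CONFIG_NET_DSA_REALTEK=m
	CONFIG_NET_DSA_REALTEK_SMI=m
	CONFIG_NET_DSA_REALTEK_RTL8366RB=m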
diff --git a/drivers/net/dsa/realtek/Makefile b/drivers/net/dsa/realtek/Makefile
new file mode 100644
index 000000000000..0aab57252a7c
--- /dev/null
+++ b/drivers/net/dsa/realtek/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_NET_DSA_REALTEK_MDIO) += realtek-mdio.o
+obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o
+obj-$(CONFIG_NET_DSA_REALTEK_RTL8366RB) += rtl8366.o
+rtl8366-objs := rtl8366-core.o rtl8366rb.o
+obj-$(CONFIG_NET_DSA_REALTEK_RTL8365MB) += rtl8365mb.o
diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c
new file mode 100644
index 000000000000..31e1f100e48e
--- /dev/null
+++ b/drivers/net/dsa/realtek/realtek-mdio.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Realtek MDIO interface driver
+ *
+ * ASICs we intend to support with this driver:
+ *
+ * RTL8366 - The original version, apparently
+ * RTL8369 - Similar enough to have the same datasheet as RTL8366
+ * RTL8366RB - Probably reads out "RTL8366 revision B", has a quite
+ * different register layout from the other two
+ * RTL8366S - Is this "RTL8366 super"?
+ * RTL8367 - Has an OpenWRT driver as well
+ * RTL8368S - Seems to be an alternative name for RTL8366RB
+ * RTL8370 - Also uses SMI
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
+ * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
+ * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include "realtek.h"
+
+/* Read/write via mdiobus */
+#define REALTEK_MDIO_CTRL0_REG 31
+#define REALTEK_MDIO_START_REG 29
+#define REALTEK_MDIO_CTRL1_REG 21
+#define REALTEK_MDIO_ADDRESS_REG 23
+#define REALTEK_MDIO_DATA_WRITE_REG 24
+#define REALTEK_MDIO_DATA_READ_REG 25
+
+#define REALTEK_MDIO_START_OP 0xFFFF
+#define REALTEK_MDIO_ADDR_OP 0x000E
+#define REALTEK_MDIO_READ_OP 0x0001
+#define REALTEK_MDIO_WRITE_OP 0x0003
+
+static int realtek_mdio_write(void *ctx, u32 reg, u32 val)
+{
+ struct realtek_priv *priv = ctx;
+ struct mii_bus *bus = priv->bus;
+ int ret;
+
+ mutex_lock(&bus->mdio_lock);
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL0_REG, REALTEK_MDIO_ADDR_OP);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_ADDRESS_REG, reg);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_DATA_WRITE_REG, val);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL1_REG, REALTEK_MDIO_WRITE_OP);
+
+out_unlock:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int realtek_mdio_read(void *ctx, u32 reg, u32 *val)
+{
+ struct realtek_priv *priv = ctx;
+ struct mii_bus *bus = priv->bus;
+ int ret;
+
+ mutex_lock(&bus->mdio_lock);
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL0_REG, REALTEK_MDIO_ADDR_OP);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_ADDRESS_REG, reg);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL1_REG, REALTEK_MDIO_READ_OP);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->read(bus, priv->mdio_addr, REALTEK_MDIO_DATA_READ_REG);
+ if (ret >= 0) {
+ *val = ret;
+ ret = 0;
+ }
+
+out_unlock:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static void realtek_mdio_lock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_lock(&priv->map_lock);
+}
+
+static void realtek_mdio_unlock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_unlock(&priv->map_lock);
+}
+
+static const struct regmap_config realtek_mdio_regmap_config = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ /* PHY regs are at 0x8000 */
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = realtek_mdio_read,
+ .reg_write = realtek_mdio_write,
+ .cache_type = REGCACHE_NONE,
+ .lock = realtek_mdio_lock,
+ .unlock = realtek_mdio_unlock,
+};
+
+static const struct regmap_config realtek_mdio_nolock_regmap_config = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ /* PHY regs are at 0x8000 */
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = realtek_mdio_read,
+ .reg_write = realtek_mdio_write,
+ .cache_type = REGCACHE_NONE,
+ .disable_locking = true,
+};
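Two regmaps are built over the same read/write callbacks: the default one wraps every access in map_lock via the lock/unlock callbacks, while the nolock variant disables regmap's own locking and is meant for multi-register sequences where the caller already holds map_lock (as the rtl8365mb PHY OCP helpers further down do), so the whole sequence stays atomic with respect to other register traffic. The intended pattern, roughly (REG_A/REG_B are placeholder register names):

	mutex_lock(&priv->map_lock);
	ret = regmap_write(priv->map_nolock, REG_A, val_a);	/* step 1 */
	if (!ret)
		ret = regmap_read(priv->map_nolock, REG_B, &val_b);	/* step 2, not interleaved */
	mutex_unlock(&priv->map_lock);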
+
+static int realtek_mdio_probe(struct mdio_device *mdiodev)
+{
+ struct realtek_priv *priv;
+ struct device *dev = &mdiodev->dev;
+ const struct realtek_variant *var;
+ struct regmap_config rc;
+ struct device_node *np;
+ int ret;
+
+ var = of_device_get_match_data(dev);
+ if (!var)
+ return -EINVAL;
+
+ priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->map_lock);
+
+ rc = realtek_mdio_regmap_config;
+ rc.lock_arg = priv;
+ priv->map = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map)) {
+ ret = PTR_ERR(priv->map);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ rc = realtek_mdio_nolock_regmap_config;
+ priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map_nolock)) {
+ ret = PTR_ERR(priv->map_nolock);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ priv->mdio_addr = mdiodev->addr;
+ priv->bus = mdiodev->bus;
+ priv->dev = &mdiodev->dev;
+ priv->chip_data = (void *)priv + sizeof(*priv);
+
+ priv->clk_delay = var->clk_delay;
+ priv->cmd_read = var->cmd_read;
+ priv->cmd_write = var->cmd_write;
+ priv->ops = var->ops;
+
+ priv->write_reg_noack = realtek_mdio_write;
+
+ np = dev->of_node;
+
+ dev_set_drvdata(dev, priv);
+
+ /* TODO: if power is software controlled, set up any regulators here */
+ priv->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
+
+ priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset)) {
+ dev_err(dev, "failed to get RESET GPIO\n");
+ return PTR_ERR(priv->reset);
+ }
+
+ if (priv->reset) {
+ gpiod_set_value(priv->reset, 1);
+ dev_dbg(dev, "asserted RESET\n");
+ msleep(REALTEK_HW_STOP_DELAY);
+ gpiod_set_value(priv->reset, 0);
+ msleep(REALTEK_HW_START_DELAY);
+ dev_dbg(dev, "deasserted RESET\n");
+ }
+
+ ret = priv->ops->detect(priv);
+ if (ret) {
+ dev_err(dev, "unable to detect switch\n");
+ return ret;
+ }
+
+ priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
+ priv->ds->dev = dev;
+ priv->ds->num_ports = priv->num_ports;
+ priv->ds->priv = priv;
+ priv->ds->ops = var->ds_ops_mdio;
+
+ ret = dsa_register_switch(priv->ds);
+ if (ret) {
+ dev_err(priv->dev, "unable to register switch ret = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void realtek_mdio_remove(struct mdio_device *mdiodev)
+{
+ struct realtek_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dsa_unregister_switch(priv->ds);
+
+ /* leave the device reset asserted */
+ if (priv->reset)
+ gpiod_set_value(priv->reset, 1);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void realtek_mdio_shutdown(struct mdio_device *mdiodev)
+{
+ struct realtek_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static const struct of_device_id realtek_mdio_of_match[] = {
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB)
+ { .compatible = "realtek,rtl8366rb", .data = &rtl8366rb_variant, },
+#endif
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
+ { .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant, },
+ { .compatible = "realtek,rtl8367s", .data = &rtl8365mb_variant, },
+#endif
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, realtek_mdio_of_match);
+
+static struct mdio_driver realtek_mdio_driver = {
+ .mdiodrv.driver = {
+ .name = "realtek-mdio",
+ .of_match_table = of_match_ptr(realtek_mdio_of_match),
+ },
+ .probe = realtek_mdio_probe,
+ .remove = realtek_mdio_remove,
+ .shutdown = realtek_mdio_shutdown,
+};
+
+mdio_module_driver(realtek_mdio_driver);
+
+MODULE_AUTHOR("Luiz Angelo Daros de Luca <luizluca@gmail.com>");
+MODULE_DESCRIPTION("Driver for Realtek ethernet switch connected via MDIO interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c
new file mode 100644
index 000000000000..2243d3da55b2
--- /dev/null
+++ b/drivers/net/dsa/realtek/realtek-smi.c
@@ -0,0 +1,581 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Realtek Simple Management Interface (SMI) driver
+ * It can be discussed how "simple" this interface is.
+ *
+ * The SMI protocol piggy-backs on the MDIO MDC and MDIO signal levels,
+ * but the protocol is not MDIO at all. Instead it is a Realtek
+ * peculiarity that needs to bit-bang the lines in a special way to
+ * communicate with the switch.
+ *
+ * ASICs we intend to support with this driver:
+ *
+ * RTL8366 - The original version, apparently
+ * RTL8369 - Similar enough to have the same datasheet as RTL8366
+ * RTL8366RB - Probably reads out "RTL8366 revision B", has a quite
+ * different register layout from the other two
+ * RTL8366S - Is this "RTL8366 super"?
+ * RTL8367 - Has an OpenWRT driver as well
+ * RTL8368S - Seems to be an alternative name for RTL8366RB
+ * RTL8370 - Also uses SMI
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
+ * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
+ * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/bitops.h>
+#include <linux/if_bridge.h>
+
+#include "realtek.h"
+
+#define REALTEK_SMI_ACK_RETRY_COUNT 5
+
+static inline void realtek_smi_clk_delay(struct realtek_priv *priv)
+{
+ ndelay(priv->clk_delay);
+}
+
+static void realtek_smi_start(struct realtek_priv *priv)
+{
+ /* Set GPIO pins to output mode, with initial state:
+ * SCK = 0, SDA = 1
+ */
+ gpiod_direction_output(priv->mdc, 0);
+ gpiod_direction_output(priv->mdio, 1);
+ realtek_smi_clk_delay(priv);
+
+ /* CLK 1: 0 -> 1, 1 -> 0 */
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ realtek_smi_clk_delay(priv);
+
+ /* CLK 2: */
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdio, 0);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdio, 1);
+}
+
+static void realtek_smi_stop(struct realtek_priv *priv)
+{
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdio, 0);
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdio, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 1);
+
+ /* Add a click */
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 1);
+
+ /* Set GPIO pins to input mode */
+ gpiod_direction_input(priv->mdio);
+ gpiod_direction_input(priv->mdc);
+}
+
+static void realtek_smi_write_bits(struct realtek_priv *priv, u32 data, u32 len)
+{
+ for (; len > 0; len--) {
+ realtek_smi_clk_delay(priv);
+
+ /* Prepare data */
+ gpiod_set_value(priv->mdio, !!(data & (1 << (len - 1))));
+ realtek_smi_clk_delay(priv);
+
+ /* Clocking */
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ }
+}
+
+static void realtek_smi_read_bits(struct realtek_priv *priv, u32 len, u32 *data)
+{
+ gpiod_direction_input(priv->mdio);
+
+ for (*data = 0; len > 0; len--) {
+ u32 u;
+
+ realtek_smi_clk_delay(priv);
+
+ /* Clocking */
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ u = !!gpiod_get_value(priv->mdio);
+ gpiod_set_value(priv->mdc, 0);
+
+ *data |= (u << (len - 1));
+ }
+
+ gpiod_direction_output(priv->mdio, 0);
+}
+
+static int realtek_smi_wait_for_ack(struct realtek_priv *priv)
+{
+ int retry_cnt;
+
+ retry_cnt = 0;
+ do {
+ u32 ack;
+
+ realtek_smi_read_bits(priv, 1, &ack);
+ if (ack == 0)
+ break;
+
+ if (++retry_cnt > REALTEK_SMI_ACK_RETRY_COUNT) {
+ dev_err(priv->dev, "ACK timeout\n");
+ return -ETIMEDOUT;
+ }
+ } while (1);
+
+ return 0;
+}
+
+static int realtek_smi_write_byte(struct realtek_priv *priv, u8 data)
+{
+ realtek_smi_write_bits(priv, data, 8);
+ return realtek_smi_wait_for_ack(priv);
+}
+
+static int realtek_smi_write_byte_noack(struct realtek_priv *priv, u8 data)
+{
+ realtek_smi_write_bits(priv, data, 8);
+ return 0;
+}
+
+static int realtek_smi_read_byte0(struct realtek_priv *priv, u8 *data)
+{
+ u32 t;
+
+ /* Read data */
+ realtek_smi_read_bits(priv, 8, &t);
+ *data = (t & 0xff);
+
+ /* Send an ACK */
+ realtek_smi_write_bits(priv, 0x00, 1);
+
+ return 0;
+}
+
+static int realtek_smi_read_byte1(struct realtek_priv *priv, u8 *data)
+{
+ u32 t;
+
+ /* Read data */
+ realtek_smi_read_bits(priv, 8, &t);
+ *data = (t & 0xff);
+
+ /* Send an ACK */
+ realtek_smi_write_bits(priv, 0x01, 1);
+
+ return 0;
+}
+
+static int realtek_smi_read_reg(struct realtek_priv *priv, u32 addr, u32 *data)
+{
+ unsigned long flags;
+ u8 lo = 0;
+ u8 hi = 0;
+ int ret;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ realtek_smi_start(priv);
+
+ /* Send READ command */
+ ret = realtek_smi_write_byte(priv, priv->cmd_read);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[7:0] */
+ ret = realtek_smi_write_byte(priv, addr & 0xff);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[15:8] */
+ ret = realtek_smi_write_byte(priv, addr >> 8);
+ if (ret)
+ goto out;
+
+ /* Read DATA[7:0] */
+ realtek_smi_read_byte0(priv, &lo);
+ /* Read DATA[15:8] */
+ realtek_smi_read_byte1(priv, &hi);
+
+ *data = ((u32)lo) | (((u32)hi) << 8);
+
+ ret = 0;
+
+ out:
+ realtek_smi_stop(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
+}
+
+static int realtek_smi_write_reg(struct realtek_priv *priv,
+ u32 addr, u32 data, bool ack)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ realtek_smi_start(priv);
+
+ /* Send WRITE command */
+ ret = realtek_smi_write_byte(priv, priv->cmd_write);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[7:0] */
+ ret = realtek_smi_write_byte(priv, addr & 0xff);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[15:8] */
+ ret = realtek_smi_write_byte(priv, addr >> 8);
+ if (ret)
+ goto out;
+
+ /* Write DATA[7:0] */
+ ret = realtek_smi_write_byte(priv, data & 0xff);
+ if (ret)
+ goto out;
+
+ /* Write DATA[15:8] */
+ if (ack)
+ ret = realtek_smi_write_byte(priv, data >> 8);
+ else
+ ret = realtek_smi_write_byte_noack(priv, data >> 8);
+ if (ret)
+ goto out;
+
+ ret = 0;
+
+ out:
+ realtek_smi_stop(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
+}
+
+/* There is one single case when we need to use this accessor and that
+ * is when issuing a soft reset. Since the device resets as soon as we write
+ * that bit, no ACK will come back for natural reasons.
+ */
+static int realtek_smi_write_reg_noack(void *ctx, u32 reg, u32 val)
+{
+ return realtek_smi_write_reg(ctx, reg, val, false);
+}
+
+/* Regmap accessors */
+
+static int realtek_smi_write(void *ctx, u32 reg, u32 val)
+{
+ struct realtek_priv *priv = ctx;
+
+ return realtek_smi_write_reg(priv, reg, val, true);
+}
+
+static int realtek_smi_read(void *ctx, u32 reg, u32 *val)
+{
+ struct realtek_priv *priv = ctx;
+
+ return realtek_smi_read_reg(priv, reg, val);
+}
+
+static void realtek_smi_lock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_lock(&priv->map_lock);
+}
+
+static void realtek_smi_unlock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_unlock(&priv->map_lock);
+}
+
+static const struct regmap_config realtek_smi_regmap_config = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ /* PHY regs are at 0x8000 */
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = realtek_smi_read,
+ .reg_write = realtek_smi_write,
+ .cache_type = REGCACHE_NONE,
+ .lock = realtek_smi_lock,
+ .unlock = realtek_smi_unlock,
+};
+
+static const struct regmap_config realtek_smi_nolock_regmap_config = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ /* PHY regs are at 0x8000 */
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = realtek_smi_read,
+ .reg_write = realtek_smi_write,
+ .cache_type = REGCACHE_NONE,
+ .disable_locking = true,
+};
+
+static int realtek_smi_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct realtek_priv *priv = bus->priv;
+
+ return priv->ops->phy_read(priv, addr, regnum);
+}
+
+static int realtek_smi_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct realtek_priv *priv = bus->priv;
+
+ return priv->ops->phy_write(priv, addr, regnum, val);
+}
+
+static int realtek_smi_setup_mdio(struct dsa_switch *ds)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct device_node *mdio_np;
+ int ret;
+
+ mdio_np = of_get_compatible_child(priv->dev->of_node, "realtek,smi-mdio");
+ if (!mdio_np) {
+ dev_err(priv->dev, "no MDIO bus node\n");
+ return -ENODEV;
+ }
+
+ priv->slave_mii_bus = devm_mdiobus_alloc(priv->dev);
+ if (!priv->slave_mii_bus) {
+ ret = -ENOMEM;
+ goto err_put_node;
+ }
+ priv->slave_mii_bus->priv = priv;
+ priv->slave_mii_bus->name = "SMI slave MII";
+ priv->slave_mii_bus->read = realtek_smi_mdio_read;
+ priv->slave_mii_bus->write = realtek_smi_mdio_write;
+ snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d",
+ ds->index);
+ priv->slave_mii_bus->dev.of_node = mdio_np;
+ priv->slave_mii_bus->parent = priv->dev;
+ ds->slave_mii_bus = priv->slave_mii_bus;
+
+ ret = devm_of_mdiobus_register(priv->dev, priv->slave_mii_bus, mdio_np);
+ if (ret) {
+ dev_err(priv->dev, "unable to register MDIO bus %s\n",
+ priv->slave_mii_bus->id);
+ goto err_put_node;
+ }
+
+ return 0;
+
+err_put_node:
+ of_node_put(mdio_np);
+
+ return ret;
+}
+
+static int realtek_smi_probe(struct platform_device *pdev)
+{
+ const struct realtek_variant *var;
+ struct device *dev = &pdev->dev;
+ struct realtek_priv *priv;
+ struct regmap_config rc;
+ struct device_node *np;
+ int ret;
+
+ var = of_device_get_match_data(dev);
+ np = dev->of_node;
+
+ priv = devm_kzalloc(dev, sizeof(*priv) + var->chip_data_sz, GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->chip_data = (void *)priv + sizeof(*priv);
+
+ mutex_init(&priv->map_lock);
+
+ rc = realtek_smi_regmap_config;
+ rc.lock_arg = priv;
+ priv->map = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map)) {
+ ret = PTR_ERR(priv->map);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ rc = realtek_smi_nolock_regmap_config;
+ priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map_nolock)) {
+ ret = PTR_ERR(priv->map_nolock);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Link forward and backward */
+ priv->dev = dev;
+ priv->clk_delay = var->clk_delay;
+ priv->cmd_read = var->cmd_read;
+ priv->cmd_write = var->cmd_write;
+ priv->ops = var->ops;
+
+ priv->setup_interface = realtek_smi_setup_mdio;
+ priv->write_reg_noack = realtek_smi_write_reg_noack;
+
+ dev_set_drvdata(dev, priv);
+ spin_lock_init(&priv->lock);
+
+ /* TODO: if power is software controlled, set up any regulators here */
+
+ priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset)) {
+ dev_err(dev, "failed to get RESET GPIO\n");
+ return PTR_ERR(priv->reset);
+ }
+ if (priv->reset) {
+ gpiod_set_value(priv->reset, 1);
+ dev_dbg(dev, "asserted RESET\n");
+ msleep(REALTEK_HW_STOP_DELAY);
+ gpiod_set_value(priv->reset, 0);
+ msleep(REALTEK_HW_START_DELAY);
+ dev_dbg(dev, "deasserted RESET\n");
+ }
+
+ /* Fetch MDIO pins */
+ priv->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->mdc))
+ return PTR_ERR(priv->mdc);
+ priv->mdio = devm_gpiod_get_optional(dev, "mdio", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->mdio))
+ return PTR_ERR(priv->mdio);
+
+ priv->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
+
+ ret = priv->ops->detect(priv);
+ if (ret) {
+ dev_err(dev, "unable to detect switch\n");
+ return ret;
+ }
+
+ priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
+ priv->ds->dev = dev;
+ priv->ds->num_ports = priv->num_ports;
+ priv->ds->priv = priv;
+
+ priv->ds->ops = var->ds_ops_smi;
+ ret = dsa_register_switch(priv->ds);
+ if (ret) {
+ dev_err_probe(dev, ret, "unable to register switch\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int realtek_smi_remove(struct platform_device *pdev)
+{
+ struct realtek_priv *priv = platform_get_drvdata(pdev);
+
+ if (!priv)
+ return 0;
+
+ dsa_unregister_switch(priv->ds);
+ if (priv->slave_mii_bus)
+ of_node_put(priv->slave_mii_bus->dev.of_node);
+
+ /* leave the device reset asserted */
+ if (priv->reset)
+ gpiod_set_value(priv->reset, 1);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static void realtek_smi_shutdown(struct platform_device *pdev)
+{
+ struct realtek_priv *priv = platform_get_drvdata(pdev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static const struct of_device_id realtek_smi_of_match[] = {
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB)
+ {
+ .compatible = "realtek,rtl8366rb",
+ .data = &rtl8366rb_variant,
+ },
+#endif
+ {
+ /* FIXME: add support for RTL8366S and more */
+ .compatible = "realtek,rtl8366s",
+ .data = NULL,
+ },
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
+ {
+ .compatible = "realtek,rtl8365mb",
+ .data = &rtl8365mb_variant,
+ },
+ {
+ .compatible = "realtek,rtl8367s",
+ .data = &rtl8365mb_variant,
+ },
+#endif
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
+
+static struct platform_driver realtek_smi_driver = {
+ .driver = {
+ .name = "realtek-smi",
+ .of_match_table = of_match_ptr(realtek_smi_of_match),
+ },
+ .probe = realtek_smi_probe,
+ .remove = realtek_smi_remove,
+ .shutdown = realtek_smi_shutdown,
+};
+module_platform_driver(realtek_smi_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Driver for Realtek ethernet switch connected via SMI interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek/realtek.h
index 5bfa53e2480a..4fa7c6ba874a 100644
--- a/drivers/net/dsa/realtek-smi-core.h
+++ b/drivers/net/dsa/realtek/realtek.h
@@ -5,15 +5,18 @@
* Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
*/
-#ifndef _REALTEK_SMI_H
-#define _REALTEK_SMI_H
+#ifndef _REALTEK_H
+#define _REALTEK_H
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <net/dsa.h>
-struct realtek_smi_ops;
+#define REALTEK_HW_STOP_DELAY 25 /* msecs */
+#define REALTEK_HW_START_DELAY 100 /* msecs */
+
+struct realtek_ops;
struct dentry;
struct inode;
struct file;
@@ -25,7 +28,7 @@ struct rtl8366_mib_counter {
const char *name;
};
-/**
+/*
* struct rtl8366_vlan_mc - Virtual LAN member configuration
*/
struct rtl8366_vlan_mc {
@@ -43,13 +46,17 @@ struct rtl8366_vlan_4k {
u8 fid;
};
-struct realtek_smi {
+struct realtek_priv {
struct device *dev;
struct gpio_desc *reset;
struct gpio_desc *mdc;
struct gpio_desc *mdio;
struct regmap *map;
+ struct regmap *map_nolock;
+ struct mutex map_lock;
struct mii_bus *slave_mii_bus;
+ struct mii_bus *bus;
+ int mdio_addr;
unsigned int clk_delay;
u8 cmd_read;
@@ -65,7 +72,9 @@ struct realtek_smi {
unsigned int num_mib_counters;
struct rtl8366_mib_counter *mib_counters;
- const struct realtek_smi_ops *ops;
+ const struct realtek_ops *ops;
+ int (*setup_interface)(struct dsa_switch *ds);
+ int (*write_reg_noack)(void *ctx, u32 addr, u32 data);
int vlan_enabled;
int vlan4k_enabled;
@@ -74,61 +83,57 @@ struct realtek_smi {
void *chip_data; /* Per-chip extra variant data */
};
-/**
- * struct realtek_smi_ops - vtable for the per-SMI-chiptype operations
+/*
+ * struct realtek_ops - vtable for the per-chiptype operations
* @detect: detects the chiptype
*/
-struct realtek_smi_ops {
- int (*detect)(struct realtek_smi *smi);
- int (*reset_chip)(struct realtek_smi *smi);
- int (*setup)(struct realtek_smi *smi);
- void (*cleanup)(struct realtek_smi *smi);
- int (*get_mib_counter)(struct realtek_smi *smi,
+struct realtek_ops {
+ int (*detect)(struct realtek_priv *priv);
+ int (*reset_chip)(struct realtek_priv *priv);
+ int (*setup)(struct realtek_priv *priv);
+ void (*cleanup)(struct realtek_priv *priv);
+ int (*get_mib_counter)(struct realtek_priv *priv,
int port,
struct rtl8366_mib_counter *mib,
u64 *mibvalue);
- int (*get_vlan_mc)(struct realtek_smi *smi, u32 index,
+ int (*get_vlan_mc)(struct realtek_priv *priv, u32 index,
struct rtl8366_vlan_mc *vlanmc);
- int (*set_vlan_mc)(struct realtek_smi *smi, u32 index,
+ int (*set_vlan_mc)(struct realtek_priv *priv, u32 index,
const struct rtl8366_vlan_mc *vlanmc);
- int (*get_vlan_4k)(struct realtek_smi *smi, u32 vid,
+ int (*get_vlan_4k)(struct realtek_priv *priv, u32 vid,
struct rtl8366_vlan_4k *vlan4k);
- int (*set_vlan_4k)(struct realtek_smi *smi,
+ int (*set_vlan_4k)(struct realtek_priv *priv,
const struct rtl8366_vlan_4k *vlan4k);
- int (*get_mc_index)(struct realtek_smi *smi, int port, int *val);
- int (*set_mc_index)(struct realtek_smi *smi, int port, int index);
- bool (*is_vlan_valid)(struct realtek_smi *smi, unsigned int vlan);
- int (*enable_vlan)(struct realtek_smi *smi, bool enable);
- int (*enable_vlan4k)(struct realtek_smi *smi, bool enable);
- int (*enable_port)(struct realtek_smi *smi, int port, bool enable);
- int (*phy_read)(struct realtek_smi *smi, int phy, int regnum);
- int (*phy_write)(struct realtek_smi *smi, int phy, int regnum,
+ int (*get_mc_index)(struct realtek_priv *priv, int port, int *val);
+ int (*set_mc_index)(struct realtek_priv *priv, int port, int index);
+ bool (*is_vlan_valid)(struct realtek_priv *priv, unsigned int vlan);
+ int (*enable_vlan)(struct realtek_priv *priv, bool enable);
+ int (*enable_vlan4k)(struct realtek_priv *priv, bool enable);
+ int (*enable_port)(struct realtek_priv *priv, int port, bool enable);
+ int (*phy_read)(struct realtek_priv *priv, int phy, int regnum);
+ int (*phy_write)(struct realtek_priv *priv, int phy, int regnum,
u16 val);
};
-struct realtek_smi_variant {
- const struct dsa_switch_ops *ds_ops;
- const struct realtek_smi_ops *ops;
+struct realtek_variant {
+ const struct dsa_switch_ops *ds_ops_smi;
+ const struct dsa_switch_ops *ds_ops_mdio;
+ const struct realtek_ops *ops;
unsigned int clk_delay;
u8 cmd_read;
u8 cmd_write;
size_t chip_data_sz;
};
-/* SMI core calls */
-int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr,
- u32 data);
-int realtek_smi_setup_mdio(struct realtek_smi *smi);
-
/* RTL8366 library helpers */
-int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used);
-int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
+int rtl8366_mc_is_used(struct realtek_priv *priv, int mc_index, int *used);
+int rtl8366_set_vlan(struct realtek_priv *priv, int vid, u32 member,
u32 untag, u32 fid);
-int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
+int rtl8366_set_pvid(struct realtek_priv *priv, unsigned int port,
unsigned int vid);
-int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
-int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable);
-int rtl8366_reset_vlan(struct realtek_smi *smi);
+int rtl8366_enable_vlan4k(struct realtek_priv *priv, bool enable);
+int rtl8366_enable_vlan(struct realtek_priv *priv, bool enable);
+int rtl8366_reset_vlan(struct realtek_priv *priv);
int rtl8366_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack);
@@ -139,7 +144,7 @@ void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset);
void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
-extern const struct realtek_smi_variant rtl8366rb_variant;
-extern const struct realtek_smi_variant rtl8365mb_variant;
+extern const struct realtek_variant rtl8366rb_variant;
+extern const struct realtek_variant rtl8365mb_variant;
-#endif /* _REALTEK_SMI_H */
+#endif /* _REALTEK_H */
diff --git a/drivers/net/dsa/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
index 3b729544798b..3d70e8a77ecf 100644
--- a/drivers/net/dsa/rtl8365mb.c
+++ b/drivers/net/dsa/realtek/rtl8365mb.c
@@ -99,18 +99,28 @@
#include <linux/regmap.h>
#include <linux/if_bridge.h>
-#include "realtek-smi-core.h"
+#include "realtek.h"
/* Chip-specific data and limits */
-#define RTL8365MB_CHIP_ID_8365MB_VC 0x6367
-#define RTL8365MB_CPU_PORT_NUM_8365MB_VC 6
-#define RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC 2112
+#define RTL8365MB_CHIP_ID_8365MB_VC 0x6367
+#define RTL8365MB_CHIP_VER_8365MB_VC 0x0040
+
+#define RTL8365MB_CHIP_ID_8367S 0x6367
+#define RTL8365MB_CHIP_VER_8367S 0x00A0
+
+#define RTL8365MB_CHIP_ID_8367RB 0x6367
+#define RTL8365MB_CHIP_VER_8367RB 0x0020
/* Family-specific data and limits */
-#define RTL8365MB_PHYADDRMAX 7
-#define RTL8365MB_NUM_PHYREGS 32
-#define RTL8365MB_PHYREGMAX (RTL8365MB_NUM_PHYREGS - 1)
-#define RTL8365MB_MAX_NUM_PORTS (RTL8365MB_CPU_PORT_NUM_8365MB_VC + 1)
+#define RTL8365MB_PHYADDRMAX 7
+#define RTL8365MB_NUM_PHYREGS 32
+#define RTL8365MB_PHYREGMAX (RTL8365MB_NUM_PHYREGS - 1)
+/* RTL8370MB and RTL8310SR, possibly supportable by this driver, have 10 ports */
+#define RTL8365MB_MAX_NUM_PORTS 10
+#define RTL8365MB_LEARN_LIMIT_MAX 2112
+
+/* valid for all variants with 6 or fewer ports */
+static const int rtl8365mb_extint_port_map[] = { -1, -1, -1, -1, -1, -1, 1, 2, -1, -1};
/* Chip identification registers */
#define RTL8365MB_CHIP_ID_REG 0x1300
@@ -191,7 +201,7 @@
/* The PHY OCP addresses of PHY registers 0~31 start here */
#define RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE 0xA400
-/* EXT port interface mode values - used in DIGITAL_INTERFACE_SELECT */
+/* EXT interface port mode values - used in DIGITAL_INTERFACE_SELECT */
#define RTL8365MB_EXT_PORT_MODE_DISABLE 0
#define RTL8365MB_EXT_PORT_MODE_RGMII 1
#define RTL8365MB_EXT_PORT_MODE_MII_MAC 2
@@ -207,39 +217,56 @@
#define RTL8365MB_EXT_PORT_MODE_1000X 12
#define RTL8365MB_EXT_PORT_MODE_100FX 13
-/* EXT port interface mode configuration registers 0~1 */
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 0x13C3
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(_extport) \
- (RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 + \
- ((_extport) >> 1) * (0x13C3 - 0x1305))
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(_extport) \
- (0xF << (((_extport) % 2)))
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(_extport) \
- (((_extport) % 2) * 4)
-
-/* EXT port RGMII TX/RX delay configuration registers 1~2 */
-#define RTL8365MB_EXT_RGMXF_REG1 0x1307
-#define RTL8365MB_EXT_RGMXF_REG2 0x13C5
-#define RTL8365MB_EXT_RGMXF_REG(_extport) \
- (RTL8365MB_EXT_RGMXF_REG1 + \
- (((_extport) >> 1) * (0x13C5 - 0x1307)))
+/* Realtek docs and drivers use logic numbers such as EXT_PORT0=16, EXT_PORT1=17,
+ * EXT_PORT2=18 to interact with switch ports. That logic number is internally
+ * converted to either a physical port number (0..9) or an external interface id (0..2),
+ * depending on which function is called. The external interface id is calculated as
+ * ext_id = logic_port - 15, while the logical-to-physical map depends on the chip id/version.
+ *
+ * EXT_PORT0, mentioned in datasheets and in the rtl8367c driver, is used in this
+ * driver as ext_id == 1. EXT_PORT2, mentioned in the Realtek rtl8367c driver for
+ * 10-port switches, would have an ext_id of 3 (out of range for most extint macros),
+ * and ext_id 0 does not seem to be used for this family either.
+ */
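A short worked example of the mapping, assuming the map and macros above: on an RTL8365MB-VC the CPU-facing physical port 6 resolves to ext_id 1, which selects the EXT1 register at 0x1305:

	/* Illustrative only: resolve the EXT interface id for a physical port */
	int ext_int = rtl8365mb_extint_port_map[port];	/* e.g. port 6 -> 1 */

	if (ext_int <= 0)
		return -EINVAL;	/* not an external interface port */

	reg = RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(ext_int);	/* EXT1 -> 0x1305 */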
+
+/* EXT interface mode configuration registers 0~1 */
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305 /* EXT1 */
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 0x13C3 /* EXT2 */
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(_extint) \
+ ((_extint) == 1 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 : \
+ (_extint) == 2 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 : \
+ 0x0)
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(_extint) \
+ (0xF << (((_extint) % 2)))
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(_extint) \
+ (((_extint) % 2) * 4)
+
+/* EXT interface RGMII TX/RX delay configuration registers 0~2 */
+#define RTL8365MB_EXT_RGMXF_REG0 0x1306 /* EXT0 */
+#define RTL8365MB_EXT_RGMXF_REG1 0x1307 /* EXT1 */
+#define RTL8365MB_EXT_RGMXF_REG2 0x13C5 /* EXT2 */
+#define RTL8365MB_EXT_RGMXF_REG(_extint) \
+ ((_extint) == 0 ? RTL8365MB_EXT_RGMXF_REG0 : \
+ (_extint) == 1 ? RTL8365MB_EXT_RGMXF_REG1 : \
+ (_extint) == 2 ? RTL8365MB_EXT_RGMXF_REG2 : \
+ 0x0)
#define RTL8365MB_EXT_RGMXF_RXDELAY_MASK 0x0007
#define RTL8365MB_EXT_RGMXF_TXDELAY_MASK 0x0008
-/* External port speed values - used in DIGITAL_INTERFACE_FORCE */
+/* External interface port speed values - used in DIGITAL_INTERFACE_FORCE */
#define RTL8365MB_PORT_SPEED_10M 0
#define RTL8365MB_PORT_SPEED_100M 1
#define RTL8365MB_PORT_SPEED_1000M 2
-/* EXT port force configuration registers 0~2 */
-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 0x1310
-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 0x1311
-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 0x13C4
-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(_extport) \
- (RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 + \
- ((_extport) & 0x1) + \
- ((((_extport) >> 1) & 0x1) * (0x13C4 - 0x1310)))
+/* EXT interface force configuration registers 0~2 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 0x1310 /* EXT0 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 0x1311 /* EXT1 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 0x13C4 /* EXT2 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(_extint) \
+ ((_extint) == 0 ? RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 : \
+ (_extint) == 1 ? RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 : \
+ (_extint) == 2 ? RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 : \
+ 0x0)
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_EN_MASK 0x1000
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_NWAY_MASK 0x0080
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_TXPAUSE_MASK 0x0040
@@ -516,7 +543,7 @@ struct rtl8365mb_cpu {
/**
* struct rtl8365mb_port - private per-port data
- * @smi: pointer to parent realtek_smi data
+ * @priv: pointer to parent realtek_priv data
* @index: DSA port index, same as dsa_port::index
* @stats: link statistics populated by rtl8365mb_stats_poll, ready for atomic
* access via rtl8365mb_get_stats64
@@ -524,7 +551,7 @@ struct rtl8365mb_cpu {
* @mib_work: delayed work for polling MIB counters
*/
struct rtl8365mb_port {
- struct realtek_smi *smi;
+ struct realtek_priv *priv;
unsigned int index;
struct rtnl_link_stats64 stats;
spinlock_t stats_lock;
@@ -533,7 +560,7 @@ struct rtl8365mb_port {
/**
* struct rtl8365mb - private chip-specific driver data
- * @smi: pointer to parent realtek_smi data
+ * @priv: pointer to parent realtek_priv data
* @irq: registered IRQ or zero
* @chip_id: chip identifier
* @chip_ver: chip silicon revision
@@ -548,7 +575,7 @@ struct rtl8365mb_port {
* Private data for this driver.
*/
struct rtl8365mb {
- struct realtek_smi *smi;
+ struct realtek_priv *priv;
int irq;
u32 chip_id;
u32 chip_ver;
@@ -561,16 +588,16 @@ struct rtl8365mb {
size_t jam_size;
};
-static int rtl8365mb_phy_poll_busy(struct realtek_smi *smi)
+static int rtl8365mb_phy_poll_busy(struct realtek_priv *priv)
{
u32 val;
- return regmap_read_poll_timeout(smi->map,
+ return regmap_read_poll_timeout(priv->map_nolock,
RTL8365MB_INDIRECT_ACCESS_STATUS_REG,
val, !val, 10, 100);
}
-static int rtl8365mb_phy_ocp_prepare(struct realtek_smi *smi, int phy,
+static int rtl8365mb_phy_ocp_prepare(struct realtek_priv *priv, int phy,
u32 ocp_addr)
{
u32 val;
@@ -579,7 +606,7 @@ static int rtl8365mb_phy_ocp_prepare(struct realtek_smi *smi, int phy,
/* Set OCP prefix */
val = FIELD_GET(RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK, ocp_addr);
ret = regmap_update_bits(
- smi->map, RTL8365MB_GPHY_OCP_MSB_0_REG,
+ priv->map_nolock, RTL8365MB_GPHY_OCP_MSB_0_REG,
RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK,
FIELD_PREP(RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK, val));
if (ret)
@@ -592,89 +619,101 @@ static int rtl8365mb_phy_ocp_prepare(struct realtek_smi *smi, int phy,
ocp_addr >> 1);
val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK,
ocp_addr >> 6);
- ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG,
- val);
+ ret = regmap_write(priv->map_nolock,
+ RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG, val);
if (ret)
return ret;
return 0;
}
-static int rtl8365mb_phy_ocp_read(struct realtek_smi *smi, int phy,
+static int rtl8365mb_phy_ocp_read(struct realtek_priv *priv, int phy,
u32 ocp_addr, u16 *data)
{
u32 val;
int ret;
- ret = rtl8365mb_phy_poll_busy(smi);
+ mutex_lock(&priv->map_lock);
+
+ ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
- return ret;
+ goto out;
- ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr);
+ ret = rtl8365mb_phy_ocp_prepare(priv, phy, ocp_addr);
if (ret)
- return ret;
+ goto out;
/* Execute read operation */
val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ);
- ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val);
+ ret = regmap_write(priv->map_nolock, RTL8365MB_INDIRECT_ACCESS_CTRL_REG,
+ val);
if (ret)
- return ret;
+ goto out;
- ret = rtl8365mb_phy_poll_busy(smi);
+ ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
- return ret;
+ goto out;
/* Get PHY register data */
- ret = regmap_read(smi->map, RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG,
- &val);
+ ret = regmap_read(priv->map_nolock,
+ RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG, &val);
if (ret)
- return ret;
+ goto out;
*data = val & 0xFFFF;
- return 0;
+out:
+ mutex_unlock(&priv->map_lock);
+
+ return ret;
}
-static int rtl8365mb_phy_ocp_write(struct realtek_smi *smi, int phy,
+static int rtl8365mb_phy_ocp_write(struct realtek_priv *priv, int phy,
u32 ocp_addr, u16 data)
{
u32 val;
int ret;
- ret = rtl8365mb_phy_poll_busy(smi);
+ mutex_lock(&priv->map_lock);
+
+ ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
- return ret;
+ goto out;
- ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr);
+ ret = rtl8365mb_phy_ocp_prepare(priv, phy, ocp_addr);
if (ret)
- return ret;
+ goto out;
/* Set PHY register data */
- ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG,
- data);
+ ret = regmap_write(priv->map_nolock,
+ RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG, data);
if (ret)
- return ret;
+ goto out;
/* Execute write operation */
val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE);
- ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val);
+ ret = regmap_write(priv->map_nolock, RTL8365MB_INDIRECT_ACCESS_CTRL_REG,
+ val);
if (ret)
- return ret;
+ goto out;
- ret = rtl8365mb_phy_poll_busy(smi);
+ ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
- return ret;
+ goto out;
+
+out:
+ mutex_unlock(&priv->map_lock);
return 0;
}
-static int rtl8365mb_phy_read(struct realtek_smi *smi, int phy, int regnum)
+static int rtl8365mb_phy_read(struct realtek_priv *priv, int phy, int regnum)
{
u32 ocp_addr;
u16 val;
@@ -688,21 +727,21 @@ static int rtl8365mb_phy_read(struct realtek_smi *smi, int phy, int regnum)
ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;
- ret = rtl8365mb_phy_ocp_read(smi, phy, ocp_addr, &val);
+ ret = rtl8365mb_phy_ocp_read(priv, phy, ocp_addr, &val);
if (ret) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to read PHY%d reg %02x @ %04x, ret %d\n", phy,
regnum, ocp_addr, ret);
return ret;
}
- dev_dbg(smi->dev, "read PHY%d register 0x%02x @ %04x, val <- %04x\n",
+ dev_dbg(priv->dev, "read PHY%d register 0x%02x @ %04x, val <- %04x\n",
phy, regnum, ocp_addr, val);
return val;
}
-static int rtl8365mb_phy_write(struct realtek_smi *smi, int phy, int regnum,
+static int rtl8365mb_phy_write(struct realtek_priv *priv, int phy, int regnum,
u16 val)
{
u32 ocp_addr;
@@ -716,46 +755,67 @@ static int rtl8365mb_phy_write(struct realtek_smi *smi, int phy, int regnum,
ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;
- ret = rtl8365mb_phy_ocp_write(smi, phy, ocp_addr, val);
+ ret = rtl8365mb_phy_ocp_write(priv, phy, ocp_addr, val);
if (ret) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to write PHY%d reg %02x @ %04x, ret %d\n", phy,
regnum, ocp_addr, ret);
return ret;
}
- dev_dbg(smi->dev, "write PHY%d register 0x%02x @ %04x, val -> %04x\n",
+ dev_dbg(priv->dev, "write PHY%d register 0x%02x @ %04x, val -> %04x\n",
phy, regnum, ocp_addr, val);
return 0;
}
+static int rtl8365mb_dsa_phy_read(struct dsa_switch *ds, int phy, int regnum)
+{
+ return rtl8365mb_phy_read(ds->priv, phy, regnum);
+}
+
+static int rtl8365mb_dsa_phy_write(struct dsa_switch *ds, int phy, int regnum,
+ u16 val)
+{
+ return rtl8365mb_phy_write(ds->priv, phy, regnum, val);
+}
+
static enum dsa_tag_protocol
rtl8365mb_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mp)
{
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8365mb_cpu *cpu;
+ struct rtl8365mb *mb;
+
+ mb = priv->chip_data;
+ cpu = &mb->cpu;
+
+ if (cpu->position == RTL8365MB_CPU_POS_BEFORE_CRC)
+ return DSA_TAG_PROTO_RTL8_4T;
+
return DSA_TAG_PROTO_RTL8_4;
}
-static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
+static int rtl8365mb_ext_config_rgmii(struct realtek_priv *priv, int port,
phy_interface_t interface)
{
struct device_node *dn;
struct dsa_port *dp;
int tx_delay = 0;
int rx_delay = 0;
- int ext_port;
+ int ext_int;
u32 val;
int ret;
- if (port == smi->cpu_port) {
- ext_port = 1;
- } else {
- dev_err(smi->dev, "only one EXT port is currently supported\n");
+ ext_int = rtl8365mb_extint_port_map[port];
+
+ if (ext_int <= 0) {
+ dev_err(priv->dev, "Port %d is not an external interface port\n", port);
return -EINVAL;
}
- dp = dsa_to_port(smi->ds, port);
+ dp = dsa_to_port(priv->ds, port);
dn = dp->dn;
/* Set the RGMII TX/RX delay
@@ -786,8 +846,8 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
if (val == 0 || val == 2)
tx_delay = val / 2;
else
- dev_warn(smi->dev,
- "EXT port TX delay must be 0 or 2 ns\n");
+ dev_warn(priv->dev,
+ "EXT interface TX delay must be 0 or 2 ns\n");
}
if (!of_property_read_u32(dn, "rx-internal-delay-ps", &val)) {
@@ -796,12 +856,12 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
if (val <= 7)
rx_delay = val;
else
- dev_warn(smi->dev,
- "EXT port RX delay must be 0 to 2.1 ns\n");
+ dev_warn(priv->dev,
+ "EXT interface RX delay must be 0 to 2.1 ns\n");
}
ret = regmap_update_bits(
- smi->map, RTL8365MB_EXT_RGMXF_REG(ext_port),
+ priv->map, RTL8365MB_EXT_RGMXF_REG(ext_int),
RTL8365MB_EXT_RGMXF_TXDELAY_MASK |
RTL8365MB_EXT_RGMXF_RXDELAY_MASK,
FIELD_PREP(RTL8365MB_EXT_RGMXF_TXDELAY_MASK, tx_delay) |
@@ -810,18 +870,18 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
return ret;
ret = regmap_update_bits(
- smi->map, RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(ext_port),
- RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(ext_port),
+ priv->map, RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(ext_int),
+ RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(ext_int),
RTL8365MB_EXT_PORT_MODE_RGMII
<< RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(
- ext_port));
+ ext_int));
if (ret)
return ret;
return 0;
}
-static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
+static int rtl8365mb_ext_config_forcemode(struct realtek_priv *priv, int port,
bool link, int speed, int duplex,
bool tx_pause, bool rx_pause)
{
@@ -830,14 +890,14 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
u32 r_duplex;
u32 r_speed;
u32 r_link;
- int ext_port;
+ int ext_int;
int val;
int ret;
- if (port == smi->cpu_port) {
- ext_port = 1;
- } else {
- dev_err(smi->dev, "only one EXT port is currently supported\n");
+ ext_int = rtl8365mb_extint_port_map[port];
+
+ if (ext_int <= 0) {
+ dev_err(priv->dev, "Port %d is not an external interface port\n", port);
return -EINVAL;
}
@@ -854,7 +914,7 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
} else if (speed == SPEED_10) {
r_speed = RTL8365MB_PORT_SPEED_10M;
} else {
- dev_err(smi->dev, "unsupported port speed %s\n",
+ dev_err(priv->dev, "unsupported port speed %s\n",
phy_speed_to_str(speed));
return -EINVAL;
}
@@ -864,7 +924,7 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
} else if (duplex == DUPLEX_HALF) {
r_duplex = 0;
} else {
- dev_err(smi->dev, "unsupported duplex %s\n",
+ dev_err(priv->dev, "unsupported duplex %s\n",
phy_duplex_to_str(duplex));
return -EINVAL;
}
@@ -886,8 +946,8 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_DUPLEX_MASK,
r_duplex) |
FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_SPEED_MASK, r_speed);
- ret = regmap_write(smi->map,
- RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(ext_port),
+ ret = regmap_write(priv->map,
+ RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(ext_int),
val);
if (ret)
return ret;
@@ -898,13 +958,17 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
static bool rtl8365mb_phy_mode_supported(struct dsa_switch *ds, int port,
phy_interface_t interface)
{
- if (dsa_is_user_port(ds, port) &&
+ int ext_int;
+
+ ext_int = rtl8365mb_extint_port_map[port];
+
+ if (ext_int < 0 &&
(interface == PHY_INTERFACE_MODE_NA ||
interface == PHY_INTERFACE_MODE_INTERNAL ||
interface == PHY_INTERFACE_MODE_GMII))
/* Internal PHY */
return true;
- else if (dsa_is_cpu_port(ds, port) &&
+ else if ((ext_int >= 1) &&
phy_interface_mode_is_rgmii(interface))
/* Extension MAC */
return true;
@@ -912,65 +976,43 @@ static bool rtl8365mb_phy_mode_supported(struct dsa_switch *ds, int port,
return false;
}
-static void rtl8365mb_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void rtl8365mb_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- struct realtek_smi *smi = ds->priv;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0 };
-
- /* include/linux/phylink.h says:
- * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
- * expects the MAC driver to return all supported link modes.
- */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- !rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
- dev_err(smi->dev, "phy mode %s is unsupported on port %d\n",
- phy_modes(state->interface), port);
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
-
- phylink_set(mask, Autoneg);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 1000baseT_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ if (dsa_is_user_port(ds, port))
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ else if (dsa_is_cpu_port(ds, port))
+ phy_interface_set_rgmii(config->supported_interfaces);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD;
}
static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
if (!rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
- dev_err(smi->dev, "phy mode %s is unsupported on port %d\n",
+ dev_err(priv->dev, "phy mode %s is unsupported on port %d\n",
phy_modes(state->interface), port);
return;
}
if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"port %d supports only conventional PHY or fixed-link\n",
port);
return;
}
if (phy_interface_mode_is_rgmii(state->interface)) {
- ret = rtl8365mb_ext_config_rgmii(smi, port, state->interface);
+ ret = rtl8365mb_ext_config_rgmii(priv, port, state->interface);
if (ret)
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to configure RGMII mode on port %d: %d\n",
port, ret);
return;
@@ -985,20 +1027,20 @@ static void rtl8365mb_phylink_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb_port *p;
struct rtl8365mb *mb;
int ret;
- mb = smi->chip_data;
+ mb = priv->chip_data;
p = &mb->ports[port];
cancel_delayed_work_sync(&p->mib_work);
if (phy_interface_mode_is_rgmii(interface)) {
- ret = rtl8365mb_ext_config_forcemode(smi, port, false, 0, 0,
+ ret = rtl8365mb_ext_config_forcemode(priv, port, false, 0, 0,
false, false);
if (ret)
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to reset forced mode on port %d: %d\n",
port, ret);
@@ -1013,21 +1055,21 @@ static void rtl8365mb_phylink_mac_link_up(struct dsa_switch *ds, int port,
int duplex, bool tx_pause,
bool rx_pause)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb_port *p;
struct rtl8365mb *mb;
int ret;
- mb = smi->chip_data;
+ mb = priv->chip_data;
p = &mb->ports[port];
schedule_delayed_work(&p->mib_work, 0);
if (phy_interface_mode_is_rgmii(interface)) {
- ret = rtl8365mb_ext_config_forcemode(smi, port, true, speed,
+ ret = rtl8365mb_ext_config_forcemode(priv, port, true, speed,
duplex, tx_pause,
rx_pause);
if (ret)
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to force mode on port %d: %d\n", port,
ret);
@@ -1038,7 +1080,7 @@ static void rtl8365mb_phylink_mac_link_up(struct dsa_switch *ds, int port,
static void rtl8365mb_port_stp_state_set(struct dsa_switch *ds, int port,
u8 state)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
enum rtl8365mb_stp_state val;
int msti = 0;
@@ -1057,36 +1099,36 @@ static void rtl8365mb_port_stp_state_set(struct dsa_switch *ds, int port,
val = RTL8365MB_STP_STATE_FORWARDING;
break;
default:
- dev_err(smi->dev, "invalid STP state: %u\n", state);
+ dev_err(priv->dev, "invalid STP state: %u\n", state);
return;
}
- regmap_update_bits(smi->map, RTL8365MB_MSTI_CTRL_REG(msti, port),
+ regmap_update_bits(priv->map, RTL8365MB_MSTI_CTRL_REG(msti, port),
RTL8365MB_MSTI_CTRL_PORT_STATE_MASK(port),
val << RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET(port));
}
-static int rtl8365mb_port_set_learning(struct realtek_smi *smi, int port,
+static int rtl8365mb_port_set_learning(struct realtek_priv *priv, int port,
bool enable)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
/* Enable/disable learning by limiting the number of L2 addresses the
* port can learn. Realtek documentation states that a limit of zero
* disables learning. When enabling learning, set it to the chip's
* maximum.
*/
- return regmap_write(smi->map, RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(port),
+ return regmap_write(priv->map, RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(port),
enable ? mb->learn_limit_max : 0);
}
-static int rtl8365mb_port_set_isolation(struct realtek_smi *smi, int port,
+static int rtl8365mb_port_set_isolation(struct realtek_priv *priv, int port,
u32 mask)
{
- return regmap_write(smi->map, RTL8365MB_PORT_ISOLATION_REG(port), mask);
+ return regmap_write(priv->map, RTL8365MB_PORT_ISOLATION_REG(port), mask);
}
-static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port,
+static int rtl8365mb_mib_counter_read(struct realtek_priv *priv, int port,
u32 offset, u32 length, u64 *mibvalue)
{
u64 tmpvalue = 0;
@@ -1098,13 +1140,13 @@ static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port,
* and then poll the control register before reading the value from some
* counter registers.
*/
- ret = regmap_write(smi->map, RTL8365MB_MIB_ADDRESS_REG,
+ ret = regmap_write(priv->map, RTL8365MB_MIB_ADDRESS_REG,
RTL8365MB_MIB_ADDRESS(port, offset));
if (ret)
return ret;
/* Poll for completion */
- ret = regmap_read_poll_timeout(smi->map, RTL8365MB_MIB_CTRL0_REG, val,
+ ret = regmap_read_poll_timeout(priv->map, RTL8365MB_MIB_CTRL0_REG, val,
!(val & RTL8365MB_MIB_CTRL0_BUSY_MASK),
10, 100);
if (ret)
@@ -1126,7 +1168,7 @@ static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port,
/* Read the MIB counter 16 bits at a time */
for (i = 0; i < length; i++) {
- ret = regmap_read(smi->map,
+ ret = regmap_read(priv->map,
RTL8365MB_MIB_COUNTER_REG(offset - i), &val);
if (ret)
return ret;
@@ -1142,21 +1184,21 @@ static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port,
static void rtl8365mb_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb *mb;
int ret;
int i;
- mb = smi->chip_data;
+ mb = priv->chip_data;
mutex_lock(&mb->mib_lock);
for (i = 0; i < RTL8365MB_MIB_END; i++) {
struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
- ret = rtl8365mb_mib_counter_read(smi, port, mib->offset,
+ ret = rtl8365mb_mib_counter_read(priv, port, mib->offset,
mib->length, &data[i]);
if (ret) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to read port %d counters: %d\n", port,
ret);
break;
@@ -1190,15 +1232,15 @@ static int rtl8365mb_get_sset_count(struct dsa_switch *ds, int port, int sset)
static void rtl8365mb_get_phy_stats(struct dsa_switch *ds, int port,
struct ethtool_eth_phy_stats *phy_stats)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb_mib_counter *mib;
struct rtl8365mb *mb;
- mb = smi->chip_data;
+ mb = priv->chip_data;
mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3StatsSymbolErrors];
mutex_lock(&mb->mib_lock);
- rtl8365mb_mib_counter_read(smi, port, mib->offset, mib->length,
+ rtl8365mb_mib_counter_read(priv, port, mib->offset, mib->length,
&phy_stats->SymbolErrorDuringCarrier);
mutex_unlock(&mb->mib_lock);
}
@@ -1226,12 +1268,12 @@ static void rtl8365mb_get_mac_stats(struct dsa_switch *ds, int port,
[RTL8365MB_MIB_dot3StatsExcessiveCollisions] = 1,
};
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb *mb;
int ret;
int i;
- mb = smi->chip_data;
+ mb = priv->chip_data;
mutex_lock(&mb->mib_lock);
for (i = 0; i < RTL8365MB_MIB_END; i++) {
@@ -1241,7 +1283,7 @@ static void rtl8365mb_get_mac_stats(struct dsa_switch *ds, int port,
if (!cnt[i])
continue;
- ret = rtl8365mb_mib_counter_read(smi, port, mib->offset,
+ ret = rtl8365mb_mib_counter_read(priv, port, mib->offset,
mib->length, &cnt[i]);
if (ret)
break;
@@ -1291,20 +1333,20 @@ static void rtl8365mb_get_mac_stats(struct dsa_switch *ds, int port,
static void rtl8365mb_get_ctrl_stats(struct dsa_switch *ds, int port,
struct ethtool_eth_ctrl_stats *ctrl_stats)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb_mib_counter *mib;
struct rtl8365mb *mb;
- mb = smi->chip_data;
+ mb = priv->chip_data;
mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3ControlInUnknownOpcodes];
mutex_lock(&mb->mib_lock);
- rtl8365mb_mib_counter_read(smi, port, mib->offset, mib->length,
+ rtl8365mb_mib_counter_read(priv, port, mib->offset, mib->length,
&ctrl_stats->UnsupportedOpcodesReceived);
mutex_unlock(&mb->mib_lock);
}
-static void rtl8365mb_stats_update(struct realtek_smi *smi, int port)
+static void rtl8365mb_stats_update(struct realtek_priv *priv, int port)
{
u64 cnt[RTL8365MB_MIB_END] = {
[RTL8365MB_MIB_ifOutOctets] = 1,
@@ -1323,7 +1365,7 @@ static void rtl8365mb_stats_update(struct realtek_smi *smi, int port)
[RTL8365MB_MIB_dot3StatsFCSErrors] = 1,
[RTL8365MB_MIB_dot3StatsLateCollisions] = 1,
};
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
struct rtnl_link_stats64 *stats;
int ret;
int i;
@@ -1338,7 +1380,7 @@ static void rtl8365mb_stats_update(struct realtek_smi *smi, int port)
if (!cnt[i])
continue;
- ret = rtl8365mb_mib_counter_read(smi, port, c->offset,
+ ret = rtl8365mb_mib_counter_read(priv, port, c->offset,
c->length, &cnt[i]);
if (ret)
break;
@@ -1388,9 +1430,9 @@ static void rtl8365mb_stats_poll(struct work_struct *work)
struct rtl8365mb_port *p = container_of(to_delayed_work(work),
struct rtl8365mb_port,
mib_work);
- struct realtek_smi *smi = p->smi;
+ struct realtek_priv *priv = p->priv;
- rtl8365mb_stats_update(smi, p->index);
+ rtl8365mb_stats_update(priv, p->index);
schedule_delayed_work(&p->mib_work, RTL8365MB_STATS_INTERVAL_JIFFIES);
}
@@ -1398,11 +1440,11 @@ static void rtl8365mb_stats_poll(struct work_struct *work)
static void rtl8365mb_get_stats64(struct dsa_switch *ds, int port,
struct rtnl_link_stats64 *s)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb_port *p;
struct rtl8365mb *mb;
- mb = smi->chip_data;
+ mb = priv->chip_data;
p = &mb->ports[port];
spin_lock(&p->stats_lock);
@@ -1410,9 +1452,9 @@ static void rtl8365mb_get_stats64(struct dsa_switch *ds, int port,
spin_unlock(&p->stats_lock);
}
-static void rtl8365mb_stats_setup(struct realtek_smi *smi)
+static void rtl8365mb_stats_setup(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
int i;
/* Per-chip global mutex to protect MIB counter access, since doing
@@ -1420,10 +1462,10 @@ static void rtl8365mb_stats_setup(struct realtek_smi *smi)
*/
mutex_init(&mb->mib_lock);
- for (i = 0; i < smi->num_ports; i++) {
+ for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(smi->ds, i))
+ if (dsa_is_unused_port(priv->ds, i))
continue;
/* Per-port spinlock to protect the stats64 data */
@@ -1436,45 +1478,45 @@ static void rtl8365mb_stats_setup(struct realtek_smi *smi)
}
}
-static void rtl8365mb_stats_teardown(struct realtek_smi *smi)
+static void rtl8365mb_stats_teardown(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
int i;
- for (i = 0; i < smi->num_ports; i++) {
+ for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(smi->ds, i))
+ if (dsa_is_unused_port(priv->ds, i))
continue;
cancel_delayed_work_sync(&p->mib_work);
}
}
-static int rtl8365mb_get_and_clear_status_reg(struct realtek_smi *smi, u32 reg,
+static int rtl8365mb_get_and_clear_status_reg(struct realtek_priv *priv, u32 reg,
u32 *val)
{
int ret;
- ret = regmap_read(smi->map, reg, val);
+ ret = regmap_read(priv->map, reg, val);
if (ret)
return ret;
- return regmap_write(smi->map, reg, *val);
+ return regmap_write(priv->map, reg, *val);
}
static irqreturn_t rtl8365mb_irq(int irq, void *data)
{
- struct realtek_smi *smi = data;
+ struct realtek_priv *priv = data;
unsigned long line_changes = 0;
struct rtl8365mb *mb;
u32 stat;
int line;
int ret;
- mb = smi->chip_data;
+ mb = priv->chip_data;
- ret = rtl8365mb_get_and_clear_status_reg(smi, RTL8365MB_INTR_STATUS_REG,
+ ret = rtl8365mb_get_and_clear_status_reg(priv, RTL8365MB_INTR_STATUS_REG,
&stat);
if (ret)
goto out_error;
@@ -1485,14 +1527,14 @@ static irqreturn_t rtl8365mb_irq(int irq, void *data)
u32 val;
ret = rtl8365mb_get_and_clear_status_reg(
- smi, RTL8365MB_PORT_LINKUP_IND_REG, &val);
+ priv, RTL8365MB_PORT_LINKUP_IND_REG, &val);
if (ret)
goto out_error;
linkup_ind = FIELD_GET(RTL8365MB_PORT_LINKUP_IND_MASK, val);
ret = rtl8365mb_get_and_clear_status_reg(
- smi, RTL8365MB_PORT_LINKDOWN_IND_REG, &val);
+ priv, RTL8365MB_PORT_LINKDOWN_IND_REG, &val);
if (ret)
goto out_error;
@@ -1504,8 +1546,8 @@ static irqreturn_t rtl8365mb_irq(int irq, void *data)
if (!line_changes)
goto out_none;
- for_each_set_bit(line, &line_changes, smi->num_ports) {
- int child_irq = irq_find_mapping(smi->irqdomain, line);
+ for_each_set_bit(line, &line_changes, priv->num_ports) {
+ int child_irq = irq_find_mapping(priv->irqdomain, line);
handle_nested_irq(child_irq);
}
@@ -1513,7 +1555,7 @@ static irqreturn_t rtl8365mb_irq(int irq, void *data)
return IRQ_HANDLED;
out_error:
- dev_err(smi->dev, "failed to read interrupt status: %d\n", ret);
+ dev_err(priv->dev, "failed to read interrupt status: %d\n", ret);
out_none:
return IRQ_NONE;
@@ -1548,27 +1590,27 @@ static const struct irq_domain_ops rtl8365mb_irqdomain_ops = {
.xlate = irq_domain_xlate_onecell,
};
-static int rtl8365mb_set_irq_enable(struct realtek_smi *smi, bool enable)
+static int rtl8365mb_set_irq_enable(struct realtek_priv *priv, bool enable)
{
- return regmap_update_bits(smi->map, RTL8365MB_INTR_CTRL_REG,
+ return regmap_update_bits(priv->map, RTL8365MB_INTR_CTRL_REG,
RTL8365MB_INTR_LINK_CHANGE_MASK,
FIELD_PREP(RTL8365MB_INTR_LINK_CHANGE_MASK,
enable ? 1 : 0));
}
-static int rtl8365mb_irq_enable(struct realtek_smi *smi)
+static int rtl8365mb_irq_enable(struct realtek_priv *priv)
{
- return rtl8365mb_set_irq_enable(smi, true);
+ return rtl8365mb_set_irq_enable(priv, true);
}
-static int rtl8365mb_irq_disable(struct realtek_smi *smi)
+static int rtl8365mb_irq_disable(struct realtek_priv *priv)
{
- return rtl8365mb_set_irq_enable(smi, false);
+ return rtl8365mb_set_irq_enable(priv, false);
}
-static int rtl8365mb_irq_setup(struct realtek_smi *smi)
+static int rtl8365mb_irq_setup(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
struct device_node *intc;
u32 irq_trig;
int virq;
@@ -1577,9 +1619,9 @@ static int rtl8365mb_irq_setup(struct realtek_smi *smi)
int ret;
int i;
- intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller");
+ intc = of_get_child_by_name(priv->dev->of_node, "interrupt-controller");
if (!intc) {
- dev_err(smi->dev, "missing child interrupt-controller node\n");
+ dev_err(priv->dev, "missing child interrupt-controller node\n");
return -EINVAL;
}
@@ -1587,24 +1629,24 @@ static int rtl8365mb_irq_setup(struct realtek_smi *smi)
irq = of_irq_get(intc, 0);
if (irq <= 0) {
if (irq != -EPROBE_DEFER)
- dev_err(smi->dev, "failed to get parent irq: %d\n",
+ dev_err(priv->dev, "failed to get parent irq: %d\n",
irq);
ret = irq ? irq : -EINVAL;
goto out_put_node;
}
- smi->irqdomain = irq_domain_add_linear(intc, smi->num_ports,
- &rtl8365mb_irqdomain_ops, smi);
- if (!smi->irqdomain) {
- dev_err(smi->dev, "failed to add irq domain\n");
+ priv->irqdomain = irq_domain_add_linear(intc, priv->num_ports,
+ &rtl8365mb_irqdomain_ops, priv);
+ if (!priv->irqdomain) {
+ dev_err(priv->dev, "failed to add irq domain\n");
ret = -ENOMEM;
goto out_put_node;
}
- for (i = 0; i < smi->num_ports; i++) {
- virq = irq_create_mapping(smi->irqdomain, i);
+ for (i = 0; i < priv->num_ports; i++) {
+ virq = irq_create_mapping(priv->irqdomain, i);
if (!virq) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to create irq domain mapping\n");
ret = -EINVAL;
goto out_remove_irqdomain;
@@ -1625,40 +1667,40 @@ static int rtl8365mb_irq_setup(struct realtek_smi *smi)
val = RTL8365MB_INTR_POLARITY_LOW;
break;
default:
- dev_err(smi->dev, "unsupported irq trigger type %u\n",
+ dev_err(priv->dev, "unsupported irq trigger type %u\n",
irq_trig);
ret = -EINVAL;
goto out_remove_irqdomain;
}
- ret = regmap_update_bits(smi->map, RTL8365MB_INTR_POLARITY_REG,
+ ret = regmap_update_bits(priv->map, RTL8365MB_INTR_POLARITY_REG,
RTL8365MB_INTR_POLARITY_MASK,
FIELD_PREP(RTL8365MB_INTR_POLARITY_MASK, val));
if (ret)
goto out_remove_irqdomain;
/* Disable the interrupt in case the chip has it enabled on reset */
- ret = rtl8365mb_irq_disable(smi);
+ ret = rtl8365mb_irq_disable(priv);
if (ret)
goto out_remove_irqdomain;
/* Clear the interrupt status register */
- ret = regmap_write(smi->map, RTL8365MB_INTR_STATUS_REG,
+ ret = regmap_write(priv->map, RTL8365MB_INTR_STATUS_REG,
RTL8365MB_INTR_ALL_MASK);
if (ret)
goto out_remove_irqdomain;
ret = request_threaded_irq(irq, NULL, rtl8365mb_irq, IRQF_ONESHOT,
- "rtl8365mb", smi);
+ "rtl8365mb", priv);
if (ret) {
- dev_err(smi->dev, "failed to request irq: %d\n", ret);
+ dev_err(priv->dev, "failed to request irq: %d\n", ret);
goto out_remove_irqdomain;
}
/* Store the irq so that we know to free it during teardown */
mb->irq = irq;
- ret = rtl8365mb_irq_enable(smi);
+ ret = rtl8365mb_irq_enable(priv);
if (ret)
goto out_free_irq;
@@ -1667,17 +1709,17 @@ static int rtl8365mb_irq_setup(struct realtek_smi *smi)
return 0;
out_free_irq:
- free_irq(mb->irq, smi);
+ free_irq(mb->irq, priv);
mb->irq = 0;
out_remove_irqdomain:
- for (i = 0; i < smi->num_ports; i++) {
- virq = irq_find_mapping(smi->irqdomain, i);
+ for (i = 0; i < priv->num_ports; i++) {
+ virq = irq_find_mapping(priv->irqdomain, i);
irq_dispose_mapping(virq);
}
- irq_domain_remove(smi->irqdomain);
- smi->irqdomain = NULL;
+ irq_domain_remove(priv->irqdomain);
+ priv->irqdomain = NULL;
out_put_node:
of_node_put(intc);
@@ -1685,36 +1727,36 @@ out_put_node:
return ret;
}
-static void rtl8365mb_irq_teardown(struct realtek_smi *smi)
+static void rtl8365mb_irq_teardown(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
int virq;
int i;
if (mb->irq) {
- free_irq(mb->irq, smi);
+ free_irq(mb->irq, priv);
mb->irq = 0;
}
- if (smi->irqdomain) {
- for (i = 0; i < smi->num_ports; i++) {
- virq = irq_find_mapping(smi->irqdomain, i);
+ if (priv->irqdomain) {
+ for (i = 0; i < priv->num_ports; i++) {
+ virq = irq_find_mapping(priv->irqdomain, i);
irq_dispose_mapping(virq);
}
- irq_domain_remove(smi->irqdomain);
- smi->irqdomain = NULL;
+ irq_domain_remove(priv->irqdomain);
+ priv->irqdomain = NULL;
}
}
-static int rtl8365mb_cpu_config(struct realtek_smi *smi)
+static int rtl8365mb_cpu_config(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
struct rtl8365mb_cpu *cpu = &mb->cpu;
u32 val;
int ret;
- ret = regmap_update_bits(smi->map, RTL8365MB_CPU_PORT_MASK_REG,
+ ret = regmap_update_bits(priv->map, RTL8365MB_CPU_PORT_MASK_REG,
RTL8365MB_CPU_PORT_MASK_MASK,
FIELD_PREP(RTL8365MB_CPU_PORT_MASK_MASK,
cpu->mask));
@@ -1726,26 +1768,57 @@ static int rtl8365mb_cpu_config(struct realtek_smi *smi)
FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_POSITION_MASK, cpu->position) |
FIELD_PREP(RTL8365MB_CPU_CTRL_RXBYTECOUNT_MASK, cpu->rx_length) |
FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_FORMAT_MASK, cpu->format) |
- FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_MASK, cpu->trap_port) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_MASK, cpu->trap_port & 0x7) |
FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_EXT_MASK,
- cpu->trap_port >> 3);
- ret = regmap_write(smi->map, RTL8365MB_CPU_CTRL_REG, val);
+ cpu->trap_port >> 3 & 0x1);
+ ret = regmap_write(priv->map, RTL8365MB_CPU_CTRL_REG, val);
if (ret)
return ret;
return 0;
}
-static int rtl8365mb_switch_init(struct realtek_smi *smi)
+static int rtl8365mb_change_tag_protocol(struct dsa_switch *ds, int cpu_index,
+ enum dsa_tag_protocol proto)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8365mb_cpu *cpu;
+ struct rtl8365mb *mb;
+
+ mb = priv->chip_data;
+ cpu = &mb->cpu;
+
+ switch (proto) {
+ case DSA_TAG_PROTO_RTL8_4:
+ cpu->format = RTL8365MB_CPU_FORMAT_8BYTES;
+ cpu->position = RTL8365MB_CPU_POS_AFTER_SA;
+ break;
+ case DSA_TAG_PROTO_RTL8_4T:
+ cpu->format = RTL8365MB_CPU_FORMAT_8BYTES;
+ cpu->position = RTL8365MB_CPU_POS_BEFORE_CRC;
+ break;
+ /* The switch also supports a 4-byte format, similar to rtl4a but with
+ * the same 0x04 8-bit version and probably 8-bit port source/dest.
+ * There is no public doc about it. Not supported yet and it will probably
+ * never be.
+ */
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ return rtl8365mb_cpu_config(priv);
+}
+
+static int rtl8365mb_switch_init(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
int ret;
int i;
/* Do any chip-specific init jam before getting to the common stuff */
if (mb->jam_table) {
for (i = 0; i < mb->jam_size; i++) {
- ret = regmap_write(smi->map, mb->jam_table[i].reg,
+ ret = regmap_write(priv->map, mb->jam_table[i].reg,
mb->jam_table[i].val);
if (ret)
return ret;
@@ -1754,7 +1827,7 @@ static int rtl8365mb_switch_init(struct realtek_smi *smi)
/* Common init jam */
for (i = 0; i < ARRAY_SIZE(rtl8365mb_init_jam_common); i++) {
- ret = regmap_write(smi->map, rtl8365mb_init_jam_common[i].reg,
+ ret = regmap_write(priv->map, rtl8365mb_init_jam_common[i].reg,
rtl8365mb_init_jam_common[i].val);
if (ret)
return ret;
@@ -1763,75 +1836,80 @@ static int rtl8365mb_switch_init(struct realtek_smi *smi)
return 0;
}
-static int rtl8365mb_reset_chip(struct realtek_smi *smi)
+static int rtl8365mb_reset_chip(struct realtek_priv *priv)
{
u32 val;
- realtek_smi_write_reg_noack(smi, RTL8365MB_CHIP_RESET_REG,
- FIELD_PREP(RTL8365MB_CHIP_RESET_HW_MASK,
- 1));
+ priv->write_reg_noack(priv, RTL8365MB_CHIP_RESET_REG,
+ FIELD_PREP(RTL8365MB_CHIP_RESET_HW_MASK, 1));
/* Realtek documentation says the chip needs 1 second to reset. Sleep
* for 100 ms before accessing any registers to prevent ACK timeouts.
*/
msleep(100);
- return regmap_read_poll_timeout(smi->map, RTL8365MB_CHIP_RESET_REG, val,
+ return regmap_read_poll_timeout(priv->map, RTL8365MB_CHIP_RESET_REG, val,
!(val & RTL8365MB_CHIP_RESET_HW_MASK),
20000, 1e6);
}
static int rtl8365mb_setup(struct dsa_switch *ds)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8365mb_cpu *cpu;
+ struct dsa_port *cpu_dp;
struct rtl8365mb *mb;
int ret;
int i;
- mb = smi->chip_data;
+ mb = priv->chip_data;
+ cpu = &mb->cpu;
- ret = rtl8365mb_reset_chip(smi);
+ ret = rtl8365mb_reset_chip(priv);
if (ret) {
- dev_err(smi->dev, "failed to reset chip: %d\n", ret);
+ dev_err(priv->dev, "failed to reset chip: %d\n", ret);
goto out_error;
}
/* Configure switch to vendor-defined initial state */
- ret = rtl8365mb_switch_init(smi);
+ ret = rtl8365mb_switch_init(priv);
if (ret) {
- dev_err(smi->dev, "failed to initialize switch: %d\n", ret);
+ dev_err(priv->dev, "failed to initialize switch: %d\n", ret);
goto out_error;
}
/* Set up cascading IRQs */
- ret = rtl8365mb_irq_setup(smi);
+ ret = rtl8365mb_irq_setup(priv);
if (ret == -EPROBE_DEFER)
return ret;
else if (ret)
- dev_info(smi->dev, "no interrupt support\n");
+ dev_info(priv->dev, "no interrupt support\n");
/* Configure CPU tagging */
- ret = rtl8365mb_cpu_config(smi);
+ dsa_switch_for_each_cpu_port(cpu_dp, priv->ds) {
+ cpu->mask |= BIT(cpu_dp->index);
+
+ if (cpu->trap_port == RTL8365MB_MAX_NUM_PORTS)
+ cpu->trap_port = cpu_dp->index;
+ }
+ cpu->enable = cpu->mask > 0;
+ ret = rtl8365mb_cpu_config(priv);
if (ret)
goto out_teardown_irq;
/* Configure ports */
- for (i = 0; i < smi->num_ports; i++) {
+ for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(smi->ds, i))
+ if (dsa_is_unused_port(priv->ds, i))
continue;
- /* Set up per-port private data */
- p->smi = smi;
- p->index = i;
-
/* Forward only to the CPU */
- ret = rtl8365mb_port_set_isolation(smi, i, BIT(smi->cpu_port));
+ ret = rtl8365mb_port_set_isolation(priv, i, cpu->mask);
if (ret)
goto out_teardown_irq;
/* Disable learning */
- ret = rtl8365mb_port_set_learning(smi, i, false);
+ ret = rtl8365mb_port_set_learning(priv, i, false);
if (ret)
goto out_teardown_irq;
@@ -1839,29 +1917,35 @@ static int rtl8365mb_setup(struct dsa_switch *ds)
* ports will still forward frames to the CPU despite being
* administratively down by default.
*/
- rtl8365mb_port_stp_state_set(smi->ds, i, BR_STATE_DISABLED);
+ rtl8365mb_port_stp_state_set(priv->ds, i, BR_STATE_DISABLED);
+
+ /* Set up per-port private data */
+ p->priv = priv;
+ p->index = i;
}
/* Set maximum packet length to 1536 bytes */
- ret = regmap_update_bits(smi->map, RTL8365MB_CFG0_MAX_LEN_REG,
+ ret = regmap_update_bits(priv->map, RTL8365MB_CFG0_MAX_LEN_REG,
RTL8365MB_CFG0_MAX_LEN_MASK,
FIELD_PREP(RTL8365MB_CFG0_MAX_LEN_MASK, 1536));
if (ret)
goto out_teardown_irq;
- ret = realtek_smi_setup_mdio(smi);
- if (ret) {
- dev_err(smi->dev, "could not set up MDIO bus\n");
- goto out_teardown_irq;
+ if (priv->setup_interface) {
+ ret = priv->setup_interface(ds);
+ if (ret) {
+ dev_err(priv->dev, "could not set up MDIO bus\n");
+ goto out_teardown_irq;
+ }
}
/* Start statistics counter polling */
- rtl8365mb_stats_setup(smi);
+ rtl8365mb_stats_setup(priv);
return 0;
out_teardown_irq:
- rtl8365mb_irq_teardown(smi);
+ rtl8365mb_irq_teardown(priv);
out_error:
return ret;
@@ -1869,10 +1953,10 @@ out_error:
static void rtl8365mb_teardown(struct dsa_switch *ds)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
- rtl8365mb_stats_teardown(smi);
- rtl8365mb_irq_teardown(smi);
+ rtl8365mb_stats_teardown(priv);
+ rtl8365mb_irq_teardown(priv);
}
static int rtl8365mb_get_chip_id_and_ver(struct regmap *map, u32 *id, u32 *ver)
@@ -1902,40 +1986,55 @@ static int rtl8365mb_get_chip_id_and_ver(struct regmap *map, u32 *id, u32 *ver)
return 0;
}
-static int rtl8365mb_detect(struct realtek_smi *smi)
+static int rtl8365mb_detect(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
u32 chip_id;
u32 chip_ver;
int ret;
- ret = rtl8365mb_get_chip_id_and_ver(smi->map, &chip_id, &chip_ver);
+ ret = rtl8365mb_get_chip_id_and_ver(priv->map, &chip_id, &chip_ver);
if (ret) {
- dev_err(smi->dev, "failed to read chip id and version: %d\n",
+ dev_err(priv->dev, "failed to read chip id and version: %d\n",
ret);
return ret;
}
switch (chip_id) {
case RTL8365MB_CHIP_ID_8365MB_VC:
- dev_info(smi->dev,
- "found an RTL8365MB-VC switch (ver=0x%04x)\n",
- chip_ver);
+ switch (chip_ver) {
+ case RTL8365MB_CHIP_VER_8365MB_VC:
+ dev_info(priv->dev,
+ "found an RTL8365MB-VC switch (ver=0x%04x)\n",
+ chip_ver);
+ break;
+ case RTL8365MB_CHIP_VER_8367RB:
+ dev_info(priv->dev,
+ "found an RTL8367RB-VB switch (ver=0x%04x)\n",
+ chip_ver);
+ break;
+ case RTL8365MB_CHIP_VER_8367S:
+ dev_info(priv->dev,
+ "found an RTL8367S switch (ver=0x%04x)\n",
+ chip_ver);
+ break;
+ default:
+ dev_err(priv->dev, "unrecognized switch version (ver=0x%04x)",
+ chip_ver);
+ return -ENODEV;
+ }
- smi->cpu_port = RTL8365MB_CPU_PORT_NUM_8365MB_VC;
- smi->num_ports = smi->cpu_port + 1;
+ priv->num_ports = RTL8365MB_MAX_NUM_PORTS;
- mb->smi = smi;
+ mb->priv = priv;
mb->chip_id = chip_id;
mb->chip_ver = chip_ver;
- mb->port_mask = BIT(smi->num_ports) - 1;
- mb->learn_limit_max = RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC;
+ mb->port_mask = GENMASK(priv->num_ports - 1, 0);
+ mb->learn_limit_max = RTL8365MB_LEARN_LIMIT_MAX;
mb->jam_table = rtl8365mb_init_jam_8365mb_vc;
mb->jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc);
- mb->cpu.enable = 1;
- mb->cpu.mask = BIT(smi->cpu_port);
- mb->cpu.trap_port = smi->cpu_port;
+ mb->cpu.trap_port = RTL8365MB_MAX_NUM_PORTS;
mb->cpu.insert = RTL8365MB_CPU_INSERT_TO_ALL;
mb->cpu.position = RTL8365MB_CPU_POS_AFTER_SA;
mb->cpu.rx_length = RTL8365MB_CPU_RXLEN_64BYTES;
@@ -1943,7 +2042,7 @@ static int rtl8365mb_detect(struct realtek_smi *smi)
break;
default:
- dev_err(smi->dev,
+ dev_err(priv->dev,
"found an unknown Realtek switch (id=0x%04x, ver=0x%04x)\n",
chip_id, chip_ver);
return -ENODEV;
@@ -1952,14 +2051,36 @@ static int rtl8365mb_detect(struct realtek_smi *smi)
return 0;
}
-static const struct dsa_switch_ops rtl8365mb_switch_ops = {
+static const struct dsa_switch_ops rtl8365mb_switch_ops_smi = {
+ .get_tag_protocol = rtl8365mb_get_tag_protocol,
+ .change_tag_protocol = rtl8365mb_change_tag_protocol,
+ .setup = rtl8365mb_setup,
+ .teardown = rtl8365mb_teardown,
+ .phylink_get_caps = rtl8365mb_phylink_get_caps,
+ .phylink_mac_config = rtl8365mb_phylink_mac_config,
+ .phylink_mac_link_down = rtl8365mb_phylink_mac_link_down,
+ .phylink_mac_link_up = rtl8365mb_phylink_mac_link_up,
+ .port_stp_state_set = rtl8365mb_port_stp_state_set,
+ .get_strings = rtl8365mb_get_strings,
+ .get_ethtool_stats = rtl8365mb_get_ethtool_stats,
+ .get_sset_count = rtl8365mb_get_sset_count,
+ .get_eth_phy_stats = rtl8365mb_get_phy_stats,
+ .get_eth_mac_stats = rtl8365mb_get_mac_stats,
+ .get_eth_ctrl_stats = rtl8365mb_get_ctrl_stats,
+ .get_stats64 = rtl8365mb_get_stats64,
+};
+
+static const struct dsa_switch_ops rtl8365mb_switch_ops_mdio = {
.get_tag_protocol = rtl8365mb_get_tag_protocol,
+ .change_tag_protocol = rtl8365mb_change_tag_protocol,
.setup = rtl8365mb_setup,
.teardown = rtl8365mb_teardown,
- .phylink_validate = rtl8365mb_phylink_validate,
+ .phylink_get_caps = rtl8365mb_phylink_get_caps,
.phylink_mac_config = rtl8365mb_phylink_mac_config,
.phylink_mac_link_down = rtl8365mb_phylink_mac_link_down,
.phylink_mac_link_up = rtl8365mb_phylink_mac_link_up,
+ .phy_read = rtl8365mb_dsa_phy_read,
+ .phy_write = rtl8365mb_dsa_phy_write,
.port_stp_state_set = rtl8365mb_port_stp_state_set,
.get_strings = rtl8365mb_get_strings,
.get_ethtool_stats = rtl8365mb_get_ethtool_stats,
@@ -1970,18 +2091,23 @@ static const struct dsa_switch_ops rtl8365mb_switch_ops = {
.get_stats64 = rtl8365mb_get_stats64,
};
-static const struct realtek_smi_ops rtl8365mb_smi_ops = {
+static const struct realtek_ops rtl8365mb_ops = {
.detect = rtl8365mb_detect,
.phy_read = rtl8365mb_phy_read,
.phy_write = rtl8365mb_phy_write,
};
-const struct realtek_smi_variant rtl8365mb_variant = {
- .ds_ops = &rtl8365mb_switch_ops,
- .ops = &rtl8365mb_smi_ops,
+const struct realtek_variant rtl8365mb_variant = {
+ .ds_ops_smi = &rtl8365mb_switch_ops_smi,
+ .ds_ops_mdio = &rtl8365mb_switch_ops_mdio,
+ .ops = &rtl8365mb_ops,
.clk_delay = 10,
.cmd_read = 0xb9,
.cmd_write = 0xb8,
.chip_data_sz = sizeof(struct rtl8365mb),
};
EXPORT_SYMBOL_GPL(rtl8365mb_variant);
+
+MODULE_AUTHOR("Alvin Å ipraga <alsi@bang-olufsen.dk>");
+MODULE_DESCRIPTION("Driver for RTL8365MB-VC ethernet switch");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/realtek/rtl8366-core.c
index bdb8d8d34880..dc5f75be3017 100644
--- a/drivers/net/dsa/rtl8366.c
+++ b/drivers/net/dsa/realtek/rtl8366-core.c
@@ -11,18 +11,18 @@
#include <linux/if_bridge.h>
#include <net/dsa.h>
-#include "realtek-smi-core.h"
+#include "realtek.h"
-int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used)
+int rtl8366_mc_is_used(struct realtek_priv *priv, int mc_index, int *used)
{
int ret;
int i;
*used = 0;
- for (i = 0; i < smi->num_ports; i++) {
+ for (i = 0; i < priv->num_ports; i++) {
int index = 0;
- ret = smi->ops->get_mc_index(smi, i, &index);
+ ret = priv->ops->get_mc_index(priv, i, &index);
if (ret)
return ret;
@@ -38,13 +38,13 @@ EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
/**
* rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration
- * @smi: the Realtek SMI device instance
+ * @priv: the Realtek SMI device instance
* @vid: the VLAN ID to look up or allocate
* @vlanmc: the pointer will be assigned to a pointer to a valid member config
* if successful
* @return: index of a new member config or negative error number
*/
-static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
+static int rtl8366_obtain_mc(struct realtek_priv *priv, int vid,
struct rtl8366_vlan_mc *vlanmc)
{
struct rtl8366_vlan_4k vlan4k;
@@ -52,10 +52,10 @@ static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
int i;
/* Try to find an existing member config entry for this VID */
- for (i = 0; i < smi->num_vlan_mc; i++) {
- ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
+ for (i = 0; i < priv->num_vlan_mc; i++) {
+ ret = priv->ops->get_vlan_mc(priv, i, vlanmc);
if (ret) {
- dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
+ dev_err(priv->dev, "error searching for VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
@@ -65,19 +65,19 @@ static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
}
/* We have no MC entry for this VID, try to find an empty one */
- for (i = 0; i < smi->num_vlan_mc; i++) {
- ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
+ for (i = 0; i < priv->num_vlan_mc; i++) {
+ ret = priv->ops->get_vlan_mc(priv, i, vlanmc);
if (ret) {
- dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
+ dev_err(priv->dev, "error searching for VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
if (vlanmc->vid == 0 && vlanmc->member == 0) {
/* Update the entry from the 4K table */
- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ ret = priv->ops->get_vlan_4k(priv, vid, &vlan4k);
if (ret) {
- dev_err(smi->dev, "error looking for 4K VLAN MC %d for VID %d\n",
+ dev_err(priv->dev, "error looking for 4K VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
@@ -86,30 +86,30 @@ static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
vlanmc->member = vlan4k.member;
vlanmc->untag = vlan4k.untag;
vlanmc->fid = vlan4k.fid;
- ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
+ ret = priv->ops->set_vlan_mc(priv, i, vlanmc);
if (ret) {
- dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
+ dev_err(priv->dev, "unable to set/update VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
- dev_dbg(smi->dev, "created new MC at index %d for VID %d\n",
+ dev_dbg(priv->dev, "created new MC at index %d for VID %d\n",
i, vid);
return i;
}
}
/* MC table is full, try to find an unused entry and replace it */
- for (i = 0; i < smi->num_vlan_mc; i++) {
+ for (i = 0; i < priv->num_vlan_mc; i++) {
int used;
- ret = rtl8366_mc_is_used(smi, i, &used);
+ ret = rtl8366_mc_is_used(priv, i, &used);
if (ret)
return ret;
if (!used) {
/* Update the entry from the 4K table */
- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ ret = priv->ops->get_vlan_4k(priv, vid, &vlan4k);
if (ret)
return ret;
@@ -117,23 +117,23 @@ static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
vlanmc->member = vlan4k.member;
vlanmc->untag = vlan4k.untag;
vlanmc->fid = vlan4k.fid;
- ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
+ ret = priv->ops->set_vlan_mc(priv, i, vlanmc);
if (ret) {
- dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
+ dev_err(priv->dev, "unable to set/update VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
- dev_dbg(smi->dev, "recycled MC at index %i for VID %d\n",
+ dev_dbg(priv->dev, "recycled MC at index %i for VID %d\n",
i, vid);
return i;
}
}
- dev_err(smi->dev, "all VLAN member configurations are in use\n");
+ dev_err(priv->dev, "all VLAN member configurations are in use\n");
return -ENOSPC;
}
-int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
+int rtl8366_set_vlan(struct realtek_priv *priv, int vid, u32 member,
u32 untag, u32 fid)
{
struct rtl8366_vlan_mc vlanmc;
@@ -141,31 +141,31 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
int mc;
int ret;
- if (!smi->ops->is_vlan_valid(smi, vid))
+ if (!priv->ops->is_vlan_valid(priv, vid))
return -EINVAL;
- dev_dbg(smi->dev,
+ dev_dbg(priv->dev,
"setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
vid, member, untag);
/* Update the 4K table */
- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ ret = priv->ops->get_vlan_4k(priv, vid, &vlan4k);
if (ret)
return ret;
vlan4k.member |= member;
vlan4k.untag |= untag;
vlan4k.fid = fid;
- ret = smi->ops->set_vlan_4k(smi, &vlan4k);
+ ret = priv->ops->set_vlan_4k(priv, &vlan4k);
if (ret)
return ret;
- dev_dbg(smi->dev,
+ dev_dbg(priv->dev,
"resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
vid, vlan4k.member, vlan4k.untag);
/* Find or allocate a member config for this VID */
- ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
+ ret = rtl8366_obtain_mc(priv, vid, &vlanmc);
if (ret < 0)
return ret;
mc = ret;
@@ -176,12 +176,12 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
vlanmc.fid = fid;
/* Commit updates to the MC entry */
- ret = smi->ops->set_vlan_mc(smi, mc, &vlanmc);
+ ret = priv->ops->set_vlan_mc(priv, mc, &vlanmc);
if (ret)
- dev_err(smi->dev, "failed to commit changes to VLAN MC index %d for VID %d\n",
+ dev_err(priv->dev, "failed to commit changes to VLAN MC index %d for VID %d\n",
mc, vid);
else
- dev_dbg(smi->dev,
+ dev_dbg(priv->dev,
"resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
vid, vlanmc.member, vlanmc.untag);
@@ -189,37 +189,37 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
}
EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
-int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
+int rtl8366_set_pvid(struct realtek_priv *priv, unsigned int port,
unsigned int vid)
{
struct rtl8366_vlan_mc vlanmc;
int mc;
int ret;
- if (!smi->ops->is_vlan_valid(smi, vid))
+ if (!priv->ops->is_vlan_valid(priv, vid))
return -EINVAL;
/* Find or allocate a member config for this VID */
- ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
+ ret = rtl8366_obtain_mc(priv, vid, &vlanmc);
if (ret < 0)
return ret;
mc = ret;
- ret = smi->ops->set_mc_index(smi, port, mc);
+ ret = priv->ops->set_mc_index(priv, port, mc);
if (ret) {
- dev_err(smi->dev, "set PVID: failed to set MC index %d for port %d\n",
+ dev_err(priv->dev, "set PVID: failed to set MC index %d for port %d\n",
mc, port);
return ret;
}
- dev_dbg(smi->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n",
+ dev_dbg(priv->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n",
port, vid, mc);
return 0;
}
EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
-int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable)
+int rtl8366_enable_vlan4k(struct realtek_priv *priv, bool enable)
{
int ret;
@@ -229,52 +229,52 @@ int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable)
*/
if (enable) {
/* Make sure VLAN is ON */
- ret = smi->ops->enable_vlan(smi, true);
+ ret = priv->ops->enable_vlan(priv, true);
if (ret)
return ret;
- smi->vlan_enabled = true;
+ priv->vlan_enabled = true;
}
- ret = smi->ops->enable_vlan4k(smi, enable);
+ ret = priv->ops->enable_vlan4k(priv, enable);
if (ret)
return ret;
- smi->vlan4k_enabled = enable;
+ priv->vlan4k_enabled = enable;
return 0;
}
EXPORT_SYMBOL_GPL(rtl8366_enable_vlan4k);
-int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable)
+int rtl8366_enable_vlan(struct realtek_priv *priv, bool enable)
{
int ret;
- ret = smi->ops->enable_vlan(smi, enable);
+ ret = priv->ops->enable_vlan(priv, enable);
if (ret)
return ret;
- smi->vlan_enabled = enable;
+ priv->vlan_enabled = enable;
/* If we turn VLAN off, make sure that we turn off
* 4k VLAN as well, if that happened to be on.
*/
if (!enable) {
- smi->vlan4k_enabled = false;
- ret = smi->ops->enable_vlan4k(smi, false);
+ priv->vlan4k_enabled = false;
+ ret = priv->ops->enable_vlan4k(priv, false);
}
return ret;
}
EXPORT_SYMBOL_GPL(rtl8366_enable_vlan);
-int rtl8366_reset_vlan(struct realtek_smi *smi)
+int rtl8366_reset_vlan(struct realtek_priv *priv)
{
struct rtl8366_vlan_mc vlanmc;
int ret;
int i;
- rtl8366_enable_vlan(smi, false);
- rtl8366_enable_vlan4k(smi, false);
+ rtl8366_enable_vlan(priv, false);
+ rtl8366_enable_vlan4k(priv, false);
/* Clear the 16 VLAN member configurations */
vlanmc.vid = 0;
@@ -282,8 +282,8 @@ int rtl8366_reset_vlan(struct realtek_smi *smi)
vlanmc.member = 0;
vlanmc.untag = 0;
vlanmc.fid = 0;
- for (i = 0; i < smi->num_vlan_mc; i++) {
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ for (i = 0; i < priv->num_vlan_mc; i++) {
+ ret = priv->ops->set_vlan_mc(priv, i, &vlanmc);
if (ret)
return ret;
}
@@ -298,12 +298,12 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,
{
bool untagged = !!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
bool pvid = !!(vlan->flags & BRIDGE_VLAN_INFO_PVID);
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
u32 member = 0;
u32 untag = 0;
int ret;
- if (!smi->ops->is_vlan_valid(smi, vlan->vid)) {
+ if (!priv->ops->is_vlan_valid(priv, vlan->vid)) {
NL_SET_ERR_MSG_MOD(extack, "VLAN ID not valid");
return -EINVAL;
}
@@ -312,13 +312,13 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,
* FIXME: what's with this 4k business?
* Just rtl8366_enable_vlan() seems inconclusive.
*/
- ret = rtl8366_enable_vlan4k(smi, true);
+ ret = rtl8366_enable_vlan4k(priv, true);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to enable VLAN 4K");
return ret;
}
- dev_dbg(smi->dev, "add VLAN %d on port %d, %s, %s\n",
+ dev_dbg(priv->dev, "add VLAN %d on port %d, %s, %s\n",
vlan->vid, port, untagged ? "untagged" : "tagged",
pvid ? "PVID" : "no PVID");
@@ -327,18 +327,18 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,
if (untagged)
untag |= BIT(port);
- ret = rtl8366_set_vlan(smi, vlan->vid, member, untag, 0);
+ ret = rtl8366_set_vlan(priv, vlan->vid, member, untag, 0);
if (ret) {
- dev_err(smi->dev, "failed to set up VLAN %04x", vlan->vid);
+ dev_err(priv->dev, "failed to set up VLAN %04x", vlan->vid);
return ret;
}
if (!pvid)
return 0;
- ret = rtl8366_set_pvid(smi, port, vlan->vid);
+ ret = rtl8366_set_pvid(priv, port, vlan->vid);
if (ret) {
- dev_err(smi->dev, "failed to set PVID on port %d to VLAN %04x",
+ dev_err(priv->dev, "failed to set PVID on port %d to VLAN %04x",
port, vlan->vid);
return ret;
}
@@ -350,15 +350,15 @@ EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
int rtl8366_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret, i;
- dev_dbg(smi->dev, "del VLAN %d on port %d\n", vlan->vid, port);
+ dev_dbg(priv->dev, "del VLAN %d on port %d\n", vlan->vid, port);
- for (i = 0; i < smi->num_vlan_mc; i++) {
+ for (i = 0; i < priv->num_vlan_mc; i++) {
struct rtl8366_vlan_mc vlanmc;
- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+ ret = priv->ops->get_vlan_mc(priv, i, &vlanmc);
if (ret)
return ret;
@@ -376,9 +376,9 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
vlanmc.priority = 0;
vlanmc.fid = 0;
}
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ ret = priv->ops->set_vlan_mc(priv, i, &vlanmc);
if (ret) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to remove VLAN %04x\n",
vlan->vid);
return ret;
@@ -394,15 +394,15 @@ EXPORT_SYMBOL_GPL(rtl8366_vlan_del);
void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
uint8_t *data)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8366_mib_counter *mib;
int i;
- if (port >= smi->num_ports)
+ if (port >= priv->num_ports)
return;
- for (i = 0; i < smi->num_mib_counters; i++) {
- mib = &smi->mib_counters[i];
+ for (i = 0; i < priv->num_mib_counters; i++) {
+ mib = &priv->mib_counters[i];
strncpy(data + i * ETH_GSTRING_LEN,
mib->name, ETH_GSTRING_LEN);
}
@@ -411,35 +411,35 @@ EXPORT_SYMBOL_GPL(rtl8366_get_strings);
int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
/* We only support SS_STATS */
if (sset != ETH_SS_STATS)
return 0;
- if (port >= smi->num_ports)
+ if (port >= priv->num_ports)
return -EINVAL;
- return smi->num_mib_counters;
+ return priv->num_mib_counters;
}
EXPORT_SYMBOL_GPL(rtl8366_get_sset_count);
void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int i;
int ret;
- if (port >= smi->num_ports)
+ if (port >= priv->num_ports)
return;
- for (i = 0; i < smi->num_mib_counters; i++) {
+ for (i = 0; i < priv->num_mib_counters; i++) {
struct rtl8366_mib_counter *mib;
u64 mibvalue = 0;
- mib = &smi->mib_counters[i];
- ret = smi->ops->get_mib_counter(smi, port, mib, &mibvalue);
+ mib = &priv->mib_counters[i];
+ ret = priv->ops->get_mib_counter(priv, port, mib, &mibvalue);
if (ret) {
- dev_err(smi->dev, "error reading MIB counter %s\n",
+ dev_err(priv->dev, "error reading MIB counter %s\n",
mib->name);
}
data[i] = mibvalue;
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
index ecc19bd5115f..1a3406b9e64c 100644
--- a/drivers/net/dsa/rtl8366rb.c
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -21,7 +21,7 @@
#include <linux/of_irq.h>
#include <linux/regmap.h>
-#include "realtek-smi-core.h"
+#include "realtek.h"
#define RTL8366RB_PORT_NUM_CPU 5
#define RTL8366RB_NUM_PORTS 6
@@ -396,7 +396,7 @@ static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
{ 0, 70, 2, "IfOutBroadcastPkts" },
};
-static int rtl8366rb_get_mib_counter(struct realtek_smi *smi,
+static int rtl8366rb_get_mib_counter(struct realtek_priv *priv,
int port,
struct rtl8366_mib_counter *mib,
u64 *mibvalue)
@@ -412,12 +412,12 @@ static int rtl8366rb_get_mib_counter(struct realtek_smi *smi,
/* Writing access counter address first
* then ASIC will prepare 64bits counter wait for being retrived
*/
- ret = regmap_write(smi->map, addr, 0); /* Write whatever */
+ ret = regmap_write(priv->map, addr, 0); /* Write whatever */
if (ret)
return ret;
/* Read MIB control register */
- ret = regmap_read(smi->map, RTL8366RB_MIB_CTRL_REG, &val);
+ ret = regmap_read(priv->map, RTL8366RB_MIB_CTRL_REG, &val);
if (ret)
return -EIO;
@@ -430,7 +430,7 @@ static int rtl8366rb_get_mib_counter(struct realtek_smi *smi,
/* Read each individual MIB 16 bits at the time */
*mibvalue = 0;
for (i = mib->length; i > 0; i--) {
- ret = regmap_read(smi->map, addr + (i - 1), &val);
+ ret = regmap_read(priv->map, addr + (i - 1), &val);
if (ret)
return ret;
*mibvalue = (*mibvalue << 16) | (val & 0xFFFF);
@@ -455,38 +455,38 @@ static u32 rtl8366rb_get_irqmask(struct irq_data *d)
static void rtl8366rb_mask_irq(struct irq_data *d)
{
- struct realtek_smi *smi = irq_data_get_irq_chip_data(d);
+ struct realtek_priv *priv = irq_data_get_irq_chip_data(d);
int ret;
- ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_INTERRUPT_MASK_REG,
rtl8366rb_get_irqmask(d), 0);
if (ret)
- dev_err(smi->dev, "could not mask IRQ\n");
+ dev_err(priv->dev, "could not mask IRQ\n");
}
static void rtl8366rb_unmask_irq(struct irq_data *d)
{
- struct realtek_smi *smi = irq_data_get_irq_chip_data(d);
+ struct realtek_priv *priv = irq_data_get_irq_chip_data(d);
int ret;
- ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_INTERRUPT_MASK_REG,
rtl8366rb_get_irqmask(d),
rtl8366rb_get_irqmask(d));
if (ret)
- dev_err(smi->dev, "could not unmask IRQ\n");
+ dev_err(priv->dev, "could not unmask IRQ\n");
}
static irqreturn_t rtl8366rb_irq(int irq, void *data)
{
- struct realtek_smi *smi = data;
+ struct realtek_priv *priv = data;
u32 stat;
int ret;
/* This clears the IRQ status register */
- ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG,
+ ret = regmap_read(priv->map, RTL8366RB_INTERRUPT_STATUS_REG,
&stat);
if (ret) {
- dev_err(smi->dev, "can't read interrupt status\n");
+ dev_err(priv->dev, "can't read interrupt status\n");
return IRQ_NONE;
}
stat &= RTL8366RB_INTERRUPT_VALID;
@@ -502,7 +502,7 @@ static irqreturn_t rtl8366rb_irq(int irq, void *data)
*/
if (line < 12 && line > 5)
line -= 5;
- child_irq = irq_find_mapping(smi->irqdomain, line);
+ child_irq = irq_find_mapping(priv->irqdomain, line);
handle_nested_irq(child_irq);
}
return IRQ_HANDLED;
@@ -538,7 +538,7 @@ static const struct irq_domain_ops rtl8366rb_irqdomain_ops = {
.xlate = irq_domain_xlate_onecell,
};
-static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
+static int rtl8366rb_setup_cascaded_irq(struct realtek_priv *priv)
{
struct device_node *intc;
unsigned long irq_trig;
@@ -547,24 +547,24 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
u32 val;
int i;
- intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller");
+ intc = of_get_child_by_name(priv->dev->of_node, "interrupt-controller");
if (!intc) {
- dev_err(smi->dev, "missing child interrupt-controller node\n");
+ dev_err(priv->dev, "missing child interrupt-controller node\n");
return -EINVAL;
}
/* RB8366RB IRQs cascade off this one */
irq = of_irq_get(intc, 0);
if (irq <= 0) {
- dev_err(smi->dev, "failed to get parent IRQ\n");
+ dev_err(priv->dev, "failed to get parent IRQ\n");
ret = irq ? irq : -EINVAL;
goto out_put_node;
}
/* This clears the IRQ status register */
- ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG,
+ ret = regmap_read(priv->map, RTL8366RB_INTERRUPT_STATUS_REG,
&val);
if (ret) {
- dev_err(smi->dev, "can't read interrupt status\n");
+ dev_err(priv->dev, "can't read interrupt status\n");
goto out_put_node;
}
@@ -573,48 +573,48 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
switch (irq_trig) {
case IRQF_TRIGGER_RISING:
case IRQF_TRIGGER_HIGH:
- dev_info(smi->dev, "active high/rising IRQ\n");
+ dev_info(priv->dev, "active high/rising IRQ\n");
val = 0;
break;
case IRQF_TRIGGER_FALLING:
case IRQF_TRIGGER_LOW:
- dev_info(smi->dev, "active low/falling IRQ\n");
+ dev_info(priv->dev, "active low/falling IRQ\n");
val = RTL8366RB_INTERRUPT_POLARITY;
break;
}
- ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_CONTROL_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_INTERRUPT_CONTROL_REG,
RTL8366RB_INTERRUPT_POLARITY,
val);
if (ret) {
- dev_err(smi->dev, "could not configure IRQ polarity\n");
+ dev_err(priv->dev, "could not configure IRQ polarity\n");
goto out_put_node;
}
- ret = devm_request_threaded_irq(smi->dev, irq, NULL,
+ ret = devm_request_threaded_irq(priv->dev, irq, NULL,
rtl8366rb_irq, IRQF_ONESHOT,
- "RTL8366RB", smi);
+ "RTL8366RB", priv);
if (ret) {
- dev_err(smi->dev, "unable to request irq: %d\n", ret);
+ dev_err(priv->dev, "unable to request irq: %d\n", ret);
goto out_put_node;
}
- smi->irqdomain = irq_domain_add_linear(intc,
- RTL8366RB_NUM_INTERRUPT,
- &rtl8366rb_irqdomain_ops,
- smi);
- if (!smi->irqdomain) {
- dev_err(smi->dev, "failed to create IRQ domain\n");
+ priv->irqdomain = irq_domain_add_linear(intc,
+ RTL8366RB_NUM_INTERRUPT,
+ &rtl8366rb_irqdomain_ops,
+ priv);
+ if (!priv->irqdomain) {
+ dev_err(priv->dev, "failed to create IRQ domain\n");
ret = -EINVAL;
goto out_put_node;
}
- for (i = 0; i < smi->num_ports; i++)
- irq_set_parent(irq_create_mapping(smi->irqdomain, i), irq);
+ for (i = 0; i < priv->num_ports; i++)
+ irq_set_parent(irq_create_mapping(priv->irqdomain, i), irq);
out_put_node:
of_node_put(intc);
return ret;
}
-static int rtl8366rb_set_addr(struct realtek_smi *smi)
+static int rtl8366rb_set_addr(struct realtek_priv *priv)
{
u8 addr[ETH_ALEN];
u16 val;
@@ -622,18 +622,18 @@ static int rtl8366rb_set_addr(struct realtek_smi *smi)
eth_random_addr(addr);
- dev_info(smi->dev, "set MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ dev_info(priv->dev, "set MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
val = addr[0] << 8 | addr[1];
- ret = regmap_write(smi->map, RTL8366RB_SMAR0, val);
+ ret = regmap_write(priv->map, RTL8366RB_SMAR0, val);
if (ret)
return ret;
val = addr[2] << 8 | addr[3];
- ret = regmap_write(smi->map, RTL8366RB_SMAR1, val);
+ ret = regmap_write(priv->map, RTL8366RB_SMAR1, val);
if (ret)
return ret;
val = addr[4] << 8 | addr[5];
- ret = regmap_write(smi->map, RTL8366RB_SMAR2, val);
+ ret = regmap_write(priv->map, RTL8366RB_SMAR2, val);
if (ret)
return ret;
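The three writes above pack the six random MAC bytes into the 16-bit SMAR0..SMAR2 registers, two bytes per register with the earlier byte in the high half. A minimal standalone sketch of that packing (pack_mac() and the demo address are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Pack a 6-byte MAC into three 16-bit register values the same way
 * rtl8366rb_set_addr() does: earlier byte in the high half. */
static void pack_mac(const uint8_t addr[6], uint16_t regs[3])
{
        regs[0] = (uint16_t)(addr[0] << 8 | addr[1]);   /* SMAR0 */
        regs[1] = (uint16_t)(addr[2] << 8 | addr[3]);   /* SMAR1 */
        regs[2] = (uint16_t)(addr[4] << 8 | addr[5]);   /* SMAR2 */
}

int main(void)
{
        const uint8_t addr[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
        uint16_t regs[3];

        pack_mac(addr, regs);
        printf("SMAR0=%04x SMAR1=%04x SMAR2=%04x\n", regs[0], regs[1], regs[2]);
        return 0;
}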
@@ -765,7 +765,7 @@ static const struct rtl8366rb_jam_tbl_entry rtl8366rb_green_jam[] = {
/* Function that jams the tables in the proper registers */
static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
- int jam_size, struct realtek_smi *smi,
+ int jam_size, struct realtek_priv *priv,
bool write_dbg)
{
u32 val;
@@ -774,24 +774,24 @@ static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
for (i = 0; i < jam_size; i++) {
if ((jam_table[i].reg & 0xBE00) == 0xBE00) {
- ret = regmap_read(smi->map,
+ ret = regmap_read(priv->map,
RTL8366RB_PHY_ACCESS_BUSY_REG,
&val);
if (ret)
return ret;
if (!(val & RTL8366RB_PHY_INT_BUSY)) {
- ret = regmap_write(smi->map,
- RTL8366RB_PHY_ACCESS_CTRL_REG,
- RTL8366RB_PHY_CTRL_WRITE);
+ ret = regmap_write(priv->map,
+ RTL8366RB_PHY_ACCESS_CTRL_REG,
+ RTL8366RB_PHY_CTRL_WRITE);
if (ret)
return ret;
}
}
if (write_dbg)
- dev_dbg(smi->dev, "jam %04x into register %04x\n",
+ dev_dbg(priv->dev, "jam %04x into register %04x\n",
jam_table[i].val,
jam_table[i].reg);
- ret = regmap_write(smi->map,
+ ret = regmap_write(priv->map,
jam_table[i].reg,
jam_table[i].val);
if (ret)
@@ -802,7 +802,7 @@ static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
static int rtl8366rb_setup(struct dsa_switch *ds)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
const struct rtl8366rb_jam_tbl_entry *jam_table;
struct rtl8366rb *rb;
u32 chip_ver = 0;
@@ -812,11 +812,11 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
int ret;
int i;
- rb = smi->chip_data;
+ rb = priv->chip_data;
- ret = regmap_read(smi->map, RTL8366RB_CHIP_ID_REG, &chip_id);
+ ret = regmap_read(priv->map, RTL8366RB_CHIP_ID_REG, &chip_id);
if (ret) {
- dev_err(smi->dev, "unable to read chip id\n");
+ dev_err(priv->dev, "unable to read chip id\n");
return ret;
}
@@ -824,18 +824,18 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
case RTL8366RB_CHIP_ID_8366:
break;
default:
- dev_err(smi->dev, "unknown chip id (%04x)\n", chip_id);
+ dev_err(priv->dev, "unknown chip id (%04x)\n", chip_id);
return -ENODEV;
}
- ret = regmap_read(smi->map, RTL8366RB_CHIP_VERSION_CTRL_REG,
+ ret = regmap_read(priv->map, RTL8366RB_CHIP_VERSION_CTRL_REG,
&chip_ver);
if (ret) {
- dev_err(smi->dev, "unable to read chip version\n");
+ dev_err(priv->dev, "unable to read chip version\n");
return ret;
}
- dev_info(smi->dev, "RTL%04x ver %u chip found\n",
+ dev_info(priv->dev, "RTL%04x ver %u chip found\n",
chip_id, chip_ver & RTL8366RB_CHIP_VERSION_MASK);
/* Do the init dance using the right jam table */
@@ -872,20 +872,20 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
jam_size = ARRAY_SIZE(rtl8366rb_init_jam_dgn3500);
}
- ret = rtl8366rb_jam_table(jam_table, jam_size, smi, true);
+ ret = rtl8366rb_jam_table(jam_table, jam_size, priv, true);
if (ret)
return ret;
/* Isolate all user ports so each can only send packets to itself and the CPU port */
for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
- ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(i),
+ ret = regmap_write(priv->map, RTL8366RB_PORT_ISO(i),
RTL8366RB_PORT_ISO_PORTS(BIT(RTL8366RB_PORT_NUM_CPU)) |
RTL8366RB_PORT_ISO_EN);
if (ret)
return ret;
}
/* CPU port can send packets to all ports */
- ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(RTL8366RB_PORT_NUM_CPU),
+ ret = regmap_write(priv->map, RTL8366RB_PORT_ISO(RTL8366RB_PORT_NUM_CPU),
RTL8366RB_PORT_ISO_PORTS(dsa_user_ports(ds)) |
RTL8366RB_PORT_ISO_EN);
if (ret)
@@ -893,26 +893,26 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
/* Set up the "green ethernet" feature */
ret = rtl8366rb_jam_table(rtl8366rb_green_jam,
- ARRAY_SIZE(rtl8366rb_green_jam), smi, false);
+ ARRAY_SIZE(rtl8366rb_green_jam), priv, false);
if (ret)
return ret;
- ret = regmap_write(smi->map,
+ ret = regmap_write(priv->map,
RTL8366RB_GREEN_FEATURE_REG,
(chip_ver == 1) ? 0x0007 : 0x0003);
if (ret)
return ret;
/* Vendor driver sets 0x240 in registers 0xc and 0xd (undocumented) */
- ret = regmap_write(smi->map, 0x0c, 0x240);
+ ret = regmap_write(priv->map, 0x0c, 0x240);
if (ret)
return ret;
- ret = regmap_write(smi->map, 0x0d, 0x240);
+ ret = regmap_write(priv->map, 0x0d, 0x240);
if (ret)
return ret;
/* Set some random MAC address */
- ret = rtl8366rb_set_addr(smi);
+ ret = rtl8366rb_set_addr(priv);
if (ret)
return ret;
@@ -921,21 +921,21 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
* If you set RTL8368RB_CPU_NO_TAG (bit 15) in this register
* the custom tag is turned off.
*/
- ret = regmap_update_bits(smi->map, RTL8368RB_CPU_CTRL_REG,
+ ret = regmap_update_bits(priv->map, RTL8368RB_CPU_CTRL_REG,
0xFFFF,
- BIT(smi->cpu_port));
+ BIT(priv->cpu_port));
if (ret)
return ret;
/* Make sure we default-enable the fixed CPU port */
- ret = regmap_update_bits(smi->map, RTL8366RB_PECR,
- BIT(smi->cpu_port),
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR,
+ BIT(priv->cpu_port),
0);
if (ret)
return ret;
/* Set maximum packet length to 1536 bytes */
- ret = regmap_update_bits(smi->map, RTL8366RB_SGCR,
+ ret = regmap_update_bits(priv->map, RTL8366RB_SGCR,
RTL8366RB_SGCR_MAX_LENGTH_MASK,
RTL8366RB_SGCR_MAX_LENGTH_1536);
if (ret)
@@ -945,13 +945,13 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
rb->max_mtu[i] = 1532;
/* Disable learning for all ports */
- ret = regmap_write(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL,
+ ret = regmap_write(priv->map, RTL8366RB_PORT_LEARNDIS_CTRL,
RTL8366RB_PORT_ALL);
if (ret)
return ret;
/* Enable auto ageing for all ports */
- ret = regmap_write(smi->map, RTL8366RB_SECURITY_CTRL, 0);
+ ret = regmap_write(priv->map, RTL8366RB_SECURITY_CTRL, 0);
if (ret)
return ret;
@@ -962,30 +962,30 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
* connected to something exotic such as fiber, then this might
* be worth experimenting with.
*/
- ret = regmap_update_bits(smi->map, RTL8366RB_PMC0,
+ ret = regmap_update_bits(priv->map, RTL8366RB_PMC0,
RTL8366RB_PMC0_P4_IOMODE_MASK,
0 << RTL8366RB_PMC0_P4_IOMODE_SHIFT);
if (ret)
return ret;
/* Accept all packets by default, we enable filtering on-demand */
- ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
+ ret = regmap_write(priv->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
0);
if (ret)
return ret;
- ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
+ ret = regmap_write(priv->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
0);
if (ret)
return ret;
/* Don't drop packets whose DA has not been learned */
- ret = regmap_update_bits(smi->map, RTL8366RB_SSCR2,
+ ret = regmap_update_bits(priv->map, RTL8366RB_SSCR2,
RTL8366RB_SSCR2_DROP_UNKNOWN_DA, 0);
if (ret)
return ret;
/* Set blinking, TODO: make this configurable */
- ret = regmap_update_bits(smi->map, RTL8366RB_LED_BLINKRATE_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_LED_BLINKRATE_REG,
RTL8366RB_LED_BLINKRATE_MASK,
RTL8366RB_LED_BLINKRATE_56MS);
if (ret)
@@ -996,15 +996,15 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
* behaviour (no individual config) but we can set up each
* LED separately.
*/
- if (smi->leds_disabled) {
+ if (priv->leds_disabled) {
/* Turn everything off */
- regmap_update_bits(smi->map,
+ regmap_update_bits(priv->map,
RTL8366RB_LED_0_1_CTRL_REG,
0x0FFF, 0);
- regmap_update_bits(smi->map,
+ regmap_update_bits(priv->map,
RTL8366RB_LED_2_3_CTRL_REG,
0x0FFF, 0);
- regmap_update_bits(smi->map,
+ regmap_update_bits(priv->map,
RTL8366RB_INTERRUPT_CONTROL_REG,
RTL8366RB_P4_RGMII_LED,
0);
@@ -1014,7 +1014,7 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
val = RTL8366RB_LED_FORCE;
}
for (i = 0; i < 4; i++) {
- ret = regmap_update_bits(smi->map,
+ ret = regmap_update_bits(priv->map,
RTL8366RB_LED_CTRL_REG,
0xf << (i * 4),
val << (i * 4));
@@ -1022,18 +1022,20 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
return ret;
}
- ret = rtl8366_reset_vlan(smi);
+ ret = rtl8366_reset_vlan(priv);
if (ret)
return ret;
- ret = rtl8366rb_setup_cascaded_irq(smi);
+ ret = rtl8366rb_setup_cascaded_irq(priv);
if (ret)
- dev_info(smi->dev, "no interrupt support\n");
+ dev_info(priv->dev, "no interrupt support\n");
- ret = realtek_smi_setup_mdio(smi);
- if (ret) {
- dev_info(smi->dev, "could not set up MDIO bus\n");
- return -ENODEV;
+ if (priv->setup_interface) {
+ ret = priv->setup_interface(ds);
+ if (ret) {
+ dev_err(priv->dev, "could not set up MDIO bus\n");
+ return -ENODEV;
+ }
}
return 0;
@@ -1052,35 +1054,35 @@ rtl8366rb_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface, struct phy_device *phydev,
int speed, int duplex, bool tx_pause, bool rx_pause)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
- if (port != smi->cpu_port)
+ if (port != priv->cpu_port)
return;
- dev_dbg(smi->dev, "MAC link up on CPU port (%d)\n", port);
+ dev_dbg(priv->dev, "MAC link up on CPU port (%d)\n", port);
/* Force the fixed CPU port into 1Gbit mode, no autonegotiation */
- ret = regmap_update_bits(smi->map, RTL8366RB_MAC_FORCE_CTRL_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_MAC_FORCE_CTRL_REG,
BIT(port), BIT(port));
if (ret) {
- dev_err(smi->dev, "failed to force 1Gbit on CPU port\n");
+ dev_err(priv->dev, "failed to force 1Gbit on CPU port\n");
return;
}
- ret = regmap_update_bits(smi->map, RTL8366RB_PAACR2,
+ ret = regmap_update_bits(priv->map, RTL8366RB_PAACR2,
0xFF00U,
RTL8366RB_PAACR_CPU_PORT << 8);
if (ret) {
- dev_err(smi->dev, "failed to set PAACR on CPU port\n");
+ dev_err(priv->dev, "failed to set PAACR on CPU port\n");
return;
}
/* Enable the CPU port */
- ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
0);
if (ret) {
- dev_err(smi->dev, "failed to enable the CPU port\n");
+ dev_err(priv->dev, "failed to enable the CPU port\n");
return;
}
}
@@ -1089,107 +1091,108 @@ static void
rtl8366rb_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
- if (port != smi->cpu_port)
+ if (port != priv->cpu_port)
return;
- dev_dbg(smi->dev, "MAC link down on CPU port (%d)\n", port);
+ dev_dbg(priv->dev, "MAC link down on CPU port (%d)\n", port);
/* Disable the CPU port */
- ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
BIT(port));
if (ret) {
- dev_err(smi->dev, "failed to disable the CPU port\n");
+ dev_err(priv->dev, "failed to disable the CPU port\n");
return;
}
}
-static void rb8366rb_set_port_led(struct realtek_smi *smi,
+static void rb8366rb_set_port_led(struct realtek_priv *priv,
int port, bool enable)
{
u16 val = enable ? 0x3f : 0;
int ret;
- if (smi->leds_disabled)
+ if (priv->leds_disabled)
return;
switch (port) {
case 0:
- ret = regmap_update_bits(smi->map,
+ ret = regmap_update_bits(priv->map,
RTL8366RB_LED_0_1_CTRL_REG,
0x3F, val);
break;
case 1:
- ret = regmap_update_bits(smi->map,
+ ret = regmap_update_bits(priv->map,
RTL8366RB_LED_0_1_CTRL_REG,
0x3F << RTL8366RB_LED_1_OFFSET,
val << RTL8366RB_LED_1_OFFSET);
break;
case 2:
- ret = regmap_update_bits(smi->map,
+ ret = regmap_update_bits(priv->map,
RTL8366RB_LED_2_3_CTRL_REG,
0x3F, val);
break;
case 3:
- ret = regmap_update_bits(smi->map,
+ ret = regmap_update_bits(priv->map,
RTL8366RB_LED_2_3_CTRL_REG,
0x3F << RTL8366RB_LED_3_OFFSET,
val << RTL8366RB_LED_3_OFFSET);
break;
case 4:
- ret = regmap_update_bits(smi->map,
+ ret = regmap_update_bits(priv->map,
RTL8366RB_INTERRUPT_CONTROL_REG,
RTL8366RB_P4_RGMII_LED,
enable ? RTL8366RB_P4_RGMII_LED : 0);
break;
default:
- dev_err(smi->dev, "no LED for port %d\n", port);
+ dev_err(priv->dev, "no LED for port %d\n", port);
return;
}
if (ret)
- dev_err(smi->dev, "error updating LED on port %d\n", port);
+ dev_err(priv->dev, "error updating LED on port %d\n", port);
}
static int
rtl8366rb_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
- dev_dbg(smi->dev, "enable port %d\n", port);
- ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ dev_dbg(priv->dev, "enable port %d\n", port);
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
0);
if (ret)
return ret;
- rb8366rb_set_port_led(smi, port, true);
+ rb8366rb_set_port_led(priv, port, true);
return 0;
}
static void
rtl8366rb_port_disable(struct dsa_switch *ds, int port)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
- dev_dbg(smi->dev, "disable port %d\n", port);
- ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ dev_dbg(priv->dev, "disable port %d\n", port);
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
BIT(port));
if (ret)
return;
- rb8366rb_set_port_led(smi, port, false);
+ rb8366rb_set_port_led(priv, port, false);
}
static int
rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
unsigned int port_bitmap = 0;
int ret, i;
@@ -1202,17 +1205,17 @@ rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port,
if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
/* Join this port to each other port on the bridge */
- ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i),
+ ret = regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(i),
RTL8366RB_PORT_ISO_PORTS(BIT(port)),
RTL8366RB_PORT_ISO_PORTS(BIT(port)));
if (ret)
- dev_err(smi->dev, "failed to join port %d\n", port);
+ dev_err(priv->dev, "failed to join port %d\n", port);
port_bitmap |= BIT(i);
}
/* Set the bits for the ports we can access */
- return regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port),
+ return regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(port),
RTL8366RB_PORT_ISO_PORTS(port_bitmap),
RTL8366RB_PORT_ISO_PORTS(port_bitmap));
}
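The join path updates the isolation matrix symmetrically: every existing bridge member gets the joining port added to its mask, and the joining port's own mask is set to the accumulated member bitmap. A self-contained model of that bookkeeping (the six-port size is an assumption, and the CPU-port bits programmed at setup are left out):

#include <stdio.h>

#define NUM_PORTS 6                             /* assumed: 5 user ports + CPU */

static unsigned int iso[NUM_PORTS];             /* per-port "may forward to" bitmap */

/* Mirrors the symmetric update in rtl8366rb_port_bridge_join(). */
static void bridge_join(unsigned int members, int port)
{
        unsigned int port_bitmap = 0;
        int i;

        for (i = 0; i < NUM_PORTS; i++) {
                if (i == port || !(members & (1u << i)))
                        continue;
                iso[i] |= 1u << port;           /* member i may now reach 'port' */
                port_bitmap |= 1u << i;
        }
        iso[port] |= port_bitmap;               /* 'port' may reach all members */
}

int main(void)
{
        bridge_join(0x06, 1);                   /* ports 1 and 2 share a bridge */
        bridge_join(0x06, 2);
        printf("iso[1]=0x%x iso[2]=0x%x\n", iso[1], iso[2]);
        return 0;
}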
@@ -1221,7 +1224,7 @@ static void
rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
unsigned int port_bitmap = 0;
int ret, i;
@@ -1234,28 +1237,30 @@ rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port,
if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
/* Remove this port from any other port on the bridge */
- ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i),
+ ret = regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(i),
RTL8366RB_PORT_ISO_PORTS(BIT(port)), 0);
if (ret)
- dev_err(smi->dev, "failed to leave port %d\n", port);
+ dev_err(priv->dev, "failed to leave port %d\n", port);
port_bitmap |= BIT(i);
}
/* Clear the bits for the ports we cannot access, leave ourselves */
- regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port),
+ regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(port),
RTL8366RB_PORT_ISO_PORTS(port_bitmap), 0);
}
/**
* rtl8366rb_drop_untagged() - make the switch drop untagged and C-tagged frames
- * @smi: SMI state container
+ * @priv: SMI state container
* @port: the port to drop untagged and C-tagged frames on
* @drop: whether to drop or pass untagged and C-tagged frames
+ *
+ * Return: zero for success, a negative number on error.
*/
-static int rtl8366rb_drop_untagged(struct realtek_smi *smi, int port, bool drop)
+static int rtl8366rb_drop_untagged(struct realtek_priv *priv, int port, bool drop)
{
- return regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
+ return regmap_update_bits(priv->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port),
drop ? RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) : 0);
}
@@ -1264,17 +1269,17 @@ static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering,
struct netlink_ext_ack *extack)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8366rb *rb;
int ret;
- rb = smi->chip_data;
+ rb = priv->chip_data;
- dev_dbg(smi->dev, "port %d: %s VLAN filtering\n", port,
+ dev_dbg(priv->dev, "port %d: %s VLAN filtering\n", port,
vlan_filtering ? "enable" : "disable");
/* If the port is not in the member set, the frame will be dropped */
- ret = regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
BIT(port), vlan_filtering ? BIT(port) : 0);
if (ret)
return ret;
@@ -1284,9 +1289,9 @@ static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port,
* filtering on a port, we need to accept any frames.
*/
if (vlan_filtering)
- ret = rtl8366rb_drop_untagged(smi, port, !rb->pvid_enabled[port]);
+ ret = rtl8366rb_drop_untagged(priv, port, !rb->pvid_enabled[port]);
else
- ret = rtl8366rb_drop_untagged(smi, port, false);
+ ret = rtl8366rb_drop_untagged(priv, port, false);
return ret;
}
@@ -1308,11 +1313,11 @@ rtl8366rb_port_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
if (flags.mask & BR_LEARNING) {
- ret = regmap_update_bits(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL,
+ ret = regmap_update_bits(priv->map, RTL8366RB_PORT_LEARNDIS_CTRL,
BIT(port),
(flags.val & BR_LEARNING) ? 0 : BIT(port));
if (ret)
@@ -1325,7 +1330,7 @@ rtl8366rb_port_bridge_flags(struct dsa_switch *ds, int port,
static void
rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
u32 val;
int i;
@@ -1344,13 +1349,13 @@ rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
val = RTL8366RB_STP_STATE_FORWARDING;
break;
default:
- dev_err(smi->dev, "unknown bridge state requested\n");
+ dev_err(priv->dev, "unknown bridge state requested\n");
return;
}
/* Set the same status for the port on all the FIDs */
for (i = 0; i < RTL8366RB_NUM_FIDS; i++) {
- regmap_update_bits(smi->map, RTL8366RB_STP_STATE_BASE + i,
+ regmap_update_bits(priv->map, RTL8366RB_STP_STATE_BASE + i,
RTL8366RB_STP_STATE_MASK(port),
RTL8366RB_STP_STATE(port, val));
}
@@ -1359,26 +1364,26 @@ rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
static void
rtl8366rb_port_fast_age(struct dsa_switch *ds, int port)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
/* This will age out any learned L2 entries */
- regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL,
+ regmap_update_bits(priv->map, RTL8366RB_SECURITY_CTRL,
BIT(port), BIT(port));
/* Restore the normal state of things */
- regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL,
+ regmap_update_bits(priv->map, RTL8366RB_SECURITY_CTRL,
BIT(port), 0);
}
static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8366rb *rb;
unsigned int max_mtu;
u32 len;
int i;
/* Cache the per-port MTU setting */
- rb = smi->chip_data;
+ rb = priv->chip_data;
rb->max_mtu[port] = new_mtu;
/* Roof out the MTU for the entire switch to the greatest
@@ -1406,7 +1411,7 @@ static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
else
len = RTL8366RB_SGCR_MAX_LENGTH_16000;
- return regmap_update_bits(smi->map, RTL8366RB_SGCR,
+ return regmap_update_bits(priv->map, RTL8366RB_SGCR,
RTL8366RB_SGCR_MAX_LENGTH_MASK,
len);
}
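rtl8366rb_change_mtu() caches the new MTU per port, but the switch only has one global maximum-length setting, so the largest cached value decides which SGCR length bucket gets programmed. A standalone sketch of that "roofing" step; the 1522/1536/1552/16000 bucket values are an assumption modeled on the RTL8366RB_SGCR_MAX_LENGTH_* constants used in this file, and the frame-overhead arithmetic the driver performs before the comparison is omitted:

#include <stdio.h>

#define NUM_PORTS 6     /* assumed port count, CPU port included */

/* 'frame_len[]' holds per-port layer-2 frame lengths (MTU plus the
 * Ethernet/CPU-tag overhead the driver adds; that math is omitted here). */
static int pick_sgcr_bucket(const int frame_len[NUM_PORTS])
{
        int max_len = 0, i;

        for (i = 0; i < NUM_PORTS; i++)
                if (frame_len[i] > max_len)
                        max_len = frame_len[i];

        if (max_len <= 1522)
                return 1522;
        if (max_len <= 1536)
                return 1536;
        if (max_len <= 1552)
                return 1552;
        return 16000;                           /* jumbo setting */
}

int main(void)
{
        int frame_len[NUM_PORTS] = { 1518, 1518, 1536, 1518, 1518, 1518 };

        printf("SGCR bucket: %d\n", pick_sgcr_bucket(frame_len));
        return 0;
}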
@@ -1419,7 +1424,7 @@ static int rtl8366rb_max_mtu(struct dsa_switch *ds, int port)
return 15996;
}
-static int rtl8366rb_get_vlan_4k(struct realtek_smi *smi, u32 vid,
+static int rtl8366rb_get_vlan_4k(struct realtek_priv *priv, u32 vid,
struct rtl8366_vlan_4k *vlan4k)
{
u32 data[3];
@@ -1432,19 +1437,19 @@ static int rtl8366rb_get_vlan_4k(struct realtek_smi *smi, u32 vid,
return -EINVAL;
/* write VID */
- ret = regmap_write(smi->map, RTL8366RB_VLAN_TABLE_WRITE_BASE,
+ ret = regmap_write(priv->map, RTL8366RB_VLAN_TABLE_WRITE_BASE,
vid & RTL8366RB_VLAN_VID_MASK);
if (ret)
return ret;
/* write table access control word */
- ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
+ ret = regmap_write(priv->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
RTL8366RB_TABLE_VLAN_READ_CTRL);
if (ret)
return ret;
for (i = 0; i < 3; i++) {
- ret = regmap_read(smi->map,
+ ret = regmap_read(priv->map,
RTL8366RB_VLAN_TABLE_READ_BASE + i,
&data[i]);
if (ret)
@@ -1460,7 +1465,7 @@ static int rtl8366rb_get_vlan_4k(struct realtek_smi *smi, u32 vid,
return 0;
}
-static int rtl8366rb_set_vlan_4k(struct realtek_smi *smi,
+static int rtl8366rb_set_vlan_4k(struct realtek_priv *priv,
const struct rtl8366_vlan_4k *vlan4k)
{
u32 data[3];
@@ -1480,7 +1485,7 @@ static int rtl8366rb_set_vlan_4k(struct realtek_smi *smi,
data[2] = vlan4k->fid & RTL8366RB_VLAN_FID_MASK;
for (i = 0; i < 3; i++) {
- ret = regmap_write(smi->map,
+ ret = regmap_write(priv->map,
RTL8366RB_VLAN_TABLE_WRITE_BASE + i,
data[i]);
if (ret)
@@ -1488,13 +1493,13 @@ static int rtl8366rb_set_vlan_4k(struct realtek_smi *smi,
}
/* write table access control word */
- ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
+ ret = regmap_write(priv->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
RTL8366RB_TABLE_VLAN_WRITE_CTRL);
return ret;
}
-static int rtl8366rb_get_vlan_mc(struct realtek_smi *smi, u32 index,
+static int rtl8366rb_get_vlan_mc(struct realtek_priv *priv, u32 index,
struct rtl8366_vlan_mc *vlanmc)
{
u32 data[3];
@@ -1507,7 +1512,7 @@ static int rtl8366rb_get_vlan_mc(struct realtek_smi *smi, u32 index,
return -EINVAL;
for (i = 0; i < 3; i++) {
- ret = regmap_read(smi->map,
+ ret = regmap_read(priv->map,
RTL8366RB_VLAN_MC_BASE(index) + i,
&data[i]);
if (ret)
@@ -1525,7 +1530,7 @@ static int rtl8366rb_get_vlan_mc(struct realtek_smi *smi, u32 index,
return 0;
}
-static int rtl8366rb_set_vlan_mc(struct realtek_smi *smi, u32 index,
+static int rtl8366rb_set_vlan_mc(struct realtek_priv *priv, u32 index,
const struct rtl8366_vlan_mc *vlanmc)
{
u32 data[3];
@@ -1549,7 +1554,7 @@ static int rtl8366rb_set_vlan_mc(struct realtek_smi *smi, u32 index,
data[2] = vlanmc->fid & RTL8366RB_VLAN_FID_MASK;
for (i = 0; i < 3; i++) {
- ret = regmap_write(smi->map,
+ ret = regmap_write(priv->map,
RTL8366RB_VLAN_MC_BASE(index) + i,
data[i]);
if (ret)
@@ -1559,15 +1564,15 @@ static int rtl8366rb_set_vlan_mc(struct realtek_smi *smi, u32 index,
return 0;
}
-static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val)
+static int rtl8366rb_get_mc_index(struct realtek_priv *priv, int port, int *val)
{
u32 data;
int ret;
- if (port >= smi->num_ports)
+ if (port >= priv->num_ports)
return -EINVAL;
- ret = regmap_read(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
+ ret = regmap_read(priv->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
&data);
if (ret)
return ret;
@@ -1578,22 +1583,22 @@ static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val)
return 0;
}
-static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index)
+static int rtl8366rb_set_mc_index(struct realtek_priv *priv, int port, int index)
{
struct rtl8366rb *rb;
bool pvid_enabled;
int ret;
- rb = smi->chip_data;
+ rb = priv->chip_data;
pvid_enabled = !!index;
- if (port >= smi->num_ports || index >= RTL8366RB_NUM_VLANS)
+ if (port >= priv->num_ports || index >= RTL8366RB_NUM_VLANS)
return -EINVAL;
- ret = regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
- RTL8366RB_PORT_VLAN_CTRL_MASK <<
+ ret = regmap_update_bits(priv->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
+ RTL8366RB_PORT_VLAN_CTRL_MASK <<
RTL8366RB_PORT_VLAN_CTRL_SHIFT(port),
- (index & RTL8366RB_PORT_VLAN_CTRL_MASK) <<
+ (index & RTL8366RB_PORT_VLAN_CTRL_MASK) <<
RTL8366RB_PORT_VLAN_CTRL_SHIFT(port));
if (ret)
return ret;
@@ -1604,17 +1609,17 @@ static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index)
* not drop any untagged or C-tagged frames. Make sure to update the
* filtering setting.
*/
- if (dsa_port_is_vlan_filtering(dsa_to_port(smi->ds, port)))
- ret = rtl8366rb_drop_untagged(smi, port, !pvid_enabled);
+ if (dsa_port_is_vlan_filtering(dsa_to_port(priv->ds, port)))
+ ret = rtl8366rb_drop_untagged(priv, port, !pvid_enabled);
return ret;
}
-static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
+static bool rtl8366rb_is_vlan_valid(struct realtek_priv *priv, unsigned int vlan)
{
unsigned int max = RTL8366RB_NUM_VLANS - 1;
- if (smi->vlan4k_enabled)
+ if (priv->vlan4k_enabled)
max = RTL8366RB_NUM_VIDS - 1;
if (vlan > max)
@@ -1623,23 +1628,23 @@ static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
return true;
}
-static int rtl8366rb_enable_vlan(struct realtek_smi *smi, bool enable)
+static int rtl8366rb_enable_vlan(struct realtek_priv *priv, bool enable)
{
- dev_dbg(smi->dev, "%s VLAN\n", enable ? "enable" : "disable");
- return regmap_update_bits(smi->map,
+ dev_dbg(priv->dev, "%s VLAN\n", enable ? "enable" : "disable");
+ return regmap_update_bits(priv->map,
RTL8366RB_SGCR, RTL8366RB_SGCR_EN_VLAN,
enable ? RTL8366RB_SGCR_EN_VLAN : 0);
}
-static int rtl8366rb_enable_vlan4k(struct realtek_smi *smi, bool enable)
+static int rtl8366rb_enable_vlan4k(struct realtek_priv *priv, bool enable)
{
- dev_dbg(smi->dev, "%s VLAN 4k\n", enable ? "enable" : "disable");
- return regmap_update_bits(smi->map, RTL8366RB_SGCR,
+ dev_dbg(priv->dev, "%s VLAN 4k\n", enable ? "enable" : "disable");
+ return regmap_update_bits(priv->map, RTL8366RB_SGCR,
RTL8366RB_SGCR_EN_VLAN_4KTB,
enable ? RTL8366RB_SGCR_EN_VLAN_4KTB : 0);
}
-static int rtl8366rb_phy_read(struct realtek_smi *smi, int phy, int regnum)
+static int rtl8366rb_phy_read(struct realtek_priv *priv, int phy, int regnum)
{
u32 val;
u32 reg;
@@ -1648,32 +1653,32 @@ static int rtl8366rb_phy_read(struct realtek_smi *smi, int phy, int regnum)
if (phy > RTL8366RB_PHY_NO_MAX)
return -EINVAL;
- ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
+ ret = regmap_write(priv->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
RTL8366RB_PHY_CTRL_READ);
if (ret)
return ret;
reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
- ret = regmap_write(smi->map, reg, 0);
+ ret = regmap_write(priv->map, reg, 0);
if (ret) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to write PHY%d reg %04x @ %04x, ret %d\n",
phy, regnum, reg, ret);
return ret;
}
- ret = regmap_read(smi->map, RTL8366RB_PHY_ACCESS_DATA_REG, &val);
+ ret = regmap_read(priv->map, RTL8366RB_PHY_ACCESS_DATA_REG, &val);
if (ret)
return ret;
- dev_dbg(smi->dev, "read PHY%d register 0x%04x @ %08x, val <- %04x\n",
+ dev_dbg(priv->dev, "read PHY%d register 0x%04x @ %08x, val <- %04x\n",
phy, regnum, reg, val);
return val;
}
-static int rtl8366rb_phy_write(struct realtek_smi *smi, int phy, int regnum,
+static int rtl8366rb_phy_write(struct realtek_priv *priv, int phy, int regnum,
u16 val)
{
u32 reg;
@@ -1682,34 +1687,45 @@ static int rtl8366rb_phy_write(struct realtek_smi *smi, int phy, int regnum,
if (phy > RTL8366RB_PHY_NO_MAX)
return -EINVAL;
- ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
+ ret = regmap_write(priv->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
RTL8366RB_PHY_CTRL_WRITE);
if (ret)
return ret;
reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
- dev_dbg(smi->dev, "write PHY%d register 0x%04x @ %04x, val -> %04x\n",
+ dev_dbg(priv->dev, "write PHY%d register 0x%04x @ %04x, val -> %04x\n",
phy, regnum, reg, val);
- ret = regmap_write(smi->map, reg, val);
+ ret = regmap_write(priv->map, reg, val);
if (ret)
return ret;
return 0;
}
-static int rtl8366rb_reset_chip(struct realtek_smi *smi)
+static int rtl8366rb_dsa_phy_read(struct dsa_switch *ds, int phy, int regnum)
+{
+ return rtl8366rb_phy_read(ds->priv, phy, regnum);
+}
+
+static int rtl8366rb_dsa_phy_write(struct dsa_switch *ds, int phy, int regnum,
+ u16 val)
+{
+ return rtl8366rb_phy_write(ds->priv, phy, regnum, val);
+}
+
+static int rtl8366rb_reset_chip(struct realtek_priv *priv)
{
int timeout = 10;
u32 val;
int ret;
- realtek_smi_write_reg_noack(smi, RTL8366RB_RESET_CTRL_REG,
- RTL8366RB_CHIP_CTRL_RESET_HW);
+ priv->write_reg_noack(priv, RTL8366RB_RESET_CTRL_REG,
+ RTL8366RB_CHIP_CTRL_RESET_HW);
do {
usleep_range(20000, 25000);
- ret = regmap_read(smi->map, RTL8366RB_RESET_CTRL_REG, &val);
+ ret = regmap_read(priv->map, RTL8366RB_RESET_CTRL_REG, &val);
if (ret)
return ret;
@@ -1718,21 +1734,21 @@ static int rtl8366rb_reset_chip(struct realtek_smi *smi)
} while (--timeout);
if (!timeout) {
- dev_err(smi->dev, "timeout waiting for the switch to reset\n");
+ dev_err(priv->dev, "timeout waiting for the switch to reset\n");
return -EIO;
}
return 0;
}
-static int rtl8366rb_detect(struct realtek_smi *smi)
+static int rtl8366rb_detect(struct realtek_priv *priv)
{
- struct device *dev = smi->dev;
+ struct device *dev = priv->dev;
int ret;
u32 val;
/* Detect device */
- ret = regmap_read(smi->map, 0x5c, &val);
+ ret = regmap_read(priv->map, 0x5c, &val);
if (ret) {
dev_err(dev, "can't get chip ID (%d)\n", ret);
return ret;
@@ -1745,11 +1761,11 @@ static int rtl8366rb_detect(struct realtek_smi *smi)
return -ENODEV;
case 0x5937:
dev_info(dev, "found an RTL8366RB switch\n");
- smi->cpu_port = RTL8366RB_PORT_NUM_CPU;
- smi->num_ports = RTL8366RB_NUM_PORTS;
- smi->num_vlan_mc = RTL8366RB_NUM_VLANS;
- smi->mib_counters = rtl8366rb_mib_counters;
- smi->num_mib_counters = ARRAY_SIZE(rtl8366rb_mib_counters);
+ priv->cpu_port = RTL8366RB_PORT_NUM_CPU;
+ priv->num_ports = RTL8366RB_NUM_PORTS;
+ priv->num_vlan_mc = RTL8366RB_NUM_VLANS;
+ priv->mib_counters = rtl8366rb_mib_counters;
+ priv->num_mib_counters = ARRAY_SIZE(rtl8366rb_mib_counters);
break;
default:
dev_info(dev, "found an Unknown Realtek switch (id=0x%04x)\n",
@@ -1757,14 +1773,14 @@ static int rtl8366rb_detect(struct realtek_smi *smi)
break;
}
- ret = rtl8366rb_reset_chip(smi);
+ ret = rtl8366rb_reset_chip(priv);
if (ret)
return ret;
return 0;
}
-static const struct dsa_switch_ops rtl8366rb_switch_ops = {
+static const struct dsa_switch_ops rtl8366rb_switch_ops_smi = {
.get_tag_protocol = rtl8366_get_tag_protocol,
.setup = rtl8366rb_setup,
.phylink_mac_link_up = rtl8366rb_mac_link_up,
@@ -1787,7 +1803,32 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops = {
.port_max_mtu = rtl8366rb_max_mtu,
};
-static const struct realtek_smi_ops rtl8366rb_smi_ops = {
+static const struct dsa_switch_ops rtl8366rb_switch_ops_mdio = {
+ .get_tag_protocol = rtl8366_get_tag_protocol,
+ .setup = rtl8366rb_setup,
+ .phy_read = rtl8366rb_dsa_phy_read,
+ .phy_write = rtl8366rb_dsa_phy_write,
+ .phylink_mac_link_up = rtl8366rb_mac_link_up,
+ .phylink_mac_link_down = rtl8366rb_mac_link_down,
+ .get_strings = rtl8366_get_strings,
+ .get_ethtool_stats = rtl8366_get_ethtool_stats,
+ .get_sset_count = rtl8366_get_sset_count,
+ .port_bridge_join = rtl8366rb_port_bridge_join,
+ .port_bridge_leave = rtl8366rb_port_bridge_leave,
+ .port_vlan_filtering = rtl8366rb_vlan_filtering,
+ .port_vlan_add = rtl8366_vlan_add,
+ .port_vlan_del = rtl8366_vlan_del,
+ .port_enable = rtl8366rb_port_enable,
+ .port_disable = rtl8366rb_port_disable,
+ .port_pre_bridge_flags = rtl8366rb_port_pre_bridge_flags,
+ .port_bridge_flags = rtl8366rb_port_bridge_flags,
+ .port_stp_state_set = rtl8366rb_port_stp_state_set,
+ .port_fast_age = rtl8366rb_port_fast_age,
+ .port_change_mtu = rtl8366rb_change_mtu,
+ .port_max_mtu = rtl8366rb_max_mtu,
+};
+
+static const struct realtek_ops rtl8366rb_ops = {
.detect = rtl8366rb_detect,
.get_vlan_mc = rtl8366rb_get_vlan_mc,
.set_vlan_mc = rtl8366rb_set_vlan_mc,
@@ -1803,12 +1844,17 @@ static const struct realtek_smi_ops rtl8366rb_smi_ops = {
.phy_write = rtl8366rb_phy_write,
};
-const struct realtek_smi_variant rtl8366rb_variant = {
- .ds_ops = &rtl8366rb_switch_ops,
- .ops = &rtl8366rb_smi_ops,
+const struct realtek_variant rtl8366rb_variant = {
+ .ds_ops_smi = &rtl8366rb_switch_ops_smi,
+ .ds_ops_mdio = &rtl8366rb_switch_ops_mdio,
+ .ops = &rtl8366rb_ops,
.clk_delay = 10,
.cmd_read = 0xa9,
.cmd_write = 0xa8,
.chip_data_sz = sizeof(struct rtl8366rb),
};
EXPORT_SYMBOL_GPL(rtl8366rb_variant);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Driver for RTL8366RB ethernet switch");
+MODULE_LICENSE("GPL");
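The split into rtl8366rb_switch_ops_smi and rtl8366rb_switch_ops_mdio, together with the setup_interface and write_reg_noack hooks used earlier in this patch, implies that the interface-specific front end (SMI or MDIO) picks one ops table and supplies its own register helpers. A rough, hypothetical sketch of that wiring; realtek_smi_wire_up() and the priv->ops field are illustrative names, not confirmed interfaces, while realtek_smi_setup_mdio() and realtek_smi_write_reg_noack() are the helpers referenced in the hunks above:

/* Hypothetical SMI front-end glue, for illustration only. */
static void realtek_smi_wire_up(struct realtek_priv *priv,
                                const struct realtek_variant *var,
                                struct dsa_switch *ds)
{
        priv->write_reg_noack = realtek_smi_write_reg_noack;    /* raw SMI writer */
        priv->setup_interface = realtek_smi_setup_mdio;         /* slave MDIO bus */
        priv->ops = var->ops;                                   /* assumed field */
        priv->ds = ds;
        ds->priv = priv;
        ds->ops = var->ds_ops_smi;      /* an MDIO front end would pick ds_ops_mdio */
}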
diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c
index 7dcdd784aea4..fad5afe3819c 100644
--- a/drivers/net/dsa/sja1105/sja1105_flower.c
+++ b/drivers/net/dsa/sja1105/sja1105_flower.c
@@ -300,6 +300,46 @@ static int sja1105_flower_parse_key(struct sja1105_private *priv,
return -EOPNOTSUPP;
}
+static int sja1105_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
struct flow_cls_offload *cls, bool ingress)
{
@@ -321,12 +361,9 @@ int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
flow_action_for_each(i, act, &rule->action) {
switch (act->id) {
case FLOW_ACTION_POLICE:
- if (act->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack,
- "QoS offload not support packets per second");
- rc = -EOPNOTSUPP;
+ rc = sja1105_policer_validate(&rule->action, act, extack);
+ if (rc)
goto out;
- }
rc = sja1105_flower_policer(priv, port, extack, cookie,
&key,
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index c2a47c6693b8..b33841c6507a 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -393,10 +393,8 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
.start_dynspc = 0,
/* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
.poly = 0x97,
- /* This selects between Independent VLAN Learning (IVL) and
- * Shared VLAN Learning (SVL)
- */
- .shared_learn = true,
+ /* Always use Independent VLAN Learning (IVL) */
+ .shared_learn = false,
/* Don't discard management traffic based on ENFPORT -
* we don't perform SMAC port enforcement anyway, so
* what we are setting here doesn't matter.
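The .poly comment in the hunk above gives the hash polynomial in Koopman notation: bit (i - 1) of 0x97 is the coefficient of x^i and the trailing +1 is implicit, so 0x97 expands to x^8 + x^5 + x^3 + x^2 + x + 1. A small self-contained check of that expansion:

#include <stdio.h>

/* Expand a Koopman-notation CRC polynomial: bit (i - 1) of 'poly' is the
 * coefficient of x^i, and the x^0 term (+1) is implicit. */
static void print_koopman(unsigned int poly, int degree)
{
        int i;

        printf("0x%02x =", poly);
        for (i = degree; i >= 1; i--)
                if (poly & (1u << (i - 1)))
                        printf(" x^%d +", i);
        printf(" 1\n");
}

int main(void)
{
        print_koopman(0x97, 8); /* prints: 0x97 = x^8 + x^5 + x^3 + x^2 + x^1 + 1 */
        return 0;
}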
@@ -1358,37 +1356,16 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
return sja1105_clocking_setup_port(priv, port);
}
-/* The SJA1105 MAC programming model is through the static config (the xMII
- * Mode table cannot be dynamically reconfigured), and we have to program
- * that early (earlier than PHYLINK calls us, anyway).
- * So just error out in case the connected PHY attempts to change the initial
- * system interface MII protocol from what is defined in the DT, at least for
- * now.
- */
-static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port,
- phy_interface_t interface)
-{
- return priv->phy_mode[port] != interface;
-}
-
-static void sja1105_mac_config(struct dsa_switch *ds, int port,
- unsigned int mode,
- const struct phylink_link_state *state)
+static struct phylink_pcs *
+sja1105_mac_select_pcs(struct dsa_switch *ds, int port, phy_interface_t iface)
{
- struct dsa_port *dp = dsa_to_port(ds, port);
struct sja1105_private *priv = ds->priv;
- struct dw_xpcs *xpcs;
-
- if (sja1105_phy_mode_mismatch(priv, port, state->interface)) {
- dev_err(ds->dev, "Changing PHY mode to %s not supported!\n",
- phy_modes(state->interface));
- return;
- }
-
- xpcs = priv->xpcs[port];
+ struct dw_xpcs *xpcs = priv->xpcs[port];
if (xpcs)
- phylink_set_pcs(dp->pl, &xpcs->pcs);
+ return &xpcs->pcs;
+
+ return NULL;
}
static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
@@ -1412,48 +1389,53 @@ static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
sja1105_inhibit_tx(priv, BIT(port), false);
}
-static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void sja1105_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- /* Construct a new mask which exhaustively contains all link features
- * supported by the MAC, and then apply that (logical AND) to what will
- * be sent to the PHY for "marketing".
- */
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct sja1105_private *priv = ds->priv;
struct sja1105_xmii_params_entry *mii;
+ phy_interface_t phy_mode;
- mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
-
- /* include/linux/phylink.h says:
- * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
- * expects the MAC driver to return all supported link modes.
+ /* This driver does not make use of the speed, duplex, pause or the
+ * advertisement in its mac_config, so it is safe to mark this driver
+ * as non-legacy.
*/
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- sja1105_phy_mode_mismatch(priv, port, state->interface)) {
- linkmode_zero(supported);
- return;
+ config->legacy_pre_march2020 = false;
+
+ phy_mode = priv->phy_mode[port];
+ if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ phy_mode == PHY_INTERFACE_MODE_2500BASEX) {
+ /* Changing the PHY mode on SERDES ports is possible and makes
+ * sense, because that is done through the XPCS. We allow
+ * changes between SGMII and 2500base-X.
+ */
+ if (priv->info->supports_sgmii[port])
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+
+ if (priv->info->supports_2500basex[port])
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ config->supported_interfaces);
+ } else {
+ /* The SJA1105 MAC programming model is through the static
+ * config (the xMII Mode table cannot be dynamically
+ * reconfigured), and we have to program that early.
+ */
+ __set_bit(phy_mode, config->supported_interfaces);
}
/* The MAC does not support pause frames, and also doesn't
* support half-duplex traffic modes.
*/
- phylink_set(mask, Autoneg);
- phylink_set(mask, MII);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 100baseT1_Full);
+ config->mac_capabilities = MAC_10FD | MAC_100FD;
+
+ mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
if (mii->xmii_mode[port] == XMII_MODE_RGMII ||
mii->xmii_mode[port] == XMII_MODE_SGMII)
- phylink_set(mask, 1000baseT_Full);
- if (priv->info->supports_2500basex[port]) {
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- }
+ config->mac_capabilities |= MAC_1000FD;
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ if (priv->info->supports_2500basex[port])
+ config->mac_capabilities |= MAC_2500FD;
}
static int
@@ -1819,25 +1801,52 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
}
static int sja1105_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;
+ if (!vid) {
+ switch (db.type) {
+ case DSA_DB_PORT:
+ vid = dsa_tag_8021q_standalone_vid(db.dp);
+ break;
+ case DSA_DB_BRIDGE:
+ vid = dsa_tag_8021q_bridge_vid(db.bridge.num);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
return priv->info->fdb_add_cmd(ds, port, addr, vid);
}
static int sja1105_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;
+ if (!vid) {
+ switch (db.type) {
+ case DSA_DB_PORT:
+ vid = dsa_tag_8021q_standalone_vid(db.dp);
+ break;
+ case DSA_DB_BRIDGE:
+ vid = dsa_tag_8021q_bridge_vid(db.bridge.num);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
return priv->info->fdb_del_cmd(ds, port, addr, vid);
}
static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
- struct dsa_port *dp = dsa_to_port(ds, port);
struct sja1105_private *priv = ds->priv;
struct device *dev = ds->dev;
int i;
@@ -1874,7 +1883,7 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
/* We need to hide the dsa_8021q VLANs from the user. */
- if (!dsa_port_is_vlan_filtering(dp))
+ if (vid_is_dsa_8021q(l2_lookup.vlanid))
l2_lookup.vlanid = 0;
rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
if (rc)
@@ -1885,7 +1894,15 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
static void sja1105_fast_age(struct dsa_switch *ds, int port)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct sja1105_private *priv = ds->priv;
+ struct dsa_db db = {
+ .type = DSA_DB_BRIDGE,
+ .bridge = {
+ .dev = dsa_port_bridge_dev_get(dp),
+ .num = dsa_port_bridge_num_get(dp),
+ },
+ };
int i;
for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
@@ -1913,7 +1930,7 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
- rc = sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid);
+ rc = sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
if (rc) {
dev_err(ds->dev,
"Failed to delete FDB entry %pM vid %lld: %pe\n",
@@ -1924,15 +1941,17 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
}
static int sja1105_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
- return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
+ return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid, db);
}
static int sja1105_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
- return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
+ return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid, db);
}
/* Common function for unicast and broadcast flood configuration.
@@ -2075,7 +2094,8 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
static int sja1105_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
- bool *tx_fwd_offload)
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
int rc;
@@ -2083,7 +2103,7 @@ static int sja1105_bridge_join(struct dsa_switch *ds, int port,
if (rc)
return rc;
- rc = dsa_tag_8021q_bridge_tx_fwd_offload(ds, port, bridge);
+ rc = dsa_tag_8021q_bridge_join(ds, port, bridge);
if (rc) {
sja1105_bridge_member(ds, port, bridge, false);
return rc;
@@ -2097,7 +2117,7 @@ static int sja1105_bridge_join(struct dsa_switch *ds, int port,
static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
- dsa_tag_8021q_bridge_tx_fwd_unoffload(ds, port, bridge);
+ dsa_tag_8021q_bridge_leave(ds, port, bridge);
sja1105_bridge_member(ds, port, bridge, false);
}
@@ -2357,7 +2377,6 @@ sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
struct netlink_ext_ack *extack)
{
- struct sja1105_l2_lookup_params_entry *l2_lookup_params;
struct sja1105_general_params_entry *general_params;
struct sja1105_private *priv = ds->priv;
struct sja1105_table *table;
@@ -2395,28 +2414,6 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
general_params->incl_srcpt1 = enabled;
general_params->incl_srcpt0 = enabled;
- /* VLAN filtering => independent VLAN learning.
- * No VLAN filtering (or best effort) => shared VLAN learning.
- *
- * In shared VLAN learning mode, untagged traffic still gets
- * pvid-tagged, and the FDB table gets populated with entries
- * containing the "real" (pvid or from VLAN tag) VLAN ID.
- * However the switch performs a masked L2 lookup in the FDB,
- * effectively only looking up a frame's DMAC (and not VID) for the
- * forwarding decision.
- *
- * This is extremely convenient for us, because in modes with
- * vlan_filtering=0, dsa_8021q actually installs unique pvid's into
- * each front panel port. This is good for identification but breaks
- * learning badly - the VID of the learnt FDB entry is unique, aka
- * no frames coming from any other port are going to have it. So
- * for forwarding purposes, this is as though learning was broken
- * (all frames get flooded).
- */
- table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
- l2_lookup_params = table->entries;
- l2_lookup_params->shared_learn = !enabled;
-
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_unused_port(ds, port))
continue;
@@ -2525,7 +2522,7 @@ static int sja1105_bridge_vlan_add(struct dsa_switch *ds, int port,
*/
if (vid_is_dsa_8021q(vlan->vid)) {
NL_SET_ERR_MSG_MOD(extack,
- "Range 1024-3071 reserved for dsa_8021q operation");
+ "Range 3072-4095 reserved for dsa_8021q operation");
return -EBUSY;
}
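The updated message reflects that tag_8021q now reserves the top of the VID space (3072-4095) for its standalone-port and bridge VIDs, so bridge VLANs in that range are refused. A minimal illustration of the range check; the boundaries come from the message above, and the kernel's vid_is_dsa_8021q() remains the authoritative test:

#include <stdbool.h>
#include <stdio.h>

/* Reserved-range check implied by the error message above (illustrative). */
static bool vid_reserved_for_dsa_8021q(unsigned int vid)
{
        return vid >= 3072 && vid <= 4095;
}

int main(void)
{
        printf("vid 100 reserved: %d\n", vid_reserved_for_dsa_8021q(100));
        printf("vid 3100 reserved: %d\n", vid_reserved_for_dsa_8021q(3100));
        return 0;
}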
@@ -2850,7 +2847,7 @@ static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
static int sja1105_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+ bool ingress, struct netlink_ext_ack *extack)
{
return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
ingress, true);
@@ -3102,6 +3099,7 @@ static int sja1105_setup(struct dsa_switch *ds)
*/
ds->vlan_filtering_is_global = true;
ds->untag_bridge_pvid = true;
+ ds->fdb_isolation = true;
/* tag_8021q has 3 bits for the VBID, and the value 0 is reserved */
ds->max_num_bridges = 7;
@@ -3152,8 +3150,8 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.set_ageing_time = sja1105_set_ageing_time,
.port_change_mtu = sja1105_change_mtu,
.port_max_mtu = sja1105_get_max_mtu,
- .phylink_validate = sja1105_phylink_validate,
- .phylink_mac_config = sja1105_mac_config,
+ .phylink_get_caps = sja1105_phylink_get_caps,
+ .phylink_mac_select_pcs = sja1105_mac_select_pcs,
.phylink_mac_link_up = sja1105_mac_link_up,
.phylink_mac_link_down = sja1105_mac_link_down,
.get_strings = sja1105_get_strings,
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index be3068a935af..30fb2cc40164 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -399,7 +399,7 @@ static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp)
ts = sja1105_tstamp_reconstruct(ds, ticks, ts);
shwt->hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
- netif_rx_ni(skb);
+ netif_rx(skb);
}
if (ptp_data->extts_enabled)
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c
index f5dca6a9b0f9..b7e95d60a6e4 100644
--- a/drivers/net/dsa/sja1105/sja1105_vl.c
+++ b/drivers/net/dsa/sja1105/sja1105_vl.c
@@ -296,6 +296,19 @@ static bool sja1105_vl_key_lower(struct sja1105_vl_lookup_entry *a,
return false;
}
+/* FIXME: this should change when the bridge upper of the port changes. */
+static u16 sja1105_port_get_tag_8021q_vid(struct dsa_port *dp)
+{
+ unsigned long bridge_num;
+
+ if (!dp->bridge)
+ return dsa_tag_8021q_standalone_vid(dp);
+
+ bridge_num = dsa_port_bridge_num_get(dp);
+
+ return dsa_tag_8021q_bridge_vid(bridge_num);
+}
+
static int sja1105_init_virtual_links(struct sja1105_private *priv,
struct netlink_ext_ack *extack)
{
@@ -394,8 +407,9 @@ static int sja1105_init_virtual_links(struct sja1105_private *priv,
vl_lookup[k].vlanid = rule->key.vl.vid;
vl_lookup[k].vlanprior = rule->key.vl.pcp;
} else {
+ /* FIXME */
struct dsa_port *dp = dsa_to_port(priv->ds, port);
- u16 vid = dsa_tag_8021q_rx_vid(dp);
+ u16 vid = sja1105_port_get_tag_8021q_vid(dp);
vl_lookup[k].vlanid = vid;
vl_lookup[k].vlanprior = 0;
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
index 0730352cdd57..3887ed33c5fe 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.c
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -442,34 +442,27 @@ static void xrs700x_teardown(struct dsa_switch *ds)
cancel_delayed_work_sync(&priv->mib_work);
}
-static void xrs700x_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void xrs700x_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
switch (port) {
case 0:
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ config->mac_capabilities = MAC_10FD | MAC_100FD;
break;
+
case 1:
case 2:
case 3:
- phylink_set(mask, 1000baseT_Full);
+ phy_interface_set_rgmii(config->supported_interfaces);
+ config->mac_capabilities = MAC_10FD | MAC_100FD | MAC_1000FD;
break;
+
default:
- linkmode_zero(supported);
dev_err(ds->dev, "Unsupported port: %i\n", port);
- return;
+ break;
}
-
- phylink_set_port_modes(mask);
-
- /* The switch only supports full duplex. */
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
}
static void xrs700x_mac_link_up(struct dsa_switch *ds, int port,
@@ -541,7 +534,8 @@ static int xrs700x_bridge_common(struct dsa_switch *ds, int port,
}
static int xrs700x_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge, bool *tx_fwd_offload)
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
return xrs700x_bridge_common(ds, port, bridge, true);
}
@@ -703,7 +697,7 @@ static const struct dsa_switch_ops xrs700x_ops = {
.setup = xrs700x_setup,
.teardown = xrs700x_teardown,
.port_stp_state_set = xrs700x_port_stp_state_set,
- .phylink_validate = xrs700x_phylink_validate,
+ .phylink_get_caps = xrs700x_phylink_get_caps,
.phylink_mac_link_up = xrs700x_mac_link_up,
.get_strings = xrs700x_get_strings,
.get_sset_count = xrs700x_get_sset_count,
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 8aec5d9fbfef..ad57209007e1 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -138,11 +138,6 @@ MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
module_param(rx_copybreak, int, 0);
module_param(use_mmio, int, 0);
-#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
-#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
-#undef NETIF_F_TSO
-#endif
-
#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
#error TX ring too small!
#endif
@@ -2261,9 +2256,28 @@ out:
return mode;
}
+#if MAX_SKB_FRAGS > 32
+
+#include <net/vxlan.h>
+
+static netdev_features_t typhoon_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ if (skb_shinfo(skb)->nr_frags > 32 && skb_is_gso(skb))
+ features &= ~NETIF_F_GSO_MASK;
+
+ features = vlan_features_check(skb, features);
+ return vxlan_features_check(skb, features);
+}
+#endif
+
static const struct net_device_ops typhoon_netdev_ops = {
.ndo_open = typhoon_open,
.ndo_stop = typhoon_close,
+#if MAX_SKB_FRAGS > 32
+ .ndo_features_check = typhoon_features_check,
+#endif
.ndo_start_xmit = typhoon_start_tx,
.ndo_set_rx_mode = typhoon_set_rx_mode,
.ndo_tx_timeout = typhoon_tx_timeout,
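Rather than disabling TSO at compile time whenever MAX_SKB_FRAGS exceeds the adapter's 32-entry scatter/gather list, the driver now clears the GSO features per packet from .ndo_features_check, so only over-fragmented GSO skbs fall back to software segmentation while everything else keeps hardware offload. A trivial standalone model of that per-packet decision (the names below are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define TYPHOON_SG_LIMIT 32     /* hardware scatter/gather entries */

/* true -> the stack should strip GSO and segment this skb in software */
static bool needs_sw_gso(unsigned int nr_frags, bool is_gso)
{
        return is_gso && nr_frags > TYPHOON_SG_LIMIT;
}

int main(void)
{
        printf("33 frags, GSO: %d\n", needs_sw_gso(33, true));
        printf("10 frags, GSO: %d\n", needs_sw_gso(10, true));
        return 0;
}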
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
index 90cd7bdf06f5..21047ae1bc3d 100644
--- a/drivers/net/ethernet/8390/mcf8390.c
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -410,10 +410,8 @@ static int mcf8390_probe(struct platform_device *pdev)
int ret, irq;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no IRQ specified?\n");
+ if (irq < 0)
return -ENXIO;
- }
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (mem == NULL) {
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index db3ec4768159..bd4cb9d7c35d 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -78,6 +78,7 @@ source "drivers/net/ethernet/ezchip/Kconfig"
source "drivers/net/ethernet/faraday/Kconfig"
source "drivers/net/ethernet/freescale/Kconfig"
source "drivers/net/ethernet/fujitsu/Kconfig"
+source "drivers/net/ethernet/fungible/Kconfig"
source "drivers/net/ethernet/google/Kconfig"
source "drivers/net/ethernet/hisilicon/Kconfig"
source "drivers/net/ethernet/huawei/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 8a87c1083d1d..8ef43e0c33c0 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_NET_VENDOR_EZCHIP) += ezchip/
obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
+obj-$(CONFIG_NET_VENDOR_FUNGIBLE) += fungible/
obj-$(CONFIG_NET_VENDOR_GOOGLE) += google/
obj-$(CONFIG_NET_VENDOR_HISILICON) += hisilicon/
obj-$(CONFIG_NET_VENDOR_HUAWEI) += huawei/
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 537e6a85e18d..fbf4588994ac 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2413,11 +2413,13 @@ static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
kfree(tx_ring->tcb_ring);
}
+#define MAX_TX_DESC_PER_PKT 24
+
/* nic_send_packet - NIC specific send handler for version B silicon. */
static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
{
u32 i;
- struct tx_desc desc[24];
+ struct tx_desc desc[MAX_TX_DESC_PER_PKT];
u32 frag = 0;
u32 thiscopy, remainder;
struct sk_buff *skb = tcb->skb;
@@ -2432,9 +2434,6 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
* more than 5 fragments.
*/
- /* nr_frags should be no more than 18. */
- BUILD_BUG_ON(MAX_SKB_FRAGS + 1 > 23);
-
memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
for (i = 0; i < nr_frags; i++) {
@@ -3762,6 +3761,13 @@ static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev)
struct et131x_adapter *adapter = netdev_priv(netdev);
struct tx_ring *tx_ring = &adapter->tx_ring;
+ /* This driver does not support TSO, so it is very unlikely
+ * this condition is true.
+ */
+ if (unlikely(skb_shinfo(skb)->nr_frags > MAX_TX_DESC_PER_PKT - 2)) {
+ if (skb_linearize(skb))
+ goto drop_err;
+ }
/* stop the queue if it's getting full */
if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev))
netif_stop_queue(netdev);
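
skb_linearize() copies a heavily fragmented skb into one contiguous buffer, so a descriptor-budget overflow can be handled without dropping the frame. A sketch of the fallback used above, with the drop path written out explicitly (the actual driver jumps to its drop_err label):

	/* more fragments than the MAX_TX_DESC_PER_PKT descriptor array can hold */
	if (unlikely(skb_shinfo(skb)->nr_frags > MAX_TX_DESC_PER_PKT - 2)) {
		if (skb_linearize(skb)) {		/* returns 0 on success */
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;		/* consumed, counted as dropped */
		}
	}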
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index db97170da8c7..7f247ccbe6ba 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -513,7 +513,7 @@ static int sgdma_txbusy(struct altera_tse_private *priv)
{
int delay = 0;
- /* if DMA is busy, wait for current transactino to finish */
+ /* if DMA is busy, wait for current transaction to finish */
while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
& SGDMA_STSREG_BUSY) && (delay++ < 100))
udelay(1);
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 993b2fb42961..a3816264c35c 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -72,7 +72,7 @@ MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");
*/
#define ALTERA_RXDMABUFFER_SIZE 2048
-/* Allow network stack to resume queueing packets after we've
+/* Allow network stack to resume queuing packets after we've
* finished transmitting at least 1/4 of the packets in the queue.
*/
#define TSE_TX_THRESH(x) (x->tx_ring_size / 4)
@@ -390,7 +390,7 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
"RCV pktstatus %08X pktlength %08X\n",
pktstatus, pktlength);
- /* DMA trasfer from TSE starts with 2 aditional bytes for
+ /* DMA transfer from TSE starts with 2 additional bytes for
* IP payload alignment. Status returned by get_rx_status()
* contains DMA transfer length. Packet is 2 bytes shorter.
*/
@@ -1044,7 +1044,7 @@ static void altera_tse_set_mcfilterall(struct net_device *dev)
csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
}
-/* Set or clear the multicast filter for this adaptor
+/* Set or clear the multicast filter for this adapter
*/
static void tse_set_rx_mode_hashfilter(struct net_device *dev)
{
@@ -1064,7 +1064,7 @@ static void tse_set_rx_mode_hashfilter(struct net_device *dev)
spin_unlock(&priv->mac_cfg_lock);
}
-/* Set or clear the multicast filter for this adaptor
+/* Set or clear the multicast filter for this adapter
*/
static void tse_set_rx_mode(struct net_device *dev)
{
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 53080fd143dc..07444aead3fd 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1400,10 +1400,9 @@ static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag)
struct sk_buff *skb;
if (!first_frag)
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_copybreak);
+ skb = napi_alloc_skb(rx_ring->napi, rx_ring->rx_copybreak);
else
- skb = build_skb(first_frag, ENA_PAGE_SIZE);
+ skb = napi_build_skb(first_frag, ENA_PAGE_SIZE);
if (unlikely(!skb)) {
ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1,
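
napi_alloc_skb() and napi_build_skb() draw from per-CPU NAPI caches and avoid some of the overhead of the netdev_/plain variants, but they are only safe from NAPI poll (softirq) context. A rough sketch of the receive-path usage, assuming rx_copybreak-sized copies for small frames:

	struct sk_buff *skb;

	if (!first_frag)		/* small frame: copy into a fresh skb */
		skb = napi_alloc_skb(rx_ring->napi, rx_ring->rx_copybreak);
	else				/* large frame: wrap the existing buffer */
		skb = napi_build_skb(first_frag, ENA_PAGE_SIZE);

	if (unlikely(!skb))
		return NULL;		/* caller bumps the alloc-fail counter */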
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index ff2d099aab21..53dc8d5fede8 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -696,6 +696,12 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
buf_pool->rx_skb[skb_index] = NULL;
datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));
+
+ /* strip off CRC as HW isn't doing this */
+ nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
+ if (!nv)
+ datalen -= 4;
+
skb_put(skb, datalen);
prefetch(skb->data - NET_IP_ALIGN);
skb->protocol = eth_type_trans(skb, ndev);
@@ -717,12 +723,8 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
}
}
- nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
- if (!nv) {
- /* strip off CRC as HW isn't doing this */
- datalen -= 4;
+ if (!nv)
goto skip_jumbo;
- }
slots = page_pool->slots - 1;
head = page_pool->head;
diff --git a/drivers/net/ethernet/asix/ax88796c_main.c b/drivers/net/ethernet/asix/ax88796c_main.c
index bf70481bb1ca..6ba5b024a7be 100644
--- a/drivers/net/ethernet/asix/ax88796c_main.c
+++ b/drivers/net/ethernet/asix/ax88796c_main.c
@@ -433,7 +433,7 @@ ax88796c_skb_return(struct ax88796c_device *ax_local,
netif_info(ax_local, rx_status, ndev, "< rx, len %zu, type 0x%x\n",
skb->len + sizeof(struct ethhdr), skb->protocol);
- status = netif_rx_ni(skb);
+ status = netif_rx(skb);
if (status != NET_RX_SUCCESS && net_ratelimit())
netif_info(ax_local, rx_err, ndev,
"netif_rx status %d\n", status);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index f50604f3e541..49459397993e 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1051,7 +1051,7 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
* each ring/block may need up to 8 bytes for alignment, hence the
* additional bytes tacked onto the end.
*/
- ring_header->size = size =
+ ring_header->size =
sizeof(struct atl1c_tpd_desc) * tpd_ring->count * tqc +
sizeof(struct atl1c_rx_free_desc) * rfd_ring->count * rqc +
sizeof(struct atl1c_recv_ret_status) * rfd_ring->count * rqc +
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index b04e423c446a..c1b97e8c55ef 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1716,17 +1716,17 @@ static int bcm_enet_probe(struct platform_device *pdev)
struct bcm_enet_priv *priv;
struct net_device *dev;
struct bcm63xx_enet_platform_data *pd;
- struct resource *res_irq, *res_irq_rx, *res_irq_tx;
+ int irq, irq_rx, irq_tx;
struct mii_bus *bus;
int i, ret;
if (!bcm_enet_shared_base[0])
return -EPROBE_DEFER;
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
- res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
- if (!res_irq || !res_irq_rx || !res_irq_tx)
+ irq = platform_get_irq(pdev, 0);
+ irq_rx = platform_get_irq(pdev, 1);
+ irq_tx = platform_get_irq(pdev, 2);
+ if (irq < 0 || irq_rx < 0 || irq_tx < 0)
return -ENODEV;
dev = alloc_etherdev(sizeof(*priv));
@@ -1748,9 +1748,9 @@ static int bcm_enet_probe(struct platform_device *pdev)
goto out;
}
- dev->irq = priv->irq = res_irq->start;
- priv->irq_rx = res_irq_rx->start;
- priv->irq_tx = res_irq_tx->start;
+ dev->irq = priv->irq = irq;
+ priv->irq_rx = irq_rx;
+ priv->irq_tx = irq_tx;
priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
if (IS_ERR(priv->mac_clk)) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 2209d99b3404..dd5945c4bfec 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1271,7 +1271,7 @@ struct bnx2x_fw_stats_data {
struct per_port_stats port;
struct per_pf_stats pf;
struct fcoe_statistics_params fcoe;
- struct per_queue_stats queue_stats[1];
+ struct per_queue_stats queue_stats[];
};
/* Public slow path states */
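
Replacing the one-element array with a C99 flexible array member lets the compiler (and FORTIFY/UBSAN) see the real bounds. Allocations of such structures are normally sized with struct_size(); a hypothetical caller (num_queues is an assumed variable, not from this patch) might look like:

#include <linux/overflow.h>
#include <linux/slab.h>

	struct bnx2x_fw_stats_data *stats;

	/* header plus num_queues trailing per-queue entries, overflow-checked */
	stats = kzalloc(struct_size(stats, queue_stats, num_queues), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;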
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 4e85e7dbc2be..7071604f9984 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6178,7 +6178,8 @@ static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
return -EINVAL;
}
- ret = scnprintf(str, *len, "%hx.%hx", num >> 16, num);
+ ret = scnprintf(str, *len, "%x.%x", (num >> 16) & 0xFFFF,
+ num & 0xFFFF);
*len -= ret;
return 0;
}
@@ -6193,7 +6194,8 @@ static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
return -EINVAL;
}
- ret = scnprintf(str, *len, "%hhx.%hhx.%hhx", num >> 16, num >> 8, num);
+ ret = scnprintf(str, *len, "%x.%x.%x", (num >> 16) & 0xFF,
+ (num >> 8) & 0xFF, num & 0xFF);
*len -= ret;
return 0;
}
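
The %hx/%hhx length modifiers are discouraged in kernel format strings (integer promotion makes them pointless and checkpatch flags them); masking the value and printing with plain %x yields the same digits. For example:

	char str[16];
	u32 num = 0x01020304;

	/* produces "102.304": each half-word is masked to 16 bits first */
	scnprintf(str, sizeof(str), "%x.%x", (num >> 16) & 0xFFFF, num & 0xFFFF);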
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index b1c98d1408b8..1c28495875cf 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -233,6 +233,7 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
+ ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
};
static struct workqueue_struct *bnxt_pf_wq;
@@ -369,7 +370,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
i = skb_get_queue_mapping(skb);
if (unlikely(i >= bp->tx_nr_rings)) {
dev_kfree_skb_any(skb);
- atomic_long_inc(&dev->tx_dropped);
+ dev_core_stats_tx_dropped_inc(dev);
return NETDEV_TX_OK;
}
@@ -645,7 +646,7 @@ tx_kick_pending:
if (txr->kick_pending)
bnxt_txr_db_kick(bp, txr, txr->tx_prod);
txr->tx_buf_ring[txr->tx_prod].skb = NULL;
- atomic_long_inc(&dev->tx_dropped);
+ dev_core_stats_tx_dropped_inc(dev);
return NETDEV_TX_OK;
}
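
dev_core_stats_tx_dropped_inc() bumps a per-CPU software counter that the core folds into the device's rtnl_link_stats64, so drivers no longer need their own atomics for drops that never reach the hardware. The early-drop path in ndo_start_xmit() then reduces to the following sketch:

	dev_kfree_skb_any(skb);
	dev_core_stats_tx_dropped_inc(dev);
	return NETDEV_TX_OK;		/* the skb is consumed, never requeued */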
@@ -2079,6 +2080,16 @@ static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
(BNXT_EVENT_RING_TYPE(data2) == \
ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
+#define BNXT_EVENT_PHC_EVENT_TYPE(data1) \
+ (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
+ ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
+
+#define BNXT_EVENT_PHC_RTC_UPDATE(data1) \
+ (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
+ ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
+
+#define BNXT_PHC_BITS 48
+
static int bnxt_async_event_process(struct bnxt *bp,
struct hwrm_async_event_cmpl *cmpl)
{
@@ -2258,6 +2269,24 @@ static int bnxt_async_event_process(struct bnxt *bp,
bnxt_event_error_report(bp, data1, data2);
goto async_event_process_exit;
}
+ case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
+ switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
+ case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
+ if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) {
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u64 ns;
+
+ spin_lock_bh(&ptp->ptp_lock);
+ bnxt_ptp_update_current_time(bp);
+ ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
+ BNXT_PHC_BITS) | ptp->current_time);
+ bnxt_ptp_rtc_timecounter_init(ptp, ns);
+ spin_unlock_bh(&ptp->ptp_lock);
+ }
+ break;
+ }
+ goto async_event_process_exit;
+ }
case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
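
The RTC-update event only carries the upper bits of the new PHC time in event_data1; the full 64-bit nanosecond value is rebuilt by placing those bits above the 48-bit free-running counter and OR-ing in the currently latched low 48 bits. A worked sketch with made-up numbers:

	/* illustrative values only */
	u64 msb   = 0x12;			/* BNXT_EVENT_PHC_RTC_UPDATE(data1) */
	u64 low48 = 0x0000a1b2c3d4e5f6ULL;	/* current_time, already fits in 48 bits */
	u64 ns    = (msb << BNXT_PHC_BITS) | low48;	/* 0x0012a1b2c3d4e5f6 */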
@@ -7416,6 +7445,7 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
struct hwrm_port_mac_ptp_qcfg_output *resp;
struct hwrm_port_mac_ptp_qcfg_input *req;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ bool phc_cfg;
u8 flags;
int rc;
@@ -7458,7 +7488,8 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
rc = -ENODEV;
goto exit;
}
- rc = bnxt_ptp_init(bp);
+ phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
+ rc = bnxt_ptp_init(bp, phc_cfg);
if (rc)
netdev_warn(bp->dev, "PTP initialization failed.\n");
exit:
@@ -7516,6 +7547,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
@@ -9267,7 +9300,7 @@ void bnxt_tx_enable(struct bnxt *bp)
/* Make sure napi polls see @dev_state change */
synchronize_net();
netif_tx_wake_all_queues(bp->dev);
- if (bp->link_info.link_up)
+ if (BNXT_LINK_IS_UP(bp))
netif_carrier_on(bp->dev);
}
@@ -9297,7 +9330,7 @@ static char *bnxt_report_fec(struct bnxt_link_info *link_info)
void bnxt_report_link(struct bnxt *bp)
{
- if (bp->link_info.link_up) {
+ if (BNXT_LINK_IS_UP(bp)) {
const char *signal = "";
const char *flow_ctrl;
const char *duplex;
@@ -9383,7 +9416,7 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
if (rc)
goto hwrm_phy_qcaps_exit;
- bp->phy_flags = resp->flags;
+ bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
struct ethtool_eee *eee = &bp->eee;
u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
@@ -9433,7 +9466,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
struct bnxt_link_info *link_info = &bp->link_info;
struct hwrm_port_phy_qcfg_output *resp;
struct hwrm_port_phy_qcfg_input *req;
- u8 link_up = link_info->link_up;
+ u8 link_state = link_info->link_state;
bool support_changed = false;
int rc;
@@ -9534,14 +9567,14 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
/* TODO: need to add more logic to report VF link */
if (chng_link_state) {
if (link_info->phy_link_status == BNXT_LINK_LINK)
- link_info->link_up = 1;
+ link_info->link_state = BNXT_LINK_STATE_UP;
else
- link_info->link_up = 0;
- if (link_up != link_info->link_up)
+ link_info->link_state = BNXT_LINK_STATE_DOWN;
+ if (link_state != link_info->link_state)
bnxt_report_link(bp);
} else {
- /* alwasy link down if not require to update link state */
- link_info->link_up = 0;
+ /* always link down if not required to update link state */
+ link_info->link_state = BNXT_LINK_STATE_DOWN;
}
hwrm_req_drop(bp, req);
@@ -9741,7 +9774,18 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
return rc;
req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
- return hwrm_req_send(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc) {
+ mutex_lock(&bp->link_lock);
+ /* Device is not obliged to bring the link down in certain scenarios, even
+ * when forced. Setting the state unknown is consistent with
+ * driver startup and will force link state to be reported
+ * during subsequent open based on PORT_PHY_QCFG.
+ */
+ bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
+ mutex_unlock(&bp->link_lock);
+ }
+ return rc;
}
static int bnxt_fw_reset_via_optee(struct bnxt *bp)
@@ -10172,7 +10216,7 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
/* The last close may have shutdown the link, so need to call
* PHY_CFG to bring it back up.
*/
- if (!bp->link_info.link_up)
+ if (!BNXT_LINK_IS_UP(bp))
update_link = true;
if (!bnxt_eee_config_ok(bp))
@@ -10307,6 +10351,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* VF-reps may need to be re-opened after the PF is re-opened */
if (BNXT_PF(bp))
bnxt_vf_reps_open(bp);
+ bnxt_ptp_init_rtc(bp, true);
return 0;
open_err_irq:
@@ -11403,7 +11448,7 @@ static void bnxt_timer(struct timer_list *t)
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
bnxt_fw_health_check(bp);
- if (bp->link_info.link_up && bp->stats_coal_ticks) {
+ if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) {
set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
bnxt_queue_sp_work(bp);
}
@@ -12104,11 +12149,6 @@ int bnxt_fw_init_one(struct bnxt *bp)
if (rc)
return rc;
- /* In case fw capabilities have changed, destroy the unneeded
- * reporters and create newly capable ones.
- */
- bnxt_dl_fw_reporters_destroy(bp, false);
- bnxt_dl_fw_reporters_create(bp);
bnxt_fw_init_one_p3(bp);
return 0;
}
@@ -12937,7 +12977,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
cancel_delayed_work_sync(&bp->fw_reset_task);
bp->sp_event = 0;
- bnxt_dl_fw_reporters_destroy(bp, true);
+ bnxt_dl_fw_reporters_destroy(bp);
bnxt_dl_unregister(bp);
bnxt_shutdown_tc(bp);
@@ -13430,7 +13470,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_BNXT_SRIOV
init_waitqueue_head(&bp->sriov_cfg_wait);
- mutex_init(&bp->sriov_lock);
#endif
if (BNXT_SUPPORTS_TPA(bp)) {
bp->gro_func = bnxt_gro_func_5730x;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 666fc1e7a7d2..61aa3e8c5952 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1175,7 +1175,11 @@ struct bnxt_link_info {
#define BNXT_PHY_STATE_ENABLED 0
#define BNXT_PHY_STATE_DISABLED 1
- u8 link_up;
+ u8 link_state;
+#define BNXT_LINK_STATE_UNKNOWN 0
+#define BNXT_LINK_STATE_DOWN 1
+#define BNXT_LINK_STATE_UP 2
+#define BNXT_LINK_IS_UP(bp) ((bp)->link_info.link_state == BNXT_LINK_STATE_UP)
u8 duplex;
#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF
#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
@@ -1958,6 +1962,7 @@ struct bnxt {
#define BNXT_FW_CAP_EXT_STATS_SUPPORTED 0x00040000
#define BNXT_FW_CAP_ERR_RECOVER_RELOAD 0x00100000
#define BNXT_FW_CAP_HOT_RESET 0x00200000
+ #define BNXT_FW_CAP_PTP_RTC 0x00400000
#define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000
#define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000
#define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000
@@ -2067,12 +2072,6 @@ struct bnxt {
wait_queue_head_t sriov_cfg_wait;
bool sriov_cfg;
#define BNXT_SRIOV_CFG_WAIT_TMO msecs_to_jiffies(10000)
-
- /* lock to protect VF-rep creation/cleanup via
- * multiple paths such as ->sriov_configure() and
- * devlink ->eswitch_mode_set()
- */
- struct mutex sriov_lock;
#endif
#if BITS_PER_LONG == 32
@@ -2099,8 +2098,8 @@ struct bnxt {
u32 lpi_tmr_lo;
u32 lpi_tmr_hi;
- /* copied from flags in hwrm_port_phy_qcaps_output */
- u8 phy_flags;
+ /* copied from flags and flags2 in hwrm_port_phy_qcaps_output */
+ u32 phy_flags;
#define BNXT_PHY_FL_EEE_CAP PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED
#define BNXT_PHY_FL_EXT_LPBK PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED
#define BNXT_PHY_FL_AN_PHY_LPBK PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED
@@ -2109,6 +2108,8 @@ struct bnxt {
#define BNXT_PHY_FL_NO_PHY_LPBK PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED
#define BNXT_PHY_FL_FW_MANAGED_LKDN PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN
#define BNXT_PHY_FL_NO_FCS PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS
+#define BNXT_PHY_FL_NO_PAUSE (PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED << 8)
+#define BNXT_PHY_FL_NO_PFC (PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED << 8)
u8 num_tests;
struct bnxt_test_info *test_info;
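
With flags2 folded into the byte above flags, the capability tests stay uniform: a flags2 bit is simply defined pre-shifted by 8 so it can be tested against the widened phy_flags. Roughly:

	/* flags occupies bits 0-7, flags2 bits 8-15 of the widened phy_flags */
	bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);

	if (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)	/* FLAGS2_PAUSE_UNSUPPORTED << 8 */
		return -EOPNOTSUPP;			/* e.g. reject set_pauseparam */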
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 217ff597cdf2..caab3d626a2a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -627,7 +627,8 @@ static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
int rc;
if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
- !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
+ !(bp->dcbx_cap & DCB_CAP_DCBX_HOST) ||
+ (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
return -EINVAL;
if (!my_pfc) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index f6e21fac0e69..0c17f90d44a2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -241,37 +241,37 @@ static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
.recover = bnxt_fw_recover,
};
-void bnxt_dl_fw_reporters_create(struct bnxt *bp)
+static struct devlink_health_reporter *
+__bnxt_dl_reporter_create(struct bnxt *bp,
+ const struct devlink_health_reporter_ops *ops)
{
- struct bnxt_fw_health *health = bp->fw_health;
-
- if (!health || health->fw_reporter)
- return;
+ struct devlink_health_reporter *reporter;
- health->fw_reporter =
- devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_reporter_ops,
- 0, bp);
- if (IS_ERR(health->fw_reporter)) {
- netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
- PTR_ERR(health->fw_reporter));
- health->fw_reporter = NULL;
- bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
+ reporter = devlink_health_reporter_create(bp->dl, ops, 0, bp);
+ if (IS_ERR(reporter)) {
+ netdev_warn(bp->dev, "Failed to create %s health reporter, rc = %ld\n",
+ ops->name, PTR_ERR(reporter));
+ return NULL;
}
+
+ return reporter;
}
-void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all)
+void bnxt_dl_fw_reporters_create(struct bnxt *bp)
{
- struct bnxt_fw_health *health = bp->fw_health;
+ struct bnxt_fw_health *fw_health = bp->fw_health;
- if (!health)
- return;
+ if (fw_health && !fw_health->fw_reporter)
+ fw_health->fw_reporter = __bnxt_dl_reporter_create(bp, &bnxt_dl_fw_reporter_ops);
+}
- if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && !all)
- return;
+void bnxt_dl_fw_reporters_destroy(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
- if (health->fw_reporter) {
- devlink_health_reporter_destroy(health->fw_reporter);
- health->fw_reporter = NULL;
+ if (fw_health && fw_health->fw_reporter) {
+ devlink_health_reporter_destroy(fw_health->fw_reporter);
+ fw_health->fw_reporter = NULL;
}
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index a715458abc30..b8105065367b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -75,7 +75,7 @@ void bnxt_devlink_health_fw_report(struct bnxt *bp);
void bnxt_dl_health_fw_status_update(struct bnxt *bp, bool healthy);
void bnxt_dl_health_fw_recovery_done(struct bnxt *bp);
void bnxt_dl_fw_reporters_create(struct bnxt *bp);
-void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all);
+void bnxt_dl_fw_reporters_destroy(struct bnxt *bp);
int bnxt_dl_register(struct bnxt *bp);
void bnxt_dl_unregister(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 8aaa2335f848..22e965e18fbc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -11,6 +11,7 @@
#include <linux/ctype.h>
#include <linux/stringify.h>
#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
#include <linux/linkmode.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
@@ -803,9 +804,11 @@ static void bnxt_get_ringparam(struct net_device *dev,
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
+ kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
} else {
ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
ering->rx_jumbo_max_pending = 0;
+ kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
@@ -1659,15 +1662,19 @@ static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
struct ethtool_link_ksettings *lk_ksettings)
{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
u16 fw_speeds = link_info->support_speeds;
BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);
fw_speeds = link_info->support_pam4_speeds;
BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, supported);
- ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause);
- ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
- Asym_Pause);
+ if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) {
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
+ Pause);
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
+ Asym_Pause);
+ }
if (link_info->support_auto_speeds ||
link_info->support_pam4_auto_speeds)
@@ -1898,7 +1905,8 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
/* any change to autoneg will cause link change, therefore the
* driver should put back the original pause setting in autoneg
*/
- set_pause = true;
+ if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
+ set_pause = true;
} else {
u8 phy_type = link_info->phy_type;
@@ -2090,7 +2098,7 @@ static int bnxt_set_pauseparam(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
- if (!BNXT_PHY_CFG_ABLE(bp))
+ if (!BNXT_PHY_CFG_ABLE(bp) || (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
return -EOPNOTSUPP;
mutex_lock(&bp->link_lock);
@@ -2101,9 +2109,7 @@ static int bnxt_set_pauseparam(struct net_device *dev,
}
link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
- if (bp->hwrm_spec_code >= 0x10201)
- link_info->req_flow_ctrl =
- PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
+ link_info->req_flow_ctrl = 0;
} else {
/* when transition from auto pause to force pause,
* force a link change
@@ -2132,7 +2138,7 @@ static u32 bnxt_get_link(struct net_device *dev)
struct bnxt *bp = netdev_priv(dev);
/* TODO: handle MF, VF, driver close case */
- return bp->link_info.link_up;
+ return BNXT_LINK_IS_UP(bp);
}
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
@@ -2509,6 +2515,7 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
u8 *kmem = NULL;
u32 modify_len;
u32 item_len;
+ u8 cmd_err;
u16 index;
int rc;
@@ -2592,6 +2599,8 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
}
rc = hwrm_req_send_silent(bp, install);
+ if (!rc)
+ break;
if (defrag_attempted) {
/* We have tried to defragment already in the previous
@@ -2600,15 +2609,24 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
break;
}
- if (rc && ((struct hwrm_err_output *)resp)->cmd_err ==
- NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
+ cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;
+
+ switch (cmd_err) {
+ case NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK:
+ netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure Anti-rollback detected\n");
+ rc = -EALREADY;
+ break;
+ case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR:
install->flags =
cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
rc = hwrm_req_send_silent(bp, install);
+ if (!rc)
+ break;
+
+ cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;
- if (rc && ((struct hwrm_err_output *)resp)->cmd_err ==
- NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
+ if (cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
/* FW has cleared NVM area, driver will create
* UPDATE directory and try the flash again
*/
@@ -2618,11 +2636,13 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
BNX_DIR_TYPE_UPDATE,
BNX_DIR_ORDINAL_FIRST,
0, 0, item_len, NULL, 0);
- } else if (rc) {
- netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc);
+ if (!rc)
+ break;
}
- } else if (rc) {
- netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc);
+ fallthrough;
+ default:
+ netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x cmd_err :%x\n",
+ rc, cmd_err);
}
} while (defrag_attempted && !rc);
@@ -3324,7 +3344,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
return rc;
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
- if (bp->link_info.link_up)
+ if (BNXT_LINK_IS_UP(bp))
fw_speed = bp->link_info.link_speed;
else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index ea86c54247c7..b7100edbd6dd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -369,6 +369,12 @@ struct cmd_nums {
#define HWRM_FUNC_PTP_EXT_CFG 0x1a0UL
#define HWRM_FUNC_PTP_EXT_QCFG 0x1a1UL
#define HWRM_FUNC_KEY_CTX_ALLOC 0x1a2UL
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2 0x1a3UL
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2 0x1a4UL
+ #define HWRM_FUNC_DBR_PACING_CFG 0x1a5UL
+ #define HWRM_FUNC_DBR_PACING_QCFG 0x1a6UL
+ #define HWRM_FUNC_DBR_PACING_BROADCAST_EVENT 0x1a7UL
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2 0x1a8UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -390,6 +396,9 @@ struct cmd_nums {
#define HWRM_MFG_PRVSN_IMPORT_CERT 0x212UL
#define HWRM_MFG_PRVSN_GET_STATE 0x213UL
#define HWRM_MFG_GET_NVM_MEASUREMENT 0x214UL
+ #define HWRM_MFG_PSOC_QSTATUS 0x215UL
+ #define HWRM_MFG_SELFTEST_QLIST 0x216UL
+ #define HWRM_MFG_SELFTEST_EXEC 0x217UL
#define HWRM_TF 0x2bcUL
#define HWRM_TF_VERSION_GET 0x2bdUL
#define HWRM_TF_SESSION_OPEN 0x2c6UL
@@ -532,8 +541,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_RSVD 63
-#define HWRM_VERSION_STR "1.10.2.63"
+#define HWRM_VERSION_RSVD 73
+#define HWRM_VERSION_STR "1.10.2.73"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -757,10 +766,11 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
#define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
#define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_MASTER 0x43UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE 0x43UL
#define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL
#define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x46UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x47UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1112,34 +1122,37 @@ struct hwrm_async_event_cmpl_echo_request {
__le32 event_data1;
};
-/* hwrm_async_event_cmpl_phc_master (size:128b/16B) */
-struct hwrm_async_event_cmpl_phc_master {
+/* hwrm_async_event_cmpl_phc_update (size:128b/16B) */
+struct hwrm_async_event_cmpl_phc_update {
__le16 type;
- #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_LAST ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_HWRM_ASYNC_EVENT
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT
__le16 event_id;
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_ID_PHC_MASTER 0x43UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_ID_LAST ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_ID_PHC_MASTER
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE 0x43UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE
__le32 event_data2;
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_MASTER_FID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_MASTER_FID_SFT 0
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_SEC_FID_MASK 0xffff0000UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_SEC_FID_SFT 16
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_MASK 0xffff0000UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_SFT 16
u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PHC_MASTER_V 0x1UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_MASK 0xfUL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_SFT 0
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_MASTER 0x1UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_SECONDARY 0x2UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_FAILOVER 0x3UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_LAST ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_FAILOVER
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK 0xfUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_MASTER 0x1UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_SECONDARY 0x2UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_FAILOVER 0x3UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE 0x4UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT 4
};
/* hwrm_async_event_cmpl_pps_timestamp (size:128b/16B) */
@@ -1330,6 +1343,30 @@ struct hwrm_async_event_cmpl_error_report_nvm {
#define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE
};
+/* hwrm_async_event_cmpl_error_report_doorbell_drop_threshold (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_doorbell_drop_threshold {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD
+};
+
/* hwrm_func_reset_input (size:192b/24B) */
struct hwrm_func_reset_input {
__le16 req_type;
@@ -1589,6 +1626,10 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_MIN_BW_SUPPORTED 0x1000000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_REQUIRED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DBR_PACING_SUPPORTED 0x20000000UL
u8 max_schqs;
u8 mpc_chnls_cap;
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
@@ -2455,7 +2496,7 @@ struct hwrm_func_backing_store_qcaps_output {
__le16 rkc_entry_size;
__le32 tkc_max_entries;
__le32 rkc_max_entries;
- u8 rsvd[7];
+ u8 rsvd1[7];
u8 valid;
};
@@ -3164,7 +3205,7 @@ struct hwrm_func_ptp_pin_cfg_output {
u8 valid;
};
-/* hwrm_func_ptp_cfg_input (size:320b/40B) */
+/* hwrm_func_ptp_cfg_input (size:384b/48B) */
struct hwrm_func_ptp_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -3178,6 +3219,7 @@ struct hwrm_func_ptp_cfg_input {
#define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD 0x8UL
#define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP 0x10UL
#define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE 0x20UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_SET_TIME 0x40UL
u8 ptp_pps_event;
#define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_INTERNAL 0x1UL
#define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_EXTERNAL 0x2UL
@@ -3204,6 +3246,7 @@ struct hwrm_func_ptp_cfg_input {
__le32 ptp_freq_adj_ext_up;
__le32 ptp_freq_adj_ext_phase_lower;
__le32 ptp_freq_adj_ext_phase_upper;
+ __le64 ptp_set_time;
};
/* hwrm_func_ptp_cfg_output (size:128b/16B) */
@@ -3243,6 +3286,308 @@ struct hwrm_func_ptp_ts_query_output {
u8 valid;
};
+/* hwrm_func_ptp_ext_cfg_input (size:256b/32B) */
+struct hwrm_func_ptp_ext_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 enables;
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_MASTER_FID 0x1UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_FID 0x2UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_MODE 0x4UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_FAILOVER_TIMER 0x8UL
+ __le16 phc_master_fid;
+ __le16 phc_sec_fid;
+ u8 phc_sec_mode;
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_SWITCH 0x0UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_ALL 0x1UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY 0x2UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_LAST FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY
+ u8 unused_0;
+ __le32 failover_timer;
+ u8 unused_1[4];
+};
+
+/* hwrm_func_ptp_ext_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_ext_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_ext_qcfg_input (size:192b/24B) */
+struct hwrm_func_ptp_ext_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_func_ptp_ext_qcfg_output (size:256b/32B) */
+struct hwrm_func_ptp_ext_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 phc_master_fid;
+ __le16 phc_sec_fid;
+ __le16 phc_active_fid0;
+ __le16 phc_active_fid1;
+ __le32 last_failover_event;
+ __le16 from_fid;
+ __le16 to_fid;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_backing_store_cfg_v2_input (size:448b/56B) */
+struct hwrm_func_backing_store_cfg_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
+ __le16 instance;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ __le64 page_dir;
+ __le32 num_entries;
+ __le16 entry_size;
+ u8 page_size_pbl_level;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G
+ u8 subtype_valid_cnt;
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+};
+
+/* hwrm_func_backing_store_cfg_v2_output (size:128b/16B) */
+struct hwrm_func_backing_store_cfg_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 rsvd0[7];
+ u8 valid;
+};
+
+/* hwrm_func_backing_store_qcfg_v2_input (size:192b/24B) */
+struct hwrm_func_backing_store_qcfg_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
+ __le16 instance;
+ u8 rsvd[4];
+};
+
+/* hwrm_func_backing_store_qcfg_v2_output (size:448b/56B) */
+struct hwrm_func_backing_store_qcfg_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
+ __le16 instance;
+ __le32 flags;
+ __le64 page_dir;
+ __le32 num_entries;
+ u8 page_size_pbl_level;
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_SFT 0
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G
+ u8 subtype_valid_cnt;
+ u8 rsvd[2];
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ u8 rsvd2[7];
+ u8 valid;
+};
+
+/* qpc_split_entries (size:128b/16B) */
+struct qpc_split_entries {
+ __le32 qp_num_l2_entries;
+ __le32 qp_num_qp1_entries;
+ __le32 rsvd[2];
+};
+
+/* srq_split_entries (size:128b/16B) */
+struct srq_split_entries {
+ __le32 srq_num_l2_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* cq_split_entries (size:128b/16B) */
+struct cq_split_entries {
+ __le32 cq_num_l2_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* vnic_split_entries (size:128b/16B) */
+struct vnic_split_entries {
+ __le32 vnic_num_vnic_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* mrav_split_entries (size:128b/16B) */
+struct mrav_split_entries {
+ __le32 mrav_num_av_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* hwrm_func_backing_store_qcaps_v2_input (size:192b/24B) */
+struct hwrm_func_backing_store_qcaps_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
+ u8 rsvd[6];
+};
+
+/* hwrm_func_backing_store_qcaps_v2_output (size:448b/56B) */
+struct hwrm_func_backing_store_qcaps_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
+ __le16 entry_size;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL
+ __le32 instance_bit_map;
+ u8 ctx_init_value;
+ u8 ctx_init_offset;
+ u8 entry_multiple;
+ u8 rsvd;
+ __le32 max_num_entries;
+ __le32 min_num_entries;
+ __le16 next_valid_type;
+ u8 subtype_valid_cnt;
+ u8 rsvd2;
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ u8 rsvd3[3];
+ u8 valid;
+};
+
/* hwrm_func_drv_if_change_input (size:192b/24B) */
struct hwrm_func_drv_if_change_input {
__le16 req_type;
@@ -3741,7 +4086,7 @@ struct hwrm_port_phy_qcfg_output {
u8 valid;
};
-/* hwrm_port_mac_cfg_input (size:384b/48B) */
+/* hwrm_port_mac_cfg_input (size:448b/56B) */
struct hwrm_port_mac_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -3807,7 +4152,8 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
u8 unused_0[3];
__le32 ptp_freq_adj_ppb;
- __le32 ptp_adj_phase;
+ u8 unused_1[4];
+ __le64 ptp_adj_phase;
};
/* hwrm_port_mac_cfg_output (size:128b/16B) */
@@ -3850,6 +4196,7 @@ struct hwrm_port_mac_ptp_qcfg_output {
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK 0x10UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED 0x20UL
u8 unused_0[3];
__le32 rx_ts_reg_off_lower;
__le32 rx_ts_reg_off_upper;
@@ -4339,7 +4686,8 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_4
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_12 0xcUL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_12
__le16 supported_speeds_force_mode;
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
@@ -4399,7 +4747,7 @@ struct hwrm_port_phy_qcaps_output {
__le16 flags2;
#define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
#define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
- u8 unused_0[1];
+ u8 internal_port_cnt;
u8 valid;
};
@@ -6221,12 +6569,13 @@ struct hwrm_vnic_rss_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 hash_type;
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL
__le16 vnic_id;
u8 ring_table_pair_index;
u8 hash_mode_flags;
@@ -7898,6 +8247,7 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
u8 valid;
};
+/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
struct hwrm_tunnel_dst_port_query_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -8909,6 +9259,50 @@ struct hwrm_dbg_qcfg_output {
u8 valid;
};
+/* hwrm_dbg_crashdump_medium_cfg_input (size:320b/40B) */
+struct hwrm_dbg_crashdump_medium_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 output_dest_flags;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR 0x1UL
+ __le16 pg_size_lvl;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_MASK 0x3UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_SFT 0
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_0 0x0UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_1 0x1UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2 0x2UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_MASK 0x1cUL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_SFT 2
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K (0x0UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K (0x1UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K (0x2UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_2M (0x3UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8M (0x4UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G (0x5UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_MASK 0xffe0UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_SFT 5
+ __le32 size;
+ __le32 coredump_component_disable_flags;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_NVRAM 0x1UL
+ __le32 unused_0;
+ __le64 pbl;
+};
+
+/* hwrm_dbg_crashdump_medium_cfg_output (size:128b/16B) */
+struct hwrm_dbg_crashdump_medium_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_1[7];
+ u8 valid;
+};
+
/* coredump_segment_record (size:128b/16B) */
struct coredump_segment_record {
__le16 component_id;
@@ -9372,8 +9766,35 @@ struct hwrm_nvm_install_update_output {
__le16 resp_len;
__le64 installed_items;
u8 result;
- #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_FAILURE 0xffUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_MALLOC_FAILURE 0xfdUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER 0xfbUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER 0xf3UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE 0xf2UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER 0xecUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE 0xebUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM 0xeaUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH 0xe9UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST 0xe8UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER 0xe7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM 0xe6UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM 0xe5UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH 0xe4UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE 0xe1UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV 0xceUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID 0xcdUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR 0xccUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID 0xcbUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM 0xc5UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM 0xc4UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM 0xc3UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR 0xb9UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR 0xb8UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR 0xb7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND 0xb0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED 0xa7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED
u8 problem_item;
#define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
#define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 48520967746f..a0b321a19361 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -19,6 +19,20 @@
#include "bnxt_hwrm.h"
#include "bnxt_ptp.h"
+static int bnxt_ptp_cfg_settime(struct bnxt *bp, u64 time)
+{
+ struct hwrm_func_ptp_cfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_CFG);
+ if (rc)
+ return rc;
+
+ req->enables = cpu_to_le16(FUNC_PTP_CFG_REQ_ENABLES_PTP_SET_TIME);
+ req->ptp_set_time = cpu_to_le64(time);
+ return hwrm_req_send(bp, req);
+}
+
int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off)
{
unsigned int ptp_class;
@@ -48,6 +62,9 @@ static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info,
ptp_info);
u64 ns = timespec64_to_ns(ts);
+ if (ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC)
+ return bnxt_ptp_cfg_settime(ptp->bp, ns);
+
spin_lock_bh(&ptp->ptp_lock);
timecounter_init(&ptp->tc, &ptp->cc, ns);
spin_unlock_bh(&ptp->ptp_lock);
@@ -131,11 +148,47 @@ static int bnxt_ptp_gettimex(struct ptp_clock_info *ptp_info,
return 0;
}
+/* Caller holds ptp_lock */
+void bnxt_ptp_update_current_time(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ bnxt_refclk_read(ptp->bp, NULL, &ptp->current_time);
+ WRITE_ONCE(ptp->old_time, ptp->current_time);
+}
+
+static int bnxt_ptp_adjphc(struct bnxt_ptp_cfg *ptp, s64 delta)
+{
+ struct hwrm_port_mac_cfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(ptp->bp, req, HWRM_PORT_MAC_CFG);
+ if (rc)
+ return rc;
+
+ req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_ADJ_PHASE);
+ req->ptp_adj_phase = cpu_to_le64(delta);
+
+ rc = hwrm_req_send(ptp->bp, req);
+ if (rc) {
+ netdev_err(ptp->bp->dev, "ptp adjphc failed. rc = %x\n", rc);
+ } else {
+ spin_lock_bh(&ptp->ptp_lock);
+ bnxt_ptp_update_current_time(ptp->bp);
+ spin_unlock_bh(&ptp->ptp_lock);
+ }
+
+ return rc;
+}
+
static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
{
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
+ if (ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC)
+ return bnxt_ptp_adjphc(ptp, delta);
+
spin_lock_bh(&ptp->ptp_lock);
timecounter_adjtime(&ptp->tc, delta);
spin_unlock_bh(&ptp->ptp_lock);
@@ -714,7 +767,70 @@ static bool bnxt_pps_config_ok(struct bnxt *bp)
return !(bp->fw_cap & BNXT_FW_CAP_PTP_PPS) == !ptp->ptp_info.pin_config;
}
-int bnxt_ptp_init(struct bnxt *bp)
+static void bnxt_ptp_timecounter_init(struct bnxt *bp, bool init_tc)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (!ptp->ptp_clock) {
+ memset(&ptp->cc, 0, sizeof(ptp->cc));
+ ptp->cc.read = bnxt_cc_read;
+ ptp->cc.mask = CYCLECOUNTER_MASK(48);
+ ptp->cc.shift = 0;
+ ptp->cc.mult = 1;
+ ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD;
+ }
+ if (init_tc)
+ timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+}
+
+/* Caller holds ptp_lock */
+void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns)
+{
+ timecounter_init(&ptp->tc, &ptp->cc, ns);
+ /* For RTC, cycle_last must be in sync with the timecounter value. */
+ ptp->tc.cycle_last = ns & ptp->cc.mask;
+}
+
+int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg)
+{
+ struct timespec64 tsp;
+ u64 ns;
+ int rc;
+
+ if (!bp->ptp_cfg || !(bp->fw_cap & BNXT_FW_CAP_PTP_RTC))
+ return -ENODEV;
+
+ if (!phc_cfg) {
+ ktime_get_real_ts64(&tsp);
+ ns = timespec64_to_ns(&tsp);
+ rc = bnxt_ptp_cfg_settime(bp, ns);
+ if (rc)
+ return rc;
+ } else {
+ rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME, &ns);
+ if (rc)
+ return rc;
+ }
+ spin_lock_bh(&bp->ptp_cfg->ptp_lock);
+ bnxt_ptp_rtc_timecounter_init(bp->ptp_cfg, ns);
+ spin_unlock_bh(&bp->ptp_cfg->ptp_lock);
+
+ return 0;
+}
+
+static void bnxt_ptp_free(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (ptp->ptp_clock) {
+ ptp_clock_unregister(ptp->ptp_clock);
+ ptp->ptp_clock = NULL;
+ kfree(ptp->ptp_info.pin_config);
+ ptp->ptp_info.pin_config = NULL;
+ }
+}
+
+int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
int rc;
@@ -726,26 +842,23 @@ int bnxt_ptp_init(struct bnxt *bp)
if (rc)
return rc;
+ if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) {
+ bnxt_ptp_timecounter_init(bp, false);
+ rc = bnxt_ptp_init_rtc(bp, phc_cfg);
+ if (rc)
+ goto out;
+ }
+
if (ptp->ptp_clock && bnxt_pps_config_ok(bp))
return 0;
- if (ptp->ptp_clock) {
- ptp_clock_unregister(ptp->ptp_clock);
- ptp->ptp_clock = NULL;
- kfree(ptp->ptp_info.pin_config);
- ptp->ptp_info.pin_config = NULL;
- }
+ bnxt_ptp_free(bp);
+
atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS);
spin_lock_init(&ptp->ptp_lock);
- memset(&ptp->cc, 0, sizeof(ptp->cc));
- ptp->cc.read = bnxt_cc_read;
- ptp->cc.mask = CYCLECOUNTER_MASK(48);
- ptp->cc.shift = 0;
- ptp->cc.mult = 1;
-
- ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD;
- timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+ if (!(bp->fw_cap & BNXT_FW_CAP_PTP_RTC))
+ bnxt_ptp_timecounter_init(bp, true);
ptp->ptp_info = bnxt_ptp_caps;
if ((bp->fw_cap & BNXT_FW_CAP_PTP_PPS)) {
@@ -757,8 +870,8 @@ int bnxt_ptp_init(struct bnxt *bp)
int err = PTR_ERR(ptp->ptp_clock);
ptp->ptp_clock = NULL;
- bnxt_unmap_ptp_regs(bp);
- return err;
+ rc = err;
+ goto out;
}
if (bp->flags & BNXT_FLAG_CHIP_P5) {
spin_lock_bh(&ptp->ptp_lock);
@@ -768,6 +881,11 @@ int bnxt_ptp_init(struct bnxt *bp)
ptp_schedule_worker(ptp->ptp_clock, 0);
}
return 0;
+
+out:
+ bnxt_ptp_free(bp);
+ bnxt_unmap_ptp_regs(bp);
+ return rc;
}
void bnxt_ptp_clear(struct bnxt *bp)
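The bnxt_ptp hunks above add a real-time-clock mode: when firmware reports BNXT_FW_CAP_PTP_RTC, settime and adjtime are forwarded to firmware over HWRM and the local timecounter is only re-seeded, with cycle_last forced to ns & cc.mask. Because the cyclecounter uses mult = 1 and shift = 0, counter deltas map 1:1 to nanoseconds, so the seeded counter value and the seeded nanosecond value have to agree. A minimal user-space model of that arithmetic, with made-up names and values, purely as an illustration:

#include <stdint.h>
#include <stdio.h>

#define MASK48 ((1ULL << 48) - 1)

struct tc_model {
	uint64_t cycle_last;	/* last counter value consumed */
	uint64_t nsec;		/* accumulated nanoseconds */
};

static void tc_model_init(struct tc_model *tc, uint64_t counter_now, uint64_t start_ns)
{
	tc->cycle_last = counter_now;
	tc->nsec = start_ns;
}

static uint64_t tc_model_read(struct tc_model *tc, uint64_t counter_now)
{
	uint64_t delta = (counter_now - tc->cycle_last) & MASK48;

	tc->cycle_last = counter_now;
	tc->nsec += delta;	/* mult = 1, shift = 0: one cycle is one ns */
	return tc->nsec;
}

int main(void)
{
	struct tc_model tc;
	uint64_t rtc_ns = 1700000000ULL * 1000000000ULL;	/* example wall-clock ns */
	uint64_t phc = rtc_ns & MASK48;	/* 48-bit PHC seeded with the same time */

	/* counterpart of bnxt_ptp_rtc_timecounter_init(): nsec and cycle_last
	 * come from the same value, so the first delta is tiny instead of a
	 * near-2^48 jump
	 */
	tc_model_init(&tc, phc, rtc_ns);
	printf("after 1us: %llu ns\n",
	       (unsigned long long)tc_model_read(&tc, phc + 1000));
	return 0;
}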
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index 7c528e1f8713..373baf45884b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -131,12 +131,15 @@ do { \
#endif
int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off);
+void bnxt_ptp_update_current_time(struct bnxt *bp);
void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2);
void bnxt_ptp_reapply_pps(struct bnxt *bp);
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb);
int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);
-int bnxt_ptp_init(struct bnxt *bp);
+void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns);
+int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg);
+int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg);
void bnxt_ptp_clear(struct bnxt *bp);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 1d177fed44a6..ddf2f3963abe 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -846,7 +846,7 @@ void bnxt_sriov_disable(struct bnxt *bp)
return;
/* synchronize VF and VF-rep create and destroy */
- mutex_lock(&bp->sriov_lock);
+ devl_lock(bp->dl);
bnxt_vf_reps_destroy(bp);
if (pci_vfs_assigned(bp->pdev)) {
@@ -859,7 +859,7 @@ void bnxt_sriov_disable(struct bnxt *bp)
/* Free the HW resources reserved for various VF's */
bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
}
- mutex_unlock(&bp->sriov_lock);
+ devl_unlock(bp->dl);
bnxt_free_vf_resources(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 8eb28e088582..eb4803b11c0e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -559,44 +559,34 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(devlink);
- int rc = 0;
- mutex_lock(&bp->sriov_lock);
if (bp->eswitch_mode == mode) {
netdev_info(bp->dev, "already in %s eswitch mode\n",
mode == DEVLINK_ESWITCH_MODE_LEGACY ?
"legacy" : "switchdev");
- rc = -EINVAL;
- goto done;
+ return -EINVAL;
}
switch (mode) {
case DEVLINK_ESWITCH_MODE_LEGACY:
bnxt_vf_reps_destroy(bp);
- break;
+ return 0;
case DEVLINK_ESWITCH_MODE_SWITCHDEV:
if (bp->hwrm_spec_code < 0x10803) {
netdev_warn(bp->dev, "FW does not support SRIOV E-Switch SWITCHDEV mode\n");
- rc = -ENOTSUPP;
- goto done;
+ return -ENOTSUPP;
}
if (pci_num_vf(bp->pdev) == 0) {
netdev_info(bp->dev, "Enable VFs before setting switchdev mode\n");
- rc = -EPERM;
- goto done;
+ return -EPERM;
}
- rc = bnxt_vf_reps_create(bp);
- break;
+ return bnxt_vf_reps_create(bp);
default:
- rc = -EINVAL;
- goto done;
+ return -EINVAL;
}
-done:
- mutex_unlock(&bp->sriov_lock);
- return rc;
}
#endif
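The bnxt_sriov and bnxt_vfr hunks above drop the driver-private sriov_lock in favour of the devlink instance lock: devlink callbacks such as .eswitch_mode_set now run with that lock already held by the devlink core, so only non-devlink entry points take it explicitly. A kernel-style sketch of the resulting pattern, hedged and illustrative rather than the driver code itself:

/* sketch only; assumes bp->dl is the devlink instance, as the hunks above use it */
static void example_sriov_teardown(struct bnxt *bp)
{
	devl_lock(bp->dl);		/* serialize with devlink callbacks */
	bnxt_vf_reps_destroy(bp);	/* VF-rep create/destroy relies on the
					 * devlink instance lock from now on */
	devl_unlock(bp->dl);
}

/* bnxt_dl_eswitch_mode_set() itself takes no lock: the devlink core holds
 * the instance lock while invoking the op, which is why the mutex could be
 * removed from that path above.
 */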
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 2da804f84b48..2dd79af9411b 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -76,7 +76,7 @@ static inline void bcmgenet_writel(u32 value, void __iomem *offset)
if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
__raw_writel(value, offset);
else
- writel_relaxed(value, offset);
+ writel(value, offset);
}
static inline u32 bcmgenet_readl(void __iomem *offset)
@@ -84,7 +84,7 @@ static inline u32 bcmgenet_readl(void __iomem *offset)
if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
return __raw_readl(offset);
else
- return readl_relaxed(offset);
+ return readl(offset);
}
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
@@ -1368,7 +1368,7 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
if (!p->eee_enabled) {
bcmgenet_eee_enable_set(dev, false);
} else {
- ret = phy_init_eee(dev->phydev, 0);
+ ret = phy_init_eee(dev->phydev, false);
if (ret) {
netif_err(priv, hw, dev, "EEE initialization failed\n");
return ret;
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 9ddbee7de72b..f0a7d8396a4a 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -12,6 +12,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/interrupt.h>
+#include <linux/phy/phy.h>
#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) || defined(CONFIG_MACB_USE_HWSTAMP)
#define MACB_EXT_DESC
@@ -1291,6 +1292,9 @@ struct macb {
u32 wol;
struct macb_ptp_info *ptp_info; /* macb-ptp interface */
+
+ struct phy *sgmii_phy; /* for ZynqMP SGMII mode */
+
#ifdef MACB_EXT_DESC
uint8_t hw_dma_cap;
#endif
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index d13f06cf0308..800d5ced5800 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -34,7 +34,9 @@
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/iopoll.h>
+#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include "macb.h"
/* This structure is only used for MACB on SiFive FU540 devices */
@@ -2762,10 +2764,14 @@ static int macb_open(struct net_device *dev)
macb_init_hw(bp);
- err = macb_phylink_connect(bp);
+ err = phy_power_on(bp->sgmii_phy);
if (err)
goto reset_hw;
+ err = macb_phylink_connect(bp);
+ if (err)
+ goto phy_off;
+
netif_tx_start_all_queues(dev);
if (bp->ptp_info)
@@ -2773,6 +2779,9 @@ static int macb_open(struct net_device *dev)
return 0;
+phy_off:
+ phy_power_off(bp->sgmii_phy);
+
reset_hw:
macb_reset_hw(bp);
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
@@ -2798,6 +2807,8 @@ static int macb_close(struct net_device *dev)
phylink_stop(bp->phylink);
phylink_disconnect_phy(bp->phylink);
+ phy_power_off(bp->sgmii_phy);
+
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
netif_carrier_off(dev);
@@ -4567,13 +4578,55 @@ static const struct macb_config np4_config = {
.usrio = &macb_default_usrio,
};
+static int zynqmp_init(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct macb *bp = netdev_priv(dev);
+ int ret;
+
+ if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ /* Ensure PS-GTR PHY device used in SGMII mode is ready */
+ bp->sgmii_phy = devm_phy_get(&pdev->dev, "sgmii-phy");
+
+ if (IS_ERR(bp->sgmii_phy)) {
+ ret = PTR_ERR(bp->sgmii_phy);
+ dev_err_probe(&pdev->dev, ret,
+ "failed to get PS-GTR PHY\n");
+ return ret;
+ }
+
+ ret = phy_init(bp->sgmii_phy);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init PS-GTR PHY: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ /* Fully reset GEM controller at hardware level using zynqmp-reset driver,
+ * if mapped in device tree.
+ */
+ ret = device_reset_optional(&pdev->dev);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "failed to reset controller");
+ phy_exit(bp->sgmii_phy);
+ return ret;
+ }
+
+ ret = macb_init(pdev);
+ if (ret)
+ phy_exit(bp->sgmii_phy);
+
+ return ret;
+}
+
static const struct macb_config zynqmp_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
MACB_CAPS_JUMBO |
MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
- .init = macb_init,
+ .init = zynqmp_init,
.jumbo_max_len = 10240,
.usrio = &macb_default_usrio,
};
@@ -4790,7 +4843,7 @@ static int macb_probe(struct platform_device *pdev)
err = macb_mii_init(bp);
if (err)
- goto err_out_free_netdev;
+ goto err_out_phy_exit;
netif_carrier_off(dev);
@@ -4815,6 +4868,9 @@ err_out_unregister_mdio:
mdiobus_unregister(bp->mii_bus);
mdiobus_free(bp->mii_bus);
+err_out_phy_exit:
+ phy_exit(bp->sgmii_phy);
+
err_out_free_netdev:
free_netdev(dev);
@@ -4836,6 +4892,7 @@ static int macb_remove(struct platform_device *pdev)
if (dev) {
bp = netdev_priv(dev);
+ phy_exit(bp->sgmii_phy);
mdiobus_unregister(bp->mii_bus);
mdiobus_free(bp->mii_bus);
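The macb hunks above thread a generic PHY (the ZynqMP PS-GTR SGMII lane) through the driver lifecycle: get and init at probe time, power_on in ndo_open, power_off in ndo_stop, and exit at remove and on the error paths. A kernel-style sketch of that ordering, illustrative only, relying on the generic-PHY helpers returning 0 for a NULL phy, which is why the non-SGMII case can simply leave sgmii_phy unset:

/* hypothetical helper, not part of the driver */
static int example_sgmii_phy_probe(struct device *dev, struct phy **out)
{
	struct phy *p = devm_phy_get(dev, "sgmii-phy");

	if (IS_ERR(p))
		return PTR_ERR(p);	/* may be -EPROBE_DEFER */

	*out = p;
	return phy_init(p);		/* paired with phy_exit() at remove */
}

/* ndo_open: phy_power_on(phy), paired with phy_power_off() at close.
 * Error unwinding mirrors the hunks above: any failure after phy_init()
 * must call phy_exit() before returning.
 */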
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 574a32f23f96..2f6484dc186a 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1409,7 +1409,8 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle,
struct device *dev = &bgx->pdev->dev;
struct acpi_device *adev;
- if (acpi_bus_get_device(handle, &adev))
+ adev = acpi_fetch_acpi_dev(handle);
+ if (!adev)
goto out;
acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 63521312cb90..174b1e156669 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3349,6 +3349,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
if (!adapter->registered_device_map) {
dev_err(&pdev->dev, "could not register any net devices\n");
+ err = -ENODEV;
goto out_free_dev;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
index 28fd2de9e4cf..1672d3afe5be 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
@@ -8,6 +8,46 @@
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"
+static int cxgb4_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int cxgb4_matchall_egress_validate(struct net_device *dev,
struct tc_cls_matchall_offload *cls)
{
@@ -48,11 +88,10 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev,
flow_action_for_each(i, entry, actions) {
switch (entry->id) {
case FLOW_ACTION_POLICE:
- if (entry->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack,
- "QoS offload not support packets per second");
- return -EOPNOTSUPP;
- }
+ ret = cxgb4_policer_validate(actions, entry, extack);
+ if (ret)
+ return ret;
+
/* Convert bytes per second to bits per second */
if (entry->police.rate_bytes_ps * 8 > max_link_rate) {
NL_SET_ERR_MSG_MOD(extack,
@@ -150,11 +189,11 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev,
flow_action_for_each(i, entry, &cls->rule->action)
if (entry->id == FLOW_ACTION_POLICE)
break;
- if (entry->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack,
- "QoS offload not support packets per second");
- return -EOPNOTSUPP;
- }
+
+ ret = cxgb4_policer_validate(&cls->rule->action, entry, extack);
+ if (ret)
+ return ret;
+
/* Convert from bytes per second to Kbps */
p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000);
p.u.params.channel = pi->tx_chan;
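Once cxgb4_policer_validate() accepts a police action, the hunks above convert the offered rate twice: bytes per second to bits per second for the link-rate check, and on to Kbps for the scheduler parameters. A small stand-alone illustration of those conversions, with an arbitrary example rate:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rate_bytes_ps = 125000000ULL;	/* 1 Gbit/s expressed in bytes */
	uint64_t bits_ps = rate_bytes_ps * 8;	/* compared against max_link_rate */
	uint64_t kbps = bits_ps / 1000;		/* div_u64() in the kernel code */

	printf("%llu byte/s = %llu bit/s = %llu Kbps\n",
	       (unsigned long long)rate_bytes_ps,
	       (unsigned long long)bits_ps,
	       (unsigned long long)kbps);
	return 0;
}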
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index c78b99a497df..8014eb33937c 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -2363,11 +2363,13 @@ static void gemini_port_save_mac_addr(struct gemini_ethernet_port *port)
static int gemini_ethernet_port_probe(struct platform_device *pdev)
{
char *port_names[2] = { "ethernet0", "ethernet1" };
+ struct device_node *np = pdev->dev.of_node;
struct gemini_ethernet_port *port;
struct device *dev = &pdev->dev;
struct gemini_ethernet *geth;
struct net_device *netdev;
struct device *parent;
+ u8 mac[ETH_ALEN];
unsigned int id;
int irq;
int ret;
@@ -2473,6 +2475,12 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
netif_napi_add(netdev, &port->napi, gmac_napi_poll,
DEFAULT_NAPI_WEIGHT);
+ ret = of_get_mac_address(np, mac);
+ if (!ret) {
+ dev_info(dev, "Setting macaddr from DT %pM\n", mac);
+ memcpy(port->mac_addr, mac, ETH_ALEN);
+ }
+
if (is_valid_ether_addr((void *)port->mac_addr)) {
eth_hw_addr_set(netdev, (u8 *)port->mac_addr);
} else {
diff --git a/drivers/net/ethernet/davicom/Kconfig b/drivers/net/ethernet/davicom/Kconfig
index 7af86b6d4150..02e0caff98e3 100644
--- a/drivers/net/ethernet/davicom/Kconfig
+++ b/drivers/net/ethernet/davicom/Kconfig
@@ -3,6 +3,19 @@
# Davicom device configuration
#
+config NET_VENDOR_DAVICOM
+ bool "Davicom devices"
+ default y
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Davicom devices. If you say Y, you will be asked
+ for your specific card in the following selections.
+
+if NET_VENDOR_DAVICOM
+
config DM9000
tristate "DM9000 support"
depends on ARM || MIPS || COLDFIRE || NIOS2 || COMPILE_TEST
@@ -22,3 +35,21 @@ config DM9000_FORCE_SIMPLE_PHY_POLL
bit to determine if the link is up or down instead of the more
costly MII PHY reads. Note, this will not work if the chip is
operating with an external PHY.
+
+config DM9051
+ tristate "DM9051 SPI support"
+ depends on SPI
+ select CRC32
+ select MDIO
+ select PHYLIB
+ select REGMAP_SPI
+ help
+ Support for DM9051 SPI chipset.
+
+ To compile this driver as a module, choose M here. The module
+ will be called dm9051.
+
+	  The host's SPI master must use SPI mode 0 to access the DM9051
+	  on the SPI bus.
+
+endif # NET_VENDOR_DAVICOM
diff --git a/drivers/net/ethernet/davicom/Makefile b/drivers/net/ethernet/davicom/Makefile
index 173c87d21076..225f85bc1f53 100644
--- a/drivers/net/ethernet/davicom/Makefile
+++ b/drivers/net/ethernet/davicom/Makefile
@@ -4,3 +4,4 @@
#
obj-$(CONFIG_DM9000) += dm9000.o
+obj-$(CONFIG_DM9051) += dm9051.o
diff --git a/drivers/net/ethernet/davicom/dm9051.c b/drivers/net/ethernet/davicom/dm9051.c
new file mode 100644
index 000000000000..a523ddda7609
--- /dev/null
+++ b/drivers/net/ethernet/davicom/dm9051.c
@@ -0,0 +1,1260 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Davicom Semiconductor, Inc.
+ * Davicom DM9051 SPI Fast Ethernet Linux driver
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+#include "dm9051.h"
+
+#define DRVNAME_9051 "dm9051"
+
+/**
+ * struct rx_ctl_mach - rx activities record
+ * @status_err_counter: rx status error counter
+ * @large_err_counter: rx get large packet length error counter
+ * @rx_err_counter: receive packet error counter
+ * @tx_err_counter: transmit packet error counter
+ * @fifo_rst_counter: reset operation counter
+ *
+ * Used to keep track of the driver operation statistics
+ */
+struct rx_ctl_mach {
+ u16 status_err_counter;
+ u16 large_err_counter;
+ u16 rx_err_counter;
+ u16 tx_err_counter;
+ u16 fifo_rst_counter;
+};
+
+/**
+ * struct dm9051_rxctrl - dm9051 driver rx control
+ * @hash_table: Multicast hash-table data
+ * @rcr_all: KS_RXCR1 register setting
+ *
+ * The settings needed to control the receive filtering,
+ * such as the multicast hash filter and the receive register settings
+ */
+struct dm9051_rxctrl {
+ u16 hash_table[4];
+ u8 rcr_all;
+};
+
+/**
+ * struct dm9051_rxhdr - rx packet data header
+ * @headbyte: lead byte equal to 0x01 notifies a valid packet
+ * @status: status bits for the received packet
+ * @rxlen: packet length
+ *
+ * Each Rx packet entered into the FIFO memory starts with these
+ * four bytes, which form the Rx header, followed by the Ethernet
+ * packet data and an appended 4-byte CRC.
+ * Both the Rx header and the CRC are only used for checking and
+ * are finally dropped by this driver
+ */
+struct dm9051_rxhdr {
+ u8 headbyte;
+ u8 status;
+ __le16 rxlen;
+};
+
+/**
+ * struct board_info - maintain the saved data
+ * @spidev: spi device structure
+ * @ndev: net device structure
+ * @mdiobus: mii bus structure
+ * @phydev: phy device structure
+ * @txq: tx queue structure
+ * @regmap_dm: regmap for register read/write
+ * @regmap_dmbulk: extra regmap for bulk read/write
+ * @rxctrl_work: Work queue for updating RX mode and multicast lists
+ * @tx_work: Work queue for tx packets
+ * @pause: ethtool pause parameter structure
+ * @spi_lockm: between threads lock structure
+ * @reg_mutex: regmap access lock structure
+ * @bc: rx control statistics structure
+ * @rxhdr: rx header structure
+ * @rctl: rx control setting structure
+ * @msg_enable: message level value
+ * @imr_all: to store operating imr value for register DM9051_IMR
+ * @lcr_all: to store operating rcr value for register DM9051_LMCR
+ *
+ * The saved data variables are kept up to date for later retrieval and use
+ */
+struct board_info {
+ u32 msg_enable;
+ struct spi_device *spidev;
+ struct net_device *ndev;
+ struct mii_bus *mdiobus;
+ struct phy_device *phydev;
+ struct sk_buff_head txq;
+ struct regmap *regmap_dm;
+ struct regmap *regmap_dmbulk;
+ struct work_struct rxctrl_work;
+ struct work_struct tx_work;
+ struct ethtool_pauseparam pause;
+ struct mutex spi_lockm;
+ struct mutex reg_mutex;
+ struct rx_ctl_mach bc;
+ struct dm9051_rxhdr rxhdr;
+ struct dm9051_rxctrl rctl;
+ u8 imr_all;
+ u8 lcr_all;
+};
+
+static int dm9051_set_reg(struct board_info *db, unsigned int reg, unsigned int val)
+{
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, reg, val);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d set reg %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+static int dm9051_update_bits(struct board_info *db, unsigned int reg, unsigned int mask,
+ unsigned int val)
+{
+ int ret;
+
+ ret = regmap_update_bits(db->regmap_dm, reg, mask, val);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d update bits reg %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+/* skb buffer exhausted, just discard the received data
+ */
+static int dm9051_dumpblk(struct board_info *db, u8 reg, size_t count)
+{
+ struct net_device *ndev = db->ndev;
+ unsigned int rb;
+ int ret;
+
+ /* no skb buffer available;
+ * both reg and &rb must not auto-increment,
+ * so read one byte at a time via regmap_read
+ */
+ do {
+ ret = regmap_read(db->regmap_dm, reg, &rb);
+ if (ret < 0) {
+ netif_err(db, drv, ndev, "%s: error %d dumping read reg %02x\n",
+ __func__, ret, reg);
+ break;
+ }
+ } while (--count);
+
+ return ret;
+}
+
+static int dm9051_set_regs(struct board_info *db, unsigned int reg, const void *val,
+ size_t val_count)
+{
+ int ret;
+
+ ret = regmap_bulk_write(db->regmap_dmbulk, reg, val, val_count);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d bulk writing regs %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+static int dm9051_get_regs(struct board_info *db, unsigned int reg, void *val,
+ size_t val_count)
+{
+ int ret;
+
+ ret = regmap_bulk_read(db->regmap_dmbulk, reg, val, val_count);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d bulk reading regs %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+static int dm9051_write_mem(struct board_info *db, unsigned int reg, const void *buff,
+ size_t len)
+{
+ int ret;
+
+ ret = regmap_noinc_write(db->regmap_dm, reg, buff, len);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d noinc writing regs %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+static int dm9051_read_mem(struct board_info *db, unsigned int reg, void *buff,
+ size_t len)
+{
+ int ret;
+
+ ret = regmap_noinc_read(db->regmap_dm, reg, buff, len);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d noinc reading regs %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+/* waiting for tx-end rather than tx-req
+ * turns out to be faster
+ */
+static int dm9051_nsr_poll(struct board_info *db)
+{
+ unsigned int mval;
+ int ret;
+
+ ret = regmap_read_poll_timeout(db->regmap_dm, DM9051_NSR, mval,
+ mval & (NSR_TX2END | NSR_TX1END), 1, 20);
+ if (ret == -ETIMEDOUT)
+ netdev_err(db->ndev, "timeout in checking for tx end\n");
+ return ret;
+}
+
+static int dm9051_epcr_poll(struct board_info *db)
+{
+ unsigned int mval;
+ int ret;
+
+ ret = regmap_read_poll_timeout(db->regmap_dm, DM9051_EPCR, mval,
+ !(mval & EPCR_ERRE), 100, 10000);
+ if (ret == -ETIMEDOUT)
+ netdev_err(db->ndev, "timeout waiting for eeprom/phy access to complete\n");
+ return ret;
+}
+
+static int dm9051_irq_flag(struct board_info *db)
+{
+ struct spi_device *spi = db->spidev;
+ int irq_type = irq_get_trigger_type(spi->irq);
+
+ if (irq_type)
+ return irq_type;
+
+ return IRQF_TRIGGER_LOW;
+}
+
+static unsigned int dm9051_intcr_value(struct board_info *db)
+{
+ return (dm9051_irq_flag(db) == IRQF_TRIGGER_LOW) ?
+ INTCR_POL_LOW : INTCR_POL_HIGH;
+}
+
+static int dm9051_set_fcr(struct board_info *db)
+{
+ u8 fcr = 0;
+
+ if (db->pause.rx_pause)
+ fcr |= FCR_BKPM | FCR_FLCE;
+ if (db->pause.tx_pause)
+ fcr |= FCR_TXPEN;
+
+ return dm9051_set_reg(db, DM9051_FCR, fcr);
+}
+
+static int dm9051_set_recv(struct board_info *db)
+{
+ int ret;
+
+ ret = dm9051_set_regs(db, DM9051_MAR, db->rctl.hash_table, sizeof(db->rctl.hash_table));
+ if (ret)
+ return ret;
+
+ return dm9051_set_reg(db, DM9051_RCR, db->rctl.rcr_all); /* enable rx */
+}
+
+static int dm9051_core_reset(struct board_info *db)
+{
+ int ret;
+
+ db->bc.fifo_rst_counter++;
+
+ ret = regmap_write(db->regmap_dm, DM9051_NCR, NCR_RST); /* NCR reset */
+ if (ret)
+ return ret;
+ ret = regmap_write(db->regmap_dm, DM9051_MBNDRY, MBNDRY_BYTE); /* MemBound */
+ if (ret)
+ return ret;
+ ret = regmap_write(db->regmap_dm, DM9051_PPCR, PPCR_PAUSE_COUNT); /* Pause Count */
+ if (ret)
+ return ret;
+ ret = regmap_write(db->regmap_dm, DM9051_LMCR, db->lcr_all); /* LEDMode1 */
+ if (ret)
+ return ret;
+
+ return dm9051_set_reg(db, DM9051_INTCR, dm9051_intcr_value(db));
+}
+
+static int dm9051_update_fcr(struct board_info *db)
+{
+ u8 fcr = 0;
+
+ if (db->pause.rx_pause)
+ fcr |= FCR_BKPM | FCR_FLCE;
+ if (db->pause.tx_pause)
+ fcr |= FCR_TXPEN;
+
+ return dm9051_update_bits(db, DM9051_FCR, FCR_RXTX_BITS, fcr);
+}
+
+static int dm9051_disable_interrupt(struct board_info *db)
+{
+ return dm9051_set_reg(db, DM9051_IMR, IMR_PAR); /* disable int */
+}
+
+static int dm9051_enable_interrupt(struct board_info *db)
+{
+ return dm9051_set_reg(db, DM9051_IMR, db->imr_all); /* enable int */
+}
+
+static int dm9051_stop_mrcmd(struct board_info *db)
+{
+ return dm9051_set_reg(db, DM9051_ISR, ISR_STOP_MRCMD); /* to stop mrcmd */
+}
+
+static int dm9051_clear_interrupt(struct board_info *db)
+{
+ return dm9051_update_bits(db, DM9051_ISR, ISR_CLR_INT, ISR_CLR_INT);
+}
+
+static int dm9051_eeprom_read(struct board_info *db, int offset, u8 *to)
+{
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPAR, offset);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_ERPRR);
+ if (ret)
+ return ret;
+
+ ret = dm9051_epcr_poll(db);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, 0);
+ if (ret)
+ return ret;
+
+ return regmap_bulk_read(db->regmap_dmbulk, DM9051_EPDRL, to, 2);
+}
+
+static int dm9051_eeprom_write(struct board_info *db, int offset, u8 *data)
+{
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPAR, offset);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_write(db->regmap_dmbulk, DM9051_EPDRL, data, 2);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_WEP | EPCR_ERPRW);
+ if (ret)
+ return ret;
+
+ ret = dm9051_epcr_poll(db);
+ if (ret)
+ return ret;
+
+ return regmap_write(db->regmap_dm, DM9051_EPCR, 0);
+}
+
+static int dm9051_phyread(void *context, unsigned int reg, unsigned int *val)
+{
+ struct board_info *db = context;
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPAR, DM9051_PHY | reg);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_ERPRR | EPCR_EPOS);
+ if (ret)
+ return ret;
+
+ ret = dm9051_epcr_poll(db);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, 0);
+ if (ret)
+ return ret;
+
+ /* val is 4 bytes wide; clear it to zero since the following regmap_bulk_read
+ * only fills the lower 2 bytes
+ */
+ *val = 0;
+ return regmap_bulk_read(db->regmap_dmbulk, DM9051_EPDRL, val, 2);
+}
+
+static int dm9051_phywrite(void *context, unsigned int reg, unsigned int val)
+{
+ struct board_info *db = context;
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPAR, DM9051_PHY | reg);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_write(db->regmap_dmbulk, DM9051_EPDRL, &val, 2);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_EPOS | EPCR_ERPRW);
+ if (ret)
+ return ret;
+
+ ret = dm9051_epcr_poll(db);
+ if (ret)
+ return ret;
+
+ return regmap_write(db->regmap_dm, DM9051_EPCR, 0);
+}
+
+static int dm9051_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct board_info *db = bus->priv;
+ unsigned int val = 0xffff;
+ int ret;
+
+ if (addr == DM9051_PHY_ADDR) {
+ ret = dm9051_phyread(db, regnum, &val);
+ if (ret)
+ return ret;
+ }
+
+ return val;
+}
+
+static int dm9051_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+{
+ struct board_info *db = bus->priv;
+
+ if (addr == DM9051_PHY_ADDR)
+ return dm9051_phywrite(db, regnum, val);
+
+ return -ENODEV;
+}
+
+static void dm9051_reg_lock_mutex(void *dbcontext)
+{
+ struct board_info *db = dbcontext;
+
+ mutex_lock(&db->reg_mutex);
+}
+
+static void dm9051_reg_unlock_mutex(void *dbcontext)
+{
+ struct board_info *db = dbcontext;
+
+ mutex_unlock(&db->reg_mutex);
+}
+
+static struct regmap_config regconfigdm = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ .reg_stride = 1,
+ .cache_type = REGCACHE_NONE,
+ .read_flag_mask = 0,
+ .write_flag_mask = DM_SPI_WR,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .lock = dm9051_reg_lock_mutex,
+ .unlock = dm9051_reg_unlock_mutex,
+};
+
+static struct regmap_config regconfigdmbulk = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ .reg_stride = 1,
+ .cache_type = REGCACHE_NONE,
+ .read_flag_mask = 0,
+ .write_flag_mask = DM_SPI_WR,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .lock = dm9051_reg_lock_mutex,
+ .unlock = dm9051_reg_unlock_mutex,
+ .use_single_read = true,
+ .use_single_write = true,
+};
+
+static int dm9051_map_init(struct spi_device *spi, struct board_info *db)
+{
+ /* create two regmap instances,
+ * splitting read/write and bulk_read/bulk_write into separate regmaps
+ * to avoid regmap access conflicts
+ */
+ regconfigdm.lock_arg = db;
+ db->regmap_dm = devm_regmap_init_spi(db->spidev, &regconfigdm);
+ if (IS_ERR(db->regmap_dm))
+ return PTR_ERR(db->regmap_dm);
+
+ regconfigdmbulk.lock_arg = db;
+ db->regmap_dmbulk = devm_regmap_init_spi(db->spidev, &regconfigdmbulk);
+ if (IS_ERR(db->regmap_dmbulk))
+ return PTR_ERR(db->regmap_dmbulk);
+
+ return 0;
+}
+
+static int dm9051_map_chipid(struct board_info *db)
+{
+ struct device *dev = &db->spidev->dev;
+ unsigned short wid;
+ u8 buff[6];
+ int ret;
+
+ ret = dm9051_get_regs(db, DM9051_VIDL, buff, sizeof(buff));
+ if (ret < 0)
+ return ret;
+
+ wid = get_unaligned_le16(buff + 2);
+ if (wid != DM9051_ID) {
+ dev_err(dev, "chipid error as %04x !\n", wid);
+ return -ENODEV;
+ }
+
+ dev_info(dev, "chip %04x found\n", wid);
+ return 0;
+}
+
+/* Read the DM9051_PAR registers, which hold the MAC address loaded from EEPROM at power-on
+ */
+static int dm9051_map_etherdev_par(struct net_device *ndev, struct board_info *db)
+{
+ u8 addr[ETH_ALEN];
+ int ret;
+
+ ret = dm9051_get_regs(db, DM9051_PAR, addr, sizeof(addr));
+ if (ret < 0)
+ return ret;
+
+ if (!is_valid_ether_addr(addr)) {
+ eth_hw_addr_random(ndev);
+
+ ret = dm9051_set_regs(db, DM9051_PAR, ndev->dev_addr, sizeof(ndev->dev_addr));
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(&db->spidev->dev, "Use random MAC address\n");
+ return 0;
+ }
+
+ eth_hw_addr_set(ndev, addr);
+ return 0;
+}
+
+/* ethtool-ops
+ */
+static void dm9051_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, DRVNAME_9051, sizeof(info->driver));
+}
+
+static void dm9051_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ db->msg_enable = value;
+}
+
+static u32 dm9051_get_msglevel(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ return db->msg_enable;
+}
+
+static int dm9051_get_eeprom_len(struct net_device *dev)
+{
+ return 128;
+}
+
+static int dm9051_get_eeprom(struct net_device *ndev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ int offset = ee->offset;
+ int len = ee->len;
+ int i, ret;
+
+ if ((len | offset) & 1)
+ return -EINVAL;
+
+ ee->magic = DM_EEPROM_MAGIC;
+
+ for (i = 0; i < len; i += 2) {
+ ret = dm9051_eeprom_read(db, (offset + i) / 2, data + i);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static int dm9051_set_eeprom(struct net_device *ndev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ int offset = ee->offset;
+ int len = ee->len;
+ int i, ret;
+
+ if ((len | offset) & 1)
+ return -EINVAL;
+
+ if (ee->magic != DM_EEPROM_MAGIC)
+ return -EINVAL;
+
+ for (i = 0; i < len; i += 2) {
+ ret = dm9051_eeprom_write(db, (offset + i) / 2, data + i);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static void dm9051_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ *pause = db->pause;
+}
+
+static int dm9051_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ db->pause = *pause;
+
+ if (pause->autoneg == AUTONEG_DISABLE)
+ return dm9051_update_fcr(db);
+
+ phy_set_sym_pause(db->phydev, pause->rx_pause, pause->tx_pause,
+ pause->autoneg);
+ phy_start_aneg(db->phydev);
+ return 0;
+}
+
+static const struct ethtool_ops dm9051_ethtool_ops = {
+ .get_drvinfo = dm9051_get_drvinfo,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_msglevel = dm9051_get_msglevel,
+ .set_msglevel = dm9051_set_msglevel,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = dm9051_get_eeprom_len,
+ .get_eeprom = dm9051_get_eeprom,
+ .set_eeprom = dm9051_set_eeprom,
+ .get_pauseparam = dm9051_get_pauseparam,
+ .set_pauseparam = dm9051_set_pauseparam,
+};
+
+static int dm9051_all_start(struct board_info *db)
+{
+ int ret;
+
+ /* GPR power on of the internal phy
+ */
+ ret = dm9051_set_reg(db, DM9051_GPR, 0);
+ if (ret)
+ return ret;
+
+ /* dm9051 chip registers cannot be accessed within 1 ms
+ * of GPR power-on, so a 1 ms delay is essential
+ */
+ msleep(1);
+
+ ret = dm9051_core_reset(db);
+ if (ret)
+ return ret;
+
+ return dm9051_enable_interrupt(db);
+}
+
+static int dm9051_all_stop(struct board_info *db)
+{
+ int ret;
+
+ /* GPR power-off of the internal phy;
+ * the internal phy can still be accessed after this GPR power-off
+ */
+ ret = dm9051_set_reg(db, DM9051_GPR, GPR_PHY_OFF);
+ if (ret)
+ return ret;
+
+ return dm9051_set_reg(db, DM9051_RCR, RCR_RX_DISABLE);
+}
+
+/* fifo reset when an rx error is found
+ */
+static int dm9051_all_restart(struct board_info *db)
+{
+ struct net_device *ndev = db->ndev;
+ int ret;
+
+ ret = dm9051_core_reset(db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_enable_interrupt(db);
+ if (ret)
+ return ret;
+
+ netdev_dbg(ndev, " rxstatus_Er & rxlen_Er %d, RST_c %d\n",
+ db->bc.status_err_counter + db->bc.large_err_counter,
+ db->bc.fifo_rst_counter);
+
+ ret = dm9051_set_recv(db);
+ if (ret)
+ return ret;
+
+ return dm9051_set_fcr(db);
+}
+
+/* read packets from the fifo memory
+ * return value:
+ * > 0 - number of packets read, the caller can repeat the rx operation
+ * 0 - no error, the caller should stop further rx operations
+ * -EBUSY - read data error, the caller should abort the rx operation
+ */
+static int dm9051_loop_rx(struct board_info *db)
+{
+ struct net_device *ndev = db->ndev;
+ unsigned int rxbyte;
+ int ret, rxlen;
+ struct sk_buff *skb;
+ u8 *rdptr;
+ int scanrr = 0;
+
+ do {
+ ret = dm9051_read_mem(db, DM_SPI_MRCMDX, &rxbyte, 2);
+ if (ret)
+ return ret;
+
+ if ((rxbyte & GENMASK(7, 0)) != DM9051_PKT_RDY)
+ break; /* exhaust-empty */
+
+ ret = dm9051_read_mem(db, DM_SPI_MRCMD, &db->rxhdr, DM_RXHDR_SIZE);
+ if (ret)
+ return ret;
+
+ ret = dm9051_stop_mrcmd(db);
+ if (ret)
+ return ret;
+
+ rxlen = le16_to_cpu(db->rxhdr.rxlen);
+ if (db->rxhdr.status & RSR_ERR_BITS || rxlen > DM9051_PKT_MAX) {
+ netdev_dbg(ndev, "rxhdr-byte (%02x)\n",
+ db->rxhdr.headbyte);
+
+ if (db->rxhdr.status & RSR_ERR_BITS) {
+ db->bc.status_err_counter++;
+ netdev_dbg(ndev, "check rxstatus-error (%02x)\n",
+ db->rxhdr.status);
+ } else {
+ db->bc.large_err_counter++;
+ netdev_dbg(ndev, "check rxlen large-error (%d > %d)\n",
+ rxlen, DM9051_PKT_MAX);
+ }
+ return dm9051_all_restart(db);
+ }
+
+ skb = dev_alloc_skb(rxlen);
+ if (!skb) {
+ ret = dm9051_dumpblk(db, DM_SPI_MRCMD, rxlen);
+ if (ret)
+ return ret;
+ return scanrr;
+ }
+
+ rdptr = skb_put(skb, rxlen - 4);
+ ret = dm9051_read_mem(db, DM_SPI_MRCMD, rdptr, rxlen);
+ if (ret) {
+ db->bc.rx_err_counter++;
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ret = dm9051_stop_mrcmd(db);
+ if (ret)
+ return ret;
+
+ skb->protocol = eth_type_trans(skb, db->ndev);
+ if (db->ndev->features & NETIF_F_RXCSUM)
+ skb_checksum_none_assert(skb);
+ netif_rx(skb);
+ db->ndev->stats.rx_bytes += rxlen;
+ db->ndev->stats.rx_packets++;
+ scanrr++;
+ } while (!ret);
+
+ return scanrr;
+}
+
+/* transmit a packet,
+ * return value:
+ * 0 - success
+ * -ETIMEDOUT - timeout error
+ */
+static int dm9051_single_tx(struct board_info *db, u8 *buff, unsigned int len)
+{
+ int ret;
+
+ ret = dm9051_nsr_poll(db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_write_mem(db, DM_SPI_MWCMD, buff, len);
+ if (ret)
+ return ret;
+
+ ret = dm9051_set_regs(db, DM9051_TXPLL, &len, 2);
+ if (ret < 0)
+ return ret;
+
+ return dm9051_set_reg(db, DM9051_TCR, TCR_TXREQ);
+}
+
+static int dm9051_loop_tx(struct board_info *db)
+{
+ struct net_device *ndev = db->ndev;
+ int ntx = 0;
+ int ret;
+
+ while (!skb_queue_empty(&db->txq)) {
+ struct sk_buff *skb;
+ unsigned int len;
+
+ skb = skb_dequeue(&db->txq);
+ if (skb) {
+ ntx++;
+ ret = dm9051_single_tx(db, skb->data, skb->len);
+ len = skb->len;
+ dev_kfree_skb(skb);
+ if (ret < 0) {
+ db->bc.tx_err_counter++;
+ return 0;
+ }
+ ndev->stats.tx_bytes += len;
+ ndev->stats.tx_packets++;
+ }
+
+ if (netif_queue_stopped(ndev) &&
+ (skb_queue_len(&db->txq) < DM9051_TX_QUE_LO_WATER))
+ netif_wake_queue(ndev);
+ }
+
+ return ntx;
+}
+
+static irqreturn_t dm9051_rx_threaded_irq(int irq, void *pw)
+{
+ struct board_info *db = pw;
+ int result, result_tx;
+
+ mutex_lock(&db->spi_lockm);
+
+ result = dm9051_disable_interrupt(db);
+ if (result)
+ goto out_unlock;
+
+ result = dm9051_clear_interrupt(db);
+ if (result)
+ goto out_unlock;
+
+ do {
+ result = dm9051_loop_rx(db); /* threaded irq rx */
+ if (result < 0)
+ goto out_unlock;
+ result_tx = dm9051_loop_tx(db); /* more tx better performance */
+ if (result_tx < 0)
+ goto out_unlock;
+ } while (result > 0);
+
+ dm9051_enable_interrupt(db);
+
+ /* rx or tx errors jump here so the mutex still gets unlocked
+ */
+out_unlock:
+ mutex_unlock(&db->spi_lockm);
+
+ return IRQ_HANDLED;
+}
+
+static void dm9051_tx_delay(struct work_struct *work)
+{
+ struct board_info *db = container_of(work, struct board_info, tx_work);
+ int result;
+
+ mutex_lock(&db->spi_lockm);
+
+ result = dm9051_loop_tx(db);
+ if (result < 0)
+ netdev_err(db->ndev, "transmit packet error\n");
+
+ mutex_unlock(&db->spi_lockm);
+}
+
+static void dm9051_rxctl_delay(struct work_struct *work)
+{
+ struct board_info *db = container_of(work, struct board_info, rxctrl_work);
+ struct net_device *ndev = db->ndev;
+ int result;
+
+ mutex_lock(&db->spi_lockm);
+
+ result = dm9051_set_regs(db, DM9051_PAR, ndev->dev_addr, sizeof(ndev->dev_addr));
+ if (result < 0)
+ goto out_unlock;
+
+ dm9051_set_recv(db);
+
+ /* unlock the mutex and return from this function if a regmap call fails
+ */
+out_unlock:
+ mutex_unlock(&db->spi_lockm);
+}
+
+/* Open network device
+ * Called when the network device is marked active, such as a user executing
+ * 'ifconfig up' on the device
+ */
+static int dm9051_open(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ struct spi_device *spi = db->spidev;
+ int ret;
+
+ db->imr_all = IMR_PAR | IMR_PRM;
+ db->lcr_all = LMCR_MODE1;
+ db->rctl.rcr_all = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
+ memset(db->rctl.hash_table, 0, sizeof(db->rctl.hash_table));
+
+ ndev->irq = spi->irq; /* by dts */
+ ret = request_threaded_irq(spi->irq, NULL, dm9051_rx_threaded_irq,
+ dm9051_irq_flag(db) | IRQF_ONESHOT,
+ ndev->name, db);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to get irq\n");
+ return ret;
+ }
+
+ phy_support_sym_pause(db->phydev);
+ phy_start(db->phydev);
+
+ /* flow control parameters init */
+ db->pause.rx_pause = true;
+ db->pause.tx_pause = true;
+ db->pause.autoneg = AUTONEG_DISABLE;
+
+ if (db->phydev->autoneg)
+ db->pause.autoneg = AUTONEG_ENABLE;
+
+ ret = dm9051_all_start(db);
+ if (ret) {
+ phy_stop(db->phydev);
+ free_irq(spi->irq, db);
+ return ret;
+ }
+
+ netif_wake_queue(ndev);
+
+ return 0;
+}
+
+/* Close network device
+ * Called to close down a network device which has been active. Cancel any
+ * work, shutdown the RX and TX process and then place the chip into a low
+ * power state while it is not being used
+ */
+static int dm9051_stop(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ int ret;
+
+ ret = dm9051_all_stop(db);
+ if (ret)
+ return ret;
+
+ flush_work(&db->tx_work);
+ flush_work(&db->rxctrl_work);
+
+ phy_stop(db->phydev);
+
+ free_irq(db->spidev->irq, db);
+
+ netif_stop_queue(ndev);
+
+ skb_queue_purge(&db->txq);
+
+ return 0;
+}
+
+/* event: queue the skb and schedule the tx worker
+ */
+static netdev_tx_t dm9051_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ skb_queue_tail(&db->txq, skb);
+ if (skb_queue_len(&db->txq) > DM9051_TX_QUE_HI_WATER)
+ netif_stop_queue(ndev); /* enforce limit queue size */
+
+ schedule_work(&db->tx_work);
+
+ return NETDEV_TX_OK;
+}
+
+/* event: compute the new rx-mode settings and schedule the rx-control worker
+ */
+static void dm9051_set_rx_mode(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ struct dm9051_rxctrl rxctrl;
+ struct netdev_hw_addr *ha;
+ u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
+ u32 hash_val;
+
+ memset(&rxctrl, 0, sizeof(rxctrl));
+
+ /* rx control */
+ if (ndev->flags & IFF_PROMISC) {
+ rcr |= RCR_PRMSC;
+ netdev_dbg(ndev, "set_multicast rcr |= RCR_PRMSC, rcr= %02x\n", rcr);
+ }
+
+ if (ndev->flags & IFF_ALLMULTI) {
+ rcr |= RCR_ALL;
+ netdev_dbg(ndev, "set_multicast rcr |= RCR_ALLMULTI, rcr= %02x\n", rcr);
+ }
+
+ rxctrl.rcr_all = rcr;
+
+ /* broadcast address */
+ rxctrl.hash_table[0] = 0;
+ rxctrl.hash_table[1] = 0;
+ rxctrl.hash_table[2] = 0;
+ rxctrl.hash_table[3] = 0x8000;
+
+ /* the multicast address in Hash Table : 64 bits */
+ netdev_for_each_mc_addr(ha, ndev) {
+ hash_val = ether_crc_le(ETH_ALEN, ha->addr) & GENMASK(5, 0);
+ rxctrl.hash_table[hash_val / 16] |= BIT(0) << (hash_val % 16);
+ }
+
+ /* schedule work to do the actual set of the data if needed */
+
+ if (memcmp(&db->rctl, &rxctrl, sizeof(rxctrl))) {
+ memcpy(&db->rctl, &rxctrl, sizeof(rxctrl));
+ schedule_work(&db->rxctrl_work);
+ }
+}
+
+/* event: write into the mac registers and eeprom directly
+ */
+static int dm9051_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(ndev, p);
+ if (ret < 0)
+ return ret;
+
+ eth_commit_mac_addr_change(ndev, p);
+ return dm9051_set_regs(db, DM9051_PAR, ndev->dev_addr, sizeof(ndev->dev_addr));
+}
+
+static const struct net_device_ops dm9051_netdev_ops = {
+ .ndo_open = dm9051_open,
+ .ndo_stop = dm9051_stop,
+ .ndo_start_xmit = dm9051_start_xmit,
+ .ndo_set_rx_mode = dm9051_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = dm9051_set_mac_address,
+};
+
+static void dm9051_operation_clear(struct board_info *db)
+{
+ db->bc.status_err_counter = 0;
+ db->bc.large_err_counter = 0;
+ db->bc.rx_err_counter = 0;
+ db->bc.tx_err_counter = 0;
+ db->bc.fifo_rst_counter = 0;
+}
+
+static int dm9051_mdio_register(struct board_info *db)
+{
+ struct spi_device *spi = db->spidev;
+ int ret;
+
+ db->mdiobus = devm_mdiobus_alloc(&spi->dev);
+ if (!db->mdiobus)
+ return -ENOMEM;
+
+ db->mdiobus->priv = db;
+ db->mdiobus->read = dm9051_mdio_read;
+ db->mdiobus->write = dm9051_mdio_write;
+ db->mdiobus->name = "dm9051-mdiobus";
+ db->mdiobus->phy_mask = (u32)~BIT(1);
+ db->mdiobus->parent = &spi->dev;
+ snprintf(db->mdiobus->id, MII_BUS_ID_SIZE,
+ "dm9051-%s.%u", dev_name(&spi->dev), spi->chip_select);
+
+ ret = devm_mdiobus_register(&spi->dev, db->mdiobus);
+ if (ret)
+ dev_err(&spi->dev, "Could not register MDIO bus\n");
+
+ return ret;
+}
+
+static void dm9051_handle_link_change(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ phy_print_status(db->phydev);
+
+ /* only write pause settings to the mac; since the mac and phy are
+ * integrated, link state, speed and duplex are already in sync
+ */
+ if (db->phydev->link) {
+ if (db->phydev->pause) {
+ db->pause.rx_pause = true;
+ db->pause.tx_pause = true;
+ }
+ dm9051_update_fcr(db);
+ }
+}
+
+/* connect the phy in poll mode
+ */
+static int dm9051_phy_connect(struct board_info *db)
+{
+ char phy_id[MII_BUS_ID_SIZE + 3];
+
+ snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
+ db->mdiobus->id, DM9051_PHY_ADDR);
+
+ db->phydev = phy_connect(db->ndev, phy_id, dm9051_handle_link_change,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(db->phydev))
+ return PTR_ERR_OR_ZERO(db->phydev);
+ return 0;
+}
+
+static int dm9051_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct net_device *ndev;
+ struct board_info *db;
+ int ret;
+
+ ndev = devm_alloc_etherdev(dev, sizeof(struct board_info));
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, dev);
+ dev_set_drvdata(dev, ndev);
+
+ db = netdev_priv(ndev);
+
+ db->msg_enable = 0;
+ db->spidev = spi;
+ db->ndev = ndev;
+
+ ndev->netdev_ops = &dm9051_netdev_ops;
+ ndev->ethtool_ops = &dm9051_ethtool_ops;
+
+ mutex_init(&db->spi_lockm);
+ mutex_init(&db->reg_mutex);
+
+ INIT_WORK(&db->rxctrl_work, dm9051_rxctl_delay);
+ INIT_WORK(&db->tx_work, dm9051_tx_delay);
+
+ ret = dm9051_map_init(spi, db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_map_chipid(db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_map_etherdev_par(ndev, db);
+ if (ret < 0)
+ return ret;
+
+ ret = dm9051_mdio_register(db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_phy_connect(db);
+ if (ret)
+ return ret;
+
+ dm9051_operation_clear(db);
+ skb_queue_head_init(&db->txq);
+
+ ret = devm_register_netdev(dev, ndev);
+ if (ret) {
+ phy_disconnect(db->phydev);
+ return dev_err_probe(dev, ret, "device register failed");
+ }
+
+ return 0;
+}
+
+static void dm9051_drv_remove(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct board_info *db = to_dm9051_board(ndev);
+
+ phy_disconnect(db->phydev);
+}
+
+static const struct of_device_id dm9051_match_table[] = {
+ { .compatible = "davicom,dm9051" },
+ {}
+};
+
+static const struct spi_device_id dm9051_id_table[] = {
+ { "dm9051", 0 },
+ {}
+};
+
+static struct spi_driver dm9051_driver = {
+ .driver = {
+ .name = DRVNAME_9051,
+ .of_match_table = dm9051_match_table,
+ },
+ .probe = dm9051_probe,
+ .remove = dm9051_drv_remove,
+ .id_table = dm9051_id_table,
+};
+module_spi_driver(dm9051_driver);
+
+MODULE_AUTHOR("Joseph CHANG <joseph_chang@davicom.com.tw>");
+MODULE_DESCRIPTION("Davicom DM9051 network SPI driver");
+MODULE_LICENSE("GPL");
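The receive path of the new dm9051 driver above is built around the FIFO layout documented at struct dm9051_rxhdr: a lead byte of 0x01 marks a valid packet, a 4-byte header carries the status bits and a little-endian length that includes the trailing 4-byte CRC, and the frame data follows. A minimal user-space model of parsing that header; the constants mirror DM9051_PKT_RDY and DM9051_PKT_MAX, and the whole thing is illustrative, not driver code:

#include <stdint.h>
#include <stdio.h>

#define PKT_RDY	0x01	/* mirrors DM9051_PKT_RDY */
#define PKT_MAX	1536	/* mirrors DM9051_PKT_MAX */

struct rxhdr_model {	/* same layout as struct dm9051_rxhdr */
	uint8_t headbyte;
	uint8_t status;
	uint16_t rxlen;	/* little-endian on the wire, includes the CRC */
};

static int parse_rxhdr(const uint8_t fifo[4], struct rxhdr_model *hdr)
{
	hdr->headbyte = fifo[0];
	hdr->status = fifo[1];
	hdr->rxlen = (uint16_t)fifo[2] | ((uint16_t)fifo[3] << 8);

	if (hdr->headbyte != PKT_RDY)
		return -1;	/* no valid packet at the read pointer */
	if (hdr->rxlen > PKT_MAX)
		return -1;	/* oversized; the driver resets the FIFO */
	return 0;
}

int main(void)
{
	/* a 64-byte frame plus 4-byte CRC is reported as rxlen = 68 = 0x0044 */
	const uint8_t fifo[4] = { 0x01, 0x00, 0x44, 0x00 };
	struct rxhdr_model hdr;

	if (!parse_rxhdr(fifo, &hdr))
		printf("frame of %u bytes including CRC, payload %u bytes\n",
		       (unsigned)hdr.rxlen, (unsigned)(hdr.rxlen - 4));
	return 0;
}

dm9051_loop_rx() applies equivalent checks before handing rxlen - 4 bytes of payload to netif_rx().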
diff --git a/drivers/net/ethernet/davicom/dm9051.h b/drivers/net/ethernet/davicom/dm9051.h
new file mode 100644
index 000000000000..fef3120edd7c
--- /dev/null
+++ b/drivers/net/ethernet/davicom/dm9051.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Davicom Semiconductor, Inc.
+ * Davicom DM9051 SPI Fast Ethernet Linux driver
+ */
+
+#ifndef _DM9051_H_
+#define _DM9051_H_
+
+#include <linux/bits.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+
+#define DM9051_ID 0x9051
+
+#define DM9051_NCR 0x00
+#define DM9051_NSR 0x01
+#define DM9051_TCR 0x02
+#define DM9051_RCR 0x05
+#define DM9051_BPTR 0x08
+#define DM9051_FCR 0x0A
+#define DM9051_EPCR 0x0B
+#define DM9051_EPAR 0x0C
+#define DM9051_EPDRL 0x0D
+#define DM9051_EPDRH 0x0E
+#define DM9051_PAR 0x10
+#define DM9051_MAR 0x16
+#define DM9051_GPCR 0x1E
+#define DM9051_GPR 0x1F
+
+#define DM9051_VIDL 0x28
+#define DM9051_VIDH 0x29
+#define DM9051_PIDL 0x2A
+#define DM9051_PIDH 0x2B
+#define DM9051_SMCR 0x2F
+#define DM9051_ATCR 0x30
+#define DM9051_SPIBCR 0x38
+#define DM9051_INTCR 0x39
+#define DM9051_PPCR 0x3D
+
+#define DM9051_MPCR 0x55
+#define DM9051_LMCR 0x57
+#define DM9051_MBNDRY 0x5E
+
+#define DM9051_MRRL 0x74
+#define DM9051_MRRH 0x75
+#define DM9051_MWRL 0x7A
+#define DM9051_MWRH 0x7B
+#define DM9051_TXPLL 0x7C
+#define DM9051_TXPLH 0x7D
+#define DM9051_ISR 0x7E
+#define DM9051_IMR 0x7F
+
+#define DM_SPI_MRCMDX 0x70
+#define DM_SPI_MRCMD 0x72
+#define DM_SPI_MWCMD 0x78
+
+#define DM_SPI_WR 0x80
+
+/* dm9051 Ethernet controller register bits
+ */
+/* 0x00 */
+#define NCR_WAKEEN BIT(6)
+#define NCR_FDX BIT(3)
+#define NCR_RST BIT(0)
+/* 0x01 */
+#define NSR_SPEED BIT(7)
+#define NSR_LINKST BIT(6)
+#define NSR_WAKEST BIT(5)
+#define NSR_TX2END BIT(3)
+#define NSR_TX1END BIT(2)
+/* 0x02 */
+#define TCR_DIS_JABBER_TIMER BIT(6) /* for Jabber Packet support */
+#define TCR_TXREQ BIT(0)
+/* 0x05 */
+#define RCR_DIS_WATCHDOG_TIMER BIT(6) /* for Jabber Packet support */
+#define RCR_DIS_LONG BIT(5)
+#define RCR_DIS_CRC BIT(4)
+#define RCR_ALL BIT(3)
+#define RCR_PRMSC BIT(1)
+#define RCR_RXEN BIT(0)
+#define RCR_RX_DISABLE (RCR_DIS_LONG | RCR_DIS_CRC)
+/* 0x06 */
+#define RSR_RF BIT(7)
+#define RSR_MF BIT(6)
+#define RSR_LCS BIT(5)
+#define RSR_RWTO BIT(4)
+#define RSR_PLE BIT(3)
+#define RSR_AE BIT(2)
+#define RSR_CE BIT(1)
+#define RSR_FOE BIT(0)
+#define RSR_ERR_BITS (RSR_RF | RSR_LCS | RSR_RWTO | RSR_PLE | \
+ RSR_AE | RSR_CE | RSR_FOE)
+/* 0x0A */
+#define FCR_TXPEN BIT(5)
+#define FCR_BKPM BIT(3)
+#define FCR_FLCE BIT(0)
+#define FCR_RXTX_BITS (FCR_TXPEN | FCR_BKPM | FCR_FLCE)
+/* 0x0B */
+#define EPCR_WEP BIT(4)
+#define EPCR_EPOS BIT(3)
+#define EPCR_ERPRR BIT(2)
+#define EPCR_ERPRW BIT(1)
+#define EPCR_ERRE BIT(0)
+/* 0x1E */
+#define GPCR_GEP_CNTL BIT(0)
+/* 0x1F */
+#define GPR_PHY_OFF BIT(0)
+/* 0x30 */
+#define ATCR_AUTO_TX BIT(7)
+/* 0x39 */
+#define INTCR_POL_LOW (1 << 0)
+#define INTCR_POL_HIGH (0 << 0)
+/* 0x3D */
+/* Pause Packet Control Register - default = 1 */
+#define PPCR_PAUSE_COUNT 0x08
+/* 0x55 */
+#define MPCR_RSTTX BIT(1)
+#define MPCR_RSTRX BIT(0)
+/* 0x57 */
+/* LEDMode Control Register - LEDMode1 */
+/* Value 0x81 : bit[7] = 1, bit[2] = 0, bit[1:0] = 01b */
+#define LMCR_NEWMOD BIT(7)
+#define LMCR_TYPED1 BIT(1)
+#define LMCR_TYPED0 BIT(0)
+#define LMCR_MODE1 (LMCR_NEWMOD | LMCR_TYPED0)
+/* 0x5E */
+#define MBNDRY_BYTE BIT(7)
+/* 0xFE */
+#define ISR_MBS BIT(7)
+#define ISR_LNKCHG BIT(5)
+#define ISR_ROOS BIT(3)
+#define ISR_ROS BIT(2)
+#define ISR_PTS BIT(1)
+#define ISR_PRS BIT(0)
+#define ISR_CLR_INT (ISR_LNKCHG | ISR_ROOS | ISR_ROS | \
+ ISR_PTS | ISR_PRS)
+#define ISR_STOP_MRCMD (ISR_MBS)
+/* 0xFF */
+#define IMR_PAR BIT(7)
+#define IMR_LNKCHGI BIT(5)
+#define IMR_PTM BIT(1)
+#define IMR_PRM BIT(0)
+
+/* Const
+ */
+#define DM9051_PHY_ADDR 1 /* PHY id */
+#define DM9051_PHY 0x40 /* PHY address 0x01 */
+#define DM9051_PKT_RDY 0x01 /* Packet ready to receive */
+#define DM9051_PKT_MAX 1536 /* Received packet max size */
+#define DM9051_TX_QUE_HI_WATER 50
+#define DM9051_TX_QUE_LO_WATER 25
+#define DM_EEPROM_MAGIC 0x9051
+
+#define DM_RXHDR_SIZE sizeof(struct dm9051_rxhdr)
+
+static inline struct board_info *to_dm9051_board(struct net_device *ndev)
+{
+ return netdev_priv(ndev);
+}
+
+#endif /* _DM9051_H_ */
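The DM9051_MAR registers defined above hold the 64-bit multicast hash filter that dm9051_set_rx_mode() fills: bit (ether_crc_le(addr) & 0x3f) across four 16-bit words, with bit 63 reserved for broadcast. A user-space sketch of that computation; the local crc32_le() is a plain reflected CRC-32 with polynomial 0xEDB88320, seeded with ~0 and not inverted, matching the kernel's ether_crc_le(), and the sample address is arbitrary:

#include <stdint.h>
#include <stdio.h>

static uint32_t crc32_le(const uint8_t *data, int len)
{
	uint32_t crc = ~0U;	/* same seed as ether_crc_le() */
	int i, j;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (j = 0; j < 8; j++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320U : 0);
	}
	return crc;		/* no final inversion, as in ether_crc_le() */
}

int main(void)
{
	/* IPv4 all-hosts multicast group 224.0.0.1, purely as an example */
	const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint16_t hash_table[4] = { 0, 0, 0, 0x8000 };	/* bit 63 = broadcast */
	unsigned int hash = crc32_le(mc, 6) & 0x3f;

	hash_table[hash / 16] |= 1u << (hash % 16);
	printf("hash bit %u -> MAR word %u bit %u, word = 0x%04x\n",
	       hash, hash / 16, hash % 16, (unsigned)hash_table[hash / 16]);
	return 0;
}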
diff --git a/drivers/net/ethernet/dec/tulip/pnic.c b/drivers/net/ethernet/dec/tulip/pnic.c
index 3fb39e32e1b4..653bde48ef44 100644
--- a/drivers/net/ethernet/dec/tulip/pnic.c
+++ b/drivers/net/ethernet/dec/tulip/pnic.c
@@ -21,7 +21,7 @@ void pnic_do_nway(struct net_device *dev)
struct tulip_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->base_addr;
u32 phy_reg = ioread32(ioaddr + 0xB8);
- u32 new_csr6 = tp->csr6 & ~0x40C40200;
+ u32 new_csr6;
if (phy_reg & 0x78000000) { /* Ignore baseT4 */
if (phy_reg & 0x20000000) dev->if_port = 5;
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index c710dc17be90..8dd7bf9014ec 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -340,7 +340,7 @@ enum wake_event_bits {
struct netdev_desc {
__le32 next_desc;
__le32 status;
- struct desc_frag { __le32 addr, length; } frag[1];
+ struct desc_frag { __le32 addr, length; } frag;
};
/* Bits in netdev_desc.status */
@@ -980,8 +980,8 @@ static void tx_timeout(struct net_device *dev, unsigned int txqueue)
le32_to_cpu(np->tx_ring[i].next_desc),
le32_to_cpu(np->tx_ring[i].status),
(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
- le32_to_cpu(np->tx_ring[i].frag[0].addr),
- le32_to_cpu(np->tx_ring[i].frag[0].length));
+ le32_to_cpu(np->tx_ring[i].frag.addr),
+ le32_to_cpu(np->tx_ring[i].frag.length));
}
printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
ioread32(np->base + TxListPtr),
@@ -1027,7 +1027,7 @@ static void init_ring(struct net_device *dev)
np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
np->rx_ring[i].status = 0;
- np->rx_ring[i].frag[0].length = 0;
+ np->rx_ring[i].frag.length = 0;
np->rx_skbuff[i] = NULL;
}
@@ -1039,16 +1039,16 @@ static void init_ring(struct net_device *dev)
if (skb == NULL)
break;
skb_reserve(skb, 2); /* 16 byte align the IP header. */
- np->rx_ring[i].frag[0].addr = cpu_to_le32(
+ np->rx_ring[i].frag.addr = cpu_to_le32(
dma_map_single(&np->pci_dev->dev, skb->data,
np->rx_buf_sz, DMA_FROM_DEVICE));
if (dma_mapping_error(&np->pci_dev->dev,
- np->rx_ring[i].frag[0].addr)) {
+ np->rx_ring[i].frag.addr)) {
dev_kfree_skb(skb);
np->rx_skbuff[i] = NULL;
break;
}
- np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
+ np->rx_ring[i].frag.length = cpu_to_le32(np->rx_buf_sz | LastFrag);
}
np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
@@ -1097,12 +1097,12 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
txdesc->next_desc = 0;
txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
- txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
+ txdesc->frag.addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
skb->data, skb->len, DMA_TO_DEVICE));
if (dma_mapping_error(&np->pci_dev->dev,
- txdesc->frag[0].addr))
+ txdesc->frag.addr))
goto drop_frame;
- txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
+ txdesc->frag.length = cpu_to_le32 (skb->len | LastFrag);
/* Increment cur_tx before tasklet_schedule() */
np->cur_tx++;
@@ -1151,7 +1151,7 @@ reset_tx (struct net_device *dev)
skb = np->tx_skbuff[i];
if (skb) {
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->tx_ring[i].frag[0].addr),
+ le32_to_cpu(np->tx_ring[i].frag.addr),
skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
np->tx_skbuff[i] = NULL;
@@ -1271,12 +1271,12 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
skb = np->tx_skbuff[entry];
/* Free the original skb. */
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->tx_ring[entry].frag[0].addr),
+ le32_to_cpu(np->tx_ring[entry].frag.addr),
skb->len, DMA_TO_DEVICE);
dev_consume_skb_irq(np->tx_skbuff[entry]);
np->tx_skbuff[entry] = NULL;
- np->tx_ring[entry].frag[0].addr = 0;
- np->tx_ring[entry].frag[0].length = 0;
+ np->tx_ring[entry].frag.addr = 0;
+ np->tx_ring[entry].frag.length = 0;
}
spin_unlock(&np->lock);
} else {
@@ -1290,12 +1290,12 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
skb = np->tx_skbuff[entry];
/* Free the original skb. */
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->tx_ring[entry].frag[0].addr),
+ le32_to_cpu(np->tx_ring[entry].frag.addr),
skb->len, DMA_TO_DEVICE);
dev_consume_skb_irq(np->tx_skbuff[entry]);
np->tx_skbuff[entry] = NULL;
- np->tx_ring[entry].frag[0].addr = 0;
- np->tx_ring[entry].frag[0].length = 0;
+ np->tx_ring[entry].frag.addr = 0;
+ np->tx_ring[entry].frag.length = 0;
}
spin_unlock(&np->lock);
}
@@ -1372,16 +1372,16 @@ static void rx_poll(struct tasklet_struct *t)
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
dma_sync_single_for_cpu(&np->pci_dev->dev,
- le32_to_cpu(desc->frag[0].addr),
+ le32_to_cpu(desc->frag.addr),
np->rx_buf_sz, DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
dma_sync_single_for_device(&np->pci_dev->dev,
- le32_to_cpu(desc->frag[0].addr),
+ le32_to_cpu(desc->frag.addr),
np->rx_buf_sz, DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
} else {
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(desc->frag[0].addr),
+ le32_to_cpu(desc->frag.addr),
np->rx_buf_sz, DMA_FROM_DEVICE);
skb_put(skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
@@ -1427,18 +1427,18 @@ static void refill_rx (struct net_device *dev)
if (skb == NULL)
break; /* Better luck next round. */
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
- np->rx_ring[entry].frag[0].addr = cpu_to_le32(
+ np->rx_ring[entry].frag.addr = cpu_to_le32(
dma_map_single(&np->pci_dev->dev, skb->data,
np->rx_buf_sz, DMA_FROM_DEVICE));
if (dma_mapping_error(&np->pci_dev->dev,
- np->rx_ring[entry].frag[0].addr)) {
+ np->rx_ring[entry].frag.addr)) {
dev_kfree_skb_irq(skb);
np->rx_skbuff[entry] = NULL;
break;
}
}
/* Perhaps we need not reset this field. */
- np->rx_ring[entry].frag[0].length =
+ np->rx_ring[entry].frag.length =
cpu_to_le32(np->rx_buf_sz | LastFrag);
np->rx_ring[entry].status = 0;
cnt++;
@@ -1870,14 +1870,14 @@ static int netdev_close(struct net_device *dev)
(int)(np->tx_ring_dma));
for (i = 0; i < TX_RING_SIZE; i++)
printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
- i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
- np->tx_ring[i].frag[0].length);
+ i, np->tx_ring[i].status, np->tx_ring[i].frag.addr,
+ np->tx_ring[i].frag.length);
printk(KERN_DEBUG " Rx ring %8.8x:\n",
(int)(np->rx_ring_dma));
for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
- i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
- np->rx_ring[i].frag[0].length);
+ i, np->rx_ring[i].status, np->rx_ring[i].frag.addr,
+ np->rx_ring[i].frag.length);
}
}
#endif /* __i386__ debugging only */
@@ -1892,19 +1892,19 @@ static int netdev_close(struct net_device *dev)
skb = np->rx_skbuff[i];
if (skb) {
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->rx_ring[i].frag[0].addr),
+ le32_to_cpu(np->rx_ring[i].frag.addr),
np->rx_buf_sz, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
np->rx_skbuff[i] = NULL;
}
- np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
+ np->rx_ring[i].frag.addr = cpu_to_le32(0xBADF00D0); /* poison */
}
for (i = 0; i < TX_RING_SIZE; i++) {
np->tx_ring[i].next_desc = 0;
skb = np->tx_skbuff[i];
if (skb) {
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->tx_ring[i].frag[0].addr),
+ le32_to_cpu(np->tx_ring[i].frag.addr),
skb->len, DMA_TO_DEVICE);
dev_kfree_skb(skb);
np->tx_skbuff[i] = NULL;
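
The sundance change above replaces the one-element frag[1] array with a plain struct member: each descriptor only ever carries a single fragment, so the hardware layout is unchanged and only the C-level access pattern differs. A minimal userspace sketch (type names here are illustrative, not the driver's) showing that both declarations have the same size:

#include <stdio.h>
#include <stdint.h>

struct desc_frag { uint32_t addr, length; };

/* old style: the single fragment expressed as a one-element array */
struct desc_old { uint32_t next_desc, status; struct desc_frag frag[1]; };
/* new style: the same single fragment as a plain member */
struct desc_new { uint32_t next_desc, status; struct desc_frag frag; };

int main(void)
{
	printf("old: %zu bytes, new: %zu bytes\n",
	       sizeof(struct desc_old), sizeof(struct desc_new));
	/* accesses change from d->frag[0].addr to d->frag.addr */
	return 0;
}
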
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 323340826dab..69dbf950d451 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -608,7 +608,6 @@ static s32 nps_enet_probe(struct platform_device *pdev)
/* Get IRQ number */
priv->irq = platform_get_irq(pdev, 0);
if (priv->irq < 0) {
- dev_err(dev, "failed to retrieve <irq Rx-Tx> value from device tree\n");
err = -ENODEV;
goto out_netdev;
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 0f90d2d5bb60..4b047255d928 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -18,6 +18,7 @@
#include <linux/ptp_classify.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
+#include <net/tso.h>
#include "dpaa2-eth.h"
@@ -34,6 +35,75 @@ MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
struct ptp_qoriq *dpaa2_ptp;
EXPORT_SYMBOL(dpaa2_ptp);
+static void dpaa2_eth_detect_features(struct dpaa2_eth_priv *priv)
+{
+ priv->features = 0;
+
+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_PTP_ONESTEP_VER_MAJOR,
+ DPNI_PTP_ONESTEP_VER_MINOR) >= 0)
+ priv->features |= DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT;
+}
+
+static void dpaa2_update_ptp_onestep_indirect(struct dpaa2_eth_priv *priv,
+ u32 offset, u8 udp)
+{
+ struct dpni_single_step_cfg cfg;
+
+ cfg.en = 1;
+ cfg.ch_update = udp;
+ cfg.offset = offset;
+ cfg.peer_delay = 0;
+
+ if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token, &cfg))
+ WARN_ONCE(1, "Failed to set single step register");
+}
+
+static void dpaa2_update_ptp_onestep_direct(struct dpaa2_eth_priv *priv,
+ u32 offset, u8 udp)
+{
+ u32 val = 0;
+
+ val = DPAA2_PTP_SINGLE_STEP_ENABLE |
+ DPAA2_PTP_SINGLE_CORRECTION_OFF(offset);
+
+ if (udp)
+ val |= DPAA2_PTP_SINGLE_STEP_CH;
+
+ if (priv->onestep_reg_base)
+ writel(val, priv->onestep_reg_base);
+}
+
+static void dpaa2_ptp_onestep_reg_update_method(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_single_step_cfg ptp_cfg;
+
+ priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_indirect;
+
+ if (!(priv->features & DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT))
+ return;
+
+ if (dpni_get_single_step_cfg(priv->mc_io, 0,
+ priv->mc_token, &ptp_cfg)) {
+ dev_err(dev, "dpni_get_single_step_cfg cannot retrieve onestep reg, falling back to indirect update\n");
+ return;
+ }
+
+ if (!ptp_cfg.ptp_onestep_reg_base) {
+ dev_err(dev, "1588 onestep reg not available, falling back to indirect update\n");
+ return;
+ }
+
+ priv->onestep_reg_base = ioremap(ptp_cfg.ptp_onestep_reg_base,
+ sizeof(u32));
+ if (!priv->onestep_reg_base) {
+ dev_err(dev, "1588 onestep reg cannot be mapped, falling back to indirect update\n");
+ return;
+ }
+
+ priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_direct;
+}
+
static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
dma_addr_t iova_addr)
{
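
dpaa2_update_ptp_onestep_direct() composes the SINGLE_STEP register value from the bit layout added to dpaa2-eth.h later in this patch (enable in bit 31, checksum-update in bit 7, correction-field offset starting at bit 8). A small standalone sketch of that composition; the offset and udp values below are made-up example inputs, not driver constants:

#include <stdio.h>
#include <stdint.h>

#define DPAA2_PTP_SINGLE_STEP_ENABLE		(1u << 31)
#define DPAA2_PTP_SINGLE_STEP_CH		(1u << 7)
#define DPAA2_PTP_SINGLE_CORRECTION_OFF(v)	((v) << 8)

int main(void)
{
	uint32_t offset = 34;	/* example correction-field offset in the frame */
	int udp = 1;		/* PTP over UDP: also update the checksum */
	uint32_t val;

	val = DPAA2_PTP_SINGLE_STEP_ENABLE |
	      DPAA2_PTP_SINGLE_CORRECTION_OFF(offset);
	if (udp)
		val |= DPAA2_PTP_SINGLE_STEP_CH;

	printf("SINGLE_STEP register value: 0x%08x\n", (unsigned int)val);
	return 0;
}
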
@@ -695,7 +765,6 @@ static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
struct sk_buff *skb)
{
struct ptp_tstamp origin_timestamp;
- struct dpni_single_step_cfg cfg;
u8 msgtype, twostep, udp;
struct dpaa2_faead *faead;
struct dpaa2_fas *fas;
@@ -749,17 +818,48 @@ static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
htonl(origin_timestamp.sec_lsb);
*(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec);
- cfg.en = 1;
- cfg.ch_update = udp;
- cfg.offset = offset1;
- cfg.peer_delay = 0;
+ if (priv->ptp_correction_off == offset1)
+ return;
+
+ priv->dpaa2_set_onestep_params_cb(priv, offset1, udp);
+ priv->ptp_correction_off = offset1;
- if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token,
- &cfg))
- WARN_ONCE(1, "Failed to set single step register");
}
}
+static void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_sgt_cache *sgt_cache;
+ void *sgt_buf = NULL;
+ int sgt_buf_size;
+
+ sgt_cache = this_cpu_ptr(priv->sgt_cache);
+ sgt_buf_size = priv->tx_data_offset +
+ DPAA2_ETH_SG_ENTRIES_MAX * sizeof(struct dpaa2_sg_entry);
+
+ if (sgt_cache->count == 0)
+ sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN);
+ else
+ sgt_buf = sgt_cache->buf[--sgt_cache->count];
+ if (!sgt_buf)
+ return NULL;
+
+ memset(sgt_buf, 0, sgt_buf_size);
+
+ return sgt_buf;
+}
+
+static void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf)
+{
+ struct dpaa2_eth_sgt_cache *sgt_cache;
+
+ sgt_cache = this_cpu_ptr(priv->sgt_cache);
+ if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
+ skb_free_frag(sgt_buf);
+ else
+ sgt_cache->buf[sgt_cache->count++] = sgt_buf;
+}
+
/* Create a frame descriptor based on a fragmented skb */
static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
struct sk_buff *skb,
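
dpaa2_eth_sgt_get()/dpaa2_eth_sgt_recycle() fold the previously open-coded per-CPU SGT buffer handling into one bounded get/recycle pair: take a buffer from the small cache when one is available, otherwise allocate, and on release either stash it back (up to DPAA2_ETH_SGT_CACHE_SIZE) or free it. A rough userspace sketch of the same idea, with malloc/free standing in for the frag allocator and a plain array instead of a per-CPU cache; all names here are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CACHE_SIZE	2
#define BUF_SIZE	256

static void *cache[CACHE_SIZE];
static int cache_count;

static void *buf_get(void)
{
	void *buf = cache_count ? cache[--cache_count] : malloc(BUF_SIZE);

	if (buf)
		memset(buf, 0, BUF_SIZE);	/* always hand out a clean buffer */
	return buf;
}

static void buf_recycle(void *buf)
{
	if (cache_count >= CACHE_SIZE)
		free(buf);			/* cache full: release for real */
	else
		cache[cache_count++] = buf;	/* keep it for the next get */
}

int main(void)
{
	void *a = buf_get();
	void *b = buf_get();

	if (!a || !b)
		return 1;
	buf_recycle(a);
	buf_recycle(b);
	printf("cached buffers: %d\n", cache_count);

	while (cache_count)			/* drain, like sgt_cache_drain() */
		free(cache[--cache_count]);
	return 0;
}
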
@@ -805,12 +905,11 @@ static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
/* Prepare the HW SGT structure */
sgt_buf_size = priv->tx_data_offset +
sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
- sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN);
+ sgt_buf = dpaa2_eth_sgt_get(priv);
if (unlikely(!sgt_buf)) {
err = -ENOMEM;
goto sgt_buf_alloc_failed;
}
- memset(sgt_buf, 0, sgt_buf_size);
sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
@@ -846,6 +945,7 @@ static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
err = -ENOMEM;
goto dma_map_single_failed;
}
+ memset(fd, 0, sizeof(struct dpaa2_fd));
dpaa2_fd_set_offset(fd, priv->tx_data_offset);
dpaa2_fd_set_format(fd, dpaa2_fd_sg);
dpaa2_fd_set_addr(fd, addr);
@@ -855,7 +955,7 @@ static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
return 0;
dma_map_single_failed:
- skb_free_frag(sgt_buf);
+ dpaa2_eth_sgt_recycle(priv, sgt_buf);
sgt_buf_alloc_failed:
dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
dma_map_sg_failed:
@@ -875,7 +975,6 @@ static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
void **swa_addr)
{
struct device *dev = priv->net_dev->dev.parent;
- struct dpaa2_eth_sgt_cache *sgt_cache;
struct dpaa2_sg_entry *sgt;
struct dpaa2_eth_swa *swa;
dma_addr_t addr, sgt_addr;
@@ -884,18 +983,10 @@ static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
int err;
/* Prepare the HW SGT structure */
- sgt_cache = this_cpu_ptr(priv->sgt_cache);
sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);
-
- if (sgt_cache->count == 0)
- sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN,
- GFP_ATOMIC);
- else
- sgt_buf = sgt_cache->buf[--sgt_cache->count];
+ sgt_buf = dpaa2_eth_sgt_get(priv);
if (unlikely(!sgt_buf))
return -ENOMEM;
-
- sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL);
@@ -923,6 +1014,7 @@ static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
goto sgt_map_failed;
}
+ memset(fd, 0, sizeof(struct dpaa2_fd));
dpaa2_fd_set_offset(fd, priv->tx_data_offset);
dpaa2_fd_set_format(fd, dpaa2_fd_sg);
dpaa2_fd_set_addr(fd, sgt_addr);
@@ -934,10 +1026,7 @@ static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
sgt_map_failed:
dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL);
data_map_failed:
- if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
- kfree(sgt_buf);
- else
- sgt_cache->buf[sgt_cache->count++] = sgt_buf;
+ dpaa2_eth_sgt_recycle(priv, sgt_buf);
return err;
}
@@ -978,6 +1067,7 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
if (unlikely(dma_mapping_error(dev, addr)))
return -ENOMEM;
+ memset(fd, 0, sizeof(struct dpaa2_fd));
dpaa2_fd_set_addr(fd, addr);
dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
dpaa2_fd_set_len(fd, skb->len);
@@ -1005,9 +1095,9 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_swa *swa;
u8 fd_format = dpaa2_fd_get_format(fd);
u32 fd_len = dpaa2_fd_get_len(fd);
-
- struct dpaa2_eth_sgt_cache *sgt_cache;
struct dpaa2_sg_entry *sgt;
+ int should_free_skb = 1;
+ int i;
fd_addr = dpaa2_fd_get_addr(fd);
buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
@@ -1039,6 +1129,28 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
/* Unmap the SGT buffer */
dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
DMA_BIDIRECTIONAL);
+ } else if (swa->type == DPAA2_ETH_SWA_SW_TSO) {
+ skb = swa->tso.skb;
+
+ sgt = (struct dpaa2_sg_entry *)(buffer_start +
+ priv->tx_data_offset);
+
+ /* Unmap and free the header */
+ dma_unmap_single(dev, dpaa2_sg_get_addr(sgt), TSO_HEADER_SIZE,
+ DMA_TO_DEVICE);
+ kfree(dpaa2_iova_to_virt(priv->iommu_domain, dpaa2_sg_get_addr(sgt)));
+
+ /* Unmap the other SG entries for the data */
+ for (i = 1; i < swa->tso.num_sg; i++)
+ dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
+ dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);
+
+ /* Unmap the SGT buffer */
+ dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
+ DMA_BIDIRECTIONAL);
+
+ if (!swa->tso.is_last_fd)
+ should_free_skb = 0;
} else {
skb = swa->single.skb;
@@ -1067,55 +1179,195 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
}
/* Get the timestamp value */
- if (skb->cb[0] == TX_TSTAMP) {
- struct skb_shared_hwtstamps shhwtstamps;
- __le64 *ts = dpaa2_get_ts(buffer_start, true);
- u64 ns;
-
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-
- ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
- shhwtstamps.hwtstamp = ns_to_ktime(ns);
- skb_tstamp_tx(skb, &shhwtstamps);
- } else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
- mutex_unlock(&priv->onestep_tstamp_lock);
+ if (swa->type != DPAA2_ETH_SWA_SW_TSO) {
+ if (skb->cb[0] == TX_TSTAMP) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ __le64 *ts = dpaa2_get_ts(buffer_start, true);
+ u64 ns;
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
+ ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ } else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
+ mutex_unlock(&priv->onestep_tstamp_lock);
+ }
}
/* Free SGT buffer allocated on tx */
- if (fd_format != dpaa2_fd_single) {
- sgt_cache = this_cpu_ptr(priv->sgt_cache);
- if (swa->type == DPAA2_ETH_SWA_SG) {
- skb_free_frag(buffer_start);
- } else {
- if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
- kfree(buffer_start);
- else
- sgt_cache->buf[sgt_cache->count++] = buffer_start;
+ if (fd_format != dpaa2_fd_single)
+ dpaa2_eth_sgt_recycle(priv, buffer_start);
+
+ /* Move on with skb release. If we are just confirming multiple FDs
+ * from the same TSO skb then only the last one will need to free the
+ * skb.
+ */
+ if (should_free_skb)
+ napi_consume_skb(skb, in_napi);
+}
+
+static int dpaa2_eth_build_gso_fd(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb, struct dpaa2_fd *fd,
+ int *num_fds, u32 *total_fds_len)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ int hdr_len, total_len, data_left, fd_len;
+ int num_sge, err, i, sgt_buf_size;
+ struct dpaa2_fd *fd_start = fd;
+ struct dpaa2_sg_entry *sgt;
+ struct dpaa2_eth_swa *swa;
+ dma_addr_t sgt_addr, addr;
+ dma_addr_t tso_hdr_dma;
+ unsigned int index = 0;
+ struct tso_t tso;
+ char *tso_hdr;
+ void *sgt_buf;
+
+ /* Initialize the TSO handler, and prepare the first payload */
+ hdr_len = tso_start(skb, &tso);
+ *total_fds_len = 0;
+
+ total_len = skb->len - hdr_len;
+ while (total_len > 0) {
+ /* Prepare the HW SGT structure for this frame */
+ sgt_buf = dpaa2_eth_sgt_get(priv);
+ if (unlikely(!sgt_buf)) {
+ netdev_err(priv->net_dev, "dpaa2_eth_sgt_get() failed\n");
+ err = -ENOMEM;
+ goto err_sgt_get;
+ }
+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+
+ /* Determine the data length of this frame */
+ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_left;
+ fd_len = data_left + hdr_len;
+
+ /* Prepare packet headers: MAC + IP + TCP */
+ tso_hdr = kmalloc(TSO_HEADER_SIZE, GFP_ATOMIC);
+ if (!tso_hdr) {
+ err = -ENOMEM;
+ goto err_alloc_tso_hdr;
+ }
+
+ tso_build_hdr(skb, tso_hdr, &tso, data_left, total_len == 0);
+ tso_hdr_dma = dma_map_single(dev, tso_hdr, TSO_HEADER_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, tso_hdr_dma)) {
+ netdev_err(priv->net_dev, "dma_map_single(tso_hdr) failed\n");
+ err = -ENOMEM;
+ goto err_map_tso_hdr;
+ }
+
+ /* Setup the SG entry for the header */
+ dpaa2_sg_set_addr(sgt, tso_hdr_dma);
+ dpaa2_sg_set_len(sgt, hdr_len);
+ dpaa2_sg_set_final(sgt, data_left <= 0);
+
+ /* Compose the SG entries for each fragment of data */
+ num_sge = 1;
+ while (data_left > 0) {
+ int size;
+
+ /* Move to the next SG entry */
+ sgt++;
+ size = min_t(int, tso.size, data_left);
+
+ addr = dma_map_single(dev, tso.data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, addr)) {
+ netdev_err(priv->net_dev, "dma_map_single(tso.data) failed\n");
+ err = -ENOMEM;
+ goto err_map_data;
+ }
+ dpaa2_sg_set_addr(sgt, addr);
+ dpaa2_sg_set_len(sgt, size);
+ dpaa2_sg_set_final(sgt, size == data_left);
+
+ num_sge++;
+
+ /* Build the data for the __next__ fragment */
+ data_left -= size;
+ tso_build_data(skb, &tso, size);
+ }
+
+ /* Store the skb backpointer in the SGT buffer */
+ sgt_buf_size = priv->tx_data_offset + num_sge * sizeof(struct dpaa2_sg_entry);
+ swa = (struct dpaa2_eth_swa *)sgt_buf;
+ swa->type = DPAA2_ETH_SWA_SW_TSO;
+ swa->tso.skb = skb;
+ swa->tso.num_sg = num_sge;
+ swa->tso.sgt_size = sgt_buf_size;
+ swa->tso.is_last_fd = total_len == 0 ? 1 : 0;
+
+ /* Separately map the SGT buffer */
+ sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, sgt_addr))) {
+ netdev_err(priv->net_dev, "dma_map_single(sgt_buf) failed\n");
+ err = -ENOMEM;
+ goto err_map_sgt;
}
+
+ /* Setup the frame descriptor */
+ memset(fd, 0, sizeof(struct dpaa2_fd));
+ dpaa2_fd_set_offset(fd, priv->tx_data_offset);
+ dpaa2_fd_set_format(fd, dpaa2_fd_sg);
+ dpaa2_fd_set_addr(fd, sgt_addr);
+ dpaa2_fd_set_len(fd, fd_len);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+ *total_fds_len += fd_len;
+ /* Advance to the next frame descriptor */
+ fd++;
+ index++;
}
- /* Move on with skb release */
- napi_consume_skb(skb, in_napi);
+ *num_fds = index;
+
+ return 0;
+
+err_map_sgt:
+err_map_data:
+ /* Unmap all the data S/G entries for the current FD */
+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+ for (i = 1; i < num_sge; i++)
+ dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
+ dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);
+
+ /* Unmap the header entry */
+ dma_unmap_single(dev, tso_hdr_dma, TSO_HEADER_SIZE, DMA_TO_DEVICE);
+err_map_tso_hdr:
+ kfree(tso_hdr);
+err_alloc_tso_hdr:
+ dpaa2_eth_sgt_recycle(priv, sgt_buf);
+err_sgt_get:
+ /* Free all the other FDs that were already fully created */
+ for (i = 0; i < index; i++)
+ dpaa2_eth_free_tx_fd(priv, NULL, &fd_start[i], false);
+
+ return err;
}
static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
struct net_device *net_dev)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- struct dpaa2_fd fd;
- struct rtnl_link_stats64 *percpu_stats;
+ int total_enqueued = 0, retries = 0, enqueued;
struct dpaa2_eth_drv_stats *percpu_extras;
+ struct rtnl_link_stats64 *percpu_stats;
+ unsigned int needed_headroom;
+ int num_fds = 1, max_retries;
struct dpaa2_eth_fq *fq;
struct netdev_queue *nq;
+ struct dpaa2_fd *fd;
u16 queue_mapping;
- unsigned int needed_headroom;
- u32 fd_len;
+ void *swa = NULL;
u8 prio = 0;
int err, i;
- void *swa;
+ u32 fd_len;
percpu_stats = this_cpu_ptr(priv->percpu_stats);
percpu_extras = this_cpu_ptr(priv->percpu_extras);
+ fd = (this_cpu_ptr(priv->fd))->array;
needed_headroom = dpaa2_eth_needed_headroom(skb);
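
dpaa2_eth_build_gso_fd() segments the skb in software: every frame descriptor gets a freshly built header plus at most gso_size bytes of payload, and the last frame carries whatever remains. A rough userspace sketch of just that arithmetic (the lengths below are made-up example numbers, not driver constants):

#include <stdio.h>

int main(void)
{
	int skb_len = 9000, hdr_len = 54, gso_size = 1448;	/* example input */
	int total_len = skb_len - hdr_len;
	int num_fds = 0;

	while (total_len > 0) {
		int data_left = total_len < gso_size ? total_len : gso_size;
		int fd_len = data_left + hdr_len;	/* header + payload */

		printf("FD %d: payload %d, frame %d bytes\n",
		       num_fds, data_left, fd_len);
		total_len -= data_left;
		num_fds++;
	}
	printf("skb split into %d frame descriptors\n", num_fds);
	return 0;
}
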
@@ -1130,20 +1382,28 @@ static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
}
/* Setup the FD fields */
- memset(&fd, 0, sizeof(fd));
- if (skb_is_nonlinear(skb)) {
- err = dpaa2_eth_build_sg_fd(priv, skb, &fd, &swa);
+ if (skb_is_gso(skb)) {
+ err = dpaa2_eth_build_gso_fd(priv, skb, fd, &num_fds, &fd_len);
+ percpu_extras->tx_sg_frames += num_fds;
+ percpu_extras->tx_sg_bytes += fd_len;
+ percpu_extras->tx_tso_frames += num_fds;
+ percpu_extras->tx_tso_bytes += fd_len;
+ } else if (skb_is_nonlinear(skb)) {
+ err = dpaa2_eth_build_sg_fd(priv, skb, fd, &swa);
percpu_extras->tx_sg_frames++;
percpu_extras->tx_sg_bytes += skb->len;
+ fd_len = dpaa2_fd_get_len(fd);
} else if (skb_headroom(skb) < needed_headroom) {
- err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, &fd, &swa);
+ err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, fd, &swa);
percpu_extras->tx_sg_frames++;
percpu_extras->tx_sg_bytes += skb->len;
percpu_extras->tx_converted_sg_frames++;
percpu_extras->tx_converted_sg_bytes += skb->len;
+ fd_len = dpaa2_fd_get_len(fd);
} else {
- err = dpaa2_eth_build_single_fd(priv, skb, &fd, &swa);
+ err = dpaa2_eth_build_single_fd(priv, skb, fd, &swa);
+ fd_len = dpaa2_fd_get_len(fd);
}
if (unlikely(err)) {
@@ -1151,11 +1411,12 @@ static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
goto err_build_fd;
}
- if (skb->cb[0])
- dpaa2_eth_enable_tx_tstamp(priv, &fd, swa, skb);
+ if (swa && skb->cb[0])
+ dpaa2_eth_enable_tx_tstamp(priv, fd, swa, skb);
/* Tracing point */
- trace_dpaa2_tx_fd(net_dev, &fd);
+ for (i = 0; i < num_fds; i++)
+ trace_dpaa2_tx_fd(net_dev, &fd[i]);
/* TxConf FQ selection relies on queue id from the stack.
* In case of a forwarded frame from another DPNI interface, we choose
@@ -1175,27 +1436,32 @@ static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
queue_mapping %= dpaa2_eth_queue_count(priv);
}
fq = &priv->fq[queue_mapping];
-
- fd_len = dpaa2_fd_get_len(&fd);
nq = netdev_get_tx_queue(net_dev, queue_mapping);
netdev_tx_sent_queue(nq, fd_len);
/* Everything that happens after this enqueues might race with
* the Tx confirmation callback for this frame
*/
- for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
- err = priv->enqueue(priv, fq, &fd, prio, 1, NULL);
- if (err != -EBUSY)
- break;
+ max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
+ while (total_enqueued < num_fds && retries < max_retries) {
+ err = priv->enqueue(priv, fq, &fd[total_enqueued],
+ prio, num_fds - total_enqueued, &enqueued);
+ if (err == -EBUSY) {
+ retries++;
+ continue;
+ }
+
+ total_enqueued += enqueued;
}
- percpu_extras->tx_portal_busy += i;
+ percpu_extras->tx_portal_busy += retries;
+
if (unlikely(err < 0)) {
percpu_stats->tx_errors++;
/* Clean up everything, including freeing the skb */
- dpaa2_eth_free_tx_fd(priv, fq, &fd, false);
+ dpaa2_eth_free_tx_fd(priv, fq, fd, false);
netdev_tx_completed_queue(nq, 1, fd_len);
} else {
- percpu_stats->tx_packets++;
+ percpu_stats->tx_packets += total_enqueued;
percpu_stats->tx_bytes += fd_len;
}
@@ -1523,7 +1789,7 @@ static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
count = sgt_cache->count;
for (i = 0; i < count; i++)
- kfree(sgt_cache->buf[i]);
+ skb_free_frag(sgt_cache->buf[i]);
sgt_cache->count = 0;
}
}
@@ -1811,8 +2077,10 @@ static int dpaa2_eth_open(struct net_device *net_dev)
goto enable_err;
}
- if (dpaa2_eth_is_type_phy(priv))
+ if (dpaa2_eth_is_type_phy(priv)) {
+ dpaa2_mac_start(priv->mac);
phylink_start(priv->mac->phylink);
+ }
return 0;
@@ -1887,6 +2155,7 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
if (dpaa2_eth_is_type_phy(priv)) {
phylink_stop(priv->mac->phylink);
+ dpaa2_mac_stop(priv->mac);
} else {
netif_tx_stop_all_queues(net_dev);
netif_carrier_off(net_dev);
@@ -2207,6 +2476,9 @@ static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
config.rx_filter = HWTSTAMP_FILTER_ALL;
}
+ if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ dpaa2_ptp_onestep_reg_update_method(priv);
+
return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
@@ -4100,6 +4372,8 @@ static int dpaa2_eth_netdev_init(struct net_device *net_dev)
return err;
}
+ dpaa2_eth_detect_features(priv);
+
/* Capabilities listing */
supported |= IFF_LIVE_ADDR_CHANGE;
@@ -4115,7 +4389,8 @@ static int dpaa2_eth_netdev_init(struct net_device *net_dev)
net_dev->features = NETIF_F_RXCSUM |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_SG | NETIF_F_HIGHDMA |
- NETIF_F_LLTX | NETIF_F_HW_TC;
+ NETIF_F_LLTX | NETIF_F_HW_TC | NETIF_F_TSO;
+ net_dev->gso_max_segs = DPAA2_ETH_ENQUEUE_MAX_FDS;
net_dev->hw_features = net_dev->features;
if (priv->dpni_attrs.vlan_filter_entries)
@@ -4397,6 +4672,13 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
goto err_alloc_sgt_cache;
}
+ priv->fd = alloc_percpu(*priv->fd);
+ if (!priv->fd) {
+ dev_err(dev, "alloc_percpu(fds) failed\n");
+ err = -ENOMEM;
+ goto err_alloc_fds;
+ }
+
err = dpaa2_eth_netdev_init(net_dev);
if (err)
goto err_netdev_init;
@@ -4484,6 +4766,8 @@ err_poll_thread:
err_alloc_rings:
err_csum:
err_netdev_init:
+ free_percpu(priv->fd);
+err_alloc_fds:
free_percpu(priv->sgt_cache);
err_alloc_sgt_cache:
free_percpu(priv->percpu_extras);
@@ -4539,6 +4823,7 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
fsl_mc_free_irqs(ls_dev);
dpaa2_eth_free_rings(priv);
+ free_percpu(priv->fd);
free_percpu(priv->sgt_cache);
free_percpu(priv->percpu_stats);
free_percpu(priv->percpu_extras);
@@ -4547,6 +4832,8 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
dpaa2_eth_free_dpbp(priv);
dpaa2_eth_free_dpio(priv);
dpaa2_eth_free_dpni(priv);
+ if (priv->onestep_reg_base)
+ iounmap(priv->onestep_reg_base);
fsl_mc_portal_free(priv->mc_io);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index e54e70ebdd05..447718483ef4 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -122,6 +122,7 @@ enum dpaa2_eth_swa_type {
DPAA2_ETH_SWA_SINGLE,
DPAA2_ETH_SWA_SG,
DPAA2_ETH_SWA_XDP,
+ DPAA2_ETH_SWA_SW_TSO,
};
/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
@@ -142,6 +143,12 @@ struct dpaa2_eth_swa {
int dma_size;
struct xdp_frame *xdpf;
} xdp;
+ struct {
+ struct sk_buff *skb;
+ int num_sg;
+ int sgt_size;
+ int is_last_fd;
+ } tso;
};
};
@@ -354,6 +361,8 @@ struct dpaa2_eth_drv_stats {
__u64 tx_conf_bytes;
__u64 tx_sg_frames;
__u64 tx_sg_bytes;
+ __u64 tx_tso_frames;
+ __u64 tx_tso_bytes;
__u64 rx_sg_frames;
__u64 rx_sg_bytes;
/* Linear skbs sent as a S/G FD due to insufficient headroom */
@@ -493,8 +502,15 @@ struct dpaa2_eth_trap_data {
struct dpaa2_eth_priv *priv;
};
+#define DPAA2_ETH_SG_ENTRIES_MAX (PAGE_SIZE / sizeof(struct scatterlist))
+
#define DPAA2_ETH_DEFAULT_COPYBREAK 512
+#define DPAA2_ETH_ENQUEUE_MAX_FDS 200
+struct dpaa2_eth_fds {
+ struct dpaa2_fd array[DPAA2_ETH_ENQUEUE_MAX_FDS];
+};
+
/* Driver private data */
struct dpaa2_eth_priv {
struct net_device *net_dev;
@@ -510,12 +526,15 @@ struct dpaa2_eth_priv {
u8 num_channels;
struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
struct dpaa2_eth_sgt_cache __percpu *sgt_cache;
-
+ unsigned long features;
struct dpni_attr dpni_attrs;
u16 dpni_ver_major;
u16 dpni_ver_minor;
u16 tx_data_offset;
-
+ void __iomem *onestep_reg_base;
+ u8 ptp_correction_off;
+ void (*dpaa2_set_onestep_params_cb)(struct dpaa2_eth_priv *priv,
+ u32 offset, u8 udp);
struct fsl_mc_device *dpbp_dev;
u16 rx_buf_size;
u16 bpid;
@@ -577,6 +596,8 @@ struct dpaa2_eth_priv {
struct devlink_port devlink_port;
u32 rx_copybreak;
+
+ struct dpaa2_eth_fds __percpu *fd;
};
struct dpaa2_eth_devlink_priv {
@@ -655,6 +676,13 @@ enum dpaa2_eth_rx_dist {
#define DPAA2_ETH_DIST_L4DST BIT(8)
#define DPAA2_ETH_DIST_ALL (~0ULL)
+#define DPNI_PTP_ONESTEP_VER_MAJOR 8
+#define DPNI_PTP_ONESTEP_VER_MINOR 2
+#define DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT BIT(0)
+#define DPAA2_PTP_SINGLE_STEP_ENABLE BIT(31)
+#define DPAA2_PTP_SINGLE_STEP_CH BIT(7)
+#define DPAA2_PTP_SINGLE_CORRECTION_OFF(v) ((v) << 8)
+
#define DPNI_PAUSE_VER_MAJOR 7
#define DPNI_PAUSE_VER_MINOR 13
#define dpaa2_eth_has_pause_support(priv) \
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 3fdbf87dccb1..eea7d7a07c00 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -44,6 +44,8 @@ static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
"[drv] tx conf bytes",
"[drv] tx sg frames",
"[drv] tx sg bytes",
+ "[drv] tx tso frames",
+ "[drv] tx tso bytes",
"[drv] rx sg frames",
"[drv] rx sg bytes",
"[drv] tx converted sg frames",
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index 623d113b6581..c48811d3bcd5 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -3,6 +3,7 @@
#include <linux/acpi.h>
#include <linux/pcs-lynx.h>
+#include <linux/phy/phy.h>
#include <linux/property.h>
#include "dpaa2-eth.h"
@@ -11,6 +12,28 @@
#define phylink_to_dpaa2_mac(config) \
container_of((config), struct dpaa2_mac, phylink_config)
+#define DPMAC_PROTOCOL_CHANGE_VER_MAJOR 4
+#define DPMAC_PROTOCOL_CHANGE_VER_MINOR 8
+
+#define DPAA2_MAC_FEATURE_PROTOCOL_CHANGE BIT(0)
+
+static int dpaa2_mac_cmp_ver(struct dpaa2_mac *mac,
+ u16 ver_major, u16 ver_minor)
+{
+ if (mac->ver_major == ver_major)
+ return mac->ver_minor - ver_minor;
+ return mac->ver_major - ver_major;
+}
+
+static void dpaa2_mac_detect_features(struct dpaa2_mac *mac)
+{
+ mac->features = 0;
+
+ if (dpaa2_mac_cmp_ver(mac, DPMAC_PROTOCOL_CHANGE_VER_MAJOR,
+ DPMAC_PROTOCOL_CHANGE_VER_MINOR) >= 0)
+ mac->features |= DPAA2_MAC_FEATURE_PROTOCOL_CHANGE;
+}
+
static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
{
*if_mode = PHY_INTERFACE_MODE_NA;
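
dpaa2_mac_cmp_ver() gates the optional protocol-change support on the DPMAC API version: majors are compared first, minors break the tie, and a non-negative result means "at least that version". A tiny standalone sketch of the comparison (the version numbers are example values):

#include <stdio.h>

static int cmp_ver(int major, int minor, int want_major, int want_minor)
{
	if (major == want_major)
		return minor - want_minor;
	return major - want_major;
}

int main(void)
{
	/* example: protocol change needs DPMAC API >= 4.8 */
	printf("4.9 vs 4.8 -> %d\n", cmp_ver(4, 9, 4, 8));	/* >= 0: supported */
	printf("3.2 vs 4.8 -> %d\n", cmp_ver(3, 2, 4, 8));	/* < 0: not supported */
	return 0;
}
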
@@ -38,6 +61,29 @@ static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
return 0;
}
+static enum dpmac_eth_if dpmac_eth_if_mode(phy_interface_t if_mode)
+{
+ switch (if_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return DPMAC_ETH_IF_RGMII;
+ case PHY_INTERFACE_MODE_USXGMII:
+ return DPMAC_ETH_IF_USXGMII;
+ case PHY_INTERFACE_MODE_QSGMII:
+ return DPMAC_ETH_IF_QSGMII;
+ case PHY_INTERFACE_MODE_SGMII:
+ return DPMAC_ETH_IF_SGMII;
+ case PHY_INTERFACE_MODE_10GBASER:
+ return DPMAC_ETH_IF_XFI;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ return DPMAC_ETH_IF_1000BASEX;
+ default:
+ return DPMAC_ETH_IF_MII;
+ }
+}
+
static struct fwnode_handle *dpaa2_mac_get_node(struct device *dev,
u16 dpmac_id)
{
@@ -100,6 +146,14 @@ static int dpaa2_mac_get_if_mode(struct fwnode_handle *dpmac_node,
return err;
}
+static struct phylink_pcs *dpaa2_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+
+ return mac->pcs;
+}
+
static void dpaa2_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -117,6 +171,19 @@ static void dpaa2_mac_config(struct phylink_config *config, unsigned int mode,
if (err)
netdev_err(mac->net_dev, "%s: dpmac_set_link_state() = %d\n",
__func__, err);
+
+ if (!mac->serdes_phy)
+ return;
+
+ /* This happens only if we support changing the protocol at runtime */
+ err = dpmac_set_protocol(mac->mc_io, 0, mac->mc_dev->mc_handle,
+ dpmac_eth_if_mode(state->interface));
+ if (err)
+ netdev_err(mac->net_dev, "dpmac_set_protocol() = %d\n", err);
+
+ err = phy_set_mode_ext(mac->serdes_phy, PHY_MODE_ETHERNET, state->interface);
+ if (err)
+ netdev_err(mac->net_dev, "phy_set_mode_ext() = %d\n", err);
}
static void dpaa2_mac_link_up(struct phylink_config *config,
@@ -172,6 +239,7 @@ static void dpaa2_mac_link_down(struct phylink_config *config,
static const struct phylink_mac_ops dpaa2_mac_phylink_ops = {
.validate = phylink_generic_validate,
+ .mac_select_pcs = dpaa2_mac_select_pcs,
.mac_config = dpaa2_mac_config,
.mac_link_up = dpaa2_mac_link_up,
.mac_link_down = dpaa2_mac_link_down,
@@ -226,10 +294,66 @@ static void dpaa2_pcs_destroy(struct dpaa2_mac *mac)
}
}
+static void dpaa2_mac_set_supported_interfaces(struct dpaa2_mac *mac)
+{
+ int intf, err;
+
+ /* We support the current interface mode and, if we have a PCS,
+ * similar interface modes that do not require the SerDes lane to be
+ * reconfigured.
+ */
+ __set_bit(mac->if_mode, mac->phylink_config.supported_interfaces);
+ if (mac->pcs) {
+ switch (mac->if_mode) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ mac->phylink_config.supported_interfaces);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (!mac->serdes_phy)
+ return;
+
+ /* In case we have access to the SerDes phy/lane, then ask the SerDes
+ * driver what interfaces are supported based on the current PLL
+ * configuration.
+ */
+ for (intf = 0; intf < PHY_INTERFACE_MODE_MAX; intf++) {
+ if (intf == PHY_INTERFACE_MODE_NA)
+ continue;
+
+ err = phy_validate(mac->serdes_phy, PHY_MODE_ETHERNET, intf, NULL);
+ if (err)
+ continue;
+
+ __set_bit(intf, mac->phylink_config.supported_interfaces);
+ }
+}
+
+void dpaa2_mac_start(struct dpaa2_mac *mac)
+{
+ if (mac->serdes_phy)
+ phy_power_on(mac->serdes_phy);
+}
+
+void dpaa2_mac_stop(struct dpaa2_mac *mac)
+{
+ if (mac->serdes_phy)
+ phy_power_off(mac->serdes_phy);
+}
+
int dpaa2_mac_connect(struct dpaa2_mac *mac)
{
struct net_device *net_dev = mac->net_dev;
struct fwnode_handle *dpmac_node;
+ struct phy *serdes_phy = NULL;
struct phylink *phylink;
int err;
@@ -246,6 +370,20 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
return -EINVAL;
mac->if_mode = err;
+ if (mac->features & DPAA2_MAC_FEATURE_PROTOCOL_CHANGE &&
+ !phy_interface_mode_is_rgmii(mac->if_mode) &&
+ is_of_node(dpmac_node)) {
+ serdes_phy = of_phy_get(to_of_node(dpmac_node), NULL);
+
+ if (serdes_phy == ERR_PTR(-ENODEV))
+ serdes_phy = NULL;
+ else if (IS_ERR(serdes_phy))
+ return PTR_ERR(serdes_phy);
+ else
+ phy_init(serdes_phy);
+ }
+ mac->serdes_phy = serdes_phy;
+
/* The MAC does not have the capability to add RGMII delays so
* error out if the interface mode requests them and there is no PHY
* to act upon them
@@ -274,25 +412,7 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
MAC_10FD | MAC_100FD | MAC_1000FD | MAC_2500FD | MAC_5000FD |
MAC_10000FD;
- /* We support the current interface mode, and if we have a PCS
- * similar interface modes that do not require the PLLs to be
- * reconfigured.
- */
- __set_bit(mac->if_mode, mac->phylink_config.supported_interfaces);
- if (mac->pcs) {
- switch (mac->if_mode) {
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_SGMII:
- __set_bit(PHY_INTERFACE_MODE_1000BASEX,
- mac->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_SGMII,
- mac->phylink_config.supported_interfaces);
- break;
-
- default:
- break;
- }
- }
+ dpaa2_mac_set_supported_interfaces(mac);
phylink = phylink_create(&mac->phylink_config,
dpmac_node, mac->if_mode,
@@ -303,9 +423,6 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
}
mac->phylink = phylink;
- if (mac->pcs)
- phylink_set_pcs(mac->phylink, mac->pcs);
-
err = phylink_fwnode_phy_connect(mac->phylink, dpmac_node, 0);
if (err) {
netdev_err(net_dev, "phylink_fwnode_phy_connect() = %d\n", err);
@@ -330,6 +447,8 @@ void dpaa2_mac_disconnect(struct dpaa2_mac *mac)
phylink_disconnect_phy(mac->phylink);
phylink_destroy(mac->phylink);
dpaa2_pcs_destroy(mac);
+ of_phy_put(mac->serdes_phy);
+ mac->serdes_phy = NULL;
}
int dpaa2_mac_open(struct dpaa2_mac *mac)
@@ -353,6 +472,14 @@ int dpaa2_mac_open(struct dpaa2_mac *mac)
goto err_close_dpmac;
}
+ err = dpmac_get_api_version(mac->mc_io, 0, &mac->ver_major, &mac->ver_minor);
+ if (err) {
+ netdev_err(net_dev, "dpmac_get_api_version() = %d\n", err);
+ goto err_close_dpmac;
+ }
+
+ dpaa2_mac_detect_features(mac);
+
/* Find the device node representing the MAC device and link the device
* behind the associated netdev to it.
*/
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
index 1331a8477fe4..a58cab188a99 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -17,6 +17,8 @@ struct dpaa2_mac {
struct net_device *net_dev;
struct fsl_mc_io *mc_io;
struct dpmac_attr attr;
+ u16 ver_major, ver_minor;
+ unsigned long features;
struct phylink_config phylink_config;
struct phylink *phylink;
@@ -24,6 +26,8 @@ struct dpaa2_mac {
enum dpmac_link_type if_link_type;
struct phylink_pcs *pcs;
struct fwnode_handle *fw_node;
+
+ struct phy *serdes_phy;
};
bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
@@ -43,4 +47,8 @@ void dpaa2_mac_get_strings(u8 *data);
void dpaa2_mac_get_ethtool_stats(struct dpaa2_mac *mac, u64 *data);
+void dpaa2_mac_start(struct dpaa2_mac *mac);
+
+void dpaa2_mac_stop(struct dpaa2_mac *mac);
+
#endif /* DPAA2_MAC_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index 9a561072aa4a..e507e9065214 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -703,8 +703,10 @@ static int dpaa2_switch_port_open(struct net_device *netdev)
dpaa2_switch_enable_ctrl_if_napi(ethsw);
- if (dpaa2_switch_port_is_type_phy(port_priv))
+ if (dpaa2_switch_port_is_type_phy(port_priv)) {
+ dpaa2_mac_start(port_priv->mac);
phylink_start(port_priv->mac->phylink);
+ }
return 0;
}
@@ -717,6 +719,7 @@ static int dpaa2_switch_port_stop(struct net_device *netdev)
if (dpaa2_switch_port_is_type_phy(port_priv)) {
phylink_stop(port_priv->mac->phylink);
+ dpaa2_mac_stop(port_priv->mac);
} else {
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
index a24b20f76938..e9ac2ecef3be 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
@@ -19,11 +19,15 @@
#define DPMAC_CMDID_CLOSE DPMAC_CMD(0x800)
#define DPMAC_CMDID_OPEN DPMAC_CMD(0x80c)
+#define DPMAC_CMDID_GET_API_VERSION DPMAC_CMD(0xa0c)
+
#define DPMAC_CMDID_GET_ATTR DPMAC_CMD(0x004)
#define DPMAC_CMDID_SET_LINK_STATE DPMAC_CMD_V2(0x0c3)
#define DPMAC_CMDID_GET_COUNTER DPMAC_CMD(0x0c4)
+#define DPMAC_CMDID_SET_PROTOCOL DPMAC_CMD(0x0c7)
+
/* Macros for accessing command fields smaller than 1byte */
#define DPMAC_MASK(field) \
GENMASK(DPMAC_##field##_SHIFT + DPMAC_##field##_SIZE - 1, \
@@ -70,4 +74,12 @@ struct dpmac_rsp_get_counter {
__le64 counter;
};
+struct dpmac_rsp_get_api_version {
+ __le16 major;
+ __le16 minor;
+};
+
+struct dpmac_cmd_set_protocol {
+ u8 eth_if;
+};
#endif /* _FSL_DPMAC_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.c b/drivers/net/ethernet/freescale/dpaa2/dpmac.c
index d5997b654562..f440a4c3b70c 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpmac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.c
@@ -181,3 +181,57 @@ int dpmac_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
return 0;
}
+
+/**
+ * dpmac_get_api_version() - Get Data Path MAC version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path mac API
+ * @minor_ver: Minor version of data path mac API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver)
+{
+ struct dpmac_rsp_get_api_version *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpmac_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
+
+/**
+ * dpmac_set_protocol() - Reconfigure the DPMAC protocol
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @protocol: New protocol for the DPMAC to be reconfigured in.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_set_protocol(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpmac_eth_if protocol)
+{
+ struct dpmac_cmd_set_protocol *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_PROTOCOL,
+ cmd_flags, token);
+ cmd_params = (struct dpmac_cmd_set_protocol *)cmd.params;
+ cmd_params->eth_if = protocol;
+
+ return mc_send_command(mc_io, &cmd);
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.h b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
index 8f7ceb731282..17488819ef68 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpmac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
@@ -205,4 +205,9 @@ enum dpmac_counter_id {
int dpmac_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
enum dpmac_counter_id id, u64 *value);
+int dpmac_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver);
+
+int dpmac_set_protocol(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpmac_eth_if protocol);
#endif /* __FSL_DPMAC_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
index 9f80bdfeedec..828f538097af 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
@@ -98,7 +98,7 @@
#define DPNI_CMDID_GET_LINK_CFG DPNI_CMD(0x278)
#define DPNI_CMDID_SET_SINGLE_STEP_CFG DPNI_CMD(0x279)
-#define DPNI_CMDID_GET_SINGLE_STEP_CFG DPNI_CMD(0x27a)
+#define DPNI_CMDID_GET_SINGLE_STEP_CFG DPNI_CMD_V2(0x27a)
/* Macros for accessing command fields smaller than 1byte */
#define DPNI_MASK(field) \
@@ -658,12 +658,16 @@ struct dpni_cmd_single_step_cfg {
__le16 flags;
__le16 offset;
__le32 peer_delay;
+ __le32 ptp_onestep_reg_base;
+ __le32 pad0;
};
struct dpni_rsp_single_step_cfg {
__le16 flags;
__le16 offset;
__le32 peer_delay;
+ __le32 ptp_onestep_reg_base;
+ __le32 pad0;
};
struct dpni_cmd_enable_vlan_filter {
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
index d6afada99fb6..6c3b36f20fb8 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -2136,6 +2136,8 @@ int dpni_get_single_step_cfg(struct fsl_mc_io *mc_io,
ptp_cfg->ch_update = dpni_get_field(le16_to_cpu(rsp_params->flags),
PTP_CH_UPDATE) ? 1 : 0;
ptp_cfg->peer_delay = le32_to_cpu(rsp_params->peer_delay);
+ ptp_cfg->ptp_onestep_reg_base =
+ le32_to_cpu(rsp_params->ptp_onestep_reg_base);
return err;
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index 7de0562bbf59..6fffd519aa00 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -1074,12 +1074,18 @@ int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
* @peer_delay: For peer-to-peer transparent clocks add this value to the
* correction field in addition to the transient time update.
* The value expresses nanoseconds.
+ * @ptp_onestep_reg_base: 1588 SINGLE_STEP register base address. This address
+ * is used to update the register contents directly.
+ * The user has to create an address mapping for it.
+ *
*/
struct dpni_single_step_cfg {
u8 en;
u8 ch_update;
u16 offset;
u32 peer_delay;
+ u32 ptp_onestep_reg_base;
};
int dpni_set_single_step_cfg(struct fsl_mc_io *mc_io,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index fb39e406b7fc..68d806dc3701 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -18,6 +18,8 @@
#define ENETC_MAX_MTU (ENETC_MAC_MAXFRM_SIZE - \
(ETH_FCS_LEN + ETH_HLEN + VLAN_HLEN))
+#define ENETC_CBD_DATA_MEM_ALIGN 64
+
struct enetc_tx_swbd {
union {
struct sk_buff *skb;
@@ -415,6 +417,42 @@ int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count);
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count);
int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd);
+static inline void *enetc_cbd_alloc_data_mem(struct enetc_si *si,
+ struct enetc_cbd *cbd,
+ int size, dma_addr_t *dma,
+ void **data_align)
+{
+ struct enetc_cbdr *ring = &si->cbd_ring;
+ dma_addr_t dma_align;
+ void *data;
+
+ data = dma_alloc_coherent(ring->dma_dev,
+ size + ENETC_CBD_DATA_MEM_ALIGN,
+ dma, GFP_KERNEL);
+ if (!data) {
+ dev_err(ring->dma_dev, "CBD alloc data memory failed!\n");
+ return NULL;
+ }
+
+ dma_align = ALIGN(*dma, ENETC_CBD_DATA_MEM_ALIGN);
+ *data_align = PTR_ALIGN(data, ENETC_CBD_DATA_MEM_ALIGN);
+
+ cbd->addr[0] = cpu_to_le32(lower_32_bits(dma_align));
+ cbd->addr[1] = cpu_to_le32(upper_32_bits(dma_align));
+ cbd->length = cpu_to_le16(size);
+
+ return data;
+}
+
+static inline void enetc_cbd_free_data_mem(struct enetc_si *si, int size,
+ void *data, dma_addr_t *dma)
+{
+ struct enetc_cbdr *ring = &si->cbd_ring;
+
+ dma_free_coherent(ring->dma_dev, size + ENETC_CBD_DATA_MEM_ALIGN,
+ data, *dma);
+}
+
#ifdef CONFIG_FSL_ENETC_QOS
int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed);
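
enetc_cbd_alloc_data_mem() centralizes the over-allocate-then-align pattern the call sites used to open-code: request size plus 64 extra bytes, round both the bus address and the CPU pointer up to the 64-byte boundary, and program the command descriptor with the aligned address and length. A userspace sketch of the alignment step only, with malloc standing in for dma_alloc_coherent; the names and ALIGN_UP macro are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define DATA_ALIGN	64
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

int main(void)
{
	size_t size = 100;
	void *raw = malloc(size + DATA_ALIGN);	/* extra room for rounding up */
	void *aligned;

	if (!raw)
		return 1;
	aligned = (void *)ALIGN_UP((uintptr_t)raw, DATA_ALIGN);

	printf("raw=%p aligned=%p (offset %zu)\n",
	       raw, aligned, (size_t)((char *)aligned - (char *)raw));
	free(raw);	/* always free the original, unaligned pointer */
	return 0;
}
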
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
index 073e56dcca4e..af68dc46a795 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
@@ -166,70 +166,55 @@ int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
return enetc_send_cmd(si, &cbd);
}
-#define RFSE_ALIGN 64
/* Set entry in RFS table */
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
int index)
{
struct enetc_cbdr *ring = &si->cbd_ring;
struct enetc_cbd cbd = {.cmd = 0};
- dma_addr_t dma, dma_align;
void *tmp, *tmp_align;
+ dma_addr_t dma;
int err;
/* fill up the "set" descriptor */
cbd.cmd = 0;
cbd.cls = 4;
cbd.index = cpu_to_le16(index);
- cbd.length = cpu_to_le16(sizeof(*rfse));
cbd.opt[3] = cpu_to_le32(0); /* SI */
- tmp = dma_alloc_coherent(ring->dma_dev, sizeof(*rfse) + RFSE_ALIGN,
- &dma, GFP_KERNEL);
- if (!tmp) {
- dev_err(ring->dma_dev, "DMA mapping of RFS entry failed!\n");
+ tmp = enetc_cbd_alloc_data_mem(si, &cbd, sizeof(*rfse),
+ &dma, &tmp_align);
+ if (!tmp)
return -ENOMEM;
- }
- dma_align = ALIGN(dma, RFSE_ALIGN);
- tmp_align = PTR_ALIGN(tmp, RFSE_ALIGN);
memcpy(tmp_align, rfse, sizeof(*rfse));
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));
-
err = enetc_send_cmd(si, &cbd);
if (err)
dev_err(ring->dma_dev, "FS entry add failed (%d)!", err);
- dma_free_coherent(ring->dma_dev, sizeof(*rfse) + RFSE_ALIGN,
- tmp, dma);
+ enetc_cbd_free_data_mem(si, sizeof(*rfse), tmp, &dma);
return err;
}
-#define RSSE_ALIGN 64
static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
bool read)
{
struct enetc_cbdr *ring = &si->cbd_ring;
struct enetc_cbd cbd = {.cmd = 0};
- dma_addr_t dma, dma_align;
u8 *tmp, *tmp_align;
+ dma_addr_t dma;
int err, i;
- if (count < RSSE_ALIGN)
+ if (count < ENETC_CBD_DATA_MEM_ALIGN)
/* HW only takes in a full 64 entry table */
return -EINVAL;
- tmp = dma_alloc_coherent(ring->dma_dev, count + RSSE_ALIGN,
- &dma, GFP_KERNEL);
- if (!tmp) {
- dev_err(ring->dma_dev, "DMA mapping of RSS table failed!\n");
+ tmp = enetc_cbd_alloc_data_mem(si, &cbd, count,
+ &dma, (void *)&tmp_align);
+ if (!tmp)
return -ENOMEM;
- }
- dma_align = ALIGN(dma, RSSE_ALIGN);
- tmp_align = PTR_ALIGN(tmp, RSSE_ALIGN);
if (!read)
for (i = 0; i < count; i++)
@@ -238,10 +223,6 @@ static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
/* fill up the descriptor */
cbd.cmd = read ? 2 : 1;
cbd.cls = 3;
- cbd.length = cpu_to_le16(count);
-
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));
err = enetc_send_cmd(si, &cbd);
if (err)
@@ -251,7 +232,7 @@ static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
for (i = 0; i < count; i++)
table[i] = tmp_align[i];
- dma_free_coherent(ring->dma_dev, count + RSSE_ALIGN, tmp, dma);
+ enetc_cbd_free_data_mem(si, count, tmp, &dma);
return err;
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
index 70e6d97b380f..1c8f5cc6dec4 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
@@ -147,7 +147,7 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
/* return all Fs if nothing was there */
if (enetc_mdio_rd(mdio_priv, ENETC_MDIO_CFG) & MDIO_CFG_RD_ER) {
dev_dbg(&bus->dev,
- "Error while reading PHY%d reg at %d.%hhu\n",
+ "Error while reading PHY%d reg at %d.%d\n",
phy_id, dev_addr, regnum);
return 0xffff;
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index ed16a5ac9ad0..a0c75c717073 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -934,18 +934,21 @@ static void enetc_mdiobus_destroy(struct enetc_pf *pf)
enetc_imdio_remove(pf);
}
+static struct phylink_pcs *
+enetc_pl_mac_select_pcs(struct phylink_config *config, phy_interface_t iface)
+{
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+
+ return pf->pcs;
+}
+
static void enetc_pl_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
struct enetc_pf *pf = phylink_to_enetc_pf(config);
- struct enetc_ndev_priv *priv;
enetc_mac_config(&pf->si->hw, state->interface);
-
- priv = netdev_priv(pf->si->ndev);
- if (pf->pcs)
- phylink_set_pcs(priv->phylink, pf->pcs);
}
static void enetc_force_rgmii_mac(struct enetc_hw *hw, int speed, int duplex)
@@ -1062,6 +1065,7 @@ static void enetc_pl_mac_link_down(struct phylink_config *config,
static const struct phylink_mac_ops enetc_mac_phylink_ops = {
.validate = phylink_generic_validate,
+ .mac_select_pcs = enetc_pl_mac_select_pcs,
.mac_config = enetc_pl_mac_config,
.mac_link_up = enetc_pl_mac_link_up,
.mac_link_down = enetc_pl_mac_link_down,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 3555c12edb45..79afb1d7289b 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -52,10 +52,11 @@ static int enetc_setup_taprio(struct net_device *ndev,
struct enetc_cbd cbd = {.cmd = 0};
struct tgs_gcl_conf *gcl_config;
struct tgs_gcl_data *gcl_data;
- struct gce *gce;
dma_addr_t dma;
+ struct gce *gce;
u16 data_size;
u16 gcl_len;
+ void *tmp;
u32 tge;
int err;
int i;
@@ -82,8 +83,9 @@ static int enetc_setup_taprio(struct net_device *ndev,
gcl_config = &cbd.gcl_conf;
data_size = struct_size(gcl_data, entry, gcl_len);
- gcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
- if (!gcl_data)
+ tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
+ &dma, (void *)&gcl_data);
+ if (!tmp)
return -ENOMEM;
gce = (struct gce *)(gcl_data + 1);
@@ -107,19 +109,8 @@ static int enetc_setup_taprio(struct net_device *ndev,
temp_gce->period = cpu_to_le32(temp_entry->interval);
}
- cbd.length = cpu_to_le16(data_size);
cbd.status_flags = 0;
- dma = dma_map_single(&priv->si->pdev->dev, gcl_data,
- data_size, DMA_TO_DEVICE);
- if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
- netdev_err(priv->si->ndev, "DMA mapping failed!\n");
- kfree(gcl_data);
- return -ENOMEM;
- }
-
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
cbd.cls = BDCR_CMD_PORT_GCL;
cbd.status_flags = 0;
@@ -132,8 +123,7 @@ static int enetc_setup_taprio(struct net_device *ndev,
ENETC_QBV_PTGCR_OFFSET,
tge & (~ENETC_QBV_TGE));
- dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_TO_DEVICE);
- kfree(gcl_data);
+ enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
return err;
}
@@ -450,6 +440,7 @@ static struct actions_fwd enetc_act_fwd[] = {
};
static struct enetc_psfp epsfp = {
+ .dev_bitmap = 0,
.psfp_sfi_bitmap = NULL,
};
@@ -463,8 +454,9 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
struct enetc_cbd cbd = {.cmd = 0};
struct streamid_data *si_data;
struct streamid_conf *si_conf;
- u16 data_size;
dma_addr_t dma;
+ u16 data_size;
+ void *tmp;
int port;
int err;
@@ -485,21 +477,11 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
cbd.status_flags = 0;
data_size = sizeof(struct streamid_data);
- si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
- if (!si_data)
+ tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
+ &dma, (void *)&si_data);
+ if (!tmp)
return -ENOMEM;
- cbd.length = cpu_to_le16(data_size);
- dma = dma_map_single(&priv->si->pdev->dev, si_data,
- data_size, DMA_FROM_DEVICE);
- if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
- netdev_err(priv->si->ndev, "DMA mapping failed!\n");
- err = -ENOMEM;
- goto out;
- }
-
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
eth_broadcast_addr(si_data->dmac);
si_data->vid_vidm_tg = (ENETC_CBDR_SID_VID_MASK
+ ((0x3 << 14) | ENETC_CBDR_SID_VIDM));
@@ -520,11 +502,6 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
goto out;
/* Enable the entry overwrite again incase space flushed by hardware */
- memset(&cbd, 0, sizeof(cbd));
-
- cbd.index = cpu_to_le16((u16)sid->index);
- cbd.cmd = 0;
- cbd.cls = BDCR_CMD_STREAM_IDENTIFY;
cbd.status_flags = 0;
si_conf->en = 0x80;
@@ -537,11 +514,6 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
memset(si_data, 0, data_size);
- cbd.length = cpu_to_le16(data_size);
-
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
-
/* VIDM default to be 1.
* VID Match. If set (b1) then the VID must match, otherwise
* any VID is considered a match. VIDM setting is only used
@@ -561,10 +533,7 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
err = enetc_send_cmd(priv->si, &cbd);
out:
- if (!dma_mapping_error(&priv->si->pdev->dev, dma))
- dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_FROM_DEVICE);
-
- kfree(si_data);
+ enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
return err;
}
@@ -635,6 +604,7 @@ static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
struct sfi_counter_data *data_buf;
dma_addr_t dma;
u16 data_size;
+ void *tmp;
int err;
cbd.index = cpu_to_le16((u16)index);
@@ -643,21 +613,11 @@ static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
cbd.status_flags = 0;
data_size = sizeof(struct sfi_counter_data);
- data_buf = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
- if (!data_buf)
- return -ENOMEM;
-
- dma = dma_map_single(&priv->si->pdev->dev, data_buf,
- data_size, DMA_FROM_DEVICE);
- if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
- netdev_err(priv->si->ndev, "DMA mapping failed!\n");
- err = -ENOMEM;
- goto exit;
- }
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
- cbd.length = cpu_to_le16(data_size);
+ tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
+ &dma, (void *)&data_buf);
+ if (!tmp)
+ return -ENOMEM;
err = enetc_send_cmd(priv->si, &cbd);
if (err)
@@ -684,7 +644,8 @@ static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
data_buf->flow_meter_dropl;
exit:
- kfree(data_buf);
+ enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
+
return err;
}
@@ -726,6 +687,7 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
dma_addr_t dma;
u16 data_size;
int err, i;
+ void *tmp;
u64 now;
cbd.index = cpu_to_le16(sgi->index);
@@ -772,25 +734,11 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
sgcl_config->acl_len = (sgi->num_entries - 1) & 0x3;
data_size = struct_size(sgcl_data, sgcl, sgi->num_entries);
-
- sgcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
- if (!sgcl_data)
+ tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
+ &dma, (void *)&sgcl_data);
+ if (!tmp)
return -ENOMEM;
- cbd.length = cpu_to_le16(data_size);
-
- dma = dma_map_single(&priv->si->pdev->dev,
- sgcl_data, data_size,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
- netdev_err(priv->si->ndev, "DMA mapping failed!\n");
- kfree(sgcl_data);
- return -ENOMEM;
- }
-
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
-
sgce = &sgcl_data->sgcl[0];
sgcl_config->agtst = 0x80;
@@ -844,8 +792,7 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
err = enetc_send_cmd(priv->si, &cbd);
exit:
- kfree(sgcl_data);
-
+ enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
return err;
}
@@ -1074,6 +1021,46 @@ static struct actions_fwd *enetc_check_flow_actions(u64 acts,
return NULL;
}
+static int enetc_psfp_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
struct flow_cls_offload *f)
{
@@ -1230,11 +1217,10 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
/* Flow meter and max frame size */
if (entryp) {
- if (entryp->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
- err = -EOPNOTSUPP;
+ err = enetc_psfp_policer_validate(&rule->action, entryp, extack);
+ if (err)
goto free_sfi;
- }
+
if (entryp->police.burst) {
fmi = kzalloc(sizeof(*fmi), GFP_KERNEL);
if (!fmi) {
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 796133de527e..11227f51404c 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2797,7 +2797,7 @@ static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
int ret = 0;
if (enable) {
- ret = phy_init_eee(ndev->phydev, 0);
+ ret = phy_init_eee(ndev->phydev, false);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index af99017a5453..7d49c28215f3 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -101,7 +101,6 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
u32 val, tempval;
struct timespec64 ts;
u64 ns;
- val = 0;
if (fep->pps_enable == enable)
return 0;
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index 266e562bd67a..ec90da1de030 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -14,6 +14,7 @@
#include <linux/acpi.h>
#include <linux/acpi_mdio.h>
+#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mdio.h>
@@ -36,9 +37,10 @@ struct tgec_mdio_controller {
} __packed;
#define MDIO_STAT_ENC BIT(6)
-#define MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8)
+#define MDIO_STAT_CLKDIV(x) (((x) & 0x1ff) << 7)
#define MDIO_STAT_BSY BIT(0)
#define MDIO_STAT_RD_ER BIT(1)
+#define MDIO_STAT_PRE_DIS BIT(5)
#define MDIO_CTL_DEV_ADDR(x) (x & 0x1f)
#define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5)
#define MDIO_CTL_PRE_DIS BIT(10)
@@ -50,6 +52,8 @@ struct tgec_mdio_controller {
struct mdio_fsl_priv {
struct tgec_mdio_controller __iomem *mdio_base;
+ struct clk *enet_clk;
+ u32 mdc_freq;
bool is_little_endian;
bool has_a009885;
bool has_a011043;
@@ -239,7 +243,7 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
if ((xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) &&
!priv->has_a011043) {
dev_dbg(&bus->dev,
- "Error while reading PHY%d reg at %d.%hhu\n",
+ "Error while reading PHY%d reg at %d.%d\n",
phy_id, dev_addr, regnum);
ret = 0xffff;
} else {
@@ -254,6 +258,50 @@ irq_restore:
return ret;
}
+static int xgmac_mdio_set_mdc_freq(struct mii_bus *bus)
+{
+ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
+ struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
+ struct device *dev = bus->parent;
+ u32 mdio_stat, div;
+
+ if (device_property_read_u32(dev, "clock-frequency", &priv->mdc_freq))
+ return 0;
+
+ priv->enet_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->enet_clk)) {
+ dev_err(dev, "Input clock unknown, not changing MDC frequency");
+ return PTR_ERR(priv->enet_clk);
+ }
+
+ div = ((clk_get_rate(priv->enet_clk) / priv->mdc_freq) - 1) / 2;
+ if (div < 5 || div > 0x1ff) {
+ dev_err(dev, "Requested MDC frequency is out of range, ignoring");
+ return -EINVAL;
+ }
+
+ mdio_stat = xgmac_read32(&regs->mdio_stat, priv->is_little_endian);
+ mdio_stat &= ~MDIO_STAT_CLKDIV(0x1ff);
+ mdio_stat |= MDIO_STAT_CLKDIV(div);
+ xgmac_write32(mdio_stat, &regs->mdio_stat, priv->is_little_endian);
+ return 0;
+}
+
+static void xgmac_mdio_set_suppress_preamble(struct mii_bus *bus)
+{
+ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
+ struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
+ struct device *dev = bus->parent;
+ u32 mdio_stat;
+
+ if (!device_property_read_bool(dev, "suppress-preamble"))
+ return;
+
+ mdio_stat = xgmac_read32(&regs->mdio_stat, priv->is_little_endian);
+ mdio_stat |= MDIO_STAT_PRE_DIS;
+ xgmac_write32(mdio_stat, &regs->mdio_stat, priv->is_little_endian);
+}
+
static int xgmac_mdio_probe(struct platform_device *pdev)
{
struct fwnode_handle *fwnode;
@@ -273,7 +321,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
return -EINVAL;
}
- bus = mdiobus_alloc_size(sizeof(struct mdio_fsl_priv));
+ bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(struct mdio_fsl_priv));
if (!bus)
return -ENOMEM;
@@ -284,13 +332,11 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
bus->probe_capabilities = MDIOBUS_C22_C45;
snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res->start);
- /* Set the PHY base address */
priv = bus->priv;
- priv->mdio_base = ioremap(res->start, resource_size(res));
- if (!priv->mdio_base) {
- ret = -ENOMEM;
- goto err_ioremap;
- }
+ priv->mdio_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!priv->mdio_base)
+ return -ENOMEM;
/* For both ACPI and DT cases, endianness of MDIO controller
* needs to be specified using "little-endian" property.
@@ -303,6 +349,12 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
priv->has_a011043 = device_property_read_bool(&pdev->dev,
"fsl,erratum-a011043");
+ xgmac_mdio_set_suppress_preamble(bus);
+
+ ret = xgmac_mdio_set_mdc_freq(bus);
+ if (ret)
+ return ret;
+
fwnode = pdev->dev.fwnode;
if (is_of_node(fwnode))
ret = of_mdiobus_register(bus, to_of_node(fwnode));
@@ -312,32 +364,12 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
ret = -EINVAL;
if (ret) {
dev_err(&pdev->dev, "cannot register MDIO bus\n");
- goto err_registration;
+ return ret;
}
platform_set_drvdata(pdev, bus);
return 0;
-
-err_registration:
- iounmap(priv->mdio_base);
-
-err_ioremap:
- mdiobus_free(bus);
-
- return ret;
-}
-
-static int xgmac_mdio_remove(struct platform_device *pdev)
-{
- struct mii_bus *bus = platform_get_drvdata(pdev);
- struct mdio_fsl_priv *priv = bus->priv;
-
- mdiobus_unregister(bus);
- iounmap(priv->mdio_base);
- mdiobus_free(bus);
-
- return 0;
}
static const struct of_device_id xgmac_mdio_match[] = {
@@ -364,7 +396,6 @@ static struct platform_driver xgmac_mdio_driver = {
.acpi_match_table = xgmac_acpi_match,
},
.probe = xgmac_mdio_probe,
- .remove = xgmac_mdio_remove,
};
module_platform_driver(xgmac_mdio_driver);
diff --git a/drivers/net/ethernet/fungible/Kconfig b/drivers/net/ethernet/fungible/Kconfig
new file mode 100644
index 000000000000..1ecedecc0f6c
--- /dev/null
+++ b/drivers/net/ethernet/fungible/Kconfig
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Fungible network driver configuration
+#
+
+config NET_VENDOR_FUNGIBLE
+ bool "Fungible devices"
+ default y
+ help
+ If you have a Fungible network device, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Fungible cards. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_FUNGIBLE
+
+config FUN_CORE
+ tristate
+ select SBITMAP
+ help
+ A service module offering basic common services to Fungible
+ device drivers.
+
+source "drivers/net/ethernet/fungible/funeth/Kconfig"
+
+endif # NET_VENDOR_FUNGIBLE
diff --git a/drivers/net/ethernet/fungible/Makefile b/drivers/net/ethernet/fungible/Makefile
new file mode 100644
index 000000000000..df759f1585a1
--- /dev/null
+++ b/drivers/net/ethernet/fungible/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+#
+# Makefile for the Fungible network device drivers.
+#
+
+obj-$(CONFIG_FUN_CORE) += funcore/
+obj-$(CONFIG_FUN_ETH) += funeth/
diff --git a/drivers/net/ethernet/fungible/funcore/Makefile b/drivers/net/ethernet/fungible/funcore/Makefile
new file mode 100644
index 000000000000..bc16b264b53e
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funcore/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+obj-$(CONFIG_FUN_CORE) += funcore.o
+
+funcore-y := fun_dev.o fun_queue.o
diff --git a/drivers/net/ethernet/fungible/funcore/fun_dev.c b/drivers/net/ethernet/fungible/funcore/fun_dev.c
new file mode 100644
index 000000000000..5d7aef73df61
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funcore/fun_dev.c
@@ -0,0 +1,843 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include <linux/aer.h>
+#include <linux/bitmap.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/nvme.h>
+#include <linux/pci.h>
+#include <linux/wait.h>
+#include <linux/sched/signal.h>
+
+#include "fun_queue.h"
+#include "fun_dev.h"
+
+#define FUN_ADMIN_CMD_TO_MS 3000
+
+enum {
+ AQA_ASQS_SHIFT = 0,
+ AQA_ACQS_SHIFT = 16,
+ AQA_MIN_QUEUE_SIZE = 2,
+ AQA_MAX_QUEUE_SIZE = 4096
+};
+
+/* context for admin commands */
+struct fun_cmd_ctx {
+ fun_admin_callback_t cb; /* callback to invoke on completion */
+ void *cb_data; /* user data provided to callback */
+ int cpu; /* CPU where the cmd's tag was allocated */
+};
+
+/* Context for synchronous admin commands. */
+struct fun_sync_cmd_ctx {
+ struct completion compl;
+ u8 *rsp_buf; /* caller provided response buffer */
+ unsigned int rsp_len; /* response buffer size */
+ u8 rsp_status; /* command response status */
+};
+
+/* Wait for the CSTS.RDY bit to match @enabled. */
+static int fun_wait_ready(struct fun_dev *fdev, bool enabled)
+{
+ unsigned int cap_to = NVME_CAP_TIMEOUT(fdev->cap_reg);
+ u32 bit = enabled ? NVME_CSTS_RDY : 0;
+ unsigned long deadline;
+
+ deadline = ((cap_to + 1) * HZ / 2) + jiffies; /* CAP.TO is in 500ms units */
+
+ for (;;) {
+ u32 csts = readl(fdev->bar + NVME_REG_CSTS);
+
+ if (csts == ~0) {
+ dev_err(fdev->dev, "CSTS register read %#x\n", csts);
+ return -EIO;
+ }
+
+ if ((csts & NVME_CSTS_RDY) == bit)
+ return 0;
+
+ if (time_is_before_jiffies(deadline))
+ break;
+
+ msleep(100);
+ }
+
+ dev_err(fdev->dev,
+ "Timed out waiting for device to indicate RDY %u; aborting %s\n",
+ enabled, enabled ? "initialization" : "reset");
+ return -ETIMEDOUT;
+}
+
+/* Check CSTS and return an error if it is unreadable or has an unexpected
+ * RDY value.
+ */
+static int fun_check_csts_rdy(struct fun_dev *fdev, unsigned int expected_rdy)
+{
+ u32 csts = readl(fdev->bar + NVME_REG_CSTS);
+ u32 actual_rdy = csts & NVME_CSTS_RDY;
+
+ if (csts == ~0) {
+ dev_err(fdev->dev, "CSTS register read %#x\n", csts);
+ return -EIO;
+ }
+ if (actual_rdy != expected_rdy) {
+ dev_err(fdev->dev, "Unexpected CSTS RDY %u\n", actual_rdy);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Check that CSTS RDY has the expected value. Then write a new value to the CC
+ * register and wait for CSTS RDY to match the new CC ENABLE state.
+ */
+static int fun_update_cc_enable(struct fun_dev *fdev, unsigned int initial_rdy)
+{
+ int rc = fun_check_csts_rdy(fdev, initial_rdy);
+
+ if (rc)
+ return rc;
+ writel(fdev->cc_reg, fdev->bar + NVME_REG_CC);
+ return fun_wait_ready(fdev, !!(fdev->cc_reg & NVME_CC_ENABLE));
+}
+
+static int fun_disable_ctrl(struct fun_dev *fdev)
+{
+ fdev->cc_reg &= ~(NVME_CC_SHN_MASK | NVME_CC_ENABLE);
+ return fun_update_cc_enable(fdev, 1);
+}
+
+static int fun_enable_ctrl(struct fun_dev *fdev, u32 admin_cqesz_log2,
+ u32 admin_sqesz_log2)
+{
+ fdev->cc_reg = (admin_cqesz_log2 << NVME_CC_IOCQES_SHIFT) |
+ (admin_sqesz_log2 << NVME_CC_IOSQES_SHIFT) |
+ ((PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT) |
+ NVME_CC_ENABLE;
+
+ return fun_update_cc_enable(fdev, 0);
+}
+
+static int fun_map_bars(struct fun_dev *fdev, const char *name)
+{
+ struct pci_dev *pdev = to_pci_dev(fdev->dev);
+ int err;
+
+ err = pci_request_mem_regions(pdev, name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Couldn't get PCI memory resources, err %d\n", err);
+ return err;
+ }
+
+ fdev->bar = pci_ioremap_bar(pdev, 0);
+ if (!fdev->bar) {
+ dev_err(&pdev->dev, "Couldn't map BAR 0\n");
+ pci_release_mem_regions(pdev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void fun_unmap_bars(struct fun_dev *fdev)
+{
+ struct pci_dev *pdev = to_pci_dev(fdev->dev);
+
+ if (fdev->bar) {
+ iounmap(fdev->bar);
+ fdev->bar = NULL;
+ pci_release_mem_regions(pdev);
+ }
+}
+
+static int fun_set_dma_masks(struct device *dev)
+{
+ int err;
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (err)
+ dev_err(dev, "DMA mask configuration failed, err %d\n", err);
+ return err;
+}
+
+static irqreturn_t fun_admin_irq(int irq, void *data)
+{
+ struct fun_queue *funq = data;
+
+ return fun_process_cq(funq, 0) ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static void fun_complete_admin_cmd(struct fun_queue *funq, void *data,
+ void *entry, const struct fun_cqe_info *info)
+{
+ const struct fun_admin_rsp_common *rsp_common = entry;
+ struct fun_dev *fdev = funq->fdev;
+ struct fun_cmd_ctx *cmd_ctx;
+ int cpu;
+ u16 cid;
+
+ if (info->sqhd == cpu_to_be16(0xffff)) {
+ dev_dbg(fdev->dev, "adminq event");
+ if (fdev->adminq_cb)
+ fdev->adminq_cb(fdev, entry);
+ return;
+ }
+
+ cid = be16_to_cpu(rsp_common->cid);
+ dev_dbg(fdev->dev, "admin CQE cid %u, op %u, ret %u\n", cid,
+ rsp_common->op, rsp_common->ret);
+
+ cmd_ctx = &fdev->cmd_ctx[cid];
+ if (cmd_ctx->cpu < 0) {
+ dev_err(fdev->dev,
+ "admin CQE with CID=%u, op=%u does not match a pending command\n",
+ cid, rsp_common->op);
+ return;
+ }
+
+ if (cmd_ctx->cb)
+ cmd_ctx->cb(fdev, entry, xchg(&cmd_ctx->cb_data, NULL));
+
+ cpu = cmd_ctx->cpu;
+ cmd_ctx->cpu = -1;
+ sbitmap_queue_clear(&fdev->admin_sbq, cid, cpu);
+}
+
+static int fun_init_cmd_ctx(struct fun_dev *fdev, unsigned int ntags)
+{
+ unsigned int i;
+
+ fdev->cmd_ctx = kvcalloc(ntags, sizeof(*fdev->cmd_ctx), GFP_KERNEL);
+ if (!fdev->cmd_ctx)
+ return -ENOMEM;
+
+ for (i = 0; i < ntags; i++)
+ fdev->cmd_ctx[i].cpu = -1;
+
+ return 0;
+}
+
+/* Allocate and enable an admin queue and assign it the first IRQ vector. */
+static int fun_enable_admin_queue(struct fun_dev *fdev,
+ const struct fun_dev_params *areq)
+{
+ struct fun_queue_alloc_req qreq = {
+ .cqe_size_log2 = areq->cqe_size_log2,
+ .sqe_size_log2 = areq->sqe_size_log2,
+ .cq_depth = areq->cq_depth,
+ .sq_depth = areq->sq_depth,
+ .rq_depth = areq->rq_depth,
+ };
+ unsigned int ntags = areq->sq_depth - 1;
+ struct fun_queue *funq;
+ int rc;
+
+ if (fdev->admin_q)
+ return -EEXIST;
+
+ if (areq->sq_depth < AQA_MIN_QUEUE_SIZE ||
+ areq->sq_depth > AQA_MAX_QUEUE_SIZE ||
+ areq->cq_depth < AQA_MIN_QUEUE_SIZE ||
+ areq->cq_depth > AQA_MAX_QUEUE_SIZE)
+ return -EINVAL;
+
+ fdev->admin_q = fun_alloc_queue(fdev, 0, &qreq);
+ if (!fdev->admin_q)
+ return -ENOMEM;
+
+ rc = fun_init_cmd_ctx(fdev, ntags);
+ if (rc)
+ goto free_q;
+
+ rc = sbitmap_queue_init_node(&fdev->admin_sbq, ntags, -1, false,
+ GFP_KERNEL, dev_to_node(fdev->dev));
+ if (rc)
+ goto free_cmd_ctx;
+
+ funq = fdev->admin_q;
+ funq->cq_vector = 0;
+ rc = fun_request_irq(funq, dev_name(fdev->dev), fun_admin_irq, funq);
+ if (rc)
+ goto free_sbq;
+
+ fun_set_cq_callback(funq, fun_complete_admin_cmd, NULL);
+ fdev->adminq_cb = areq->event_cb;
+
+ writel((funq->sq_depth - 1) << AQA_ASQS_SHIFT |
+ (funq->cq_depth - 1) << AQA_ACQS_SHIFT,
+ fdev->bar + NVME_REG_AQA);
+
+ writeq(funq->sq_dma_addr, fdev->bar + NVME_REG_ASQ);
+ writeq(funq->cq_dma_addr, fdev->bar + NVME_REG_ACQ);
+
+ rc = fun_enable_ctrl(fdev, areq->cqe_size_log2, areq->sqe_size_log2);
+ if (rc)
+ goto free_irq;
+
+ if (areq->rq_depth) {
+ rc = fun_create_rq(funq);
+ if (rc)
+ goto disable_ctrl;
+
+ funq_rq_post(funq);
+ }
+
+ return 0;
+
+disable_ctrl:
+ fun_disable_ctrl(fdev);
+free_irq:
+ fun_free_irq(funq);
+free_sbq:
+ sbitmap_queue_free(&fdev->admin_sbq);
+free_cmd_ctx:
+ kvfree(fdev->cmd_ctx);
+ fdev->cmd_ctx = NULL;
+free_q:
+ fun_free_queue(fdev->admin_q);
+ fdev->admin_q = NULL;
+ return rc;
+}
+
+static void fun_disable_admin_queue(struct fun_dev *fdev)
+{
+ struct fun_queue *admq = fdev->admin_q;
+
+ if (!admq)
+ return;
+
+ fun_disable_ctrl(fdev);
+
+ fun_free_irq(admq);
+ __fun_process_cq(admq, 0);
+
+ sbitmap_queue_free(&fdev->admin_sbq);
+
+ kvfree(fdev->cmd_ctx);
+ fdev->cmd_ctx = NULL;
+
+ fun_free_queue(admq);
+ fdev->admin_q = NULL;
+}
+
+/* Return %true if the admin queue has stopped servicing commands as can be
+ * detected through registers. This isn't exhaustive and may provide false
+ * negatives.
+ */
+static bool fun_adminq_stopped(struct fun_dev *fdev)
+{
+ u32 csts = readl(fdev->bar + NVME_REG_CSTS);
+
+ return (csts & (NVME_CSTS_CFS | NVME_CSTS_RDY)) != NVME_CSTS_RDY;
+}
+
+static int fun_wait_for_tag(struct fun_dev *fdev, int *cpup)
+{
+ struct sbitmap_queue *sbq = &fdev->admin_sbq;
+ struct sbq_wait_state *ws = &sbq->ws[0];
+ DEFINE_SBQ_WAIT(wait);
+ int tag;
+
+ for (;;) {
+ sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
+ if (fdev->suppress_cmds) {
+ tag = -ESHUTDOWN;
+ break;
+ }
+ tag = sbitmap_queue_get(sbq, cpup);
+ if (tag >= 0)
+ break;
+ schedule();
+ }
+
+ sbitmap_finish_wait(sbq, ws, &wait);
+ return tag;
+}
+
+/* Submit an asynchronous admin command. Caller is responsible for implementing
+ * any waiting or timeout. Upon command completion the callback @cb is called.
+ */
+int fun_submit_admin_cmd(struct fun_dev *fdev, struct fun_admin_req_common *cmd,
+ fun_admin_callback_t cb, void *cb_data, bool wait_ok)
+{
+ struct fun_queue *funq = fdev->admin_q;
+ unsigned int cmdsize = cmd->len8 * 8;
+ struct fun_cmd_ctx *cmd_ctx;
+ int tag, cpu, rc = 0;
+
+ if (WARN_ON(cmdsize > (1 << funq->sqe_size_log2)))
+ return -EMSGSIZE;
+
+ tag = sbitmap_queue_get(&fdev->admin_sbq, &cpu);
+ if (tag < 0) {
+ if (!wait_ok)
+ return -EAGAIN;
+ tag = fun_wait_for_tag(fdev, &cpu);
+ if (tag < 0)
+ return tag;
+ }
+
+ cmd->cid = cpu_to_be16(tag);
+
+ cmd_ctx = &fdev->cmd_ctx[tag];
+ cmd_ctx->cb = cb;
+ cmd_ctx->cb_data = cb_data;
+
+ spin_lock(&funq->sq_lock);
+
+ if (unlikely(fdev->suppress_cmds)) {
+ rc = -ESHUTDOWN;
+ sbitmap_queue_clear(&fdev->admin_sbq, tag, cpu);
+ } else {
+ cmd_ctx->cpu = cpu;
+ memcpy(fun_sqe_at(funq, funq->sq_tail), cmd, cmdsize);
+
+ dev_dbg(fdev->dev, "admin cmd @ %u: %8ph\n", funq->sq_tail,
+ cmd);
+
+ if (++funq->sq_tail == funq->sq_depth)
+ funq->sq_tail = 0;
+ writel(funq->sq_tail, funq->sq_db);
+ }
+ spin_unlock(&funq->sq_lock);
+ return rc;
+}
+
+/* Abandon a pending admin command by clearing the issuer's callback data.
+ * Failure indicates that the command either has already completed or its
+ * completion is racing with this call.
+ */
+static bool fun_abandon_admin_cmd(struct fun_dev *fd,
+ const struct fun_admin_req_common *cmd,
+ void *cb_data)
+{
+ u16 cid = be16_to_cpu(cmd->cid);
+ struct fun_cmd_ctx *cmd_ctx = &fd->cmd_ctx[cid];
+
+ return cmpxchg(&cmd_ctx->cb_data, cb_data, NULL) == cb_data;
+}
+
+/* Stop submission of new admin commands and wake up any processes waiting for
+ * tags. Already submitted commands are left to complete or time out.
+ */
+static void fun_admin_stop(struct fun_dev *fdev)
+{
+ spin_lock(&fdev->admin_q->sq_lock);
+ fdev->suppress_cmds = true;
+ spin_unlock(&fdev->admin_q->sq_lock);
+ sbitmap_queue_wake_all(&fdev->admin_sbq);
+}
+
+/* The callback for synchronous execution of admin commands. It copies the
+ * command response to the caller's buffer and signals completion.
+ */
+static void fun_admin_cmd_sync_cb(struct fun_dev *fd, void *rsp, void *cb_data)
+{
+ const struct fun_admin_rsp_common *rsp_common = rsp;
+ struct fun_sync_cmd_ctx *ctx = cb_data;
+
+ if (!ctx)
+ return; /* command issuer timed out and left */
+ if (ctx->rsp_buf) {
+ unsigned int rsp_len = rsp_common->len8 * 8;
+
+ if (unlikely(rsp_len > ctx->rsp_len)) {
+ dev_err(fd->dev,
+ "response for op %u is %uB > response buffer %uB\n",
+ rsp_common->op, rsp_len, ctx->rsp_len);
+ rsp_len = ctx->rsp_len;
+ }
+ memcpy(ctx->rsp_buf, rsp, rsp_len);
+ }
+ ctx->rsp_status = rsp_common->ret;
+ complete(&ctx->compl);
+}
+
+/* Submit a synchronous admin command. */
+int fun_submit_admin_sync_cmd(struct fun_dev *fdev,
+ struct fun_admin_req_common *cmd, void *rsp,
+ size_t rspsize, unsigned int timeout)
+{
+ struct fun_sync_cmd_ctx ctx = {
+ .compl = COMPLETION_INITIALIZER_ONSTACK(ctx.compl),
+ .rsp_buf = rsp,
+ .rsp_len = rspsize,
+ };
+ unsigned int cmdlen = cmd->len8 * 8;
+ unsigned long jiffies_left;
+ int ret;
+
+ ret = fun_submit_admin_cmd(fdev, cmd, fun_admin_cmd_sync_cb, &ctx,
+ true);
+ if (ret)
+ return ret;
+
+ if (!timeout)
+ timeout = FUN_ADMIN_CMD_TO_MS;
+
+ jiffies_left = wait_for_completion_timeout(&ctx.compl,
+ msecs_to_jiffies(timeout));
+ if (!jiffies_left) {
+ /* The command timed out. Attempt to cancel it so we can return.
+ * But if the command is in the process of completing we'll
+ * wait for it.
+ */
+ if (fun_abandon_admin_cmd(fdev, cmd, &ctx)) {
+ dev_err(fdev->dev, "admin command timed out: %*ph\n",
+ cmdlen, cmd);
+ fun_admin_stop(fdev);
+ /* see if the timeout was due to a queue failure */
+ if (fun_adminq_stopped(fdev))
+ dev_err(fdev->dev,
+ "device does not accept admin commands\n");
+
+ return -ETIMEDOUT;
+ }
+ wait_for_completion(&ctx.compl);
+ }
+
+ if (ctx.rsp_status) {
+ dev_err(fdev->dev, "admin command failed, err %d: %*ph\n",
+ ctx.rsp_status, cmdlen, cmd);
+ }
+
+ return -ctx.rsp_status;
+}
+EXPORT_SYMBOL_GPL(fun_submit_admin_sync_cmd);
+
+/* Return the number of device resources of the requested type. */
+int fun_get_res_count(struct fun_dev *fdev, enum fun_admin_op res)
+{
+ union {
+ struct fun_admin_res_count_req req;
+ struct fun_admin_res_count_rsp rsp;
+ } cmd;
+ int rc;
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(res, sizeof(cmd.req));
+ cmd.req.count = FUN_ADMIN_SIMPLE_SUBOP_INIT(FUN_ADMIN_SUBOP_RES_COUNT,
+ 0, 0);
+
+ rc = fun_submit_admin_sync_cmd(fdev, &cmd.req.common, &cmd.rsp,
+ sizeof(cmd), 0);
+ return rc ? rc : be32_to_cpu(cmd.rsp.count.data);
+}
+EXPORT_SYMBOL_GPL(fun_get_res_count);
+
+/* Request that the instance of resource @res with the given id be deleted. */
+int fun_res_destroy(struct fun_dev *fdev, enum fun_admin_op res,
+ unsigned int flags, u32 id)
+{
+ struct fun_admin_generic_destroy_req req = {
+ .common = FUN_ADMIN_REQ_COMMON_INIT2(res, sizeof(req)),
+ .destroy = FUN_ADMIN_SIMPLE_SUBOP_INIT(FUN_ADMIN_SUBOP_DESTROY,
+ flags, id)
+ };
+
+ return fun_submit_admin_sync_cmd(fdev, &req.common, NULL, 0, 0);
+}
+EXPORT_SYMBOL_GPL(fun_res_destroy);
+
+/* Bind two entities of the given types and IDs. */
+int fun_bind(struct fun_dev *fdev, enum fun_admin_bind_type type0,
+ unsigned int id0, enum fun_admin_bind_type type1,
+ unsigned int id1)
+{
+ struct {
+ struct fun_admin_bind_req req;
+ struct fun_admin_bind_entry entry[2];
+ } cmd = {
+ .req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_BIND,
+ sizeof(cmd)),
+ .entry[0] = FUN_ADMIN_BIND_ENTRY_INIT(type0, id0),
+ .entry[1] = FUN_ADMIN_BIND_ENTRY_INIT(type1, id1),
+ };
+
+ return fun_submit_admin_sync_cmd(fdev, &cmd.req.common, NULL, 0, 0);
+}
+EXPORT_SYMBOL_GPL(fun_bind);
+
+static int fun_get_dev_limits(struct fun_dev *fdev)
+{
+ struct pci_dev *pdev = to_pci_dev(fdev->dev);
+ unsigned int cq_count, sq_count, num_dbs;
+ int rc;
+
+ rc = fun_get_res_count(fdev, FUN_ADMIN_OP_EPCQ);
+ if (rc < 0)
+ return rc;
+ cq_count = rc;
+
+ rc = fun_get_res_count(fdev, FUN_ADMIN_OP_EPSQ);
+ if (rc < 0)
+ return rc;
+ sq_count = rc;
+
+ /* The admin queue consumes 1 CQ and at least 1 SQ. To be usable the
+ * device must provide additional queues.
+ */
+ if (cq_count < 2 || sq_count < 2 + !!fdev->admin_q->rq_depth)
+ return -EINVAL;
+
+ /* Calculate the max QID based on SQ/CQ/doorbell counts.
+ * SQ/CQ doorbells alternate.
+ */
+ num_dbs = (pci_resource_len(pdev, 0) - NVME_REG_DBS) /
+ (fdev->db_stride * 4);
+ fdev->max_qid = min3(cq_count, sq_count, num_dbs / 2) - 1;
+ fdev->kern_end_qid = fdev->max_qid + 1;
+ return 0;
+}
+
+/* Allocate all MSI-X vectors available on a function, requiring at least @min_vecs. */
+static int fun_alloc_irqs(struct pci_dev *pdev, unsigned int min_vecs)
+{
+ int vecs, num_msix = pci_msix_vec_count(pdev);
+
+ if (num_msix < 0)
+ return num_msix;
+ if (min_vecs > num_msix)
+ return -ERANGE;
+
+ vecs = pci_alloc_irq_vectors(pdev, min_vecs, num_msix, PCI_IRQ_MSIX);
+ if (vecs > 0) {
+ dev_info(&pdev->dev,
+ "Allocated %d IRQ vectors of %d requested\n",
+ vecs, num_msix);
+ } else {
+ dev_err(&pdev->dev,
+ "Unable to allocate at least %u IRQ vectors\n",
+ min_vecs);
+ }
+ return vecs;
+}
+
+/* Allocate and initialize the IRQ manager state. */
+static int fun_alloc_irq_mgr(struct fun_dev *fdev)
+{
+ fdev->irq_map = bitmap_zalloc(fdev->num_irqs, GFP_KERNEL);
+ if (!fdev->irq_map)
+ return -ENOMEM;
+
+ spin_lock_init(&fdev->irqmgr_lock);
+ /* mark IRQ 0 as allocated; it is used by the admin queue */
+ __set_bit(0, fdev->irq_map);
+ fdev->irqs_avail = fdev->num_irqs - 1;
+ return 0;
+}
+
+/* Reserve @nirqs of the currently available IRQs and return their indices. */
+int fun_reserve_irqs(struct fun_dev *fdev, unsigned int nirqs, u16 *irq_indices)
+{
+ unsigned int b, n = 0;
+ int err = -ENOSPC;
+
+ if (!nirqs)
+ return 0;
+
+ spin_lock(&fdev->irqmgr_lock);
+ if (nirqs > fdev->irqs_avail)
+ goto unlock;
+
+ for_each_clear_bit(b, fdev->irq_map, fdev->num_irqs) {
+ __set_bit(b, fdev->irq_map);
+ irq_indices[n++] = b;
+ if (n >= nirqs)
+ break;
+ }
+
+ WARN_ON(n < nirqs);
+ fdev->irqs_avail -= n;
+ err = n;
+unlock:
+ spin_unlock(&fdev->irqmgr_lock);
+ return err;
+}
+EXPORT_SYMBOL(fun_reserve_irqs);
+
+/* Release @nirqs previously allocated IRQs with the supplied indices. */
+void fun_release_irqs(struct fun_dev *fdev, unsigned int nirqs,
+ u16 *irq_indices)
+{
+ unsigned int i;
+
+ spin_lock(&fdev->irqmgr_lock);
+ for (i = 0; i < nirqs; i++)
+ __clear_bit(irq_indices[i], fdev->irq_map);
+ fdev->irqs_avail += nirqs;
+ spin_unlock(&fdev->irqmgr_lock);
+}
+EXPORT_SYMBOL(fun_release_irqs);
+
+static void fun_serv_handler(struct work_struct *work)
+{
+ struct fun_dev *fd = container_of(work, struct fun_dev, service_task);
+
+ if (test_bit(FUN_SERV_DISABLED, &fd->service_flags))
+ return;
+ if (fd->serv_cb)
+ fd->serv_cb(fd);
+}
+
+void fun_serv_stop(struct fun_dev *fd)
+{
+ set_bit(FUN_SERV_DISABLED, &fd->service_flags);
+ cancel_work_sync(&fd->service_task);
+}
+EXPORT_SYMBOL_GPL(fun_serv_stop);
+
+void fun_serv_restart(struct fun_dev *fd)
+{
+ clear_bit(FUN_SERV_DISABLED, &fd->service_flags);
+ if (fd->service_flags)
+ schedule_work(&fd->service_task);
+}
+EXPORT_SYMBOL_GPL(fun_serv_restart);
+
+void fun_serv_sched(struct fun_dev *fd)
+{
+ if (!test_bit(FUN_SERV_DISABLED, &fd->service_flags))
+ schedule_work(&fd->service_task);
+}
+EXPORT_SYMBOL_GPL(fun_serv_sched);
+
+/* Check and try to get the device into a proper state for initialization,
+ * i.e., CSTS.RDY = CC.EN = 0.
+ */
+static int sanitize_dev(struct fun_dev *fdev)
+{
+ int rc;
+
+ fdev->cap_reg = readq(fdev->bar + NVME_REG_CAP);
+ fdev->cc_reg = readl(fdev->bar + NVME_REG_CC);
+
+ /* First get RDY to agree with the current EN. Give RDY the opportunity
+ * to complete a potential recent EN change.
+ */
+ rc = fun_wait_ready(fdev, fdev->cc_reg & NVME_CC_ENABLE);
+ if (rc)
+ return rc;
+
+ /* Next, reset the device if EN is currently 1. */
+ if (fdev->cc_reg & NVME_CC_ENABLE)
+ rc = fun_disable_ctrl(fdev);
+
+ return rc;
+}
+
+/* Undo the device initialization of fun_dev_enable(). */
+void fun_dev_disable(struct fun_dev *fdev)
+{
+ struct pci_dev *pdev = to_pci_dev(fdev->dev);
+
+ pci_set_drvdata(pdev, NULL);
+
+ if (fdev->fw_handle != FUN_HCI_ID_INVALID) {
+ fun_res_destroy(fdev, FUN_ADMIN_OP_SWUPGRADE, 0,
+ fdev->fw_handle);
+ fdev->fw_handle = FUN_HCI_ID_INVALID;
+ }
+
+ fun_disable_admin_queue(fdev);
+
+ bitmap_free(fdev->irq_map);
+ pci_free_irq_vectors(pdev);
+
+ pci_clear_master(pdev);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+
+ fun_unmap_bars(fdev);
+}
+EXPORT_SYMBOL(fun_dev_disable);
+
+/* Perform basic initialization of a device, including
+ * - PCI config space setup and BAR0 mapping
+ * - interrupt management initialization
+ * - 1 admin queue setup
+ * - determination of some device limits, such as number of queues.
+ */
+int fun_dev_enable(struct fun_dev *fdev, struct pci_dev *pdev,
+ const struct fun_dev_params *areq, const char *name)
+{
+ int rc;
+
+ fdev->dev = &pdev->dev;
+ rc = fun_map_bars(fdev, name);
+ if (rc)
+ return rc;
+
+ rc = fun_set_dma_masks(fdev->dev);
+ if (rc)
+ goto unmap;
+
+ rc = pci_enable_device_mem(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Couldn't enable device, err %d\n", rc);
+ goto unmap;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+
+ rc = sanitize_dev(fdev);
+ if (rc)
+ goto disable_dev;
+
+ fdev->fw_handle = FUN_HCI_ID_INVALID;
+ fdev->q_depth = NVME_CAP_MQES(fdev->cap_reg) + 1;
+ fdev->db_stride = 1 << NVME_CAP_STRIDE(fdev->cap_reg);
+ fdev->dbs = fdev->bar + NVME_REG_DBS;
+
+ INIT_WORK(&fdev->service_task, fun_serv_handler);
+ fdev->service_flags = FUN_SERV_DISABLED;
+ fdev->serv_cb = areq->serv_cb;
+
+ rc = fun_alloc_irqs(pdev, areq->min_msix + 1); /* +1 for admin CQ */
+ if (rc < 0)
+ goto disable_dev;
+ fdev->num_irqs = rc;
+
+ rc = fun_alloc_irq_mgr(fdev);
+ if (rc)
+ goto free_irqs;
+
+ pci_set_master(pdev);
+ rc = fun_enable_admin_queue(fdev, areq);
+ if (rc)
+ goto free_irq_mgr;
+
+ rc = fun_get_dev_limits(fdev);
+ if (rc < 0)
+ goto disable_admin;
+
+ pci_save_state(pdev);
+ pci_set_drvdata(pdev, fdev);
+ pcie_print_link_status(pdev);
+ dev_dbg(fdev->dev, "q_depth %u, db_stride %u, max qid %d kern_end_qid %d\n",
+ fdev->q_depth, fdev->db_stride, fdev->max_qid,
+ fdev->kern_end_qid);
+ return 0;
+
+disable_admin:
+ fun_disable_admin_queue(fdev);
+free_irq_mgr:
+ pci_clear_master(pdev);
+ bitmap_free(fdev->irq_map);
+free_irqs:
+ pci_free_irq_vectors(pdev);
+disable_dev:
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+unmap:
+ fun_unmap_bars(fdev);
+ return rc;
+}
+EXPORT_SYMBOL(fun_dev_enable);
+
+MODULE_AUTHOR("Dimitris Michailidis <dmichail@fungible.com>");
+MODULE_DESCRIPTION("Core services driver for Fungible devices");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/fungible/funcore/fun_dev.h b/drivers/net/ethernet/fungible/funcore/fun_dev.h
new file mode 100644
index 000000000000..9e8c17ce8887
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funcore/fun_dev.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef _FUNDEV_H
+#define _FUNDEV_H
+
+#include <linux/sbitmap.h>
+#include <linux/spinlock_types.h>
+#include <linux/workqueue.h>
+#include "fun_hci.h"
+
+struct pci_dev;
+struct fun_dev;
+struct fun_queue;
+struct fun_cmd_ctx;
+struct fun_queue_alloc_req;
+
+/* doorbell fields */
+enum {
+ FUN_DB_QIDX_S = 0,
+ FUN_DB_INTCOAL_ENTRIES_S = 16,
+ FUN_DB_INTCOAL_ENTRIES_M = 0x7f,
+ FUN_DB_INTCOAL_USEC_S = 23,
+ FUN_DB_INTCOAL_USEC_M = 0x7f,
+ FUN_DB_IRQ_S = 30,
+ FUN_DB_IRQ_F = 1 << FUN_DB_IRQ_S,
+ FUN_DB_IRQ_ARM_S = 31,
+ FUN_DB_IRQ_ARM_F = 1U << FUN_DB_IRQ_ARM_S
+};
+
+/* Callback for asynchronous admin commands.
+ * Invoked on reception of command response.
+ */
+typedef void (*fun_admin_callback_t)(struct fun_dev *fdev, void *rsp,
+ void *cb_data);
+
+/* Callback for events/notifications received by an admin queue. */
+typedef void (*fun_admin_event_cb)(struct fun_dev *fdev, void *cqe);
+
+/* Callback for pending work handled by the service task. */
+typedef void (*fun_serv_cb)(struct fun_dev *fd);
+
+/* service task flags */
+enum {
+ FUN_SERV_DISABLED, /* service task is disabled */
+ FUN_SERV_FIRST_AVAIL
+};
+
+/* Driver state associated with a PCI function. */
+struct fun_dev {
+ struct device *dev;
+
+ void __iomem *bar; /* start of BAR0 mapping */
+ u32 __iomem *dbs; /* start of doorbells in BAR0 mapping */
+
+ /* admin queue */
+ struct fun_queue *admin_q;
+ struct sbitmap_queue admin_sbq;
+ struct fun_cmd_ctx *cmd_ctx;
+ fun_admin_event_cb adminq_cb;
+ bool suppress_cmds; /* if set don't write commands to SQ */
+
+ /* address increment between consecutive doorbells, in 4B units */
+ unsigned int db_stride;
+
+ /* SW versions of device registers */
+ u32 cc_reg; /* CC register */
+ u64 cap_reg; /* CAPability register */
+
+ unsigned int q_depth; /* max queue depth supported by device */
+ unsigned int max_qid; /* = #queues - 1, separately for SQs and CQs */
+ unsigned int kern_end_qid; /* last qid in the kernel range + 1 */
+
+ unsigned int fw_handle;
+
+ /* IRQ manager */
+ unsigned int num_irqs;
+ unsigned int irqs_avail;
+ spinlock_t irqmgr_lock;
+ unsigned long *irq_map;
+
+ /* The service task handles work that needs a process context */
+ struct work_struct service_task;
+ unsigned long service_flags;
+ fun_serv_cb serv_cb;
+};
+
+struct fun_dev_params {
+ u8 cqe_size_log2; /* admin q CQE size */
+ u8 sqe_size_log2; /* admin q SQE size */
+
+ /* admin q depths */
+ u16 cq_depth;
+ u16 sq_depth;
+ u16 rq_depth;
+
+ u16 min_msix; /* min vectors needed by requesting driver */
+
+ fun_admin_event_cb event_cb;
+ fun_serv_cb serv_cb;
+};
+
+/* Return the BAR address of a doorbell. */
+static inline u32 __iomem *fun_db_addr(const struct fun_dev *fdev,
+ unsigned int db_index)
+{
+ return &fdev->dbs[db_index * fdev->db_stride];
+}
+
+/* Return the BAR address of an SQ doorbell. SQ and CQ DBs alternate,
+ * SQs have even DB indices.
+ */
+static inline u32 __iomem *fun_sq_db_addr(const struct fun_dev *fdev,
+ unsigned int sqid)
+{
+ return fun_db_addr(fdev, sqid * 2);
+}
+
+static inline u32 __iomem *fun_cq_db_addr(const struct fun_dev *fdev,
+ unsigned int cqid)
+{
+ return fun_db_addr(fdev, cqid * 2 + 1);
+}
+
+int fun_get_res_count(struct fun_dev *fdev, enum fun_admin_op res);
+int fun_res_destroy(struct fun_dev *fdev, enum fun_admin_op res,
+ unsigned int flags, u32 id);
+int fun_bind(struct fun_dev *fdev, enum fun_admin_bind_type type0,
+ unsigned int id0, enum fun_admin_bind_type type1,
+ unsigned int id1);
+
+int fun_submit_admin_cmd(struct fun_dev *fdev, struct fun_admin_req_common *cmd,
+ fun_admin_callback_t cb, void *cb_data, bool wait_ok);
+int fun_submit_admin_sync_cmd(struct fun_dev *fdev,
+ struct fun_admin_req_common *cmd, void *rsp,
+ size_t rspsize, unsigned int timeout);
+
+int fun_dev_enable(struct fun_dev *fdev, struct pci_dev *pdev,
+ const struct fun_dev_params *areq, const char *name);
+void fun_dev_disable(struct fun_dev *fdev);
+
+int fun_reserve_irqs(struct fun_dev *fdev, unsigned int nirqs,
+ u16 *irq_indices);
+void fun_release_irqs(struct fun_dev *fdev, unsigned int nirqs,
+ u16 *irq_indices);
+
+void fun_serv_stop(struct fun_dev *fd);
+void fun_serv_restart(struct fun_dev *fd);
+void fun_serv_sched(struct fun_dev *fd);
+
+#endif /* _FUNDEV_H */
diff --git a/drivers/net/ethernet/fungible/funcore/fun_hci.h b/drivers/net/ethernet/fungible/funcore/fun_hci.h
new file mode 100644
index 000000000000..257203e94b68
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funcore/fun_hci.h
@@ -0,0 +1,1202 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef __FUN_HCI_H
+#define __FUN_HCI_H
+
+enum {
+ FUN_HCI_ID_INVALID = 0xffffffff,
+};
+
+enum fun_admin_op {
+ FUN_ADMIN_OP_BIND = 0x1,
+ FUN_ADMIN_OP_EPCQ = 0x11,
+ FUN_ADMIN_OP_EPSQ = 0x12,
+ FUN_ADMIN_OP_PORT = 0x13,
+ FUN_ADMIN_OP_ETH = 0x14,
+ FUN_ADMIN_OP_VI = 0x15,
+ FUN_ADMIN_OP_SWUPGRADE = 0x1f,
+ FUN_ADMIN_OP_RSS = 0x21,
+ FUN_ADMIN_OP_ADI = 0x25,
+ FUN_ADMIN_OP_KTLS = 0x26,
+};
+
+enum {
+ FUN_REQ_COMMON_FLAG_RSP = 0x1,
+ FUN_REQ_COMMON_FLAG_HEAD_WB = 0x2,
+ FUN_REQ_COMMON_FLAG_INT = 0x4,
+ FUN_REQ_COMMON_FLAG_CQE_IN_RQBUF = 0x8,
+};
+
+struct fun_admin_req_common {
+ __u8 op;
+ __u8 len8;
+ __be16 flags;
+ __u8 suboff8;
+ __u8 rsvd0;
+ __be16 cid;
+};
+
+#define FUN_ADMIN_REQ_COMMON_INIT(_op, _len8, _flags, _suboff8, _cid) \
+ (struct fun_admin_req_common) { \
+ .op = (_op), .len8 = (_len8), .flags = cpu_to_be16(_flags), \
+ .suboff8 = (_suboff8), .cid = cpu_to_be16(_cid), \
+ }
+
+#define FUN_ADMIN_REQ_COMMON_INIT2(_op, _len) \
+ (struct fun_admin_req_common) { \
+ .op = (_op), .len8 = (_len) / 8, \
+ }
+
+struct fun_admin_rsp_common {
+ __u8 op;
+ __u8 len8;
+ __be16 flags;
+ __u8 suboff8;
+ __u8 ret;
+ __be16 cid;
+};
+
+struct fun_admin_write48_req {
+ __be64 key_to_data;
+};
+
+#define FUN_ADMIN_WRITE48_REQ_KEY_S 56U
+#define FUN_ADMIN_WRITE48_REQ_KEY_M 0xff
+#define FUN_ADMIN_WRITE48_REQ_KEY_P_NOSWAP(x) \
+ (((__u64)x) << FUN_ADMIN_WRITE48_REQ_KEY_S)
+
+#define FUN_ADMIN_WRITE48_REQ_DATA_S 0U
+#define FUN_ADMIN_WRITE48_REQ_DATA_M 0xffffffffffff
+#define FUN_ADMIN_WRITE48_REQ_DATA_P_NOSWAP(x) \
+ (((__u64)x) << FUN_ADMIN_WRITE48_REQ_DATA_S)
+
+#define FUN_ADMIN_WRITE48_REQ_INIT(key, data) \
+ (struct fun_admin_write48_req) { \
+ .key_to_data = cpu_to_be64( \
+ FUN_ADMIN_WRITE48_REQ_KEY_P_NOSWAP(key) | \
+ FUN_ADMIN_WRITE48_REQ_DATA_P_NOSWAP(data)), \
+ }
+
+struct fun_admin_write48_rsp {
+ __be64 key_to_data;
+};
+
+struct fun_admin_read48_req {
+ __be64 key_pack;
+};
+
+#define FUN_ADMIN_READ48_REQ_KEY_S 56U
+#define FUN_ADMIN_READ48_REQ_KEY_M 0xff
+#define FUN_ADMIN_READ48_REQ_KEY_P_NOSWAP(x) \
+ (((__u64)x) << FUN_ADMIN_READ48_REQ_KEY_S)
+
+#define FUN_ADMIN_READ48_REQ_INIT(key) \
+ (struct fun_admin_read48_req) { \
+ .key_pack = \
+ cpu_to_be64(FUN_ADMIN_READ48_REQ_KEY_P_NOSWAP(key)), \
+ }
+
+struct fun_admin_read48_rsp {
+ __be64 key_to_data;
+};
+
+#define FUN_ADMIN_READ48_RSP_KEY_S 56U
+#define FUN_ADMIN_READ48_RSP_KEY_M 0xff
+#define FUN_ADMIN_READ48_RSP_KEY_G(x) \
+ ((be64_to_cpu(x) >> FUN_ADMIN_READ48_RSP_KEY_S) & \
+ FUN_ADMIN_READ48_RSP_KEY_M)
+
+#define FUN_ADMIN_READ48_RSP_RET_S 48U
+#define FUN_ADMIN_READ48_RSP_RET_M 0xff
+#define FUN_ADMIN_READ48_RSP_RET_G(x) \
+ ((be64_to_cpu(x) >> FUN_ADMIN_READ48_RSP_RET_S) & \
+ FUN_ADMIN_READ48_RSP_RET_M)
+
+#define FUN_ADMIN_READ48_RSP_DATA_S 0U
+#define FUN_ADMIN_READ48_RSP_DATA_M 0xffffffffffff
+#define FUN_ADMIN_READ48_RSP_DATA_G(x) \
+ ((be64_to_cpu(x) >> FUN_ADMIN_READ48_RSP_DATA_S) & \
+ FUN_ADMIN_READ48_RSP_DATA_M)
+
+enum fun_admin_bind_type {
+ FUN_ADMIN_BIND_TYPE_EPCQ = 0x1,
+ FUN_ADMIN_BIND_TYPE_EPSQ = 0x2,
+ FUN_ADMIN_BIND_TYPE_PORT = 0x3,
+ FUN_ADMIN_BIND_TYPE_RSS = 0x4,
+ FUN_ADMIN_BIND_TYPE_VI = 0x5,
+ FUN_ADMIN_BIND_TYPE_ETH = 0x6,
+};
+
+struct fun_admin_bind_entry {
+ __u8 type;
+ __u8 rsvd0[3];
+ __be32 id;
+};
+
+#define FUN_ADMIN_BIND_ENTRY_INIT(_type, _id) \
+ (struct fun_admin_bind_entry) { \
+ .type = (_type), .id = cpu_to_be32(_id), \
+ }
+
+struct fun_admin_bind_req {
+ struct fun_admin_req_common common;
+ struct fun_admin_bind_entry entry[];
+};
+
+struct fun_admin_bind_rsp {
+ struct fun_admin_rsp_common bind_rsp_common;
+};
+
+struct fun_admin_simple_subop {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 data;
+};
+
+#define FUN_ADMIN_SIMPLE_SUBOP_INIT(_subop, _flags, _data) \
+ (struct fun_admin_simple_subop) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .data = cpu_to_be32(_data), \
+ }
+
+enum fun_admin_subop {
+ FUN_ADMIN_SUBOP_CREATE = 0x10,
+ FUN_ADMIN_SUBOP_DESTROY = 0x11,
+ FUN_ADMIN_SUBOP_MODIFY = 0x12,
+ FUN_ADMIN_SUBOP_RES_COUNT = 0x14,
+ FUN_ADMIN_SUBOP_READ = 0x15,
+ FUN_ADMIN_SUBOP_WRITE = 0x16,
+ FUN_ADMIN_SUBOP_NOTIFY = 0x17,
+};
+
+enum {
+ FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR = 0x1,
+};
+
+struct fun_admin_generic_destroy_req {
+ struct fun_admin_req_common common;
+ struct fun_admin_simple_subop destroy;
+};
+
+struct fun_admin_generic_create_rsp {
+ struct fun_admin_rsp_common common;
+
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+};
+
+struct fun_admin_res_count_req {
+ struct fun_admin_req_common common;
+ struct fun_admin_simple_subop count;
+};
+
+struct fun_admin_res_count_rsp {
+ struct fun_admin_rsp_common common;
+ struct fun_admin_simple_subop count;
+};
+
+enum {
+ FUN_ADMIN_EPCQ_CREATE_FLAG_INT_EPCQ = 0x2,
+ FUN_ADMIN_EPCQ_CREATE_FLAG_ENTRY_WR_TPH = 0x4,
+ FUN_ADMIN_EPCQ_CREATE_FLAG_SL_WR_TPH = 0x8,
+ FUN_ADMIN_EPCQ_CREATE_FLAG_RQ = 0x80,
+ FUN_ADMIN_EPCQ_CREATE_FLAG_INT_IQ = 0x100,
+ FUN_ADMIN_EPCQ_CREATE_FLAG_INT_NOARM = 0x200,
+ FUN_ADMIN_EPCQ_CREATE_FLAG_DROP_ON_OVERFLOW = 0x400,
+};
+
+struct fun_admin_epcq_req {
+ struct fun_admin_req_common common;
+ union epcq_req_subop {
+ struct fun_admin_epcq_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 epsqid;
+ __u8 rsvd1;
+ __u8 entry_size_log2;
+ __be16 nentries;
+
+ __be64 address;
+
+ __be16 tailroom; /* per packet tailroom in bytes */
+ __u8 headroom; /* per packet headroom in 2B units */
+ __u8 intcoal_kbytes;
+ __u8 intcoal_holdoff_nentries;
+ __u8 intcoal_holdoff_usecs;
+ __be16 intid;
+
+ __be32 scan_start_id;
+ __be32 scan_end_id;
+
+ __be16 tph_cpuid;
+ __u8 rsvd3[6];
+ } create;
+
+ struct fun_admin_epcq_modify_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be16 headroom; /* headroom in bytes */
+ __u8 rsvd1[6];
+ } modify;
+ } u;
+};
+
+#define FUN_ADMIN_EPCQ_CREATE_REQ_INIT( \
+ _subop, _flags, _id, _epsqid, _entry_size_log2, _nentries, _address, \
+ _tailroom, _headroom, _intcoal_kbytes, _intcoal_holdoff_nentries, \
+ _intcoal_holdoff_usecs, _intid, _scan_start_id, _scan_end_id, \
+ _tph_cpuid) \
+ (struct fun_admin_epcq_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .epsqid = cpu_to_be32(_epsqid), \
+ .entry_size_log2 = _entry_size_log2, \
+ .nentries = cpu_to_be16(_nentries), \
+ .address = cpu_to_be64(_address), \
+ .tailroom = cpu_to_be16(_tailroom), .headroom = _headroom, \
+ .intcoal_kbytes = _intcoal_kbytes, \
+ .intcoal_holdoff_nentries = _intcoal_holdoff_nentries, \
+ .intcoal_holdoff_usecs = _intcoal_holdoff_usecs, \
+ .intid = cpu_to_be16(_intid), \
+ .scan_start_id = cpu_to_be32(_scan_start_id), \
+ .scan_end_id = cpu_to_be32(_scan_end_id), \
+ .tph_cpuid = cpu_to_be16(_tph_cpuid), \
+ }
+
+#define FUN_ADMIN_EPCQ_MODIFY_REQ_INIT(_subop, _flags, _id, _headroom) \
+ (struct fun_admin_epcq_modify_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .headroom = cpu_to_be16(_headroom), \
+ }
+
+enum {
+ FUN_ADMIN_EPSQ_CREATE_FLAG_INT_EPSQ = 0x2,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_ENTRY_RD_TPH = 0x4,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_GL_RD_TPH = 0x8,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS = 0x10,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS_TPH = 0x20,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_EPCQ = 0x40,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_RQ = 0x80,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_INT_IQ = 0x100,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_NO_CMPL = 0x200,
+};
+
+struct fun_admin_epsq_req {
+ struct fun_admin_req_common common;
+
+ union epsq_req_subop {
+ struct fun_admin_epsq_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 epcqid;
+ __u8 rsvd1;
+ __u8 entry_size_log2;
+ __be16 nentries;
+
+ __be64 address; /* DMA address of epsq */
+
+ __u8 rsvd2[3];
+ __u8 intcoal_kbytes;
+ __u8 intcoal_holdoff_nentries;
+ __u8 intcoal_holdoff_usecs;
+ __be16 intid;
+
+ __be32 scan_start_id;
+ __be32 scan_end_id;
+
+ __u8 rsvd3[4];
+ __be16 tph_cpuid;
+ __u8 buf_size_log2; /* log2 of RQ buffer size */
+ __u8 head_wb_size_log2; /* log2 of head write back size */
+
+ __be64 head_wb_address; /* DMA address for head writeback */
+ } create;
+ } u;
+};
+
+#define FUN_ADMIN_EPSQ_CREATE_REQ_INIT( \
+ _subop, _flags, _id, _epcqid, _entry_size_log2, _nentries, _address, \
+ _intcoal_kbytes, _intcoal_holdoff_nentries, _intcoal_holdoff_usecs, \
+ _intid, _scan_start_id, _scan_end_id, _tph_cpuid, _buf_size_log2, \
+ _head_wb_size_log2, _head_wb_address) \
+ (struct fun_admin_epsq_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .epcqid = cpu_to_be32(_epcqid), \
+ .entry_size_log2 = _entry_size_log2, \
+ .nentries = cpu_to_be16(_nentries), \
+ .address = cpu_to_be64(_address), \
+ .intcoal_kbytes = _intcoal_kbytes, \
+ .intcoal_holdoff_nentries = _intcoal_holdoff_nentries, \
+ .intcoal_holdoff_usecs = _intcoal_holdoff_usecs, \
+ .intid = cpu_to_be16(_intid), \
+ .scan_start_id = cpu_to_be32(_scan_start_id), \
+ .scan_end_id = cpu_to_be32(_scan_end_id), \
+ .tph_cpuid = cpu_to_be16(_tph_cpuid), \
+ .buf_size_log2 = _buf_size_log2, \
+ .head_wb_size_log2 = _head_wb_size_log2, \
+ .head_wb_address = cpu_to_be64(_head_wb_address), \
+ }
+
+enum {
+ FUN_PORT_CAP_OFFLOADS = 0x1,
+ FUN_PORT_CAP_STATS = 0x2,
+ FUN_PORT_CAP_LOOPBACK = 0x4,
+ FUN_PORT_CAP_VPORT = 0x8,
+ FUN_PORT_CAP_TX_PAUSE = 0x10,
+ FUN_PORT_CAP_RX_PAUSE = 0x20,
+ FUN_PORT_CAP_AUTONEG = 0x40,
+ FUN_PORT_CAP_RSS = 0x80,
+ FUN_PORT_CAP_VLAN_OFFLOADS = 0x100,
+ FUN_PORT_CAP_ENCAP_OFFLOADS = 0x200,
+ FUN_PORT_CAP_1000_X = 0x1000,
+ FUN_PORT_CAP_10G_R = 0x2000,
+ FUN_PORT_CAP_40G_R4 = 0x4000,
+ FUN_PORT_CAP_25G_R = 0x8000,
+ FUN_PORT_CAP_50G_R2 = 0x10000,
+ FUN_PORT_CAP_50G_R = 0x20000,
+ FUN_PORT_CAP_100G_R4 = 0x40000,
+ FUN_PORT_CAP_100G_R2 = 0x80000,
+ FUN_PORT_CAP_200G_R4 = 0x100000,
+ FUN_PORT_CAP_FEC_NONE = 0x10000000,
+ FUN_PORT_CAP_FEC_FC = 0x20000000,
+ FUN_PORT_CAP_FEC_RS = 0x40000000,
+};
+
+enum fun_port_brkout_mode {
+ FUN_PORT_BRKMODE_NA = 0x0,
+ FUN_PORT_BRKMODE_NONE = 0x1,
+ FUN_PORT_BRKMODE_2X = 0x2,
+ FUN_PORT_BRKMODE_4X = 0x3,
+};
+
+enum {
+ FUN_PORT_SPEED_AUTO = 0x0,
+ FUN_PORT_SPEED_10M = 0x1,
+ FUN_PORT_SPEED_100M = 0x2,
+ FUN_PORT_SPEED_1G = 0x4,
+ FUN_PORT_SPEED_10G = 0x8,
+ FUN_PORT_SPEED_25G = 0x10,
+ FUN_PORT_SPEED_40G = 0x20,
+ FUN_PORT_SPEED_50G = 0x40,
+ FUN_PORT_SPEED_100G = 0x80,
+ FUN_PORT_SPEED_200G = 0x100,
+};
+
+enum fun_port_duplex_mode {
+ FUN_PORT_FULL_DUPLEX = 0x0,
+ FUN_PORT_HALF_DUPLEX = 0x1,
+};
+
+enum {
+ FUN_PORT_FEC_NA = 0x0,
+ FUN_PORT_FEC_OFF = 0x1,
+ FUN_PORT_FEC_RS = 0x2,
+ FUN_PORT_FEC_FC = 0x4,
+ FUN_PORT_FEC_AUTO = 0x8,
+};
+
+enum fun_port_link_status {
+ FUN_PORT_LINK_UP = 0x0,
+ FUN_PORT_LINK_UP_WITH_ERR = 0x1,
+ FUN_PORT_LINK_DOWN = 0x2,
+};
+
+enum fun_port_led_type {
+ FUN_PORT_LED_OFF = 0x0,
+ FUN_PORT_LED_AMBER = 0x1,
+ FUN_PORT_LED_GREEN = 0x2,
+ FUN_PORT_LED_BEACON_ON = 0x3,
+ FUN_PORT_LED_BEACON_OFF = 0x4,
+};
+
+enum {
+ FUN_PORT_FLAG_MAC_DOWN = 0x1,
+ FUN_PORT_FLAG_MAC_UP = 0x2,
+ FUN_PORT_FLAG_NH_DOWN = 0x4,
+ FUN_PORT_FLAG_NH_UP = 0x8,
+};
+
+enum {
+ FUN_PORT_FLAG_ENABLE_NOTIFY = 0x1,
+};
+
+enum fun_port_lane_attr {
+ FUN_PORT_LANE_1 = 0x1,
+ FUN_PORT_LANE_2 = 0x2,
+ FUN_PORT_LANE_4 = 0x4,
+ FUN_PORT_LANE_SPEED_10G = 0x100,
+ FUN_PORT_LANE_SPEED_25G = 0x200,
+ FUN_PORT_LANE_SPEED_50G = 0x400,
+ FUN_PORT_LANE_SPLIT = 0x8000,
+};
+
+enum fun_admin_port_subop {
+ FUN_ADMIN_PORT_SUBOP_INETADDR_EVENT = 0x24,
+};
+
+enum fun_admin_port_key {
+ FUN_ADMIN_PORT_KEY_ILLEGAL = 0x0,
+ FUN_ADMIN_PORT_KEY_MTU = 0x1,
+ FUN_ADMIN_PORT_KEY_FEC = 0x2,
+ FUN_ADMIN_PORT_KEY_SPEED = 0x3,
+ FUN_ADMIN_PORT_KEY_DEBOUNCE = 0x4,
+ FUN_ADMIN_PORT_KEY_DUPLEX = 0x5,
+ FUN_ADMIN_PORT_KEY_MACADDR = 0x6,
+ FUN_ADMIN_PORT_KEY_LINKMODE = 0x7,
+ FUN_ADMIN_PORT_KEY_BREAKOUT = 0x8,
+ FUN_ADMIN_PORT_KEY_ENABLE = 0x9,
+ FUN_ADMIN_PORT_KEY_DISABLE = 0xa,
+ FUN_ADMIN_PORT_KEY_ERR_DISABLE = 0xb,
+ FUN_ADMIN_PORT_KEY_CAPABILITIES = 0xc,
+ FUN_ADMIN_PORT_KEY_LP_CAPABILITIES = 0xd,
+ FUN_ADMIN_PORT_KEY_STATS_DMA_LOW = 0xe,
+ FUN_ADMIN_PORT_KEY_STATS_DMA_HIGH = 0xf,
+ FUN_ADMIN_PORT_KEY_LANE_ATTRS = 0x10,
+ FUN_ADMIN_PORT_KEY_LED = 0x11,
+ FUN_ADMIN_PORT_KEY_ADVERT = 0x12,
+};
+
+struct fun_subop_imm {
+ __u8 subop; /* see fun_data_subop enum */
+ __u8 flags;
+ __u8 nsgl;
+ __u8 rsvd0;
+ __be32 len;
+
+ __u8 data[];
+};
+
+enum fun_subop_sgl_flags {
+ FUN_SUBOP_SGL_USE_OFF8 = 0x1,
+ FUN_SUBOP_FLAG_FREE_BUF = 0x2,
+ FUN_SUBOP_FLAG_IS_REFBUF = 0x4,
+ FUN_SUBOP_SGL_FLAG_LOCAL = 0x8,
+};
+
+enum fun_data_op {
+ FUN_DATAOP_INVALID = 0x0,
+ FUN_DATAOP_SL = 0x1, /* scatter */
+ FUN_DATAOP_GL = 0x2, /* gather */
+ FUN_DATAOP_SGL = 0x3, /* scatter-gather */
+ FUN_DATAOP_IMM = 0x4, /* immediate data */
+ FUN_DATAOP_RQBUF = 0x8, /* rq buffer */
+};
+
+struct fun_dataop_gl {
+ __u8 subop;
+ __u8 flags;
+ __be16 sgl_off;
+ __be32 sgl_len;
+
+ __be64 sgl_data;
+};
+
+static inline void fun_dataop_gl_init(struct fun_dataop_gl *s, u8 flags,
+ u16 sgl_off, u32 sgl_len, u64 sgl_data)
+{
+ s->subop = FUN_DATAOP_GL;
+ s->flags = flags;
+ s->sgl_off = cpu_to_be16(sgl_off);
+ s->sgl_len = cpu_to_be32(sgl_len);
+ s->sgl_data = cpu_to_be64(sgl_data);
+}
+
+struct fun_dataop_imm {
+ __u8 subop;
+ __u8 flags;
+ __be16 rsvd0;
+ __be32 sgl_len;
+};
+
+struct fun_subop_sgl {
+ __u8 subop;
+ __u8 flags;
+ __u8 nsgl;
+ __u8 rsvd0;
+ __be32 sgl_len;
+
+ __be64 sgl_data;
+};
+
+#define FUN_SUBOP_SGL_INIT(_subop, _flags, _nsgl, _sgl_len, _sgl_data) \
+ (struct fun_subop_sgl) { \
+ .subop = (_subop), .flags = (_flags), .nsgl = (_nsgl), \
+ .sgl_len = cpu_to_be32(_sgl_len), \
+ .sgl_data = cpu_to_be64(_sgl_data), \
+ }
+
+struct fun_dataop_rqbuf {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 cid;
+ __be32 bufoff;
+};
+
+struct fun_dataop_hdr {
+ __u8 nsgl;
+ __u8 flags;
+ __u8 ngather;
+ __u8 nscatter;
+ __be32 total_len;
+
+ struct fun_dataop_imm imm[];
+};
+
+#define FUN_DATAOP_HDR_INIT(_nsgl, _flags, _ngather, _nscatter, _total_len) \
+ (struct fun_dataop_hdr) { \
+ .nsgl = _nsgl, .flags = _flags, .ngather = _ngather, \
+ .nscatter = _nscatter, .total_len = cpu_to_be32(_total_len), \
+ }
+
+enum fun_port_inetaddr_event_type {
+ FUN_PORT_INETADDR_ADD = 0x1,
+ FUN_PORT_INETADDR_DEL = 0x2,
+};
+
+enum fun_port_inetaddr_addr_family {
+ FUN_PORT_INETADDR_IPV4 = 0x1,
+ FUN_PORT_INETADDR_IPV6 = 0x2,
+};
+
+struct fun_admin_port_req {
+ struct fun_admin_req_common common;
+
+ union port_req_subop {
+ struct fun_admin_port_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+ } create;
+ struct fun_admin_port_write_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id; /* portid */
+
+ struct fun_admin_write48_req write48[];
+ } write;
+ struct fun_admin_port_read_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id; /* portid */
+
+ struct fun_admin_read48_req read48[];
+ } read;
+ struct fun_admin_port_inetaddr_event_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __u8 event_type;
+ __u8 addr_family;
+ __be32 id;
+
+ __u8 addr[];
+ } inetaddr_event;
+ } u;
+};
+
+#define FUN_ADMIN_PORT_CREATE_REQ_INIT(_subop, _flags, _id) \
+ (struct fun_admin_port_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), \
+ }
+
+#define FUN_ADMIN_PORT_WRITE_REQ_INIT(_subop, _flags, _id) \
+ (struct fun_admin_port_write_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), \
+ }
+
+#define FUN_ADMIN_PORT_READ_REQ_INIT(_subop, _flags, _id) \
+ (struct fun_admin_port_read_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), \
+ }
+
+struct fun_admin_port_rsp {
+ struct fun_admin_rsp_common common;
+
+ union port_rsp_subop {
+ struct fun_admin_port_create_rsp {
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id;
+
+ __be16 lport;
+ __u8 rsvd1[6];
+ } create;
+ struct fun_admin_port_write_rsp {
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id; /* portid */
+
+ struct fun_admin_write48_rsp write48[];
+ } write;
+ struct fun_admin_port_read_rsp {
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id; /* portid */
+
+ struct fun_admin_read48_rsp read48[];
+ } read;
+ struct fun_admin_port_inetaddr_event_rsp {
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id; /* portid */
+ } inetaddr_event;
+ } u;
+};
+
+enum fun_xcvr_type {
+ FUN_XCVR_BASET = 0x0,
+ FUN_XCVR_CU = 0x1,
+ FUN_XCVR_SMF = 0x2,
+ FUN_XCVR_MMF = 0x3,
+ FUN_XCVR_AOC = 0x4,
+ FUN_XCVR_SFPP = 0x10, /* SFP+ or later */
+ FUN_XCVR_QSFPP = 0x11, /* QSFP+ or later */
+ FUN_XCVR_QSFPDD = 0x12, /* QSFP-DD */
+};
+
+struct fun_admin_port_notif {
+ struct fun_admin_rsp_common common;
+
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 id;
+ __be32 speed; /* in 10 Mbps units */
+
+ __u8 link_state;
+ __u8 missed_events;
+ __u8 link_down_reason;
+ __u8 xcvr_type;
+ __u8 flow_ctrl;
+ __u8 fec;
+ __u8 active_lanes;
+ __u8 rsvd1;
+
+ __be64 advertising;
+
+ __be64 lp_advertising;
+};
+
+enum fun_eth_rss_const {
+ FUN_ETH_RSS_MAX_KEY_SIZE = 0x28,
+ FUN_ETH_RSS_MAX_INDIR_ENT = 0x40,
+};
+
+enum fun_eth_hash_alg {
+ FUN_ETH_RSS_ALG_INVALID = 0x0,
+ FUN_ETH_RSS_ALG_TOEPLITZ = 0x1,
+ FUN_ETH_RSS_ALG_CRC32 = 0x2,
+};
+
+struct fun_admin_rss_req {
+ struct fun_admin_req_common common;
+
+ union rss_req_subop {
+ struct fun_admin_rss_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 rsvd1;
+ __be32 viid; /* VI flow id */
+
+ __be64 metadata[1];
+
+ __u8 alg;
+ __u8 keylen;
+ __u8 indir_nent;
+ __u8 rsvd2;
+ __be16 key_off;
+ __be16 indir_off;
+
+ struct fun_dataop_hdr dataop;
+ } create;
+ } u;
+};
+
+#define FUN_ADMIN_RSS_CREATE_REQ_INIT(_subop, _flags, _id, _viid, _alg, \
+ _keylen, _indir_nent, _key_off, \
+ _indir_off) \
+ (struct fun_admin_rss_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .viid = cpu_to_be32(_viid), \
+ .alg = _alg, .keylen = _keylen, .indir_nent = _indir_nent, \
+ .key_off = cpu_to_be16(_key_off), \
+ .indir_off = cpu_to_be16(_indir_off), \
+ }
+
+struct fun_admin_vi_req {
+ struct fun_admin_req_common common;
+
+ union vi_req_subop {
+ struct fun_admin_vi_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 rsvd1;
+ __be32 portid; /* port flow id */
+ } create;
+ } u;
+};
+
+#define FUN_ADMIN_VI_CREATE_REQ_INIT(_subop, _flags, _id, _portid) \
+ (struct fun_admin_vi_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .portid = cpu_to_be32(_portid), \
+ }
+
+struct fun_admin_eth_req {
+ struct fun_admin_req_common common;
+
+ union eth_req_subop {
+ struct fun_admin_eth_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 rsvd1;
+ __be32 portid; /* port flow id */
+ } create;
+ } u;
+};
+
+#define FUN_ADMIN_ETH_CREATE_REQ_INIT(_subop, _flags, _id, _portid) \
+ (struct fun_admin_eth_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .portid = cpu_to_be32(_portid), \
+ }
+
+enum {
+ FUN_ADMIN_SWU_UPGRADE_FLAG_INIT = 0x10,
+ FUN_ADMIN_SWU_UPGRADE_FLAG_COMPLETE = 0x20,
+ FUN_ADMIN_SWU_UPGRADE_FLAG_DOWNGRADE = 0x40,
+ FUN_ADMIN_SWU_UPGRADE_FLAG_ACTIVE_IMAGE = 0x80,
+ FUN_ADMIN_SWU_UPGRADE_FLAG_ASYNC = 0x1,
+};
+
+enum fun_admin_swu_subop {
+ FUN_ADMIN_SWU_SUBOP_GET_VERSION = 0x20,
+ FUN_ADMIN_SWU_SUBOP_UPGRADE = 0x21,
+ FUN_ADMIN_SWU_SUBOP_UPGRADE_DATA = 0x22,
+ FUN_ADMIN_SWU_SUBOP_GET_ALL_VERSIONS = 0x23,
+};
+
+struct fun_admin_swu_req {
+ struct fun_admin_req_common common;
+
+ union swu_req_subop {
+ struct fun_admin_swu_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+ } create;
+ struct fun_admin_swu_upgrade_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 fourcc;
+ __be32 rsvd1;
+
+ __be64 image_size; /* upgrade image length */
+ } upgrade;
+ struct fun_admin_swu_upgrade_data_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 offset; /* offset of data in this command */
+ __be32 size; /* total size of data in this command */
+ } upgrade_data;
+ } u;
+
+ struct fun_subop_sgl sgl[]; /* in, out buffers through sgl */
+};
+
+#define FUN_ADMIN_SWU_CREATE_REQ_INIT(_subop, _flags, _id) \
+ (struct fun_admin_swu_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), \
+ }
+
+#define FUN_ADMIN_SWU_UPGRADE_REQ_INIT(_subop, _flags, _id, _fourcc, \
+ _image_size) \
+ (struct fun_admin_swu_upgrade_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .fourcc = cpu_to_be32(_fourcc), \
+ .image_size = cpu_to_be64(_image_size), \
+ }
+
+#define FUN_ADMIN_SWU_UPGRADE_DATA_REQ_INIT(_subop, _flags, _id, _offset, \
+ _size) \
+ (struct fun_admin_swu_upgrade_data_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .offset = cpu_to_be32(_offset), \
+ .size = cpu_to_be32(_size), \
+ }
+
+struct fun_admin_swu_rsp {
+ struct fun_admin_rsp_common common;
+
+ union swu_rsp_subop {
+ struct fun_admin_swu_create_rsp {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+ } create;
+ struct fun_admin_swu_upgrade_rsp {
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id;
+
+ __be32 fourcc;
+ __be32 status;
+
+ __be32 progress;
+ __be32 unused;
+ } upgrade;
+ struct fun_admin_swu_upgrade_data_rsp {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 offset;
+ __be32 size;
+ } upgrade_data;
+ } u;
+};
+
+enum fun_ktls_version {
+ FUN_KTLS_TLSV2 = 0x20,
+ FUN_KTLS_TLSV3 = 0x30,
+};
+
+enum fun_ktls_cipher {
+ FUN_KTLS_CIPHER_AES_GCM_128 = 0x33,
+ FUN_KTLS_CIPHER_AES_GCM_256 = 0x34,
+ FUN_KTLS_CIPHER_AES_CCM_128 = 0x35,
+ FUN_KTLS_CIPHER_CHACHA20_POLY1305 = 0x36,
+};
+
+enum fun_ktls_modify_flags {
+ FUN_KTLS_MODIFY_REMOVE = 0x1,
+};
+
+struct fun_admin_ktls_create_req {
+ struct fun_admin_req_common common;
+
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+};
+
+#define FUN_ADMIN_KTLS_CREATE_REQ_INIT(_subop, _flags, _id) \
+ (struct fun_admin_ktls_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), \
+ }
+
+struct fun_admin_ktls_create_rsp {
+ struct fun_admin_rsp_common common;
+
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id;
+};
+
+struct fun_admin_ktls_modify_req {
+ struct fun_admin_req_common common;
+
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be64 tlsid;
+
+ __be32 tcp_seq;
+ __u8 version;
+ __u8 cipher;
+ __u8 rsvd1[2];
+
+ __u8 record_seq[8];
+
+ __u8 key[32];
+
+ __u8 iv[16];
+
+ __u8 salt[8];
+};
+
+#define FUN_ADMIN_KTLS_MODIFY_REQ_INIT(_subop, _flags, _id, _tlsid, _tcp_seq, \
+ _version, _cipher) \
+ (struct fun_admin_ktls_modify_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .tlsid = cpu_to_be64(_tlsid), \
+ .tcp_seq = cpu_to_be32(_tcp_seq), .version = _version, \
+ .cipher = _cipher, \
+ }
+
+struct fun_admin_ktls_modify_rsp {
+ struct fun_admin_rsp_common common;
+
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id;
+
+ __be64 tlsid;
+};
+
+struct fun_req_common {
+ __u8 op;
+ __u8 len8;
+ __be16 flags;
+ __u8 suboff8;
+ __u8 rsvd0;
+ __be16 cid;
+};
+
+struct fun_rsp_common {
+ __u8 op;
+ __u8 len8;
+ __be16 flags;
+ __u8 suboff8;
+ __u8 ret;
+ __be16 cid;
+};
+
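+/* Per-CQE completion information; the queue code locates it at the end of
+ * each CQE (see funq_cqe_info()).
+ */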
+struct fun_cqe_info {
+ __be16 sqhd;
+ __be16 sqid;
+ __be16 cid;
+ __be16 sf_p;
+};
+
+enum fun_eprq_def {
+ FUN_EPRQ_PKT_ALIGN = 0x80,
+};
+
+struct fun_eprq_rqbuf {
+ __be64 bufaddr;
+};
+
+#define FUN_EPRQ_RQBUF_INIT(_bufaddr) \
+ (struct fun_eprq_rqbuf) { \
+ .bufaddr = cpu_to_be64(_bufaddr), \
+ }
+
+enum fun_eth_op {
+ FUN_ETH_OP_TX = 0x1,
+ FUN_ETH_OP_RX = 0x2,
+};
+
+enum {
+ FUN_ETH_OFFLOAD_EN = 0x8000,
+ FUN_ETH_OUTER_EN = 0x4000,
+ FUN_ETH_INNER_LSO = 0x2000,
+ FUN_ETH_INNER_TSO = 0x1000,
+ FUN_ETH_OUTER_IPV6 = 0x800,
+ FUN_ETH_OUTER_UDP = 0x400,
+ FUN_ETH_INNER_IPV6 = 0x200,
+ FUN_ETH_INNER_UDP = 0x100,
+ FUN_ETH_UPDATE_OUTER_L3_LEN = 0x80,
+ FUN_ETH_UPDATE_OUTER_L3_CKSUM = 0x40,
+ FUN_ETH_UPDATE_OUTER_L4_LEN = 0x20,
+ FUN_ETH_UPDATE_OUTER_L4_CKSUM = 0x10,
+ FUN_ETH_UPDATE_INNER_L3_LEN = 0x8,
+ FUN_ETH_UPDATE_INNER_L3_CKSUM = 0x4,
+ FUN_ETH_UPDATE_INNER_L4_LEN = 0x2,
+ FUN_ETH_UPDATE_INNER_L4_CKSUM = 0x1,
+};
+
+struct fun_eth_offload {
+ __be16 flags; /* combination of above flags */
+ __be16 mss; /* TSO max seg size */
+ __be16 tcp_doff_flags; /* TCP data offset + flags 16b word */
+ __be16 vlan;
+
+ __be16 inner_l3_off; /* Inner L3 header offset */
+ __be16 inner_l4_off; /* Inner L4 header offset */
+ __be16 outer_l3_off; /* Outer L3 header offset */
+ __be16 outer_l4_off; /* Outer L4 header offset */
+};
+
+static inline void fun_eth_offload_init(struct fun_eth_offload *s, u16 flags,
+ u16 mss, __be16 tcp_doff_flags,
+ __be16 vlan, u16 inner_l3_off,
+ u16 inner_l4_off, u16 outer_l3_off,
+ u16 outer_l4_off)
+{
+ s->flags = cpu_to_be16(flags);
+ s->mss = cpu_to_be16(mss);
+ s->tcp_doff_flags = tcp_doff_flags;
+ s->vlan = vlan;
+ s->inner_l3_off = cpu_to_be16(inner_l3_off);
+ s->inner_l4_off = cpu_to_be16(inner_l4_off);
+ s->outer_l3_off = cpu_to_be16(outer_l3_off);
+ s->outer_l4_off = cpu_to_be16(outer_l4_off);
+}
+
+struct fun_eth_tls {
+ __be64 tlsid;
+};
+
+enum {
+ FUN_ETH_TX_TLS = 0x8000,
+};
+
+struct fun_eth_tx_req {
+ __u8 op;
+ __u8 len8;
+ __be16 flags;
+ __u8 suboff8;
+ __u8 repr_idn;
+ __be16 encap_proto;
+
+ struct fun_eth_offload offload;
+
+ struct fun_dataop_hdr dataop;
+};
+
+struct fun_eth_rx_cv {
+ __be16 il4_prot_to_l2_type;
+};
+
+#define FUN_ETH_RX_CV_IL4_PROT_S 13U
+#define FUN_ETH_RX_CV_IL4_PROT_M 0x3
+
+#define FUN_ETH_RX_CV_IL3_PROT_S 11U
+#define FUN_ETH_RX_CV_IL3_PROT_M 0x3
+
+#define FUN_ETH_RX_CV_OL4_PROT_S 8U
+#define FUN_ETH_RX_CV_OL4_PROT_M 0x7
+
+#define FUN_ETH_RX_CV_ENCAP_TYPE_S 6U
+#define FUN_ETH_RX_CV_ENCAP_TYPE_M 0x3
+
+#define FUN_ETH_RX_CV_OL3_PROT_S 4U
+#define FUN_ETH_RX_CV_OL3_PROT_M 0x3
+
+#define FUN_ETH_RX_CV_VLAN_TYPE_S 3U
+#define FUN_ETH_RX_CV_VLAN_TYPE_M 0x1
+
+#define FUN_ETH_RX_CV_L2_TYPE_S 2U
+#define FUN_ETH_RX_CV_L2_TYPE_M 0x1
+
+enum fun_rx_cv {
+ FUN_RX_CV_NONE = 0x0,
+ FUN_RX_CV_IP = 0x2,
+ FUN_RX_CV_IP6 = 0x3,
+ FUN_RX_CV_TCP = 0x2,
+ FUN_RX_CV_UDP = 0x3,
+ FUN_RX_CV_VXLAN = 0x2,
+ FUN_RX_CV_MPLS = 0x3,
+};
+
+struct fun_eth_cqe {
+ __u8 op;
+ __u8 len8;
+ __u8 nsgl;
+ __u8 repr_idn;
+ __be32 pkt_len;
+
+ __be64 timestamp;
+
+ __be16 pkt_cv;
+ __be16 rsvd0;
+ __be32 hash;
+
+ __be16 encap_proto;
+ __be16 vlan;
+ __be32 rsvd1;
+
+ __be32 buf_offset;
+ __be16 headroom;
+ __be16 csum;
+};
+
+enum fun_admin_adi_attr {
+ FUN_ADMIN_ADI_ATTR_MACADDR = 0x1,
+ FUN_ADMIN_ADI_ATTR_VLAN = 0x2,
+ FUN_ADMIN_ADI_ATTR_RATE = 0x3,
+};
+
+struct fun_adi_param {
+ union adi_param {
+ struct fun_adi_mac {
+ __be64 addr;
+ } mac;
+ struct fun_adi_vlan {
+ __be32 rsvd;
+ __be16 eth_type;
+ __be16 tci;
+ } vlan;
+ struct fun_adi_rate {
+ __be32 rsvd;
+ __be32 tx_mbps;
+ } rate;
+ } u;
+};
+
+#define FUN_ADI_MAC_INIT(_addr) \
+ (struct fun_adi_mac) { \
+ .addr = cpu_to_be64(_addr), \
+ }
+
+#define FUN_ADI_VLAN_INIT(_eth_type, _tci) \
+ (struct fun_adi_vlan) { \
+ .eth_type = cpu_to_be16(_eth_type), .tci = cpu_to_be16(_tci), \
+ }
+
+#define FUN_ADI_RATE_INIT(_tx_mbps) \
+ (struct fun_adi_rate) { \
+ .tx_mbps = cpu_to_be32(_tx_mbps), \
+ }
+
+struct fun_admin_adi_req {
+ struct fun_admin_req_common common;
+
+ union adi_req_subop {
+ struct fun_admin_adi_write_req {
+ __u8 subop;
+ __u8 attribute;
+ __be16 rsvd;
+ __be32 id;
+
+ struct fun_adi_param param;
+ } write;
+ } u;
+};
+
+#define FUN_ADMIN_ADI_WRITE_REQ_INIT(_subop, _attribute, _id) \
+ (struct fun_admin_adi_write_req) { \
+ .subop = (_subop), .attribute = (_attribute), \
+ .id = cpu_to_be32(_id), \
+ }
+
+#endif /* __FUN_HCI_H */
diff --git a/drivers/net/ethernet/fungible/funcore/fun_queue.c b/drivers/net/ethernet/fungible/funcore/fun_queue.c
new file mode 100644
index 000000000000..8ab9f68434f5
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funcore/fun_queue.c
@@ -0,0 +1,601 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/log2.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include "fun_dev.h"
+#include "fun_queue.h"
+
+/* Allocate memory for a queue. This includes the memory for the HW descriptor
+ * ring, an optional 64b HW write-back area, and an optional SW state ring.
+ * Returns the virtual and DMA addresses of the HW ring, the VA of the SW ring,
+ * and the VA of the write-back area.
+ */
+void *fun_alloc_ring_mem(struct device *dma_dev, size_t depth,
+ size_t hw_desc_sz, size_t sw_desc_sz, bool wb,
+ int numa_node, dma_addr_t *dma_addr, void **sw_va,
+ volatile __be64 **wb_va)
+{
+ int dev_node = dev_to_node(dma_dev);
+ size_t dma_sz;
+ void *va;
+
+ if (numa_node == NUMA_NO_NODE)
+ numa_node = dev_node;
+
+ /* Place optional write-back area at end of descriptor ring. */
+ dma_sz = hw_desc_sz * depth;
+ if (wb)
+ dma_sz += sizeof(u64);
+
+ set_dev_node(dma_dev, numa_node);
+ va = dma_alloc_coherent(dma_dev, dma_sz, dma_addr, GFP_KERNEL);
+ set_dev_node(dma_dev, dev_node);
+ if (!va)
+ return NULL;
+
+ if (sw_desc_sz) {
+ *sw_va = kvzalloc_node(sw_desc_sz * depth, GFP_KERNEL,
+ numa_node);
+ if (!*sw_va) {
+ dma_free_coherent(dma_dev, dma_sz, va, *dma_addr);
+ return NULL;
+ }
+ }
+
+ if (wb)
+ *wb_va = va + dma_sz - sizeof(u64);
+ return va;
+}
+EXPORT_SYMBOL_GPL(fun_alloc_ring_mem);
+
+void fun_free_ring_mem(struct device *dma_dev, size_t depth, size_t hw_desc_sz,
+ bool wb, void *hw_va, dma_addr_t dma_addr, void *sw_va)
+{
+ if (hw_va) {
+ size_t sz = depth * hw_desc_sz;
+
+ if (wb)
+ sz += sizeof(u64);
+ dma_free_coherent(dma_dev, sz, hw_va, dma_addr);
+ }
+ kvfree(sw_va);
+}
+EXPORT_SYMBOL_GPL(fun_free_ring_mem);
+
+/* Prepare and issue an admin command to create an SQ on the device with the
+ * provided parameters. If the queue ID is auto-allocated by the device it is
+ * returned in *sqidp.
+ */
+int fun_sq_create(struct fun_dev *fdev, u16 flags, u32 sqid, u32 cqid,
+ u8 sqe_size_log2, u32 sq_depth, dma_addr_t dma_addr,
+ u8 coal_nentries, u8 coal_usec, u32 irq_num,
+ u32 scan_start_id, u32 scan_end_id,
+ u32 rq_buf_size_log2, u32 *sqidp, u32 __iomem **dbp)
+{
+ union {
+ struct fun_admin_epsq_req req;
+ struct fun_admin_generic_create_rsp rsp;
+ } cmd;
+ dma_addr_t wb_addr;
+ u32 hw_qid;
+ int rc;
+
+ if (sq_depth > fdev->q_depth)
+ return -EINVAL;
+ if (flags & FUN_ADMIN_EPSQ_CREATE_FLAG_RQ)
+ sqe_size_log2 = ilog2(sizeof(struct fun_eprq_rqbuf));
+
+ wb_addr = dma_addr + (sq_depth << sqe_size_log2);
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_EPSQ,
+ sizeof(cmd.req));
+ cmd.req.u.create =
+ FUN_ADMIN_EPSQ_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, flags,
+ sqid, cqid, sqe_size_log2,
+ sq_depth - 1, dma_addr, 0,
+ coal_nentries, coal_usec,
+ irq_num, scan_start_id,
+ scan_end_id, 0,
+ rq_buf_size_log2,
+ ilog2(sizeof(u64)), wb_addr);
+
+ rc = fun_submit_admin_sync_cmd(fdev, &cmd.req.common,
+ &cmd.rsp, sizeof(cmd.rsp), 0);
+ if (rc)
+ return rc;
+
+ hw_qid = be32_to_cpu(cmd.rsp.id);
+ *dbp = fun_sq_db_addr(fdev, hw_qid);
+ if (flags & FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR)
+ *sqidp = hw_qid;
+ return rc;
+}
+EXPORT_SYMBOL_GPL(fun_sq_create);
+
+/* Prepare and issue an admin command to create a CQ on the device with the
+ * provided parameters. If the queue ID is auto-allocated by the device it is
+ * returned in *cqidp.
+ */
+int fun_cq_create(struct fun_dev *fdev, u16 flags, u32 cqid, u32 rqid,
+ u8 cqe_size_log2, u32 cq_depth, dma_addr_t dma_addr,
+ u16 headroom, u16 tailroom, u8 coal_nentries, u8 coal_usec,
+ u32 irq_num, u32 scan_start_id, u32 scan_end_id, u32 *cqidp,
+ u32 __iomem **dbp)
+{
+ union {
+ struct fun_admin_epcq_req req;
+ struct fun_admin_generic_create_rsp rsp;
+ } cmd;
+ u32 hw_qid;
+ int rc;
+
+ if (cq_depth > fdev->q_depth)
+ return -EINVAL;
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_EPCQ,
+ sizeof(cmd.req));
+ cmd.req.u.create =
+ FUN_ADMIN_EPCQ_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, flags,
+ cqid, rqid, cqe_size_log2,
+ cq_depth - 1, dma_addr, tailroom,
+ headroom / 2, 0, coal_nentries,
+ coal_usec, irq_num,
+ scan_start_id, scan_end_id, 0);
+
+ rc = fun_submit_admin_sync_cmd(fdev, &cmd.req.common,
+ &cmd.rsp, sizeof(cmd.rsp), 0);
+ if (rc)
+ return rc;
+
+ hw_qid = be32_to_cpu(cmd.rsp.id);
+ *dbp = fun_cq_db_addr(fdev, hw_qid);
+ if (flags & FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR)
+ *cqidp = hw_qid;
+ return rc;
+}
+EXPORT_SYMBOL_GPL(fun_cq_create);
+
+static bool fun_sq_is_head_wb(const struct fun_queue *funq)
+{
+ return funq->sq_flags & FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS;
+}
+
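+/* Unmap and release the pages currently posted to an RQ. */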
+static void fun_clean_rq(struct fun_queue *funq)
+{
+ struct fun_dev *fdev = funq->fdev;
+ struct fun_rq_info *rqinfo;
+ unsigned int i;
+
+ for (i = 0; i < funq->rq_depth; i++) {
+ rqinfo = &funq->rq_info[i];
+ if (rqinfo->page) {
+ dma_unmap_page(fdev->dev, rqinfo->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ put_page(rqinfo->page);
+ rqinfo->page = NULL;
+ }
+ }
+}
+
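+/* Allocate and DMA-map a full page for each RQ entry and record the buffer
+ * addresses in the RQ descriptors.
+ */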
+static int fun_fill_rq(struct fun_queue *funq)
+{
+ struct device *dev = funq->fdev->dev;
+ int i, node = dev_to_node(dev);
+ struct fun_rq_info *rqinfo;
+
+ for (i = 0; i < funq->rq_depth; i++) {
+ rqinfo = &funq->rq_info[i];
+ rqinfo->page = alloc_pages_node(node, GFP_KERNEL, 0);
+ if (unlikely(!rqinfo->page))
+ return -ENOMEM;
+
+ rqinfo->dma = dma_map_page(dev, rqinfo->page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, rqinfo->dma))) {
+ put_page(rqinfo->page);
+ rqinfo->page = NULL;
+ return -ENOMEM;
+ }
+
+ funq->rqes[i] = FUN_EPRQ_RQBUF_INIT(rqinfo->dma);
+ }
+
+ funq->rq_tail = funq->rq_depth - 1;
+ return 0;
+}
+
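+/* Track the device's position within the RQ buffers. A buffer offset that did
+ * not advance past the previous one means the device has moved on to the next
+ * RQ buffer: sync the finished page back to the device for reuse, advance the
+ * SW buffer index, and note that one more RQE needs to be reposted.
+ */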
+static void fun_rq_update_pos(struct fun_queue *funq, int buf_offset)
+{
+ if (buf_offset <= funq->rq_buf_offset) {
+ struct fun_rq_info *rqinfo = &funq->rq_info[funq->rq_buf_idx];
+ struct device *dev = funq->fdev->dev;
+
+ dma_sync_single_for_device(dev, rqinfo->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ funq->num_rqe_to_fill++;
+ if (++funq->rq_buf_idx == funq->rq_depth)
+ funq->rq_buf_idx = 0;
+ }
+ funq->rq_buf_offset = buf_offset;
+}
+
+/* Given a command response with data scattered across >= 1 RQ buffers, return
+ * a pointer to a contiguous buffer containing all the data. If the data is in
+ * one RQ buffer the start address within that buffer is returned; otherwise a
+ * new buffer is allocated and the data is gathered into it.
+ */

+static void *fun_data_from_rq(struct fun_queue *funq,
+ const struct fun_rsp_common *rsp, bool *need_free)
+{
+ u32 bufoff, total_len, remaining, fragsize, dataoff;
+ struct device *dma_dev = funq->fdev->dev;
+ const struct fun_dataop_rqbuf *databuf;
+ const struct fun_dataop_hdr *dataop;
+ const struct fun_rq_info *rqinfo;
+ void *data;
+
+ dataop = (void *)rsp + rsp->suboff8 * 8;
+ total_len = be32_to_cpu(dataop->total_len);
+
+ if (likely(dataop->nsgl == 1)) {
+ databuf = (struct fun_dataop_rqbuf *)dataop->imm;
+ bufoff = be32_to_cpu(databuf->bufoff);
+ fun_rq_update_pos(funq, bufoff);
+ rqinfo = &funq->rq_info[funq->rq_buf_idx];
+ dma_sync_single_for_cpu(dma_dev, rqinfo->dma + bufoff,
+ total_len, DMA_FROM_DEVICE);
+ *need_free = false;
+ return page_address(rqinfo->page) + bufoff;
+ }
+
+ /* For scattered completions gather the fragments into one buffer. */
+
+ data = kmalloc(total_len, GFP_ATOMIC);
+ /* NULL is OK here. In case of failure we still need to consume the data
+ * for proper buffer accounting but indicate an error in the response.
+ */
+ if (likely(data))
+ *need_free = true;
+
+ dataoff = 0;
+ for (remaining = total_len; remaining; remaining -= fragsize) {
+ fun_rq_update_pos(funq, 0);
+ fragsize = min_t(unsigned int, PAGE_SIZE, remaining);
+ if (data) {
+ rqinfo = &funq->rq_info[funq->rq_buf_idx];
+ dma_sync_single_for_cpu(dma_dev, rqinfo->dma, fragsize,
+ DMA_FROM_DEVICE);
+ memcpy(data + dataoff, page_address(rqinfo->page),
+ fragsize);
+ dataoff += fragsize;
+ }
+ }
+ return data;
+}
+
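+/* Process up to @max new CQEs (up to CQ depth - 1 if @max is 0), stopping at
+ * the first CQE whose phase tag doesn't match the current queue phase.
+ * Responses delivered through RQ buffers are first gathered into a contiguous
+ * buffer. The queue's CQ callback, if set, is invoked for each CQE. Returns
+ * the number of CQEs consumed; doorbell updates are left to the caller.
+ */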
+unsigned int __fun_process_cq(struct fun_queue *funq, unsigned int max)
+{
+ const struct fun_cqe_info *info;
+ struct fun_rsp_common *rsp;
+ unsigned int new_cqes;
+ u16 sf_p, flags;
+ bool need_free;
+ void *cqe;
+
+ if (!max)
+ max = funq->cq_depth - 1;
+
+ for (new_cqes = 0; new_cqes < max; new_cqes++) {
+ cqe = funq->cqes + (funq->cq_head << funq->cqe_size_log2);
+ info = funq_cqe_info(funq, cqe);
+ sf_p = be16_to_cpu(info->sf_p);
+
+ if ((sf_p & 1) != funq->cq_phase)
+ break;
+
+ /* ensure the phase tag is read before other CQE fields */
+ dma_rmb();
+
+ if (++funq->cq_head == funq->cq_depth) {
+ funq->cq_head = 0;
+ funq->cq_phase = !funq->cq_phase;
+ }
+
+ rsp = cqe;
+ flags = be16_to_cpu(rsp->flags);
+
+ need_free = false;
+ if (unlikely(flags & FUN_REQ_COMMON_FLAG_CQE_IN_RQBUF)) {
+ rsp = fun_data_from_rq(funq, rsp, &need_free);
+ if (!rsp) {
+ rsp = cqe;
+ rsp->len8 = 1;
+ if (rsp->ret == 0)
+ rsp->ret = ENOMEM;
+ }
+ }
+
+ if (funq->cq_cb)
+ funq->cq_cb(funq, funq->cb_data, rsp, info);
+ if (need_free)
+ kfree(rsp);
+ }
+
+ dev_dbg(funq->fdev->dev, "CQ %u, new CQEs %u/%u, head %u, phase %u\n",
+ funq->cqid, new_cqes, max, funq->cq_head, funq->cq_phase);
+ return new_cqes;
+}
+
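+/* Process new CQEs, then repost any RQ buffers consumed along the way, update
+ * the CQ head, and rearm the CQ interrupt.
+ */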
+unsigned int fun_process_cq(struct fun_queue *funq, unsigned int max)
+{
+ unsigned int processed;
+ u32 db;
+
+ processed = __fun_process_cq(funq, max);
+
+ if (funq->num_rqe_to_fill) {
+ funq->rq_tail = (funq->rq_tail + funq->num_rqe_to_fill) %
+ funq->rq_depth;
+ funq->num_rqe_to_fill = 0;
+ writel(funq->rq_tail, funq->rq_db);
+ }
+
+ db = funq->cq_head | FUN_DB_IRQ_ARM_F;
+ writel(db, funq->cq_db);
+ return processed;
+}
+
+static int fun_alloc_sqes(struct fun_queue *funq)
+{
+ funq->sq_cmds = fun_alloc_ring_mem(funq->fdev->dev, funq->sq_depth,
+ 1 << funq->sqe_size_log2, 0,
+ fun_sq_is_head_wb(funq),
+ NUMA_NO_NODE, &funq->sq_dma_addr,
+ NULL, &funq->sq_head);
+ return funq->sq_cmds ? 0 : -ENOMEM;
+}
+
+static int fun_alloc_cqes(struct fun_queue *funq)
+{
+ funq->cqes = fun_alloc_ring_mem(funq->fdev->dev, funq->cq_depth,
+ 1 << funq->cqe_size_log2, 0, false,
+ NUMA_NO_NODE, &funq->cq_dma_addr, NULL,
+ NULL);
+ return funq->cqes ? 0 : -ENOMEM;
+}
+
+static int fun_alloc_rqes(struct fun_queue *funq)
+{
+ funq->rqes = fun_alloc_ring_mem(funq->fdev->dev, funq->rq_depth,
+ sizeof(*funq->rqes),
+ sizeof(*funq->rq_info), false,
+ NUMA_NO_NODE, &funq->rq_dma_addr,
+ (void **)&funq->rq_info, NULL);
+ return funq->rqes ? 0 : -ENOMEM;
+}
+
+/* Free a queue's structures. */
+void fun_free_queue(struct fun_queue *funq)
+{
+ struct device *dev = funq->fdev->dev;
+
+ fun_free_ring_mem(dev, funq->cq_depth, 1 << funq->cqe_size_log2, false,
+ funq->cqes, funq->cq_dma_addr, NULL);
+ fun_free_ring_mem(dev, funq->sq_depth, 1 << funq->sqe_size_log2,
+ fun_sq_is_head_wb(funq), funq->sq_cmds,
+ funq->sq_dma_addr, NULL);
+
+ if (funq->rqes) {
+ fun_clean_rq(funq);
+ fun_free_ring_mem(dev, funq->rq_depth, sizeof(*funq->rqes),
+ false, funq->rqes, funq->rq_dma_addr,
+ funq->rq_info);
+ }
+
+ kfree(funq);
+}
+
+/* Allocate and initialize a funq's structures. */
+struct fun_queue *fun_alloc_queue(struct fun_dev *fdev, int qid,
+ const struct fun_queue_alloc_req *req)
+{
+ struct fun_queue *funq = kzalloc(sizeof(*funq), GFP_KERNEL);
+
+ if (!funq)
+ return NULL;
+
+ funq->fdev = fdev;
+ spin_lock_init(&funq->sq_lock);
+
+ funq->qid = qid;
+
+ /* Initial CQ/SQ/RQ ids */
+ if (req->rq_depth) {
+ funq->cqid = 2 * qid;
+ if (funq->qid) {
+ /* I/O Q: use rqid = cqid, sqid = +1 */
+ funq->rqid = funq->cqid;
+ funq->sqid = funq->rqid + 1;
+ } else {
+ /* Admin Q: sqid is always 0, use ID 1 for RQ */
+ funq->sqid = 0;
+ funq->rqid = 1;
+ }
+ } else {
+ funq->cqid = qid;
+ funq->sqid = qid;
+ }
+
+ funq->cq_flags = req->cq_flags;
+ funq->sq_flags = req->sq_flags;
+
+ funq->cqe_size_log2 = req->cqe_size_log2;
+ funq->sqe_size_log2 = req->sqe_size_log2;
+
+ funq->cq_depth = req->cq_depth;
+ funq->sq_depth = req->sq_depth;
+
+ funq->cq_intcoal_nentries = req->cq_intcoal_nentries;
+ funq->cq_intcoal_usec = req->cq_intcoal_usec;
+
+ funq->sq_intcoal_nentries = req->sq_intcoal_nentries;
+ funq->sq_intcoal_usec = req->sq_intcoal_usec;
+
+ if (fun_alloc_cqes(funq))
+ goto free_funq;
+
+ funq->cq_phase = 1;
+
+ if (fun_alloc_sqes(funq))
+ goto free_funq;
+
+ if (req->rq_depth) {
+ funq->rq_flags = req->rq_flags | FUN_ADMIN_EPSQ_CREATE_FLAG_RQ;
+ funq->rq_depth = req->rq_depth;
+ funq->rq_buf_offset = -1;
+
+ if (fun_alloc_rqes(funq) || fun_fill_rq(funq))
+ goto free_funq;
+ }
+
+ funq->cq_vector = -1;
+ funq->cqe_info_offset = (1 << funq->cqe_size_log2) - sizeof(struct fun_cqe_info);
+
+ /* SQ/CQ 0 are implicitly created, assign their doorbells now.
+ * Other queues are assigned doorbells at their explicit creation.
+ */
+ if (funq->sqid == 0)
+ funq->sq_db = fun_sq_db_addr(fdev, 0);
+ if (funq->cqid == 0)
+ funq->cq_db = fun_cq_db_addr(fdev, 0);
+
+ return funq;
+
+free_funq:
+ fun_free_queue(funq);
+ return NULL;
+}
+
+/* Create a funq's CQ on the device. */
+static int fun_create_cq(struct fun_queue *funq)
+{
+ struct fun_dev *fdev = funq->fdev;
+ unsigned int rqid;
+ int rc;
+
+ rqid = funq->cq_flags & FUN_ADMIN_EPCQ_CREATE_FLAG_RQ ?
+ funq->rqid : FUN_HCI_ID_INVALID;
+ rc = fun_cq_create(fdev, funq->cq_flags, funq->cqid, rqid,
+ funq->cqe_size_log2, funq->cq_depth,
+ funq->cq_dma_addr, 0, 0, funq->cq_intcoal_nentries,
+ funq->cq_intcoal_usec, funq->cq_vector, 0, 0,
+ &funq->cqid, &funq->cq_db);
+ if (!rc)
+ dev_dbg(fdev->dev, "created CQ %u\n", funq->cqid);
+
+ return rc;
+}
+
+/* Create a funq's SQ on the device. */
+static int fun_create_sq(struct fun_queue *funq)
+{
+ struct fun_dev *fdev = funq->fdev;
+ int rc;
+
+ rc = fun_sq_create(fdev, funq->sq_flags, funq->sqid, funq->cqid,
+ funq->sqe_size_log2, funq->sq_depth,
+ funq->sq_dma_addr, funq->sq_intcoal_nentries,
+ funq->sq_intcoal_usec, funq->cq_vector, 0, 0,
+ 0, &funq->sqid, &funq->sq_db);
+ if (!rc)
+ dev_dbg(fdev->dev, "created SQ %u\n", funq->sqid);
+
+ return rc;
+}
+
+/* Create a funq's RQ on the device. */
+int fun_create_rq(struct fun_queue *funq)
+{
+ struct fun_dev *fdev = funq->fdev;
+ int rc;
+
+ rc = fun_sq_create(fdev, funq->rq_flags, funq->rqid, funq->cqid, 0,
+ funq->rq_depth, funq->rq_dma_addr, 0, 0,
+ funq->cq_vector, 0, 0, PAGE_SHIFT, &funq->rqid,
+ &funq->rq_db);
+ if (!rc)
+ dev_dbg(fdev->dev, "created RQ %u\n", funq->rqid);
+
+ return rc;
+}
+
+static unsigned int funq_irq(struct fun_queue *funq)
+{
+ return pci_irq_vector(to_pci_dev(funq->fdev->dev), funq->cq_vector);
+}
+
+int fun_request_irq(struct fun_queue *funq, const char *devname,
+ irq_handler_t handler, void *data)
+{
+ int rc;
+
+ if (funq->cq_vector < 0)
+ return -EINVAL;
+
+ funq->irq_handler = handler;
+ funq->irq_data = data;
+
+ snprintf(funq->irqname, sizeof(funq->irqname),
+ funq->qid ? "%s-q[%d]" : "%s-adminq", devname, funq->qid);
+
+ rc = request_irq(funq_irq(funq), handler, 0, funq->irqname, data);
+ if (rc)
+ funq->irq_handler = NULL;
+
+ return rc;
+}
+
+/* Create all component queues of a funq on the device. */
+int fun_create_queue(struct fun_queue *funq)
+{
+ int rc;
+
+ rc = fun_create_cq(funq);
+ if (rc)
+ return rc;
+
+ if (funq->rq_depth) {
+ rc = fun_create_rq(funq);
+ if (rc)
+ goto release_cq;
+ }
+
+ rc = fun_create_sq(funq);
+ if (rc)
+ goto release_rq;
+
+ return 0;
+
+release_rq:
+ fun_destroy_sq(funq->fdev, funq->rqid);
+release_cq:
+ fun_destroy_cq(funq->fdev, funq->cqid);
+ return rc;
+}
+
+void fun_free_irq(struct fun_queue *funq)
+{
+ if (funq->irq_handler) {
+ unsigned int vector = funq_irq(funq);
+
+ free_irq(vector, funq->irq_data);
+ funq->irq_handler = NULL;
+ funq->irq_data = NULL;
+ }
+}
diff --git a/drivers/net/ethernet/fungible/funcore/fun_queue.h b/drivers/net/ethernet/fungible/funcore/fun_queue.h
new file mode 100644
index 000000000000..7fb53d0ae8b0
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funcore/fun_queue.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef _FUN_QUEUE_H
+#define _FUN_QUEUE_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+struct device;
+struct fun_dev;
+struct fun_queue;
+struct fun_cqe_info;
+struct fun_rsp_common;
+
+typedef void (*cq_callback_t)(struct fun_queue *funq, void *data, void *msg,
+ const struct fun_cqe_info *info);
+
+struct fun_rq_info {
+ dma_addr_t dma;
+ struct page *page;
+};
+
+/* A queue group consisting of an SQ, a CQ, and an optional RQ. */
+struct fun_queue {
+ struct fun_dev *fdev;
+ spinlock_t sq_lock;
+
+ dma_addr_t cq_dma_addr;
+ dma_addr_t sq_dma_addr;
+ dma_addr_t rq_dma_addr;
+
+ u32 __iomem *cq_db;
+ u32 __iomem *sq_db;
+ u32 __iomem *rq_db;
+
+ void *cqes;
+ void *sq_cmds;
+ struct fun_eprq_rqbuf *rqes;
+ struct fun_rq_info *rq_info;
+
+ u32 cqid;
+ u32 sqid;
+ u32 rqid;
+
+ u32 cq_depth;
+ u32 sq_depth;
+ u32 rq_depth;
+
+ u16 cq_head;
+ u16 sq_tail;
+ u16 rq_tail;
+
+ u8 cqe_size_log2;
+ u8 sqe_size_log2;
+
+ u16 cqe_info_offset;
+
+ u16 rq_buf_idx;
+ int rq_buf_offset;
+ u16 num_rqe_to_fill;
+
+ u8 cq_intcoal_usec;
+ u8 cq_intcoal_nentries;
+ u8 sq_intcoal_usec;
+ u8 sq_intcoal_nentries;
+
+ u16 cq_flags;
+ u16 sq_flags;
+ u16 rq_flags;
+
+ /* SQ head writeback */
+ u16 sq_comp;
+
+ volatile __be64 *sq_head;
+
+ cq_callback_t cq_cb;
+ void *cb_data;
+
+ irq_handler_t irq_handler;
+ void *irq_data;
+ s16 cq_vector;
+ u8 cq_phase;
+
+ /* I/O q index */
+ u16 qid;
+
+ char irqname[24];
+};
+
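+/* Address of the SQE at index @pos in a queue's command ring. */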
+static inline void *fun_sqe_at(const struct fun_queue *funq, unsigned int pos)
+{
+ return funq->sq_cmds + (pos << funq->sqe_size_log2);
+}
+
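+/* Advance the SQ tail past the entry at @tail, wrapping as needed, and ring
+ * the SQ doorbell.
+ */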
+static inline void funq_sq_post_tail(struct fun_queue *funq, u16 tail)
+{
+ if (++tail == funq->sq_depth)
+ tail = 0;
+ funq->sq_tail = tail;
+ writel(tail, funq->sq_db);
+}
+
+static inline struct fun_cqe_info *funq_cqe_info(const struct fun_queue *funq,
+ void *cqe)
+{
+ return cqe + funq->cqe_info_offset;
+}
+
+static inline void funq_rq_post(struct fun_queue *funq)
+{
+ writel(funq->rq_tail, funq->rq_db);
+}
+
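+/* Dimensions and settings of a funq's component queues, supplied to
+ * fun_alloc_queue().
+ */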
+struct fun_queue_alloc_req {
+ u8 cqe_size_log2;
+ u8 sqe_size_log2;
+
+ u16 cq_flags;
+ u16 sq_flags;
+ u16 rq_flags;
+
+ u32 cq_depth;
+ u32 sq_depth;
+ u32 rq_depth;
+
+ u8 cq_intcoal_usec;
+ u8 cq_intcoal_nentries;
+ u8 sq_intcoal_usec;
+ u8 sq_intcoal_nentries;
+};
+
+int fun_sq_create(struct fun_dev *fdev, u16 flags, u32 sqid, u32 cqid,
+ u8 sqe_size_log2, u32 sq_depth, dma_addr_t dma_addr,
+ u8 coal_nentries, u8 coal_usec, u32 irq_num,
+ u32 scan_start_id, u32 scan_end_id,
+ u32 rq_buf_size_log2, u32 *sqidp, u32 __iomem **dbp);
+int fun_cq_create(struct fun_dev *fdev, u16 flags, u32 cqid, u32 rqid,
+ u8 cqe_size_log2, u32 cq_depth, dma_addr_t dma_addr,
+ u16 headroom, u16 tailroom, u8 coal_nentries, u8 coal_usec,
+ u32 irq_num, u32 scan_start_id, u32 scan_end_id,
+ u32 *cqidp, u32 __iomem **dbp);
+void *fun_alloc_ring_mem(struct device *dma_dev, size_t depth,
+ size_t hw_desc_sz, size_t sw_desc_size, bool wb,
+ int numa_node, dma_addr_t *dma_addr, void **sw_va,
+ volatile __be64 **wb_va);
+void fun_free_ring_mem(struct device *dma_dev, size_t depth, size_t hw_desc_sz,
+ bool wb, void *hw_va, dma_addr_t dma_addr, void *sw_va);
+
+#define fun_destroy_sq(fdev, sqid) \
+ fun_res_destroy((fdev), FUN_ADMIN_OP_EPSQ, 0, (sqid))
+#define fun_destroy_cq(fdev, cqid) \
+ fun_res_destroy((fdev), FUN_ADMIN_OP_EPCQ, 0, (cqid))
+
+struct fun_queue *fun_alloc_queue(struct fun_dev *fdev, int qid,
+ const struct fun_queue_alloc_req *req);
+void fun_free_queue(struct fun_queue *funq);
+
+static inline void fun_set_cq_callback(struct fun_queue *funq, cq_callback_t cb,
+ void *cb_data)
+{
+ funq->cq_cb = cb;
+ funq->cb_data = cb_data;
+}
+
+int fun_create_rq(struct fun_queue *funq);
+int fun_create_queue(struct fun_queue *funq);
+
+void fun_free_irq(struct fun_queue *funq);
+int fun_request_irq(struct fun_queue *funq, const char *devname,
+ irq_handler_t handler, void *data);
+
+unsigned int __fun_process_cq(struct fun_queue *funq, unsigned int max);
+unsigned int fun_process_cq(struct fun_queue *funq, unsigned int max);
+
+#endif /* _FUN_QUEUE_H */
diff --git a/drivers/net/ethernet/fungible/funeth/Kconfig b/drivers/net/ethernet/fungible/funeth/Kconfig
new file mode 100644
index 000000000000..c72ad9386400
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Fungible Ethernet driver configuration
+#
+
+config FUN_ETH
+ tristate "Fungible Ethernet device driver"
+ depends on PCI && PCI_MSI
+ depends on TLS && TLS_DEVICE || TLS_DEVICE=n
+ select NET_DEVLINK
+ select FUN_CORE
+ help
+ This driver supports the Ethernet functionality of Fungible adapters.
+ It works with both physical and virtual functions.
+
+ To compile this driver as a module, choose M here. The module
+ will be called funeth.
diff --git a/drivers/net/ethernet/fungible/funeth/Makefile b/drivers/net/ethernet/fungible/funeth/Makefile
new file mode 100644
index 000000000000..646d69595b4f
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+ccflags-y += -I$(srctree)/$(src)/../funcore -I$(srctree)/$(src)
+
+obj-$(CONFIG_FUN_ETH) += funeth.o
+
+funeth-y := funeth_main.o funeth_rx.o funeth_tx.o funeth_devlink.o \
+ funeth_ethtool.o
+
+funeth-$(CONFIG_TLS_DEVICE) += funeth_ktls.o
diff --git a/drivers/net/ethernet/fungible/funeth/fun_port.h b/drivers/net/ethernet/fungible/funeth/fun_port.h
new file mode 100644
index 000000000000..0f9da44e3786
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/fun_port.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef _FUN_PORT_H
+#define _FUN_PORT_H
+
+enum port_mac_rx_stats {
+ PORT_MAC_RX_etherStatsOctets = 0x0,
+ PORT_MAC_RX_OctetsReceivedOK = 0x1,
+ PORT_MAC_RX_aAlignmentErrors = 0x2,
+ PORT_MAC_RX_aPAUSEMACCtrlFramesReceived = 0x3,
+ PORT_MAC_RX_aFrameTooLongErrors = 0x4,
+ PORT_MAC_RX_aInRangeLengthErrors = 0x5,
+ PORT_MAC_RX_aFramesReceivedOK = 0x6,
+ PORT_MAC_RX_aFrameCheckSequenceErrors = 0x7,
+ PORT_MAC_RX_VLANReceivedOK = 0x8,
+ PORT_MAC_RX_ifInErrors = 0x9,
+ PORT_MAC_RX_ifInUcastPkts = 0xa,
+ PORT_MAC_RX_ifInMulticastPkts = 0xb,
+ PORT_MAC_RX_ifInBroadcastPkts = 0xc,
+ PORT_MAC_RX_etherStatsDropEvents = 0xd,
+ PORT_MAC_RX_etherStatsPkts = 0xe,
+ PORT_MAC_RX_etherStatsUndersizePkts = 0xf,
+ PORT_MAC_RX_etherStatsPkts64Octets = 0x10,
+ PORT_MAC_RX_etherStatsPkts65to127Octets = 0x11,
+ PORT_MAC_RX_etherStatsPkts128to255Octets = 0x12,
+ PORT_MAC_RX_etherStatsPkts256to511Octets = 0x13,
+ PORT_MAC_RX_etherStatsPkts512to1023Octets = 0x14,
+ PORT_MAC_RX_etherStatsPkts1024to1518Octets = 0x15,
+ PORT_MAC_RX_etherStatsPkts1519toMaxOctets = 0x16,
+ PORT_MAC_RX_etherStatsOversizePkts = 0x17,
+ PORT_MAC_RX_etherStatsJabbers = 0x18,
+ PORT_MAC_RX_etherStatsFragments = 0x19,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_0 = 0x1a,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_1 = 0x1b,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_2 = 0x1c,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_3 = 0x1d,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_4 = 0x1e,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_5 = 0x1f,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_6 = 0x20,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_7 = 0x21,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_8 = 0x22,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_9 = 0x23,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_10 = 0x24,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_11 = 0x25,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_12 = 0x26,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_13 = 0x27,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_14 = 0x28,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_15 = 0x29,
+ PORT_MAC_RX_MACControlFramesReceived = 0x2a,
+ PORT_MAC_RX_STATS_MAX = 0x2b,
+};
+
+enum port_mac_tx_stats {
+ PORT_MAC_TX_etherStatsOctets = 0x0,
+ PORT_MAC_TX_OctetsTransmittedOK = 0x1,
+ PORT_MAC_TX_aPAUSEMACCtrlFramesTransmitted = 0x2,
+ PORT_MAC_TX_aFramesTransmittedOK = 0x3,
+ PORT_MAC_TX_VLANTransmittedOK = 0x4,
+ PORT_MAC_TX_ifOutErrors = 0x5,
+ PORT_MAC_TX_ifOutUcastPkts = 0x6,
+ PORT_MAC_TX_ifOutMulticastPkts = 0x7,
+ PORT_MAC_TX_ifOutBroadcastPkts = 0x8,
+ PORT_MAC_TX_etherStatsPkts64Octets = 0x9,
+ PORT_MAC_TX_etherStatsPkts65to127Octets = 0xa,
+ PORT_MAC_TX_etherStatsPkts128to255Octets = 0xb,
+ PORT_MAC_TX_etherStatsPkts256to511Octets = 0xc,
+ PORT_MAC_TX_etherStatsPkts512to1023Octets = 0xd,
+ PORT_MAC_TX_etherStatsPkts1024to1518Octets = 0xe,
+ PORT_MAC_TX_etherStatsPkts1519toMaxOctets = 0xf,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_0 = 0x10,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_1 = 0x11,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_2 = 0x12,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_3 = 0x13,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_4 = 0x14,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_5 = 0x15,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_6 = 0x16,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_7 = 0x17,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_8 = 0x18,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_9 = 0x19,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_10 = 0x1a,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_11 = 0x1b,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_12 = 0x1c,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_13 = 0x1d,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_14 = 0x1e,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_15 = 0x1f,
+ PORT_MAC_TX_MACControlFramesTransmitted = 0x20,
+ PORT_MAC_TX_etherStatsPkts = 0x21,
+ PORT_MAC_TX_STATS_MAX = 0x22,
+};
+
+enum port_mac_fec_stats {
+ PORT_MAC_FEC_Correctable = 0x0,
+ PORT_MAC_FEC_Uncorrectable = 0x1,
+ PORT_MAC_FEC_STATS_MAX = 0x2,
+};
+
+#endif /* _FUN_PORT_H */
diff --git a/drivers/net/ethernet/fungible/funeth/funeth.h b/drivers/net/ethernet/fungible/funeth/funeth.h
new file mode 100644
index 000000000000..1250e10d21db
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef _FUNETH_H
+#define _FUNETH_H
+
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/net_tstamp.h>
+#include <linux/mutex.h>
+#include <linux/seqlock.h>
+#include <linux/xarray.h>
+#include <net/devlink.h>
+#include "fun_dev.h"
+
+#define ADMIN_SQE_SIZE SZ_128
+#define ADMIN_CQE_SIZE SZ_64
+#define ADMIN_RSP_MAX_LEN (ADMIN_CQE_SIZE - sizeof(struct fun_cqe_info))
+
+#define FUN_MAX_MTU 9024
+
+#define SQ_DEPTH 512U
+#define CQ_DEPTH 1024U
+#define RQ_DEPTH (512U / (PAGE_SIZE / 4096))
+
+#define CQ_INTCOAL_USEC 10
+#define CQ_INTCOAL_NPKT 16
+#define SQ_INTCOAL_USEC 10
+#define SQ_INTCOAL_NPKT 16
+
+#define INVALID_LPORT 0xffff
+
+#define FUN_PORT_CAP_PAUSE_MASK (FUN_PORT_CAP_TX_PAUSE | FUN_PORT_CAP_RX_PAUSE)
+
+struct fun_vport_info {
+ u8 mac[ETH_ALEN];
+ u16 vlan;
+ __be16 vlan_proto;
+ u8 qos;
+ u8 spoofchk:1;
+ u8 trusted:1;
+ unsigned int max_rate;
+};
+
+/* "subclass" of fun_dev for Ethernet functions */
+struct fun_ethdev {
+ struct fun_dev fdev;
+
+ /* the function's network ports */
+ struct net_device **netdevs;
+ unsigned int num_ports;
+
+ /* configuration for the function's virtual ports */
+ unsigned int num_vports;
+ struct fun_vport_info *vport_info;
+
+ struct mutex state_mutex; /* nests inside RTNL if both taken */
+
+ unsigned int nsqs_per_port;
+};
+
+static inline struct fun_ethdev *to_fun_ethdev(struct fun_dev *p)
+{
+ return container_of(p, struct fun_ethdev, fdev);
+}
+
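+/* A group of Rx, Tx, and XDP queues together with their counts, starting
+ * indices, and depths.
+ */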
+struct fun_qset {
+ struct funeth_rxq **rxqs;
+ struct funeth_txq **txqs;
+ struct funeth_txq **xdpqs;
+ unsigned int nrxqs;
+ unsigned int ntxqs;
+ unsigned int nxdpqs;
+ unsigned int rxq_start;
+ unsigned int txq_start;
+ unsigned int xdpq_start;
+ unsigned int cq_depth;
+ unsigned int rq_depth;
+ unsigned int sq_depth;
+ int state;
+};
+
+/* Per netdevice driver state, i.e., netdev_priv. */
+struct funeth_priv {
+ struct fun_dev *fdev;
+ struct pci_dev *pdev;
+ struct net_device *netdev;
+
+ struct funeth_rxq * __rcu *rxqs;
+ struct funeth_txq **txqs;
+ struct funeth_txq * __rcu *xdpqs;
+
+ struct xarray irqs;
+ unsigned int num_tx_irqs;
+ unsigned int num_rx_irqs;
+ unsigned int rx_irq_ofst;
+
+ unsigned int lane_attrs;
+ u16 lport;
+
+ /* link settings */
+ u64 port_caps;
+ u64 advertising;
+ u64 lp_advertising;
+ unsigned int link_speed;
+ u8 xcvr_type;
+ u8 active_fc;
+ u8 active_fec;
+ u8 link_down_reason;
+ seqcount_t link_seq;
+
+ u32 msg_enable;
+
+ unsigned int num_xdpqs;
+
+ /* ethtool, etc. config parameters */
+ unsigned int sq_depth;
+ unsigned int rq_depth;
+ unsigned int cq_depth;
+ unsigned int cq_irq_db;
+ u8 tx_coal_usec;
+ u8 tx_coal_count;
+ u8 rx_coal_usec;
+ u8 rx_coal_count;
+
+ struct hwtstamp_config hwtstamp_cfg;
+
+ /* cumulative queue stats from earlier queue instances */
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_dropped;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_dropped;
+
+ /* RSS */
+ unsigned int rss_hw_id;
+ enum fun_eth_hash_alg hash_algo;
+ u8 rss_key[FUN_ETH_RSS_MAX_KEY_SIZE];
+ unsigned int indir_table_nentries;
+ u32 indir_table[FUN_ETH_RSS_MAX_INDIR_ENT];
+ dma_addr_t rss_dma_addr;
+ void *rss_cfg;
+
+ /* DMA area for port stats */
+ dma_addr_t stats_dma_addr;
+ __be64 *stats;
+
+ struct bpf_prog *xdp_prog;
+
+ struct devlink_port dl_port;
+
+ /* kTLS state */
+ unsigned int ktls_id;
+ atomic64_t tx_tls_add;
+ atomic64_t tx_tls_del;
+ atomic64_t tx_tls_resync;
+};
+
+void fun_set_ethtool_ops(struct net_device *netdev);
+int fun_port_write_cmd(struct funeth_priv *fp, int key, u64 data);
+int fun_port_read_cmd(struct funeth_priv *fp, int key, u64 *data);
+int fun_create_and_bind_tx(struct funeth_priv *fp, u32 sqid);
+int fun_replace_queues(struct net_device *dev, struct fun_qset *newqs,
+ struct netlink_ext_ack *extack);
+int fun_change_num_queues(struct net_device *dev, unsigned int ntx,
+ unsigned int nrx);
+void fun_set_ring_count(struct net_device *netdev, unsigned int ntx,
+ unsigned int nrx);
+int fun_config_rss(struct net_device *dev, int algo, const u8 *key,
+ const u32 *qtable, u8 op);
+
+#endif /* _FUNETH_H */
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_devlink.c b/drivers/net/ethernet/fungible/funeth/funeth_devlink.c
new file mode 100644
index 000000000000..a849b3c6b01f
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_devlink.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include "funeth.h"
+#include "funeth_devlink.h"
+
+static int fun_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static const struct devlink_ops fun_dl_ops = {
+ .info_get = fun_dl_info_get,
+};
+
+struct devlink *fun_devlink_alloc(struct device *dev)
+{
+ return devlink_alloc(&fun_dl_ops, sizeof(struct fun_ethdev), dev);
+}
+
+void fun_devlink_free(struct devlink *devlink)
+{
+ devlink_free(devlink);
+}
+
+void fun_devlink_register(struct devlink *devlink)
+{
+ devlink_register(devlink);
+}
+
+void fun_devlink_unregister(struct devlink *devlink)
+{
+ devlink_unregister(devlink);
+}
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_devlink.h b/drivers/net/ethernet/fungible/funeth/funeth_devlink.h
new file mode 100644
index 000000000000..e40464d57ff4
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_devlink.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef __FUNETH_DEVLINK_H
+#define __FUNETH_DEVLINK_H
+
+#include <net/devlink.h>
+
+struct devlink *fun_devlink_alloc(struct device *dev);
+void fun_devlink_free(struct devlink *devlink);
+void fun_devlink_register(struct devlink *devlink);
+void fun_devlink_unregister(struct devlink *devlink);
+
+#endif /* __FUNETH_DEVLINK_H */
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
new file mode 100644
index 000000000000..d081168c95fa
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
@@ -0,0 +1,1162 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include <linux/ethtool.h>
+#include <linux/linkmode.h>
+#include <linux/netdevice.h>
+#include <linux/nvme.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include "funeth.h"
+#include "fun_port.h"
+#include "funeth_txrx.h"
+
+/* Min queue depth. The smallest power-of-2 supporting jumbo frames with 4K
+ * pages is 8. Require it for all types of queues though some could work with
+ * fewer entries.
+ */
+#define FUNETH_MIN_QDEPTH 8
+
+static const char mac_tx_stat_names[][ETH_GSTRING_LEN] = {
+ "mac_tx_octets_total",
+ "mac_tx_frames_total",
+ "mac_tx_vlan_frames_ok",
+ "mac_tx_unicast_frames",
+ "mac_tx_multicast_frames",
+ "mac_tx_broadcast_frames",
+ "mac_tx_errors",
+ "mac_tx_CBFCPAUSE0",
+ "mac_tx_CBFCPAUSE1",
+ "mac_tx_CBFCPAUSE2",
+ "mac_tx_CBFCPAUSE3",
+ "mac_tx_CBFCPAUSE4",
+ "mac_tx_CBFCPAUSE5",
+ "mac_tx_CBFCPAUSE6",
+ "mac_tx_CBFCPAUSE7",
+ "mac_tx_CBFCPAUSE8",
+ "mac_tx_CBFCPAUSE9",
+ "mac_tx_CBFCPAUSE10",
+ "mac_tx_CBFCPAUSE11",
+ "mac_tx_CBFCPAUSE12",
+ "mac_tx_CBFCPAUSE13",
+ "mac_tx_CBFCPAUSE14",
+ "mac_tx_CBFCPAUSE15",
+};
+
+static const char mac_rx_stat_names[][ETH_GSTRING_LEN] = {
+ "mac_rx_octets_total",
+ "mac_rx_frames_total",
+ "mac_rx_VLAN_frames_ok",
+ "mac_rx_unicast_frames",
+ "mac_rx_multicast_frames",
+ "mac_rx_broadcast_frames",
+ "mac_rx_drop_events",
+ "mac_rx_errors",
+ "mac_rx_alignment_errors",
+ "mac_rx_CBFCPAUSE0",
+ "mac_rx_CBFCPAUSE1",
+ "mac_rx_CBFCPAUSE2",
+ "mac_rx_CBFCPAUSE3",
+ "mac_rx_CBFCPAUSE4",
+ "mac_rx_CBFCPAUSE5",
+ "mac_rx_CBFCPAUSE6",
+ "mac_rx_CBFCPAUSE7",
+ "mac_rx_CBFCPAUSE8",
+ "mac_rx_CBFCPAUSE9",
+ "mac_rx_CBFCPAUSE10",
+ "mac_rx_CBFCPAUSE11",
+ "mac_rx_CBFCPAUSE12",
+ "mac_rx_CBFCPAUSE13",
+ "mac_rx_CBFCPAUSE14",
+ "mac_rx_CBFCPAUSE15",
+};
+
+static const char * const txq_stat_names[] = {
+ "tx_pkts",
+ "tx_bytes",
+ "tx_cso",
+ "tx_tso",
+ "tx_encapsulated_tso",
+ "tx_more",
+ "tx_queue_stops",
+ "tx_queue_restarts",
+ "tx_mapping_errors",
+ "tx_tls_encrypted_packets",
+ "tx_tls_encrypted_bytes",
+ "tx_tls_ooo",
+ "tx_tls_drop_no_sync_data",
+};
+
+static const char * const xdpq_stat_names[] = {
+ "tx_xdp_pkts",
+ "tx_xdp_bytes",
+ "tx_xdp_full",
+ "tx_xdp_mapping_errors",
+};
+
+static const char * const rxq_stat_names[] = {
+ "rx_pkts",
+ "rx_bytes",
+ "rx_cso",
+ "gro_pkts",
+ "gro_merged",
+ "rx_xdp_tx",
+ "rx_xdp_redir",
+ "rx_xdp_drops",
+ "rx_buffers",
+ "rx_page_allocs",
+ "rx_drops",
+ "rx_budget_exhausted",
+ "rx_mapping_errors",
+};
+
+static const char * const tls_stat_names[] = {
+ "tx_tls_ctx",
+ "tx_tls_del",
+ "tx_tls_resync",
+};
+
+static void fun_link_modes_to_ethtool(u64 modes,
+ unsigned long *ethtool_modes_map)
+{
+#define ADD_LINK_MODE(mode) \
+ __set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, ethtool_modes_map)
+
+ if (modes & FUN_PORT_CAP_AUTONEG)
+ ADD_LINK_MODE(Autoneg);
+ if (modes & FUN_PORT_CAP_1000_X)
+ ADD_LINK_MODE(1000baseX_Full);
+ if (modes & FUN_PORT_CAP_10G_R) {
+ ADD_LINK_MODE(10000baseCR_Full);
+ ADD_LINK_MODE(10000baseSR_Full);
+ ADD_LINK_MODE(10000baseLR_Full);
+ ADD_LINK_MODE(10000baseER_Full);
+ }
+ if (modes & FUN_PORT_CAP_25G_R) {
+ ADD_LINK_MODE(25000baseCR_Full);
+ ADD_LINK_MODE(25000baseSR_Full);
+ }
+ if (modes & FUN_PORT_CAP_40G_R4) {
+ ADD_LINK_MODE(40000baseCR4_Full);
+ ADD_LINK_MODE(40000baseSR4_Full);
+ ADD_LINK_MODE(40000baseLR4_Full);
+ }
+ if (modes & FUN_PORT_CAP_50G_R2) {
+ ADD_LINK_MODE(50000baseCR2_Full);
+ ADD_LINK_MODE(50000baseSR2_Full);
+ }
+ if (modes & FUN_PORT_CAP_50G_R) {
+ ADD_LINK_MODE(50000baseCR_Full);
+ ADD_LINK_MODE(50000baseSR_Full);
+ ADD_LINK_MODE(50000baseLR_ER_FR_Full);
+ }
+ if (modes & FUN_PORT_CAP_100G_R4) {
+ ADD_LINK_MODE(100000baseCR4_Full);
+ ADD_LINK_MODE(100000baseSR4_Full);
+ ADD_LINK_MODE(100000baseLR4_ER4_Full);
+ }
+ if (modes & FUN_PORT_CAP_100G_R2) {
+ ADD_LINK_MODE(100000baseCR2_Full);
+ ADD_LINK_MODE(100000baseSR2_Full);
+ ADD_LINK_MODE(100000baseLR2_ER2_FR2_Full);
+ }
+ if (modes & FUN_PORT_CAP_FEC_NONE)
+ ADD_LINK_MODE(FEC_NONE);
+ if (modes & FUN_PORT_CAP_FEC_FC)
+ ADD_LINK_MODE(FEC_BASER);
+ if (modes & FUN_PORT_CAP_FEC_RS)
+ ADD_LINK_MODE(FEC_RS);
+ if (modes & FUN_PORT_CAP_RX_PAUSE)
+ ADD_LINK_MODE(Pause);
+
+#undef ADD_LINK_MODE
+}
+
+static void set_asym_pause(u64 advertising, struct ethtool_link_ksettings *ks)
+{
+ bool rx_pause, tx_pause;
+
+ rx_pause = advertising & FUN_PORT_CAP_RX_PAUSE;
+ tx_pause = advertising & FUN_PORT_CAP_TX_PAUSE;
+ if (tx_pause ^ rx_pause)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ Asym_Pause);
+}
+
+static unsigned int fun_port_type(unsigned int xcvr)
+{
+ if (!xcvr)
+ return PORT_NONE;
+
+ switch (xcvr & 7) {
+ case FUN_XCVR_BASET:
+ return PORT_TP;
+ case FUN_XCVR_CU:
+ return PORT_DA;
+ default:
+ return PORT_FIBRE;
+ }
+}
+
+static int fun_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ks)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+ unsigned int seq, speed, xcvr;
+ u64 lp_advertising;
+ bool link_up;
+
+ ethtool_link_ksettings_zero_link_mode(ks, supported);
+ ethtool_link_ksettings_zero_link_mode(ks, advertising);
+ ethtool_link_ksettings_zero_link_mode(ks, lp_advertising);
+
+ /* Link settings change asynchronously, take a consistent snapshot */
+ do {
+ seq = read_seqcount_begin(&fp->link_seq);
+ link_up = netif_carrier_ok(netdev);
+ speed = fp->link_speed;
+ xcvr = fp->xcvr_type;
+ lp_advertising = fp->lp_advertising;
+ } while (read_seqcount_retry(&fp->link_seq, seq));
+
+ if (link_up) {
+ ks->base.speed = speed;
+ ks->base.duplex = DUPLEX_FULL;
+ fun_link_modes_to_ethtool(lp_advertising,
+ ks->link_modes.lp_advertising);
+ } else {
+ ks->base.speed = SPEED_UNKNOWN;
+ ks->base.duplex = DUPLEX_UNKNOWN;
+ }
+
+ ks->base.autoneg = (fp->advertising & FUN_PORT_CAP_AUTONEG) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE;
+ ks->base.port = fun_port_type(xcvr);
+
+ fun_link_modes_to_ethtool(fp->port_caps, ks->link_modes.supported);
+ if (fp->port_caps & (FUN_PORT_CAP_RX_PAUSE | FUN_PORT_CAP_TX_PAUSE))
+ ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause);
+
+ fun_link_modes_to_ethtool(fp->advertising, ks->link_modes.advertising);
+ set_asym_pause(fp->advertising, ks);
+ return 0;
+}
+
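+/* Convert the link modes advertised through ethtool to a mask of port
+ * capabilities.
+ */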
+static u64 fun_advert_modes(const struct ethtool_link_ksettings *ks)
+{
+ u64 modes = 0;
+
+#define HAS_MODE(mode) \
+ ethtool_link_ksettings_test_link_mode(ks, advertising, mode)
+
+ if (HAS_MODE(1000baseX_Full))
+ modes |= FUN_PORT_CAP_1000_X;
+ if (HAS_MODE(10000baseCR_Full) || HAS_MODE(10000baseSR_Full) ||
+ HAS_MODE(10000baseLR_Full) || HAS_MODE(10000baseER_Full))
+ modes |= FUN_PORT_CAP_10G_R;
+ if (HAS_MODE(25000baseCR_Full) || HAS_MODE(25000baseSR_Full))
+ modes |= FUN_PORT_CAP_25G_R;
+ if (HAS_MODE(40000baseCR4_Full) || HAS_MODE(40000baseSR4_Full) ||
+ HAS_MODE(40000baseLR4_Full))
+ modes |= FUN_PORT_CAP_40G_R4;
+ if (HAS_MODE(50000baseCR2_Full) || HAS_MODE(50000baseSR2_Full))
+ modes |= FUN_PORT_CAP_50G_R2;
+ if (HAS_MODE(50000baseCR_Full) || HAS_MODE(50000baseSR_Full) ||
+ HAS_MODE(50000baseLR_ER_FR_Full))
+ modes |= FUN_PORT_CAP_50G_R;
+ if (HAS_MODE(100000baseCR4_Full) || HAS_MODE(100000baseSR4_Full) ||
+ HAS_MODE(100000baseLR4_ER4_Full))
+ modes |= FUN_PORT_CAP_100G_R4;
+ if (HAS_MODE(100000baseCR2_Full) || HAS_MODE(100000baseSR2_Full) ||
+ HAS_MODE(100000baseLR2_ER2_FR2_Full))
+ modes |= FUN_PORT_CAP_100G_R2;
+
+ return modes;
+#undef HAS_MODE
+}
+
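+/* Port capability bits able to provide the given ethtool speed. */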
+static u64 fun_speed_to_link_mode(unsigned int speed)
+{
+ switch (speed) {
+ case SPEED_100000:
+ return FUN_PORT_CAP_100G_R4 | FUN_PORT_CAP_100G_R2;
+ case SPEED_50000:
+ return FUN_PORT_CAP_50G_R | FUN_PORT_CAP_50G_R2;
+ case SPEED_40000:
+ return FUN_PORT_CAP_40G_R4;
+ case SPEED_25000:
+ return FUN_PORT_CAP_25G_R;
+ case SPEED_10000:
+ return FUN_PORT_CAP_10G_R;
+ case SPEED_1000:
+ return FUN_PORT_CAP_1000_X;
+ default:
+ return 0;
+ }
+}
+
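+/* Write a new set of advertised capabilities to the port and cache it on
+ * success; a no-op if nothing changed.
+ */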
+static int fun_change_advert(struct funeth_priv *fp, u64 new_advert)
+{
+ int err;
+
+ if (new_advert == fp->advertising)
+ return 0;
+
+ err = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_ADVERT, new_advert);
+ if (!err)
+ fp->advertising = new_advert;
+ return err;
+}
+
+#define FUN_PORT_CAP_FEC_MASK \
+ (FUN_PORT_CAP_FEC_NONE | FUN_PORT_CAP_FEC_FC | FUN_PORT_CAP_FEC_RS)
+
+static int fun_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *ks)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
+ struct funeth_priv *fp = netdev_priv(netdev);
+ u64 new_advert;
+
+ /* eswitch ports don't support mode changes */
+ if (fp->port_caps & FUN_PORT_CAP_VPORT)
+ return -EOPNOTSUPP;
+
+ if (ks->base.duplex == DUPLEX_HALF)
+ return -EINVAL;
+ if (ks->base.autoneg == AUTONEG_ENABLE &&
+ !(fp->port_caps & FUN_PORT_CAP_AUTONEG))
+ return -EINVAL;
+
+ if (ks->base.autoneg == AUTONEG_ENABLE) {
+ if (linkmode_empty(ks->link_modes.advertising))
+ return -EINVAL;
+
+ fun_link_modes_to_ethtool(fp->port_caps, supported);
+ if (!linkmode_subset(ks->link_modes.advertising, supported))
+ return -EINVAL;
+
+ new_advert = fun_advert_modes(ks) | FUN_PORT_CAP_AUTONEG;
+ } else {
+ new_advert = fun_speed_to_link_mode(ks->base.speed);
+ new_advert &= fp->port_caps;
+ if (!new_advert)
+ return -EINVAL;
+ }
+ new_advert |= fp->advertising &
+ (FUN_PORT_CAP_PAUSE_MASK | FUN_PORT_CAP_FEC_MASK);
+
+ return fun_change_advert(fp, new_advert);
+}
+
+static void fun_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+ u8 active_pause = fp->active_fc;
+
+ pause->rx_pause = !!(active_pause & FUN_PORT_CAP_RX_PAUSE);
+ pause->tx_pause = !!(active_pause & FUN_PORT_CAP_TX_PAUSE);
+ pause->autoneg = !!(fp->advertising & FUN_PORT_CAP_AUTONEG);
+}
+
+static int fun_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ u64 new_advert;
+
+ if (fp->port_caps & FUN_PORT_CAP_VPORT)
+ return -EOPNOTSUPP;
+ /* Forcing PAUSE settings with AN enabled is unsupported. */
+ if (!pause->autoneg && (fp->advertising & FUN_PORT_CAP_AUTONEG))
+ return -EOPNOTSUPP;
+ if (pause->autoneg && !(fp->advertising & FUN_PORT_CAP_AUTONEG))
+ return -EINVAL;
+ if (pause->tx_pause && !(fp->port_caps & FUN_PORT_CAP_TX_PAUSE))
+ return -EINVAL;
+ if (pause->rx_pause && !(fp->port_caps & FUN_PORT_CAP_RX_PAUSE))
+ return -EINVAL;
+
+ new_advert = fp->advertising & ~FUN_PORT_CAP_PAUSE_MASK;
+ if (pause->tx_pause)
+ new_advert |= FUN_PORT_CAP_TX_PAUSE;
+ if (pause->rx_pause)
+ new_advert |= FUN_PORT_CAP_RX_PAUSE;
+
+ return fun_change_advert(fp, new_advert);
+}
+
+static int fun_restart_an(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!(fp->advertising & FUN_PORT_CAP_AUTONEG))
+ return -EOPNOTSUPP;
+
+ return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_ADVERT,
+ FUN_PORT_CAP_AUTONEG);
+}
+
+static int fun_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ unsigned int beacon;
+
+ if (fp->port_caps & FUN_PORT_CAP_VPORT)
+ return -EOPNOTSUPP;
+ if (state != ETHTOOL_ID_ACTIVE && state != ETHTOOL_ID_INACTIVE)
+ return -EOPNOTSUPP;
+
+ beacon = state == ETHTOOL_ID_ACTIVE ? FUN_PORT_LED_BEACON_ON :
+ FUN_PORT_LED_BEACON_OFF;
+ return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_LED, beacon);
+}
+
+static void fun_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(fp->pdev), sizeof(info->bus_info));
+}
+
+static u32 fun_get_msglevel(struct net_device *netdev)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ return fp->msg_enable;
+}
+
+static void fun_set_msglevel(struct net_device *netdev, u32 value)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+
+ fp->msg_enable = value;
+}
+
+static int fun_get_regs_len(struct net_device *dev)
+{
+ return NVME_REG_ACQ + sizeof(u64);
+}
+
+static void fun_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *buf)
+{
+ const struct funeth_priv *fp = netdev_priv(dev);
+ void __iomem *bar = fp->fdev->bar;
+
+ regs->version = 0;
+ *(u64 *)(buf + NVME_REG_CAP) = readq(bar + NVME_REG_CAP);
+ *(u32 *)(buf + NVME_REG_VS) = readl(bar + NVME_REG_VS);
+ *(u32 *)(buf + NVME_REG_INTMS) = readl(bar + NVME_REG_INTMS);
+ *(u32 *)(buf + NVME_REG_INTMC) = readl(bar + NVME_REG_INTMC);
+ *(u32 *)(buf + NVME_REG_CC) = readl(bar + NVME_REG_CC);
+ *(u32 *)(buf + NVME_REG_CSTS) = readl(bar + NVME_REG_CSTS);
+ *(u32 *)(buf + NVME_REG_AQA) = readl(bar + NVME_REG_AQA);
+ *(u64 *)(buf + NVME_REG_ASQ) = readq(bar + NVME_REG_ASQ);
+ *(u64 *)(buf + NVME_REG_ACQ) = readq(bar + NVME_REG_ACQ);
+}
+
+static int fun_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kcoal,
+ struct netlink_ext_ack *ext_ack)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ coal->rx_coalesce_usecs = fp->rx_coal_usec;
+ coal->rx_max_coalesced_frames = fp->rx_coal_count;
+ coal->use_adaptive_rx_coalesce = !fp->cq_irq_db;
+ coal->tx_coalesce_usecs = fp->tx_coal_usec;
+ coal->tx_max_coalesced_frames = fp->tx_coal_count;
+ return 0;
+}
+
+static int fun_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kcoal,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct funeth_rxq **rxqs;
+ unsigned int i, db_val;
+
+ if (coal->rx_coalesce_usecs > FUN_DB_INTCOAL_USEC_M ||
+ coal->rx_max_coalesced_frames > FUN_DB_INTCOAL_ENTRIES_M ||
+ (coal->rx_coalesce_usecs | coal->rx_max_coalesced_frames) == 0 ||
+ coal->tx_coalesce_usecs > FUN_DB_INTCOAL_USEC_M ||
+ coal->tx_max_coalesced_frames > FUN_DB_INTCOAL_ENTRIES_M ||
+ (coal->tx_coalesce_usecs | coal->tx_max_coalesced_frames) == 0)
+ return -EINVAL;
+
+ /* a timer is required if there's any coalescing */
+ if ((coal->rx_max_coalesced_frames > 1 && !coal->rx_coalesce_usecs) ||
+ (coal->tx_max_coalesced_frames > 1 && !coal->tx_coalesce_usecs))
+ return -EINVAL;
+
+ fp->rx_coal_usec = coal->rx_coalesce_usecs;
+ fp->rx_coal_count = coal->rx_max_coalesced_frames;
+ fp->tx_coal_usec = coal->tx_coalesce_usecs;
+ fp->tx_coal_count = coal->tx_max_coalesced_frames;
+
+ db_val = FUN_IRQ_CQ_DB(fp->rx_coal_usec, fp->rx_coal_count);
+ WRITE_ONCE(fp->cq_irq_db, db_val);
+
+ rxqs = rtnl_dereference(fp->rxqs);
+ if (!rxqs)
+ return 0;
+
+ for (i = 0; i < netdev->real_num_rx_queues; i++)
+ WRITE_ONCE(rxqs[i]->irq_db_val, db_val);
+
+ db_val = FUN_IRQ_SQ_DB(fp->tx_coal_usec, fp->tx_coal_count);
+ for (i = 0; i < netdev->real_num_tx_queues; i++)
+ WRITE_ONCE(fp->txqs[i]->irq_db_val, db_val);
+
+ return 0;
+}
+
+static void fun_get_channels(struct net_device *netdev,
+ struct ethtool_channels *chan)
+{
+ chan->max_rx = netdev->num_rx_queues;
+ chan->rx_count = netdev->real_num_rx_queues;
+
+ chan->max_tx = netdev->num_tx_queues;
+ chan->tx_count = netdev->real_num_tx_queues;
+}
+
+static int fun_set_channels(struct net_device *netdev,
+ struct ethtool_channels *chan)
+{
+ if (!chan->tx_count || !chan->rx_count)
+ return -EINVAL;
+
+ if (chan->tx_count == netdev->real_num_tx_queues &&
+ chan->rx_count == netdev->real_num_rx_queues)
+ return 0;
+
+ if (netif_running(netdev))
+ return fun_change_num_queues(netdev, chan->tx_count,
+ chan->rx_count);
+
+ fun_set_ring_count(netdev, chan->tx_count, chan->rx_count);
+ return 0;
+}
+
+static void fun_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kring,
+ struct netlink_ext_ack *extack)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+ unsigned int max_depth = fp->fdev->q_depth;
+
+ /* We size CQs to be twice the RQ depth so max RQ depth is half the
+ * max queue depth.
+ */
+ ring->rx_max_pending = max_depth / 2;
+ ring->tx_max_pending = max_depth;
+
+ ring->rx_pending = fp->rq_depth;
+ ring->tx_pending = fp->sq_depth;
+
+ kring->rx_buf_len = PAGE_SIZE;
+ kring->cqe_size = FUNETH_CQE_SIZE;
+}
+
+static int fun_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kring,
+ struct netlink_ext_ack *extack)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ int rc;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ /* queue depths must be powers-of-2 */
+ if (!is_power_of_2(ring->rx_pending) ||
+ !is_power_of_2(ring->tx_pending))
+ return -EINVAL;
+
+ if (ring->rx_pending < FUNETH_MIN_QDEPTH ||
+ ring->tx_pending < FUNETH_MIN_QDEPTH)
+ return -EINVAL;
+
+ if (fp->sq_depth == ring->tx_pending &&
+ fp->rq_depth == ring->rx_pending)
+ return 0;
+
+ if (netif_running(netdev)) {
+ struct fun_qset req = {
+ .cq_depth = 2 * ring->rx_pending,
+ .rq_depth = ring->rx_pending,
+ .sq_depth = ring->tx_pending
+ };
+
+ rc = fun_replace_queues(netdev, &req, extack);
+ if (rc)
+ return rc;
+ }
+
+ fp->sq_depth = ring->tx_pending;
+ fp->rq_depth = ring->rx_pending;
+ fp->cq_depth = 2 * fp->rq_depth;
+ return 0;
+}
+
+static int fun_get_sset_count(struct net_device *dev, int sset)
+{
+ const struct funeth_priv *fp = netdev_priv(dev);
+ int n;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ n = (dev->real_num_tx_queues + 1) * ARRAY_SIZE(txq_stat_names) +
+ (dev->real_num_rx_queues + 1) * ARRAY_SIZE(rxq_stat_names) +
+ (fp->num_xdpqs + 1) * ARRAY_SIZE(xdpq_stat_names) +
+ ARRAY_SIZE(tls_stat_names);
+ if (fp->port_caps & FUN_PORT_CAP_STATS) {
+ n += ARRAY_SIZE(mac_tx_stat_names) +
+ ARRAY_SIZE(mac_rx_stat_names);
+ }
+ return n;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void fun_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+ unsigned int i, j;
+ u8 *p = data;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ if (fp->port_caps & FUN_PORT_CAP_STATS) {
+ memcpy(p, mac_tx_stat_names, sizeof(mac_tx_stat_names));
+ p += sizeof(mac_tx_stat_names);
+ memcpy(p, mac_rx_stat_names, sizeof(mac_rx_stat_names));
+ p += sizeof(mac_rx_stat_names);
+ }
+
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
+ for (j = 0; j < ARRAY_SIZE(txq_stat_names); j++)
+ ethtool_sprintf(&p, "%s[%u]", txq_stat_names[j],
+ i);
+ }
+ for (j = 0; j < ARRAY_SIZE(txq_stat_names); j++)
+ ethtool_sprintf(&p, txq_stat_names[j]);
+
+ for (i = 0; i < fp->num_xdpqs; i++) {
+ for (j = 0; j < ARRAY_SIZE(xdpq_stat_names); j++)
+ ethtool_sprintf(&p, "%s[%u]",
+ xdpq_stat_names[j], i);
+ }
+ for (j = 0; j < ARRAY_SIZE(xdpq_stat_names); j++)
+ ethtool_sprintf(&p, xdpq_stat_names[j]);
+
+ for (i = 0; i < netdev->real_num_rx_queues; i++) {
+ for (j = 0; j < ARRAY_SIZE(rxq_stat_names); j++)
+ ethtool_sprintf(&p, "%s[%u]", rxq_stat_names[j],
+ i);
+ }
+ for (j = 0; j < ARRAY_SIZE(rxq_stat_names); j++)
+ ethtool_sprintf(&p, rxq_stat_names[j]);
+
+ for (j = 0; j < ARRAY_SIZE(tls_stat_names); j++)
+ ethtool_sprintf(&p, tls_stat_names[j]);
+ break;
+ default:
+ break;
+ }
+}
+
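+/* Copy the port MAC Tx/Rx counters out of the device DMA stats area. The
+ * order here must match the corresponding stat name arrays.
+ */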
+static u64 *get_mac_stats(const struct funeth_priv *fp, u64 *data)
+{
+#define TX_STAT(s) \
+ *data++ = be64_to_cpu(fp->stats[PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_##s])
+
+ TX_STAT(etherStatsOctets);
+ TX_STAT(etherStatsPkts);
+ TX_STAT(VLANTransmittedOK);
+ TX_STAT(ifOutUcastPkts);
+ TX_STAT(ifOutMulticastPkts);
+ TX_STAT(ifOutBroadcastPkts);
+ TX_STAT(ifOutErrors);
+ TX_STAT(CBFCPAUSEFramesTransmitted_0);
+ TX_STAT(CBFCPAUSEFramesTransmitted_1);
+ TX_STAT(CBFCPAUSEFramesTransmitted_2);
+ TX_STAT(CBFCPAUSEFramesTransmitted_3);
+ TX_STAT(CBFCPAUSEFramesTransmitted_4);
+ TX_STAT(CBFCPAUSEFramesTransmitted_5);
+ TX_STAT(CBFCPAUSEFramesTransmitted_6);
+ TX_STAT(CBFCPAUSEFramesTransmitted_7);
+ TX_STAT(CBFCPAUSEFramesTransmitted_8);
+ TX_STAT(CBFCPAUSEFramesTransmitted_9);
+ TX_STAT(CBFCPAUSEFramesTransmitted_10);
+ TX_STAT(CBFCPAUSEFramesTransmitted_11);
+ TX_STAT(CBFCPAUSEFramesTransmitted_12);
+ TX_STAT(CBFCPAUSEFramesTransmitted_13);
+ TX_STAT(CBFCPAUSEFramesTransmitted_14);
+ TX_STAT(CBFCPAUSEFramesTransmitted_15);
+
+#define RX_STAT(s) *data++ = be64_to_cpu(fp->stats[PORT_MAC_RX_##s])
+
+ RX_STAT(etherStatsOctets);
+ RX_STAT(etherStatsPkts);
+ RX_STAT(VLANReceivedOK);
+ RX_STAT(ifInUcastPkts);
+ RX_STAT(ifInMulticastPkts);
+ RX_STAT(ifInBroadcastPkts);
+ RX_STAT(etherStatsDropEvents);
+ RX_STAT(ifInErrors);
+ RX_STAT(aAlignmentErrors);
+ RX_STAT(CBFCPAUSEFramesReceived_0);
+ RX_STAT(CBFCPAUSEFramesReceived_1);
+ RX_STAT(CBFCPAUSEFramesReceived_2);
+ RX_STAT(CBFCPAUSEFramesReceived_3);
+ RX_STAT(CBFCPAUSEFramesReceived_4);
+ RX_STAT(CBFCPAUSEFramesReceived_5);
+ RX_STAT(CBFCPAUSEFramesReceived_6);
+ RX_STAT(CBFCPAUSEFramesReceived_7);
+ RX_STAT(CBFCPAUSEFramesReceived_8);
+ RX_STAT(CBFCPAUSEFramesReceived_9);
+ RX_STAT(CBFCPAUSEFramesReceived_10);
+ RX_STAT(CBFCPAUSEFramesReceived_11);
+ RX_STAT(CBFCPAUSEFramesReceived_12);
+ RX_STAT(CBFCPAUSEFramesReceived_13);
+ RX_STAT(CBFCPAUSEFramesReceived_14);
+ RX_STAT(CBFCPAUSEFramesReceived_15);
+
+ return data;
+
+#undef TX_STAT
+#undef RX_STAT
+}
+
+static void fun_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+ struct funeth_txq_stats txs;
+ struct funeth_rxq_stats rxs;
+ struct funeth_txq **xdpqs;
+ struct funeth_rxq **rxqs;
+ unsigned int i, start;
+ u64 *totals, *tot;
+
+ if (fp->port_caps & FUN_PORT_CAP_STATS)
+ data = get_mac_stats(fp, data);
+
+ rxqs = rtnl_dereference(fp->rxqs);
+ if (!rxqs)
+ return;
+
+#define ADD_STAT(cnt) do { \
+ *data = (cnt); *tot++ += *data++; \
+} while (0)
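+/* ADD_STAT stores a per-queue value and also accumulates it into the row of
+ * per-type totals that directly follows the per-queue entries.
+ */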
+
+ /* Tx queues */
+ totals = data + netdev->real_num_tx_queues * ARRAY_SIZE(txq_stat_names);
+
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
+ tot = totals;
+
+ FUN_QSTAT_READ(fp->txqs[i], start, txs);
+
+ ADD_STAT(txs.tx_pkts);
+ ADD_STAT(txs.tx_bytes);
+ ADD_STAT(txs.tx_cso);
+ ADD_STAT(txs.tx_tso);
+ ADD_STAT(txs.tx_encap_tso);
+ ADD_STAT(txs.tx_more);
+ ADD_STAT(txs.tx_nstops);
+ ADD_STAT(txs.tx_nrestarts);
+ ADD_STAT(txs.tx_map_err);
+ ADD_STAT(txs.tx_tls_pkts);
+ ADD_STAT(txs.tx_tls_bytes);
+ ADD_STAT(txs.tx_tls_fallback);
+ ADD_STAT(txs.tx_tls_drops);
+ }
+ data += ARRAY_SIZE(txq_stat_names);
+
+ /* XDP Tx queues */
+ xdpqs = rtnl_dereference(fp->xdpqs);
+ totals = data + fp->num_xdpqs * ARRAY_SIZE(xdpq_stat_names);
+
+ for (i = 0; i < fp->num_xdpqs; i++) {
+ tot = totals;
+
+ FUN_QSTAT_READ(xdpqs[i], start, txs);
+
+ ADD_STAT(txs.tx_pkts);
+ ADD_STAT(txs.tx_bytes);
+ ADD_STAT(txs.tx_xdp_full);
+ ADD_STAT(txs.tx_map_err);
+ }
+ data += ARRAY_SIZE(xdpq_stat_names);
+
+ /* Rx queues */
+ totals = data + netdev->real_num_rx_queues * ARRAY_SIZE(rxq_stat_names);
+
+ for (i = 0; i < netdev->real_num_rx_queues; i++) {
+ tot = totals;
+
+ FUN_QSTAT_READ(rxqs[i], start, rxs);
+
+ ADD_STAT(rxs.rx_pkts);
+ ADD_STAT(rxs.rx_bytes);
+ ADD_STAT(rxs.rx_cso);
+ ADD_STAT(rxs.gro_pkts);
+ ADD_STAT(rxs.gro_merged);
+ ADD_STAT(rxs.xdp_tx);
+ ADD_STAT(rxs.xdp_redir);
+ ADD_STAT(rxs.xdp_drops);
+ ADD_STAT(rxs.rx_bufs);
+ ADD_STAT(rxs.rx_page_alloc);
+ ADD_STAT(rxs.rx_mem_drops + rxs.xdp_err);
+ ADD_STAT(rxs.rx_budget);
+ ADD_STAT(rxs.rx_map_err);
+ }
+ data += ARRAY_SIZE(rxq_stat_names);
+#undef ADD_STAT
+
+ *data++ = atomic64_read(&fp->tx_tls_add);
+ *data++ = atomic64_read(&fp->tx_tls_del);
+ *data++ = atomic64_read(&fp->tx_tls_resync);
+}
+
+#define RX_STAT(fp, s) be64_to_cpu((fp)->stats[PORT_MAC_RX_##s])
+#define TX_STAT(fp, s) \
+ be64_to_cpu((fp)->stats[PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_##s])
+#define FEC_STAT(fp, s) \
+ be64_to_cpu((fp)->stats[PORT_MAC_RX_STATS_MAX + \
+ PORT_MAC_TX_STATS_MAX + PORT_MAC_FEC_##s])
+
+static void fun_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *stats)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!(fp->port_caps & FUN_PORT_CAP_STATS))
+ return;
+
+ stats->tx_pause_frames = TX_STAT(fp, aPAUSEMACCtrlFramesTransmitted);
+ stats->rx_pause_frames = RX_STAT(fp, aPAUSEMACCtrlFramesReceived);
+}
+
+static void fun_get_802_3_stats(struct net_device *netdev,
+ struct ethtool_eth_mac_stats *stats)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!(fp->port_caps & FUN_PORT_CAP_STATS))
+ return;
+
+ stats->FramesTransmittedOK = TX_STAT(fp, aFramesTransmittedOK);
+ stats->FramesReceivedOK = RX_STAT(fp, aFramesReceivedOK);
+ stats->FrameCheckSequenceErrors = RX_STAT(fp, aFrameCheckSequenceErrors);
+ stats->OctetsTransmittedOK = TX_STAT(fp, OctetsTransmittedOK);
+ stats->OctetsReceivedOK = RX_STAT(fp, OctetsReceivedOK);
+ stats->InRangeLengthErrors = RX_STAT(fp, aInRangeLengthErrors);
+ stats->FrameTooLongErrors = RX_STAT(fp, aFrameTooLongErrors);
+}
+
+static void fun_get_802_3_ctrl_stats(struct net_device *netdev,
+ struct ethtool_eth_ctrl_stats *stats)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!(fp->port_caps & FUN_PORT_CAP_STATS))
+ return;
+
+ stats->MACControlFramesTransmitted = TX_STAT(fp, MACControlFramesTransmitted);
+ stats->MACControlFramesReceived = RX_STAT(fp, MACControlFramesReceived);
+}
+
+static void fun_get_rmon_stats(struct net_device *netdev,
+ struct ethtool_rmon_stats *stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ static const struct ethtool_rmon_hist_range rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 32767 },
+ {}
+ };
+
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!(fp->port_caps & FUN_PORT_CAP_STATS))
+ return;
+
+ stats->undersize_pkts = RX_STAT(fp, etherStatsUndersizePkts);
+ stats->oversize_pkts = RX_STAT(fp, etherStatsOversizePkts);
+ stats->fragments = RX_STAT(fp, etherStatsFragments);
+ stats->jabbers = RX_STAT(fp, etherStatsJabbers);
+
+ stats->hist[0] = RX_STAT(fp, etherStatsPkts64Octets);
+ stats->hist[1] = RX_STAT(fp, etherStatsPkts65to127Octets);
+ stats->hist[2] = RX_STAT(fp, etherStatsPkts128to255Octets);
+ stats->hist[3] = RX_STAT(fp, etherStatsPkts256to511Octets);
+ stats->hist[4] = RX_STAT(fp, etherStatsPkts512to1023Octets);
+ stats->hist[5] = RX_STAT(fp, etherStatsPkts1024to1518Octets);
+ stats->hist[6] = RX_STAT(fp, etherStatsPkts1519toMaxOctets);
+
+ stats->hist_tx[0] = TX_STAT(fp, etherStatsPkts64Octets);
+ stats->hist_tx[1] = TX_STAT(fp, etherStatsPkts65to127Octets);
+ stats->hist_tx[2] = TX_STAT(fp, etherStatsPkts128to255Octets);
+ stats->hist_tx[3] = TX_STAT(fp, etherStatsPkts256to511Octets);
+ stats->hist_tx[4] = TX_STAT(fp, etherStatsPkts512to1023Octets);
+ stats->hist_tx[5] = TX_STAT(fp, etherStatsPkts1024to1518Octets);
+ stats->hist_tx[6] = TX_STAT(fp, etherStatsPkts1519toMaxOctets);
+
+ *ranges = rmon_ranges;
+}
+
+static void fun_get_fec_stats(struct net_device *netdev,
+ struct ethtool_fec_stats *stats)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!(fp->port_caps & FUN_PORT_CAP_STATS))
+ return;
+
+ stats->corrected_blocks.total = FEC_STAT(fp, Correctable);
+ stats->uncorrectable_blocks.total = FEC_STAT(fp, Uncorrectable);
+}
+
+#undef RX_STAT
+#undef TX_STAT
+#undef FEC_STAT
+
+static int fun_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = netdev->real_num_rx_queues;
+ return 0;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int fun_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
+{
+ return 0;
+}
+
+static u32 fun_get_rxfh_indir_size(struct net_device *netdev)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ return fp->indir_table_nentries;
+}
+
+static u32 fun_get_rxfh_key_size(struct net_device *netdev)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ return sizeof(fp->rss_key);
+}
+
+static int fun_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!fp->rss_cfg)
+ return -EOPNOTSUPP;
+
+ if (indir)
+ memcpy(indir, fp->indir_table,
+ sizeof(u32) * fp->indir_table_nentries);
+
+ if (key)
+ memcpy(key, fp->rss_key, sizeof(fp->rss_key));
+
+ if (hfunc)
+ *hfunc = fp->hash_algo == FUN_ETH_RSS_ALG_TOEPLITZ ?
+ ETH_RSS_HASH_TOP : ETH_RSS_HASH_CRC32;
+
+ return 0;
+}
+
+static int fun_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ const u32 *rss_indir = indir ? indir : fp->indir_table;
+ const u8 *rss_key = key ? key : fp->rss_key;
+ enum fun_eth_hash_alg algo;
+
+ if (!fp->rss_cfg)
+ return -EOPNOTSUPP;
+
+ if (hfunc == ETH_RSS_HASH_NO_CHANGE)
+ algo = fp->hash_algo;
+ else if (hfunc == ETH_RSS_HASH_CRC32)
+ algo = FUN_ETH_RSS_ALG_CRC32;
+ else if (hfunc == ETH_RSS_HASH_TOP)
+ algo = FUN_ETH_RSS_ALG_TOEPLITZ;
+ else
+ return -EINVAL;
+
+	/* If the port is enabled, try to reconfigure RSS and keep the new
+	 * settings if successful. If it is down, we update the RSS settings
+ * and apply them at the next UP time.
+ */
+ if (netif_running(netdev)) {
+ int rc = fun_config_rss(netdev, algo, rss_key, rss_indir,
+ FUN_ADMIN_SUBOP_MODIFY);
+ if (rc)
+ return rc;
+ }
+
+ fp->hash_algo = algo;
+ if (key)
+ memcpy(fp->rss_key, key, sizeof(fp->rss_key));
+ if (indir)
+ memcpy(fp->indir_table, indir,
+ sizeof(u32) * fp->indir_table_nentries);
+ return 0;
+}
+
+static int fun_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = -1;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+ return 0;
+}
+
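+/* Map device FEC mode bits to ethtool ETHTOOL_FEC_* flags. */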
+static unsigned int to_ethtool_fec(unsigned int fun_fec)
+{
+ unsigned int fec = 0;
+
+ if (fun_fec == FUN_PORT_FEC_NA)
+ fec |= ETHTOOL_FEC_NONE;
+ if (fun_fec & FUN_PORT_FEC_OFF)
+ fec |= ETHTOOL_FEC_OFF;
+ if (fun_fec & FUN_PORT_FEC_RS)
+ fec |= ETHTOOL_FEC_RS;
+ if (fun_fec & FUN_PORT_FEC_FC)
+ fec |= ETHTOOL_FEC_BASER;
+ if (fun_fec & FUN_PORT_FEC_AUTO)
+ fec |= ETHTOOL_FEC_AUTO;
+ return fec;
+}
+
+static int fun_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ u64 fec_data;
+ int rc;
+
+ rc = fun_port_read_cmd(fp, FUN_ADMIN_PORT_KEY_FEC, &fec_data);
+ if (rc)
+ return rc;
+
+ fec->active_fec = to_ethtool_fec(fec_data & 0xff);
+ fec->fec = to_ethtool_fec(fec_data >> 8);
+ return 0;
+}
+
+static int fun_set_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ u64 fec_mode;
+
+ switch (fec->fec) {
+ case ETHTOOL_FEC_AUTO:
+ fec_mode = FUN_PORT_FEC_AUTO;
+ break;
+ case ETHTOOL_FEC_OFF:
+ if (!(fp->port_caps & FUN_PORT_CAP_FEC_NONE))
+ return -EINVAL;
+ fec_mode = FUN_PORT_FEC_OFF;
+ break;
+ case ETHTOOL_FEC_BASER:
+ if (!(fp->port_caps & FUN_PORT_CAP_FEC_FC))
+ return -EINVAL;
+ fec_mode = FUN_PORT_FEC_FC;
+ break;
+ case ETHTOOL_FEC_RS:
+ if (!(fp->port_caps & FUN_PORT_CAP_FEC_RS))
+ return -EINVAL;
+ fec_mode = FUN_PORT_FEC_RS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_FEC, fec_mode);
+}
+
+static const struct ethtool_ops fun_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
+ .get_link_ksettings = fun_get_link_ksettings,
+ .set_link_ksettings = fun_set_link_ksettings,
+ .set_phys_id = fun_set_phys_id,
+ .get_drvinfo = fun_get_drvinfo,
+ .get_msglevel = fun_get_msglevel,
+ .set_msglevel = fun_set_msglevel,
+ .get_regs_len = fun_get_regs_len,
+ .get_regs = fun_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = fun_get_coalesce,
+ .set_coalesce = fun_set_coalesce,
+ .get_ts_info = fun_get_ts_info,
+ .get_ringparam = fun_get_ringparam,
+ .set_ringparam = fun_set_ringparam,
+ .get_sset_count = fun_get_sset_count,
+ .get_strings = fun_get_strings,
+ .get_ethtool_stats = fun_get_ethtool_stats,
+ .get_rxnfc = fun_get_rxnfc,
+ .set_rxnfc = fun_set_rxnfc,
+ .get_rxfh_indir_size = fun_get_rxfh_indir_size,
+ .get_rxfh_key_size = fun_get_rxfh_key_size,
+ .get_rxfh = fun_get_rxfh,
+ .set_rxfh = fun_set_rxfh,
+ .get_channels = fun_get_channels,
+ .set_channels = fun_set_channels,
+ .get_fecparam = fun_get_fecparam,
+ .set_fecparam = fun_set_fecparam,
+ .get_pauseparam = fun_get_pauseparam,
+ .set_pauseparam = fun_set_pauseparam,
+ .nway_reset = fun_restart_an,
+ .get_pause_stats = fun_get_pause_stats,
+ .get_fec_stats = fun_get_fec_stats,
+ .get_eth_mac_stats = fun_get_802_3_stats,
+ .get_eth_ctrl_stats = fun_get_802_3_ctrl_stats,
+ .get_rmon_stats = fun_get_rmon_stats,
+};
+
+void fun_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &fun_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ktls.c b/drivers/net/ethernet/fungible/funeth/funeth_ktls.c
new file mode 100644
index 000000000000..f871def70d70
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ktls.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include "funeth.h"
+#include "funeth_ktls.h"
+
+static int fun_admin_ktls_create(struct funeth_priv *fp, unsigned int id)
+{
+ struct fun_admin_ktls_create_req req = {
+ .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
+ sizeof(req)),
+ .subop = FUN_ADMIN_SUBOP_CREATE,
+ .id = cpu_to_be32(id),
+ };
+
+ return fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
+}
+
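+/* Install a Tx kTLS connection on the device. Only TLS 1.2 with AES-GCM-128
+ * is supported. The key material is sent in an admin command and the request
+ * is wiped from memory once submitted.
+ */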
+static int fun_ktls_add(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct fun_admin_ktls_modify_req req = {
+ .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
+ sizeof(req)),
+ .subop = FUN_ADMIN_SUBOP_MODIFY,
+ .id = cpu_to_be32(fp->ktls_id),
+ .tcp_seq = cpu_to_be32(start_offload_tcp_sn),
+ };
+ struct fun_admin_ktls_modify_rsp rsp;
+ struct fun_ktls_tx_ctx *tx_ctx;
+ int rc;
+
+ if (direction != TLS_OFFLOAD_CTX_DIR_TX)
+ return -EOPNOTSUPP;
+
+ if (crypto_info->version == TLS_1_2_VERSION)
+ req.version = FUN_KTLS_TLSV2;
+ else
+ return -EOPNOTSUPP;
+
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *c = (void *)crypto_info;
+
+ req.cipher = FUN_KTLS_CIPHER_AES_GCM_128;
+ memcpy(req.key, c->key, sizeof(c->key));
+ memcpy(req.iv, c->iv, sizeof(c->iv));
+ memcpy(req.salt, c->salt, sizeof(c->salt));
+ memcpy(req.record_seq, c->rec_seq, sizeof(c->rec_seq));
+ break;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &req.common, &rsp,
+ sizeof(rsp), 0);
+ memzero_explicit(&req, sizeof(req));
+ if (rc)
+ return rc;
+
+ tx_ctx = tls_driver_ctx(sk, direction);
+ tx_ctx->tlsid = rsp.tlsid;
+ tx_ctx->next_seq = start_offload_tcp_sn;
+ atomic64_inc(&fp->tx_tls_add);
+ return 0;
+}
+
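+/* Remove a Tx kTLS connection from the device by issuing a MODIFY command
+ * with the REMOVE flag for the connection's device TLS id.
+ */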
+static void fun_ktls_del(struct net_device *netdev,
+ struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct fun_admin_ktls_modify_req req;
+ struct fun_ktls_tx_ctx *tx_ctx;
+
+ if (direction != TLS_OFFLOAD_CTX_DIR_TX)
+ return;
+
+ tx_ctx = __tls_driver_ctx(tls_ctx, direction);
+
+ req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
+ offsetof(struct fun_admin_ktls_modify_req, tcp_seq));
+ req.subop = FUN_ADMIN_SUBOP_MODIFY;
+ req.flags = cpu_to_be16(FUN_KTLS_MODIFY_REMOVE);
+ req.id = cpu_to_be32(fp->ktls_id);
+ req.tlsid = tx_ctx->tlsid;
+
+ fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
+ atomic64_inc(&fp->tx_tls_del);
+}
+
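+/* Resynchronize the device's TLS state with the TCP sequence number and
+ * record sequence supplied by the stack.
+ */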
+static int fun_ktls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
+ u8 *rcd_sn, enum tls_offload_ctx_dir direction)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct fun_admin_ktls_modify_req req;
+ struct fun_ktls_tx_ctx *tx_ctx;
+ int rc;
+
+ if (direction != TLS_OFFLOAD_CTX_DIR_TX)
+ return -EOPNOTSUPP;
+
+ tx_ctx = tls_driver_ctx(sk, direction);
+
+ req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
+ offsetof(struct fun_admin_ktls_modify_req, key));
+ req.subop = FUN_ADMIN_SUBOP_MODIFY;
+ req.flags = 0;
+ req.id = cpu_to_be32(fp->ktls_id);
+ req.tlsid = tx_ctx->tlsid;
+ req.tcp_seq = cpu_to_be32(seq);
+ req.version = 0;
+ req.cipher = 0;
+ memcpy(req.record_seq, rcd_sn, sizeof(req.record_seq));
+
+ atomic64_inc(&fp->tx_tls_resync);
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
+ if (!rc)
+ tx_ctx->next_seq = seq;
+ return rc;
+}
+
+static const struct tlsdev_ops fun_ktls_ops = {
+ .tls_dev_add = fun_ktls_add,
+ .tls_dev_del = fun_ktls_del,
+ .tls_dev_resync = fun_ktls_resync,
+};
+
+int fun_ktls_init(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ int rc;
+
+ rc = fun_admin_ktls_create(fp, netdev->dev_port);
+ if (rc)
+ return rc;
+
+ fp->ktls_id = netdev->dev_port;
+ netdev->tlsdev_ops = &fun_ktls_ops;
+ netdev->hw_features |= NETIF_F_HW_TLS_TX;
+ netdev->features |= NETIF_F_HW_TLS_TX;
+ return 0;
+}
+
+void fun_ktls_cleanup(struct funeth_priv *fp)
+{
+ if (fp->ktls_id == FUN_HCI_ID_INVALID)
+ return;
+
+ fun_res_destroy(fp->fdev, FUN_ADMIN_OP_KTLS, 0, fp->ktls_id);
+ fp->ktls_id = FUN_HCI_ID_INVALID;
+}
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ktls.h b/drivers/net/ethernet/fungible/funeth/funeth_ktls.h
new file mode 100644
index 000000000000..9d6f2141a959
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ktls.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef _FUN_KTLS_H
+#define _FUN_KTLS_H
+
+#include <net/tls.h>
+
+struct funeth_priv;
+
+struct fun_ktls_tx_ctx {
+ __be64 tlsid;
+ u32 next_seq;
+};
+
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+int fun_ktls_init(struct net_device *netdev);
+void fun_ktls_cleanup(struct funeth_priv *fp);
+
+#else
+
+static inline int fun_ktls_init(struct net_device *netdev)
+{
+	return 0;
+}
+
+static inline void fun_ktls_cleanup(struct funeth_priv *fp)
+{
+}
+#endif
+
+#endif /* _FUN_KTLS_H */
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_main.c b/drivers/net/ethernet/fungible/funeth/funeth_main.c
new file mode 100644
index 000000000000..67dd02ed1fa3
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_main.c
@@ -0,0 +1,2091 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include <linux/bpf.h>
+#include <linux/crash_dump.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/filter.h>
+#include <linux/idr.h>
+#include <linux/if_vlan.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include <linux/inetdevice.h>
+
+#include "funeth.h"
+#include "funeth_devlink.h"
+#include "funeth_ktls.h"
+#include "fun_port.h"
+#include "fun_queue.h"
+#include "funeth_txrx.h"
+
+#define ADMIN_SQ_DEPTH 32
+#define ADMIN_CQ_DEPTH 64
+#define ADMIN_RQ_DEPTH 16
+
+/* Default number of Tx/Rx queues. */
+#define FUN_DFLT_QUEUES 16U
+
+enum {
+ FUN_SERV_RES_CHANGE = FUN_SERV_FIRST_AVAIL,
+ FUN_SERV_DEL_PORTS,
+};
+
+static const struct pci_device_id funeth_id_table[] = {
+ { PCI_VDEVICE(FUNGIBLE, 0x0101) },
+ { PCI_VDEVICE(FUNGIBLE, 0x0181) },
+ { 0, }
+};
+
+/* Issue a port write admin command with @n key/value pairs. */
+static int fun_port_write_cmds(struct funeth_priv *fp, unsigned int n,
+ const int *keys, const u64 *data)
+{
+ unsigned int cmd_size, i;
+ union {
+ struct fun_admin_port_req req;
+ struct fun_admin_port_rsp rsp;
+ u8 v[ADMIN_SQE_SIZE];
+ } cmd;
+
+ cmd_size = offsetof(struct fun_admin_port_req, u.write.write48) +
+ n * sizeof(struct fun_admin_write48_req);
+ if (cmd_size > sizeof(cmd) || cmd_size > ADMIN_RSP_MAX_LEN)
+ return -EINVAL;
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT,
+ cmd_size);
+ cmd.req.u.write =
+ FUN_ADMIN_PORT_WRITE_REQ_INIT(FUN_ADMIN_SUBOP_WRITE, 0,
+ fp->netdev->dev_port);
+ for (i = 0; i < n; i++)
+ cmd.req.u.write.write48[i] =
+ FUN_ADMIN_WRITE48_REQ_INIT(keys[i], data[i]);
+
+ return fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common,
+ &cmd.rsp, cmd_size, 0);
+}
+
+int fun_port_write_cmd(struct funeth_priv *fp, int key, u64 data)
+{
+ return fun_port_write_cmds(fp, 1, &key, &data);
+}
+
+/* Issue a port read admin command with @n key/value pairs. */
+static int fun_port_read_cmds(struct funeth_priv *fp, unsigned int n,
+ const int *keys, u64 *data)
+{
+ const struct fun_admin_read48_rsp *r48rsp;
+ unsigned int cmd_size, i;
+ int rc;
+ union {
+ struct fun_admin_port_req req;
+ struct fun_admin_port_rsp rsp;
+ u8 v[ADMIN_SQE_SIZE];
+ } cmd;
+
+ cmd_size = offsetof(struct fun_admin_port_req, u.read.read48) +
+ n * sizeof(struct fun_admin_read48_req);
+ if (cmd_size > sizeof(cmd) || cmd_size > ADMIN_RSP_MAX_LEN)
+ return -EINVAL;
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT,
+ cmd_size);
+ cmd.req.u.read =
+ FUN_ADMIN_PORT_READ_REQ_INIT(FUN_ADMIN_SUBOP_READ, 0,
+ fp->netdev->dev_port);
+ for (i = 0; i < n; i++)
+ cmd.req.u.read.read48[i] = FUN_ADMIN_READ48_REQ_INIT(keys[i]);
+
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common,
+ &cmd.rsp, cmd_size, 0);
+ if (rc)
+ return rc;
+
+ for (r48rsp = cmd.rsp.u.read.read48, i = 0; i < n; i++, r48rsp++) {
+ data[i] = FUN_ADMIN_READ48_RSP_DATA_G(r48rsp->key_to_data);
+ dev_dbg(fp->fdev->dev,
+ "port_read_rsp lport=%u (key_to_data=0x%llx) key=%d data:%lld retval:%lld",
+ fp->lport, r48rsp->key_to_data, keys[i], data[i],
+ FUN_ADMIN_READ48_RSP_RET_G(r48rsp->key_to_data));
+ }
+ return 0;
+}
+
+int fun_port_read_cmd(struct funeth_priv *fp, int key, u64 *data)
+{
+ return fun_port_read_cmds(fp, 1, &key, data);
+}
+
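+/* Log link state changes, including speed, FEC mode, and pause settings when
+ * the link is up.
+ */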
+static void fun_report_link(struct net_device *netdev)
+{
+ if (netif_carrier_ok(netdev)) {
+ const struct funeth_priv *fp = netdev_priv(netdev);
+ const char *fec = "", *pause = "";
+ int speed = fp->link_speed;
+ char unit = 'M';
+
+ if (fp->link_speed >= SPEED_1000) {
+ speed /= 1000;
+ unit = 'G';
+ }
+
+ if (fp->active_fec & FUN_PORT_FEC_RS)
+ fec = ", RS-FEC";
+ else if (fp->active_fec & FUN_PORT_FEC_FC)
+ fec = ", BASER-FEC";
+
+ if ((fp->active_fc & FUN_PORT_CAP_PAUSE_MASK) == FUN_PORT_CAP_PAUSE_MASK)
+ pause = ", Tx/Rx PAUSE";
+ else if (fp->active_fc & FUN_PORT_CAP_RX_PAUSE)
+ pause = ", Rx PAUSE";
+ else if (fp->active_fc & FUN_PORT_CAP_TX_PAUSE)
+ pause = ", Tx PAUSE";
+
+ netdev_info(netdev, "Link up at %d %cb/s full-duplex%s%s\n",
+ speed, unit, pause, fec);
+ } else {
+ netdev_info(netdev, "Link down\n");
+ }
+}
+
+static int fun_adi_write(struct fun_dev *fdev, enum fun_admin_adi_attr attr,
+ unsigned int adi_id, const struct fun_adi_param *param)
+{
+ struct fun_admin_adi_req req = {
+ .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_ADI,
+ sizeof(req)),
+ .u.write.subop = FUN_ADMIN_SUBOP_WRITE,
+ .u.write.attribute = attr,
+ .u.write.id = cpu_to_be32(adi_id),
+ .u.write.param = *param
+ };
+
+ return fun_submit_admin_sync_cmd(fdev, &req.common, NULL, 0, 0);
+}
+
+/* Configure RSS for the given port. @op determines whether a new RSS context
+ * is to be created or whether an existing one should be reconfigured. The
+ * remaining parameters specify the hashing algorithm, key, and indirection
+ * table.
+ *
+ * This initiates packet delivery to the Rx queues set in the indirection
+ * table.
+ */
+int fun_config_rss(struct net_device *dev, int algo, const u8 *key,
+ const u32 *qtable, u8 op)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ unsigned int table_len = fp->indir_table_nentries;
+ unsigned int len = FUN_ETH_RSS_MAX_KEY_SIZE + sizeof(u32) * table_len;
+ struct funeth_rxq **rxqs = rtnl_dereference(fp->rxqs);
+ union {
+ struct {
+ struct fun_admin_rss_req req;
+ struct fun_dataop_gl gl;
+ };
+ struct fun_admin_generic_create_rsp rsp;
+ } cmd;
+ __be32 *indir_tab;
+ u16 flags;
+ int rc;
+
+ if (op != FUN_ADMIN_SUBOP_CREATE && fp->rss_hw_id == FUN_HCI_ID_INVALID)
+ return -EINVAL;
+
+ flags = op == FUN_ADMIN_SUBOP_CREATE ?
+ FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR : 0;
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_RSS,
+ sizeof(cmd));
+ cmd.req.u.create =
+ FUN_ADMIN_RSS_CREATE_REQ_INIT(op, flags, fp->rss_hw_id,
+ dev->dev_port, algo,
+ FUN_ETH_RSS_MAX_KEY_SIZE,
+ table_len, 0,
+ FUN_ETH_RSS_MAX_KEY_SIZE);
+ cmd.req.u.create.dataop = FUN_DATAOP_HDR_INIT(1, 0, 1, 0, len);
+ fun_dataop_gl_init(&cmd.gl, 0, 0, len, fp->rss_dma_addr);
+
+ /* write the key and indirection table into the RSS DMA area */
+ memcpy(fp->rss_cfg, key, FUN_ETH_RSS_MAX_KEY_SIZE);
+ indir_tab = fp->rss_cfg + FUN_ETH_RSS_MAX_KEY_SIZE;
+ for (rc = 0; rc < table_len; rc++)
+ *indir_tab++ = cpu_to_be32(rxqs[*qtable++]->hw_cqid);
+
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common,
+ &cmd.rsp, sizeof(cmd.rsp), 0);
+ if (!rc && op == FUN_ADMIN_SUBOP_CREATE)
+ fp->rss_hw_id = be32_to_cpu(cmd.rsp.id);
+ return rc;
+}
+
+/* Destroy the HW RSS context associated with the given port. This also stops
+ * all packet delivery to our Rx queues.
+ */
+static void fun_destroy_rss(struct funeth_priv *fp)
+{
+ if (fp->rss_hw_id != FUN_HCI_ID_INVALID) {
+ fun_res_destroy(fp->fdev, FUN_ADMIN_OP_RSS, 0, fp->rss_hw_id);
+ fp->rss_hw_id = FUN_HCI_ID_INVALID;
+ }
+}
+
+static void fun_irq_aff_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct fun_irq *p = container_of(notify, struct fun_irq, aff_notify);
+
+ cpumask_copy(&p->affinity_mask, mask);
+}
+
+static void fun_irq_aff_release(struct kref __always_unused *ref)
+{
+}
+
+/* Allocate an IRQ structure, assign an MSI-X index and initial affinity to it,
+ * and add it to the IRQ XArray.
+ */
+static struct fun_irq *fun_alloc_qirq(struct funeth_priv *fp, unsigned int idx,
+ int node, unsigned int xa_idx_offset)
+{
+ struct fun_irq *irq;
+ int cpu, res;
+
+ cpu = cpumask_local_spread(idx, node);
+ node = cpu_to_mem(cpu);
+
+ irq = kzalloc_node(sizeof(*irq), GFP_KERNEL, node);
+ if (!irq)
+ return ERR_PTR(-ENOMEM);
+
+ res = fun_reserve_irqs(fp->fdev, 1, &irq->irq_idx);
+ if (res != 1)
+ goto free_irq;
+
+ res = xa_insert(&fp->irqs, idx + xa_idx_offset, irq, GFP_KERNEL);
+ if (res)
+ goto release_irq;
+
+ irq->irq = pci_irq_vector(fp->pdev, irq->irq_idx);
+ cpumask_set_cpu(cpu, &irq->affinity_mask);
+ irq->aff_notify.notify = fun_irq_aff_notify;
+ irq->aff_notify.release = fun_irq_aff_release;
+ irq->state = FUN_IRQ_INIT;
+ return irq;
+
+release_irq:
+ fun_release_irqs(fp->fdev, 1, &irq->irq_idx);
+free_irq:
+ kfree(irq);
+ return ERR_PTR(res);
+}
+
+static void fun_free_qirq(struct funeth_priv *fp, struct fun_irq *irq)
+{
+ netif_napi_del(&irq->napi);
+ fun_release_irqs(fp->fdev, 1, &irq->irq_idx);
+ kfree(irq);
+}
+
+/* Release the IRQs reserved for Tx/Rx queues that aren't being used. */
+static void fun_prune_queue_irqs(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ unsigned int nreleased = 0;
+ struct fun_irq *irq;
+ unsigned long idx;
+
+ xa_for_each(&fp->irqs, idx, irq) {
+ if (irq->txq || irq->rxq) /* skip those in use */
+ continue;
+
+ xa_erase(&fp->irqs, idx);
+ fun_free_qirq(fp, irq);
+ nreleased++;
+ if (idx < fp->rx_irq_ofst)
+ fp->num_tx_irqs--;
+ else
+ fp->num_rx_irqs--;
+ }
+ netif_info(fp, intr, dev, "Released %u queue IRQs\n", nreleased);
+}
+
+/* Reserve IRQs, one per queue, to accommodate the requested queue numbers @ntx
+ * and @nrx. IRQs are added incrementally to those we already have.
+ * We hold on to allocated IRQs until garbage collection of unused IRQs is
+ * separately requested.
+ */
+static int fun_alloc_queue_irqs(struct net_device *dev, unsigned int ntx,
+ unsigned int nrx)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ int node = dev_to_node(&fp->pdev->dev);
+ struct fun_irq *irq;
+ unsigned int i;
+
+ for (i = fp->num_tx_irqs; i < ntx; i++) {
+ irq = fun_alloc_qirq(fp, i, node, 0);
+ if (IS_ERR(irq))
+ return PTR_ERR(irq);
+
+ fp->num_tx_irqs++;
+ netif_tx_napi_add(dev, &irq->napi, fun_txq_napi_poll,
+ NAPI_POLL_WEIGHT);
+ }
+
+ for (i = fp->num_rx_irqs; i < nrx; i++) {
+ irq = fun_alloc_qirq(fp, i, node, fp->rx_irq_ofst);
+ if (IS_ERR(irq))
+ return PTR_ERR(irq);
+
+ fp->num_rx_irqs++;
+ netif_napi_add(dev, &irq->napi, fun_rxq_napi_poll,
+ NAPI_POLL_WEIGHT);
+ }
+
+ netif_info(fp, intr, dev, "Reserved %u/%u IRQs for Tx/Rx queues\n",
+ ntx, nrx);
+ return 0;
+}
+
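+/* The helpers below create or free Tx, Rx, and XDP queues in the index range
+ * [start, nqs), taking them to the requested state.
+ */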
+static void free_txqs(struct funeth_txq **txqs, unsigned int nqs,
+ unsigned int start, int state)
+{
+ unsigned int i;
+
+ for (i = start; i < nqs && txqs[i]; i++)
+ txqs[i] = funeth_txq_free(txqs[i], state);
+}
+
+static int alloc_txqs(struct net_device *dev, struct funeth_txq **txqs,
+ unsigned int nqs, unsigned int depth, unsigned int start,
+ int state)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ unsigned int i;
+ int err;
+
+ for (i = start; i < nqs; i++) {
+ err = funeth_txq_create(dev, i, depth, xa_load(&fp->irqs, i),
+ state, &txqs[i]);
+ if (err) {
+ free_txqs(txqs, nqs, start, FUN_QSTATE_DESTROYED);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static void free_rxqs(struct funeth_rxq **rxqs, unsigned int nqs,
+ unsigned int start, int state)
+{
+ unsigned int i;
+
+ for (i = start; i < nqs && rxqs[i]; i++)
+ rxqs[i] = funeth_rxq_free(rxqs[i], state);
+}
+
+static int alloc_rxqs(struct net_device *dev, struct funeth_rxq **rxqs,
+ unsigned int nqs, unsigned int ncqe, unsigned int nrqe,
+ unsigned int start, int state)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ unsigned int i;
+ int err;
+
+ for (i = start; i < nqs; i++) {
+ err = funeth_rxq_create(dev, i, ncqe, nrqe,
+ xa_load(&fp->irqs, i + fp->rx_irq_ofst),
+ state, &rxqs[i]);
+ if (err) {
+ free_rxqs(rxqs, nqs, start, FUN_QSTATE_DESTROYED);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static void free_xdpqs(struct funeth_txq **xdpqs, unsigned int nqs,
+ unsigned int start, int state)
+{
+ unsigned int i;
+
+ for (i = start; i < nqs && xdpqs[i]; i++)
+ xdpqs[i] = funeth_txq_free(xdpqs[i], state);
+
+ if (state == FUN_QSTATE_DESTROYED)
+ kfree(xdpqs);
+}
+
+static struct funeth_txq **alloc_xdpqs(struct net_device *dev, unsigned int nqs,
+ unsigned int depth, unsigned int start,
+ int state)
+{
+ struct funeth_txq **xdpqs;
+ unsigned int i;
+ int err;
+
+ xdpqs = kcalloc(nqs, sizeof(*xdpqs), GFP_KERNEL);
+ if (!xdpqs)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = start; i < nqs; i++) {
+ err = funeth_txq_create(dev, i, depth, NULL, state, &xdpqs[i]);
+ if (err) {
+ free_xdpqs(xdpqs, nqs, start, FUN_QSTATE_DESTROYED);
+ return ERR_PTR(err);
+ }
+ }
+ return xdpqs;
+}
+
+static void fun_free_rings(struct net_device *netdev, struct fun_qset *qset)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct funeth_txq **xdpqs = qset->xdpqs;
+ struct funeth_rxq **rxqs = qset->rxqs;
+
+ /* qset may not specify any queues to operate on. In that case the
+ * currently installed queues are implied.
+ */
+ if (!rxqs) {
+ rxqs = rtnl_dereference(fp->rxqs);
+ xdpqs = rtnl_dereference(fp->xdpqs);
+ qset->txqs = fp->txqs;
+ qset->nrxqs = netdev->real_num_rx_queues;
+ qset->ntxqs = netdev->real_num_tx_queues;
+ qset->nxdpqs = fp->num_xdpqs;
+ }
+ if (!rxqs)
+ return;
+
+ if (rxqs == rtnl_dereference(fp->rxqs)) {
+ rcu_assign_pointer(fp->rxqs, NULL);
+ rcu_assign_pointer(fp->xdpqs, NULL);
+ synchronize_net();
+ fp->txqs = NULL;
+ }
+
+ free_rxqs(rxqs, qset->nrxqs, qset->rxq_start, qset->state);
+ free_txqs(qset->txqs, qset->ntxqs, qset->txq_start, qset->state);
+ free_xdpqs(xdpqs, qset->nxdpqs, qset->xdpq_start, qset->state);
+ if (qset->state == FUN_QSTATE_DESTROYED)
+ kfree(rxqs);
+
+ /* Tell the caller which queues were operated on. */
+ qset->rxqs = rxqs;
+ qset->xdpqs = xdpqs;
+}
+
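+/* Allocate the rings described by @qset. The Rx and Tx queue pointer arrays
+ * share one allocation, with the Tx pointers stored right after the Rx ones;
+ * XDP queues get their own array.
+ */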
+static int fun_alloc_rings(struct net_device *netdev, struct fun_qset *qset)
+{
+ struct funeth_txq **xdpqs = NULL, **txqs;
+ struct funeth_rxq **rxqs;
+ int err;
+
+ err = fun_alloc_queue_irqs(netdev, qset->ntxqs, qset->nrxqs);
+ if (err)
+ return err;
+
+ rxqs = kcalloc(qset->ntxqs + qset->nrxqs, sizeof(*rxqs), GFP_KERNEL);
+ if (!rxqs)
+ return -ENOMEM;
+
+ if (qset->nxdpqs) {
+ xdpqs = alloc_xdpqs(netdev, qset->nxdpqs, qset->sq_depth,
+ qset->xdpq_start, qset->state);
+ if (IS_ERR(xdpqs)) {
+ err = PTR_ERR(xdpqs);
+ goto free_qvec;
+ }
+ }
+
+ txqs = (struct funeth_txq **)&rxqs[qset->nrxqs];
+ err = alloc_txqs(netdev, txqs, qset->ntxqs, qset->sq_depth,
+ qset->txq_start, qset->state);
+ if (err)
+ goto free_xdpqs;
+
+ err = alloc_rxqs(netdev, rxqs, qset->nrxqs, qset->cq_depth,
+ qset->rq_depth, qset->rxq_start, qset->state);
+ if (err)
+ goto free_txqs;
+
+ qset->rxqs = rxqs;
+ qset->txqs = txqs;
+ qset->xdpqs = xdpqs;
+ return 0;
+
+free_txqs:
+ free_txqs(txqs, qset->ntxqs, qset->txq_start, FUN_QSTATE_DESTROYED);
+free_xdpqs:
+ free_xdpqs(xdpqs, qset->nxdpqs, qset->xdpq_start, FUN_QSTATE_DESTROYED);
+free_qvec:
+ kfree(rxqs);
+ return err;
+}
+
+/* Take queues to the next level. Presently this means creating them on the
+ * device.
+ */
+static int fun_advance_ring_state(struct net_device *dev, struct fun_qset *qset)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ int i, err;
+
+ for (i = 0; i < qset->nrxqs; i++) {
+ err = fun_rxq_create_dev(qset->rxqs[i],
+ xa_load(&fp->irqs,
+ i + fp->rx_irq_ofst));
+ if (err)
+ goto out;
+ }
+
+ for (i = 0; i < qset->ntxqs; i++) {
+ err = fun_txq_create_dev(qset->txqs[i], xa_load(&fp->irqs, i));
+ if (err)
+ goto out;
+ }
+
+ for (i = 0; i < qset->nxdpqs; i++) {
+ err = fun_txq_create_dev(qset->xdpqs[i], NULL);
+ if (err)
+ goto out;
+ }
+
+ return 0;
+
+out:
+ fun_free_rings(dev, qset);
+ return err;
+}
+
+static int fun_port_create(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ union {
+ struct fun_admin_port_req req;
+ struct fun_admin_port_rsp rsp;
+ } cmd;
+ int rc;
+
+ if (fp->lport != INVALID_LPORT)
+ return 0;
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT,
+ sizeof(cmd.req));
+ cmd.req.u.create =
+ FUN_ADMIN_PORT_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, 0,
+ netdev->dev_port);
+
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp,
+ sizeof(cmd.rsp), 0);
+
+ if (!rc)
+ fp->lport = be16_to_cpu(cmd.rsp.u.create.lport);
+ return rc;
+}
+
+static int fun_port_destroy(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (fp->lport == INVALID_LPORT)
+ return 0;
+
+ fp->lport = INVALID_LPORT;
+ return fun_res_destroy(fp->fdev, FUN_ADMIN_OP_PORT, 0,
+ netdev->dev_port);
+}
+
+static int fun_eth_create(struct funeth_priv *fp)
+{
+ union {
+ struct fun_admin_eth_req req;
+ struct fun_admin_generic_create_rsp rsp;
+ } cmd;
+ int rc;
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_ETH,
+ sizeof(cmd.req));
+ cmd.req.u.create = FUN_ADMIN_ETH_CREATE_REQ_INIT(
+ FUN_ADMIN_SUBOP_CREATE,
+ FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR,
+ 0, fp->netdev->dev_port);
+
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp,
+ sizeof(cmd.rsp), 0);
+ return rc ? rc : be32_to_cpu(cmd.rsp.id);
+}
+
+static int fun_vi_create(struct funeth_priv *fp)
+{
+ struct fun_admin_vi_req req = {
+ .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_VI,
+ sizeof(req)),
+ .u.create = FUN_ADMIN_VI_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE,
+ 0,
+ fp->netdev->dev_port,
+ fp->netdev->dev_port)
+ };
+
+ return fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
+}
+
+/* Helper to create an ETH flow and bind an SQ to it.
+ * Returns the ETH id (>= 0) on success or a negative error.
+ */
+int fun_create_and_bind_tx(struct funeth_priv *fp, u32 sqid)
+{
+ int rc, ethid;
+
+ ethid = fun_eth_create(fp);
+ if (ethid >= 0) {
+ rc = fun_bind(fp->fdev, FUN_ADMIN_BIND_TYPE_EPSQ, sqid,
+ FUN_ADMIN_BIND_TYPE_ETH, ethid);
+ if (rc) {
+ fun_res_destroy(fp->fdev, FUN_ADMIN_OP_ETH, 0, ethid);
+ ethid = rc;
+ }
+ }
+ return ethid;
+}
+
+static irqreturn_t fun_queue_irq_handler(int irq, void *data)
+{
+ struct fun_irq *p = data;
+
+ if (p->rxq) {
+ prefetch(p->rxq->next_cqe_info);
+ p->rxq->irq_cnt++;
+ }
+ napi_schedule_irqoff(&p->napi);
+ return IRQ_HANDLED;
+}
+
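+/* Request the IRQs of all queues bound to one, then set affinity and enable
+ * NAPI in a second pass once every request has succeeded. On failure any
+ * IRQs requested so far are freed.
+ */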
+static int fun_enable_irqs(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ unsigned long idx, last;
+ unsigned int qidx;
+ struct fun_irq *p;
+ const char *qtype;
+ int err;
+
+ xa_for_each(&fp->irqs, idx, p) {
+ if (p->txq) {
+ qtype = "tx";
+ qidx = p->txq->qidx;
+ } else if (p->rxq) {
+ qtype = "rx";
+ qidx = p->rxq->qidx;
+ } else {
+ continue;
+ }
+
+ if (p->state != FUN_IRQ_INIT)
+ continue;
+
+ snprintf(p->name, sizeof(p->name) - 1, "%s-%s-%u", dev->name,
+ qtype, qidx);
+ err = request_irq(p->irq, fun_queue_irq_handler, 0, p->name, p);
+ if (err) {
+ netdev_err(dev, "Failed to allocate IRQ %u, err %d\n",
+ p->irq, err);
+ goto unroll;
+ }
+ p->state = FUN_IRQ_REQUESTED;
+ }
+
+ xa_for_each(&fp->irqs, idx, p) {
+ if (p->state != FUN_IRQ_REQUESTED)
+ continue;
+ irq_set_affinity_notifier(p->irq, &p->aff_notify);
+ irq_set_affinity_and_hint(p->irq, &p->affinity_mask);
+ napi_enable(&p->napi);
+ p->state = FUN_IRQ_ENABLED;
+ }
+
+ return 0;
+
+unroll:
+ last = idx - 1;
+ xa_for_each_range(&fp->irqs, idx, p, 0, last)
+ if (p->state == FUN_IRQ_REQUESTED) {
+ free_irq(p->irq, p);
+ p->state = FUN_IRQ_INIT;
+ }
+
+ return err;
+}
+
+static void fun_disable_one_irq(struct fun_irq *irq)
+{
+ napi_disable(&irq->napi);
+ irq_set_affinity_notifier(irq->irq, NULL);
+ irq_update_affinity_hint(irq->irq, NULL);
+ free_irq(irq->irq, irq);
+ irq->state = FUN_IRQ_INIT;
+}
+
+static void fun_disable_irqs(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct fun_irq *p;
+ unsigned long idx;
+
+ xa_for_each(&fp->irqs, idx, p)
+ if (p->state == FUN_IRQ_ENABLED)
+ fun_disable_one_irq(p);
+}
+
+static void fun_down(struct net_device *dev, struct fun_qset *qset)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+
+	/* If we don't have queues, the data path is already down.
+ * Note netif_running(dev) may be true.
+ */
+ if (!rcu_access_pointer(fp->rxqs))
+ return;
+
+ /* It is also down if the queues aren't on the device. */
+ if (fp->txqs[0]->init_state >= FUN_QSTATE_INIT_FULL) {
+ netif_info(fp, ifdown, dev,
+ "Tearing down data path on device\n");
+ fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_DISABLE, 0);
+
+ netif_carrier_off(dev);
+ netif_tx_disable(dev);
+
+ fun_destroy_rss(fp);
+ fun_res_destroy(fp->fdev, FUN_ADMIN_OP_VI, 0, dev->dev_port);
+ fun_disable_irqs(dev);
+ }
+
+ fun_free_rings(dev, qset);
+}
+
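+/* Bring the data path up: create the queues on the device if needed, create
+ * the VI, install the queue pointers, enable IRQs, set up RSS (or bind the
+ * single Rx CQ), then enable the port and start the Tx queues.
+ */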
+static int fun_up(struct net_device *dev, struct fun_qset *qset)
+{
+ static const int port_keys[] = {
+ FUN_ADMIN_PORT_KEY_STATS_DMA_LOW,
+ FUN_ADMIN_PORT_KEY_STATS_DMA_HIGH,
+ FUN_ADMIN_PORT_KEY_ENABLE
+ };
+
+ struct funeth_priv *fp = netdev_priv(dev);
+ u64 vals[] = {
+ lower_32_bits(fp->stats_dma_addr),
+ upper_32_bits(fp->stats_dma_addr),
+ FUN_PORT_FLAG_ENABLE_NOTIFY
+ };
+ int err;
+
+ netif_info(fp, ifup, dev, "Setting up data path on device\n");
+
+ if (qset->rxqs[0]->init_state < FUN_QSTATE_INIT_FULL) {
+ err = fun_advance_ring_state(dev, qset);
+ if (err)
+ return err;
+ }
+
+ err = fun_vi_create(fp);
+ if (err)
+ goto free_queues;
+
+ fp->txqs = qset->txqs;
+ rcu_assign_pointer(fp->rxqs, qset->rxqs);
+ rcu_assign_pointer(fp->xdpqs, qset->xdpqs);
+
+ err = fun_enable_irqs(dev);
+ if (err)
+ goto destroy_vi;
+
+ if (fp->rss_cfg) {
+ err = fun_config_rss(dev, fp->hash_algo, fp->rss_key,
+ fp->indir_table, FUN_ADMIN_SUBOP_CREATE);
+ } else {
+ /* The non-RSS case has only 1 queue. */
+ err = fun_bind(fp->fdev, FUN_ADMIN_BIND_TYPE_VI, dev->dev_port,
+ FUN_ADMIN_BIND_TYPE_EPCQ,
+ qset->rxqs[0]->hw_cqid);
+ }
+ if (err)
+ goto disable_irqs;
+
+ err = fun_port_write_cmds(fp, 3, port_keys, vals);
+ if (err)
+ goto free_rss;
+
+ netif_tx_start_all_queues(dev);
+ return 0;
+
+free_rss:
+ fun_destroy_rss(fp);
+disable_irqs:
+ fun_disable_irqs(dev);
+destroy_vi:
+ fun_res_destroy(fp->fdev, FUN_ADMIN_OP_VI, 0, dev->dev_port);
+free_queues:
+ fun_free_rings(dev, qset);
+ return err;
+}
+
+static int funeth_open(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct fun_qset qset = {
+ .nrxqs = netdev->real_num_rx_queues,
+ .ntxqs = netdev->real_num_tx_queues,
+ .nxdpqs = fp->num_xdpqs,
+ .cq_depth = fp->cq_depth,
+ .rq_depth = fp->rq_depth,
+ .sq_depth = fp->sq_depth,
+ .state = FUN_QSTATE_INIT_FULL,
+ };
+ int rc;
+
+ rc = fun_alloc_rings(netdev, &qset);
+ if (rc)
+ return rc;
+
+ rc = fun_up(netdev, &qset);
+ if (rc) {
+ qset.state = FUN_QSTATE_DESTROYED;
+ fun_free_rings(netdev, &qset);
+ }
+
+ return rc;
+}
+
+static int funeth_close(struct net_device *netdev)
+{
+ struct fun_qset qset = { .state = FUN_QSTATE_DESTROYED };
+
+ fun_down(netdev, &qset);
+ return 0;
+}
+
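+/* ndo_get_stats64: start from the counters accumulated in the private data
+ * and add those of the currently installed Tx, Rx, and XDP queues.
+ */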
+static void fun_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct funeth_txq **xdpqs;
+ struct funeth_rxq **rxqs;
+ unsigned int i, start;
+
+ stats->tx_packets = fp->tx_packets;
+ stats->tx_bytes = fp->tx_bytes;
+ stats->tx_dropped = fp->tx_dropped;
+
+ stats->rx_packets = fp->rx_packets;
+ stats->rx_bytes = fp->rx_bytes;
+ stats->rx_dropped = fp->rx_dropped;
+
+ rcu_read_lock();
+ rxqs = rcu_dereference(fp->rxqs);
+ if (!rxqs)
+ goto unlock;
+
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
+ struct funeth_txq_stats txs;
+
+ FUN_QSTAT_READ(fp->txqs[i], start, txs);
+ stats->tx_packets += txs.tx_pkts;
+ stats->tx_bytes += txs.tx_bytes;
+ stats->tx_dropped += txs.tx_map_err;
+ }
+
+ for (i = 0; i < netdev->real_num_rx_queues; i++) {
+ struct funeth_rxq_stats rxs;
+
+ FUN_QSTAT_READ(rxqs[i], start, rxs);
+ stats->rx_packets += rxs.rx_pkts;
+ stats->rx_bytes += rxs.rx_bytes;
+ stats->rx_dropped += rxs.rx_map_err + rxs.rx_mem_drops;
+ }
+
+ xdpqs = rcu_dereference(fp->xdpqs);
+ if (!xdpqs)
+ goto unlock;
+
+ for (i = 0; i < fp->num_xdpqs; i++) {
+ struct funeth_txq_stats txs;
+
+ FUN_QSTAT_READ(xdpqs[i], start, txs);
+ stats->tx_packets += txs.tx_pkts;
+ stats->tx_bytes += txs.tx_bytes;
+ }
+unlock:
+ rcu_read_unlock();
+}
+
+static int fun_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ int rc;
+
+ rc = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MTU, new_mtu);
+ if (!rc)
+ netdev->mtu = new_mtu;
+ return rc;
+}
+
+static int fun_set_macaddr(struct net_device *netdev, void *addr)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct sockaddr *saddr = addr;
+ int rc;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (ether_addr_equal(netdev->dev_addr, saddr->sa_data))
+ return 0;
+
+ rc = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MACADDR,
+ ether_addr_to_u64(saddr->sa_data));
+ if (!rc)
+ eth_hw_addr_set(netdev, saddr->sa_data);
+ return rc;
+}
+
+static int fun_get_port_attributes(struct net_device *netdev)
+{
+ static const int keys[] = {
+ FUN_ADMIN_PORT_KEY_MACADDR, FUN_ADMIN_PORT_KEY_CAPABILITIES,
+ FUN_ADMIN_PORT_KEY_ADVERT, FUN_ADMIN_PORT_KEY_MTU
+ };
+ static const int phys_keys[] = {
+ FUN_ADMIN_PORT_KEY_LANE_ATTRS,
+ };
+
+ struct funeth_priv *fp = netdev_priv(netdev);
+ u64 data[ARRAY_SIZE(keys)];
+ u8 mac[ETH_ALEN];
+ int i, rc;
+
+ rc = fun_port_read_cmds(fp, ARRAY_SIZE(keys), keys, data);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < ARRAY_SIZE(keys); i++) {
+ switch (keys[i]) {
+ case FUN_ADMIN_PORT_KEY_MACADDR:
+ u64_to_ether_addr(data[i], mac);
+ if (is_zero_ether_addr(mac)) {
+ eth_hw_addr_random(netdev);
+ } else if (is_valid_ether_addr(mac)) {
+ eth_hw_addr_set(netdev, mac);
+ } else {
+ netdev_err(netdev,
+ "device provided a bad MAC address %pM\n",
+ mac);
+ return -EINVAL;
+ }
+ break;
+
+ case FUN_ADMIN_PORT_KEY_CAPABILITIES:
+ fp->port_caps = data[i];
+ break;
+
+ case FUN_ADMIN_PORT_KEY_ADVERT:
+ fp->advertising = data[i];
+ break;
+
+ case FUN_ADMIN_PORT_KEY_MTU:
+ netdev->mtu = data[i];
+ break;
+ }
+ }
+
+ if (!(fp->port_caps & FUN_PORT_CAP_VPORT)) {
+ rc = fun_port_read_cmds(fp, ARRAY_SIZE(phys_keys), phys_keys,
+ data);
+ if (rc)
+ return rc;
+
+ fp->lane_attrs = data[0];
+ }
+
+ if (netdev->addr_assign_type == NET_ADDR_RANDOM)
+ return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MACADDR,
+ ether_addr_to_u64(netdev->dev_addr));
+ return 0;
+}
+
+static int fun_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ const struct funeth_priv *fp = netdev_priv(dev);
+
+ return copy_to_user(ifr->ifr_data, &fp->hwtstamp_cfg,
+ sizeof(fp->hwtstamp_cfg)) ? -EFAULT : 0;
+}
+
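+/* SIOCSHWTSTAMP: Tx timestamping is not offered, and all accepted Rx filters
+ * are coerced to HWTSTAMP_FILTER_ALL.
+ */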
+static int fun_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct hwtstamp_config cfg;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* no TX HW timestamps */
+ cfg.tx_type = HWTSTAMP_TX_OFF;
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ fp->hwtstamp_cfg = cfg;
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+static int fun_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return fun_hwtstamp_set(dev, ifr);
+ case SIOCGHWTSTAMP:
+ return fun_hwtstamp_get(dev, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* Prepare the queues for XDP. */
+static int fun_enter_xdp(struct net_device *dev, struct bpf_prog *prog)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ unsigned int i, nqs = num_online_cpus();
+ struct funeth_txq **xdpqs;
+ struct funeth_rxq **rxqs;
+ int err;
+
+ xdpqs = alloc_xdpqs(dev, nqs, fp->sq_depth, 0, FUN_QSTATE_INIT_FULL);
+ if (IS_ERR(xdpqs))
+ return PTR_ERR(xdpqs);
+
+ rxqs = rtnl_dereference(fp->rxqs);
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ err = fun_rxq_set_bpf(rxqs[i], prog);
+ if (err)
+ goto out;
+ }
+
+ fp->num_xdpqs = nqs;
+ rcu_assign_pointer(fp->xdpqs, xdpqs);
+ return 0;
+out:
+ while (i--)
+ fun_rxq_set_bpf(rxqs[i], NULL);
+
+ free_xdpqs(xdpqs, nqs, 0, FUN_QSTATE_DESTROYED);
+ return err;
+}
+
+/* Set the queues for non-XDP operation. */
+static void fun_end_xdp(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct funeth_txq **xdpqs;
+ struct funeth_rxq **rxqs;
+ unsigned int i;
+
+ xdpqs = rtnl_dereference(fp->xdpqs);
+ rcu_assign_pointer(fp->xdpqs, NULL);
+ synchronize_net();
+ /* at this point both Rx and Tx XDP processing has ended */
+
+ free_xdpqs(xdpqs, fp->num_xdpqs, 0, FUN_QSTATE_DESTROYED);
+ fp->num_xdpqs = 0;
+
+ rxqs = rtnl_dereference(fp->rxqs);
+ for (i = 0; i < dev->real_num_rx_queues; i++)
+ fun_rxq_set_bpf(rxqs[i], NULL);
+}
+
+#define XDP_MAX_MTU \
+ (PAGE_SIZE - FUN_XDP_HEADROOM - VLAN_ETH_HLEN - FUN_RX_TAILROOM)
+
+static int fun_xdp_setup(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct bpf_prog *old_prog, *prog = xdp->prog;
+ struct funeth_priv *fp = netdev_priv(dev);
+ int i, err;
+
+ /* XDP uses at most one buffer */
+ if (prog && dev->mtu > XDP_MAX_MTU) {
+ netdev_err(dev, "device MTU %u too large for XDP\n", dev->mtu);
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "Device MTU too large for XDP");
+ return -EINVAL;
+ }
+
+ if (!netif_running(dev)) {
+ fp->num_xdpqs = prog ? num_online_cpus() : 0;
+ } else if (prog && !fp->xdp_prog) {
+ err = fun_enter_xdp(dev, prog);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "Failed to set queues for XDP.");
+ return err;
+ }
+ } else if (!prog && fp->xdp_prog) {
+ fun_end_xdp(dev);
+ } else {
+ struct funeth_rxq **rxqs = rtnl_dereference(fp->rxqs);
+
+ for (i = 0; i < dev->real_num_rx_queues; i++)
+ WRITE_ONCE(rxqs[i]->xdp_prog, prog);
+ }
+
+ dev->max_mtu = prog ? XDP_MAX_MTU : FUN_MAX_MTU;
+ old_prog = xchg(&fp->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
+
+static int fun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return fun_xdp_setup(dev, xdp);
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct devlink_port *fun_get_devlink_port(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+
+ return &fp->dl_port;
+}
+
+static int fun_init_vports(struct fun_ethdev *ed, unsigned int n)
+{
+ if (ed->num_vports)
+ return -EINVAL;
+
+ ed->vport_info = kvcalloc(n, sizeof(*ed->vport_info), GFP_KERNEL);
+ if (!ed->vport_info)
+ return -ENOMEM;
+ ed->num_vports = n;
+ return 0;
+}
+
+static void fun_free_vports(struct fun_ethdev *ed)
+{
+ kvfree(ed->vport_info);
+ ed->vport_info = NULL;
+ ed->num_vports = 0;
+}
+
+static struct fun_vport_info *fun_get_vport(struct fun_ethdev *ed,
+ unsigned int vport)
+{
+ if (!ed->vport_info || vport >= ed->num_vports)
+ return NULL;
+
+ return ed->vport_info + vport;
+}
+
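+/* The ndo_set_vf_* operations below forward VF settings to the device as ADI
+ * writes addressed by VF number + 1.
+ */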
+static int fun_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct fun_adi_param mac_param = {};
+ struct fun_dev *fdev = fp->fdev;
+ struct fun_ethdev *ed = to_fun_ethdev(fdev);
+ struct fun_vport_info *vi;
+ int rc = -EINVAL;
+
+ if (is_multicast_ether_addr(mac))
+ return -EINVAL;
+
+ mutex_lock(&ed->state_mutex);
+ vi = fun_get_vport(ed, vf);
+ if (!vi)
+ goto unlock;
+
+ mac_param.u.mac = FUN_ADI_MAC_INIT(ether_addr_to_u64(mac));
+ rc = fun_adi_write(fdev, FUN_ADMIN_ADI_ATTR_MACADDR, vf + 1,
+ &mac_param);
+ if (!rc)
+ ether_addr_copy(vi->mac, mac);
+unlock:
+ mutex_unlock(&ed->state_mutex);
+ return rc;
+}
+
+static int fun_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct fun_adi_param vlan_param = {};
+ struct fun_dev *fdev = fp->fdev;
+ struct fun_ethdev *ed = to_fun_ethdev(fdev);
+ struct fun_vport_info *vi;
+ int rc = -EINVAL;
+
+ if (vlan > 4095 || qos > 7)
+ return -EINVAL;
+ if (vlan_proto && vlan_proto != htons(ETH_P_8021Q) &&
+ vlan_proto != htons(ETH_P_8021AD))
+ return -EINVAL;
+
+ mutex_lock(&ed->state_mutex);
+ vi = fun_get_vport(ed, vf);
+ if (!vi)
+ goto unlock;
+
+ vlan_param.u.vlan = FUN_ADI_VLAN_INIT(be16_to_cpu(vlan_proto),
+ ((u16)qos << VLAN_PRIO_SHIFT) | vlan);
+ rc = fun_adi_write(fdev, FUN_ADMIN_ADI_ATTR_VLAN, vf + 1, &vlan_param);
+ if (!rc) {
+ vi->vlan = vlan;
+ vi->qos = qos;
+ vi->vlan_proto = vlan_proto;
+ }
+unlock:
+ mutex_unlock(&ed->state_mutex);
+ return rc;
+}
+
+static int fun_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
+ int max_tx_rate)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct fun_adi_param rate_param = {};
+ struct fun_dev *fdev = fp->fdev;
+ struct fun_ethdev *ed = to_fun_ethdev(fdev);
+ struct fun_vport_info *vi;
+ int rc = -EINVAL;
+
+ if (min_tx_rate)
+ return -EINVAL;
+
+ mutex_lock(&ed->state_mutex);
+ vi = fun_get_vport(ed, vf);
+ if (!vi)
+ goto unlock;
+
+ rate_param.u.rate = FUN_ADI_RATE_INIT(max_tx_rate);
+ rc = fun_adi_write(fdev, FUN_ADMIN_ADI_ATTR_RATE, vf + 1, &rate_param);
+ if (!rc)
+ vi->max_rate = max_tx_rate;
+unlock:
+ mutex_unlock(&ed->state_mutex);
+ return rc;
+}
+
+static int fun_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivi)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct fun_ethdev *ed = to_fun_ethdev(fp->fdev);
+ const struct fun_vport_info *vi;
+
+ mutex_lock(&ed->state_mutex);
+ vi = fun_get_vport(ed, vf);
+ if (!vi)
+ goto unlock;
+
+ memset(ivi, 0, sizeof(*ivi));
+ ivi->vf = vf;
+ ether_addr_copy(ivi->mac, vi->mac);
+ ivi->vlan = vi->vlan;
+ ivi->qos = vi->qos;
+ ivi->vlan_proto = vi->vlan_proto;
+ ivi->max_tx_rate = vi->max_rate;
+ ivi->spoofchk = vi->spoofchk;
+unlock:
+ mutex_unlock(&ed->state_mutex);
+ return vi ? 0 : -EINVAL;
+}
+
+static void fun_uninit(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+
+ fun_prune_queue_irqs(dev);
+ xa_destroy(&fp->irqs);
+}
+
+static const struct net_device_ops fun_netdev_ops = {
+ .ndo_open = funeth_open,
+ .ndo_stop = funeth_close,
+ .ndo_start_xmit = fun_start_xmit,
+ .ndo_get_stats64 = fun_get_stats64,
+ .ndo_change_mtu = fun_change_mtu,
+ .ndo_set_mac_address = fun_set_macaddr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_eth_ioctl = fun_ioctl,
+ .ndo_uninit = fun_uninit,
+ .ndo_bpf = fun_xdp,
+ .ndo_xdp_xmit = fun_xdp_xmit_frames,
+ .ndo_set_vf_mac = fun_set_vf_mac,
+ .ndo_set_vf_vlan = fun_set_vf_vlan,
+ .ndo_set_vf_rate = fun_set_vf_rate,
+ .ndo_get_vf_config = fun_get_vf_config,
+ .ndo_get_devlink_port = fun_get_devlink_port,
+};
+
+#define GSO_ENCAP_FLAGS (NETIF_F_GSO_GRE | NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+#define VLAN_FEAT (NETIF_F_SG | NETIF_F_HW_CSUM | TSO_FLAGS | \
+ GSO_ENCAP_FLAGS | NETIF_F_HIGHDMA)
+
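+/* Fill the RSS indirection table with the default even spread over @nrx Rx
+ * queues.
+ */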
+static void fun_dflt_rss_indir(struct funeth_priv *fp, unsigned int nrx)
+{
+ unsigned int i;
+
+ for (i = 0; i < fp->indir_table_nentries; i++)
+ fp->indir_table[i] = ethtool_rxfh_indir_default(i, nrx);
+}
+
+/* Reset the RSS indirection table to equal distribution across the current
+ * number of Rx queues. Called at init time and whenever the number of Rx
+ * queues changes subsequently. Note that this may also resize the indirection
+ * table.
+ */
+static void fun_reset_rss_indir(struct net_device *dev, unsigned int nrx)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+
+ if (!fp->rss_cfg)
+ return;
+
+ /* Set the table size to the max possible that allows an equal number
+ * of occurrences of each CQ.
+ */
+ fp->indir_table_nentries = rounddown(FUN_ETH_RSS_MAX_INDIR_ENT, nrx);
+ fun_dflt_rss_indir(fp, nrx);
+}
+
+/* Update the RSS LUT to contain only queues in [0, nrx). Normally this will
+ * update the LUT to an equal distribution among nrx queues. If @only_if_needed
+ * is set, the LUT is left unchanged if it already does not reference any queues
+ * >= nrx.
+ */
+static int fun_rss_set_qnum(struct net_device *dev, unsigned int nrx,
+ bool only_if_needed)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ u32 old_lut[FUN_ETH_RSS_MAX_INDIR_ENT];
+ unsigned int i, oldsz;
+ int err;
+
+ if (!fp->rss_cfg)
+ return 0;
+
+ if (only_if_needed) {
+ for (i = 0; i < fp->indir_table_nentries; i++)
+ if (fp->indir_table[i] >= nrx)
+ break;
+
+ if (i >= fp->indir_table_nentries)
+ return 0;
+ }
+
+ memcpy(old_lut, fp->indir_table, sizeof(old_lut));
+ oldsz = fp->indir_table_nentries;
+ fun_reset_rss_indir(dev, nrx);
+
+ err = fun_config_rss(dev, fp->hash_algo, fp->rss_key,
+ fp->indir_table, FUN_ADMIN_SUBOP_MODIFY);
+ if (!err)
+ return 0;
+
+ memcpy(fp->indir_table, old_lut, sizeof(old_lut));
+ fp->indir_table_nentries = oldsz;
+ return err;
+}
+
+/* Allocate the DMA area for the RSS configuration commands to the device, and
+ * initialize the hash, hash key, indirection table size and its entries to
+ * their defaults. The indirection table defaults to equal distribution across
+ * the Rx queues.
+ */
+static int fun_init_rss(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ size_t size = sizeof(fp->rss_key) + sizeof(fp->indir_table);
+
+ fp->rss_hw_id = FUN_HCI_ID_INVALID;
+ if (!(fp->port_caps & FUN_PORT_CAP_OFFLOADS))
+ return 0;
+
+ fp->rss_cfg = dma_alloc_coherent(&fp->pdev->dev, size,
+ &fp->rss_dma_addr, GFP_KERNEL);
+ if (!fp->rss_cfg)
+ return -ENOMEM;
+
+ fp->hash_algo = FUN_ETH_RSS_ALG_TOEPLITZ;
+ netdev_rss_key_fill(fp->rss_key, sizeof(fp->rss_key));
+ fun_reset_rss_indir(dev, dev->real_num_rx_queues);
+ return 0;
+}
+
+static void fun_free_rss(struct funeth_priv *fp)
+{
+ if (fp->rss_cfg) {
+ dma_free_coherent(&fp->pdev->dev,
+ sizeof(fp->rss_key) + sizeof(fp->indir_table),
+ fp->rss_cfg, fp->rss_dma_addr);
+ fp->rss_cfg = NULL;
+ }
+}
+
+void fun_set_ring_count(struct net_device *netdev, unsigned int ntx,
+ unsigned int nrx)
+{
+ netif_set_real_num_tx_queues(netdev, ntx);
+ if (nrx != netdev->real_num_rx_queues) {
+ netif_set_real_num_rx_queues(netdev, nrx);
+ fun_reset_rss_indir(netdev, nrx);
+ }
+}
+
+static int fun_init_stats_area(struct funeth_priv *fp)
+{
+ unsigned int nstats;
+
+ if (!(fp->port_caps & FUN_PORT_CAP_STATS))
+ return 0;
+
+ nstats = PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_STATS_MAX +
+ PORT_MAC_FEC_STATS_MAX;
+
+ fp->stats = dma_alloc_coherent(&fp->pdev->dev, nstats * sizeof(u64),
+ &fp->stats_dma_addr, GFP_KERNEL);
+ if (!fp->stats)
+ return -ENOMEM;
+ return 0;
+}
+
+static void fun_free_stats_area(struct funeth_priv *fp)
+{
+ unsigned int nstats;
+
+ if (fp->stats) {
+ nstats = PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_STATS_MAX +
+ PORT_MAC_FEC_STATS_MAX;
+ dma_free_coherent(&fp->pdev->dev, nstats * sizeof(u64),
+ fp->stats, fp->stats_dma_addr);
+ fp->stats = NULL;
+ }
+}
+
+static int fun_dl_port_register(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct devlink *dl = priv_to_devlink(fp->fdev);
+ struct devlink_port_attrs attrs = {};
+ unsigned int idx;
+
+ if (fp->port_caps & FUN_PORT_CAP_VPORT) {
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_VIRTUAL;
+ idx = fp->lport;
+ } else {
+ idx = netdev->dev_port;
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.lanes = fp->lane_attrs & 7;
+ if (fp->lane_attrs & FUN_PORT_LANE_SPLIT) {
+ attrs.split = 1;
+ attrs.phys.port_number = fp->lport & ~3;
+ attrs.phys.split_subport_number = fp->lport & 3;
+ } else {
+ attrs.phys.port_number = fp->lport;
+ }
+ }
+
+ devlink_port_attrs_set(&fp->dl_port, &attrs);
+
+ return devlink_port_register(dl, &fp->dl_port, idx);
+}
+
+/* Determine the max Tx/Rx queues for a port. */
+static int fun_max_qs(struct fun_ethdev *ed, unsigned int *ntx,
+ unsigned int *nrx)
+{
+ int neth;
+
+ if (ed->num_ports > 1 || is_kdump_kernel()) {
+ *ntx = 1;
+ *nrx = 1;
+ return 0;
+ }
+
+ neth = fun_get_res_count(&ed->fdev, FUN_ADMIN_OP_ETH);
+ if (neth < 0)
+ return neth;
+
+ /* We determine the max number of queues based on the CPU
+ * cores, device interrupts and queues, RSS size, and device Tx flows.
+ *
+ * - At least 1 Rx and 1 Tx queue.
+ * - At most 1 Rx/Tx queue per core.
+ * - Each Rx/Tx queue needs 1 SQ.
+ */
+ *ntx = min(ed->nsqs_per_port - 1, num_online_cpus());
+ *nrx = *ntx;
+ if (*ntx > neth)
+ *ntx = neth;
+ if (*nrx > FUN_ETH_RSS_MAX_INDIR_ENT)
+ *nrx = FUN_ETH_RSS_MAX_INDIR_ENT;
+ return 0;
+}
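
As a hedged numeric example of the limits above (all values hypothetical): with
nsqs_per_port = 9, 16 online CPUs, neth = 32 and an indirection-table maximum of
64 entries, *ntx = min(9 - 1, 16) = 8, the neth clamp leaves it at 8, and
*nrx = 8 is already within the RSS limit, so the port is created with at most
8 Tx and 8 Rx queues.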
+
+static void fun_queue_defaults(struct net_device *dev, unsigned int nsqs)
+{
+ unsigned int ntx, nrx;
+
+ ntx = min(dev->num_tx_queues, FUN_DFLT_QUEUES);
+ nrx = min(dev->num_rx_queues, FUN_DFLT_QUEUES);
+ if (ntx <= nrx) {
+ ntx = min(ntx, nsqs / 2);
+ nrx = min(nrx, nsqs - ntx);
+ } else {
+ nrx = min(nrx, nsqs / 2);
+ ntx = min(ntx, nsqs - nrx);
+ }
+
+ netif_set_real_num_tx_queues(dev, ntx);
+ netif_set_real_num_rx_queues(dev, nrx);
+}
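
A small user-space sketch of the Tx/Rx split above, with FUN_DFLT_QUEUES assumed
to be 16 purely for the example (the driver's real default may differ):

    #include <stdio.h>

    #define DFLT_QUEUES 16  /* assumed stand-in for FUN_DFLT_QUEUES */

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
            return a < b ? a : b;
    }

    /* Limit the smaller side to half the SQs first, then give the rest to the other. */
    static void split_queues(unsigned int num_tx, unsigned int num_rx,
                             unsigned int nsqs, unsigned int *ntx,
                             unsigned int *nrx)
    {
            *ntx = min_u(num_tx, DFLT_QUEUES);
            *nrx = min_u(num_rx, DFLT_QUEUES);
            if (*ntx <= *nrx) {
                    *ntx = min_u(*ntx, nsqs / 2);
                    *nrx = min_u(*nrx, nsqs - *ntx);
            } else {
                    *nrx = min_u(*nrx, nsqs / 2);
                    *ntx = min_u(*ntx, nsqs - *nrx);
            }
    }

    int main(void)
    {
            unsigned int ntx, nrx;

            split_queues(16, 16, 12, &ntx, &nrx);
            printf("ntx=%u nrx=%u\n", ntx, nrx);    /* 6 and 6 */
            split_queues(4, 16, 12, &ntx, &nrx);
            printf("ntx=%u nrx=%u\n", ntx, nrx);    /* 4 and 8 */
            return 0;
    }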
+
+/* Replace the existing Rx/Tx/XDP queues with an equal number of queues with
+ * different settings, e.g. depth. This is a disruptive replacement that
+ * temporarily shuts down the data path and should be limited to changes that
+ * can't be applied to live queues. The old queues are always discarded.
+ */
+int fun_replace_queues(struct net_device *dev, struct fun_qset *newqs,
+ struct netlink_ext_ack *extack)
+{
+ struct fun_qset oldqs = { .state = FUN_QSTATE_DESTROYED };
+ struct funeth_priv *fp = netdev_priv(dev);
+ int err;
+
+ newqs->nrxqs = dev->real_num_rx_queues;
+ newqs->ntxqs = dev->real_num_tx_queues;
+ newqs->nxdpqs = fp->num_xdpqs;
+ newqs->state = FUN_QSTATE_INIT_SW;
+ err = fun_alloc_rings(dev, newqs);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to allocate memory for new queues, keeping current settings");
+ return err;
+ }
+
+ fun_down(dev, &oldqs);
+
+ err = fun_up(dev, newqs);
+ if (!err)
+ return 0;
+
+ /* The new queues couldn't be installed. We do not retry the old queues
+ * as, from the device's point of view, they are identical to the new
+ * queues and would fail in the same way.
+ */
+ newqs->state = FUN_QSTATE_DESTROYED;
+ fun_free_rings(dev, newqs);
+ NL_SET_ERR_MSG_MOD(extack, "Unable to restore the data path with the new queues.");
+ return err;
+}
+
+/* Change the number of Rx/Tx queues of a device while it is up. This is done
+ * by incrementally adding/removing queues to meet the new requirements while
+ * handling ongoing traffic.
+ */
+int fun_change_num_queues(struct net_device *dev, unsigned int ntx,
+ unsigned int nrx)
+{
+ unsigned int keep_tx = min(dev->real_num_tx_queues, ntx);
+ unsigned int keep_rx = min(dev->real_num_rx_queues, nrx);
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct fun_qset oldqs = {
+ .rxqs = rtnl_dereference(fp->rxqs),
+ .txqs = fp->txqs,
+ .nrxqs = dev->real_num_rx_queues,
+ .ntxqs = dev->real_num_tx_queues,
+ .rxq_start = keep_rx,
+ .txq_start = keep_tx,
+ .state = FUN_QSTATE_DESTROYED
+ };
+ struct fun_qset newqs = {
+ .nrxqs = nrx,
+ .ntxqs = ntx,
+ .rxq_start = keep_rx,
+ .txq_start = keep_tx,
+ .cq_depth = fp->cq_depth,
+ .rq_depth = fp->rq_depth,
+ .sq_depth = fp->sq_depth,
+ .state = FUN_QSTATE_INIT_FULL
+ };
+ int i, err;
+
+ err = fun_alloc_rings(dev, &newqs);
+ if (err)
+ goto free_irqs;
+
+ err = fun_enable_irqs(dev); /* of any newly added queues */
+ if (err)
+ goto free_rings;
+
+ /* copy the queues we are keeping to the new set */
+ memcpy(newqs.rxqs, oldqs.rxqs, keep_rx * sizeof(*oldqs.rxqs));
+ memcpy(newqs.txqs, fp->txqs, keep_tx * sizeof(*fp->txqs));
+
+ if (nrx < dev->real_num_rx_queues) {
+ err = fun_rss_set_qnum(dev, nrx, true);
+ if (err)
+ goto disable_tx_irqs;
+
+ for (i = nrx; i < dev->real_num_rx_queues; i++)
+ fun_disable_one_irq(container_of(oldqs.rxqs[i]->napi,
+ struct fun_irq, napi));
+
+ netif_set_real_num_rx_queues(dev, nrx);
+ }
+
+ if (ntx < dev->real_num_tx_queues)
+ netif_set_real_num_tx_queues(dev, ntx);
+
+ rcu_assign_pointer(fp->rxqs, newqs.rxqs);
+ fp->txqs = newqs.txqs;
+ synchronize_net();
+
+ if (ntx > dev->real_num_tx_queues)
+ netif_set_real_num_tx_queues(dev, ntx);
+
+ if (nrx > dev->real_num_rx_queues) {
+ netif_set_real_num_rx_queues(dev, nrx);
+ fun_rss_set_qnum(dev, nrx, false);
+ }
+
+ /* disable interrupts of any excess Tx queues */
+ for (i = keep_tx; i < oldqs.ntxqs; i++)
+ fun_disable_one_irq(oldqs.txqs[i]->irq);
+
+ fun_free_rings(dev, &oldqs);
+ fun_prune_queue_irqs(dev);
+ return 0;
+
+disable_tx_irqs:
+ for (i = oldqs.ntxqs; i < ntx; i++)
+ fun_disable_one_irq(newqs.txqs[i]->irq);
+free_rings:
+ newqs.state = FUN_QSTATE_DESTROYED;
+ fun_free_rings(dev, &newqs);
+free_irqs:
+ fun_prune_queue_irqs(dev);
+ return err;
+}
+
+static int fun_create_netdev(struct fun_ethdev *ed, unsigned int portid)
+{
+ struct fun_dev *fdev = &ed->fdev;
+ struct net_device *netdev;
+ struct funeth_priv *fp;
+ unsigned int ntx, nrx;
+ int rc;
+
+ rc = fun_max_qs(ed, &ntx, &nrx);
+ if (rc)
+ return rc;
+
+ netdev = alloc_etherdev_mqs(sizeof(*fp), ntx, nrx);
+ if (!netdev) {
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ netdev->dev_port = portid;
+ fun_queue_defaults(netdev, ed->nsqs_per_port);
+
+ fp = netdev_priv(netdev);
+ fp->fdev = fdev;
+ fp->pdev = to_pci_dev(fdev->dev);
+ fp->netdev = netdev;
+ xa_init(&fp->irqs);
+ fp->rx_irq_ofst = ntx;
+ seqcount_init(&fp->link_seq);
+
+ fp->lport = INVALID_LPORT;
+ rc = fun_port_create(netdev);
+ if (rc)
+ goto free_netdev;
+
+ /* bind port to admin CQ for async events */
+ rc = fun_bind(fdev, FUN_ADMIN_BIND_TYPE_PORT, portid,
+ FUN_ADMIN_BIND_TYPE_EPCQ, 0);
+ if (rc)
+ goto destroy_port;
+
+ rc = fun_get_port_attributes(netdev);
+ if (rc)
+ goto destroy_port;
+
+ rc = fun_init_rss(netdev);
+ if (rc)
+ goto destroy_port;
+
+ rc = fun_init_stats_area(fp);
+ if (rc)
+ goto free_rss;
+
+ SET_NETDEV_DEV(netdev, fdev->dev);
+ netdev->netdev_ops = &fun_netdev_ops;
+
+ netdev->hw_features = NETIF_F_SG | NETIF_F_RXHASH | NETIF_F_RXCSUM;
+ if (fp->port_caps & FUN_PORT_CAP_OFFLOADS)
+ netdev->hw_features |= NETIF_F_HW_CSUM | TSO_FLAGS;
+ if (fp->port_caps & FUN_PORT_CAP_ENCAP_OFFLOADS)
+ netdev->hw_features |= GSO_ENCAP_FLAGS;
+
+ netdev->features |= netdev->hw_features | NETIF_F_HIGHDMA;
+ netdev->vlan_features = netdev->features & VLAN_FEAT;
+ netdev->mpls_features = netdev->vlan_features;
+ netdev->hw_enc_features = netdev->hw_features;
+
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = FUN_MAX_MTU;
+
+ fun_set_ethtool_ops(netdev);
+
+ /* configurable parameters */
+ fp->sq_depth = min(SQ_DEPTH, fdev->q_depth);
+ fp->cq_depth = min(CQ_DEPTH, fdev->q_depth);
+ fp->rq_depth = min_t(unsigned int, RQ_DEPTH, fdev->q_depth);
+ fp->rx_coal_usec = CQ_INTCOAL_USEC;
+ fp->rx_coal_count = CQ_INTCOAL_NPKT;
+ fp->tx_coal_usec = SQ_INTCOAL_USEC;
+ fp->tx_coal_count = SQ_INTCOAL_NPKT;
+ fp->cq_irq_db = FUN_IRQ_CQ_DB(fp->rx_coal_usec, fp->rx_coal_count);
+
+ rc = fun_dl_port_register(netdev);
+ if (rc)
+ goto free_stats;
+
+ fp->ktls_id = FUN_HCI_ID_INVALID;
+ fun_ktls_init(netdev); /* optional, failure OK */
+
+ netif_carrier_off(netdev);
+ ed->netdevs[portid] = netdev;
+ rc = register_netdev(netdev);
+ if (rc)
+ goto unreg_devlink;
+
+ if (fp->dl_port.devlink)
+ devlink_port_type_eth_set(&fp->dl_port, netdev);
+
+ return 0;
+
+unreg_devlink:
+ ed->netdevs[portid] = NULL;
+ fun_ktls_cleanup(fp);
+ if (fp->dl_port.devlink)
+ devlink_port_unregister(&fp->dl_port);
+free_stats:
+ fun_free_stats_area(fp);
+free_rss:
+ fun_free_rss(fp);
+destroy_port:
+ fun_port_destroy(netdev);
+free_netdev:
+ free_netdev(netdev);
+done:
+ dev_err(fdev->dev, "couldn't allocate port %u, error %d", portid, rc);
+ return rc;
+}
+
+static void fun_destroy_netdev(struct net_device *netdev)
+{
+ struct funeth_priv *fp;
+
+ fp = netdev_priv(netdev);
+ if (fp->dl_port.devlink) {
+ devlink_port_type_clear(&fp->dl_port);
+ devlink_port_unregister(&fp->dl_port);
+ }
+ unregister_netdev(netdev);
+ fun_ktls_cleanup(fp);
+ fun_free_stats_area(fp);
+ fun_free_rss(fp);
+ fun_port_destroy(netdev);
+ free_netdev(netdev);
+}
+
+static int fun_create_ports(struct fun_ethdev *ed, unsigned int nports)
+{
+ struct fun_dev *fd = &ed->fdev;
+ int i, rc;
+
+ /* The admin queue takes 1 IRQ and 2 SQs. */
+ ed->nsqs_per_port = min(fd->num_irqs - 1,
+ fd->kern_end_qid - 2) / nports;
+ if (ed->nsqs_per_port < 2) {
+ dev_err(fd->dev, "Too few SQs for %u ports", nports);
+ return -EINVAL;
+ }
+
+ ed->netdevs = kcalloc(nports, sizeof(*ed->netdevs), GFP_KERNEL);
+ if (!ed->netdevs)
+ return -ENOMEM;
+
+ ed->num_ports = nports;
+ for (i = 0; i < nports; i++) {
+ rc = fun_create_netdev(ed, i);
+ if (rc)
+ goto free_netdevs;
+ }
+
+ return 0;
+
+free_netdevs:
+ while (i)
+ fun_destroy_netdev(ed->netdevs[--i]);
+ kfree(ed->netdevs);
+ ed->netdevs = NULL;
+ ed->num_ports = 0;
+ return rc;
+}
+
+static void fun_destroy_ports(struct fun_ethdev *ed)
+{
+ unsigned int i;
+
+ for (i = 0; i < ed->num_ports; i++)
+ fun_destroy_netdev(ed->netdevs[i]);
+
+ kfree(ed->netdevs);
+ ed->netdevs = NULL;
+ ed->num_ports = 0;
+}
+
+static void fun_update_link_state(const struct fun_ethdev *ed,
+ const struct fun_admin_port_notif *notif)
+{
+ unsigned int port_idx = be16_to_cpu(notif->id);
+ struct net_device *netdev;
+ struct funeth_priv *fp;
+
+ if (port_idx >= ed->num_ports)
+ return;
+
+ netdev = ed->netdevs[port_idx];
+ fp = netdev_priv(netdev);
+
+ write_seqcount_begin(&fp->link_seq);
+ fp->link_speed = be32_to_cpu(notif->speed) * 10; /* speed is in units of 10 Mbps */
+ fp->active_fc = notif->flow_ctrl;
+ fp->active_fec = notif->fec;
+ fp->xcvr_type = notif->xcvr_type;
+ fp->link_down_reason = notif->link_down_reason;
+ fp->lp_advertising = be64_to_cpu(notif->lp_advertising);
+
+ if ((notif->link_state | notif->missed_events) & FUN_PORT_FLAG_MAC_DOWN)
+ netif_carrier_off(netdev);
+ if (notif->link_state & FUN_PORT_FLAG_MAC_UP)
+ netif_carrier_on(netdev);
+
+ write_seqcount_end(&fp->link_seq);
+ fun_report_link(netdev);
+}
+
+/* handler for async events delivered through the admin CQ */
+static void fun_event_cb(struct fun_dev *fdev, void *entry)
+{
+ u8 op = ((struct fun_admin_rsp_common *)entry)->op;
+
+ if (op == FUN_ADMIN_OP_PORT) {
+ const struct fun_admin_port_notif *rsp = entry;
+
+ if (rsp->subop == FUN_ADMIN_SUBOP_NOTIFY) {
+ fun_update_link_state(to_fun_ethdev(fdev), rsp);
+ } else if (rsp->subop == FUN_ADMIN_SUBOP_RES_COUNT) {
+ const struct fun_admin_res_count_rsp *r = entry;
+
+ if (r->count.data)
+ set_bit(FUN_SERV_RES_CHANGE, &fdev->service_flags);
+ else
+ set_bit(FUN_SERV_DEL_PORTS, &fdev->service_flags);
+ fun_serv_sched(fdev);
+ } else {
+ dev_info(fdev->dev, "adminq event unexpected op %u subop %u",
+ op, rsp->subop);
+ }
+ } else {
+ dev_info(fdev->dev, "adminq event unexpected op %u", op);
+ }
+}
+
+/* handler for pending work managed by the service task */
+static void fun_service_cb(struct fun_dev *fdev)
+{
+ struct fun_ethdev *ed = to_fun_ethdev(fdev);
+ int rc;
+
+ if (test_and_clear_bit(FUN_SERV_DEL_PORTS, &fdev->service_flags))
+ fun_destroy_ports(ed);
+
+ if (!test_and_clear_bit(FUN_SERV_RES_CHANGE, &fdev->service_flags))
+ return;
+
+ rc = fun_get_res_count(fdev, FUN_ADMIN_OP_PORT);
+ if (rc < 0 || rc == ed->num_ports)
+ return;
+
+ if (ed->num_ports)
+ fun_destroy_ports(ed);
+ if (rc)
+ fun_create_ports(ed, rc);
+}
+
+static int funeth_sriov_configure(struct pci_dev *pdev, int nvfs)
+{
+ struct fun_dev *fdev = pci_get_drvdata(pdev);
+ struct fun_ethdev *ed = to_fun_ethdev(fdev);
+ int rc;
+
+ if (nvfs == 0) {
+ if (pci_vfs_assigned(pdev)) {
+ dev_warn(&pdev->dev,
+ "Cannot disable SR-IOV while VFs are assigned\n");
+ return -EPERM;
+ }
+
+ mutex_lock(&ed->state_mutex);
+ fun_free_vports(ed);
+ mutex_unlock(&ed->state_mutex);
+ pci_disable_sriov(pdev);
+ return 0;
+ }
+
+ rc = pci_enable_sriov(pdev, nvfs);
+ if (rc)
+ return rc;
+
+ mutex_lock(&ed->state_mutex);
+ rc = fun_init_vports(ed, nvfs);
+ mutex_unlock(&ed->state_mutex);
+ if (rc) {
+ pci_disable_sriov(pdev);
+ return rc;
+ }
+
+ return nvfs;
+}
+
+static int funeth_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct fun_dev_params aqreq = {
+ .cqe_size_log2 = ilog2(ADMIN_CQE_SIZE),
+ .sqe_size_log2 = ilog2(ADMIN_SQE_SIZE),
+ .cq_depth = ADMIN_CQ_DEPTH,
+ .sq_depth = ADMIN_SQ_DEPTH,
+ .rq_depth = ADMIN_RQ_DEPTH,
+ .min_msix = 2, /* 1 Rx + 1 Tx */
+ .event_cb = fun_event_cb,
+ .serv_cb = fun_service_cb,
+ };
+ struct devlink *devlink;
+ struct fun_ethdev *ed;
+ struct fun_dev *fdev;
+ int rc;
+
+ devlink = fun_devlink_alloc(&pdev->dev);
+ if (!devlink) {
+ dev_err(&pdev->dev, "devlink alloc failed\n");
+ return -ENOMEM;
+ }
+
+ ed = devlink_priv(devlink);
+ mutex_init(&ed->state_mutex);
+
+ fdev = &ed->fdev;
+ rc = fun_dev_enable(fdev, pdev, &aqreq, KBUILD_MODNAME);
+ if (rc)
+ goto free_devlink;
+
+ rc = fun_get_res_count(fdev, FUN_ADMIN_OP_PORT);
+ if (rc > 0)
+ rc = fun_create_ports(ed, rc);
+ if (rc < 0)
+ goto disable_dev;
+
+ fun_serv_restart(fdev);
+ fun_devlink_register(devlink);
+ return 0;
+
+disable_dev:
+ fun_dev_disable(fdev);
+free_devlink:
+ mutex_destroy(&ed->state_mutex);
+ fun_devlink_free(devlink);
+ return rc;
+}
+
+static void funeth_remove(struct pci_dev *pdev)
+{
+ struct fun_dev *fdev = pci_get_drvdata(pdev);
+ struct devlink *devlink;
+ struct fun_ethdev *ed;
+
+ ed = to_fun_ethdev(fdev);
+ devlink = priv_to_devlink(ed);
+ fun_devlink_unregister(devlink);
+
+#ifdef CONFIG_PCI_IOV
+ funeth_sriov_configure(pdev, 0);
+#endif
+
+ fun_serv_stop(fdev);
+ fun_destroy_ports(ed);
+ fun_dev_disable(fdev);
+ mutex_destroy(&ed->state_mutex);
+
+ fun_devlink_free(devlink);
+}
+
+static struct pci_driver funeth_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = funeth_id_table,
+ .probe = funeth_probe,
+ .remove = funeth_remove,
+ .shutdown = funeth_remove,
+ .sriov_configure = funeth_sriov_configure,
+};
+
+module_pci_driver(funeth_driver);
+
+MODULE_AUTHOR("Dimitris Michailidis <dmichail@fungible.com>");
+MODULE_DESCRIPTION("Fungible Ethernet Network Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DEVICE_TABLE(pci, funeth_id_table);
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_rx.c b/drivers/net/ethernet/fungible/funeth/funeth_rx.c
new file mode 100644
index 000000000000..0f6a549b9f67
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_rx.c
@@ -0,0 +1,826 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include <linux/bpf_trace.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/filter.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include "funeth_txrx.h"
+#include "funeth.h"
+#include "fun_queue.h"
+
+#define CREATE_TRACE_POINTS
+#include "funeth_trace.h"
+
+/* Given the device's max supported MTU and pages of at least 4KB a packet can
+ * be scattered into at most 4 buffers.
+ */
+#define RX_MAX_FRAGS 4
+
+/* Per packet headroom in non-XDP mode. Present only for 1-frag packets. */
+#define FUN_RX_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
+
+/* We try to reuse pages for our buffers. To avoid frequent page ref writes we
+ * take EXTRA_PAGE_REFS references at once and then hand them out one per packet
+ * occupying the buffer.
+ */
+#define EXTRA_PAGE_REFS 1000000
+#define MIN_PAGE_REFS 1000
+
+enum {
+ FUN_XDP_FLUSH_REDIR = 1,
+ FUN_XDP_FLUSH_TX = 2,
+};
+
+/* See if a page is running low on refs we are holding and if so take more. */
+static void refresh_refs(struct funeth_rxbuf *buf)
+{
+ if (unlikely(buf->pg_refs < MIN_PAGE_REFS)) {
+ buf->pg_refs += EXTRA_PAGE_REFS;
+ page_ref_add(buf->page, EXTRA_PAGE_REFS);
+ }
+}
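
The batching above amortizes atomic reference-count updates; here is a minimal
sketch of the same idea with a plain counter standing in for the page refcount
(the EXTRA/MIN values are illustrative, not the driver's):

    #include <stdio.h>

    #define EXTRA_REFS 8    /* illustrative; the driver uses a much larger batch */
    #define MIN_REFS   2

    struct buf {
            int page_refcount;      /* stand-in for the struct page refcount */
            int held_refs;          /* refs we still own and may hand out */
    };

    /* Take a fresh batch of references only when our local stock runs low. */
    static void refresh(struct buf *b)
    {
            if (b->held_refs < MIN_REFS) {
                    b->held_refs += EXTRA_REFS;
                    b->page_refcount += EXTRA_REFS; /* one counter update per batch */
            }
    }

    int main(void)
    {
            struct buf b = { .page_refcount = 1, .held_refs = 1 };
            int pkt;

            for (pkt = 0; pkt < 20; pkt++) {
                    refresh(&b);
                    b.held_refs--;  /* hand one ref to the packet using the buffer */
            }
            printf("refcount=%d held=%d\n", b.page_refcount, b.held_refs);
            return 0;
    }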
+
+/* Offer a buffer to the Rx buffer cache. The cache will hold the buffer if its
+ * page is worth retaining and there's room for it. Otherwise the page is
+ * unmapped and our references released.
+ */
+static void cache_offer(struct funeth_rxq *q, const struct funeth_rxbuf *buf)
+{
+ struct funeth_rx_cache *c = &q->cache;
+
+ if (c->prod_cnt - c->cons_cnt <= c->mask && buf->node == numa_mem_id()) {
+ c->bufs[c->prod_cnt & c->mask] = *buf;
+ c->prod_cnt++;
+ } else {
+ dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ __page_frag_cache_drain(buf->page, buf->pg_refs);
+ }
+}
+
+/* Get a page from the Rx buffer cache. We only consider the next available
+ * page and return it if we own all its references.
+ */
+static bool cache_get(struct funeth_rxq *q, struct funeth_rxbuf *rb)
+{
+ struct funeth_rx_cache *c = &q->cache;
+ struct funeth_rxbuf *buf;
+
+ if (c->prod_cnt == c->cons_cnt)
+ return false; /* empty cache */
+
+ buf = &c->bufs[c->cons_cnt & c->mask];
+ if (page_ref_count(buf->page) == buf->pg_refs) {
+ dma_sync_single_for_device(q->dma_dev, buf->dma_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ *rb = *buf;
+ buf->page = NULL;
+ refresh_refs(rb);
+ c->cons_cnt++;
+ return true;
+ }
+
+ /* Page can't be reused. If the cache is full drop this page. */
+ if (c->prod_cnt - c->cons_cnt > c->mask) {
+ dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ __page_frag_cache_drain(buf->page, buf->pg_refs);
+ buf->page = NULL;
+ c->cons_cnt++;
+ }
+ return false;
+}
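
The cache is a power-of-two ring addressed with free-running counters; a brief
sketch of the occupancy checks used above (the 8-slot depth is only for
illustration):

    #include <stdbool.h>
    #include <stdio.h>

    #define CACHE_DEPTH 8   /* illustrative; must be a power of two */

    struct ring {
            unsigned int prod_cnt;  /* free-running, never wrapped by hand */
            unsigned int cons_cnt;
            unsigned int mask;      /* CACHE_DEPTH - 1 */
    };

    static bool ring_has_room(const struct ring *r)
    {
            /* occupancy is prod - cons even after the counters wrap */
            return r->prod_cnt - r->cons_cnt <= r->mask;
    }

    static bool ring_empty(const struct ring *r)
    {
            return r->prod_cnt == r->cons_cnt;
    }

    int main(void)
    {
            struct ring r = { .prod_cnt = 0xfffffffe, .cons_cnt = 0xfffffff8,
                              .mask = CACHE_DEPTH - 1 };

            printf("slot %u, room %d, empty %d\n",
                   r.prod_cnt & r.mask,     /* slot 6 of 8 */
                   ring_has_room(&r),       /* occupancy 6 <= 7 -> 1 */
                   ring_empty(&r));         /* 0 */
            return 0;
    }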
+
+/* Allocate and DMA-map a page for receive. */
+static int funeth_alloc_page(struct funeth_rxq *q, struct funeth_rxbuf *rb,
+ int node, gfp_t gfp)
+{
+ struct page *p;
+
+ if (cache_get(q, rb))
+ return 0;
+
+ p = __alloc_pages_node(node, gfp | __GFP_NOWARN, 0);
+ if (unlikely(!p))
+ return -ENOMEM;
+
+ rb->dma_addr = dma_map_page(q->dma_dev, p, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(q->dma_dev, rb->dma_addr))) {
+ FUN_QSTAT_INC(q, rx_map_err);
+ __free_page(p);
+ return -ENOMEM;
+ }
+
+ FUN_QSTAT_INC(q, rx_page_alloc);
+
+ rb->page = p;
+ rb->pg_refs = 1;
+ refresh_refs(rb);
+ rb->node = page_is_pfmemalloc(p) ? -1 : page_to_nid(p);
+ return 0;
+}
+
+static void funeth_free_page(struct funeth_rxq *q, struct funeth_rxbuf *rb)
+{
+ if (rb->page) {
+ dma_unmap_page(q->dma_dev, rb->dma_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ __page_frag_cache_drain(rb->page, rb->pg_refs);
+ rb->page = NULL;
+ }
+}
+
+/* Run the XDP program assigned to an Rx queue.
+ * Return %NULL if the buffer is consumed, or the virtual address of the packet
+ * to turn into an skb.
+ */
+static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va,
+ int ref_ok, struct funeth_txq *xdp_q)
+{
+ struct bpf_prog *xdp_prog;
+ struct xdp_buff xdp;
+ u32 act;
+
+ /* VA includes the headroom, frag size includes headroom + tailroom */
+ xdp_init_buff(&xdp, ALIGN(skb_frag_size(frags), FUN_EPRQ_PKT_ALIGN),
+ &q->xdp_rxq);
+ xdp_prepare_buff(&xdp, buf_va, FUN_XDP_HEADROOM, skb_frag_size(frags) -
+ (FUN_RX_TAILROOM + FUN_XDP_HEADROOM), false);
+
+ xdp_prog = READ_ONCE(q->xdp_prog);
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+ switch (act) {
+ case XDP_PASS:
+ /* remove headroom, which may not be FUN_XDP_HEADROOM now */
+ skb_frag_size_set(frags, xdp.data_end - xdp.data);
+ skb_frag_off_add(frags, xdp.data - xdp.data_hard_start);
+ goto pass;
+ case XDP_TX:
+ if (unlikely(!ref_ok))
+ goto pass;
+ if (!fun_xdp_tx(xdp_q, xdp.data, xdp.data_end - xdp.data))
+ goto xdp_error;
+ FUN_QSTAT_INC(q, xdp_tx);
+ q->xdp_flush |= FUN_XDP_FLUSH_TX;
+ break;
+ case XDP_REDIRECT:
+ if (unlikely(!ref_ok))
+ goto pass;
+ if (unlikely(xdp_do_redirect(q->netdev, &xdp, xdp_prog)))
+ goto xdp_error;
+ FUN_QSTAT_INC(q, xdp_redir);
+ q->xdp_flush |= FUN_XDP_FLUSH_REDIR;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(q->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(q->netdev, xdp_prog, act);
+xdp_error:
+ q->cur_buf->pg_refs++; /* return frags' page reference */
+ FUN_QSTAT_INC(q, xdp_err);
+ break;
+ case XDP_DROP:
+ q->cur_buf->pg_refs++;
+ FUN_QSTAT_INC(q, xdp_drops);
+ break;
+ }
+ return NULL;
+
+pass:
+ return xdp.data;
+}
+
+/* A CQE contains a fixed completion structure along with optional metadata and
+ * even packet data. Given the start address of a CQE, return the start of the
+ * contained fixed structure, which lies at the CQE's end.
+ */
+static const void *cqe_to_info(const void *cqe)
+{
+ return cqe + FUNETH_CQE_INFO_OFFSET;
+}
+
+/* The inverse of cqe_to_info(). */
+static const void *info_to_cqe(const void *cqe_info)
+{
+ return cqe_info - FUNETH_CQE_INFO_OFFSET;
+}
+
+/* Return the type of hash provided by the device based on the L3 and L4
+ * protocols it parsed for the packet.
+ */
+static enum pkt_hash_types cqe_to_pkt_hash_type(u16 pkt_parse)
+{
+ static const enum pkt_hash_types htype_map[] = {
+ PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3,
+ PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L4,
+ PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3,
+ PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3
+ };
+ u16 key;
+
+ /* Build the key from the TCP/UDP and IP/IPv6 bits */
+ key = ((pkt_parse >> FUN_ETH_RX_CV_OL4_PROT_S) & 6) |
+ ((pkt_parse >> (FUN_ETH_RX_CV_OL3_PROT_S + 1)) & 1);
+
+ return htype_map[key];
+}
+
+/* Each received packet can be scattered across several Rx buffers or can
+ * share a buffer with previously received packets depending on the buffer
+ * and packet sizes and the room available in the most recently used buffer.
+ *
+ * The rules are:
+ * - If the buffer at the head of an RQ has not been used it gets (part of) the
+ * next incoming packet.
+ * - Otherwise, if the packet fully fits in the buffer's remaining space the
+ * packet is written there.
+ * - Otherwise, the packet goes into the next Rx buffer.
+ *
+ * This function returns the Rx buffer for a packet or fragment thereof of the
+ * given length. If it isn't @buf it either recycles or frees that buffer
+ * before advancing the queue to the next buffer.
+ *
+ * If called repeatedly with the remaining length of a packet it will walk
+ * through all the buffers containing the packet.
+ */
+static struct funeth_rxbuf *
+get_buf(struct funeth_rxq *q, struct funeth_rxbuf *buf, unsigned int len)
+{
+ if (q->buf_offset + len <= PAGE_SIZE || !q->buf_offset)
+ return buf; /* @buf holds (part of) the packet */
+
+ /* The packet occupies part of the next buffer. Move there after
+ * replenishing the current buffer slot either with the spare page or
+ * by reusing the slot's existing page. Note that if a spare page isn't
+ * available and the current packet occupies @buf, it is a multi-frag
+ * packet that will be dropped, leaving @buf available for reuse.
+ */
+ if ((page_ref_count(buf->page) == buf->pg_refs &&
+ buf->node == numa_mem_id()) || !q->spare_buf.page) {
+ dma_sync_single_for_device(q->dma_dev, buf->dma_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ refresh_refs(buf);
+ } else {
+ cache_offer(q, buf);
+ *buf = q->spare_buf;
+ q->spare_buf.page = NULL;
+ q->rqes[q->rq_cons & q->rq_mask] =
+ FUN_EPRQ_RQBUF_INIT(buf->dma_addr);
+ }
+ q->buf_offset = 0;
+ q->rq_cons++;
+ return &q->bufs[q->rq_cons & q->rq_mask];
+}
+
+/* Gather the page fragments making up the first Rx packet on @q. Its total
+ * length @tot_len includes optional head- and tail-rooms.
+ *
+ * Return 0 if the device retains ownership of at least some of the pages.
+ * In this case the caller may only copy the packet.
+ *
+ * A non-zero return value gives the caller permission to use references to the
+ * pages, e.g., attach them to skbs. Additionally, if the value is <0 at least
+ * one of the pages is PF_MEMALLOC.
+ *
+ * Regardless of outcome the caller is granted a reference to each of the pages.
+ */
+static int fun_gather_pkt(struct funeth_rxq *q, unsigned int tot_len,
+ skb_frag_t *frags)
+{
+ struct funeth_rxbuf *buf = q->cur_buf;
+ unsigned int frag_len;
+ int ref_ok = 1;
+
+ for (;;) {
+ buf = get_buf(q, buf, tot_len);
+
+ /* We always keep the RQ full of buffers so before we can give
+ * one of our pages to the stack we require that we can obtain
+ * a replacement page. If we can't the packet will either be
+ * copied or dropped so we can retain ownership of the page and
+ * reuse it.
+ */
+ if (!q->spare_buf.page &&
+ funeth_alloc_page(q, &q->spare_buf, numa_mem_id(),
+ GFP_ATOMIC | __GFP_MEMALLOC))
+ ref_ok = 0;
+
+ frag_len = min_t(unsigned int, tot_len,
+ PAGE_SIZE - q->buf_offset);
+ dma_sync_single_for_cpu(q->dma_dev,
+ buf->dma_addr + q->buf_offset,
+ frag_len, DMA_FROM_DEVICE);
+ buf->pg_refs--;
+ if (ref_ok)
+ ref_ok |= buf->node;
+
+ __skb_frag_set_page(frags, buf->page);
+ skb_frag_off_set(frags, q->buf_offset);
+ skb_frag_size_set(frags++, frag_len);
+
+ tot_len -= frag_len;
+ if (!tot_len)
+ break;
+
+ q->buf_offset = PAGE_SIZE;
+ }
+ q->buf_offset = ALIGN(q->buf_offset + frag_len, FUN_EPRQ_PKT_ALIGN);
+ q->cur_buf = buf;
+ return ref_ok;
+}
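
The tri-state return value comes from folding each buffer's node field into
ref_ok; a brief sketch of that encoding (node is -1 for PF_MEMALLOC pages, as
set in funeth_alloc_page() above):

    #include <stdio.h>

    /* Fold one buffer's node into the running return value:
     *  0  -> device keeps the pages, caller may only copy
     *  >0 -> caller may take page references
     *  <0 -> caller may take references but a page is PF_MEMALLOC
     */
    static int fold_ref_ok(int ref_ok, int node)
    {
            if (ref_ok)
                    ref_ok |= node;
            return ref_ok;
    }

    int main(void)
    {
            printf("%d\n", fold_ref_ok(1, 0));      /* 1: normal page on node 0 */
            printf("%d\n", fold_ref_ok(1, -1));     /* -1: PF_MEMALLOC page */
            printf("%d\n", fold_ref_ok(0, 2));      /* 0: no spare page available */
            return 0;
    }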
+
+static bool rx_hwtstamp_enabled(const struct net_device *dev)
+{
+ const struct funeth_priv *d = netdev_priv(dev);
+
+ return d->hwtstamp_cfg.rx_filter == HWTSTAMP_FILTER_ALL;
+}
+
+/* Advance the CQ pointers and phase tag to the next CQE. */
+static void advance_cq(struct funeth_rxq *q)
+{
+ if (unlikely(q->cq_head == q->cq_mask)) {
+ q->cq_head = 0;
+ q->phase ^= 1;
+ q->next_cqe_info = cqe_to_info(q->cqes);
+ } else {
+ q->cq_head++;
+ q->next_cqe_info += FUNETH_CQE_SIZE;
+ }
+ prefetch(q->next_cqe_info);
+}
+
+/* Process the packet represented by the head CQE of @q. Gather the packet's
+ * fragments, run it through the optional XDP program, and if needed construct
+ * an skb and pass it to the stack.
+ */
+static void fun_handle_cqe_pkt(struct funeth_rxq *q, struct funeth_txq *xdp_q)
+{
+ const struct fun_eth_cqe *rxreq = info_to_cqe(q->next_cqe_info);
+ unsigned int i, tot_len, pkt_len = be32_to_cpu(rxreq->pkt_len);
+ struct net_device *ndev = q->netdev;
+ skb_frag_t frags[RX_MAX_FRAGS];
+ struct skb_shared_info *si;
+ unsigned int headroom;
+ gro_result_t gro_res;
+ struct sk_buff *skb;
+ int ref_ok;
+ void *va;
+ u16 cv;
+
+ u64_stats_update_begin(&q->syncp);
+ q->stats.rx_pkts++;
+ q->stats.rx_bytes += pkt_len;
+ u64_stats_update_end(&q->syncp);
+
+ advance_cq(q);
+
+ /* account for head- and tail-room, present only for 1-buffer packets */
+ tot_len = pkt_len;
+ headroom = be16_to_cpu(rxreq->headroom);
+ if (likely(headroom))
+ tot_len += FUN_RX_TAILROOM + headroom;
+
+ ref_ok = fun_gather_pkt(q, tot_len, frags);
+ va = skb_frag_address(frags);
+ if (xdp_q && headroom == FUN_XDP_HEADROOM) {
+ va = fun_run_xdp(q, frags, va, ref_ok, xdp_q);
+ if (!va)
+ return;
+ headroom = 0; /* XDP_PASS trims it */
+ }
+ if (unlikely(!ref_ok))
+ goto no_mem;
+
+ if (likely(headroom)) {
+ /* headroom is either FUN_RX_HEADROOM or FUN_XDP_HEADROOM */
+ prefetch(va + headroom);
+ skb = napi_build_skb(va, ALIGN(tot_len, FUN_EPRQ_PKT_ALIGN));
+ if (unlikely(!skb))
+ goto no_mem;
+
+ skb_reserve(skb, headroom);
+ __skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ } else {
+ prefetch(va);
+ skb = napi_get_frags(q->napi);
+ if (unlikely(!skb))
+ goto no_mem;
+
+ if (ref_ok < 0)
+ skb->pfmemalloc = 1;
+
+ si = skb_shinfo(skb);
+ si->nr_frags = rxreq->nsgl;
+ for (i = 0; i < si->nr_frags; i++)
+ si->frags[i] = frags[i];
+
+ skb->len = pkt_len;
+ skb->data_len = pkt_len;
+ skb->truesize += round_up(pkt_len, FUN_EPRQ_PKT_ALIGN);
+ }
+
+ skb_record_rx_queue(skb, q->qidx);
+ cv = be16_to_cpu(rxreq->pkt_cv);
+ if (likely((q->netdev->features & NETIF_F_RXHASH) && rxreq->hash))
+ skb_set_hash(skb, be32_to_cpu(rxreq->hash),
+ cqe_to_pkt_hash_type(cv));
+ if (likely((q->netdev->features & NETIF_F_RXCSUM) && rxreq->csum)) {
+ FUN_QSTAT_INC(q, rx_cso);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = be16_to_cpu(rxreq->csum) - 1;
+ }
+ if (unlikely(rx_hwtstamp_enabled(q->netdev)))
+ skb_hwtstamps(skb)->hwtstamp = be64_to_cpu(rxreq->timestamp);
+
+ trace_funeth_rx(q, rxreq->nsgl, pkt_len, skb->hash, cv);
+
+ gro_res = skb->data_len ? napi_gro_frags(q->napi) :
+ napi_gro_receive(q->napi, skb);
+ if (gro_res == GRO_MERGED || gro_res == GRO_MERGED_FREE)
+ FUN_QSTAT_INC(q, gro_merged);
+ else if (gro_res == GRO_HELD)
+ FUN_QSTAT_INC(q, gro_pkts);
+ return;
+
+no_mem:
+ FUN_QSTAT_INC(q, rx_mem_drops);
+
+ /* Release the references we've been granted for the frag pages.
+ * We return the ref of the last frag and free the rest.
+ */
+ q->cur_buf->pg_refs++;
+ for (i = 0; i < rxreq->nsgl - 1; i++)
+ __free_page(skb_frag_page(frags + i));
+}
+
+/* Return 0 if the phase tag of the CQE at the CQ's head matches expectations,
+ * indicating the CQE is new.
+ */
+static u16 cqe_phase_mismatch(const struct fun_cqe_info *ci, u16 phase)
+{
+ u16 sf_p = be16_to_cpu(ci->sf_p);
+
+ return (sf_p & 1) ^ phase;
+}
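
The phase bit lets the consumer detect new CQEs without a producer index: each
pass over the ring is written with an alternating tag and the consumer flips
its expected phase on wrap (see advance_cq() above). A tiny sketch of the
check, assuming the ring starts out zeroed:

    #include <stdio.h>

    /* Returns 0 when the CQE's phase bit matches what the consumer expects,
     * i.e. when the entry was written during the current pass of the ring.
     */
    static unsigned int phase_mismatch(unsigned int sf_p, unsigned int phase)
    {
            return (sf_p & 1) ^ phase;
    }

    int main(void)
    {
            /* consumer starts expecting phase 1; zeroed entries read as phase 0 */
            printf("%u\n", phase_mismatch(1, 1));   /* 0: fresh CQE, process it */
            printf("%u\n", phase_mismatch(0, 1));   /* 1: stale, stop polling */
            /* after a wrap the consumer expects phase 0 for the next pass */
            printf("%u\n", phase_mismatch(0, 0));   /* 0: fresh again */
            return 0;
    }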
+
+/* Walk through a CQ identifying and processing fresh CQEs up to the given
+ * budget. Return the remaining budget.
+ */
+static int fun_process_cqes(struct funeth_rxq *q, int budget)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+ struct funeth_txq **xdpqs, *xdp_q = NULL;
+
+ xdpqs = rcu_dereference_bh(fp->xdpqs);
+ if (xdpqs)
+ xdp_q = xdpqs[smp_processor_id()];
+
+ while (budget && !cqe_phase_mismatch(q->next_cqe_info, q->phase)) {
+ /* access other descriptor fields after the phase check */
+ dma_rmb();
+
+ fun_handle_cqe_pkt(q, xdp_q);
+ budget--;
+ }
+
+ if (unlikely(q->xdp_flush)) {
+ if (q->xdp_flush & FUN_XDP_FLUSH_TX)
+ fun_txq_wr_db(xdp_q);
+ if (q->xdp_flush & FUN_XDP_FLUSH_REDIR)
+ xdp_do_flush();
+ q->xdp_flush = 0;
+ }
+
+ return budget;
+}
+
+/* NAPI handler for Rx queues. Calls the CQE processing loop and writes RQ/CQ
+ * doorbells as needed.
+ */
+int fun_rxq_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct fun_irq *irq = container_of(napi, struct fun_irq, napi);
+ struct funeth_rxq *q = irq->rxq;
+ int work_done = budget - fun_process_cqes(q, budget);
+ u32 cq_db_val = q->cq_head;
+
+ if (unlikely(work_done >= budget))
+ FUN_QSTAT_INC(q, rx_budget);
+ else if (napi_complete_done(napi, work_done))
+ cq_db_val |= q->irq_db_val;
+
+ /* check whether to post new Rx buffers */
+ if (q->rq_cons - q->rq_cons_db >= q->rq_db_thres) {
+ u64_stats_update_begin(&q->syncp);
+ q->stats.rx_bufs += q->rq_cons - q->rq_cons_db;
+ u64_stats_update_end(&q->syncp);
+ q->rq_cons_db = q->rq_cons;
+ writel((q->rq_cons - 1) & q->rq_mask, q->rq_db);
+ }
+
+ writel(cq_db_val, q->cq_db);
+ return work_done;
+}
+
+/* Free the Rx buffers of an Rx queue. */
+static void fun_rxq_free_bufs(struct funeth_rxq *q)
+{
+ struct funeth_rxbuf *b = q->bufs;
+ unsigned int i;
+
+ for (i = 0; i <= q->rq_mask; i++, b++)
+ funeth_free_page(q, b);
+
+ funeth_free_page(q, &q->spare_buf);
+ q->cur_buf = NULL;
+}
+
+/* Initially provision an Rx queue with Rx buffers. */
+static int fun_rxq_alloc_bufs(struct funeth_rxq *q, int node)
+{
+ struct funeth_rxbuf *b = q->bufs;
+ unsigned int i;
+
+ for (i = 0; i <= q->rq_mask; i++, b++) {
+ if (funeth_alloc_page(q, b, node, GFP_KERNEL)) {
+ fun_rxq_free_bufs(q);
+ return -ENOMEM;
+ }
+ q->rqes[i] = FUN_EPRQ_RQBUF_INIT(b->dma_addr);
+ }
+ q->cur_buf = q->bufs;
+ return 0;
+}
+
+/* Initialize a used-buffer cache of the given depth. */
+static int fun_rxq_init_cache(struct funeth_rx_cache *c, unsigned int depth,
+ int node)
+{
+ c->mask = depth - 1;
+ c->bufs = kvzalloc_node(depth * sizeof(*c->bufs), GFP_KERNEL, node);
+ return c->bufs ? 0 : -ENOMEM;
+}
+
+/* Deallocate an Rx queue's used-buffer cache and its contents. */
+static void fun_rxq_free_cache(struct funeth_rxq *q)
+{
+ struct funeth_rxbuf *b = q->cache.bufs;
+ unsigned int i;
+
+ for (i = 0; i <= q->cache.mask; i++, b++)
+ funeth_free_page(q, b);
+
+ kvfree(q->cache.bufs);
+ q->cache.bufs = NULL;
+}
+
+int fun_rxq_set_bpf(struct funeth_rxq *q, struct bpf_prog *prog)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+ struct fun_admin_epcq_req cmd;
+ u16 headroom;
+ int err;
+
+ headroom = prog ? FUN_XDP_HEADROOM : FUN_RX_HEADROOM;
+ if (headroom != q->headroom) {
+ cmd.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_EPCQ,
+ sizeof(cmd));
+ cmd.u.modify =
+ FUN_ADMIN_EPCQ_MODIFY_REQ_INIT(FUN_ADMIN_SUBOP_MODIFY,
+ 0, q->hw_cqid, headroom);
+ err = fun_submit_admin_sync_cmd(fp->fdev, &cmd.common, NULL, 0,
+ 0);
+ if (err)
+ return err;
+ q->headroom = headroom;
+ }
+
+ WRITE_ONCE(q->xdp_prog, prog);
+ return 0;
+}
+
+/* Create an Rx queue, allocating the host memory it needs. */
+static struct funeth_rxq *fun_rxq_create_sw(struct net_device *dev,
+ unsigned int qidx,
+ unsigned int ncqe,
+ unsigned int nrqe,
+ struct fun_irq *irq)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct funeth_rxq *q;
+ int err = -ENOMEM;
+ int numa_node;
+
+ numa_node = fun_irq_node(irq);
+ q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node);
+ if (!q)
+ goto err;
+
+ q->qidx = qidx;
+ q->netdev = dev;
+ q->cq_mask = ncqe - 1;
+ q->rq_mask = nrqe - 1;
+ q->numa_node = numa_node;
+ q->rq_db_thres = nrqe / 4;
+ u64_stats_init(&q->syncp);
+ q->dma_dev = &fp->pdev->dev;
+
+ q->rqes = fun_alloc_ring_mem(q->dma_dev, nrqe, sizeof(*q->rqes),
+ sizeof(*q->bufs), false, numa_node,
+ &q->rq_dma_addr, (void **)&q->bufs, NULL);
+ if (!q->rqes)
+ goto free_q;
+
+ q->cqes = fun_alloc_ring_mem(q->dma_dev, ncqe, FUNETH_CQE_SIZE, 0,
+ false, numa_node, &q->cq_dma_addr, NULL,
+ NULL);
+ if (!q->cqes)
+ goto free_rqes;
+
+ err = fun_rxq_init_cache(&q->cache, nrqe, numa_node);
+ if (err)
+ goto free_cqes;
+
+ err = fun_rxq_alloc_bufs(q, numa_node);
+ if (err)
+ goto free_cache;
+
+ q->stats.rx_bufs = q->rq_mask;
+ q->init_state = FUN_QSTATE_INIT_SW;
+ return q;
+
+free_cache:
+ fun_rxq_free_cache(q);
+free_cqes:
+ dma_free_coherent(q->dma_dev, ncqe * FUNETH_CQE_SIZE, q->cqes,
+ q->cq_dma_addr);
+free_rqes:
+ fun_free_ring_mem(q->dma_dev, nrqe, sizeof(*q->rqes), false, q->rqes,
+ q->rq_dma_addr, q->bufs);
+free_q:
+ kfree(q);
+err:
+ netdev_err(dev, "Unable to allocate memory for Rx queue %u\n", qidx);
+ return ERR_PTR(err);
+}
+
+static void fun_rxq_free_sw(struct funeth_rxq *q)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+
+ fun_rxq_free_cache(q);
+ fun_rxq_free_bufs(q);
+ fun_free_ring_mem(q->dma_dev, q->rq_mask + 1, sizeof(*q->rqes), false,
+ q->rqes, q->rq_dma_addr, q->bufs);
+ dma_free_coherent(q->dma_dev, (q->cq_mask + 1) * FUNETH_CQE_SIZE,
+ q->cqes, q->cq_dma_addr);
+
+ /* Before freeing the queue, transfer its key counters to the netdev totals. */
+ fp->rx_packets += q->stats.rx_pkts;
+ fp->rx_bytes += q->stats.rx_bytes;
+ fp->rx_dropped += q->stats.rx_map_err + q->stats.rx_mem_drops;
+
+ kfree(q);
+}
+
+/* Create an Rx queue's resources on the device. */
+int fun_rxq_create_dev(struct funeth_rxq *q, struct fun_irq *irq)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+ unsigned int ncqe = q->cq_mask + 1;
+ unsigned int nrqe = q->rq_mask + 1;
+ int err;
+
+ err = xdp_rxq_info_reg(&q->xdp_rxq, q->netdev, q->qidx,
+ irq->napi.napi_id);
+ if (err)
+ goto out;
+
+ err = xdp_rxq_info_reg_mem_model(&q->xdp_rxq, MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err)
+ goto xdp_unreg;
+
+ q->phase = 1;
+ q->irq_cnt = 0;
+ q->cq_head = 0;
+ q->rq_cons = 0;
+ q->rq_cons_db = 0;
+ q->buf_offset = 0;
+ q->napi = &irq->napi;
+ q->irq_db_val = fp->cq_irq_db;
+ q->next_cqe_info = cqe_to_info(q->cqes);
+
+ q->xdp_prog = fp->xdp_prog;
+ q->headroom = fp->xdp_prog ? FUN_XDP_HEADROOM : FUN_RX_HEADROOM;
+
+ err = fun_sq_create(fp->fdev, FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR |
+ FUN_ADMIN_EPSQ_CREATE_FLAG_RQ, 0,
+ FUN_HCI_ID_INVALID, 0, nrqe, q->rq_dma_addr, 0, 0,
+ 0, 0, fp->fdev->kern_end_qid, PAGE_SHIFT,
+ &q->hw_sqid, &q->rq_db);
+ if (err)
+ goto xdp_unreg;
+
+ err = fun_cq_create(fp->fdev, FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR |
+ FUN_ADMIN_EPCQ_CREATE_FLAG_RQ, 0,
+ q->hw_sqid, ilog2(FUNETH_CQE_SIZE), ncqe,
+ q->cq_dma_addr, q->headroom, FUN_RX_TAILROOM, 0, 0,
+ irq->irq_idx, 0, fp->fdev->kern_end_qid,
+ &q->hw_cqid, &q->cq_db);
+ if (err)
+ goto free_rq;
+
+ irq->rxq = q;
+ writel(q->rq_mask, q->rq_db);
+ q->init_state = FUN_QSTATE_INIT_FULL;
+
+ netif_info(fp, ifup, q->netdev,
+ "Rx queue %u, depth %u/%u, HW qid %u/%u, IRQ idx %u, node %d, headroom %u\n",
+ q->qidx, ncqe, nrqe, q->hw_cqid, q->hw_sqid, irq->irq_idx,
+ q->numa_node, q->headroom);
+ return 0;
+
+free_rq:
+ fun_destroy_sq(fp->fdev, q->hw_sqid);
+xdp_unreg:
+ xdp_rxq_info_unreg(&q->xdp_rxq);
+out:
+ netdev_err(q->netdev,
+ "Failed to create Rx queue %u on device, error %d\n",
+ q->qidx, err);
+ return err;
+}
+
+static void fun_rxq_free_dev(struct funeth_rxq *q)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+ struct fun_irq *irq;
+
+ if (q->init_state < FUN_QSTATE_INIT_FULL)
+ return;
+
+ irq = container_of(q->napi, struct fun_irq, napi);
+ netif_info(fp, ifdown, q->netdev,
+ "Freeing Rx queue %u (id %u/%u), IRQ %u\n",
+ q->qidx, q->hw_cqid, q->hw_sqid, irq->irq_idx);
+
+ irq->rxq = NULL;
+ xdp_rxq_info_unreg(&q->xdp_rxq);
+ fun_destroy_sq(fp->fdev, q->hw_sqid);
+ fun_destroy_cq(fp->fdev, q->hw_cqid);
+ q->init_state = FUN_QSTATE_INIT_SW;
+}
+
+/* Create or advance an Rx queue, allocating all the host and device resources
+ * needed to reach the target state.
+ */
+int funeth_rxq_create(struct net_device *dev, unsigned int qidx,
+ unsigned int ncqe, unsigned int nrqe, struct fun_irq *irq,
+ int state, struct funeth_rxq **qp)
+{
+ struct funeth_rxq *q = *qp;
+ int err;
+
+ if (!q) {
+ q = fun_rxq_create_sw(dev, qidx, ncqe, nrqe, irq);
+ if (IS_ERR(q))
+ return PTR_ERR(q);
+ }
+
+ if (q->init_state >= state)
+ goto out;
+
+ err = fun_rxq_create_dev(q, irq);
+ if (err) {
+ if (!*qp)
+ fun_rxq_free_sw(q);
+ return err;
+ }
+
+out:
+ *qp = q;
+ return 0;
+}
+
+/* Free Rx queue resources until it reaches the target state. */
+struct funeth_rxq *funeth_rxq_free(struct funeth_rxq *q, int state)
+{
+ if (state < FUN_QSTATE_INIT_FULL)
+ fun_rxq_free_dev(q);
+
+ if (state == FUN_QSTATE_DESTROYED) {
+ fun_rxq_free_sw(q);
+ q = NULL;
+ }
+
+ return q;
+}
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_trace.h b/drivers/net/ethernet/fungible/funeth/funeth_trace.h
new file mode 100644
index 000000000000..9e58dfec19d5
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_trace.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM funeth
+
+#if !defined(_TRACE_FUNETH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FUNETH_H
+
+#include <linux/tracepoint.h>
+
+#include "funeth_txrx.h"
+
+TRACE_EVENT(funeth_tx,
+
+ TP_PROTO(const struct funeth_txq *txq,
+ u32 len,
+ u32 sqe_idx,
+ u32 ngle),
+
+ TP_ARGS(txq, len, sqe_idx, ngle),
+
+ TP_STRUCT__entry(
+ __field(u32, qidx)
+ __field(u32, len)
+ __field(u32, sqe_idx)
+ __field(u32, ngle)
+ __string(devname, txq->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->qidx = txq->qidx;
+ __entry->len = len;
+ __entry->sqe_idx = sqe_idx;
+ __entry->ngle = ngle;
+ __assign_str(devname, txq->netdev->name);
+ ),
+
+ TP_printk("%s: Txq %u, SQE idx %u, len %u, num GLEs %u",
+ __get_str(devname), __entry->qidx, __entry->sqe_idx,
+ __entry->len, __entry->ngle)
+);
+
+TRACE_EVENT(funeth_tx_free,
+
+ TP_PROTO(const struct funeth_txq *txq,
+ u32 sqe_idx,
+ u32 num_sqes,
+ u32 hw_head),
+
+ TP_ARGS(txq, sqe_idx, num_sqes, hw_head),
+
+ TP_STRUCT__entry(
+ __field(u32, qidx)
+ __field(u32, sqe_idx)
+ __field(u32, num_sqes)
+ __field(u32, hw_head)
+ __string(devname, txq->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->qidx = txq->qidx;
+ __entry->sqe_idx = sqe_idx;
+ __entry->num_sqes = num_sqes;
+ __entry->hw_head = hw_head;
+ __assign_str(devname, txq->netdev->name);
+ ),
+
+ TP_printk("%s: Txq %u, SQE idx %u, SQEs %u, HW head %u",
+ __get_str(devname), __entry->qidx, __entry->sqe_idx,
+ __entry->num_sqes, __entry->hw_head)
+);
+
+TRACE_EVENT(funeth_rx,
+
+ TP_PROTO(const struct funeth_rxq *rxq,
+ u32 num_rqes,
+ u32 pkt_len,
+ u32 hash,
+ u32 cls_vec),
+
+ TP_ARGS(rxq, num_rqes, pkt_len, hash, cls_vec),
+
+ TP_STRUCT__entry(
+ __field(u32, qidx)
+ __field(u32, cq_head)
+ __field(u32, num_rqes)
+ __field(u32, len)
+ __field(u32, hash)
+ __field(u32, cls_vec)
+ __string(devname, rxq->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->qidx = rxq->qidx;
+ __entry->cq_head = rxq->cq_head;
+ __entry->num_rqes = num_rqes;
+ __entry->len = pkt_len;
+ __entry->hash = hash;
+ __entry->cls_vec = cls_vec;
+ __assign_str(devname, rxq->netdev->name);
+ ),
+
+ TP_printk("%s: Rxq %u, CQ head %u, RQEs %u, len %u, hash %u, CV %#x",
+ __get_str(devname), __entry->qidx, __entry->cq_head,
+ __entry->num_rqes, __entry->len, __entry->hash,
+ __entry->cls_vec)
+);
+
+#endif /* _TRACE_FUNETH_H */
+
+/* Below must be outside protection. */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE funeth_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_tx.c b/drivers/net/ethernet/fungible/funeth/funeth_tx.c
new file mode 100644
index 000000000000..ff6e29237253
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_tx.c
@@ -0,0 +1,763 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include <linux/dma-mapping.h>
+#include <linux/ip.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <uapi/linux/udp.h>
+#include "funeth.h"
+#include "funeth_ktls.h"
+#include "funeth_txrx.h"
+#include "funeth_trace.h"
+#include "fun_queue.h"
+
+#define FUN_XDP_CLEAN_THRES 32
+#define FUN_XDP_CLEAN_BATCH 16
+
+/* DMA-map a packet and return the (length, DMA_address) pairs for its
+ * segments. If a mapping error occurs -ENOMEM is returned.
+ */
+static int map_skb(const struct sk_buff *skb, struct device *dev,
+ dma_addr_t *addr, unsigned int *len)
+{
+ const struct skb_shared_info *si;
+ const skb_frag_t *fp, *end;
+
+ *len = skb_headlen(skb);
+ *addr = dma_map_single(dev, skb->data, *len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, *addr))
+ return -ENOMEM;
+
+ si = skb_shinfo(skb);
+ end = &si->frags[si->nr_frags];
+
+ for (fp = si->frags; fp < end; fp++) {
+ *++len = skb_frag_size(fp);
+ *++addr = skb_frag_dma_map(dev, fp, 0, *len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, *addr))
+ goto unwind;
+ }
+ return 0;
+
+unwind:
+ while (fp-- > si->frags)
+ dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
+
+ dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
+ return -ENOMEM;
+}
+
+/* Return the address just past the end of a Tx queue's descriptor ring.
+ * It exploits the fact that the HW writeback area is just after the end
+ * of the descriptor ring.
+ */
+static void *txq_end(const struct funeth_txq *q)
+{
+ return (void *)q->hw_wb;
+}
+
+/* Return the amount of space within a Tx ring from the given address to the
+ * end.
+ */
+static unsigned int txq_to_end(const struct funeth_txq *q, void *p)
+{
+ return txq_end(q) - p;
+}
+
+/* Return the number of Tx descriptors occupied by a Tx request. */
+static unsigned int tx_req_ndesc(const struct fun_eth_tx_req *req)
+{
+ return DIV_ROUND_UP(req->len8, FUNETH_SQE_SIZE / 8);
+}
+
+static __be16 tcp_hdr_doff_flags(const struct tcphdr *th)
+{
+ return *(__be16 *)&tcp_flag_word(th);
+}
+
+static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q,
+ unsigned int *tls_len)
+{
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+ const struct fun_ktls_tx_ctx *tls_ctx;
+ u32 datalen, seq;
+
+ datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ if (!datalen)
+ return skb;
+
+ if (likely(!tls_offload_tx_resync_pending(skb->sk))) {
+ seq = ntohl(tcp_hdr(skb)->seq);
+ tls_ctx = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
+
+ if (likely(tls_ctx->next_seq == seq)) {
+ *tls_len = datalen;
+ return skb;
+ }
+ if (seq - tls_ctx->next_seq < U32_MAX / 4) {
+ tls_offload_tx_resync_request(skb->sk, seq,
+ tls_ctx->next_seq);
+ }
+ }
+
+ FUN_QSTAT_INC(q, tx_tls_fallback);
+ skb = tls_encrypt_skb(skb);
+ if (!skb)
+ FUN_QSTAT_INC(q, tx_tls_drops);
+
+ return skb;
+#else
+ return NULL;
+#endif
+}
+
+/* Write as many descriptors as needed for the supplied skb starting at the
+ * current producer location. The caller has made certain enough descriptors
+ * are available.
+ *
+ * Returns the number of descriptors written, 0 on error.
+ */
+static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
+ unsigned int tls_len)
+{
+ unsigned int extra_bytes = 0, extra_pkts = 0;
+ unsigned int idx = q->prod_cnt & q->mask;
+ const struct skb_shared_info *shinfo;
+ unsigned int lens[MAX_SKB_FRAGS + 1];
+ dma_addr_t addrs[MAX_SKB_FRAGS + 1];
+ struct fun_eth_tx_req *req;
+ struct fun_dataop_gl *gle;
+ const struct tcphdr *th;
+ unsigned int ngle, i;
+ u16 flags;
+
+ if (unlikely(map_skb(skb, q->dma_dev, addrs, lens))) {
+ FUN_QSTAT_INC(q, tx_map_err);
+ return 0;
+ }
+
+ req = fun_tx_desc_addr(q, idx);
+ req->op = FUN_ETH_OP_TX;
+ req->len8 = 0;
+ req->flags = 0;
+ req->suboff8 = offsetof(struct fun_eth_tx_req, dataop);
+ req->repr_idn = 0;
+ req->encap_proto = 0;
+
+ shinfo = skb_shinfo(skb);
+ if (likely(shinfo->gso_size)) {
+ if (skb->encapsulation) {
+ u16 ol4_ofst;
+
+ flags = FUN_ETH_OUTER_EN | FUN_ETH_INNER_LSO |
+ FUN_ETH_UPDATE_INNER_L4_CKSUM |
+ FUN_ETH_UPDATE_OUTER_L3_LEN;
+ if (shinfo->gso_type & (SKB_GSO_UDP_TUNNEL |
+ SKB_GSO_UDP_TUNNEL_CSUM)) {
+ flags |= FUN_ETH_UPDATE_OUTER_L4_LEN |
+ FUN_ETH_OUTER_UDP;
+ if (shinfo->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
+ flags |= FUN_ETH_UPDATE_OUTER_L4_CKSUM;
+ ol4_ofst = skb_transport_offset(skb);
+ } else {
+ ol4_ofst = skb_inner_network_offset(skb);
+ }
+
+ if (ip_hdr(skb)->version == 4)
+ flags |= FUN_ETH_UPDATE_OUTER_L3_CKSUM;
+ else
+ flags |= FUN_ETH_OUTER_IPV6;
+
+ if (skb->inner_network_header) {
+ if (inner_ip_hdr(skb)->version == 4)
+ flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM |
+ FUN_ETH_UPDATE_INNER_L3_LEN;
+ else
+ flags |= FUN_ETH_INNER_IPV6 |
+ FUN_ETH_UPDATE_INNER_L3_LEN;
+ }
+ th = inner_tcp_hdr(skb);
+ fun_eth_offload_init(&req->offload, flags,
+ shinfo->gso_size,
+ tcp_hdr_doff_flags(th), 0,
+ skb_inner_network_offset(skb),
+ skb_inner_transport_offset(skb),
+ skb_network_offset(skb), ol4_ofst);
+ FUN_QSTAT_INC(q, tx_encap_tso);
+ } else {
+ /* HW considers one set of headers as inner */
+ flags = FUN_ETH_INNER_LSO |
+ FUN_ETH_UPDATE_INNER_L4_CKSUM |
+ FUN_ETH_UPDATE_INNER_L3_LEN;
+ if (shinfo->gso_type & SKB_GSO_TCPV6)
+ flags |= FUN_ETH_INNER_IPV6;
+ else
+ flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM;
+ th = tcp_hdr(skb);
+ fun_eth_offload_init(&req->offload, flags,
+ shinfo->gso_size,
+ tcp_hdr_doff_flags(th), 0,
+ skb_network_offset(skb),
+ skb_transport_offset(skb), 0, 0);
+ FUN_QSTAT_INC(q, tx_tso);
+ }
+
+ u64_stats_update_begin(&q->syncp);
+ q->stats.tx_cso += shinfo->gso_segs;
+ u64_stats_update_end(&q->syncp);
+
+ extra_pkts = shinfo->gso_segs - 1;
+ extra_bytes = (be16_to_cpu(req->offload.inner_l4_off) +
+ __tcp_hdrlen(th)) * extra_pkts;
+ } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ flags = FUN_ETH_UPDATE_INNER_L4_CKSUM;
+ if (skb->csum_offset == offsetof(struct udphdr, check))
+ flags |= FUN_ETH_INNER_UDP;
+ fun_eth_offload_init(&req->offload, flags, 0, 0, 0, 0,
+ skb_checksum_start_offset(skb), 0, 0);
+ FUN_QSTAT_INC(q, tx_cso);
+ } else {
+ fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0);
+ }
+
+ ngle = shinfo->nr_frags + 1;
+ req->len8 = (sizeof(*req) + ngle * sizeof(*gle)) / 8;
+ req->dataop = FUN_DATAOP_HDR_INIT(ngle, 0, ngle, 0, skb->len);
+
+ for (i = 0, gle = (struct fun_dataop_gl *)req->dataop.imm;
+ i < ngle && txq_to_end(q, gle); i++, gle++)
+ fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);
+
+ if (txq_to_end(q, gle) == 0) {
+ gle = (struct fun_dataop_gl *)q->desc;
+ for ( ; i < ngle; i++, gle++)
+ fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);
+ }
+
+ if (IS_ENABLED(CONFIG_TLS_DEVICE) && unlikely(tls_len)) {
+ struct fun_eth_tls *tls = (struct fun_eth_tls *)gle;
+ struct fun_ktls_tx_ctx *tls_ctx;
+
+ req->len8 += FUNETH_TLS_SZ / 8;
+ req->flags = cpu_to_be16(FUN_ETH_TX_TLS);
+
+ tls_ctx = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
+ tls->tlsid = tls_ctx->tlsid;
+ tls_ctx->next_seq += tls_len;
+
+ u64_stats_update_begin(&q->syncp);
+ q->stats.tx_tls_bytes += tls_len;
+ q->stats.tx_tls_pkts += 1 + extra_pkts;
+ u64_stats_update_end(&q->syncp);
+ }
+
+ u64_stats_update_begin(&q->syncp);
+ q->stats.tx_bytes += skb->len + extra_bytes;
+ q->stats.tx_pkts += 1 + extra_pkts;
+ u64_stats_update_end(&q->syncp);
+
+ q->info[idx].skb = skb;
+
+ trace_funeth_tx(q, skb->len, idx, req->dataop.ngather);
+ return tx_req_ndesc(req);
+}
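
As a worked example of the byte accounting above (numbers hypothetical): a
non-encapsulated IPv4/TCP skb with no options split into 4 GSO segments has
inner_l4_off = 14 + 20 = 34 and a 20-byte TCP header, so each extra segment
repeats 54 header bytes; extra_pkts = 3 and extra_bytes = 54 * 3 = 162, and
tx_bytes then approximates the wire bytes of all four emitted packets rather
than just skb->len.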
+
+/* Return the number of available descriptors of a Tx queue.
+ * HW assumes head==tail means the ring is empty so we need to keep one
+ * descriptor unused.
+ */
+static unsigned int fun_txq_avail(const struct funeth_txq *q)
+{
+ return q->mask - q->prod_cnt + q->cons_cnt;
+}
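
A short sketch of the occupancy arithmetic above, using free-running
producer/consumer counts and an illustrative 8-entry ring (so mask = 7 and at
most 7 descriptors may ever be in use):

    #include <stdio.h>

    struct txq {
            unsigned int prod_cnt;  /* free-running producer count */
            unsigned int cons_cnt;  /* free-running consumer count */
            unsigned int mask;      /* ring size - 1, ring size a power of two */
    };

    /* One slot stays unused so head == tail always means "empty" to the HW. */
    static unsigned int txq_avail(const struct txq *q)
    {
            return q->mask - q->prod_cnt + q->cons_cnt;
    }

    int main(void)
    {
            struct txq q = { .prod_cnt = 10, .cons_cnt = 5, .mask = 7 };

            /* 5 descriptors in flight out of 8 slots -> 2 still usable */
            printf("avail=%u\n", txq_avail(&q));
            return 0;
    }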
+
+/* Stop a queue if it can't handle another worst-case packet. */
+static void fun_tx_check_stop(struct funeth_txq *q)
+{
+ if (likely(fun_txq_avail(q) >= FUNETH_MAX_PKT_DESC))
+ return;
+
+ netif_tx_stop_queue(q->ndq);
+
+ /* NAPI reclaim is freeing packets in parallel with us and we may race.
+ * We have stopped the queue but check again after synchronizing with
+ * reclaim.
+ */
+ smp_mb();
+ if (likely(fun_txq_avail(q) < FUNETH_MAX_PKT_DESC))
+ FUN_QSTAT_INC(q, tx_nstops);
+ else
+ netif_tx_start_queue(q->ndq);
+}
+
+/* Return true if a queue has enough space to restart. Current condition is
+ * that the queue must be >= 1/4 empty.
+ */
+static bool fun_txq_may_restart(struct funeth_txq *q)
+{
+ return fun_txq_avail(q) >= q->mask / 4;
+}
+
+netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ unsigned int qid = skb_get_queue_mapping(skb);
+ struct funeth_txq *q = fp->txqs[qid];
+ unsigned int tls_len = 0;
+ unsigned int ndesc;
+
+ if (IS_ENABLED(CONFIG_TLS_DEVICE) && skb->sk &&
+ tls_is_sk_tx_device_offloaded(skb->sk)) {
+ skb = fun_tls_tx(skb, q, &tls_len);
+ if (unlikely(!skb))
+ goto dropped;
+ }
+
+ ndesc = write_pkt_desc(skb, q, tls_len);
+ if (unlikely(!ndesc)) {
+ dev_kfree_skb_any(skb);
+ goto dropped;
+ }
+
+ q->prod_cnt += ndesc;
+ fun_tx_check_stop(q);
+
+ skb_tx_timestamp(skb);
+
+ if (__netdev_tx_sent_queue(q->ndq, skb->len, netdev_xmit_more()))
+ fun_txq_wr_db(q);
+ else
+ FUN_QSTAT_INC(q, tx_more);
+
+ return NETDEV_TX_OK;
+
+dropped:
+ /* A dropped packet may be the last one in an xmit_more train,
+ * ring the doorbell just in case.
+ */
+ if (!netdev_xmit_more())
+ fun_txq_wr_db(q);
+ return NETDEV_TX_OK;
+}
+
+/* Return a Tx queue's HW head index written back to host memory. */
+static u16 txq_hw_head(const struct funeth_txq *q)
+{
+ return (u16)be64_to_cpu(*q->hw_wb);
+}
+
+/* Unmap the Tx packet starting at the given descriptor index and
+ * return the number of Tx descriptors it occupied.
+ */
+static unsigned int unmap_skb(const struct funeth_txq *q, unsigned int idx)
+{
+ const struct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx);
+ unsigned int ngle = req->dataop.ngather;
+ struct fun_dataop_gl *gle;
+
+ if (ngle) {
+ gle = (struct fun_dataop_gl *)req->dataop.imm;
+ dma_unmap_single(q->dma_dev, be64_to_cpu(gle->sgl_data),
+ be32_to_cpu(gle->sgl_len), DMA_TO_DEVICE);
+
+ for (gle++; --ngle && txq_to_end(q, gle); gle++)
+ dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data),
+ be32_to_cpu(gle->sgl_len),
+ DMA_TO_DEVICE);
+
+ for (gle = (struct fun_dataop_gl *)q->desc; ngle; ngle--, gle++)
+ dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data),
+ be32_to_cpu(gle->sgl_len),
+ DMA_TO_DEVICE);
+ }
+
+ return tx_req_ndesc(req);
+}
+
+/* Reclaim completed Tx descriptors and free their packets. Restart a stopped
+ * queue if we freed enough descriptors.
+ *
+ * Return true if we exhausted the budget while there is more work to be done.
+ */
+static bool fun_txq_reclaim(struct funeth_txq *q, int budget)
+{
+ unsigned int npkts = 0, nbytes = 0, ndesc = 0;
+ unsigned int head, limit, reclaim_idx;
+
+ /* budget may be 0, e.g., netpoll */
+ limit = budget ? budget : UINT_MAX;
+
+ for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
+ head != reclaim_idx && npkts < limit; head = txq_hw_head(q)) {
+ /* The HW head is continually updated, ensure we don't read
+ * descriptor state before the head tells us to reclaim it.
+ * On the enqueue side the doorbell is an implicit write
+ * barrier.
+ */
+ rmb();
+
+ do {
+ unsigned int pkt_desc = unmap_skb(q, reclaim_idx);
+ struct sk_buff *skb = q->info[reclaim_idx].skb;
+
+ trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head);
+
+ nbytes += skb->len;
+ napi_consume_skb(skb, budget);
+ ndesc += pkt_desc;
+ reclaim_idx = (reclaim_idx + pkt_desc) & q->mask;
+ npkts++;
+ } while (reclaim_idx != head && npkts < limit);
+ }
+
+ q->cons_cnt += ndesc;
+ netdev_tx_completed_queue(q->ndq, npkts, nbytes);
+ smp_mb(); /* pairs with the one in fun_tx_check_stop() */
+
+ if (unlikely(netif_tx_queue_stopped(q->ndq) &&
+ fun_txq_may_restart(q))) {
+ netif_tx_wake_queue(q->ndq);
+ FUN_QSTAT_INC(q, tx_nrestarts);
+ }
+
+ return reclaim_idx != head;
+}
+
+/* The NAPI handler for Tx queues. */
+int fun_txq_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct fun_irq *irq = container_of(napi, struct fun_irq, napi);
+ struct funeth_txq *q = irq->txq;
+ unsigned int db_val;
+
+ if (fun_txq_reclaim(q, budget))
+ return budget; /* exhausted budget */
+
+ napi_complete(napi); /* exhausted pending work */
+ db_val = READ_ONCE(q->irq_db_val) | (q->cons_cnt & q->mask);
+ writel(db_val, q->db);
+ return 0;
+}
+
+static void fun_xdp_unmap(const struct funeth_txq *q, unsigned int idx)
+{
+ const struct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx);
+ const struct fun_dataop_gl *gle;
+
+ gle = (const struct fun_dataop_gl *)req->dataop.imm;
+ dma_unmap_single(q->dma_dev, be64_to_cpu(gle->sgl_data),
+ be32_to_cpu(gle->sgl_len), DMA_TO_DEVICE);
+}
+
+/* Reclaim up to @budget completed Tx descriptors from a TX XDP queue. */
+static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
+{
+ unsigned int npkts = 0, head, reclaim_idx;
+
+ for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
+ head != reclaim_idx && npkts < budget; head = txq_hw_head(q)) {
+ /* The HW head is continually updated, ensure we don't read
+ * descriptor state before the head tells us to reclaim it.
+ * On the enqueue side the doorbell is an implicit write
+ * barrier.
+ */
+ rmb();
+
+ do {
+ fun_xdp_unmap(q, reclaim_idx);
+ page_frag_free(q->info[reclaim_idx].vaddr);
+
+ trace_funeth_tx_free(q, reclaim_idx, 1, head);
+
+ reclaim_idx = (reclaim_idx + 1) & q->mask;
+ npkts++;
+ } while (reclaim_idx != head && npkts < budget);
+ }
+
+ q->cons_cnt += npkts;
+ return npkts;
+}
+
+bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len)
+{
+ struct fun_eth_tx_req *req;
+ struct fun_dataop_gl *gle;
+ unsigned int idx;
+ dma_addr_t dma;
+
+ if (fun_txq_avail(q) < FUN_XDP_CLEAN_THRES)
+ fun_xdpq_clean(q, FUN_XDP_CLEAN_BATCH);
+
+ if (unlikely(!fun_txq_avail(q))) {
+ FUN_QSTAT_INC(q, tx_xdp_full);
+ return false;
+ }
+
+ dma = dma_map_single(q->dma_dev, data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(q->dma_dev, dma))) {
+ FUN_QSTAT_INC(q, tx_map_err);
+ return false;
+ }
+
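+ /* Build a single-gather Tx request: the header descriptor is followed
+ * immediately by one gather-list entry covering the whole frame.
+ */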
+ idx = q->prod_cnt & q->mask;
+ req = fun_tx_desc_addr(q, idx);
+ req->op = FUN_ETH_OP_TX;
+ req->len8 = (sizeof(*req) + sizeof(*gle)) / 8;
+ req->flags = 0;
+ req->suboff8 = offsetof(struct fun_eth_tx_req, dataop);
+ req->repr_idn = 0;
+ req->encap_proto = 0;
+ fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0);
+ req->dataop = FUN_DATAOP_HDR_INIT(1, 0, 1, 0, len);
+
+ gle = (struct fun_dataop_gl *)req->dataop.imm;
+ fun_dataop_gl_init(gle, 0, 0, len, dma);
+
+ q->info[idx].vaddr = data;
+
+ u64_stats_update_begin(&q->syncp);
+ q->stats.tx_bytes += len;
+ q->stats.tx_pkts++;
+ u64_stats_update_end(&q->syncp);
+
+ trace_funeth_tx(q, len, idx, 1);
+ q->prod_cnt++;
+
+ return true;
+}
+
+int fun_xdp_xmit_frames(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct funeth_txq *q, **xdpqs;
+ int i, q_idx;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ xdpqs = rcu_dereference_bh(fp->xdpqs);
+ if (unlikely(!xdpqs))
+ return -ENETDOWN;
+
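+ /* XDP Tx queues are per-CPU; the current CPU selects the queue. */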
+ q_idx = smp_processor_id();
+ if (unlikely(q_idx >= fp->num_xdpqs))
+ return -ENXIO;
+
+ for (q = xdpqs[q_idx], i = 0; i < n; i++) {
+ const struct xdp_frame *xdpf = frames[i];
+
+ if (!fun_xdp_tx(q, xdpf->data, xdpf->len))
+ break;
+ }
+
+ if (unlikely(flags & XDP_XMIT_FLUSH))
+ fun_txq_wr_db(q);
+ return i;
+}
+
+/* Purge a Tx queue of any queued packets. Should be called once HW access
+ * to the packets has been revoked, e.g., after the queue has been disabled.
+ */
+static void fun_txq_purge(struct funeth_txq *q)
+{
+ while (q->cons_cnt != q->prod_cnt) {
+ unsigned int idx = q->cons_cnt & q->mask;
+
+ q->cons_cnt += unmap_skb(q, idx);
+ dev_kfree_skb_any(q->info[idx].skb);
+ }
+ netdev_tx_reset_queue(q->ndq);
+}
+
+static void fun_xdpq_purge(struct funeth_txq *q)
+{
+ while (q->cons_cnt != q->prod_cnt) {
+ unsigned int idx = q->cons_cnt & q->mask;
+
+ fun_xdp_unmap(q, idx);
+ page_frag_free(q->info[idx].vaddr);
+ q->cons_cnt++;
+ }
+}
+
+/* Create a Tx queue, allocating all the host resources needed. */
+static struct funeth_txq *fun_txq_create_sw(struct net_device *dev,
+ unsigned int qidx,
+ unsigned int ndesc,
+ struct fun_irq *irq)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct funeth_txq *q;
+ int numa_node;
+
+ if (irq)
+ numa_node = fun_irq_node(irq); /* skb Tx queue */
+ else
+ numa_node = cpu_to_node(qidx); /* XDP Tx queue */
+
+ q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node);
+ if (!q)
+ goto err;
+
+ q->dma_dev = &fp->pdev->dev;
+ q->desc = fun_alloc_ring_mem(q->dma_dev, ndesc, FUNETH_SQE_SIZE,
+ sizeof(*q->info), true, numa_node,
+ &q->dma_addr, (void **)&q->info,
+ &q->hw_wb);
+ if (!q->desc)
+ goto free_q;
+
+ q->netdev = dev;
+ q->mask = ndesc - 1;
+ q->qidx = qidx;
+ q->numa_node = numa_node;
+ u64_stats_init(&q->syncp);
+ q->init_state = FUN_QSTATE_INIT_SW;
+ return q;
+
+free_q:
+ kfree(q);
+err:
+ netdev_err(dev, "Can't allocate memory for %s queue %u\n",
+ irq ? "Tx" : "XDP", qidx);
+ return NULL;
+}
+
+static void fun_txq_free_sw(struct funeth_txq *q)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+
+ fun_free_ring_mem(q->dma_dev, q->mask + 1, FUNETH_SQE_SIZE, true,
+ q->desc, q->dma_addr, q->info);
+
+ fp->tx_packets += q->stats.tx_pkts;
+ fp->tx_bytes += q->stats.tx_bytes;
+ fp->tx_dropped += q->stats.tx_map_err;
+
+ kfree(q);
+}
+
+/* Allocate the device portion of a Tx queue. */
+int fun_txq_create_dev(struct funeth_txq *q, struct fun_irq *irq)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+ unsigned int irq_idx, ndesc = q->mask + 1;
+ int err;
+
+ q->irq = irq;
+ *q->hw_wb = 0;
+ q->prod_cnt = 0;
+ q->cons_cnt = 0;
+ irq_idx = irq ? irq->irq_idx : 0;
+
+ err = fun_sq_create(fp->fdev,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS |
+ FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR, 0,
+ FUN_HCI_ID_INVALID, ilog2(FUNETH_SQE_SIZE), ndesc,
+ q->dma_addr, fp->tx_coal_count, fp->tx_coal_usec,
+ irq_idx, 0, fp->fdev->kern_end_qid, 0,
+ &q->hw_qid, &q->db);
+ if (err)
+ goto out;
+
+ err = fun_create_and_bind_tx(fp, q->hw_qid);
+ if (err < 0)
+ goto free_devq;
+ q->ethid = err;
+
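+ /* skb Tx queues hook into NAPI and start with an armed, moderated IRQ
+ * doorbell; XDP Tx queues have no IRQ and are cleaned from the xmit path.
+ */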
+ if (irq) {
+ irq->txq = q;
+ q->ndq = netdev_get_tx_queue(q->netdev, q->qidx);
+ q->irq_db_val = FUN_IRQ_SQ_DB(fp->tx_coal_usec,
+ fp->tx_coal_count);
+ writel(q->irq_db_val, q->db);
+ }
+
+ q->init_state = FUN_QSTATE_INIT_FULL;
+ netif_info(fp, ifup, q->netdev,
+ "%s queue %u, depth %u, HW qid %u, IRQ idx %u, eth id %u, node %d\n",
+ irq ? "Tx" : "XDP", q->qidx, ndesc, q->hw_qid, irq_idx,
+ q->ethid, q->numa_node);
+ return 0;
+
+free_devq:
+ fun_destroy_sq(fp->fdev, q->hw_qid);
+out:
+ netdev_err(q->netdev,
+ "Failed to create %s queue %u on device, error %d\n",
+ irq ? "Tx" : "XDP", q->qidx, err);
+ return err;
+}
+
+static void fun_txq_free_dev(struct funeth_txq *q)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+
+ if (q->init_state < FUN_QSTATE_INIT_FULL)
+ return;
+
+ netif_info(fp, ifdown, q->netdev,
+ "Freeing %s queue %u (id %u), IRQ %u, ethid %u\n",
+ q->irq ? "Tx" : "XDP", q->qidx, q->hw_qid,
+ q->irq ? q->irq->irq_idx : 0, q->ethid);
+
+ fun_destroy_sq(fp->fdev, q->hw_qid);
+ fun_res_destroy(fp->fdev, FUN_ADMIN_OP_ETH, 0, q->ethid);
+
+ if (q->irq) {
+ q->irq->txq = NULL;
+ fun_txq_purge(q);
+ } else {
+ fun_xdpq_purge(q);
+ }
+
+ q->init_state = FUN_QSTATE_INIT_SW;
+}
+
+/* Create or advance a Tx queue, allocating all the host and device resources
+ * needed to reach the target state.
+ */
+int funeth_txq_create(struct net_device *dev, unsigned int qidx,
+ unsigned int ndesc, struct fun_irq *irq, int state,
+ struct funeth_txq **qp)
+{
+ struct funeth_txq *q = *qp;
+ int err;
+
+ if (!q)
+ q = fun_txq_create_sw(dev, qidx, ndesc, irq);
+ if (!q)
+ return -ENOMEM;
+
+ if (q->init_state >= state)
+ goto out;
+
+ err = fun_txq_create_dev(q, irq);
+ if (err) {
+ if (!*qp)
+ fun_txq_free_sw(q);
+ return err;
+ }
+
+out:
+ *qp = q;
+ return 0;
+}
+
+/* Free Tx queue resources until it reaches the target state.
+ * The queue must already be disconnected from the stack.
+ */
+struct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state)
+{
+ if (state < FUN_QSTATE_INIT_FULL)
+ fun_txq_free_dev(q);
+
+ if (state == FUN_QSTATE_DESTROYED) {
+ fun_txq_free_sw(q);
+ q = NULL;
+ }
+
+ return q;
+}
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
new file mode 100644
index 000000000000..04c9f91b7489
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
@@ -0,0 +1,264 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef _FUNETH_TXRX_H
+#define _FUNETH_TXRX_H
+
+#include <linux/netdevice.h>
+#include <linux/u64_stats_sync.h>
+
+/* Tx descriptor size */
+#define FUNETH_SQE_SIZE 64U
+
+/* Size of device headers per Tx packet */
+#define FUNETH_FUNOS_HDR_SZ (sizeof(struct fun_eth_tx_req))
+
+/* Number of gather list entries per Tx descriptor */
+#define FUNETH_GLE_PER_DESC (FUNETH_SQE_SIZE / sizeof(struct fun_dataop_gl))
+
+/* Max gather list size in bytes for an sk_buff. */
+#define FUNETH_MAX_GL_SZ ((MAX_SKB_FRAGS + 1) * sizeof(struct fun_dataop_gl))
+
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+# define FUNETH_TLS_SZ sizeof(struct fun_eth_tls)
+#else
+# define FUNETH_TLS_SZ 0
+#endif
+
+/* Max number of Tx descriptors for an sk_buff using a gather list. */
+#define FUNETH_MAX_GL_DESC \
+ DIV_ROUND_UP((FUNETH_FUNOS_HDR_SZ + FUNETH_MAX_GL_SZ + FUNETH_TLS_SZ), \
+ FUNETH_SQE_SIZE)
+
+/* Max number of Tx descriptors for any packet. */
+#define FUNETH_MAX_PKT_DESC FUNETH_MAX_GL_DESC
+
+/* Rx CQ descriptor size. */
+#define FUNETH_CQE_SIZE 64U
+
+/* Offset of cqe_info within a CQE. */
+#define FUNETH_CQE_INFO_OFFSET (FUNETH_CQE_SIZE - sizeof(struct fun_cqe_info))
+
+/* Construct the IRQ portion of a CQ doorbell. The resulting value arms the
+ * interrupt with the supplied time delay and packet count moderation settings.
+ */
+#define FUN_IRQ_CQ_DB(usec, pkts) \
+ (FUN_DB_IRQ_ARM_F | ((usec) << FUN_DB_INTCOAL_USEC_S) | \
+ ((pkts) << FUN_DB_INTCOAL_ENTRIES_S))
+
+/* As above for SQ doorbells. */
+#define FUN_IRQ_SQ_DB(usec, pkts) \
+ (FUN_DB_IRQ_ARM_F | \
+ ((usec) << FUN_DB_INTCOAL_USEC_S) | \
+ ((pkts) << FUN_DB_INTCOAL_ENTRIES_S))
+
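+/* Illustrative only (not taken from the driver): re-arm a Tx queue's IRQ with
+ * 8 usec / 16 packet coalescing while publishing the current consumer index:
+ *
+ *     writel(FUN_IRQ_SQ_DB(8, 16) | (q->cons_cnt & q->mask), q->db);
+ */
+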
+/* Per packet tailroom. Present only for 1-frag packets. */
+#define FUN_RX_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+
+/* Per packet headroom for XDP. Preferred over XDP_PACKET_HEADROOM so that
+ * two packets fit per 4K-page buffer with 1500B MTUs.
+ */
+#define FUN_XDP_HEADROOM 192
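+/* Roughly (illustrative sizes): 192B headroom + ~1514B frame + shared-info
+ * tailroom stays under 2KB, so two such buffers fit in a 4KB page.
+ */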
+
+/* Initialization state of a queue. */
+enum {
+ FUN_QSTATE_DESTROYED, /* what queue? */
+ FUN_QSTATE_INIT_SW, /* exists in SW, not on the device */
+ FUN_QSTATE_INIT_FULL, /* exists both in SW and on device */
+};
+
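+/* The *_create() helpers below advance a queue towards FUN_QSTATE_INIT_FULL;
+ * the *_free() helpers walk it back towards the requested target state.
+ */
+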
+/* Initialization state of an interrupt. */
+enum {
+ FUN_IRQ_INIT, /* initialized and in the XArray but inactive */
+ FUN_IRQ_REQUESTED, /* request_irq() done */
+ FUN_IRQ_ENABLED, /* processing enabled */
+ FUN_IRQ_DISABLED, /* processing disabled */
+};
+
+struct bpf_prog;
+
+struct funeth_txq_stats { /* per Tx queue SW counters */
+ u64 tx_pkts; /* # of Tx packets */
+ u64 tx_bytes; /* total bytes of Tx packets */
+ u64 tx_cso; /* # of packets with checksum offload */
+ u64 tx_tso; /* # of non-encapsulated TSO super-packets */
+ u64 tx_encap_tso; /* # of encapsulated TSO super-packets */
+ u64 tx_more; /* # of DBs elided due to xmit_more */
+ u64 tx_nstops; /* # of times the queue has stopped */
+ u64 tx_nrestarts; /* # of times the queue has restarted */
+ u64 tx_map_err; /* # of packets dropped due to DMA mapping errors */
+ u64 tx_xdp_full; /* # of XDP packets that could not be enqueued */
+ u64 tx_tls_pkts; /* # of Tx TLS packets offloaded to HW */
+ u64 tx_tls_bytes; /* Tx bytes of HW-handled TLS payload */
+ u64 tx_tls_fallback; /* attempted Tx TLS offloads punted to SW */
+ u64 tx_tls_drops; /* attempted Tx TLS offloads dropped */
+};
+
+struct funeth_tx_info { /* per Tx descriptor state */
+ union {
+ struct sk_buff *skb; /* associated packet */
+ void *vaddr; /* start address for XDP */
+ };
+};
+
+struct funeth_txq {
+ /* RO cacheline of frequently accessed data */
+ u32 mask; /* queue depth - 1 */
+ u32 hw_qid; /* device ID of the queue */
+ void *desc; /* base address of descriptor ring */
+ struct funeth_tx_info *info;
+ struct device *dma_dev; /* device for DMA mappings */
+ volatile __be64 *hw_wb; /* HW write-back location */
+ u32 __iomem *db; /* SQ doorbell register address */
+ struct netdev_queue *ndq;
+ dma_addr_t dma_addr; /* DMA address of descriptor ring */
+ /* producer R/W cacheline */
+ u16 qidx; /* queue index within net_device */
+ u16 ethid; /* device ETH resource id bound to this queue */
+ u32 prod_cnt; /* producer counter */
+ struct funeth_txq_stats stats;
+ /* shared R/W cacheline, primarily accessed by consumer */
+ u32 irq_db_val; /* value written to IRQ doorbell */
+ u32 cons_cnt; /* consumer (cleanup) counter */
+ struct net_device *netdev;
+ struct fun_irq *irq;
+ int numa_node;
+ u8 init_state; /* queue initialization state */
+ struct u64_stats_sync syncp;
+};
+
+struct funeth_rxq_stats { /* per Rx queue SW counters */
+ u64 rx_pkts; /* # of received packets, including SW drops */
+ u64 rx_bytes; /* total size of received packets */
+ u64 rx_cso; /* # of packets with checksum offload */
+ u64 rx_bufs; /* total # of Rx buffers provided to device */
+ u64 gro_pkts; /* # of GRO superpackets */
+ u64 gro_merged; /* # of pkts merged into existing GRO superpackets */
+ u64 rx_page_alloc; /* # of page allocations for Rx buffers */
+ u64 rx_budget; /* NAPI iterations that exhausted their budget */
+ u64 rx_mem_drops; /* # of packets dropped due to memory shortage */
+ u64 rx_map_err; /* # of page DMA mapping errors */
+ u64 xdp_drops; /* XDP_DROPped packets */
+ u64 xdp_tx; /* successful XDP transmits */
+ u64 xdp_redir; /* successful XDP redirects */
+ u64 xdp_err; /* packets dropped due to XDP errors */
+};
+
+struct funeth_rxbuf { /* per Rx buffer state */
+ struct page *page; /* associated page */
+ dma_addr_t dma_addr; /* DMA address of page start */
+ int pg_refs; /* page refs held by driver */
+ int node; /* page node, or -1 if it is PF_MEMALLOC */
+};
+
+struct funeth_rx_cache { /* cache of DMA-mapped previously used buffers */
+ struct funeth_rxbuf *bufs; /* base of Rx buffer state ring */
+ unsigned int prod_cnt; /* producer counter */
+ unsigned int cons_cnt; /* consumer counter */
+ unsigned int mask; /* depth - 1 */
+};
+
+/* An Rx queue consists of a CQ and an SQ used to provide Rx buffers. */
+struct funeth_rxq {
+ struct net_device *netdev;
+ struct napi_struct *napi;
+ struct device *dma_dev; /* device for DMA mappings */
+ void *cqes; /* base of CQ descriptor ring */
+ const void *next_cqe_info; /* fun_cqe_info of next CQE */
+ u32 __iomem *cq_db; /* CQ doorbell register address */
+ unsigned int cq_head; /* CQ head index */
+ unsigned int cq_mask; /* CQ depth - 1 */
+ u16 phase; /* CQ phase tag */
+ u16 qidx; /* queue index within net_device */
+ unsigned int irq_db_val; /* IRQ info for CQ doorbell */
+ struct fun_eprq_rqbuf *rqes; /* base of RQ descriptor ring */
+ struct funeth_rxbuf *bufs; /* base of Rx buffer state ring */
+ struct funeth_rxbuf *cur_buf; /* currently active buffer */
+ u32 __iomem *rq_db; /* RQ doorbell register address */
+ unsigned int rq_cons; /* RQ consumer counter */
+ unsigned int rq_mask; /* RQ depth - 1 */
+ unsigned int buf_offset; /* offset of next pkt in head buffer */
+ u8 xdp_flush; /* XDP flush types needed at NAPI end */
+ u8 init_state; /* queue initialization state */
+ u16 headroom; /* per packet headroom */
+ unsigned int rq_cons_db; /* value of rq_cons at last RQ db */
+ unsigned int rq_db_thres; /* # of new buffers needed to write RQ db */
+ struct funeth_rxbuf spare_buf; /* spare for next buffer replacement */
+ struct funeth_rx_cache cache; /* used buffer cache */
+ struct bpf_prog *xdp_prog; /* optional XDP BPF program */
+ struct funeth_rxq_stats stats;
+ dma_addr_t cq_dma_addr; /* DMA address of CQE ring */
+ dma_addr_t rq_dma_addr; /* DMA address of RQE ring */
+ u16 irq_cnt;
+ u32 hw_cqid; /* device ID of the queue's CQ */
+ u32 hw_sqid; /* device ID of the queue's SQ */
+ int numa_node;
+ struct u64_stats_sync syncp;
+ struct xdp_rxq_info xdp_rxq;
+};
+
+#define FUN_QSTAT_INC(q, counter) \
+ do { \
+ u64_stats_update_begin(&(q)->syncp); \
+ (q)->stats.counter++; \
+ u64_stats_update_end(&(q)->syncp); \
+ } while (0)
+
+#define FUN_QSTAT_READ(q, seq, stats_copy) \
+ do { \
+ seq = u64_stats_fetch_begin(&(q)->syncp); \
+ stats_copy = (q)->stats; \
+ } while (u64_stats_fetch_retry(&(q)->syncp, (seq)))
+
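+/* Illustrative only: take a consistent snapshot of a queue's counters.
+ *
+ *     struct funeth_txq_stats txs;
+ *     unsigned int seq;
+ *
+ *     FUN_QSTAT_READ(q, seq, txs);
+ */
+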
+#define FUN_INT_NAME_LEN (IFNAMSIZ + 16)
+
+struct fun_irq {
+ struct napi_struct napi;
+ struct funeth_txq *txq;
+ struct funeth_rxq *rxq;
+ u8 state;
+ u16 irq_idx; /* index of MSI-X interrupt */
+ int irq; /* Linux IRQ vector */
+ cpumask_t affinity_mask; /* IRQ affinity */
+ struct irq_affinity_notify aff_notify;
+ char name[FUN_INT_NAME_LEN];
+} ____cacheline_internodealigned_in_smp;
+
+/* Return the start address of the idx-th Tx descriptor. */
+static inline void *fun_tx_desc_addr(const struct funeth_txq *q,
+ unsigned int idx)
+{
+ return q->desc + idx * FUNETH_SQE_SIZE;
+}
+
+static inline void fun_txq_wr_db(const struct funeth_txq *q)
+{
+ unsigned int tail = q->prod_cnt & q->mask;
+
+ writel(tail, q->db);
+}
+
+static inline int fun_irq_node(const struct fun_irq *p)
+{
+ return cpu_to_mem(cpumask_first(&p->affinity_mask));
+}
+
+int fun_rxq_napi_poll(struct napi_struct *napi, int budget);
+int fun_txq_napi_poll(struct napi_struct *napi, int budget);
+netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len);
+int fun_xdp_xmit_frames(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags);
+
+int funeth_txq_create(struct net_device *dev, unsigned int qidx,
+ unsigned int ndesc, struct fun_irq *irq, int state,
+ struct funeth_txq **qp);
+int fun_txq_create_dev(struct funeth_txq *q, struct fun_irq *irq);
+struct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state);
+int funeth_rxq_create(struct net_device *dev, unsigned int qidx,
+ unsigned int ncqe, unsigned int nrqe, struct fun_irq *irq,
+ int state, struct funeth_rxq **qp);
+int fun_rxq_create_dev(struct funeth_rxq *q, struct fun_irq *irq);
+struct funeth_rxq *funeth_rxq_free(struct funeth_rxq *q, int state);
+int fun_rxq_set_bpf(struct funeth_rxq *q, struct bpf_prog *prog);
+
+#endif /* _FUNETH_TXRX_H */
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 54e51c8221b8..6cafee55efc3 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -857,8 +857,7 @@ static int gve_alloc_qpls(struct gve_priv *priv)
int i, j;
int err;
- /* Raw addressing means no QPLs */
- if (priv->queue_format == GVE_GQI_RDA_FORMAT)
+ if (num_qpls == 0)
return 0;
priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL);
@@ -901,8 +900,7 @@ static void gve_free_qpls(struct gve_priv *priv)
int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
int i;
- /* Raw addressing means no QPLs */
- if (priv->queue_format == GVE_GQI_RDA_FORMAT)
+ if (num_qpls == 0)
return;
kvfree(priv->qpl_cfg.qpl_id_map);
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index e4e98aa7745f..021bbf308d68 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -439,7 +439,7 @@ static bool gve_rx_ctx_init(struct gve_rx_ctx *ctx, struct gve_rx_ring *rx)
if (frag_size > rx->packet_buffer_size) {
packet_size_error = true;
netdev_warn(priv->dev,
- "RX fragment error: packet_buffer_size=%d, frag_size=%d, droping packet.",
+ "RX fragment error: packet_buffer_size=%d, frag_size=%d, dropping packet.",
rx->packet_buffer_size, be16_to_cpu(desc->len));
}
page_info = &rx->data.page_info[idx];
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index d7a27c244d48..54faf0f2d1d8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -887,8 +887,8 @@ static void hns_get_ethtool_stats(struct net_device *netdev,
p[21] = net_stats->rx_compressed;
p[22] = net_stats->tx_compressed;
- p[23] = netdev->rx_dropped.counter;
- p[24] = netdev->tx_dropped.counter;
+ p[23] = 0; /* was netdev->rx_dropped.counter */
+ p[24] = 0; /* was netdev->tx_dropped.counter */
p[25] = priv->tx_timeout_count;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 9298fbecb31a..6f18c9a03231 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -167,6 +167,7 @@ struct hnae3_handle;
struct hnae3_queue {
void __iomem *io_base;
+ void __iomem *mem_base;
struct hnae3_ae_algo *ae_algo;
struct hnae3_handle *handle;
int tqp_index; /* index in a handle */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index babc5d7a3b52..0b8a73c40b12 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2028,9 +2028,73 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
return bd_num;
}
+static void hns3_tx_push_bd(struct hns3_enet_ring *ring, int num)
+{
+#define HNS3_BYTES_PER_64BIT 8
+
+ struct hns3_desc desc[HNS3_MAX_PUSH_BD_NUM] = {};
+ int offset = 0;
+
+ /* make sure everything is visible to the device before
+ * executing tx push or updating the doorbell
+ */
+ dma_wmb();
+
+ do {
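+ /* The BDs to push were already written to the ring at indices
+ * next_to_use - num ... next_to_use - 1; copy them into the local
+ * array in order.
+ */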
+ int idx = (ring->next_to_use - num + ring->desc_num) %
+ ring->desc_num;
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_push++;
+ u64_stats_update_end(&ring->syncp);
+ memcpy(&desc[offset], &ring->desc[idx],
+ sizeof(struct hns3_desc));
+ offset++;
+ } while (--num);
+
+ __iowrite64_copy(ring->tqp->mem_base, desc,
+ (sizeof(struct hns3_desc) * HNS3_MAX_PUSH_BD_NUM) /
+ HNS3_BYTES_PER_64BIT);
+
+ io_stop_wc();
+}
+
+static void hns3_tx_mem_doorbell(struct hns3_enet_ring *ring)
+{
+#define HNS3_MEM_DOORBELL_OFFSET 64
+
+ __le64 bd_num = cpu_to_le64((u64)ring->pending_buf);
+
+ /* make sure everything is visible to the device before
+ * executing tx push or updating the doorbell
+ */
+ dma_wmb();
+
+ __iowrite64_copy(ring->tqp->mem_base + HNS3_MEM_DOORBELL_OFFSET,
+ &bd_num, 1);
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_mem_doorbell += ring->pending_buf;
+ u64_stats_update_end(&ring->syncp);
+
+ io_stop_wc();
+}
+
static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
bool doorbell)
{
+ struct net_device *netdev = ring_to_netdev(ring);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ /* when tx push is enabled, a packet using no more than
+ * HNS3_MAX_PUSH_BD_NUM BDs can be pushed directly.
+ */
+ if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num &&
+ !ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) {
+ hns3_tx_push_bd(ring, num);
+ WRITE_ONCE(ring->last_to_use, ring->next_to_use);
+ return;
+ }
+
ring->pending_buf += num;
if (!doorbell) {
@@ -2038,11 +2102,12 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
return;
}
- if (!ring->pending_buf)
- return;
+ if (ring->tqp->mem_base)
+ hns3_tx_mem_doorbell(ring);
+ else
+ writel(ring->pending_buf,
+ ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
- writel(ring->pending_buf,
- ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
ring->pending_buf = 0;
WRITE_ONCE(ring->last_to_use, ring->next_to_use);
}
@@ -2732,6 +2797,9 @@ static void hns3_dump_queue_stats(struct net_device *ndev,
"seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n",
tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more,
tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
+
+ netdev_info(ndev, "tx_push: %llu, tx_mem_doorbell: %llu\n",
+ tx_ring->stats.tx_push, tx_ring->stats.tx_mem_doorbell);
}
static void hns3_dump_queue_reg(struct net_device *ndev,
@@ -5094,6 +5162,9 @@ static void hns3_state_init(struct hnae3_handle *handle)
set_bit(HNS3_NIC_STATE_INITED, &priv->state);
+ if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
+ set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+
if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index a05a0c7423ce..4a3253692dcc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -7,6 +7,7 @@
#include <linux/dim.h>
#include <linux/if_vlan.h>
#include <net/page_pool.h>
+#include <asm/barrier.h>
#include "hnae3.h"
@@ -25,9 +26,12 @@ enum hns3_nic_state {
HNS3_NIC_STATE2_RESET_REQUESTED,
HNS3_NIC_STATE_HW_TX_CSUM_ENABLE,
HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE,
+ HNS3_NIC_STATE_TX_PUSH_ENABLE,
HNS3_NIC_STATE_MAX
};
+#define HNS3_MAX_PUSH_BD_NUM 2
+
#define HNS3_RING_RX_RING_BASEADDR_L_REG 0x00000
#define HNS3_RING_RX_RING_BASEADDR_H_REG 0x00004
#define HNS3_RING_RX_RING_BD_NUM_REG 0x00008
@@ -410,6 +414,8 @@ struct ring_stats {
u64 tx_pkts;
u64 tx_bytes;
u64 tx_more;
+ u64 tx_push;
+ u64 tx_mem_doorbell;
u64 restart_queue;
u64 tx_busy;
u64 tx_copy;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index c06c39ece80d..6469238ae090 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -23,6 +23,8 @@ static const struct hns3_stats hns3_txq_stats[] = {
HNS3_TQP_STAT("packets", tx_pkts),
HNS3_TQP_STAT("bytes", tx_bytes),
HNS3_TQP_STAT("more", tx_more),
+ HNS3_TQP_STAT("push", tx_push),
+ HNS3_TQP_STAT("mem_doorbell", tx_mem_doorbell),
HNS3_TQP_STAT("wake", restart_queue),
HNS3_TQP_STAT("busy", tx_busy),
HNS3_TQP_STAT("copy", tx_copy),
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 24f7afacae02..78d0498bdabc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -1643,6 +1643,7 @@ static int hclge_config_gro(struct hclge_dev *hdev)
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclge_comm_tqp *tqp;
int i;
@@ -1676,6 +1677,14 @@ static int hclge_alloc_tqps(struct hclge_dev *hdev)
(i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
HCLGE_TQP_REG_SIZE;
+ /* When the device supports tx push and has device memory,
+ * the queue can use push mode or ring its doorbell through
+ * device memory.
+ */
+ if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
+ tqp->q.mem_base = hdev->hw.hw.mem_base +
+ HCLGE_TQP_MEM_OFFSET(hdev, i);
+
tqp++;
}
@@ -11008,8 +11017,6 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
static int hclge_dev_mem_map(struct hclge_dev *hdev)
{
-#define HCLGE_MEM_BAR 4
-
struct pci_dev *pdev = hdev->pdev;
struct hclge_hw *hw = &hdev->hw;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index adfb26e79262..fc92ae385e30 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -169,6 +169,14 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_VECTOR0_ALL_MSIX_ERR_B 6U
#define HCLGE_TRIGGER_IMP_RESET_B 7U
+#define HCLGE_TQP_MEM_SIZE 0x10000
+#define HCLGE_MEM_BAR 4
+/* In BAR4, the first half is for RoCE and the second half is for the NIC */
+#define HCLGE_NIC_MEM_OFFSET(hdev) \
+ (pci_resource_len((hdev)->pdev, HCLGE_MEM_BAR) >> 1)
+#define HCLGE_TQP_MEM_OFFSET(hdev, i) \
+ (HCLGE_NIC_MEM_OFFSET(hdev) + HCLGE_TQP_MEM_SIZE * (i))
+
#define HCLGE_MAC_DEFAULT_FRAME \
(ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
#define HCLGE_MAC_MIN_FRAME 64
@@ -1060,11 +1068,6 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue)
return tqp->index;
}
-static inline bool hclge_is_reset_pending(struct hclge_dev *hdev)
-{
- return !!hdev->reset_pending;
-}
-
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 089f4444b7e3..1f87a8a3fe32 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -1225,7 +1225,7 @@ static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
if (ret == -EOPNOTSUPP) {
dev_warn(&hdev->pdev->dev,
- "fw %08x does't support ets tc weight cmd\n",
+ "fw %08x doesn't support ets tc weight cmd\n",
hdev->fw_version);
ret = 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 21442a9bb996..93389bec8d89 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -321,6 +321,7 @@ static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclge_comm_tqp *tqp;
int i;
@@ -354,6 +355,14 @@ static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
(i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) *
HCLGEVF_TQP_REG_SIZE;
+ /* When the device supports tx push and has device memory,
+ * the queue can use push mode or ring its doorbell through
+ * device memory.
+ */
+ if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
+ tqp->q.mem_base = hdev->hw.hw.mem_base +
+ HCLGEVF_TQP_MEM_OFFSET(hdev, i);
+
tqp++;
}
@@ -2546,8 +2555,6 @@ static void hclgevf_uninit_client_instance(struct hnae3_client *client,
static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev)
{
-#define HCLGEVF_MEM_BAR 4
-
struct pci_dev *pdev = hdev->pdev;
struct hclgevf_hw *hw = &hdev->hw;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 502ca1ce1a90..4b00fd44f118 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -96,6 +96,14 @@
#define HCLGEVF_RSS_IND_TBL_SIZE 512
+#define HCLGEVF_TQP_MEM_SIZE 0x10000
+#define HCLGEVF_MEM_BAR 4
+/* In BAR4, the first half is for RoCE and the second half is for the NIC */
+#define HCLGEVF_NIC_MEM_OFFSET(hdev) \
+ (pci_resource_len((hdev)->pdev, HCLGEVF_MEM_BAR) >> 1)
+#define HCLGEVF_TQP_MEM_OFFSET(hdev, i) \
+ (HCLGEVF_NIC_MEM_OFFSET(hdev) + HCLGEVF_TQP_MEM_SIZE * (i))
+
#define HCLGEVF_MAC_MAX_FRAME 9728
#define HCLGEVF_STATS_TIMER_INTERVAL 36U
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.h b/drivers/net/ethernet/i825xx/sun3_82586.h
index 451cb3d26cb5..d82eca563266 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.h
+++ b/drivers/net/ethernet/i825xx/sun3_82586.h
@@ -150,7 +150,7 @@ struct rfd_struct
#define RFD_ERR_RNR 0x02 /* status: receiver out of resources */
#define RFD_ERR_OVR 0x01 /* DMA Overrun! */
-#define RFD_ERR_FTS 0x0080 /* Frame to short */
+#define RFD_ERR_FTS 0x0080 /* Frame too short */
#define RFD_ERR_NEOP 0x0040 /* No EOP flag (for bitstuffing only) */
#define RFD_ERR_TRUN 0x0020 /* (82596 only/SF mode) indicates truncated frame */
#define RFD_MATCHADD 0x0002 /* status: Destinationaddress !matches IA (only 82596) */
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index b423e94956f1..77683909ca3d 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -60,6 +60,7 @@
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
+#include <asm/xive.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
@@ -1429,6 +1430,15 @@ static int __ibmvnic_open(struct net_device *netdev)
return rc;
}
+ adapter->tx_queues_active = true;
+
+ /* Since queues were stopped until now, there shouldn't be any
+ * one in ibmvnic_complete_tx() or ibmvnic_xmit() so maybe we
+ * don't need the synchronize_rcu()? Leaving it for consistency
+ * with setting ->tx_queues_active = false.
+ */
+ synchronize_rcu();
+
netif_tx_start_all_queues(netdev);
if (prev_state == VNIC_CLOSED) {
@@ -1603,6 +1613,14 @@ static void ibmvnic_cleanup(struct net_device *netdev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
/* ensure that transmissions are stopped if called by do_reset */
+
+ adapter->tx_queues_active = false;
+
+ /* Ensure complete_tx() and ibmvnic_xmit() see ->tx_queues_active
+ * update so they don't restart a queue after we stop it below.
+ */
+ synchronize_rcu();
+
if (test_bit(0, &adapter->resetting))
netif_tx_disable(netdev);
else
@@ -1842,14 +1860,21 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
tx_buff->skb = NULL;
adapter->netdev->stats.tx_dropped++;
}
+
ind_bufp->index = 0;
+
if (atomic_sub_return(entries, &tx_scrq->used) <=
(adapter->req_tx_entries_per_subcrq / 2) &&
- __netif_subqueue_stopped(adapter->netdev, queue_num) &&
- !test_bit(0, &adapter->resetting)) {
- netif_wake_subqueue(adapter->netdev, queue_num);
- netdev_dbg(adapter->netdev, "Started queue %d\n",
- queue_num);
+ __netif_subqueue_stopped(adapter->netdev, queue_num)) {
+ rcu_read_lock();
+
+ if (adapter->tx_queues_active) {
+ netif_wake_subqueue(adapter->netdev, queue_num);
+ netdev_dbg(adapter->netdev, "Started queue %d\n",
+ queue_num);
+ }
+
+ rcu_read_unlock();
}
}
@@ -1904,11 +1929,12 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
int index = 0;
u8 proto = 0;
- tx_scrq = adapter->tx_scrq[queue_num];
- txq = netdev_get_tx_queue(netdev, queue_num);
- ind_bufp = &tx_scrq->ind_buf;
-
- if (test_bit(0, &adapter->resetting)) {
+ /* If a reset is in progress, drop the packet since
+ * the scrqs may get torn down. Otherwise use the
+ * rcu to ensure reset waits for us to complete.
+ */
+ rcu_read_lock();
+ if (!adapter->tx_queues_active) {
dev_kfree_skb_any(skb);
tx_send_failed++;
@@ -1917,6 +1943,10 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
goto out;
}
+ tx_scrq = adapter->tx_scrq[queue_num];
+ txq = netdev_get_tx_queue(netdev, queue_num);
+ ind_bufp = &tx_scrq->ind_buf;
+
if (ibmvnic_xmit_workarounds(skb, netdev)) {
tx_dropped++;
tx_send_failed++;
@@ -1924,6 +1954,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
ibmvnic_tx_scrq_flush(adapter, tx_scrq);
goto out;
}
+
if (skb_is_gso(skb))
tx_pool = &adapter->tso_pool[queue_num];
else
@@ -2078,6 +2109,7 @@ tx_err:
netif_carrier_off(netdev);
}
out:
+ rcu_read_unlock();
netdev->stats.tx_dropped += tx_dropped;
netdev->stats.tx_bytes += tx_bytes;
netdev->stats.tx_packets += tx_packets;
@@ -3640,6 +3672,30 @@ static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
return rc;
}
+/* We cannot use the IRQ chip EOI handler because that has the
+ * unintended effect of changing the interrupt priority.
+ */
+static void ibmvnic_xics_eoi(struct device *dev, struct ibmvnic_sub_crq_queue *scrq)
+{
+ u64 val = 0xff000000 | scrq->hw_irq;
+ unsigned long rc;
+
+ rc = plpar_hcall_norets(H_EOI, val);
+ if (rc)
+ dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", val, rc);
+}
+
+/* Due to a firmware bug, the hypervisor can send an interrupt to a
+ * transmit or receive queue just prior to a partition migration.
+ * Force an EOI after migration.
+ */
+static void ibmvnic_clear_pending_interrupt(struct device *dev,
+ struct ibmvnic_sub_crq_queue *scrq)
+{
+ if (!xive_enabled())
+ ibmvnic_xics_eoi(dev, scrq);
+}
+
static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *scrq)
{
@@ -3653,15 +3709,7 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
if (test_bit(0, &adapter->resetting) &&
adapter->reset_reason == VNIC_RESET_MOBILITY) {
- u64 val = (0xff000000) | scrq->hw_irq;
-
- rc = plpar_hcall_norets(H_EOI, val);
- /* H_EOI would fail with rc = H_FUNCTION when running
- * in XIVE mode which is expected, but not an error.
- */
- if (rc && (rc != H_FUNCTION))
- dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
- val, rc);
+ ibmvnic_clear_pending_interrupt(dev, scrq);
}
rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
@@ -3732,9 +3780,15 @@ restart_loop:
(adapter->req_tx_entries_per_subcrq / 2) &&
__netif_subqueue_stopped(adapter->netdev,
scrq->pool_index)) {
- netif_wake_subqueue(adapter->netdev, scrq->pool_index);
- netdev_dbg(adapter->netdev, "Started queue %d\n",
- scrq->pool_index);
+ rcu_read_lock();
+ if (adapter->tx_queues_active) {
+ netif_wake_subqueue(adapter->netdev,
+ scrq->pool_index);
+ netdev_dbg(adapter->netdev,
+ "Started queue %d\n",
+ scrq->pool_index);
+ }
+ rcu_read_unlock();
}
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index fa2d607a7b1b..8f5cefb932dd 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1006,11 +1006,14 @@ struct ibmvnic_adapter {
struct work_struct ibmvnic_reset;
struct delayed_work ibmvnic_delayed_reset;
unsigned long resetting;
- bool napi_enabled, from_passive_init;
- bool login_pending;
/* last device reset time */
unsigned long last_reset_time;
+ bool napi_enabled;
+ bool from_passive_init;
+ bool login_pending;
+ /* protected by rcu */
+ bool tx_queues_active;
bool failover_pending;
bool force_reset_recovery;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index c5bdef3ffe26..fa06f68c8c80 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -7414,9 +7414,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
resource_size_t flash_start, flash_len;
static int cards_found;
u16 aspm_disable_flag = 0;
- int bars, i, err, pci_using_dac;
u16 eeprom_data = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
+ int bars, i, err;
s32 ret_val = 0;
if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
@@ -7430,17 +7430,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- pci_using_dac = 0;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
bars = pci_select_bars(pdev, IORESOURCE_MEM);
@@ -7576,10 +7570,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_UNICAST_FLT;
- if (pci_using_dac) {
- netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_HIGHDMA;
- }
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
/* MTU range: 68 - max_hw_frame_size */
netdev->min_mtu = ETH_MIN_MTU;
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 0f0efee5fc8e..fd07c3679bb1 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -146,11 +146,11 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
break;
}
if (!(mdic & E1000_MDIC_READY)) {
- e_dbg("MDI Read did not complete\n");
+ e_dbg("MDI Read PHY Reg Address %d did not complete\n", offset);
return -E1000_ERR_PHY;
}
if (mdic & E1000_MDIC_ERROR) {
- e_dbg("MDI Error\n");
+ e_dbg("MDI Read PHY Reg Address %d Error\n", offset);
return -E1000_ERR_PHY;
}
if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
@@ -210,11 +210,11 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
break;
}
if (!(mdic & E1000_MDIC_READY)) {
- e_dbg("MDI Write did not complete\n");
+ e_dbg("MDI Write PHY Reg Address %d did not complete\n", offset);
return -E1000_ERR_PHY;
}
if (mdic & E1000_MDIC_ERROR) {
- e_dbg("MDI Error\n");
+ e_dbg("MDI Write PHY Red Address %d Error\n", offset);
return -E1000_ERR_PHY;
}
if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 80c5cecaf2b5..55c6bce5da61 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -854,6 +854,10 @@ struct i40e_vsi {
u64 tx_force_wb;
u64 rx_buf_failed;
u64 rx_page_failed;
+ u64 rx_page_reuse;
+ u64 rx_page_alloc;
+ u64 rx_page_waive;
+ u64 rx_page_busy;
/* These are containers of ring pointers, allocated at run-time */
struct i40e_ring **rx_rings;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 7abef88801fb..42439f725aa4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -769,7 +769,7 @@ static bool i40e_asq_done(struct i40e_hw *hw)
}
/**
- * i40e_asq_send_command_atomic - send command to Admin Queue
+ * i40e_asq_send_command_atomic_exec - send command to Admin Queue
* @hw: pointer to the hw struct
* @desc: prefilled descriptor describing the command (non DMA mem)
* @buff: buffer to use for indirect commands
@@ -780,11 +780,13 @@ static bool i40e_asq_done(struct i40e_hw *hw)
* This is the main send command driver routine for the Admin Queue send
* queue. It runs the queue, cleans the queue, etc
**/
-i40e_status
-i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */ u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details,
- bool is_atomic_context)
+static i40e_status
+i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool is_atomic_context)
{
i40e_status status = 0;
struct i40e_dma_mem *dma_buff = NULL;
@@ -794,8 +796,6 @@ i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
u16 retval = 0;
u32 val = 0;
- mutex_lock(&hw->aq.asq_mutex);
-
if (hw->aq.asq.count == 0) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: Admin queue not initialized.\n");
@@ -969,6 +969,36 @@ i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
}
asq_send_command_error:
+ return status;
+}
+
+/**
+ * i40e_asq_send_command_atomic - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ * @is_atomic_context: is the function called in an atomic context?
+ *
+ * Acquires the lock and calls the main send command execution
+ * routine.
+ **/
+i40e_status
+i40e_asq_send_command_atomic(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool is_atomic_context)
+{
+ i40e_status status;
+
+ mutex_lock(&hw->aq.asq_mutex);
+ status = i40e_asq_send_command_atomic_exec(hw, desc, buff, buff_size,
+ cmd_details,
+ is_atomic_context);
+
mutex_unlock(&hw->aq.asq_mutex);
return status;
}
@@ -983,6 +1013,52 @@ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
}
/**
+ * i40e_asq_send_command_atomic_v2 - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ * @is_atomic_context: is the function called in an atomic context?
+ * @aq_status: pointer to Admin Queue status return value
+ *
+ * Acquires the lock and calls the main send command execution
+ * routine. Returns the last Admin Queue status in aq_status
+ * to avoid race conditions in access to hw->aq.asq_last_status.
+ **/
+i40e_status
+i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool is_atomic_context,
+ enum i40e_admin_queue_err *aq_status)
+{
+ i40e_status status;
+
+ mutex_lock(&hw->aq.asq_mutex);
+ status = i40e_asq_send_command_atomic_exec(hw, desc, buff,
+ buff_size,
+ cmd_details,
+ is_atomic_context);
+ if (aq_status)
+ *aq_status = hw->aq.asq_last_status;
+ mutex_unlock(&hw->aq.asq_mutex);
+ return status;
+}
+
+i40e_status
+i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status)
+{
+ return i40e_asq_send_command_atomic_v2(hw, desc, buff, buff_size,
+ cmd_details, true, aq_status);
+}
+
+/**
* i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
* @desc: pointer to the temp descriptor (non DMA mem)
* @opcode: the opcode can be used to decide which flags to turn off or on
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 9ddeb015eb7e..6aefffd83615 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1899,8 +1899,9 @@ i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
- sizeof(vsi_ctx->info), cmd_details);
+ status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info),
+ cmd_details, true);
if (status)
goto aq_add_vsi_exit;
@@ -2287,8 +2288,9 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
- sizeof(vsi_ctx->info), cmd_details);
+ status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info),
+ cmd_details, true);
vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
@@ -2632,33 +2634,28 @@ get_veb_exit:
}
/**
- * i40e_aq_add_macvlan
- * @hw: pointer to the hw struct
- * @seid: VSI for the mac address
+ * i40e_prepare_add_macvlan
* @mv_list: list of macvlans to be added
+ * @desc: pointer to AQ descriptor structure
* @count: length of the list
- * @cmd_details: pointer to command details structure or NULL
+ * @seid: VSI for the mac address
*
- * Add MAC/VLAN addresses to the HW filtering
+ * Internal helper function that prepares the add macvlan request
+ * and returns the buffer size.
**/
-i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_add_macvlan_element_data *mv_list,
- u16 count, struct i40e_asq_cmd_details *cmd_details)
+static u16
+i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
+ struct i40e_aq_desc *desc, u16 count, u16 seid)
{
- struct i40e_aq_desc desc;
struct i40e_aqc_macvlan *cmd =
- (struct i40e_aqc_macvlan *)&desc.params.raw;
- i40e_status status;
+ (struct i40e_aqc_macvlan *)&desc->params.raw;
u16 buf_size;
int i;
- if (count == 0 || !mv_list || !hw)
- return I40E_ERR_PARAM;
-
buf_size = count * sizeof(*mv_list);
/* prep the rest of the request */
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
+ i40e_fill_default_direct_cmd_desc(desc, i40e_aqc_opc_add_macvlan);
cmd->num_addresses = cpu_to_le16(count);
cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
cmd->seid[1] = 0;
@@ -2669,14 +2666,71 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
mv_list[i].flags |=
cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc->flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
- status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
- cmd_details);
+ return buf_size;
+}
- return status;
+/**
+ * i40e_aq_add_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add MAC/VLAN addresses to the HW filtering
+ **/
+i40e_status
+i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
+
+ return i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
+ cmd_details, true);
+}
+
+/**
+ * i40e_aq_add_macvlan_v2
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ * @aq_status: pointer to Admin Queue status return value
+ *
+ * Add MAC/VLAN addresses to the HW filtering.
+ * The _v2 version returns the last Admin Queue status in aq_status
+ * to avoid race conditions in access to hw->aq.asq_last_status.
+ * It also calls _v2 versions of asq_send_command functions to
+ * get the aq_status on the stack.
+ **/
+i40e_status
+i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status)
+{
+ struct i40e_aq_desc desc;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
+
+ return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
+ cmd_details, true, aq_status);
}
/**
@@ -2715,13 +2769,59 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
if (buf_size > I40E_AQ_LARGE_BUF)
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
- status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
- cmd_details);
+ status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
+ cmd_details, true);
return status;
}
/**
+ * i40e_aq_remove_macvlan_v2
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ * @aq_status: pointer to Admin Queue status return value
+ *
+ * Remove MAC/VLAN addresses from the HW filtering.
+ * The _v2 version returns the last Admin Queue status in aq_status
+ * to avoid race conditions in access to hw->aq.asq_last_status.
+ * It also calls _v2 versions of asq_send_command functions to
+ * get the aq_status on the stack.
+ **/
+i40e_status
+i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status)
+{
+ struct i40e_aqc_macvlan *cmd;
+ struct i40e_aq_desc desc;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(*mv_list);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
+ cmd = (struct i40e_aqc_macvlan *)&desc.params.raw;
+ cmd->num_addresses = cpu_to_le16(count);
+ cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
+ cmd_details, true, aq_status);
+}
+
+/**
* i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
* @hw: pointer to the hw struct
* @opcode: AQ opcode for add or delete mirror rule
@@ -3868,7 +3968,8 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
cmd->seid = cpu_to_le16(seid);
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
+ cmd_details, true);
return status;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 9db5001297c7..be7c6f34d45c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -275,9 +275,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
rx_ring->rx_stats.alloc_page_failed,
rx_ring->rx_stats.alloc_buff_failed);
dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_stats: realloc_count = %lld, page_reuse_count = %lld\n",
+ " rx_rings[%i]: rx_stats: realloc_count = 0, page_reuse_count = %lld\n",
i,
- rx_ring->rx_stats.realloc_count,
rx_ring->rx_stats.page_reuse_count);
dev_info(&pf->pdev->dev,
" rx_rings[%i]: size = %i\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 091f36adbbe1..e48499624d22 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -295,6 +295,10 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
I40E_VSI_STAT("tx_busy", tx_busy),
I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
+ I40E_VSI_STAT("rx_cache_reuse", rx_page_reuse),
+ I40E_VSI_STAT("rx_cache_alloc", rx_page_alloc),
+ I40E_VSI_STAT("rx_cache_waive", rx_page_waive),
+ I40E_VSI_STAT("rx_cache_busy", rx_page_busy),
};
/* These PF_STATs might look like duplicates of some NETDEV_STATs,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 31b03fe78d3b..6778df2177a1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -773,6 +773,7 @@ void i40e_update_veb_stats(struct i40e_veb *veb)
**/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
+ u64 rx_page, rx_buf, rx_reuse, rx_alloc, rx_waive, rx_busy;
struct i40e_pf *pf = vsi->back;
struct rtnl_link_stats64 *ons;
struct rtnl_link_stats64 *ns; /* netdev stats */
@@ -780,7 +781,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
struct i40e_eth_stats *es; /* device's eth stats */
u64 tx_restart, tx_busy;
struct i40e_ring *p;
- u64 rx_page, rx_buf;
u64 bytes, packets;
unsigned int start;
u64 tx_linearize;
@@ -806,6 +806,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
rx_page = 0;
rx_buf = 0;
+ rx_reuse = 0;
+ rx_alloc = 0;
+ rx_waive = 0;
+ rx_busy = 0;
rcu_read_lock();
for (q = 0; q < vsi->num_queue_pairs; q++) {
/* locate Tx ring */
@@ -839,6 +843,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
rx_p += packets;
rx_buf += p->rx_stats.alloc_buff_failed;
rx_page += p->rx_stats.alloc_page_failed;
+ rx_reuse += p->rx_stats.page_reuse_count;
+ rx_alloc += p->rx_stats.page_alloc_count;
+ rx_waive += p->rx_stats.page_waive_count;
+ rx_busy += p->rx_stats.page_busy_count;
if (i40e_enabled_xdp_vsi(vsi)) {
/* locate XDP ring */
@@ -866,6 +874,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
vsi->tx_force_wb = tx_force_wb;
vsi->rx_page_failed = rx_page;
vsi->rx_buf_failed = rx_buf;
+ vsi->rx_page_reuse = rx_reuse;
+ vsi->rx_page_alloc = rx_alloc;
+ vsi->rx_page_waive = rx_waive;
+ vsi->rx_page_busy = rx_busy;
ns->rx_packets = rx_p;
ns->rx_bytes = rx_b;
@@ -2143,19 +2155,19 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
int num_del, int *retval)
{
struct i40e_hw *hw = &vsi->back->hw;
+ enum i40e_admin_queue_err aq_status;
i40e_status aq_ret;
- int aq_err;
- aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
- aq_err = hw->aq.asq_last_status;
+ aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,
+ &aq_status);
/* Explicitly ignore and do not report when firmware returns ENOENT */
- if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
+ if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {
*retval = -EIO;
dev_info(&vsi->back->pdev->dev,
"ignoring delete macvlan error on %s, err %s, aq_err %s\n",
vsi_name, i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw, aq_err));
+ i40e_aq_str(hw, aq_status));
}
}
@@ -2178,10 +2190,10 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
int num_add)
{
struct i40e_hw *hw = &vsi->back->hw;
- int aq_err, fcnt;
+ enum i40e_admin_queue_err aq_status;
+ int fcnt;
- i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
- aq_err = hw->aq.asq_last_status;
+ i40e_aq_add_macvlan_v2(hw, vsi->seid, list, num_add, NULL, &aq_status);
fcnt = i40e_update_filter_state(num_add, list, add_head);
if (fcnt != num_add) {
@@ -2189,17 +2201,19 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, promiscuous mode forced on\n",
- i40e_aq_str(hw, aq_err), vsi_name);
+ i40e_aq_str(hw, aq_status), vsi_name);
} else if (vsi->type == I40E_VSI_SRIOV ||
vsi->type == I40E_VSI_VMDQ1 ||
vsi->type == I40E_VSI_VMDQ2) {
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
- i40e_aq_str(hw, aq_err), vsi_name, vsi_name);
+ i40e_aq_str(hw, aq_status), vsi_name,
+ vsi_name);
} else {
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
- i40e_aq_str(hw, aq_err), vsi_name, vsi->type);
+ i40e_aq_str(hw, aq_status), vsi_name,
+ vsi->type);
}
}
}
@@ -12712,7 +12726,8 @@ static int i40e_set_features(struct net_device *netdev,
else
i40e_vlan_stripping_disable(vsi);
- if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
+ if (!(features & NETIF_F_HW_TC) &&
+ (netdev->features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
dev_err(&pf->pdev->dev,
"Offloaded tc filters active, can't turn hw_tc_offload off");
return -EINVAL;
@@ -13468,6 +13483,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+ netdev->features &= ~NETIF_F_HW_TC;
+
if (vsi->type == I40E_VSI_MAIN) {
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
ether_addr_copy(mac_addr, hw->mac.perm_addr);
@@ -15331,12 +15348,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* set up for high or low dma */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "DMA configuration failed: 0x%x\n", err);
- goto err_dma;
- }
+ dev_err(&pdev->dev,
+ "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
}
/* set up pci connections */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index fe6dca846028..3a38bf8bcde7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -682,10 +682,11 @@ i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
__le16 le_sum;
ret_code = i40e_calc_nvm_checksum(hw, &checksum);
- le_sum = cpu_to_le16(checksum);
- if (!ret_code)
+ if (!ret_code) {
+ le_sum = cpu_to_le16(checksum);
ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
1, &le_sum, true);
+ }
return ret_code;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 9241b6005ad3..ebdcde6f1aeb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -27,10 +27,25 @@ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
i40e_status
+i40e_asq_send_command_v2(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status);
+i40e_status
i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
bool is_atomic_context);
+i40e_status
+i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool is_atomic_context,
+ enum i40e_admin_queue_err *aq_status);
/* debug function for adminq */
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
@@ -150,9 +165,19 @@ i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status
+i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status);
i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status
+i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status);
i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
struct i40e_asq_cmd_details *cmd_details,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 66cc79500c10..0eae5858f2fe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -830,8 +830,6 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
i40e_clean_tx_ring(tx_ring);
kfree(tx_ring->tx_bi);
tx_ring->tx_bi = NULL;
- kfree(tx_ring->xsk_descs);
- tx_ring->xsk_descs = NULL;
if (tx_ring->desc) {
dma_free_coherent(tx_ring->dev, tx_ring->size,
@@ -1382,8 +1380,6 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
new_buff->page_offset = old_buff->page_offset;
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
- rx_ring->rx_stats.page_reuse_count++;
-
/* clear contents of buffer_info */
old_buff->page = NULL;
}
@@ -1433,13 +1429,6 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
if (!tx_ring->tx_bi)
goto err;
- if (ring_is_xdp(tx_ring)) {
- tx_ring->xsk_descs = kcalloc(I40E_MAX_NUM_DESCRIPTORS, sizeof(*tx_ring->xsk_descs),
- GFP_KERNEL);
- if (!tx_ring->xsk_descs)
- goto err;
- }
-
u64_stats_init(&tx_ring->syncp);
/* round up to nearest 4K */
@@ -1463,8 +1452,6 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
return 0;
err:
- kfree(tx_ring->xsk_descs);
- tx_ring->xsk_descs = NULL;
kfree(tx_ring->tx_bi);
tx_ring->tx_bi = NULL;
return -ENOMEM;
@@ -1675,6 +1662,8 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
return false;
}
+ rx_ring->rx_stats.page_alloc_count++;
+
/* map page for use */
dma = dma_map_page_attrs(rx_ring->dev, page, 0,
i40e_rx_pg_size(rx_ring),
@@ -1982,32 +1971,43 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
/**
* i40e_can_reuse_rx_page - Determine if page can be reused for another Rx
* @rx_buffer: buffer containing the page
+ * @rx_stats: rx stats structure for the rx ring
* @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call
*
* If page is reusable, we have a green light for calling i40e_reuse_rx_page,
* which will assign the current buffer to the buffer that next_to_alloc is
* pointing to; otherwise, the DMA mapping needs to be destroyed and
- * page freed
+ * page freed.
+ *
+ * rx_stats will be updated to indicate whether the page was waived
+ * or busy if it could not be reused.
*/
static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
+ struct i40e_rx_queue_stats *rx_stats,
int rx_buffer_pgcnt)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
/* Is any reuse possible? */
- if (!dev_page_is_reusable(page))
+ if (!dev_page_is_reusable(page)) {
+ rx_stats->page_waive_count++;
return false;
+ }
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
+ if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) {
+ rx_stats->page_busy_count++;
return false;
+ }
#else
#define I40E_LAST_OFFSET \
(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
- if (rx_buffer->page_offset > I40E_LAST_OFFSET)
+ if (rx_buffer->page_offset > I40E_LAST_OFFSET) {
+ rx_stats->page_busy_count++;
return false;
+ }
#endif
/* If we have drained the page fragment pool we need to update
@@ -2237,7 +2237,7 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer,
int rx_buffer_pgcnt)
{
- if (i40e_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
+ if (i40e_can_reuse_rx_page(rx_buffer, &rx_ring->rx_stats, rx_buffer_pgcnt)) {
/* hand second half of page back to the ring */
i40e_reuse_rx_page(rx_ring, rx_buffer);
} else {
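
Taken together, the ring counters updated above describe the whole Rx page-recycling lifecycle: page_alloc_count for fresh page allocations, page_reuse_count for recycled half-pages, and page_waive_count/page_busy_count for the two ways reuse can fail. A minimal sketch (the helper name is hypothetical, not part of this patch) of reading the per-VSI aggregates that i40e_update_vsi_stats() fills in and that the new rx_cache_* ethtool strings expose:

/* Hypothetical debug helper: dump the per-VSI page-cache counters that
 * i40e_update_vsi_stats() aggregates from every Rx ring and that ethtool
 * reports as rx_cache_reuse/alloc/waive/busy.
 */
static void i40e_dbg_rx_page_cache(struct i40e_vsi *vsi)
{
	dev_info(&vsi->back->pdev->dev,
		 "rx page cache: reuse=%llu alloc=%llu waive=%llu busy=%llu\n",
		 (unsigned long long)vsi->rx_page_reuse,
		 (unsigned long long)vsi->rx_page_alloc,
		 (unsigned long long)vsi->rx_page_waive,
		 (unsigned long long)vsi->rx_page_busy);
}
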
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index bfc2845c99d1..c471c2da313c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -298,7 +298,9 @@ struct i40e_rx_queue_stats {
u64 alloc_page_failed;
u64 alloc_buff_failed;
u64 page_reuse_count;
- u64 realloc_count;
+ u64 page_alloc_count;
+ u64 page_waive_count;
+ u64 page_busy_count;
};
enum i40e_ring_state_t {
@@ -390,7 +392,6 @@ struct i40e_ring {
u16 rx_offset;
struct xdp_rxq_info xdp_rxq;
struct xsk_buff_pool *xsk_pool;
- struct xdp_desc *xsk_descs; /* For storing descriptors in the AF_XDP ZC path */
} ____cacheline_internodealigned_in_smp;
static inline bool ring_uses_build_skb(struct i40e_ring *ring)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 945b1bb9c6f4..c1d25b0b0ca2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -218,7 +218,6 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
ntu += nb_buffs;
if (ntu == rx_ring->count) {
rx_desc = I40E_RX_DESC(rx_ring, 0);
- xdp = i40e_rx_bi(rx_ring, 0);
ntu = 0;
}
@@ -241,21 +240,25 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
struct xdp_buff *xdp)
{
+ unsigned int totalsize = xdp->data_end - xdp->data_meta;
unsigned int metasize = xdp->data - xdp->data_meta;
- unsigned int datasize = xdp->data_end - xdp->data;
struct sk_buff *skb;
+ net_prefetch(xdp->data_meta);
+
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- xdp->data_end - xdp->data_hard_start,
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
goto out;
- skb_reserve(skb, xdp->data - xdp->data_hard_start);
- memcpy(__skb_put(skb, datasize), xdp->data, datasize);
- if (metasize)
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+ ALIGN(totalsize, sizeof(long)));
+
+ if (metasize) {
skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
out:
xsk_buff_free(xdp);
@@ -324,11 +327,11 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
- u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
u16 next_to_clean = rx_ring->next_to_clean;
u16 count_mask = rx_ring->count - 1;
unsigned int xdp_res, xdp_xmit = 0;
bool failure = false;
+ u16 cleaned_count;
while (likely(total_rx_packets < (unsigned int)budget)) {
union i40e_rx_desc *rx_desc;
@@ -467,11 +470,11 @@ static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
**/
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
{
- struct xdp_desc *descs = xdp_ring->xsk_descs;
+ struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
u32 nb_pkts, nb_processed = 0;
unsigned int total_bytes = 0;
- nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, descs, budget);
+ nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
if (!nb_pkts)
return true;
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 4babe4705a55..49aed3e506a6 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -44,6 +44,9 @@
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
#define PFX "iavf: "
+int iavf_status_to_errno(enum iavf_status status);
+int virtchnl_status_to_errno(enum virtchnl_status_code v_status);
+
/* VSI state flags shared with common code */
enum iavf_vsi_state_t {
__IAVF_VSI_DOWN,
@@ -188,7 +191,7 @@ enum iavf_state_t {
__IAVF_REMOVE, /* driver is being unloaded */
__IAVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */
__IAVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
- __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS,
+ __IAVF_INIT_EXTENDED_CAPS, /* process extended caps which require aq msg exchange */
__IAVF_INIT_CONFIG_ADAPTER,
__IAVF_INIT_SW, /* got resources, setting up structs */
__IAVF_INIT_FAILED, /* init failed, restarting procedure */
@@ -334,6 +337,21 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION BIT_ULL(37)
#define IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION BIT_ULL(38)
+ /* flags for processing extended capability messages during
+ * __IAVF_INIT_EXTENDED_CAPS. Each capability exchange requires
+ * both a SEND and a RECV step, which must be processed in sequence.
+ *
+ * During the __IAVF_INIT_EXTENDED_CAPS state, the driver will
+ * process one flag at a time during each state loop.
+ */
+ u64 extended_caps;
+#define IAVF_EXTENDED_CAP_SEND_VLAN_V2 BIT_ULL(0)
+#define IAVF_EXTENDED_CAP_RECV_VLAN_V2 BIT_ULL(1)
+
+#define IAVF_EXTENDED_CAPS \
+ (IAVF_EXTENDED_CAP_SEND_VLAN_V2 | \
+ IAVF_EXTENDED_CAP_RECV_VLAN_V2)
+
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
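
The SEND/RECV flag pairing above is the extension point for future capability negotiations. As a hedged sketch (all FOO names are hypothetical, not part of this patch), a later exchange would add its own pair of flags, OR them into IAVF_EXTENDED_CAPS, and gain a branch in iavf_init_process_extended_caps() further down in this series:

/* Hypothetical follow-on capability: one SEND and one RECV flag per
 * exchange, processed one step at a time by the watchdog state loop.
 */
#define IAVF_EXTENDED_CAP_SEND_FOO	BIT_ULL(2)
#define IAVF_EXTENDED_CAP_RECV_FOO	BIT_ULL(3)

	/* in iavf_init_process_extended_caps(): */
	if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_FOO) {
		iavf_init_send_foo_caps(adapter);	/* hypothetical helper */
		return;
	}
	if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_FOO) {
		iavf_init_recv_foo_caps(adapter);	/* hypothetical helper */
		return;
	}
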
@@ -515,7 +533,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter);
void iavf_del_vlans(struct iavf_adapter *adapter);
void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags);
void iavf_request_stats(struct iavf_adapter *adapter);
-void iavf_request_reset(struct iavf_adapter *adapter);
+int iavf_request_reset(struct iavf_adapter *adapter);
void iavf_get_hena(struct iavf_adapter *adapter);
void iavf_set_hena(struct iavf_adapter *adapter);
void iavf_set_rss_key(struct iavf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
index e9cc7f6ddc46..34e46a23894f 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
@@ -131,8 +131,8 @@ const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err)
return "IAVF_ERR_INVALID_MAC_ADDR";
case IAVF_ERR_DEVICE_NOT_SUPPORTED:
return "IAVF_ERR_DEVICE_NOT_SUPPORTED";
- case IAVF_ERR_MASTER_REQUESTS_PENDING:
- return "IAVF_ERR_MASTER_REQUESTS_PENDING";
+ case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
+ return "IAVF_ERR_PRIMARY_REQUESTS_PENDING";
case IAVF_ERR_INVALID_LINK_SETTINGS:
return "IAVF_ERR_INVALID_LINK_SETTINGS";
case IAVF_ERR_AUTONEG_NOT_COMPLETE:
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 0e178a0a59c5..190590d32faf 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -51,6 +51,113 @@ MODULE_LICENSE("GPL v2");
static const struct net_device_ops iavf_netdev_ops;
struct workqueue_struct *iavf_wq;
+int iavf_status_to_errno(enum iavf_status status)
+{
+ switch (status) {
+ case IAVF_SUCCESS:
+ return 0;
+ case IAVF_ERR_PARAM:
+ case IAVF_ERR_MAC_TYPE:
+ case IAVF_ERR_INVALID_MAC_ADDR:
+ case IAVF_ERR_INVALID_LINK_SETTINGS:
+ case IAVF_ERR_INVALID_PD_ID:
+ case IAVF_ERR_INVALID_QP_ID:
+ case IAVF_ERR_INVALID_CQ_ID:
+ case IAVF_ERR_INVALID_CEQ_ID:
+ case IAVF_ERR_INVALID_AEQ_ID:
+ case IAVF_ERR_INVALID_SIZE:
+ case IAVF_ERR_INVALID_ARP_INDEX:
+ case IAVF_ERR_INVALID_FPM_FUNC_ID:
+ case IAVF_ERR_QP_INVALID_MSG_SIZE:
+ case IAVF_ERR_INVALID_FRAG_COUNT:
+ case IAVF_ERR_INVALID_ALIGNMENT:
+ case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
+ case IAVF_ERR_INVALID_IMM_DATA_SIZE:
+ case IAVF_ERR_INVALID_VF_ID:
+ case IAVF_ERR_INVALID_HMCFN_ID:
+ case IAVF_ERR_INVALID_PBLE_INDEX:
+ case IAVF_ERR_INVALID_SD_INDEX:
+ case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
+ case IAVF_ERR_INVALID_SD_TYPE:
+ case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
+ case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
+ case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
+ return -EINVAL;
+ case IAVF_ERR_NVM:
+ case IAVF_ERR_NVM_CHECKSUM:
+ case IAVF_ERR_PHY:
+ case IAVF_ERR_CONFIG:
+ case IAVF_ERR_UNKNOWN_PHY:
+ case IAVF_ERR_LINK_SETUP:
+ case IAVF_ERR_ADAPTER_STOPPED:
+ case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
+ case IAVF_ERR_AUTONEG_NOT_COMPLETE:
+ case IAVF_ERR_RESET_FAILED:
+ case IAVF_ERR_BAD_PTR:
+ case IAVF_ERR_SWFW_SYNC:
+ case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
+ case IAVF_ERR_QUEUE_EMPTY:
+ case IAVF_ERR_FLUSHED_QUEUE:
+ case IAVF_ERR_OPCODE_MISMATCH:
+ case IAVF_ERR_CQP_COMPL_ERROR:
+ case IAVF_ERR_BACKING_PAGE_ERROR:
+ case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
+ case IAVF_ERR_MEMCPY_FAILED:
+ case IAVF_ERR_SRQ_ENABLED:
+ case IAVF_ERR_ADMIN_QUEUE_ERROR:
+ case IAVF_ERR_ADMIN_QUEUE_FULL:
+ case IAVF_ERR_BAD_IWARP_CQE:
+ case IAVF_ERR_NVM_BLANK_MODE:
+ case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
+ case IAVF_ERR_DIAG_TEST_FAILED:
+ case IAVF_ERR_FIRMWARE_API_VERSION:
+ case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+ return -EIO;
+ case IAVF_ERR_DEVICE_NOT_SUPPORTED:
+ return -ENODEV;
+ case IAVF_ERR_NO_AVAILABLE_VSI:
+ case IAVF_ERR_RING_FULL:
+ return -ENOSPC;
+ case IAVF_ERR_NO_MEMORY:
+ return -ENOMEM;
+ case IAVF_ERR_TIMEOUT:
+ case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
+ return -ETIMEDOUT;
+ case IAVF_ERR_NOT_IMPLEMENTED:
+ case IAVF_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
+ return -EALREADY;
+ case IAVF_ERR_NOT_READY:
+ return -EBUSY;
+ case IAVF_ERR_BUF_TOO_SHORT:
+ return -EMSGSIZE;
+ }
+
+ return -EIO;
+}
+
+int virtchnl_status_to_errno(enum virtchnl_status_code v_status)
+{
+ switch (v_status) {
+ case VIRTCHNL_STATUS_SUCCESS:
+ return 0;
+ case VIRTCHNL_STATUS_ERR_PARAM:
+ case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
+ return -EINVAL;
+ case VIRTCHNL_STATUS_ERR_NO_MEMORY:
+ return -ENOMEM;
+ case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
+ case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
+ case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
+ return -EIO;
+ case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ }
+
+ return -EIO;
+}
+
/**
* iavf_pdev_to_adapter - go from pci_dev to adapter
* @pdev: pci_dev pointer
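
These two helpers establish the calling convention used throughout the rest of this patch: enum iavf_status and virtchnl_status_code values stay inside the admin-queue layer, while everything returned up the stack is a standard negative errno. A representative caller sketch, mirroring the conversion applied to iavf_send_pf_msg() later in this file:

	enum iavf_status status;

	status = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
	if (status)
		return iavf_status_to_errno(status);	/* e.g. -EIO, -EBUSY */
	return 0;
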
@@ -877,6 +984,7 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
list_add_tail(&f->list, &adapter->mac_filter_list);
f->add = true;
f->is_new_mac = true;
+ f->is_primary = false;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
} else {
f->remove = false;
@@ -910,17 +1018,22 @@ static int iavf_set_mac(struct net_device *netdev, void *p)
f = iavf_find_filter(adapter, hw->mac.addr);
if (f) {
f->remove = true;
+ f->is_primary = true;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
}
f = iavf_add_filter(adapter, addr->sa_data);
-
- spin_unlock_bh(&adapter->mac_vlan_list_lock);
-
if (f) {
+ f->is_primary = true;
ether_addr_copy(hw->mac.addr, addr->sa_data);
}
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+ /* schedule the watchdog task to immediately process the request */
+ if (f)
+ queue_work(iavf_wq, &adapter->watchdog_task.work);
+
return (f == NULL) ? -ENOMEM : 0;
}
@@ -1421,7 +1534,7 @@ static int iavf_config_rss_aq(struct iavf_adapter *adapter)
struct iavf_aqc_get_set_rss_key_data *rss_key =
(struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
struct iavf_hw *hw = &adapter->hw;
- int ret = 0;
+ enum iavf_status status;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -1430,24 +1543,25 @@ static int iavf_config_rss_aq(struct iavf_adapter *adapter)
return -EBUSY;
}
- ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
- if (ret) {
+ status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
+ if (status) {
dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
- iavf_stat_str(hw, ret),
+ iavf_stat_str(hw, status),
iavf_aq_str(hw, hw->aq.asq_last_status));
- return ret;
+ return iavf_status_to_errno(status);
}
- ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
- adapter->rss_lut, adapter->rss_lut_size);
- if (ret) {
+ status = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
+ adapter->rss_lut, adapter->rss_lut_size);
+ if (status) {
dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
- iavf_stat_str(hw, ret),
+ iavf_stat_str(hw, status),
iavf_aq_str(hw, hw->aq.asq_last_status));
+ return iavf_status_to_errno(status);
}
- return ret;
+ return 0;
}
@@ -1517,7 +1631,6 @@ static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
static int iavf_init_rss(struct iavf_adapter *adapter)
{
struct iavf_hw *hw = &adapter->hw;
- int ret;
if (!RSS_PF(adapter)) {
/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
@@ -1533,9 +1646,8 @@ static int iavf_init_rss(struct iavf_adapter *adapter)
iavf_fill_rss_lut(adapter);
netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
- ret = iavf_config_rss(adapter);
- return ret;
+ return iavf_config_rss(adapter);
}
/**
@@ -2003,23 +2115,24 @@ static void iavf_startup(struct iavf_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct iavf_hw *hw = &adapter->hw;
- int err;
+ enum iavf_status status;
+ int ret;
WARN_ON(adapter->state != __IAVF_STARTUP);
/* driver loaded, probe complete */
adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
- err = iavf_set_mac_type(hw);
- if (err) {
- dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err);
+ status = iavf_set_mac_type(hw);
+ if (status) {
+ dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", status);
goto err;
}
- err = iavf_check_reset_complete(hw);
- if (err) {
+ ret = iavf_check_reset_complete(hw);
+ if (ret) {
dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
- err);
+ ret);
goto err;
}
hw->aq.num_arq_entries = IAVF_AQ_LEN;
@@ -2027,14 +2140,15 @@ static void iavf_startup(struct iavf_adapter *adapter)
hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
- err = iavf_init_adminq(hw);
- if (err) {
- dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err);
+ status = iavf_init_adminq(hw);
+ if (status) {
+ dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
+ status);
goto err;
}
- err = iavf_send_api_ver(adapter);
- if (err) {
- dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
+ ret = iavf_send_api_ver(adapter);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to send to PF (%d)\n", ret);
iavf_shutdown_adminq(hw);
goto err;
}
@@ -2070,7 +2184,7 @@ static void iavf_init_version_check(struct iavf_adapter *adapter)
/* aq msg sent, awaiting reply */
err = iavf_verify_api_ver(adapter);
if (err) {
- if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
+ if (err == -EALREADY)
err = iavf_send_api_ver(adapter);
else
dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
@@ -2171,11 +2285,11 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter)
}
}
err = iavf_get_vf_config(adapter);
- if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) {
+ if (err == -EALREADY) {
err = iavf_send_vf_config_msg(adapter);
goto err_alloc;
- } else if (err == IAVF_ERR_PARAM) {
- /* We only get ERR_PARAM if the device is in a very bad
+ } else if (err == -EINVAL) {
+ /* We only get -EINVAL if the device is in a very bad
* state or if we've been disabled for previous bad
* behavior. Either way, we're done now.
*/
@@ -2189,26 +2303,18 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter)
}
err = iavf_parse_vf_resource_msg(adapter);
- if (err)
- goto err_alloc;
-
- err = iavf_send_vf_offload_vlan_v2_msg(adapter);
- if (err == -EOPNOTSUPP) {
- /* underlying PF doesn't support VIRTCHNL_VF_OFFLOAD_VLAN_V2, so
- * go directly to finishing initialization
- */
- iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
- return;
- } else if (err) {
- dev_err(&pdev->dev, "Unable to send offload vlan v2 request (%d)\n",
+ if (err) {
+ dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n",
err);
goto err_alloc;
}
-
- /* underlying PF supports VIRTCHNL_VF_OFFLOAD_VLAN_V2, so update the
- * state accordingly
+ /* Some features require additional messages to negotiate extended
+ * capabilities. These are processed in sequence by the
+ * __IAVF_INIT_EXTENDED_CAPS driver state.
*/
- iavf_change_state(adapter, __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS);
+ adapter->extended_caps = IAVF_EXTENDED_CAPS;
+
+ iavf_change_state(adapter, __IAVF_INIT_EXTENDED_CAPS);
return;
err_alloc:
@@ -2219,35 +2325,93 @@ err:
}
/**
- * iavf_init_get_offload_vlan_v2_caps - part of driver startup
+ * iavf_init_send_offload_vlan_v2_caps - part of initializing VLAN V2 caps
* @adapter: board private structure
*
- * Function processes __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS driver state if the
- * VF negotiates VIRTCHNL_VF_OFFLOAD_VLAN_V2. If VIRTCHNL_VF_OFFLOAD_VLAN_V2 is
- * not negotiated, then this state will never be entered.
+ * Function processes send of the extended VLAN V2 capability message to the
+ * PF. Must clear IAVF_EXTENDED_CAP_RECV_VLAN_V2 if the message is not sent,
+ * e.g. due to PF not negotiating VIRTCHNL_VF_OFFLOAD_VLAN_V2.
+ */
+static void iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter *adapter)
+{
+ int ret;
+
+ WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2));
+
+ ret = iavf_send_vf_offload_vlan_v2_msg(adapter);
+ if (ret && ret == -EOPNOTSUPP) {
+ /* PF does not support VIRTCHNL_VF_OFFLOAD_VLAN_V2. In this case,
+ * we did not send the capability exchange message and do not
+ * expect a response.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
+ }
+
+ /* We sent the message, so move on to the next step */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2;
+}
+
+/**
+ * iavf_init_recv_offload_vlan_v2_caps - part of initializing VLAN V2 caps
+ * @adapter: board private structure
+ *
+ * Function processes receipt of the extended VLAN V2 capability message from
+ * the PF.
**/
-static void iavf_init_get_offload_vlan_v2_caps(struct iavf_adapter *adapter)
+static void iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter *adapter)
{
int ret;
- WARN_ON(adapter->state != __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS);
+ WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2));
memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps));
ret = iavf_get_vf_vlan_v2_caps(adapter);
- if (ret) {
- if (ret == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
- iavf_send_vf_offload_vlan_v2_msg(adapter);
+ if (ret)
goto err;
- }
- iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
+ /* We've processed receipt of the VLAN V2 caps message */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
return;
err:
+ /* We didn't receive a reply. Make sure we try sending again when
+ * __IAVF_INIT_FAILED attempts to recover.
+ */
+ adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_VLAN_V2;
iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
/**
+ * iavf_init_process_extended_caps - Part of driver startup
+ * @adapter: board private structure
+ *
+ * Function processes __IAVF_INIT_EXTENDED_CAPS driver state. This state
+ * handles negotiating capabilities for features which require an additional
+ * message.
+ *
+ * Once all extended capabilities exchanges are finished, the driver will
+ * transition into __IAVF_INIT_CONFIG_ADAPTER.
+ */
+static void iavf_init_process_extended_caps(struct iavf_adapter *adapter)
+{
+ WARN_ON(adapter->state != __IAVF_INIT_EXTENDED_CAPS);
+
+ /* Process capability exchange for VLAN V2 */
+ if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) {
+ iavf_init_send_offload_vlan_v2_caps(adapter);
+ return;
+ } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) {
+ iavf_init_recv_offload_vlan_v2_caps(adapter);
+ return;
+ }
+
+ /* When we reach here, no further extended capabilities exchanges are
+ * necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER
+ */
+ iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
+}
+
+/**
* iavf_init_config_adapter - last part of driver startup
* @adapter: board private structure
*
@@ -2411,8 +2575,8 @@ static void iavf_watchdog_task(struct work_struct *work)
queue_delayed_work(iavf_wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
return;
- case __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS:
- iavf_init_get_offload_vlan_v2_caps(adapter);
+ case __IAVF_INIT_EXTENDED_CAPS:
+ iavf_init_process_extended_caps(adapter);
mutex_unlock(&adapter->crit_lock);
queue_delayed_work(iavf_wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
@@ -2626,6 +2790,7 @@ static void iavf_reset_task(struct work_struct *work)
struct iavf_hw *hw = &adapter->hw;
struct iavf_mac_filter *f, *ftmp;
struct iavf_cloud_filter *cf;
+ enum iavf_status status;
u32 reg_val;
int i = 0, err;
bool running;
@@ -2727,10 +2892,12 @@ continue_reset:
/* kill and reinit the admin queue */
iavf_shutdown_adminq(hw);
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
- err = iavf_init_adminq(hw);
- if (err)
+ status = iavf_init_adminq(hw);
+ if (status) {
dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
- err);
+ status);
+ goto reset_err;
+ }
adapter->aq_required = 0;
if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
@@ -4433,12 +4600,9 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "DMA configuration failed: 0x%x\n", err);
- goto err_dma;
- }
+ dev_err(&pdev->dev,
+ "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
}
err = pci_request_regions(pdev, iavf_driver_name);
@@ -4767,8 +4931,6 @@ static struct pci_driver iavf_driver = {
**/
static int __init iavf_init_module(void)
{
- int ret;
-
pr_info("iavf: %s\n", iavf_driver_string);
pr_info("%s\n", iavf_copyright);
@@ -4779,8 +4941,7 @@ static int __init iavf_init_module(void)
pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
return -ENOMEM;
}
- ret = pci_register_driver(&iavf_driver);
- return ret;
+ return pci_register_driver(&iavf_driver);
}
module_init(iavf_init_module);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_status.h b/drivers/net/ethernet/intel/iavf/iavf_status.h
index 46e3d1f6b604..2ea5c7c339bc 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_status.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_status.h
@@ -18,7 +18,7 @@ enum iavf_status {
IAVF_ERR_ADAPTER_STOPPED = -9,
IAVF_ERR_INVALID_MAC_ADDR = -10,
IAVF_ERR_DEVICE_NOT_SUPPORTED = -11,
- IAVF_ERR_MASTER_REQUESTS_PENDING = -12,
+ IAVF_ERR_PRIMARY_REQUESTS_PENDING = -12,
IAVF_ERR_INVALID_LINK_SETTINGS = -13,
IAVF_ERR_AUTONEG_NOT_COMPLETE = -14,
IAVF_ERR_RESET_FAILED = -15,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 8cbe7ad1347c..978f651c6b09 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -374,29 +374,60 @@ static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
return &q_vector->rx == rc;
}
-static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector)
+#define IAVF_AIM_MULTIPLIER_100G 2560
+#define IAVF_AIM_MULTIPLIER_50G 1280
+#define IAVF_AIM_MULTIPLIER_40G 1024
+#define IAVF_AIM_MULTIPLIER_20G 512
+#define IAVF_AIM_MULTIPLIER_10G 256
+#define IAVF_AIM_MULTIPLIER_1G 32
+
+static unsigned int iavf_mbps_itr_multiplier(u32 speed_mbps)
{
- unsigned int divisor;
+ switch (speed_mbps) {
+ case SPEED_100000:
+ return IAVF_AIM_MULTIPLIER_100G;
+ case SPEED_50000:
+ return IAVF_AIM_MULTIPLIER_50G;
+ case SPEED_40000:
+ return IAVF_AIM_MULTIPLIER_40G;
+ case SPEED_25000:
+ case SPEED_20000:
+ return IAVF_AIM_MULTIPLIER_20G;
+ case SPEED_10000:
+ default:
+ return IAVF_AIM_MULTIPLIER_10G;
+ case SPEED_1000:
+ case SPEED_100:
+ return IAVF_AIM_MULTIPLIER_1G;
+ }
+}
- switch (q_vector->adapter->link_speed) {
+static unsigned int
+iavf_virtchnl_itr_multiplier(enum virtchnl_link_speed speed_virtchnl)
+{
+ switch (speed_virtchnl) {
case VIRTCHNL_LINK_SPEED_40GB:
- divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024;
- break;
+ return IAVF_AIM_MULTIPLIER_40G;
case VIRTCHNL_LINK_SPEED_25GB:
case VIRTCHNL_LINK_SPEED_20GB:
- divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512;
- break;
- default:
+ return IAVF_AIM_MULTIPLIER_20G;
case VIRTCHNL_LINK_SPEED_10GB:
- divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256;
- break;
+ default:
+ return IAVF_AIM_MULTIPLIER_10G;
case VIRTCHNL_LINK_SPEED_1GB:
case VIRTCHNL_LINK_SPEED_100MB:
- divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32;
- break;
+ return IAVF_AIM_MULTIPLIER_1G;
}
+}
- return divisor;
+static unsigned int iavf_itr_divisor(struct iavf_adapter *adapter)
+{
+ if (ADV_LINK_SUPPORT(adapter))
+ return IAVF_ITR_ADAPTIVE_MIN_INC *
+ iavf_mbps_itr_multiplier(adapter->link_speed_mbps);
+ else
+ return IAVF_ITR_ADAPTIVE_MIN_INC *
+ iavf_virtchnl_itr_multiplier(adapter->link_speed);
}
/**
@@ -586,8 +617,9 @@ adjust_by_size:
* Use addition as we have already recorded the new latency flag
* for the ITR value.
*/
- itr += DIV_ROUND_UP(avg_wire_size, iavf_itr_divisor(q_vector)) *
- IAVF_ITR_ADAPTIVE_MIN_INC;
+ itr += DIV_ROUND_UP(avg_wire_size,
+ iavf_itr_divisor(q_vector->adapter)) *
+ IAVF_ITR_ADAPTIVE_MIN_INC;
if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
itr &= IAVF_ITR_ADAPTIVE_LATENCY;
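
As a worked example of the refactor (assuming IAVF_ITR_ADAPTIVE_MIN_INC is 2, its value elsewhere in iavf_txrx.h): with ADV_LINK_SUPPORT and adapter->link_speed_mbps == SPEED_40000, iavf_itr_divisor() returns 2 * IAVF_AIM_MULTIPLIER_40G = 2048, so an avg_wire_size of 3000 bytes adds DIV_ROUND_UP(3000, 2048) * 2 = 4 to itr. That is the same increment the old switch on virtchnl speeds produced for VIRTCHNL_LINK_SPEED_40GB; only the source of the link speed changes, not the arithmetic.
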
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 5263cefe46f5..782450d5c12f 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -22,17 +22,17 @@ static int iavf_send_pf_msg(struct iavf_adapter *adapter,
enum virtchnl_ops op, u8 *msg, u16 len)
{
struct iavf_hw *hw = &adapter->hw;
- enum iavf_status err;
+ enum iavf_status status;
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
return 0; /* nothing to see here, move along */
- err = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
- if (err)
- dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
- op, iavf_stat_str(hw, err),
+ status = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
+ if (status)
+ dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, status %s, aq_err %s\n",
+ op, iavf_stat_str(hw, status),
iavf_aq_str(hw, hw->aq.asq_last_status));
- return err;
+ return iavf_status_to_errno(status);
}
/**
@@ -55,6 +55,41 @@ int iavf_send_api_ver(struct iavf_adapter *adapter)
}
/**
+ * iavf_poll_virtchnl_msg
+ * @hw: HW configuration structure
+ * @event: event to populate on success
+ * @op_to_poll: requested virtchnl op to poll for
+ *
+ * Poll the admin receive queue for a virtchnl message matching op_to_poll.
+ * Returns 0 if a message with the requested opcode is found in the queue,
+ * or an error code if no matching message is waiting or another failure occurs.
+ */
+static int
+iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event,
+ enum virtchnl_ops op_to_poll)
+{
+ enum virtchnl_ops received_op;
+ enum iavf_status status;
+ u32 v_retval;
+
+ while (1) {
+ /* When the AQ is empty, iavf_clean_arq_element will return
+ * nonzero and this loop will terminate.
+ */
+ status = iavf_clean_arq_element(hw, event, NULL);
+ if (status != IAVF_SUCCESS)
+ return iavf_status_to_errno(status);
+ received_op =
+ (enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high);
+ if (op_to_poll == received_op)
+ break;
+ }
+
+ v_retval = le32_to_cpu(event->desc.cookie_low);
+ return virtchnl_status_to_errno((enum virtchnl_status_code)v_retval);
+}
+
+/**
* iavf_verify_api_ver
* @adapter: adapter structure
*
@@ -65,55 +100,28 @@ int iavf_send_api_ver(struct iavf_adapter *adapter)
**/
int iavf_verify_api_ver(struct iavf_adapter *adapter)
{
- struct virtchnl_version_info *pf_vvi;
- struct iavf_hw *hw = &adapter->hw;
struct iavf_arq_event_info event;
- enum virtchnl_ops op;
- enum iavf_status err;
+ int err;
event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
- event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
- if (!event.msg_buf) {
- err = -ENOMEM;
- goto out;
- }
-
- while (1) {
- err = iavf_clean_arq_element(hw, &event, NULL);
- /* When the AQ is empty, iavf_clean_arq_element will return
- * nonzero and this loop will terminate.
- */
- if (err)
- goto out_alloc;
- op =
- (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
- if (op == VIRTCHNL_OP_VERSION)
- break;
- }
-
+ event.msg_buf = kzalloc(IAVF_MAX_AQ_BUF_SIZE, GFP_KERNEL);
+ if (!event.msg_buf)
+ return -ENOMEM;
- err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
- if (err)
- goto out_alloc;
+ err = iavf_poll_virtchnl_msg(&adapter->hw, &event, VIRTCHNL_OP_VERSION);
+ if (!err) {
+ struct virtchnl_version_info *pf_vvi =
+ (struct virtchnl_version_info *)event.msg_buf;
+ adapter->pf_version = *pf_vvi;
- if (op != VIRTCHNL_OP_VERSION) {
- dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
- op);
- err = -EIO;
- goto out_alloc;
+ if (pf_vvi->major > VIRTCHNL_VERSION_MAJOR ||
+ (pf_vvi->major == VIRTCHNL_VERSION_MAJOR &&
+ pf_vvi->minor > VIRTCHNL_VERSION_MINOR))
+ err = -EIO;
}
- pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
- adapter->pf_version = *pf_vvi;
-
- if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
- ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
- (pf_vvi->minor > VIRTCHNL_VERSION_MINOR)))
- err = -EIO;
-
-out_alloc:
kfree(event.msg_buf);
-out:
+
return err;
}
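
iavf_poll_virtchnl_msg() factors out the receive-and-match loop that iavf_verify_api_ver(), iavf_get_vf_config() and iavf_get_vf_vlan_v2_caps() previously open-coded: it drains ARQ events until one carries the requested opcode (consuming and discarding any others) and converts the matching message's cookie_low status into a negative errno. A minimal caller sketch (buffer size and opcode are illustrative):

	struct iavf_arq_event_info event = { .buf_len = IAVF_MAX_AQ_BUF_SIZE };
	int err;

	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return -ENOMEM;

	err = iavf_poll_virtchnl_msg(&adapter->hw, &event, VIRTCHNL_OP_VERSION);
	/* on success, event.msg_buf holds the reply payload */
	kfree(event.msg_buf);
	return err;
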
@@ -208,33 +216,17 @@ int iavf_get_vf_config(struct iavf_adapter *adapter)
{
struct iavf_hw *hw = &adapter->hw;
struct iavf_arq_event_info event;
- enum virtchnl_ops op;
- enum iavf_status err;
u16 len;
+ int err;
- len = sizeof(struct virtchnl_vf_resource) +
+ len = sizeof(struct virtchnl_vf_resource) +
IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
event.buf_len = len;
- event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
- if (!event.msg_buf) {
- err = -ENOMEM;
- goto out;
- }
-
- while (1) {
- /* When the AQ is empty, iavf_clean_arq_element will return
- * nonzero and this loop will terminate.
- */
- err = iavf_clean_arq_element(hw, &event, NULL);
- if (err)
- goto out_alloc;
- op =
- (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
- if (op == VIRTCHNL_OP_GET_VF_RESOURCES)
- break;
- }
+ event.msg_buf = kzalloc(len, GFP_KERNEL);
+ if (!event.msg_buf)
+ return -ENOMEM;
- err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
+ err = iavf_poll_virtchnl_msg(hw, &event, VIRTCHNL_OP_GET_VF_RESOURCES);
memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));
/* some PFs send more queues than we should have so validate that
@@ -243,48 +235,32 @@ int iavf_get_vf_config(struct iavf_adapter *adapter)
if (!err)
iavf_validate_num_queues(adapter);
iavf_vf_parse_hw_config(hw, adapter->vf_res);
-out_alloc:
+
kfree(event.msg_buf);
-out:
+
return err;
}
int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
{
- struct iavf_hw *hw = &adapter->hw;
struct iavf_arq_event_info event;
- enum virtchnl_ops op;
- enum iavf_status err;
+ int err;
u16 len;
- len = sizeof(struct virtchnl_vlan_caps);
+ len = sizeof(struct virtchnl_vlan_caps);
event.buf_len = len;
- event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
- if (!event.msg_buf) {
- err = -ENOMEM;
- goto out;
- }
-
- while (1) {
- /* When the AQ is empty, iavf_clean_arq_element will return
- * nonzero and this loop will terminate.
- */
- err = iavf_clean_arq_element(hw, &event, NULL);
- if (err)
- goto out_alloc;
- op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
- if (op == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS)
- break;
- }
+ event.msg_buf = kzalloc(len, GFP_KERNEL);
+ if (!event.msg_buf)
+ return -ENOMEM;
- err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
- if (err)
- goto out_alloc;
+ err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
+ VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS);
+ if (!err)
+ memcpy(&adapter->vlan_v2_caps, event.msg_buf,
+ min(event.msg_len, len));
- memcpy(&adapter->vlan_v2_caps, event.msg_buf, min(event.msg_len, len));
-out_alloc:
kfree(event.msg_buf);
-out:
+
return err;
}
@@ -454,6 +430,20 @@ void iavf_map_queues(struct iavf_adapter *adapter)
}
/**
+ * iavf_set_mac_addr_type - Set the correct request type from the filter type
+ * @virtchnl_ether_addr: pointer to requested list element
+ * @filter: pointer to requested filter
+ **/
+static void
+iavf_set_mac_addr_type(struct virtchnl_ether_addr *virtchnl_ether_addr,
+ const struct iavf_mac_filter *filter)
+{
+ virtchnl_ether_addr->type = filter->is_primary ?
+ VIRTCHNL_ETHER_ADDR_PRIMARY :
+ VIRTCHNL_ETHER_ADDR_EXTRA;
+}
+
+/**
* iavf_add_ether_addrs
* @adapter: adapter structure
*
@@ -508,6 +498,7 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter)
list_for_each_entry(f, &adapter->mac_filter_list, list) {
if (f->add) {
ether_addr_copy(veal->list[i].addr, f->macaddr);
+ iavf_set_mac_addr_type(&veal->list[i], f);
i++;
f->add = false;
if (i == count)
@@ -577,6 +568,7 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter)
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
if (f->remove) {
ether_addr_copy(veal->list[i].addr, f->macaddr);
+ iavf_set_mac_addr_type(&veal->list[i], f);
i++;
list_del(&f->list);
kfree(f);
@@ -1827,11 +1819,13 @@ void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter)
*
* Request that the PF reset this VF. No response is expected.
**/
-void iavf_request_reset(struct iavf_adapter *adapter)
+int iavf_request_reset(struct iavf_adapter *adapter)
{
+ int err;
/* Don't check CURRENT_OP - this is always higher priority */
- iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
+ err = iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ return err;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index c36faa7d1471..9183d480b70b 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -18,8 +18,12 @@ ice-y := ice_main.o \
ice_txrx_lib.o \
ice_txrx.o \
ice_fltr.o \
+ ice_pf_vsi_vlan_ops.o \
+ ice_vsi_vlan_ops.o \
+ ice_vsi_vlan_lib.o \
ice_fdir.o \
ice_ethtool_fdir.o \
+ ice_vlan_mode.o \
ice_flex_pipe.o \
ice_flow.o \
ice_idc.o \
@@ -29,9 +33,16 @@ ice-y := ice_main.o \
ice_ethtool.o \
ice_repr.o \
ice_tc_lib.o
-ice-$(CONFIG_PCI_IOV) += ice_virtchnl_allowlist.o
-ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice_virtchnl_fdir.o
+ice-$(CONFIG_PCI_IOV) += \
+ ice_sriov.o \
+ ice_virtchnl.o \
+ ice_virtchnl_allowlist.o \
+ ice_virtchnl_fdir.o \
+ ice_vf_mbx.o \
+ ice_vf_vsi_vlan_ops.o \
+ ice_vf_lib.o
ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o
+ice-$(CONFIG_TTY) += ice_gnss.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index bea1d1e39fa2..b0b27bfcd7a2 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -51,9 +51,7 @@
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include <net/vxlan.h>
-#if IS_ENABLED(CONFIG_DCB)
-#include <scsi/iscsi_proto.h>
-#endif /* CONFIG_DCB */
+#include <net/gtp.h>
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
@@ -63,8 +61,8 @@
#include "ice_flow.h"
#include "ice_sched.h"
#include "ice_idc_int.h"
-#include "ice_virtchnl_pf.h"
#include "ice_sriov.h"
+#include "ice_vf_mbx.h"
#include "ice_ptp.h"
#include "ice_fdir.h"
#include "ice_xsk.h"
@@ -72,6 +70,8 @@
#include "ice_repr.h"
#include "ice_eswitch.h"
#include "ice_lag.h"
+#include "ice_vsi_vlan_ops.h"
+#include "ice_gnss.h"
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
@@ -107,7 +107,6 @@
/* All VF control VSIs share the same IRQ, so assign a unique ID for them */
#define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_RDMA_VEC_ID - 1)
#define ICE_INVAL_Q_INDEX 0xffff
-#define ICE_INVAL_VFID 256
#define ICE_MAX_RXQS_PER_TC 256 /* Used when setting VSI context per TC Rx queues */
@@ -183,6 +182,7 @@
enum ice_feature {
ICE_F_DSCP,
ICE_F_SMA_CTRL,
+ ICE_F_GNSS,
ICE_F_MAX
};
@@ -290,6 +290,7 @@ enum ice_pf_state {
ICE_LINK_DEFAULT_OVERRIDE_PENDING,
ICE_PHY_INIT_COMPLETE,
ICE_FD_VF_FLUSH_CTX, /* set at FD Rx IRQ or timeout */
+ ICE_AUX_ERR_PENDING,
ICE_STATE_NBITS /* must be last */
};
@@ -330,7 +331,7 @@ struct ice_vsi {
u16 vsi_num; /* HW (absolute) index of this VSI */
u16 idx; /* software index in pf->vsi[] */
- s16 vf_id; /* VF ID for SR-IOV VSIs */
+ struct ice_vf *vf; /* VF associated with this VSI */
u16 ethtype; /* Ethernet protocol for pause frame */
u16 num_gfltr;
@@ -367,6 +368,8 @@ struct ice_vsi {
u8 irqs_ready:1;
u8 current_isup:1; /* Sync 'link up' logging */
u8 stat_offsets_loaded:1;
+ struct ice_vsi_vlan_ops inner_vlan_ops;
+ struct ice_vsi_vlan_ops outer_vlan_ops;
u16 num_vlan;
/* queue information */
@@ -467,7 +470,6 @@ enum ice_pf_flags {
ICE_FLAG_FD_ENA,
ICE_FLAG_PTP_SUPPORTED, /* PTP is supported by NVM */
ICE_FLAG_PTP, /* PTP is enabled by software */
- ICE_FLAG_AUX_ENA,
ICE_FLAG_ADV_FEATURES,
ICE_FLAG_TC_MQPRIO, /* support for Multi queue TC */
ICE_FLAG_CLS_FLOWER,
@@ -481,9 +483,11 @@ enum ice_pf_flags {
ICE_FLAG_LEGACY_RX,
ICE_FLAG_VF_TRUE_PROMISC_ENA,
ICE_FLAG_MDD_AUTO_RESET_VF,
+ ICE_FLAG_VF_VLAN_PRUNING,
ICE_FLAG_LINK_LENIENT_MODE_ENA,
ICE_FLAG_PLUG_AUX_DEV,
ICE_FLAG_MTU_CHANGED,
+ ICE_FLAG_GNSS, /* GNSS successfully initialized */
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -524,15 +528,7 @@ struct ice_pf {
struct ice_vsi **vsi; /* VSIs created by the driver */
struct ice_sw *first_sw; /* first switch created by firmware */
u16 eswitch_mode; /* current mode of eswitch */
- /* Virtchnl/SR-IOV config info */
- struct ice_vf *vf;
- u16 num_alloc_vfs; /* actual number of VFs allocated */
- u16 num_vfs_supported; /* num VFs supported for this PF */
- u16 num_qps_per_vf;
- u16 num_msix_per_vf;
- /* used to ratelimit the MDD event logging */
- unsigned long last_printed_mdd_jiffies;
- DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT);
+ struct ice_vfs vfs;
DECLARE_BITMAP(features, ICE_F_MAX);
DECLARE_BITMAP(state, ICE_STATE_NBITS);
DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
@@ -547,6 +543,9 @@ struct ice_pf {
struct mutex tc_mutex; /* lock to protect TC changes */
u32 msg_enable;
struct ice_ptp ptp;
+ struct tty_driver *ice_gnss_tty_driver;
+ struct tty_port gnss_tty_port;
+ struct gnss_serial *gnss_serial;
u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */
u16 rdma_base_vector;
@@ -559,6 +558,7 @@ struct ice_pf {
wait_queue_head_t reset_wait_queue;
u32 hw_csum_rx_error;
+ u32 oicr_err_reg;
u16 oicr_idx; /* Other interrupt cause MSIX vector index */
u16 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
u16 max_pf_txqs; /* Total Tx queues PF wide */
@@ -834,6 +834,9 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf);
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx);
void ice_update_vsi_stats(struct ice_vsi *vsi);
void ice_update_pf_stats(struct ice_pf *pf);
+void
+ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
+ struct ice_q_stats stats, u64 *pkts, u64 *bytes);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_vsi_cfg(struct ice_vsi *vsi);
@@ -887,7 +890,6 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
{
if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
- set_bit(ICE_FLAG_AUX_ENA, pf->flags);
set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
}
}
@@ -909,6 +911,5 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
ice_unplug_aux_dev(pf);
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
- clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
}
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index ad1dcfa5ff65..b25e27c4d887 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -226,6 +226,15 @@ struct ice_aqc_get_sw_cfg_resp_elem {
#define ICE_AQC_GET_SW_CONF_RESP_IS_VF BIT(15)
};
+/* Set Port parameters, (direct, 0x0203) */
+struct ice_aqc_set_port_params {
+ __le16 cmd_flags;
+#define ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA BIT(2)
+ __le16 bad_frame_vsi;
+ __le16 swid;
+ u8 reserved[10];
+};
+
/* These resource type defines are used for all switch resource
* commands where a resource type is required, such as:
* Get Resource Allocation command (indirect 0x0204)
@@ -283,6 +292,40 @@ struct ice_aqc_alloc_free_res_elem {
struct ice_aqc_res_elem elem[];
};
+/* Request buffer for Set VLAN Mode AQ command (indirect 0x020C) */
+struct ice_aqc_set_vlan_mode {
+ u8 reserved;
+ u8 l2tag_prio_tagging;
+#define ICE_AQ_VLAN_PRIO_TAG_S 0
+#define ICE_AQ_VLAN_PRIO_TAG_M (0x7 << ICE_AQ_VLAN_PRIO_TAG_S)
+#define ICE_AQ_VLAN_PRIO_TAG_NOT_SUPPORTED 0x0
+#define ICE_AQ_VLAN_PRIO_TAG_STAG 0x1
+#define ICE_AQ_VLAN_PRIO_TAG_OUTER_CTAG 0x2
+#define ICE_AQ_VLAN_PRIO_TAG_OUTER_VLAN 0x3
+#define ICE_AQ_VLAN_PRIO_TAG_INNER_CTAG 0x4
+#define ICE_AQ_VLAN_PRIO_TAG_MAX 0x4
+#define ICE_AQ_VLAN_PRIO_TAG_ERROR 0x7
+ u8 l2tag_reserved[64];
+ u8 rdma_packet;
+#define ICE_AQ_VLAN_RDMA_TAG_S 0
+#define ICE_AQ_VLAN_RDMA_TAG_M (0x3F << ICE_AQ_VLAN_RDMA_TAG_S)
+#define ICE_AQ_SVM_VLAN_RDMA_PKT_FLAG_SETTING 0x10
+#define ICE_AQ_DVM_VLAN_RDMA_PKT_FLAG_SETTING 0x1A
+ u8 rdma_reserved[2];
+ u8 mng_vlan_prot_id;
+#define ICE_AQ_VLAN_MNG_PROTOCOL_ID_OUTER 0x10
+#define ICE_AQ_VLAN_MNG_PROTOCOL_ID_INNER 0x11
+ u8 prot_id_reserved[30];
+};
+
+/* Response buffer for Get VLAN Mode AQ command (indirect 0x020D) */
+struct ice_aqc_get_vlan_mode {
+ u8 vlan_mode;
+#define ICE_AQ_VLAN_MODE_DVM_ENA BIT(0)
+ u8 l2tag_prio_tagging;
+ u8 reserved[98];
+};
+
/* Add VSI (indirect 0x0210)
* Update VSI (indirect 0x0211)
* Get VSI (indirect 0x0212)
@@ -343,108 +386,113 @@ struct ice_aqc_vsi_props {
#define ICE_AQ_VSI_SW_FLAG_SRC_PRUNE BIT(7)
u8 sw_flags2;
#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S 0
-#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M \
- (0xF << ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S)
+#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M (0xF << ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S)
#define ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA BIT(0)
#define ICE_AQ_VSI_SW_FLAG_LAN_ENA BIT(4)
u8 veb_stat_id;
#define ICE_AQ_VSI_SW_VEB_STAT_ID_S 0
-#define ICE_AQ_VSI_SW_VEB_STAT_ID_M (0x1F << ICE_AQ_VSI_SW_VEB_STAT_ID_S)
+#define ICE_AQ_VSI_SW_VEB_STAT_ID_M (0x1F << ICE_AQ_VSI_SW_VEB_STAT_ID_S)
#define ICE_AQ_VSI_SW_VEB_STAT_ID_VALID BIT(5)
/* security section */
u8 sec_flags;
#define ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD BIT(0)
#define ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF BIT(2)
-#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S 4
-#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_M (0xF << ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S)
+#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S 4
+#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_M (0xF << ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S)
#define ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA BIT(0)
u8 sec_reserved;
/* VLAN section */
- __le16 pvid; /* VLANS include priority bits */
- u8 pvlan_reserved[2];
- u8 vlan_flags;
-#define ICE_AQ_VSI_VLAN_MODE_S 0
-#define ICE_AQ_VSI_VLAN_MODE_M (0x3 << ICE_AQ_VSI_VLAN_MODE_S)
-#define ICE_AQ_VSI_VLAN_MODE_UNTAGGED 0x1
-#define ICE_AQ_VSI_VLAN_MODE_TAGGED 0x2
-#define ICE_AQ_VSI_VLAN_MODE_ALL 0x3
-#define ICE_AQ_VSI_PVLAN_INSERT_PVID BIT(2)
-#define ICE_AQ_VSI_VLAN_EMOD_S 3
-#define ICE_AQ_VSI_VLAN_EMOD_M (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
-#define ICE_AQ_VSI_VLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_VLAN_EMOD_S)
-#define ICE_AQ_VSI_VLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_VLAN_EMOD_S)
-#define ICE_AQ_VSI_VLAN_EMOD_STR (0x2 << ICE_AQ_VSI_VLAN_EMOD_S)
-#define ICE_AQ_VSI_VLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
- u8 pvlan_reserved2[3];
+ __le16 port_based_inner_vlan; /* VLANS include priority bits */
+ u8 inner_vlan_reserved[2];
+ u8 inner_vlan_flags;
+#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_S 0
+#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_M (0x3 << ICE_AQ_VSI_INNER_VLAN_TX_MODE_S)
+#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED 0x1
+#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTTAGGED 0x2
+#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL 0x3
+#define ICE_AQ_VSI_INNER_VLAN_INSERT_PVID BIT(2)
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_S 3
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_M (0x3 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH (0x0 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_UP (0x1 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR (0x2 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING (0x3 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+ u8 inner_vlan_reserved2[3];
/* ingress egress up sections */
__le32 ingress_table; /* bitmap, 3 bits per up */
-#define ICE_AQ_VSI_UP_TABLE_UP0_S 0
-#define ICE_AQ_VSI_UP_TABLE_UP0_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP0_S)
-#define ICE_AQ_VSI_UP_TABLE_UP1_S 3
-#define ICE_AQ_VSI_UP_TABLE_UP1_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP1_S)
-#define ICE_AQ_VSI_UP_TABLE_UP2_S 6
-#define ICE_AQ_VSI_UP_TABLE_UP2_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP2_S)
-#define ICE_AQ_VSI_UP_TABLE_UP3_S 9
-#define ICE_AQ_VSI_UP_TABLE_UP3_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP3_S)
-#define ICE_AQ_VSI_UP_TABLE_UP4_S 12
-#define ICE_AQ_VSI_UP_TABLE_UP4_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP4_S)
-#define ICE_AQ_VSI_UP_TABLE_UP5_S 15
-#define ICE_AQ_VSI_UP_TABLE_UP5_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP5_S)
-#define ICE_AQ_VSI_UP_TABLE_UP6_S 18
-#define ICE_AQ_VSI_UP_TABLE_UP6_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP6_S)
-#define ICE_AQ_VSI_UP_TABLE_UP7_S 21
-#define ICE_AQ_VSI_UP_TABLE_UP7_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP7_S)
+#define ICE_AQ_VSI_UP_TABLE_UP0_S 0
+#define ICE_AQ_VSI_UP_TABLE_UP0_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP0_S)
+#define ICE_AQ_VSI_UP_TABLE_UP1_S 3
+#define ICE_AQ_VSI_UP_TABLE_UP1_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP1_S)
+#define ICE_AQ_VSI_UP_TABLE_UP2_S 6
+#define ICE_AQ_VSI_UP_TABLE_UP2_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP2_S)
+#define ICE_AQ_VSI_UP_TABLE_UP3_S 9
+#define ICE_AQ_VSI_UP_TABLE_UP3_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP3_S)
+#define ICE_AQ_VSI_UP_TABLE_UP4_S 12
+#define ICE_AQ_VSI_UP_TABLE_UP4_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP4_S)
+#define ICE_AQ_VSI_UP_TABLE_UP5_S 15
+#define ICE_AQ_VSI_UP_TABLE_UP5_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP5_S)
+#define ICE_AQ_VSI_UP_TABLE_UP6_S 18
+#define ICE_AQ_VSI_UP_TABLE_UP6_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP6_S)
+#define ICE_AQ_VSI_UP_TABLE_UP7_S 21
+#define ICE_AQ_VSI_UP_TABLE_UP7_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP7_S)
__le32 egress_table; /* same defines as for ingress table */
/* outer tags section */
- __le16 outer_tag;
- u8 outer_tag_flags;
-#define ICE_AQ_VSI_OUTER_TAG_MODE_S 0
-#define ICE_AQ_VSI_OUTER_TAG_MODE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_MODE_S)
-#define ICE_AQ_VSI_OUTER_TAG_NOTHING 0x0
-#define ICE_AQ_VSI_OUTER_TAG_REMOVE 0x1
-#define ICE_AQ_VSI_OUTER_TAG_COPY 0x2
-#define ICE_AQ_VSI_OUTER_TAG_TYPE_S 2
-#define ICE_AQ_VSI_OUTER_TAG_TYPE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_TYPE_S)
-#define ICE_AQ_VSI_OUTER_TAG_NONE 0x0
-#define ICE_AQ_VSI_OUTER_TAG_STAG 0x1
-#define ICE_AQ_VSI_OUTER_TAG_VLAN_8100 0x2
-#define ICE_AQ_VSI_OUTER_TAG_VLAN_9100 0x3
-#define ICE_AQ_VSI_OUTER_TAG_INSERT BIT(4)
-#define ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST BIT(6)
- u8 outer_tag_reserved;
+ __le16 port_based_outer_vlan;
+ u8 outer_vlan_flags;
+#define ICE_AQ_VSI_OUTER_VLAN_EMODE_S 0
+#define ICE_AQ_VSI_OUTER_VLAN_EMODE_M (0x3 << ICE_AQ_VSI_OUTER_VLAN_EMODE_S)
+#define ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH 0x0
+#define ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_UP 0x1
+#define ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW 0x2
+#define ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING 0x3
+#define ICE_AQ_VSI_OUTER_TAG_TYPE_S 2
+#define ICE_AQ_VSI_OUTER_TAG_TYPE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_TYPE_S)
+#define ICE_AQ_VSI_OUTER_TAG_NONE 0x0
+#define ICE_AQ_VSI_OUTER_TAG_STAG 0x1
+#define ICE_AQ_VSI_OUTER_TAG_VLAN_8100 0x2
+#define ICE_AQ_VSI_OUTER_TAG_VLAN_9100 0x3
+#define ICE_AQ_VSI_OUTER_VLAN_PORT_BASED_INSERT BIT(4)
+#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S 5
+#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M (0x3 << ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S)
+#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ACCEPTUNTAGGED 0x1
+#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ACCEPTTAGGED 0x2
+#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL 0x3
+#define ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC BIT(7)
+ u8 outer_vlan_reserved;
/* queue mapping section */
__le16 mapping_flags;
-#define ICE_AQ_VSI_Q_MAP_CONTIG 0x0
-#define ICE_AQ_VSI_Q_MAP_NONCONTIG BIT(0)
+#define ICE_AQ_VSI_Q_MAP_CONTIG 0x0
+#define ICE_AQ_VSI_Q_MAP_NONCONTIG BIT(0)
__le16 q_mapping[16];
-#define ICE_AQ_VSI_Q_S 0
-#define ICE_AQ_VSI_Q_M (0x7FF << ICE_AQ_VSI_Q_S)
+#define ICE_AQ_VSI_Q_S 0
+#define ICE_AQ_VSI_Q_M (0x7FF << ICE_AQ_VSI_Q_S)
__le16 tc_mapping[8];
-#define ICE_AQ_VSI_TC_Q_OFFSET_S 0
-#define ICE_AQ_VSI_TC_Q_OFFSET_M (0x7FF << ICE_AQ_VSI_TC_Q_OFFSET_S)
-#define ICE_AQ_VSI_TC_Q_NUM_S 11
-#define ICE_AQ_VSI_TC_Q_NUM_M (0xF << ICE_AQ_VSI_TC_Q_NUM_S)
+#define ICE_AQ_VSI_TC_Q_OFFSET_S 0
+#define ICE_AQ_VSI_TC_Q_OFFSET_M (0x7FF << ICE_AQ_VSI_TC_Q_OFFSET_S)
+#define ICE_AQ_VSI_TC_Q_NUM_S 11
+#define ICE_AQ_VSI_TC_Q_NUM_M (0xF << ICE_AQ_VSI_TC_Q_NUM_S)
/* queueing option section */
u8 q_opt_rss;
-#define ICE_AQ_VSI_Q_OPT_RSS_LUT_S 0
-#define ICE_AQ_VSI_Q_OPT_RSS_LUT_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_LUT_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI 0x0
-#define ICE_AQ_VSI_Q_OPT_RSS_LUT_PF 0x2
-#define ICE_AQ_VSI_Q_OPT_RSS_LUT_GBL 0x3
-#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S 2
-#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6
-#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_S 0
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_LUT_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI 0x0
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_PF 0x2
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_GBL 0x3
+#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S 2
+#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
u8 q_opt_tc;
-#define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0
-#define ICE_AQ_VSI_Q_OPT_TC_OVR_M (0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S)
-#define ICE_AQ_VSI_Q_OPT_PROF_TC_OVR BIT(7)
+#define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0
+#define ICE_AQ_VSI_Q_OPT_TC_OVR_M (0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S)
+#define ICE_AQ_VSI_Q_OPT_PROF_TC_OVR BIT(7)
u8 q_opt_flags;
-#define ICE_AQ_VSI_Q_OPT_PE_FLTR_EN BIT(0)
+#define ICE_AQ_VSI_Q_OPT_PE_FLTR_EN BIT(0)
u8 q_opt_reserved[3];
/* outer up section */
__le32 outer_up_table; /* same structure and defines as ingress tbl */
@@ -452,27 +500,27 @@ struct ice_aqc_vsi_props {
__le16 sect_10_reserved;
/* flow director section */
__le16 fd_options;
-#define ICE_AQ_VSI_FD_ENABLE BIT(0)
-#define ICE_AQ_VSI_FD_TX_AUTO_ENABLE BIT(1)
-#define ICE_AQ_VSI_FD_PROG_ENABLE BIT(3)
+#define ICE_AQ_VSI_FD_ENABLE BIT(0)
+#define ICE_AQ_VSI_FD_TX_AUTO_ENABLE BIT(1)
+#define ICE_AQ_VSI_FD_PROG_ENABLE BIT(3)
__le16 max_fd_fltr_dedicated;
__le16 max_fd_fltr_shared;
__le16 fd_def_q;
-#define ICE_AQ_VSI_FD_DEF_Q_S 0
-#define ICE_AQ_VSI_FD_DEF_Q_M (0x7FF << ICE_AQ_VSI_FD_DEF_Q_S)
-#define ICE_AQ_VSI_FD_DEF_GRP_S 12
-#define ICE_AQ_VSI_FD_DEF_GRP_M (0x7 << ICE_AQ_VSI_FD_DEF_GRP_S)
+#define ICE_AQ_VSI_FD_DEF_Q_S 0
+#define ICE_AQ_VSI_FD_DEF_Q_M (0x7FF << ICE_AQ_VSI_FD_DEF_Q_S)
+#define ICE_AQ_VSI_FD_DEF_GRP_S 12
+#define ICE_AQ_VSI_FD_DEF_GRP_M (0x7 << ICE_AQ_VSI_FD_DEF_GRP_S)
__le16 fd_report_opt;
-#define ICE_AQ_VSI_FD_REPORT_Q_S 0
-#define ICE_AQ_VSI_FD_REPORT_Q_M (0x7FF << ICE_AQ_VSI_FD_REPORT_Q_S)
-#define ICE_AQ_VSI_FD_DEF_PRIORITY_S 12
-#define ICE_AQ_VSI_FD_DEF_PRIORITY_M (0x7 << ICE_AQ_VSI_FD_DEF_PRIORITY_S)
-#define ICE_AQ_VSI_FD_DEF_DROP BIT(15)
+#define ICE_AQ_VSI_FD_REPORT_Q_S 0
+#define ICE_AQ_VSI_FD_REPORT_Q_M (0x7FF << ICE_AQ_VSI_FD_REPORT_Q_S)
+#define ICE_AQ_VSI_FD_DEF_PRIORITY_S 12
+#define ICE_AQ_VSI_FD_DEF_PRIORITY_M (0x7 << ICE_AQ_VSI_FD_DEF_PRIORITY_S)
+#define ICE_AQ_VSI_FD_DEF_DROP BIT(15)
/* PASID section */
__le32 pasid_id;
-#define ICE_AQ_VSI_PASID_ID_S 0
-#define ICE_AQ_VSI_PASID_ID_M (0xFFFFF << ICE_AQ_VSI_PASID_ID_S)
-#define ICE_AQ_VSI_PASID_ID_VALID BIT(31)
+#define ICE_AQ_VSI_PASID_ID_S 0
+#define ICE_AQ_VSI_PASID_ID_M (0xFFFFF << ICE_AQ_VSI_PASID_ID_S)
+#define ICE_AQ_VSI_PASID_ID_VALID BIT(31)
u8 reserved[24];
};
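As a rough orientation sketch (not part of the patch), here is how the renamed outer VLAN fields compose for a VSI that inserts a port-based 802.1Q outer tag, shows both tags on receive, and accepts any transmit frame; ctxt and vlan_id are assumed locals, and the use of the usual ice_vsi_ctx wrapper is an assumption:

	u8 flags = ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH |
		   (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
		    ICE_AQ_VSI_OUTER_TAG_TYPE_S) |
		   ICE_AQ_VSI_OUTER_VLAN_PORT_BASED_INSERT |
		   (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
		    ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S);
	/* 0x00 | 0x08 | 0x10 | 0x60 = 0x78 */

	ctxt->info.outer_vlan_flags = flags;
	ctxt->info.port_based_outer_vlan = cpu_to_le16(vlan_id);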
@@ -489,9 +537,13 @@ struct ice_aqc_add_get_recipe {
struct ice_aqc_recipe_content {
u8 rid;
+#define ICE_AQ_RECIPE_ID_S 0
+#define ICE_AQ_RECIPE_ID_M (0x3F << ICE_AQ_RECIPE_ID_S)
#define ICE_AQ_RECIPE_ID_IS_ROOT BIT(7)
#define ICE_AQ_SW_ID_LKUP_IDX 0
u8 lkup_indx[5];
+#define ICE_AQ_RECIPE_LKUP_DATA_S 0
+#define ICE_AQ_RECIPE_LKUP_DATA_M (0x3F << ICE_AQ_RECIPE_LKUP_DATA_S)
#define ICE_AQ_RECIPE_LKUP_IGNORE BIT(7)
#define ICE_AQ_SW_ID_LKUP_MASK 0x00FF
__le16 mask[5];
@@ -502,15 +554,25 @@ struct ice_aqc_recipe_content {
u8 rsvd0[3];
u8 act_ctrl_join_priority;
u8 act_ctrl_fwd_priority;
+#define ICE_AQ_RECIPE_FWD_PRIORITY_S 0
+#define ICE_AQ_RECIPE_FWD_PRIORITY_M (0xF << ICE_AQ_RECIPE_FWD_PRIORITY_S)
u8 act_ctrl;
+#define ICE_AQ_RECIPE_ACT_NEED_PASS_L2 BIT(0)
+#define ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2 BIT(1)
#define ICE_AQ_RECIPE_ACT_INV_ACT BIT(2)
+#define ICE_AQ_RECIPE_ACT_PRUNE_INDX_S 4
+#define ICE_AQ_RECIPE_ACT_PRUNE_INDX_M (0x3 << ICE_AQ_RECIPE_ACT_PRUNE_INDX_S)
u8 rsvd1;
__le32 dflt_act;
+#define ICE_AQ_RECIPE_DFLT_ACT_S 0
+#define ICE_AQ_RECIPE_DFLT_ACT_M (0x7FFFF << ICE_AQ_RECIPE_DFLT_ACT_S)
+#define ICE_AQ_RECIPE_DFLT_ACT_VALID BIT(31)
};
struct ice_aqc_recipe_data_elem {
u8 recipe_indx;
u8 resp_bits;
+#define ICE_AQ_RECIPE_WAS_UPDATED BIT(0)
u8 rsvd0[2];
u8 recipe_bitmap[8];
u8 rsvd1[4];
@@ -1339,6 +1401,24 @@ struct ice_aqc_get_link_topo {
u8 rsvd[9];
};
+/* Read I2C (direct, 0x06E2) */
+struct ice_aqc_i2c {
+ struct ice_aqc_link_topo_addr topo_addr;
+ __le16 i2c_addr;
+ u8 i2c_params;
+#define ICE_AQC_I2C_DATA_SIZE_M GENMASK(3, 0)
+#define ICE_AQC_I2C_USE_REPEATED_START BIT(7)
+
+ u8 rsvd;
+ __le16 i2c_bus_addr;
+ u8 rsvd2[4];
+};
+
+/* Read I2C Response (direct, 0x06E2) */
+struct ice_aqc_read_i2c_resp {
+ u8 i2c_data[16];
+};
+
/* Set Port Identification LED (direct, 0x06E9) */
struct ice_aqc_set_port_id_led {
u8 lport_num;
@@ -1883,7 +1963,7 @@ struct ice_aqc_get_clear_fw_log {
};
/* Download Package (indirect 0x0C40) */
-/* Also used for Update Package (indirect 0x0C42) */
+/* Also used for Update Package (indirect 0x0C41 and 0x0C42) */
struct ice_aqc_download_pkg {
u8 flags;
#define ICE_AQC_DOWNLOAD_PKG_LAST_BUF 0x01
@@ -2009,6 +2089,7 @@ struct ice_aq_desc {
struct ice_aqc_sff_eeprom read_write_sff_param;
struct ice_aqc_set_port_id_led set_port_id_led;
struct ice_aqc_get_sw_cfg get_sw_conf;
+ struct ice_aqc_set_port_params set_port_params;
struct ice_aqc_sw_rules sw_rules;
struct ice_aqc_add_get_recipe add_get_recipe;
struct ice_aqc_recipe_to_profile recipe_to_profile;
@@ -2049,6 +2130,8 @@ struct ice_aq_desc {
struct ice_aqc_get_link_status get_link_status;
struct ice_aqc_event_lan_overflow lan_overflow;
struct ice_aqc_get_link_topo get_link_topo;
+ struct ice_aqc_i2c read_i2c;
+ struct ice_aqc_read_i2c_resp read_i2c_resp;
} params;
};
@@ -2110,10 +2193,13 @@ enum ice_adminq_opc {
/* internal switch commands */
ice_aqc_opc_get_sw_cfg = 0x0200,
+ ice_aqc_opc_set_port_params = 0x0203,
/* Alloc/Free/Get Resources */
ice_aqc_opc_alloc_res = 0x0208,
ice_aqc_opc_free_res = 0x0209,
+ ice_aqc_opc_set_vlan_mode_parameters = 0x020C,
+ ice_aqc_opc_get_vlan_mode_parameters = 0x020D,
/* VSI commands */
ice_aqc_opc_add_vsi = 0x0210,
@@ -2160,6 +2246,7 @@ enum ice_adminq_opc {
ice_aqc_opc_set_event_mask = 0x0613,
ice_aqc_opc_set_mac_lb = 0x0620,
ice_aqc_opc_get_link_topo = 0x06E0,
+ ice_aqc_opc_read_i2c = 0x06E2,
ice_aqc_opc_set_port_id_led = 0x06E9,
ice_aqc_opc_set_gpio = 0x06EC,
ice_aqc_opc_get_gpio = 0x06ED,
@@ -2204,6 +2291,7 @@ enum ice_adminq_opc {
/* package commands */
ice_aqc_opc_download_pkg = 0x0C40,
+ ice_aqc_opc_upload_section = 0x0C41,
ice_aqc_opc_update_pkg = 0x0C42,
ice_aqc_opc_get_pkg_info_list = 0x0C43,
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.h b/drivers/net/ethernet/intel/ice/ice_arfs.h
index 80ed76f0cace..9669ad9bf7b5 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.h
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.h
@@ -3,6 +3,9 @@
#ifndef _ICE_ARFS_H_
#define _ICE_ARFS_H_
+
+#include "ice_fdir.h"
+
enum ice_arfs_fltr_state {
ICE_ARFS_INACTIVE,
ICE_ARFS_ACTIVE,
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 1a5ece3bce79..136d7911adb4 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -5,6 +5,7 @@
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
+#include "ice_sriov.h"
static bool ice_alloc_rx_buf_zc(struct ice_rx_ring *rx_ring)
{
@@ -322,7 +323,7 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
break;
case ICE_VSI_VF:
/* Firmware expects vmvf_num to be absolute VF ID */
- tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
+ tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
break;
case ICE_VSI_SWITCHDEV_CTRL:
@@ -418,8 +419,22 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
*/
rlan_ctx.crcstrip = 1;
- /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
- rlan_ctx.l2tsel = 1;
+ /* L2TSEL flag defines the reported L2 Tags in the receive descriptor
+ * and it needs to remain 1 for non-DVM capable configurations to not
+ * break backward compatibility for VF drivers. Setting this field to 0
+ * will cause the single/outer VLAN tag to be stripped to the L2TAG2_2ND
+ * field in the Rx descriptor. Setting it to 1 allows the VLAN tag to
+ * be stripped in L2TAG1 of the Rx descriptor, which is where VFs will
+ * check for the tag
+ */
+ if (ice_is_dvm_ena(hw))
+ if (vsi->type == ICE_VSI_VF &&
+ ice_vf_is_port_vlan_ena(vsi->vf))
+ rlan_ctx.l2tsel = 1;
+ else
+ rlan_ctx.l2tsel = 0;
+ else
+ rlan_ctx.l2tsel = 1;
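+	/* In short, the decision above resolves to:
+	 *   DVM off                    -> l2tsel = 1 (tag reported in L2TAG1)
+	 *   DVM on, VF with port VLAN  -> l2tsel = 1 (tag reported in L2TAG1)
+	 *   DVM on, everything else    -> l2tsel = 0 (tag reported in L2TAG2_2ND)
+	 */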
rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index e2af99a763ed..9619bdb9e49a 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -1518,16 +1518,27 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
/* When a package download is in process (i.e. when the firmware's
* Global Configuration Lock resource is held), only the Download
- * Package, Get Version, Get Package Info List and Release Resource
- * (with resource ID set to Global Config Lock) AdminQ commands are
- * allowed; all others must block until the package download completes
- * and the Global Config Lock is released. See also
- * ice_acquire_global_cfg_lock().
+ * Package, Get Version, Get Package Info List, Upload Section,
+ * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters,
+ * Add Recipe, Set Recipes to Profile Association, Get Recipe, and Get
+ * Recipes to Profile Association, and Release Resource (with resource
+ * ID set to Global Config Lock) AdminQ commands are allowed; all others
+ * must block until the package download completes and the Global Config
+ * Lock is released. See also ice_acquire_global_cfg_lock().
*/
switch (le16_to_cpu(desc->opcode)) {
case ice_aqc_opc_download_pkg:
case ice_aqc_opc_get_pkg_info_list:
case ice_aqc_opc_get_ver:
+ case ice_aqc_opc_upload_section:
+ case ice_aqc_opc_update_pkg:
+ case ice_aqc_opc_set_port_params:
+ case ice_aqc_opc_get_vlan_mode_parameters:
+ case ice_aqc_opc_set_vlan_mode_parameters:
+ case ice_aqc_opc_add_recipe:
+ case ice_aqc_opc_recipe_to_profile:
+ case ice_aqc_opc_get_recipe:
+ case ice_aqc_opc_get_recipe_to_profile:
break;
case ice_aqc_opc_release_res:
if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
@@ -2737,6 +2748,34 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
}
/**
+ * ice_aq_set_port_params - set physical port parameters.
+ * @pi: pointer to the port info struct
+ * @double_vlan: if set double VLAN is enabled
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set Physical port parameters (0x0203)
+ */
+int
+ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_port_params *cmd;
+ struct ice_hw *hw = pi->hw;
+ struct ice_aq_desc desc;
+ u16 cmd_flags = 0;
+
+ cmd = &desc.params.set_port_params;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
+ if (double_vlan)
+ cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
+ cmd->cmd_flags = cpu_to_le16(cmd_flags);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
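As a usage sketch (illustrative only, not part of the patch; it assumes a valid hw with a populated port_info and skips retry handling), enabling double VLAN on the physical port could look like:

	int err;

	err = ice_aq_set_port_params(hw->port_info, true, NULL);
	if (err)
		dev_dbg(ice_hw_to_dev(hw), "set port params failed, err %d\n",
			err);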
+
+/**
* ice_get_link_speed_based_on_phy_type - returns link speed
* @phy_type_low: lower part of phy_type
* @phy_type_high: higher part of phy_type
@@ -4759,6 +4798,59 @@ ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
}
/**
+ * ice_aq_read_i2c
+ * @hw: pointer to the hw struct
+ * @topo_addr: topology address for a device to communicate with
+ * @bus_addr: 7-bit I2C bus address
+ * @addr: I2C memory address (I2C offset) with up to 16 bits
+ * @params: I2C parameters: bit [7] - Repeated start,
+ * bits [6:5] data offset size,
+ * bit [4] - I2C address type,
+ * bits [3:0] - data size to read (0-16 bytes)
+ * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
+ * @cd: pointer to command details structure or NULL
+ *
+ * Read I2C (0x06E2)
+ */
+int
+ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc = { 0 };
+ struct ice_aqc_i2c *cmd;
+ u8 data_size;
+ int status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
+ cmd = &desc.params.read_i2c;
+
+ if (!data)
+ return -EINVAL;
+
+ data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);
+
+ cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
+ cmd->topo_addr = topo_addr;
+ cmd->i2c_params = params;
+ cmd->i2c_addr = addr;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ if (!status) {
+ struct ice_aqc_read_i2c_resp *resp;
+ u8 i;
+
+ resp = &desc.params.read_i2c_resp;
+ for (i = 0; i < data_size; i++) {
+ *data = resp->i2c_data[i];
+ data++;
+ }
+ }
+
+ return status;
+}
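A minimal caller-side sketch (not from the patch; the 0x42 bus address, the zero I2C offset, and the 2-byte read length are placeholders) showing how the params byte is typically built from the masks defined above:

	struct ice_aqc_link_topo_addr link_topo = {};
	u8 data[2], params;
	int err;

	/* bits [3:0] carry the read length, bit [7] requests a repeated start */
	params = FIELD_PREP(ICE_AQC_I2C_DATA_SIZE_M, sizeof(data)) |
		 ICE_AQC_I2C_USE_REPEATED_START;

	err = ice_aq_read_i2c(hw, link_topo, 0x42, cpu_to_le16(0), params,
			      data, NULL);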
+
+/**
* ice_aq_set_driver_param - Set driver parameter to share via firmware
* @hw: pointer to the HW struct
* @idx: parameter index to set
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 1c57097ddf0b..872ea7d2332d 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -4,12 +4,14 @@
#ifndef _ICE_COMMON_H_
#define _ICE_COMMON_H_
-#include "ice.h"
+#include <linux/bitfield.h>
+
#include "ice_type.h"
#include "ice_nvm.h"
#include "ice_flex_pipe.h"
-#include "ice_switch.h"
#include <linux/avf/virtchnl.h>
+#include "ice_switch.h"
+#include "ice_fdir.h"
#define ICE_SQ_SEND_DELAY_TIME_MS 10
#define ICE_SQ_SEND_MAX_EXECUTE 3
@@ -85,6 +87,9 @@ int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
struct ice_sq_cd *cd);
int
+ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
+ struct ice_sq_cd *cd);
+int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
@@ -205,5 +210,9 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
+int
+ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data,
+ struct ice_sq_cd *cd);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h
index d73348f279f7..6abf28a14291 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.h
@@ -5,6 +5,7 @@
#define _ICE_DCB_H_
#include "ice_type.h"
+#include <scsi/iscsi_proto.h>
#define ICE_DCBX_STATUS_NOT_STARTED 0
#define ICE_DCBX_STATUS_IN_PROGRESS 1
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index b94d8daeaa58..add90e75f05c 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -916,7 +916,8 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
return;
/* Insert 802.1p priority into VLAN header */
- if ((first->tx_flags & ICE_TX_FLAGS_HW_VLAN) ||
+ if ((first->tx_flags & ICE_TX_FLAGS_HW_VLAN ||
+ first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) ||
skb->priority != TC_PRIO_CONTROL) {
first->tx_flags &= ~ICE_TX_FLAGS_VLAN_PR_M;
/* Mask the lower 3 bits to set the 802.1p priority */
@@ -925,7 +926,10 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
/* if this is not already set it means a VLAN 0 + priority needs
* to be offloaded
*/
- first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
+ if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
+ first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
+ else
+ first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 73edc24d81d5..9a84d746a6c4 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -116,9 +116,12 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
struct net_device *uplink_netdev = uplink_vsi->netdev;
struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ struct ice_vsi_vlan_ops *vlan_ops;
bool rule_added = false;
- ice_vsi_manage_vlan_stripping(ctrl_vsi, false);
+ vlan_ops = ice_get_compat_vsi_vlan_ops(ctrl_vsi);
+ if (vlan_ops->dis_stripping(ctrl_vsi))
+ return -ENODEV;
ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
@@ -127,7 +130,7 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
__dev_mc_unsync(uplink_netdev, NULL);
netif_addr_unlock_bh(uplink_netdev);
- if (ice_vsi_add_vlan(uplink_vsi, 0, ICE_FWD_TO_VSI))
+ if (ice_vsi_add_vlan_zero(uplink_vsi))
goto err_def_rx;
if (!ice_is_dflt_vsi_in_use(uplink_vsi->vsw)) {
@@ -173,10 +176,20 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
int q_id;
ice_for_each_txq(vsi, q_id) {
- struct ice_repr *repr = pf->vf[q_id].repr;
- struct ice_q_vector *q_vector = repr->q_vector;
- struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
- struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
+ struct ice_q_vector *q_vector;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
+ struct ice_repr *repr;
+ struct ice_vf *vf;
+
+ vf = ice_get_vf_by_id(pf, q_id);
+ if (WARN_ON(!vf))
+ continue;
+
+ repr = vf->repr;
+ q_vector = repr->q_vector;
+ tx_ring = vsi->tx_rings[q_id];
+ rx_ring = vsi->rx_rings[q_id];
q_vector->vsi = vsi;
q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;
@@ -196,6 +209,38 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
rx_ring->q_vector = q_vector;
rx_ring->next = NULL;
rx_ring->netdev = repr->netdev;
+
+ ice_put_vf(vf);
+ }
+}
+
+/**
+ * ice_eswitch_release_reprs - clear PR VSIs configuration
+ * @pf: pointer to PF struct
+ * @ctrl_vsi: pointer to switchdev control VSI
+ */
+static void
+ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
+
+ ice_for_each_vf(pf, bkt, vf) {
+ struct ice_vsi *vsi = vf->repr->src_vsi;
+
+ /* Skip VFs that aren't configured */
+ if (!vf->repr->dst)
+ continue;
+
+ ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+ metadata_dst_free(vf->repr->dst);
+ vf->repr->dst = NULL;
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+
+ netif_napi_del(&vf->repr->q_vector->napi);
}
}
@@ -207,11 +252,13 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
{
struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
int max_vsi_num = 0;
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
- ice_for_each_vf(pf, i) {
- struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
- struct ice_vf *vf = &pf->vf[i];
+ ice_for_each_vf(pf, bkt, vf) {
+ struct ice_vsi *vsi = vf->repr->src_vsi;
ice_remove_vsi_fltr(&pf->hw, vsi->idx);
vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
@@ -228,14 +275,16 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
vf->hw_lan_addr.addr,
ICE_FWD_TO_VSI);
metadata_dst_free(vf->repr->dst);
+ vf->repr->dst = NULL;
goto err;
}
- if (ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI)) {
+ if (ice_vsi_add_vlan_zero(vsi)) {
ice_fltr_add_mac_and_broadcast(vsi,
vf->hw_lan_addr.addr,
ICE_FWD_TO_VSI);
metadata_dst_free(vf->repr->dst);
+ vf->repr->dst = NULL;
ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
goto err;
}
@@ -249,8 +298,8 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
netif_keep_dst(vf->repr->netdev);
}
- ice_for_each_vf(pf, i) {
- struct ice_repr *repr = pf->vf[i].repr;
+ ice_for_each_vf(pf, bkt, vf) {
+ struct ice_repr *repr = vf->repr;
struct ice_vsi *vsi = repr->src_vsi;
struct metadata_dst *dst;
@@ -263,43 +312,12 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
return 0;
err:
- for (i = i - 1; i >= 0; i--) {
- struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
- struct ice_vf *vf = &pf->vf[i];
-
- ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
- metadata_dst_free(vf->repr->dst);
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
- ICE_FWD_TO_VSI);
- }
+ ice_eswitch_release_reprs(pf, ctrl_vsi);
return -ENODEV;
}
/**
- * ice_eswitch_release_reprs - clear PR VSIs configuration
- * @pf: poiner to PF struct
- * @ctrl_vsi: pointer to switchdev control VSI
- */
-static void
-ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
-{
- int i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
- struct ice_vf *vf = &pf->vf[i];
-
- ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
- metadata_dst_free(vf->repr->dst);
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
- ICE_FWD_TO_VSI);
-
- netif_napi_del(&vf->repr->q_vector->napi);
- }
-}
-
-/**
* ice_eswitch_update_repr - reconfigure VF port representor
* @vsi: VF VSI for which port representor is configured
*/
@@ -313,7 +331,7 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi)
if (!ice_is_switchdev_running(pf))
return;
- vf = &pf->vf[vsi->vf_id];
+ vf = vsi->vf;
repr = vf->repr;
repr->src_vsi = vsi;
repr->dst->u.port_info.port_id = vsi->vsi_num;
@@ -321,7 +339,8 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi)
ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
if (ret) {
ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI);
- dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", vsi->vf_id);
+ dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor",
+ vsi->vf->vf_id);
}
}
@@ -405,7 +424,7 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
static struct ice_vsi *
ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, ICE_INVAL_VFID, NULL);
+ return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, NULL, NULL);
}
/**
@@ -414,10 +433,13 @@ ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
*/
static void ice_eswitch_napi_del(struct ice_pf *pf)
{
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
- ice_for_each_vf(pf, i)
- netif_napi_del(&pf->vf[i].repr->q_vector->napi);
+ lockdep_assert_held(&pf->vfs.table_lock);
+
+ ice_for_each_vf(pf, bkt, vf)
+ netif_napi_del(&vf->repr->q_vector->napi);
}
/**
@@ -426,10 +448,13 @@ static void ice_eswitch_napi_del(struct ice_pf *pf)
*/
static void ice_eswitch_napi_enable(struct ice_pf *pf)
{
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
- ice_for_each_vf(pf, i)
- napi_enable(&pf->vf[i].repr->q_vector->napi);
+ ice_for_each_vf(pf, bkt, vf)
+ napi_enable(&vf->repr->q_vector->napi);
}
/**
@@ -438,10 +463,13 @@ static void ice_eswitch_napi_enable(struct ice_pf *pf)
*/
static void ice_eswitch_napi_disable(struct ice_pf *pf)
{
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
- ice_for_each_vf(pf, i)
- napi_disable(&pf->vf[i].repr->q_vector->napi);
+ ice_for_each_vf(pf, bkt, vf)
+ napi_disable(&vf->repr->q_vector->napi);
}
/**
@@ -519,7 +547,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (pf->eswitch_mode == mode)
return 0;
- if (pf->num_alloc_vfs) {
+ if (ice_has_vfs(pf)) {
dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there is no VFs created");
NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there is no VFs created");
return -EOPNOTSUPP;
@@ -610,16 +638,17 @@ int ice_eswitch_configure(struct ice_pf *pf)
*/
static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
{
- struct ice_repr *repr;
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
if (test_bit(ICE_DOWN, pf->state))
return;
- ice_for_each_vf(pf, i) {
- repr = pf->vf[i].repr;
- if (repr)
- ice_repr_start_tx_queues(repr);
+ ice_for_each_vf(pf, bkt, vf) {
+ if (vf->repr)
+ ice_repr_start_tx_queues(vf->repr);
}
}
@@ -629,16 +658,17 @@ static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
*/
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
{
- struct ice_repr *repr;
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
if (test_bit(ICE_DOWN, pf->state))
return;
- ice_for_each_vf(pf, i) {
- repr = pf->vf[i].repr;
- if (repr)
- ice_repr_stop_tx_queues(repr);
+ ice_for_each_vf(pf, bkt, vf) {
+ if (vf->repr)
+ ice_repr_stop_tx_queues(vf->repr);
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index a5dc9824e255..24cda7e1f916 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -164,6 +164,7 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("vf-true-promisc-support",
ICE_FLAG_VF_TRUE_PROMISC_ENA),
ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF),
+ ICE_PRIV_FLAG("vf-vlan-pruning", ICE_FLAG_VF_VLAN_PRUNING),
ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX),
};
@@ -315,16 +316,20 @@ out:
*/
static bool ice_active_vfs(struct ice_pf *pf)
{
- unsigned int i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
+ bool active = false;
+ struct ice_vf *vf;
+ unsigned int bkt;
- if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
- return true;
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf) {
+ if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ active = true;
+ break;
+ }
}
+ rcu_read_unlock();
- return false;
+ return active;
}
/**
@@ -1295,6 +1300,14 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
change_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags);
ret = -EAGAIN;
}
+
+ if (test_bit(ICE_FLAG_VF_VLAN_PRUNING, change_flags) &&
+ ice_has_vfs(pf)) {
+ dev_err(dev, "vf-vlan-pruning: VLAN pruning cannot be changed while VFs are active.\n");
+ /* toggle bit back to previous state */
+ change_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags);
+ ret = -EOPNOTSUPP;
+ }
ethtool_exit:
clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
return ret;
@@ -2803,6 +2816,8 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
/* clone ring and setup updated count */
xdp_rings[i] = *vsi->xdp_rings[i];
xdp_rings[i].count = new_tx_cnt;
+ xdp_rings[i].next_dd = ICE_RING_QUARTER(&xdp_rings[i]) - 1;
+ xdp_rings[i].next_rs = ICE_RING_QUARTER(&xdp_rings[i]) - 1;
xdp_rings[i].desc = NULL;
xdp_rings[i].tx_buf = NULL;
err = ice_setup_tx_ring(&xdp_rings[i]);
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 4deb2c9446ec..c73cdab44f70 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -4,10 +4,19 @@
#include "ice_common.h"
#include "ice_flex_pipe.h"
#include "ice_flow.h"
+#include "ice.h"
+
+/* For supporting double VLAN mode, it is necessary to enable or disable certain
+ * boost tcam entries. The metadata label names that match the following
+ * prefixes will be saved to allow enabling double VLAN mode.
+ */
+#define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */
+#define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */
/* To support tunneling entries by PF, the package will append the PF number to
* the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
*/
+#define ICE_TNL_PRE "TNL_"
static const struct ice_tunnel_type_scan tnls[] = {
{ TNL_VXLAN, "TNL_VXLAN_PF" },
{ TNL_GENEVE, "TNL_GENEVE_PF" },
@@ -523,6 +532,55 @@ ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
}
/**
+ * ice_add_tunnel_hint
+ * @hw: pointer to the HW structure
+ * @label_name: label text
+ * @val: value of the tunnel port boost entry
+ */
+static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
+{
+ if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
+ u16 i;
+
+ for (i = 0; tnls[i].type != TNL_LAST; i++) {
+ size_t len = strlen(tnls[i].label_prefix);
+
+ /* Look for matching label start, before continuing */
+ if (strncmp(label_name, tnls[i].label_prefix, len))
+ continue;
+
+ /* Make sure this label matches our PF. Note that the PF
+ * character ('0' - '7') will be located where our
+ * prefix string's null terminator is located.
+ */
+ if ((label_name[len] - '0') == hw->pf_id) {
+ hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
+ hw->tnl.tbl[hw->tnl.count].valid = false;
+ hw->tnl.tbl[hw->tnl.count].boost_addr = val;
+ hw->tnl.tbl[hw->tnl.count].port = 0;
+ hw->tnl.count++;
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * ice_add_dvm_hint
+ * @hw: pointer to the HW structure
+ * @val: value of the boost entry
+ * @enable: true to enable the entry, false to disable it
+ */
+static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable)
+{
+ if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) {
+ hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val;
+ hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable;
+ hw->dvm_upd.count++;
+ }
+}
+
+/**
* ice_init_pkg_hints
* @hw: pointer to the HW structure
* @ice_seg: pointer to the segment of the package scan (non-NULL)
@@ -548,32 +606,23 @@ static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
&val);
- while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
- for (i = 0; tnls[i].type != TNL_LAST; i++) {
- size_t len = strlen(tnls[i].label_prefix);
+ while (label_name) {
+ if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
+ /* check for a tunnel entry */
+ ice_add_tunnel_hint(hw, label_name, val);
- /* Look for matching label start, before continuing */
- if (strncmp(label_name, tnls[i].label_prefix, len))
- continue;
+ /* check for a dvm mode entry */
+ else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE)))
+ ice_add_dvm_hint(hw, val, true);
- /* Make sure this label matches our PF. Note that the PF
- * character ('0' - '7') will be located where our
- * prefix string's null terminator is located.
- */
- if ((label_name[len] - '0') == hw->pf_id) {
- hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
- hw->tnl.tbl[hw->tnl.count].valid = false;
- hw->tnl.tbl[hw->tnl.count].boost_addr = val;
- hw->tnl.tbl[hw->tnl.count].port = 0;
- hw->tnl.count++;
- break;
- }
- }
+ /* check for a svm mode entry */
+ else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE)))
+ ice_add_dvm_hint(hw, val, false);
label_name = ice_enum_labels(NULL, 0, &state, &val);
}
- /* Cache the appropriate boost TCAM entry pointers */
+ /* Cache the appropriate boost TCAM entry pointers for tunnels */
for (i = 0; i < hw->tnl.count; i++) {
ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
&hw->tnl.tbl[i].boost_entry);
@@ -583,6 +632,11 @@ static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
hw->tnl.valid_count[hw->tnl.tbl[i].type]++;
}
}
+
+ /* Cache the appropriate boost TCAM entry pointers for DVM and SVM */
+ for (i = 0; i < hw->dvm_upd.count; i++)
+ ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr,
+ &hw->dvm_upd.tbl[i].boost_entry);
}
/* Key creation */
@@ -874,6 +928,27 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
}
/**
+ * ice_aq_upload_section
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package buffer which will receive the section
+ * @buf_size: the size of the package buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Upload Section (0x0C41)
+ */
+int
+ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+}
+
+/**
* ice_aq_update_pkg
* @hw: pointer to the hardware structure
* @pkg_buf: the package cmd buffer
@@ -957,25 +1032,21 @@ ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
}
/**
- * ice_update_pkg
+ * ice_update_pkg_no_lock
* @hw: pointer to the hardware structure
* @bufs: pointer to an array of buffers
* @count: the number of buffers in the array
- *
- * Obtains change lock and updates package.
*/
-static int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+static int
+ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
- u32 offset, info, i;
- int status;
-
- status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
- if (status)
- return status;
+ int status = 0;
+ u32 i;
for (i = 0; i < count; i++) {
struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
bool last = ((i + 1) == count);
+ u32 offset, info;
status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
last, &offset, &info, NULL);
@@ -987,6 +1058,27 @@ static int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
}
}
+ return status;
+}
+
+/**
+ * ice_update_pkg
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains change lock and updates package.
+ */
+static int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+ int status;
+
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ return status;
+
+ status = ice_update_pkg_no_lock(hw, bufs, count);
+
ice_release_change_lock(hw);
return status;
@@ -1080,6 +1172,13 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
break;
}
+ if (!status) {
+ status = ice_set_vlan_mode(hw);
+ if (status)
+ ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n",
+ status);
+ }
+
ice_release_global_cfg_lock(hw);
return state;
@@ -1117,6 +1216,7 @@ static enum ice_ddp_state
ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
struct ice_buf_table *ice_buf_tbl;
+ int status;
ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
ice_seg->hdr.seg_format_ver.major,
@@ -1133,8 +1233,12 @@ ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
le32_to_cpu(ice_buf_tbl->buf_count));
- return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
- le32_to_cpu(ice_buf_tbl->buf_count));
+ status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
+ le32_to_cpu(ice_buf_tbl->buf_count));
+
+ ice_post_pkg_dwnld_vlan_mode_cfg(hw);
+
+ return status;
}
/**
@@ -1701,16 +1805,43 @@ static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
return bld;
}
+static bool ice_is_gtp_u_profile(u16 prof_idx)
+{
+ return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID &&
+ prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER) ||
+ prof_idx == ICE_PROFID_IPV4_GTPU_TEID;
+}
+
+static bool ice_is_gtp_c_profile(u16 prof_idx)
+{
+ switch (prof_idx) {
+ case ICE_PROFID_IPV4_GTPC_TEID:
+ case ICE_PROFID_IPV4_GTPC_NO_TEID:
+ case ICE_PROFID_IPV6_GTPC_TEID:
+ case ICE_PROFID_IPV6_GTPC_NO_TEID:
+ return true;
+ default:
+ return false;
+ }
+}
+
/**
* ice_get_sw_prof_type - determine switch profile type
* @hw: pointer to the HW structure
* @fv: pointer to the switch field vector
+ * @prof_idx: profile index to check
*/
static enum ice_prof_type
-ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
+ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv, u32 prof_idx)
{
u16 i;
+ if (ice_is_gtp_c_profile(prof_idx))
+ return ICE_PROF_TUN_GTPC;
+
+ if (ice_is_gtp_u_profile(prof_idx))
+ return ICE_PROF_TUN_GTPU;
+
for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
/* UDP tunnel will have UDP_OF protocol ID and VNI offset */
if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
@@ -1757,7 +1888,7 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
if (fv) {
/* Determine field vector type */
- prof_type = ice_get_sw_prof_type(hw, fv);
+ prof_type = ice_get_sw_prof_type(hw, fv, offset);
if (req_profs & prof_type)
set_bit((u16)offset, bm);
@@ -1768,20 +1899,19 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
/**
* ice_get_sw_fv_list
* @hw: pointer to the HW structure
- * @prot_ids: field vector to search for with a given protocol ID
- * @ids_cnt: lookup/protocol count
+ * @lkups: list of protocol types
* @bm: bitmap of field vectors to consider
* @fv_list: Head of a list
*
* Finds all the field vector entries from switch block that contain
- * a given protocol ID and returns a list of structures of type
+ * a given protocol ID and offset and returns a list of structures of type
* "ice_sw_fv_list_entry". Every structure in the list has a field vector
* definition and profile ID information
* NOTE: The caller of the function is responsible for freeing the memory
* allocated for every list entry.
*/
int
-ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
+ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
unsigned long *bm, struct list_head *fv_list)
{
struct ice_sw_fv_list_entry *fvl;
@@ -1793,7 +1923,7 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
memset(&state, 0, sizeof(state));
- if (!ids_cnt || !hw->seg)
+ if (!lkups->n_val_words || !hw->seg)
return -EINVAL;
ice_seg = hw->seg;
@@ -1812,20 +1942,17 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
if (!test_bit((u16)offset, bm))
continue;
- for (i = 0; i < ids_cnt; i++) {
+ for (i = 0; i < lkups->n_val_words; i++) {
int j;
- /* This code assumes that if a switch field vector line
- * has a matching protocol, then this line will contain
- * the entries necessary to represent every field in
- * that protocol header.
- */
for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
- if (fv->ew[j].prot_id == prot_ids[i])
+ if (fv->ew[j].prot_id ==
+ lkups->fv_words[i].prot_id &&
+ fv->ew[j].off == lkups->fv_words[i].off)
break;
if (j >= hw->blk[ICE_BLK_SW].es.fvw)
break;
- if (i + 1 == ids_cnt) {
+ if (i + 1 == lkups->n_val_words) {
fvl = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*fvl), GFP_KERNEL);
if (!fvl)
@@ -1897,7 +2024,7 @@ void ice_init_prof_result_bm(struct ice_hw *hw)
*
* Frees a package buffer
*/
-static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
+void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
{
devm_kfree(ice_hw_to_dev(hw), bld);
}
@@ -1997,6 +2124,43 @@ ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
}
/**
+ * ice_pkg_buf_alloc_single_section
+ * @hw: pointer to the HW structure
+ * @type: the section type value
+ * @size: the size of the section to reserve (in bytes)
+ * @section: returns pointer to the section
+ *
+ * Allocates a package buffer with a single section.
+ * Note: all package contents must be in Little Endian form.
+ */
+struct ice_buf_build *
+ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
+ void **section)
+{
+ struct ice_buf_build *buf;
+
+ if (!section)
+ return NULL;
+
+ buf = ice_pkg_buf_alloc(hw);
+ if (!buf)
+ return NULL;
+
+ if (ice_pkg_buf_reserve_section(buf, 1))
+ goto ice_pkg_buf_alloc_single_section_err;
+
+ *section = ice_pkg_buf_alloc_section(buf, type, size);
+ if (!*section)
+ goto ice_pkg_buf_alloc_single_section_err;
+
+ return buf;
+
+ice_pkg_buf_alloc_single_section_err:
+ ice_pkg_buf_free(hw, buf);
+ return NULL;
+}
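A short usage sketch (assumed, not part of the patch; error handling abbreviated): allocating a one-section buffer for a boost TCAM update and releasing it afterwards.

	struct ice_boost_tcam_section *sect;
	struct ice_buf_build *bld;

	bld = ice_pkg_buf_alloc_single_section(hw, ICE_SID_RXPARSER_BOOST_TCAM,
					       struct_size(sect, tcam, 1),
					       (void **)&sect);
	if (!bld)
		return -ENOMEM;

	/* fill sect->tcam[0] here, then hand ice_pkg_buf(bld) to the AQ */
	ice_pkg_buf_free(hw, bld);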
+
+/**
* ice_pkg_buf_get_active_sections
* @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
*
@@ -2023,7 +2187,7 @@ static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
*
* Return a pointer to the buffer's header
*/
-static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
+struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
{
if (!bld)
return NULL;
@@ -2060,6 +2224,89 @@ ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
}
/**
+ * ice_upd_dvm_boost_entry
+ * @hw: pointer to the HW structure
+ * @entry: pointer to double vlan boost entry info
+ */
+static int
+ice_upd_dvm_boost_entry(struct ice_hw *hw, struct ice_dvm_entry *entry)
+{
+ struct ice_boost_tcam_section *sect_rx, *sect_tx;
+ int status = -ENOSPC;
+ struct ice_buf_build *bld;
+ u8 val, dc, nm;
+
+ bld = ice_pkg_buf_alloc(hw);
+ if (!bld)
+ return -ENOMEM;
+
+ /* allocate 2 sections, one for Rx parser, one for Tx parser */
+ if (ice_pkg_buf_reserve_section(bld, 2))
+ goto ice_upd_dvm_boost_entry_err;
+
+ sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
+ struct_size(sect_rx, tcam, 1));
+ if (!sect_rx)
+ goto ice_upd_dvm_boost_entry_err;
+ sect_rx->count = cpu_to_le16(1);
+
+ sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
+ struct_size(sect_tx, tcam, 1));
+ if (!sect_tx)
+ goto ice_upd_dvm_boost_entry_err;
+ sect_tx->count = cpu_to_le16(1);
+
+ /* copy original boost entry to update package buffer */
+ memcpy(sect_rx->tcam, entry->boost_entry, sizeof(*sect_rx->tcam));
+
+ /* re-write the don't care and never match bits accordingly */
+ if (entry->enable) {
+ /* all bits are don't care */
+ val = 0x00;
+ dc = 0xFF;
+ nm = 0x00;
+ } else {
+ /* disable, one never match bit, the rest are don't care */
+ val = 0x00;
+ dc = 0xF7;
+ nm = 0x08;
+ }
+
+ ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
+ &val, NULL, &dc, &nm, 0, sizeof(u8));
+
+ /* exact copy of entry to Tx section entry */
+ memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));
+
+ status = ice_update_pkg_no_lock(hw, ice_pkg_buf(bld), 1);
+
+ice_upd_dvm_boost_entry_err:
+ ice_pkg_buf_free(hw, bld);
+
+ return status;
+}
+
+/**
+ * ice_set_dvm_boost_entries
+ * @hw: pointer to the HW structure
+ *
+ * Enable double vlan by updating the appropriate boost tcam entries.
+ */
+int ice_set_dvm_boost_entries(struct ice_hw *hw)
+{
+ int status;
+ u16 i;
+
+ for (i = 0; i < hw->dvm_upd.count; i++) {
+ status = ice_upd_dvm_boost_entry(hw, &hw->dvm_upd.tbl[i]);
+ if (status)
+ return status;
+ }
+
+ return 0;
+}
+
+/**
* ice_tunnel_idx_to_entry - convert linear index to the sparse one
* @hw: pointer to the HW structure
* @type: type of tunnel
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 6cbc29bcb02f..9c530c86703e 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -87,8 +87,14 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
void
ice_init_prof_result_bm(struct ice_hw *hw);
int
-ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
+ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
unsigned long *bm, struct list_head *fv_list);
+int
+ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
+u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld);
+int
+ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, struct ice_sq_cd *cd);
bool
ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
enum ice_tunnel_type type);
@@ -96,6 +102,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
unsigned int idx, struct udp_tunnel_info *ti);
int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
unsigned int idx, struct udp_tunnel_info *ti);
+int ice_set_dvm_boost_entries(struct ice_hw *hw);
/* Rx parser PTYPE functions */
bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype);
@@ -119,4 +126,10 @@ void ice_fill_blk_tbls(struct ice_hw *hw);
void ice_clear_hw_tbls(struct ice_hw *hw);
void ice_free_hw_tbls(struct ice_hw *hw);
int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
+struct ice_buf_build *
+ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
+ void **section);
+struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld);
+void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld);
+
#endif /* _ICE_FLEX_PIPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index fc087e0b5292..974d14a83b2e 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -162,6 +162,7 @@ struct ice_meta_sect {
#define ICE_SID_RXPARSER_MARKER_PTYPE 55
#define ICE_SID_RXPARSER_BOOST_TCAM 56
+#define ICE_SID_RXPARSER_METADATA_INIT 58
#define ICE_SID_TXPARSER_BOOST_TCAM 66
#define ICE_SID_XLT0_PE 80
@@ -416,6 +417,8 @@ enum ice_tunnel_type {
TNL_VXLAN = 0,
TNL_GENEVE,
TNL_GRETAP,
+ TNL_GTPC,
+ TNL_GTPU,
__TNL_TYPE_CNT,
TNL_LAST = 0xFF,
TNL_ALL = 0xFF,
@@ -442,6 +445,19 @@ struct ice_tunnel_table {
u16 valid_count[__TNL_TYPE_CNT];
};
+struct ice_dvm_entry {
+ u16 boost_addr;
+ u16 enable;
+ struct ice_boost_tcam_entry *boost_entry;
+};
+
+#define ICE_DVM_MAX_ENTRIES 48
+
+struct ice_dvm_table {
+ struct ice_dvm_entry tbl[ICE_DVM_MAX_ENTRIES];
+ u16 count;
+};
+
struct ice_pkg_es {
__le16 count;
__le16 offset;
@@ -659,7 +675,35 @@ enum ice_prof_type {
ICE_PROF_NON_TUN = 0x1,
ICE_PROF_TUN_UDP = 0x2,
ICE_PROF_TUN_GRE = 0x4,
- ICE_PROF_TUN_ALL = 0x6,
+ ICE_PROF_TUN_GTPU = 0x8,
+ ICE_PROF_TUN_GTPC = 0x10,
+ ICE_PROF_TUN_ALL = 0x1E,
ICE_PROF_ALL = 0xFF,
};
+
+/* Number of bits/bytes contained in meta init entry. Note, this should be a
+ * multiple of 32 bits.
+ */
+#define ICE_META_INIT_BITS 192
+#define ICE_META_INIT_DW_CNT (ICE_META_INIT_BITS / (sizeof(__le32) * \
+ BITS_PER_BYTE))
+
+/* The meta init Flag field starts at this bit */
+#define ICE_META_FLAGS_ST 123
+
+/* The entry and bit to check for Double VLAN Mode (DVM) support */
+#define ICE_META_VLAN_MODE_ENTRY 0
+#define ICE_META_FLAG_VLAN_MODE 60
+#define ICE_META_VLAN_MODE_BIT (ICE_META_FLAGS_ST + \
+ ICE_META_FLAG_VLAN_MODE)
+
+struct ice_meta_init_entry {
+ __le32 bm[ICE_META_INIT_DW_CNT];
+};
+
+struct ice_meta_init_section {
+ __le16 count;
+ __le16 offset;
+ struct ice_meta_init_entry entry;
+};
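To make the bit arithmetic concrete: ICE_META_VLAN_MODE_BIT is 123 + 60 = 183, which in the 192-bit (six __le32) meta init entry lands in dword 183 / 32 = 5, bit 183 % 32 = 23. A hedged sketch of how such a flag could be tested (the actual lookup helper is not part of this hunk):

	struct ice_meta_init_entry *entry;	/* entry ICE_META_VLAN_MODE_ENTRY from the package */
	bool dvm_capable;

	dvm_capable = le32_to_cpu(entry->bm[ICE_META_VLAN_MODE_BIT / 32]) &
		      BIT(ICE_META_VLAN_MODE_BIT % 32);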
#endif /* _ICE_FLEX_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index beed4838dcbe..ef103e47a8dc 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -3,6 +3,7 @@
#include "ice_common.h"
#include "ice_flow.h"
+#include <net/gre.h>
/* Describe properties of a protocol header field */
struct ice_flow_field_info {
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 84b6e4464a21..b465d27d9b80 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -4,6 +4,8 @@
#ifndef _ICE_FLOW_H_
#define _ICE_FLOW_H_
+#include "ice_flex_type.h"
+
#define ICE_FLOW_ENTRY_HANDLE_INVAL 0
#define ICE_FLOW_FLD_OFF_INVAL 0xffff
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
index c29177c6bb9d..af57eb114966 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.c
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -203,21 +203,22 @@ ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list,
 * ice_fltr_add_vlan_to_list - add VLAN filter info to existing list
* @vsi: pointer to VSI struct
* @list: list to add filter info to
- * @vlan_id: VLAN ID to add
- * @action: filter action
+ * @vlan: VLAN filter details
*/
static int
ice_fltr_add_vlan_to_list(struct ice_vsi *vsi, struct list_head *list,
- u16 vlan_id, enum ice_sw_fwd_act_type action)
+ struct ice_vlan *vlan)
{
struct ice_fltr_info info = { 0 };
info.flag = ICE_FLTR_TX;
info.src_id = ICE_SRC_ID_VSI;
info.lkup_type = ICE_SW_LKUP_VLAN;
- info.fltr_act = action;
+ info.fltr_act = ICE_FWD_TO_VSI;
info.vsi_handle = vsi->idx;
- info.l_data.vlan.vlan_id = vlan_id;
+ info.l_data.vlan.vlan_id = vlan->vid;
+ info.l_data.vlan.tpid = vlan->tpid;
+ info.l_data.vlan.tpid_valid = true;
return ice_fltr_add_entry_to_list(ice_pf_to_dev(vsi->back), &info,
list);
@@ -310,19 +311,17 @@ ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
/**
* ice_fltr_prepare_vlan - add or remove VLAN filter
* @vsi: pointer to VSI struct
- * @vlan_id: VLAN ID to add
- * @action: action to be performed on filter match
+ * @vlan: VLAN filter details
* @vlan_action: pointer to add or remove VLAN function
*/
static int
-ice_fltr_prepare_vlan(struct ice_vsi *vsi, u16 vlan_id,
- enum ice_sw_fwd_act_type action,
+ice_fltr_prepare_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan,
int (*vlan_action)(struct ice_vsi *, struct list_head *))
{
LIST_HEAD(tmp_list);
int result;
- if (ice_fltr_add_vlan_to_list(vsi, &tmp_list, vlan_id, action))
+ if (ice_fltr_add_vlan_to_list(vsi, &tmp_list, vlan))
return -ENOMEM;
result = vlan_action(vsi, &tmp_list);
@@ -395,27 +394,21 @@ int ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
/**
* ice_fltr_add_vlan - add single VLAN filter
* @vsi: pointer to VSI struct
- * @vlan_id: VLAN ID to add
- * @action: action to be performed on filter match
+ * @vlan: VLAN filter details
*/
-int ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id,
- enum ice_sw_fwd_act_type action)
+int ice_fltr_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
{
- return ice_fltr_prepare_vlan(vsi, vlan_id, action,
- ice_fltr_add_vlan_list);
+ return ice_fltr_prepare_vlan(vsi, vlan, ice_fltr_add_vlan_list);
}
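A caller-side sketch of the new signature (illustrative only; the designated initializers assume struct ice_vlan exposes the tpid and vid members used above, and 100 is a placeholder VLAN ID):

	struct ice_vlan vlan = { .tpid = ETH_P_8021Q, .vid = 100 };
	int err;

	err = ice_fltr_add_vlan(vsi, &vlan);
	if (err)
		dev_err(ice_pf_to_dev(vsi->back), "failed to add VLAN %u\n",
			vlan.vid);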
/**
* ice_fltr_remove_vlan - remove VLAN filter
* @vsi: pointer to VSI struct
- * @vlan_id: filter VLAN to remove
- * @action: action to remove
+ * @vlan: VLAN filter details
*/
-int ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id,
- enum ice_sw_fwd_act_type action)
+int ice_fltr_remove_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
{
- return ice_fltr_prepare_vlan(vsi, vlan_id, action,
- ice_fltr_remove_vlan_list);
+ return ice_fltr_prepare_vlan(vsi, vlan, ice_fltr_remove_vlan_list);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.h b/drivers/net/ethernet/intel/ice/ice_fltr.h
index 3eb42479175f..0f3dbc308eec 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.h
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.h
@@ -4,6 +4,8 @@
#ifndef _ICE_FLTR_H_
#define _ICE_FLTR_H_
+#include "ice_vlan.h"
+
void ice_fltr_free_list(struct device *dev, struct list_head *h);
int
ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
@@ -32,12 +34,8 @@ ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action);
int ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list);
-int
-ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vid,
- enum ice_sw_fwd_act_type action);
-int
-ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vid,
- enum ice_sw_fwd_act_type action);
+int ice_fltr_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+int ice_fltr_remove_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
int
ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
new file mode 100644
index 000000000000..35579cf4283f
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+#include <linux/tty_driver.h>
+
+/**
+ * ice_gnss_read - Read data from internal GNSS module
+ * @work: GNSS read work structure
+ *
+ * Read data from the internal GNSS receiver and pass it up to the TTY layer
+ * so that it can be read from user space.
+ */
+static void ice_gnss_read(struct kthread_work *work)
+{
+ struct gnss_serial *gnss = container_of(work, struct gnss_serial,
+ read_work.work);
+ struct ice_aqc_link_topo_addr link_topo;
+ u8 i2c_params, bytes_read;
+ struct tty_port *port;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ __be16 data_len_b;
+ char *buf = NULL;
+ u16 i, data_len;
+ int err = 0;
+
+ pf = gnss->back;
+ if (!pf || !gnss->tty || !gnss->tty->port) {
+ err = -EFAULT;
+ goto exit;
+ }
+
+ hw = &pf->hw;
+ port = gnss->tty->port;
+
+ buf = (char *)get_zeroed_page(GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ memset(&link_topo, 0, sizeof(struct ice_aqc_link_topo_addr));
+ link_topo.topo_params.index = ICE_E810T_GNSS_I2C_BUS;
+ link_topo.topo_params.node_type_ctx |=
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
+ ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE);
+
+ i2c_params = ICE_GNSS_UBX_DATA_LEN_WIDTH |
+ ICE_AQC_I2C_USE_REPEATED_START;
+
+	/* Read the data length in a loop; once it is non-zero, the data is ready */
+ for (i = 0; i < ICE_MAX_UBX_READ_TRIES; i++) {
+ err = ice_aq_read_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
+ cpu_to_le16(ICE_GNSS_UBX_DATA_LEN_H),
+ i2c_params, (u8 *)&data_len_b, NULL);
+ if (err)
+ goto exit_buf;
+
+ data_len = be16_to_cpu(data_len_b);
+ if (data_len != 0 && data_len != U16_MAX)
+ break;
+
+ mdelay(10);
+ }
+
+ data_len = min(data_len, (u16)PAGE_SIZE);
+ data_len = tty_buffer_request_room(port, data_len);
+ if (!data_len) {
+ err = -ENOMEM;
+ goto exit_buf;
+ }
+
+ /* Read received data */
+ for (i = 0; i < data_len; i += bytes_read) {
+ u16 bytes_left = data_len - i;
+
+ bytes_read = bytes_left < ICE_MAX_I2C_DATA_SIZE ? bytes_left :
+ ICE_MAX_I2C_DATA_SIZE;
+
+ err = ice_aq_read_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
+ cpu_to_le16(ICE_GNSS_UBX_EMPTY_DATA),
+ bytes_read, &buf[i], NULL);
+ if (err)
+ goto exit_buf;
+ }
+
+ /* Send the data to the tty layer for users to read. This doesn't
+ * actually push the data through unless tty->low_latency is set.
+ */
+ tty_insert_flip_string(port, buf, i);
+ tty_flip_buffer_push(port);
+
+exit_buf:
+ free_page((unsigned long)buf);
+ kthread_queue_delayed_work(gnss->kworker, &gnss->read_work,
+ ICE_GNSS_TIMER_DELAY_TIME);
+exit:
+ if (err)
+ dev_dbg(ice_pf_to_dev(pf), "GNSS failed to read err=%d\n", err);
+}
+
+/**
+ * ice_gnss_struct_init - Initialize GNSS structure for the TTY
+ * @pf: Board private structure
+ */
+static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct kthread_worker *kworker;
+ struct gnss_serial *gnss;
+
+ gnss = kzalloc(sizeof(*gnss), GFP_KERNEL);
+ if (!gnss)
+ return NULL;
+
+ mutex_init(&gnss->gnss_mutex);
+ gnss->open_count = 0;
+ gnss->back = pf;
+ pf->gnss_serial = gnss;
+
+ kthread_init_delayed_work(&gnss->read_work, ice_gnss_read);
+ /* Allocate a kworker for handling work required for the GNSS TTY
+ * writes.
+ */
+ kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev));
+ if (IS_ERR(kworker)) {
+ kfree(gnss);
+ return NULL;
+ }
+
+ gnss->kworker = kworker;
+
+ return gnss;
+}
+
+/**
+ * ice_gnss_tty_open - Initialize GNSS structures on TTY device open
+ * @tty: pointer to the tty_struct
+ * @filp: pointer to the file
+ *
+ * This routine is mandatory. If this routine is not filled in, the attempted
+ * open will fail with ENODEV.
+ */
+static int ice_gnss_tty_open(struct tty_struct *tty, struct file *filp)
+{
+ struct gnss_serial *gnss;
+ struct ice_pf *pf;
+
+ pf = (struct ice_pf *)tty->driver->driver_state;
+ if (!pf)
+ return -EFAULT;
+
+ /* Clear the pointer in case something fails */
+ tty->driver_data = NULL;
+
+ /* Get the serial object associated with this tty pointer */
+ gnss = pf->gnss_serial;
+ if (!gnss) {
+ /* Initialize GNSS struct on the first device open */
+ gnss = ice_gnss_struct_init(pf);
+ if (!gnss)
+ return -ENOMEM;
+ }
+
+ mutex_lock(&gnss->gnss_mutex);
+
+ /* Save our structure within the tty structure */
+ tty->driver_data = gnss;
+ gnss->tty = tty;
+ gnss->open_count++;
+ kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, 0);
+
+ mutex_unlock(&gnss->gnss_mutex);
+
+ return 0;
+}
+
+/**
+ * ice_gnss_tty_close - Cleanup GNSS structures on tty device close
+ * @tty: pointer to the tty_struct
+ * @filp: pointer to the file
+ */
+static void ice_gnss_tty_close(struct tty_struct *tty, struct file *filp)
+{
+ struct gnss_serial *gnss = tty->driver_data;
+ struct ice_pf *pf;
+
+ if (!gnss)
+ return;
+
+ pf = (struct ice_pf *)tty->driver->driver_state;
+ if (!pf)
+ return;
+
+ mutex_lock(&gnss->gnss_mutex);
+
+ if (!gnss->open_count) {
+ /* Port was never opened */
+ dev_err(ice_pf_to_dev(pf), "GNSS port not opened\n");
+ goto exit;
+ }
+
+ gnss->open_count--;
+ if (gnss->open_count <= 0) {
+ /* Port is in shutdown state */
+ kthread_cancel_delayed_work_sync(&gnss->read_work);
+ }
+exit:
+ mutex_unlock(&gnss->gnss_mutex);
+}
+
+/**
+ * ice_gnss_tty_write - Dummy TTY write function to avoid kernel panic
+ * @tty: pointer to the tty_struct
+ * @buf: pointer to the user data
+ * @cnt: the number of characters to be written
+ */
+static int
+ice_gnss_tty_write(struct tty_struct *tty, const unsigned char *buf, int cnt)
+{
+ return 0;
+}
+
+/**
+ * ice_gnss_tty_write_room - Dummy TTY write_room function to avoid kernel panic
+ * @tty: pointer to the tty_struct
+ */
+static unsigned int ice_gnss_tty_write_room(struct tty_struct *tty)
+{
+ return 0;
+}
+
+static const struct tty_operations tty_gps_ops = {
+ .open = ice_gnss_tty_open,
+ .close = ice_gnss_tty_close,
+ .write = ice_gnss_tty_write,
+ .write_room = ice_gnss_tty_write_room,
+};
+
+/**
+ * ice_gnss_create_tty_driver - Create a TTY driver for GNSS
+ * @pf: Board private structure
+ */
+static struct tty_driver *ice_gnss_create_tty_driver(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ const int ICE_TTYDRV_NAME_MAX = 14;
+ struct tty_driver *tty_driver;
+ char *ttydrv_name;
+ int err;
+
+ tty_driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW);
+ if (IS_ERR(tty_driver)) {
+ dev_err(ice_pf_to_dev(pf), "Failed to allocate memory for GNSS TTY\n");
+ return NULL;
+ }
+
+ ttydrv_name = kzalloc(ICE_TTYDRV_NAME_MAX, GFP_KERNEL);
+ if (!ttydrv_name) {
+ tty_driver_kref_put(tty_driver);
+ return NULL;
+ }
+
+ snprintf(ttydrv_name, ICE_TTYDRV_NAME_MAX, "ttyGNSS_%02x%02x_",
+ (u8)pf->pdev->bus->number, (u8)PCI_SLOT(pf->pdev->devfn));
+
+ /* Initialize the tty driver */
+ tty_driver->owner = THIS_MODULE;
+ tty_driver->driver_name = dev_driver_string(dev);
+ tty_driver->name = (const char *)ttydrv_name;
+ tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ tty_driver->init_termios = tty_std_termios;
+ tty_driver->init_termios.c_iflag &= ~INLCR;
+ tty_driver->init_termios.c_iflag |= IGNCR;
+ tty_driver->init_termios.c_oflag &= ~OPOST;
+ tty_driver->init_termios.c_lflag &= ~ICANON;
+ tty_driver->init_termios.c_cflag &= ~(CSIZE | CBAUD | CBAUDEX);
+ /* baud rate 9600 */
+ tty_termios_encode_baud_rate(&tty_driver->init_termios, 9600, 9600);
+ tty_driver->driver_state = pf;
+ tty_set_operations(tty_driver, &tty_gps_ops);
+
+ pf->gnss_serial = NULL;
+
+ tty_port_init(&pf->gnss_tty_port);
+ tty_port_link_device(&pf->gnss_tty_port, tty_driver, 0);
+
+ err = tty_register_driver(tty_driver);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "Failed to register TTY driver err=%d\n",
+ err);
+
+ tty_port_destroy(&pf->gnss_tty_port);
+ kfree(ttydrv_name);
+ tty_driver_kref_put(pf->ice_gnss_tty_driver);
+
+ return NULL;
+ }
+
+ return tty_driver;
+}
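Because the port is registered with a single index and the termios above already select raw mode at 9600 baud, a user-space consumer only has to open the node and read. A hypothetical example follows; the exact node name encodes the PCI bus and slot from the snprintf() above (e.g. /dev/ttyGNSS_1800_0 for bus 0x18, slot 0x00), so adjust it for the actual adapter:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	ssize_t n;
	int fd = open("/dev/ttyGNSS_1800_0", O_RDONLY | O_NOCTTY);

	if (fd < 0)
		return 1;

	/* the driver pre-configures raw mode at 9600 baud, so plain reads
	 * return the u-blox byte stream pushed by ice_gnss_read()
	 */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(fd);
	return 0;
}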
+
+/**
+ * ice_gnss_init - Initialize GNSS TTY support
+ * @pf: Board private structure
+ */
+void ice_gnss_init(struct ice_pf *pf)
+{
+ struct tty_driver *tty_driver;
+
+ tty_driver = ice_gnss_create_tty_driver(pf);
+ if (!tty_driver)
+ return;
+
+ pf->ice_gnss_tty_driver = tty_driver;
+
+ set_bit(ICE_FLAG_GNSS, pf->flags);
+ dev_info(ice_pf_to_dev(pf), "GNSS TTY init successful\n");
+}
+
+/**
+ * ice_gnss_exit - Disable GNSS TTY support
+ * @pf: Board private structure
+ */
+void ice_gnss_exit(struct ice_pf *pf)
+{
+ if (!test_bit(ICE_FLAG_GNSS, pf->flags) || !pf->ice_gnss_tty_driver)
+ return;
+
+ tty_port_destroy(&pf->gnss_tty_port);
+
+ if (pf->gnss_serial) {
+ struct gnss_serial *gnss = pf->gnss_serial;
+
+ kthread_cancel_delayed_work_sync(&gnss->read_work);
+ kfree(gnss);
+ pf->gnss_serial = NULL;
+ }
+
+ tty_unregister_driver(pf->ice_gnss_tty_driver);
+ kfree(pf->ice_gnss_tty_driver->name);
+ tty_driver_kref_put(pf->ice_gnss_tty_driver);
+ pf->ice_gnss_tty_driver = NULL;
+}
+
+/**
+ * ice_gnss_is_gps_present - Check if GPS HW is present
+ * @hw: pointer to HW struct
+ */
+bool ice_gnss_is_gps_present(struct ice_hw *hw)
+{
+ if (!hw->func_caps.ts_func_info.src_tmr_owned)
+ return false;
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+ if (ice_is_e810t(hw)) {
+ int err;
+ u8 data;
+
+ err = ice_read_pca9575_reg_e810t(hw, ICE_PCA9575_P0_IN, &data);
+ if (err || !!(data & ICE_E810T_P0_GNSS_PRSNT_N))
+ return false;
+ } else {
+ return false;
+ }
+#else
+ if (!ice_is_e810t(hw))
+ return false;
+#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+
+ return true;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h
new file mode 100644
index 000000000000..9211adb2372c
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+#ifndef _ICE_GNSS_H_
+#define _ICE_GNSS_H_
+
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+#define ICE_E810T_GNSS_I2C_BUS 0x2
+#define ICE_GNSS_UBX_I2C_BUS_ADDR 0x42
+/* Data length register is big endian */
+#define ICE_GNSS_UBX_DATA_LEN_H 0xFD
+#define ICE_GNSS_UBX_DATA_LEN_WIDTH 2
+#define ICE_GNSS_UBX_EMPTY_DATA 0xFF
+#define ICE_GNSS_TIMER_DELAY_TIME (HZ / 10) /* 0.1 second per message */
+#define ICE_MAX_I2C_DATA_SIZE FIELD_MAX(ICE_AQC_I2C_DATA_SIZE_M)
+#define ICE_MAX_UBX_READ_TRIES 255
+
+/**
+ * struct gnss_serial - data used to initialize GNSS TTY port
+ * @back: back pointer to PF
+ * @tty: pointer to the tty for this device
+ * @open_count: number of times this port has been opened
+ * @gnss_mutex: mutex used to protect GNSS serial operations
+ * @kworker: kworker thread for handling periodic work
+ * @read_work: delayed work for handling GNSS reads
+ */
+struct gnss_serial {
+ struct ice_pf *back;
+ struct tty_struct *tty;
+ int open_count;
+ struct mutex gnss_mutex; /* protects GNSS serial structure */
+ struct kthread_worker *kworker;
+ struct kthread_delayed_work read_work;
+};
+
+#if IS_ENABLED(CONFIG_TTY)
+void ice_gnss_init(struct ice_pf *pf);
+void ice_gnss_exit(struct ice_pf *pf);
+bool ice_gnss_is_gps_present(struct ice_hw *hw);
+#else
+static inline void ice_gnss_init(struct ice_pf *pf) { }
+static inline void ice_gnss_exit(struct ice_pf *pf) { }
+static inline bool ice_gnss_is_gps_present(struct ice_hw *hw)
+{
+ return false;
+}
+#endif /* IS_ENABLED(CONFIG_TTY) */
+#endif /* _ICE_GNSS_H_ */
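The IS_ENABLED(CONFIG_TTY) stubs mean callers never need their own #ifdefs: with TTY support compiled out the hooks collapse to no-ops and ice_gnss_is_gps_present() reports false. The intended call pattern, mirroring the ice_main.c hunks later in this diff, is simply:

	/* probe / rebuild path */
	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_init(pf);

	/* teardown path */
	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_exit(pf);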
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index fc3580167e7b..25a436d342c2 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -34,6 +34,9 @@ void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
{
struct iidc_auxiliary_drv *iadrv;
+ if (WARN_ON_ONCE(!in_task()))
+ return;
+
if (!pf->adev)
return;
@@ -79,7 +82,7 @@ int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
dev = ice_pf_to_dev(pf);
- if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ if (!ice_is_rdma_ena(pf))
return -EINVAL;
vsi = ice_get_main_vsi(pf);
@@ -227,6 +230,11 @@ void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];
+
+ qos->pfc_mode = dcbx_cfg->pfc_mode;
+ if (qos->pfc_mode == IIDC_DSCP_PFC_MODE)
+ for (i = 0; i < IIDC_MAX_DSCP_MAPPING; i++)
+ qos->dscp_map[i] = dcbx_cfg->dscp_map[i];
}
EXPORT_SYMBOL_GPL(ice_get_qos_params);
@@ -236,7 +244,7 @@ EXPORT_SYMBOL_GPL(ice_get_qos_params);
*/
static int ice_reserve_rdma_qvector(struct ice_pf *pf)
{
- if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
+ if (ice_is_rdma_ena(pf)) {
int index;
index = ice_get_res(pf, pf->irq_tracker, pf->num_rdma_msix,
@@ -274,7 +282,7 @@ int ice_plug_aux_dev(struct ice_pf *pf)
/* if this PF doesn't support a technology that requires auxiliary
* devices, then gracefully exit
*/
- if (!ice_is_aux_ena(pf))
+ if (!ice_is_rdma_ena(pf))
return 0;
iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
diff --git a/drivers/net/ethernet/intel/ice/ice_idc_int.h b/drivers/net/ethernet/intel/ice/ice_idc_int.h
index b7796b8aecbd..4b0c86757df9 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc_int.h
+++ b/drivers/net/ethernet/intel/ice/ice_idc_int.h
@@ -5,7 +5,6 @@
#define _ICE_IDC_INT_H_
#include <linux/net/intel/iidc.h>
-#include "ice.h"
struct ice_pf;
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 85a612838a89..b3baf7c3f910 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -424,6 +424,8 @@ enum ice_rx_flex_desc_status_error_0_bits {
enum ice_rx_flex_desc_status_error_1_bits {
/* Note: These are predefined bit offsets */
ICE_RX_FLEX_DESC_STATUS1_NAT_S = 4,
+ /* [10:5] reserved */
+ ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
};
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 53256aca27c7..b897926f817d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -8,6 +8,7 @@
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_devlink.h"
+#include "ice_vsi_vlan_ops.h"
/**
* ice_vsi_type_str - maps VSI type enum to string equivalents
@@ -165,21 +166,19 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
/**
* ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
* @vsi: the VSI being configured
- * @vf_id: ID of the VF being configured
+ * @vf: the VF associated with this VSI, if any
*
* Return 0 on success and a negative value on error
*/
-static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
+static void ice_vsi_set_num_qs(struct ice_vsi *vsi, struct ice_vf *vf)
{
+ enum ice_vsi_type vsi_type = vsi->type;
struct ice_pf *pf = vsi->back;
- struct ice_vf *vf = NULL;
- if (vsi->type == ICE_VSI_VF)
- vsi->vf_id = vf_id;
- else
- vsi->vf_id = ICE_INVAL_VFID;
+ if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
+ return;
- switch (vsi->type) {
+ switch (vsi_type) {
case ICE_VSI_PF:
if (vsi->req_txq) {
vsi->alloc_txq = vsi->req_txq;
@@ -216,22 +215,21 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
/* The number of queues for ctrl VSI is equal to number of VFs.
* Each ring is associated to the corresponding VF_PR netdev.
*/
- vsi->alloc_txq = pf->num_alloc_vfs;
- vsi->alloc_rxq = pf->num_alloc_vfs;
+ vsi->alloc_txq = ice_get_num_vfs(pf);
+ vsi->alloc_rxq = vsi->alloc_txq;
vsi->num_q_vectors = 1;
break;
case ICE_VSI_VF:
- vf = &pf->vf[vsi->vf_id];
if (vf->num_req_qs)
vf->num_vf_qs = vf->num_req_qs;
vsi->alloc_txq = vf->num_vf_qs;
vsi->alloc_rxq = vf->num_vf_qs;
- /* pf->num_msix_per_vf includes (VF miscellaneous vector +
+ /* pf->vfs.num_msix_per includes (VF miscellaneous vector +
* data queue interrupts). Since vsi->num_q_vectors is number
* of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the
* original vector count
*/
- vsi->num_q_vectors = pf->num_msix_per_vf - ICE_NONQ_VECS_VF;
+ vsi->num_q_vectors = pf->vfs.num_msix_per - ICE_NONQ_VECS_VF;
break;
case ICE_VSI_CTRL:
vsi->alloc_txq = 1;
@@ -247,7 +245,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
vsi->alloc_rxq = 1;
break;
default:
- dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi->type);
+ dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi_type);
break;
}
@@ -298,7 +296,7 @@ void ice_vsi_delete(struct ice_vsi *vsi)
return;
if (vsi->type == ICE_VSI_VF)
- ctxt->vf_num = vsi->vf_id;
+ ctxt->vf_num = vsi->vf->vf_id;
ctxt->vsi_num = vsi->vsi_num;
memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));
@@ -383,8 +381,7 @@ int ice_vsi_clear(struct ice_vsi *vsi)
pf->vsi[vsi->idx] = NULL;
if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL)
pf->next_vsi = vsi->idx;
- if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL &&
- vsi->vf_id != ICE_INVAL_VFID)
+ if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL && vsi->vf)
pf->next_vsi = vsi->idx;
ice_vsi_free_arrays(vsi);
@@ -436,13 +433,16 @@ static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *d
{
struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
struct ice_pf *pf = q_vector->vsi->back;
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
return IRQ_HANDLED;
- ice_for_each_vf(pf, i)
- napi_schedule(&pf->vf[i].repr->q_vector->napi);
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf)
+ napi_schedule(&vf->repr->q_vector->napi);
+ rcu_read_unlock();
return IRQ_HANDLED;
}
@@ -452,17 +452,24 @@ static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *d
* @pf: board private structure
* @vsi_type: type of VSI
* @ch: ptr to channel
- * @vf_id: ID of the VF being configured
+ * @vf: VF for ICE_VSI_VF and ICE_VSI_CTRL
+ *
+ * The VF pointer is used for ICE_VSI_VF and ICE_VSI_CTRL. For ICE_VSI_CTRL,
+ * it may be NULL when there is no association with a VF. For
+ * ICE_VSI_VF the VF pointer *must not* be NULL.
*
* returns a pointer to a VSI on success, NULL on failure.
*/
static struct ice_vsi *
ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
- struct ice_channel *ch, u16 vf_id)
+ struct ice_channel *ch, struct ice_vf *vf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_vsi *vsi = NULL;
+ if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
+ return NULL;
+
/* Need to protect the allocation of the VSIs at the PF level */
mutex_lock(&pf->sw_mutex);
@@ -484,9 +491,9 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
set_bit(ICE_VSI_DOWN, vsi->state);
if (vsi_type == ICE_VSI_VF)
- ice_vsi_set_num_qs(vsi, vf_id);
+ ice_vsi_set_num_qs(vsi, vf);
else if (vsi_type != ICE_VSI_CHNL)
- ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
+ ice_vsi_set_num_qs(vsi, NULL);
switch (vsi->type) {
case ICE_VSI_SWITCHDEV_CTRL:
@@ -509,10 +516,16 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
/* Setup ctrl VSI MSIX irq handler */
vsi->irq_handler = ice_msix_clean_ctrl_vsi;
+
+ /* For the PF control VSI this is NULL, for the VF control VSI
+ * this will be the first VF to allocate it.
+ */
+ vsi->vf = vf;
break;
case ICE_VSI_VF:
if (ice_vsi_alloc_arrays(vsi))
goto err_rings;
+ vsi->vf = vf;
break;
case ICE_VSI_CHNL:
if (!ch)
@@ -530,7 +543,7 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
goto unlock_pf;
}
- if (vsi->type == ICE_VSI_CTRL && vf_id == ICE_INVAL_VFID) {
+ if (vsi->type == ICE_VSI_CTRL && !vf) {
/* Use the last VSI slot as the index for PF control VSI */
vsi->idx = pf->num_alloc_vsi - 1;
pf->ctrl_vsi_idx = vsi->idx;
@@ -545,8 +558,8 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
pf->next_vsi);
}
- if (vsi->type == ICE_VSI_CTRL && vf_id != ICE_INVAL_VFID)
- pf->vf[vf_id].ctrl_vsi_idx = vsi->idx;
+ if (vsi->type == ICE_VSI_CTRL && vf)
+ vf->ctrl_vsi_idx = vsi->idx;
goto unlock_pf;
err_rings:
@@ -732,14 +745,14 @@ bool ice_is_safe_mode(struct ice_pf *pf)
}
/**
- * ice_is_aux_ena
+ * ice_is_rdma_ena
* @pf: pointer to the PF struct
*
- * returns true if AUX devices/drivers are supported, false otherwise
+ * returns true if RDMA is currently supported, false otherwise
*/
-bool ice_is_aux_ena(struct ice_pf *pf)
+bool ice_is_rdma_ena(struct ice_pf *pf)
{
- return test_bit(ICE_FLAG_AUX_ENA, pf->flags);
+ return test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}
/**
@@ -838,11 +851,12 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
/**
* ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
+ * @hw: HW structure used to determine the VLAN mode of the device
* @ctxt: the VSI context being set
*
* This initializes a default VSI context for all sections except the Queues.
*/
-static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
+static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
{
u32 table = 0;
@@ -853,13 +867,27 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
/* Traffic from VSI can be sent to LAN */
ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
- /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
- * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
- * packets untagged/tagged.
+ /* allow all untagged/tagged packets by default on Tx */
+ ctxt->info.inner_vlan_flags = ((ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL &
+ ICE_AQ_VSI_INNER_VLAN_TX_MODE_M) >>
+ ICE_AQ_VSI_INNER_VLAN_TX_MODE_S);
+ /* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which
+ * results in legacy behavior (show VLAN, DEI, and UP) in descriptor.
+ *
+ * DVM - leave inner VLAN in packet by default
*/
- ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
- ICE_AQ_VSI_VLAN_MODE_M) >>
- ICE_AQ_VSI_VLAN_MODE_S);
+ if (ice_is_dvm_ena(hw)) {
+ ctxt->info.inner_vlan_flags |=
+ ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
+ ctxt->info.outer_vlan_flags =
+ (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M;
+ ctxt->info.outer_vlan_flags |=
+ (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
+ ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
+ ICE_AQ_VSI_OUTER_TAG_TYPE_M;
+ }
/* Have 1:1 UP mapping for both ingress/egress tables */
table |= ICE_UP_TABLE_TRANSLATE(0, 0);
table |= ICE_UP_TABLE_TRANSLATE(1, 1);
@@ -1114,7 +1142,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
case ICE_VSI_VF:
ctxt->flags = ICE_AQ_VSI_TYPE_VF;
/* VF number here is the absolute VF number (0-255) */
- ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
+ ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id;
break;
default:
ret = -ENODEV;
@@ -1136,7 +1164,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
}
- ice_set_dflt_vsi_ctx(ctxt);
+ ice_set_dflt_vsi_ctx(hw, ctxt);
if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
ice_set_fd_vsi_ctx(ctxt, vsi);
/* if the switch is in VEB mode, allow VSI loopback */
@@ -1168,25 +1196,6 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
}
- /* enable/disable MAC and VLAN anti-spoof when spoofchk is on/off
- * respectively
- */
- if (vsi->type == ICE_VSI_VF) {
- ctxt->info.valid_sections |=
- cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
- if (pf->vf[vsi->vf_id].spoofchk) {
- ctxt->info.sec_flags |=
- ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
- (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
- ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
- } else {
- ctxt->info.sec_flags &=
- ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
- (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
- ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
- }
- }
-
/* Allow control frames out of main VSI */
if (vsi->type == ICE_VSI_PF) {
ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
@@ -1325,6 +1334,36 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
}
/**
+ * ice_get_vf_ctrl_res - Get VF control VSI resource
+ * @pf: pointer to the PF structure
+ * @vsi: the VSI to allocate a resource for
+ *
+ * Look up whether another VF has already allocated the control VSI resource.
+ * If so, re-use this resource so that we share it among all VFs.
+ *
+ * Otherwise, allocate the resource and return it.
+ */
+static int ice_get_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+ int base;
+
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf) {
+ if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
+ base = pf->vsi[vf->ctrl_vsi_idx]->base_vector;
+ rcu_read_unlock();
+ return base;
+ }
+ }
+ rcu_read_unlock();
+
+ return ice_get_res(pf, pf->irq_tracker, vsi->num_q_vectors,
+ ICE_RES_VF_CTRL_VEC_ID);
+}
+
+/**
* ice_vsi_setup_vector_base - Set up the base vector for the given VSI
* @vsi: ptr to the VSI
*
@@ -1356,20 +1395,8 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
num_q_vectors = vsi->num_q_vectors;
/* reserve slots from OS requested IRQs */
- if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) {
- int i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI) {
- base = pf->vsi[vf->ctrl_vsi_idx]->base_vector;
- break;
- }
- }
- if (i == pf->num_alloc_vfs)
- base = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
- ICE_RES_VF_CTRL_VEC_ID);
+ if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
+ base = ice_get_vf_ctrl_res(pf, vsi);
} else {
base = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
vsi->idx);
@@ -1431,6 +1458,7 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
*/
static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
+ bool dvm_ena = ice_is_dvm_ena(&vsi->back->hw);
struct ice_pf *pf = vsi->back;
struct device *dev;
u16 i;
@@ -1452,6 +1480,10 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->tx_tstamps = &pf->ptp.port.tx;
ring->dev = dev;
ring->count = vsi->num_tx_desc;
+ if (dvm_ena)
+ ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2;
+ else
+ ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG1;
WRITE_ONCE(vsi->tx_rings[i], ring);
}
@@ -1763,62 +1795,6 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
}
/**
- * ice_vsi_add_vlan - Add VSI membership for given VLAN
- * @vsi: the VSI being configured
- * @vid: VLAN ID to be added
- * @action: filter action to be performed on match
- */
-int
-ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action)
-{
- struct ice_pf *pf = vsi->back;
- struct device *dev;
- int err = 0;
-
- dev = ice_pf_to_dev(pf);
-
- if (!ice_fltr_add_vlan(vsi, vid, action)) {
- vsi->num_vlan++;
- } else {
- err = -ENODEV;
- dev_err(dev, "Failure Adding VLAN %d on VSI %i\n", vid,
- vsi->vsi_num);
- }
-
- return err;
-}
-
-/**
- * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
- * @vsi: the VSI being configured
- * @vid: VLAN ID to be removed
- *
- * Returns 0 on success and negative on failure
- */
-int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
-{
- struct ice_pf *pf = vsi->back;
- struct device *dev;
- int err;
-
- dev = ice_pf_to_dev(pf);
-
- err = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI);
- if (!err) {
- vsi->num_vlan--;
- } else if (err == -ENOENT) {
- dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, error: %d\n",
- vid, vsi->vsi_num, err);
- err = 0;
- } else {
- dev_err(dev, "Error removing VLAN %d on vsi %i error: %d\n",
- vid, vsi->vsi_num, err);
- }
-
- return err;
-}
-
-/**
* ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
* @vsi: VSI
*/
@@ -2146,95 +2122,6 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
}
/**
- * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
- * @vsi: the VSI being changed
- */
-int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
-{
- struct ice_hw *hw = &vsi->back->hw;
- struct ice_vsi_ctx *ctxt;
- int ret;
-
- ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
- if (!ctxt)
- return -ENOMEM;
-
- /* Here we are configuring the VSI to let the driver add VLAN tags by
- * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
- * insertion happens in the Tx hot path, in ice_tx_map.
- */
- ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
-
- /* Preserve existing VLAN strip setting */
- ctxt->info.vlan_flags |= (vsi->info.vlan_flags &
- ICE_AQ_VSI_VLAN_EMOD_M);
-
- ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
-
- ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
- if (ret) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %s\n",
- ret, ice_aq_str(hw->adminq.sq_last_status));
- goto out;
- }
-
- vsi->info.vlan_flags = ctxt->info.vlan_flags;
-out:
- kfree(ctxt);
- return ret;
-}
-
-/**
- * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
- * @vsi: the VSI being changed
- * @ena: boolean value indicating if this is a enable or disable request
- */
-int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
-{
- struct ice_hw *hw = &vsi->back->hw;
- struct ice_vsi_ctx *ctxt;
- int ret;
-
- /* do not allow modifying VLAN stripping when a port VLAN is configured
- * on this VSI
- */
- if (vsi->info.pvid)
- return 0;
-
- ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
- if (!ctxt)
- return -ENOMEM;
-
- /* Here we are configuring what the VSI should do with the VLAN tag in
- * the Rx packet. We can either leave the tag in the packet or put it in
- * the Rx descriptor.
- */
- if (ena)
- /* Strip VLAN tag from Rx packet and put it in the desc */
- ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
- else
- /* Disable stripping. Leave tag in packet */
- ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
-
- /* Allow all packets untagged/tagged */
- ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
-
- ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
-
- ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
- if (ret) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %s\n",
- ena, ret, ice_aq_str(hw->adminq.sq_last_status));
- goto out;
- }
-
- vsi->info.vlan_flags = ctxt->info.vlan_flags;
-out:
- kfree(ctxt);
- return ret;
-}
-
-/**
* ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings
* @vsi: the VSI whose rings are to be enabled
*
@@ -2327,61 +2214,6 @@ bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
return (vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA);
}
-/**
- * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
- * @vsi: VSI to enable or disable VLAN pruning on
- * @ena: set to true to enable VLAN pruning and false to disable it
- *
- * returns 0 if VSI is updated, negative otherwise
- */
-int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
-{
- struct ice_vsi_ctx *ctxt;
- struct ice_pf *pf;
- int status;
-
- if (!vsi)
- return -EINVAL;
-
- /* Don't enable VLAN pruning if the netdev is currently in promiscuous
- * mode. VLAN pruning will be enabled when the interface exits
- * promiscuous mode if any VLAN filters are active.
- */
- if (vsi->netdev && vsi->netdev->flags & IFF_PROMISC && ena)
- return 0;
-
- pf = vsi->back;
- ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
- if (!ctxt)
- return -ENOMEM;
-
- ctxt->info = vsi->info;
-
- if (ena)
- ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- else
- ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
-
- ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
-
- status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
- if (status) {
- netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %s\n",
- ena ? "En" : "Dis", vsi->idx, vsi->vsi_num,
- status, ice_aq_str(pf->hw.adminq.sq_last_status));
- goto err_out;
- }
-
- vsi->info.sw_flags2 = ctxt->info.sw_flags2;
-
- kfree(ctxt);
- return 0;
-
-err_out:
- kfree(ctxt);
- return -EIO;
-}
-
static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
{
if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
@@ -2416,7 +2248,7 @@ ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
}
if (vsi->type == ICE_VSI_VF) {
- struct ice_vf *vf = &vsi->back->vf[vsi->vf_id];
+ struct ice_vf *vf = vsi->vf;
q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector);
} else {
@@ -2601,9 +2433,8 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
* @pf: board private structure
* @pi: pointer to the port_info instance
* @vsi_type: VSI type
- * @vf_id: defines VF ID to which this VSI connects. This field is meant to be
- * used only for ICE_VSI_VF VSI type. For other VSI types, should
- * fill-in ICE_INVAL_VFID as input.
+ * @vf: pointer to VF to which this VSI connects. This field is used primarily
+ * for the ICE_VSI_VF type. Other VSI types should pass NULL.
* @ch: ptr to channel
*
* This allocates the sw VSI structure and its queue resources.
@@ -2613,7 +2444,8 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
*/
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch)
+ enum ice_vsi_type vsi_type, struct ice_vf *vf,
+ struct ice_channel *ch)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct device *dev = ice_pf_to_dev(pf);
@@ -2621,11 +2453,11 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
int ret, i;
if (vsi_type == ICE_VSI_CHNL)
- vsi = ice_vsi_alloc(pf, vsi_type, ch, ICE_INVAL_VFID);
+ vsi = ice_vsi_alloc(pf, vsi_type, ch, NULL);
else if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL)
- vsi = ice_vsi_alloc(pf, vsi_type, NULL, vf_id);
+ vsi = ice_vsi_alloc(pf, vsi_type, NULL, vf);
else
- vsi = ice_vsi_alloc(pf, vsi_type, NULL, ICE_INVAL_VFID);
+ vsi = ice_vsi_alloc(pf, vsi_type, NULL, NULL);
if (!vsi) {
dev_err(dev, "could not allocate VSI\n");
@@ -2637,9 +2469,6 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (vsi->type == ICE_VSI_PF)
vsi->ethtype = ETH_P_PAUSE;
- if (vsi->type == ICE_VSI_VF || vsi->type == ICE_VSI_CTRL)
- vsi->vf_id = vf_id;
-
ice_alloc_fd_res(vsi);
if (vsi_type != ICE_VSI_CHNL) {
@@ -2661,6 +2490,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (ret)
goto unroll_get_qs;
+ ice_vsi_init_vlan_ops(vsi);
+
switch (vsi->type) {
case ICE_VSI_CTRL:
case ICE_VSI_SWITCHDEV_CTRL:
@@ -2681,17 +2512,6 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (ret)
goto unroll_vector_base;
- /* Always add VLAN ID 0 switch rule by default. This is needed
- * in order to allow all untagged and 0 tagged priority traffic
- * if Rx VLAN pruning is enabled. Also there are cases where we
- * don't get the call to add VLAN 0 via ice_vlan_rx_add_vid()
- * so this handles those cases (i.e. adding the PF to a bridge
- * without the 8021q module loaded).
- */
- ret = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
- if (ret)
- goto unroll_clear_rings;
-
ice_vsi_map_rings_to_vectors(vsi);
/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
@@ -3069,6 +2889,37 @@ void ice_napi_del(struct ice_vsi *vsi)
}
/**
+ * ice_free_vf_ctrl_res - Free the VF control VSI resource
+ * @pf: pointer to PF structure
+ * @vsi: the VSI to free resources for
+ *
+ * Check if the VF control VSI resource is still in use. If no VF is using it
+ * any more, release the VSI resource. Otherwise, leave it to be cleaned up
+ * once no other VF uses it.
+ */
+static void ice_free_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf) {
+ if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
+ rcu_read_unlock();
+ return;
+ }
+ }
+ rcu_read_unlock();
+
+ /* No other VFs left that have control VSI. It is now safe to reclaim
+ * SW interrupts back to the common pool.
+ */
+ ice_free_res(pf->irq_tracker, vsi->base_vector,
+ ICE_RES_VF_CTRL_VEC_ID);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+}
+
+/**
* ice_vsi_release - Delete a VSI and free its resources
* @vsi: the VSI being removed
*
@@ -3111,23 +2962,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
* many interrupts each VF needs. SR-IOV MSIX resources are also
* cleared in the same manner.
*/
- if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) {
- int i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI)
- break;
- }
- if (i == pf->num_alloc_vfs) {
- /* No other VFs left that have control VSI, reclaim SW
- * interrupts back to the common pool
- */
- ice_free_res(pf->irq_tracker, vsi->base_vector,
- ICE_RES_VF_CTRL_VEC_ID);
- pf->num_avail_sw_msix += vsi->num_q_vectors;
- }
+ if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
+ ice_free_vf_ctrl_res(pf, vsi);
} else if (vsi->type != ICE_VSI_VF) {
/* reclaim SW interrupts back to the common pool */
ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
@@ -3311,7 +3147,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_coalesce_stored *coalesce;
int prev_num_q_vectors = 0;
- struct ice_vf *vf = NULL;
enum ice_vsi_type vtype;
struct ice_pf *pf;
int ret, i;
@@ -3321,8 +3156,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
pf = vsi->back;
vtype = vsi->type;
- if (vtype == ICE_VSI_VF)
- vf = &pf->vf[vsi->vf_id];
+ if (WARN_ON(vtype == ICE_VSI_VF && !vsi->vf))
+ return -EINVAL;
+
+ ice_vsi_init_vlan_ops(vsi);
coalesce = kcalloc(vsi->num_q_vectors,
sizeof(struct ice_coalesce_stored), GFP_KERNEL);
@@ -3359,9 +3196,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
ice_vsi_clear_rings(vsi);
ice_vsi_free_arrays(vsi);
if (vtype == ICE_VSI_VF)
- ice_vsi_set_num_qs(vsi, vf->vf_id);
+ ice_vsi_set_num_qs(vsi, vsi->vf);
else
- ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
+ ice_vsi_set_num_qs(vsi, NULL);
ret = ice_vsi_alloc_arrays(vsi);
if (ret < 0)
@@ -4123,9 +3960,9 @@ int ice_set_link(struct ice_vsi *vsi, bool ena)
*/
if (status == -EIO) {
if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
- dev_warn(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
- (ena ? "ON" : "OFF"), status,
- ice_aq_str(hw->adminq.sq_last_status));
+ dev_dbg(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
+ (ena ? "ON" : "OFF"), status,
+ ice_aq_str(hw->adminq.sq_last_status));
} else if (status) {
dev_err(dev, "can't set link to %s, err %d aq_err %s\n",
(ena ? "ON" : "OFF"), status,
@@ -4137,6 +3974,120 @@ int ice_set_link(struct ice_vsi *vsi, bool ena)
}
/**
+ * ice_vsi_add_vlan_zero - add VLAN 0 filter(s) for this VSI
+ * @vsi: VSI used to add VLAN filters
+ *
+ * In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are based
+ * on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x88a8) doesn't
+ * matter. In Double VLAN Mode (DVM), outer/single VLAN filters via
+ * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID.
+ *
+ * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged traffic
+ * when VLAN pruning is enabled. Also, this handles VLAN 0 priority tagged
+ * traffic in SVM, since the VLAN TPID isn't part of filtering.
+ *
+ * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be
+ * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID is
+ * part of filtering.
+ */
+int ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ struct ice_vlan vlan;
+ int err;
+
+ vlan = ICE_VLAN(0, 0, 0);
+ err = vlan_ops->add_vlan(vsi, &vlan);
+ if (err && err != -EEXIST)
+ return err;
+
+ /* in SVM both VLAN 0 filters are identical */
+ if (!ice_is_dvm_ena(&vsi->back->hw))
+ return 0;
+
+ vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
+ err = vlan_ops->add_vlan(vsi, &vlan);
+ if (err && err != -EEXIST)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_vsi_del_vlan_zero - delete VLAN 0 filter(s) for this VSI
+ * @vsi: VSI used to add VLAN filters
+ *
+ * Delete the VLAN 0 filters in the same manner that they were added in
+ * ice_vsi_add_vlan_zero.
+ */
+int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ struct ice_vlan vlan;
+ int err;
+
+ vlan = ICE_VLAN(0, 0, 0);
+ err = vlan_ops->del_vlan(vsi, &vlan);
+ if (err && err != -EEXIST)
+ return err;
+
+ /* in SVM both VLAN 0 filters are identical */
+ if (!ice_is_dvm_ena(&vsi->back->hw))
+ return 0;
+
+ vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
+ err = vlan_ops->del_vlan(vsi, &vlan);
+ if (err && err != -EEXIST)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_vsi_num_zero_vlans - get number of VLAN 0 filters based on VLAN mode
+ * @vsi: VSI used to get the VLAN mode
+ *
+ * If DVM is enabled then 2 VLAN 0 filters are added, else if SVM is enabled
+ * then 1 VLAN 0 filter is added. See ice_vsi_add_vlan_zero for more details.
+ */
+static u16 ice_vsi_num_zero_vlans(struct ice_vsi *vsi)
+{
+#define ICE_DVM_NUM_ZERO_VLAN_FLTRS 2
+#define ICE_SVM_NUM_ZERO_VLAN_FLTRS 1
+ /* no VLAN 0 filter is created when a port VLAN is active */
+ if (vsi->type == ICE_VSI_VF) {
+ if (WARN_ON(!vsi->vf))
+ return 0;
+
+ if (ice_vf_is_port_vlan_ena(vsi->vf))
+ return 0;
+ }
+
+ if (ice_is_dvm_ena(&vsi->back->hw))
+ return ICE_DVM_NUM_ZERO_VLAN_FLTRS;
+ else
+ return ICE_SVM_NUM_ZERO_VLAN_FLTRS;
+}
+
+/**
+ * ice_vsi_has_non_zero_vlans - check if VSI has any non-zero VLANs
+ * @vsi: VSI used to determine if any non-zero VLANs have been added
+ */
+bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi)
+{
+ return (vsi->num_vlan > ice_vsi_num_zero_vlans(vsi));
+}
+
+/**
+ * ice_vsi_num_non_zero_vlans - get the number of non-zero VLANs for this VSI
+ * @vsi: VSI used to get the number of non-zero VLANs added
+ */
+u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi)
+{
+ return (vsi->num_vlan - ice_vsi_num_zero_vlans(vsi));
+}
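As a short worked example of the accounting above, assuming DVM is enabled and no port VLAN is active on the VSI:

/* After ice_vsi_add_vlan_zero() the VSI carries two VLAN 0 filters in DVM
 * (the TPID-less filter and the 0x8100 + VID 0 filter), so with one user
 * VLAN added vsi->num_vlan == 3 and:
 *
 *   ice_vsi_num_zero_vlans(vsi)      -> 2
 *   ice_vsi_num_non_zero_vlans(vsi)  -> 3 - 2 == 1
 *   ice_vsi_has_non_zero_vlans(vsi)  -> true
 *
 * which is what the promiscuous-mode paths in ice_main.c key off of below.
 */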
+
+/**
* ice_is_feature_supported
* @pf: pointer to the struct ice_pf instance
* @f: feature enum to be checked
@@ -4190,8 +4141,11 @@ void ice_init_feature_support(struct ice_pf *pf)
case ICE_DEV_ID_E810C_QSFP:
case ICE_DEV_ID_E810C_SFP:
ice_set_feature_support(pf, ICE_F_DSCP);
- if (ice_is_e810t(&pf->hw))
+ if (ice_is_e810t(&pf->hw)) {
ice_set_feature_support(pf, ICE_F_SMA_CTRL);
+ if (ice_gnss_is_gps_present(&pf->hw))
+ ice_set_feature_support(pf, ICE_F_GNSS);
+ }
break;
default:
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index b2ed189527d6..0095329949d4 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -5,6 +5,7 @@
#define _ICE_LIB_H_
#include "ice.h"
+#include "ice_vlan.h"
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
@@ -22,15 +23,6 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
void ice_vsi_cfg_msix(struct ice_vsi *vsi);
-int
-ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action);
-
-int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid);
-
-int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi);
-
-int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena);
-
int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi);
int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi);
@@ -45,8 +37,6 @@ int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi);
-int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena);
-
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
int ice_set_link(struct ice_vsi *vsi, bool ena);
@@ -62,7 +52,8 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch);
+ enum ice_vsi_type vsi_type, struct ice_vf *vf,
+ struct ice_channel *ch);
void ice_napi_del(struct ice_vsi *vsi);
@@ -110,7 +101,7 @@ void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
int ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
bool ice_is_safe_mode(struct ice_pf *pf);
-bool ice_is_aux_ena(struct ice_pf *pf);
+bool ice_is_rdma_ena(struct ice_pf *pf);
bool ice_is_dflt_vsi_in_use(struct ice_sw *sw);
bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
@@ -132,7 +123,10 @@ void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx);
void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx);
void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx);
-
+int ice_vsi_add_vlan_zero(struct ice_vsi *vsi);
+int ice_vsi_del_vlan_zero(struct ice_vsi *vsi);
+bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi);
+u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi);
bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f);
void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f);
void ice_init_feature_support(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index b7e8744b0c0a..b588d7995631 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -21,6 +21,7 @@
#include "ice_trace.h"
#include "ice_eswitch.h"
#include "ice_tc_lib.h"
+#include "ice_vsi_vlan_ops.h"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
@@ -47,6 +48,21 @@ static DEFINE_IDA(ice_aux_ida);
DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);
+/**
+ * ice_hw_to_dev - Get device pointer from the hardware structure
+ * @hw: pointer to the device HW structure
+ *
+ * Used to access the device pointer from compilation units which can't easily
+ * include the definition of struct ice_pf without leading to circular header
+ * dependencies.
+ */
+struct device *ice_hw_to_dev(struct ice_hw *hw)
+{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+
+ return &pf->pdev->dev;
+}
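A minimal, hypothetical use from a compilation unit that cannot include ice.h might look like:

	/* e.g. from low-level HW code that only sees struct ice_hw */
	dev_warn(ice_hw_to_dev(hw), "admin queue command timed out\n");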
+
static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
@@ -244,7 +260,7 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
if (vsi->type != ICE_VSI_PF)
return 0;
- if (vsi->num_vlan > 1)
+ if (ice_vsi_has_non_zero_vlans(vsi))
status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
else
status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
@@ -264,7 +280,7 @@ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
if (vsi->type != ICE_VSI_PF)
return 0;
- if (vsi->num_vlan > 1)
+ if (ice_vsi_has_non_zero_vlans(vsi))
status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
else
status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
@@ -279,6 +295,7 @@ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
*/
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
+ struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
struct device *dev = ice_pf_to_dev(vsi->back);
struct net_device *netdev = vsi->netdev;
bool promisc_forced_on = false;
@@ -352,7 +369,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
/* check for changes in promiscuous modes */
if (changed_flags & IFF_ALLMULTI) {
if (vsi->current_netdev_flags & IFF_ALLMULTI) {
- if (vsi->num_vlan > 1)
+ if (ice_vsi_has_non_zero_vlans(vsi))
promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
else
promisc_m = ICE_MCAST_PROMISC_BITS;
@@ -366,7 +383,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
} else {
/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
- if (vsi->num_vlan > 1)
+ if (ice_vsi_has_non_zero_vlans(vsi))
promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
else
promisc_m = ICE_MCAST_PROMISC_BITS;
@@ -396,7 +413,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
goto out_promisc;
}
err = 0;
- ice_cfg_vlan_pruning(vsi, false);
+ vlan_ops->dis_rx_filtering(vsi);
}
} else {
/* Clear Rx filter to remove traffic from wire */
@@ -409,8 +426,9 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
IFF_PROMISC;
goto out_promisc;
}
- if (vsi->num_vlan > 1)
- ice_cfg_vlan_pruning(vsi, true);
+ if (vsi->current_netdev_flags &
+ NETIF_F_HW_VLAN_CTAG_FILTER)
+ vlan_ops->ena_rx_filtering(vsi);
}
}
}
@@ -502,7 +520,8 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
struct ice_hw *hw = &pf->hw;
struct ice_vsi *vsi;
- unsigned int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);
@@ -517,8 +536,10 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_vc_notify_reset(pf);
/* Disable VFs until reset is completed */
- ice_for_each_vf(pf, i)
- ice_set_vf_state_qs_dis(&pf->vf[i]);
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf)
+ ice_set_vf_state_qs_dis(vf);
+ mutex_unlock(&pf->vfs.table_lock);
if (ice_is_eswitch_mode_switchdev(pf)) {
if (reset_type != ICE_RESET_PFR)
@@ -565,6 +586,9 @@ skip:
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
ice_ptp_prepare_for_reset(pf);
+ if (ice_is_feature_supported(pf, ICE_F_GNSS))
+ ice_gnss_exit(pf);
+
if (hw->port_info)
ice_sched_clear_port(hw->port_info);
@@ -610,7 +634,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
clear_bit(ICE_PFR_REQ, pf->state);
wake_up(&pf->reset_wait_queue);
- ice_reset_all_vfs(pf, true);
+ ice_reset_all_vfs(pf);
}
}
@@ -661,7 +685,7 @@ static void ice_reset_subtask(struct ice_pf *pf)
clear_bit(ICE_CORER_REQ, pf->state);
clear_bit(ICE_GLOBR_REQ, pf->state);
wake_up(&pf->reset_wait_queue);
- ice_reset_all_vfs(pf, true);
+ ice_reset_all_vfs(pf);
}
return;
@@ -1660,7 +1684,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- unsigned int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
u32 reg;
if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
@@ -1748,47 +1773,46 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
/* Check to see if one of the VFs caused an MDD event, and then
* increment counters and set print pending
*/
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- reg = rd32(hw, VP_MDET_TX_PQM(i));
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
+ reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
if (reg & VP_MDET_TX_PQM_VALID_M) {
- wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
+ wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
vf->mdd_tx_events.count++;
set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
- i);
+ vf->vf_id);
}
- reg = rd32(hw, VP_MDET_TX_TCLAN(i));
+ reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
if (reg & VP_MDET_TX_TCLAN_VALID_M) {
- wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
+ wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
vf->mdd_tx_events.count++;
set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
- i);
+ vf->vf_id);
}
- reg = rd32(hw, VP_MDET_TX_TDPU(i));
+ reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
if (reg & VP_MDET_TX_TDPU_VALID_M) {
- wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
+ wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
vf->mdd_tx_events.count++;
set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
- i);
+ vf->vf_id);
}
- reg = rd32(hw, VP_MDET_RX(i));
+ reg = rd32(hw, VP_MDET_RX(vf->vf_id));
if (reg & VP_MDET_RX_VALID_M) {
- wr32(hw, VP_MDET_RX(i), 0xFFFF);
+ wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
vf->mdd_rx_events.count++;
set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_rx_err(pf))
dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
- i);
+ vf->vf_id);
/* Since the queue is disabled on VF Rx MDD events, the
* PF can be configured to reset the VF through ethtool
@@ -1799,12 +1823,11 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
* reset, so print the event prior to reset.
*/
ice_print_vf_rx_mdd_event(vf);
- mutex_lock(&pf->vf[i].cfg_lock);
- ice_reset_vf(&pf->vf[i], false);
- mutex_unlock(&pf->vf[i].cfg_lock);
+ ice_reset_vf(vf, ICE_VF_RESET_LOCK);
}
}
}
+ mutex_unlock(&pf->vfs.table_lock);
ice_print_vfs_mdd_events(pf);
}
@@ -2255,6 +2278,19 @@ static void ice_service_task(struct work_struct *work)
return;
}
+ if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
+ struct iidc_event *event;
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (event) {
+ set_bit(IIDC_EVENT_CRIT_ERR, event->type);
+ /* report the entire OICR value to AUX driver */
+ swap(event->reg, pf->oicr_err_reg);
+ ice_send_event_to_aux(pf, event);
+ kfree(event);
+ }
+ }
+
if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) {
/* Plug aux device per request */
ice_plug_aux_dev(pf);
@@ -2454,7 +2490,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
/* skip this unused q_vector */
continue;
}
- if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID)
+ if (vsi->type == ICE_VSI_CTRL && vsi->vf)
err = devm_request_irq(dev, irq_num, vsi->irq_handler,
IRQF_SHARED, q_vector->name,
q_vector);
@@ -2521,10 +2557,10 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
xdp_ring->vsi = vsi;
xdp_ring->netdev = NULL;
- xdp_ring->next_dd = ICE_TX_THRESH - 1;
- xdp_ring->next_rs = ICE_TX_THRESH - 1;
xdp_ring->dev = dev;
xdp_ring->count = vsi->num_tx_desc;
+ xdp_ring->next_dd = ICE_RING_QUARTER(xdp_ring) - 1;
+ xdp_ring->next_rs = ICE_RING_QUARTER(xdp_ring) - 1;
WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
if (ice_setup_tx_ring(xdp_ring))
goto free_xdp_rings;
@@ -3041,17 +3077,9 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
#define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
if (oicr & ICE_AUX_CRIT_ERR) {
- struct iidc_event *event;
-
+ pf->oicr_err_reg |= oicr;
+ set_bit(ICE_AUX_ERR_PENDING, pf->state);
ena_mask &= ~ICE_AUX_CRIT_ERR;
- event = kzalloc(sizeof(*event), GFP_ATOMIC);
- if (event) {
- set_bit(IIDC_EVENT_CRIT_ERR, event->type);
- /* report the entire OICR value to AUX driver */
- event->reg = oicr;
- ice_send_event_to_aux(pf, event);
- kfree(event);
- }
}
/* Report any remaining unexpected interrupts */
@@ -3256,6 +3284,7 @@ static void ice_set_ops(struct net_device *netdev)
static void ice_set_netdev_features(struct net_device *netdev)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
netdev_features_t csumo_features;
netdev_features_t vlano_features;
netdev_features_t dflt_features;
@@ -3282,6 +3311,10 @@ static void ice_set_netdev_features(struct net_device *netdev)
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
+ /* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
+ if (is_dvm_ena)
+ vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;
+
tso_features = NETIF_F_TSO |
NETIF_F_TSO_ECN |
NETIF_F_TSO6 |
@@ -3313,6 +3346,15 @@ static void ice_set_netdev_features(struct net_device *netdev)
tso_features;
netdev->vlan_features |= dflt_features | csumo_features |
tso_features;
+
+ /* advertise support but don't enable by default since only one type of
+ * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
+ * type is turned on, the other has to be turned off. This is enforced by the
+ * ice_fix_features() ndo callback.
+ */
+ if (is_dvm_ena)
+ netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_STAG_TX;
}
/**
@@ -3387,14 +3429,14 @@ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID, NULL);
+ return ice_vsi_setup(pf, pi, ICE_VSI_PF, NULL, NULL);
}
static struct ice_vsi *
ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_channel *ch)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, ICE_INVAL_VFID, ch);
+ return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, NULL, ch);
}
/**
@@ -3408,7 +3450,7 @@ ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
static struct ice_vsi *
ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID, NULL);
+ return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, NULL, NULL);
}
/**
@@ -3422,40 +3464,37 @@ ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
struct ice_vsi *
ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID, NULL);
+ return ice_vsi_setup(pf, pi, ICE_VSI_LB, NULL, NULL);
}
/**
* ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
* @netdev: network interface to be adjusted
- * @proto: unused protocol
+ * @proto: VLAN TPID
* @vid: VLAN ID to be added
*
* net_device_ops implementation for adding VLAN IDs
*/
static int
-ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
- u16 vid)
+ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi_vlan_ops *vlan_ops;
struct ice_vsi *vsi = np->vsi;
+ struct ice_vlan vlan;
int ret;
/* VLAN 0 is added by default during load/reset */
if (!vid)
return 0;
- /* Enable VLAN pruning when a VLAN other than 0 is added */
- if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
- ret = ice_cfg_vlan_pruning(vsi, true);
- if (ret)
- return ret;
- }
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
* packets aren't pruned by the device's internal switch on Rx
*/
- ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
+ vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
+ ret = vlan_ops->add_vlan(vsi, &vlan);
if (!ret)
set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
@@ -3465,36 +3504,36 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
/**
* ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
* @netdev: network interface to be adjusted
- * @proto: unused protocol
+ * @proto: VLAN TPID
* @vid: VLAN ID to be removed
*
* net_device_ops implementation for removing VLAN IDs
*/
static int
-ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
- u16 vid)
+ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi_vlan_ops *vlan_ops;
struct ice_vsi *vsi = np->vsi;
+ struct ice_vlan vlan;
int ret;
/* don't allow removal of VLAN 0 */
if (!vid)
return 0;
- /* Make sure ice_vsi_kill_vlan is successful before updating VLAN
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+
+ /* Make sure VLAN delete is successful before updating VLAN
* information
*/
- ret = ice_vsi_kill_vlan(vsi, vid);
+ vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
+ ret = vlan_ops->del_vlan(vsi, &vlan);
if (ret)
return ret;
- /* Disable pruning when VLAN 0 is the only VLAN rule */
- if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
- ret = ice_cfg_vlan_pruning(vsi, false);
-
set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
- return ret;
+ return 0;
}
/**
@@ -3563,12 +3602,17 @@ static int ice_tc_indir_block_register(struct ice_vsi *vsi)
static int ice_setup_pf_sw(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
+ bool dvm = ice_is_dvm_ena(&pf->hw);
struct ice_vsi *vsi;
int status;
if (ice_is_reset_in_progress(pf->state))
return -EBUSY;
+ status = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
+ if (status)
+ return -EIO;
+
vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
if (!vsi)
return -ENOMEM;
@@ -3679,6 +3723,7 @@ static void ice_deinit_pf(struct ice_pf *pf)
mutex_destroy(&pf->sw_mutex);
mutex_destroy(&pf->tc_mutex);
mutex_destroy(&pf->avail_q_mutex);
+ mutex_destroy(&pf->vfs.table_lock);
if (pf->avail_txqs) {
bitmap_free(pf->avail_txqs);
@@ -3703,19 +3748,16 @@ static void ice_set_pf_caps(struct ice_pf *pf)
struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
- clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
- if (func_caps->common_cap.rdma) {
+ if (func_caps->common_cap.rdma)
set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
- set_bit(ICE_FLAG_AUX_ENA, pf->flags);
- }
clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
if (func_caps->common_cap.dcb)
set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
if (func_caps->common_cap.sr_iov_1_1) {
set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
- pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
- ICE_MAX_VF_COUNT);
+ pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
+ ICE_MAX_SRIOV_VFS);
}
clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
if (func_caps->common_cap.rss_table_size)
@@ -3781,6 +3823,9 @@ static int ice_init_pf(struct ice_pf *pf)
return -ENOMEM;
}
+ mutex_init(&pf->vfs.table_lock);
+ hash_init(pf->vfs.table);
+
return 0;
}
@@ -3835,7 +3880,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
v_left -= needed;
/* reserve vectors for RDMA auxiliary driver */
- if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
+ if (ice_is_rdma_ena(pf)) {
needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
if (v_left < needed)
goto no_hw_vecs_left_err;
@@ -3876,7 +3921,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
int v_remain = v_actual - v_other;
int v_rdma = 0, v_min_rdma = 0;
- if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
+ if (ice_is_rdma_ena(pf)) {
/* Need at least 1 interrupt in addition to
* AEQ MSIX
*/
@@ -3910,7 +3955,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
pf->num_lan_msix);
- if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ if (ice_is_rdma_ena(pf))
dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
pf->num_rdma_msix);
}
@@ -4090,8 +4135,8 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
/* allow all VLANs on Tx and don't strip on Rx */
- ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
- ICE_AQ_VSI_VLAN_EMOD_NOTHING;
+ ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
+ ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
@@ -4100,7 +4145,7 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
} else {
vsi->info.sec_flags = ctxt->info.sec_flags;
vsi->info.sw_flags2 = ctxt->info.sw_flags2;
- vsi->info.vlan_flags = ctxt->info.vlan_flags;
+ vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
}
kfree(ctxt);
@@ -4485,8 +4530,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
/* set up for high or low DMA */
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
- if (err)
- err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (err) {
dev_err(dev, "DMA configuration failed: 0x%x\n", err);
return err;
@@ -4709,6 +4752,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
ice_ptp_init(pf);
+ if (ice_is_feature_supported(pf, ICE_F_GNSS))
+ ice_gnss_init(pf);
+
/* Note: Flow director init failure is non-fatal to load */
if (ice_init_fdir(pf))
dev_err(dev, "could not initialize flow director\n");
@@ -4738,7 +4784,7 @@ probe_done:
/* ready to go, so clear down state bit */
clear_bit(ICE_DOWN, pf->state);
- if (ice_is_aux_ena(pf)) {
+ if (ice_is_rdma_ena(pf)) {
pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
if (pf->aux_idx < 0) {
dev_err(dev, "Failed to allocate device ID for AUX driver\n");
@@ -4883,6 +4929,8 @@ static void ice_remove(struct pci_dev *pdev)
ice_deinit_lag(pf);
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
ice_ptp_release(pf);
+ if (ice_is_feature_supported(pf, ICE_F_GNSS))
+ ice_gnss_exit(pf);
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
ice_setup_mc_magic_wake(pf);
@@ -5596,6 +5644,194 @@ ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
return err;
}
+#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_HW_VLAN_STAG_RX | \
+ NETIF_F_HW_VLAN_STAG_TX)
+
+#define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
+ NETIF_F_HW_VLAN_STAG_FILTER)
+
+/**
+ * ice_fix_features - fix the netdev features flags based on device limitations
+ * @netdev: ptr to the netdev that flags are being fixed on
+ * @features: features that need to be checked and possibly fixed
+ *
+ * Make sure any fixups are made to features in this callback. This enables the
+ * driver to not have to check unsupported configurations throughout the driver
+ * because that's the responsibility of this callback.
+ *
+ * Single VLAN Mode (SVM) Supported Features:
+ * NETIF_F_HW_VLAN_CTAG_FILTER
+ * NETIF_F_HW_VLAN_CTAG_RX
+ * NETIF_F_HW_VLAN_CTAG_TX
+ *
+ * Double VLAN Mode (DVM) Supported Features:
+ * NETIF_F_HW_VLAN_CTAG_FILTER
+ * NETIF_F_HW_VLAN_CTAG_RX
+ * NETIF_F_HW_VLAN_CTAG_TX
+ *
+ * NETIF_F_HW_VLAN_STAG_FILTER
+ * NETIF_F_HW_VLAN_STAG_RX
+ * NETIF_F_HW_VLAN_STAG_TX
+ *
+ * Features that need fixing:
+ * Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
+ * These are mutually exclusive as the VSI context cannot support multiple
+ * VLAN ethertypes simultaneously for stripping and/or insertion. If this
+ * is not done, then default to clearing the requested STAG offload
+ * settings.
+ *
+ * All supported filtering has to be enabled or disabled together. For
+ * example, in DVM, CTAG and STAG filtering have to be enabled and disabled
+ * together. If this is not done, then default to VLAN filtering disabled.
+ * These are mutually exclusive as there is currently no way to
+ * enable/disable VLAN filtering based on VLAN ethertype when using VLAN
+ * prune rules.
+ */
+static netdev_features_t
+ice_fix_features(struct net_device *netdev, netdev_features_t features)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ netdev_features_t supported_vlan_filtering;
+ netdev_features_t requested_vlan_filtering;
+ struct ice_vsi *vsi = np->vsi;
+
+ requested_vlan_filtering = features & NETIF_VLAN_FILTERING_FEATURES;
+
+ /* make sure supported_vlan_filtering works for both SVM and DVM */
+ supported_vlan_filtering = NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (ice_is_dvm_ena(&vsi->back->hw))
+ supported_vlan_filtering |= NETIF_F_HW_VLAN_STAG_FILTER;
+
+ if (requested_vlan_filtering &&
+ requested_vlan_filtering != supported_vlan_filtering) {
+ if (requested_vlan_filtering & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ netdev_warn(netdev, "cannot support requested VLAN filtering settings, enabling all supported VLAN filtering settings\n");
+ features |= supported_vlan_filtering;
+ } else {
+ netdev_warn(netdev, "cannot support requested VLAN filtering settings, clearing all supported VLAN filtering settings\n");
+ features &= ~supported_vlan_filtering;
+ }
+ }
+
+ if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
+ (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
+ netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
+ features &= ~(NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_STAG_TX);
+ }
+
+ return features;
+}
+
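As an illustration (editorial, not part of the patch), a minimal sketch of how the fixup behaves for a request that mixes CTAG and STAG stripping; netdev is assumed to be an ice-backed net_device:

	netdev_features_t requested, fixed;

	/* hypothetical request asking for both CTAG and STAG stripping */
	requested = NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
	fixed = ice_fix_features(netdev, requested);

	/* fixed keeps NETIF_F_HW_VLAN_CTAG_RX and has both STAG offload bits
	 * cleared, since only one VLAN ethertype can be offloaded at a time
	 */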
+/**
+ * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
+ * @vsi: PF's VSI
+ * @features: features used to determine VLAN offload settings
+ *
+ * First, determine the vlan_ethertype based on the VLAN offload bits in
+ * features. Then determine if stripping and insertion should be enabled or
+ * disabled. Finally enable or disable VLAN stripping and insertion.
+ */
+static int
+ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
+{
+ bool enable_stripping = true, enable_insertion = true;
+ struct ice_vsi_vlan_ops *vlan_ops;
+ int strip_err = 0, insert_err = 0;
+ u16 vlan_ethertype = 0;
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+
+ if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
+ vlan_ethertype = ETH_P_8021AD;
+ else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
+ vlan_ethertype = ETH_P_8021Q;
+
+ if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
+ enable_stripping = false;
+ if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
+ enable_insertion = false;
+
+ if (enable_stripping)
+ strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
+ else
+ strip_err = vlan_ops->dis_stripping(vsi);
+
+ if (enable_insertion)
+ insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
+ else
+ insert_err = vlan_ops->dis_insertion(vsi);
+
+ if (strip_err || insert_err)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
+ * @vsi: PF's VSI
+ * @features: features used to determine VLAN filtering settings
+ *
+ * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
+ * features.
+ */
+static int
+ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
+{
+ struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ int err = 0;
+
+ /* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
+ * if either bit is set
+ */
+ if (features &
+ (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
+ err = vlan_ops->ena_rx_filtering(vsi);
+ else
+ err = vlan_ops->dis_rx_filtering(vsi);
+
+ return err;
+}
+
+/**
+ * ice_set_vlan_features - set VLAN settings based on suggested feature set
+ * @netdev: ptr to the netdev being adjusted
+ * @features: the feature set that the stack is suggesting
+ *
+ * Only update VLAN settings if the requested_vlan_features are different from
+ * the current_vlan_features.
+ */
+static int
+ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
+{
+ netdev_features_t current_vlan_features, requested_vlan_features;
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ int err;
+
+ current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
+ requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
+ if (current_vlan_features ^ requested_vlan_features) {
+ err = ice_set_vlan_offload_features(vsi, features);
+ if (err)
+ return err;
+ }
+
+ current_vlan_features = netdev->features &
+ NETIF_VLAN_FILTERING_FEATURES;
+ requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
+ if (current_vlan_features ^ requested_vlan_features) {
+ err = ice_set_vlan_filtering_features(vsi, features);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
/**
* ice_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
@@ -5630,26 +5866,9 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
netdev->features & NETIF_F_RXHASH)
ice_vsi_manage_rss_lut(vsi, false);
- if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
- !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
- ret = ice_vsi_manage_vlan_stripping(vsi, true);
- else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
- (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
- ret = ice_vsi_manage_vlan_stripping(vsi, false);
-
- if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
- !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
- ret = ice_vsi_manage_vlan_insertion(vsi);
- else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
- (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
- ret = ice_vsi_manage_vlan_insertion(vsi);
-
- if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
- !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
- ret = ice_cfg_vlan_pruning(vsi, true);
- else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
- (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
- ret = ice_cfg_vlan_pruning(vsi, false);
+ ret = ice_set_vlan_features(netdev, features);
+ if (ret)
+ return ret;
if ((features & NETIF_F_NTUPLE) &&
!(netdev->features & NETIF_F_NTUPLE)) {
@@ -5673,23 +5892,26 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
else
clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
- return ret;
+ return 0;
}
/**
- * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
+ * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
* @vsi: VSI to setup VLAN properties for
*/
static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
{
- int ret = 0;
+ int err;
- if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
- ret = ice_vsi_manage_vlan_stripping(vsi, true);
- if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
- ret = ice_vsi_manage_vlan_insertion(vsi);
+ err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
+ if (err)
+ return err;
- return ret;
+ err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
+ if (err)
+ return err;
+
+ return ice_vsi_add_vlan_zero(vsi);
}
/**
@@ -5930,9 +6152,9 @@ int ice_up(struct ice_vsi *vsi)
* This function fetches stats from the ring considering the atomic operations
* that need to be performed to read u64 values on a 32-bit machine.
*/
-static void
-ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats stats,
- u64 *pkts, u64 *bytes)
+void
+ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
+ struct ice_q_stats stats, u64 *pkts, u64 *bytes)
{
unsigned int start;
@@ -6291,11 +6513,12 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
*/
int ice_down(struct ice_vsi *vsi)
{
- int i, tx_err, rx_err, link_err = 0;
+ int i, tx_err, rx_err, link_err = 0, vlan_err = 0;
WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+ vlan_err = ice_vsi_del_vlan_zero(vsi);
if (!ice_is_e810(&vsi->back->hw))
ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
netif_carrier_off(vsi->netdev);
@@ -6337,7 +6560,7 @@ int ice_down(struct ice_vsi *vsi)
ice_for_each_rxq(vsi, i)
ice_clean_rx_ring(vsi->rx_rings[i]);
- if (tx_err || rx_err || link_err) {
+ if (tx_err || rx_err || link_err || vlan_err) {
netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
return -EIO;
@@ -6647,6 +6870,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
+ bool dvm;
int err;
if (test_bit(ICE_DOWN, pf->state))
@@ -6710,6 +6934,12 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_init_ctrlq;
}
+ dvm = ice_is_dvm_ena(hw);
+
+ err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
+ if (err)
+ goto err_init_ctrlq;
+
err = ice_sched_init_port(hw->port_info);
if (err)
goto err_sched_init_port;
@@ -6746,6 +6976,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
ice_ptp_reset(pf);
+ if (ice_is_feature_supported(pf, ICE_F_GNSS))
+ ice_gnss_init(pf);
+
/* rebuild PF VSI */
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
if (err) {
@@ -8606,6 +8839,7 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_start_xmit = ice_start_xmit,
.ndo_select_queue = ice_select_queue,
.ndo_features_check = ice_features_check,
+ .ndo_fix_features = ice_fix_features,
.ndo_set_rx_mode = ice_set_rx_mode,
.ndo_set_mac_address = ice_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/intel/ice/ice_osdep.h b/drivers/net/ethernet/intel/ice/ice_osdep.h
index f57c414bc0a9..82bc54fec7f3 100644
--- a/drivers/net/ethernet/intel/ice/ice_osdep.h
+++ b/drivers/net/ethernet/intel/ice/ice_osdep.h
@@ -5,10 +5,18 @@
#define _ICE_OSDEP_H_
#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/pci_ids.h>
#ifndef CONFIG_64BIT
#include <linux/io-64-nonatomic-lo-hi.h>
#endif
+#include <net/udp_tunnel.h>
#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
#define rd32(a, reg) readl((a)->hw_addr + (reg))
@@ -24,8 +32,8 @@ struct ice_dma_mem {
size_t size;
};
-#define ice_hw_to_dev(ptr) \
- (&(container_of((ptr), struct ice_pf, hw))->pdev->dev)
+struct ice_hw;
+struct device *ice_hw_to_dev(struct ice_hw *hw);
#ifdef CONFIG_DYNAMIC_DEBUG
#define ice_debug(hw, type, fmt, args...) \
diff --git a/drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.c
new file mode 100644
index 000000000000..976a03d3bdd5
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice_vsi_vlan_ops.h"
+#include "ice_vsi_vlan_lib.h"
+#include "ice_vlan_mode.h"
+#include "ice.h"
+#include "ice_pf_vsi_vlan_ops.h"
+
+void ice_pf_vsi_init_vlan_ops(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+
+ if (ice_is_dvm_ena(&vsi->back->hw)) {
+ vlan_ops = &vsi->outer_vlan_ops;
+
+ vlan_ops->add_vlan = ice_vsi_add_vlan;
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+ vlan_ops->ena_stripping = ice_vsi_ena_outer_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_outer_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_outer_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_outer_insertion;
+ vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering;
+ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+ } else {
+ vlan_ops = &vsi->inner_vlan_ops;
+
+ vlan_ops->add_vlan = ice_vsi_add_vlan;
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+ vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+ vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering;
+ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+ }
+}
+
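A brief usage sketch (illustrative only): once these ops are installed, callers such as the netdev feature paths earlier in this patch go through the compat accessor instead of touching inner/outer ops directly:

	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	int err;

	/* resolves to the outer ops in DVM and the inner ops in SVM */
	err = vlan_ops->ena_rx_filtering(vsi);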
diff --git a/drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.h
new file mode 100644
index 000000000000..6741ec8c5f6b
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_PF_VSI_VLAN_OPS_H_
+#define _ICE_PF_VSI_VLAN_OPS_H_
+
+#include "ice_vsi_vlan_ops.h"
+
+struct ice_vsi;
+
+void ice_pf_vsi_init_vlan_ops(struct ice_vsi *vsi);
+
+#endif /* _ICE_PF_VSI_VLAN_OPS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index 695b6dd61dc2..3f64300b0e14 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -29,6 +29,7 @@ enum ice_protocol_type {
ICE_MAC_OFOS = 0,
ICE_MAC_IL,
ICE_ETYPE_OL,
+ ICE_ETYPE_IL,
ICE_VLAN_OFOS,
ICE_IPV4_OFOS,
ICE_IPV4_IL,
@@ -40,6 +41,8 @@ enum ice_protocol_type {
ICE_VXLAN,
ICE_GENEVE,
ICE_NVGRE,
+ ICE_GTP,
+ ICE_GTP_NO_PAY,
ICE_VXLAN_GPE,
ICE_SCTP_IL,
ICE_PROTOCOL_LAST
@@ -51,6 +54,8 @@ enum ice_sw_tunnel_type {
ICE_SW_TUN_VXLAN,
ICE_SW_TUN_GENEVE,
ICE_SW_TUN_NVGRE,
+ ICE_SW_TUN_GTPU,
+ ICE_SW_TUN_GTPC,
ICE_ALL_TUNNELS /* All tunnel types including NVGRE */
};
@@ -92,6 +97,7 @@ enum ice_prot_id {
#define ICE_MAC_OFOS_HW 1
#define ICE_MAC_IL_HW 4
#define ICE_ETYPE_OL_HW 9
+#define ICE_ETYPE_IL_HW 10
#define ICE_VLAN_OF_HW 16
#define ICE_VLAN_OL_HW 17
#define ICE_IPV4_OFOS_HW 32
@@ -180,6 +186,20 @@ struct ice_udp_tnl_hdr {
__be32 vni; /* only use lower 24-bits */
};
+struct ice_udp_gtp_hdr {
+ u8 flags;
+ u8 msg_type;
+ __be16 rsrvd_len;
+ __be32 teid;
+ __be16 rsrvd_seq_nbr;
+ u8 rsrvd_n_pdu_nbr;
+ u8 rsrvd_next_ext;
+ u8 rsvrd_ext_len;
+ u8 pdu_type;
+ u8 qfi;
+ u8 rsvrd;
+};
+
struct ice_nvgre_hdr {
__be16 flags;
__be16 protocol;
@@ -196,6 +216,7 @@ union ice_prot_hdr {
struct ice_sctp_hdr sctp_hdr;
struct ice_udp_tnl_hdr tnl_hdr;
struct ice_nvgre_hdr nvgre_hdr;
+ struct ice_udp_gtp_hdr gtp_hdr;
};
/* This is mapping table entry that maps every word within a given protocol
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 000c39d163a2..a1cd33273ca4 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -3,6 +3,7 @@
#include "ice.h"
#include "ice_lib.h"
+#include "ice_trace.h"
#define E810_OUT_PROP_DELAY_NS 1
@@ -2063,11 +2064,15 @@ static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
struct sk_buff *skb;
int err;
+ ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
+
err = ice_read_phy_tstamp(hw, tx->quad, phy_idx,
&raw_tstamp);
if (err)
continue;
+ ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
+
/* Check if the timestamp is invalid or stale */
if (!(raw_tstamp & ICE_PTP_TS_VALID) ||
raw_tstamp == tx->tstamps[idx].cached_tstamp)
@@ -2093,6 +2098,8 @@ static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
+ ice_trace(tx_tstamp_complete, skb, idx);
+
skb_tstamp_tx(skb, &shhwtstamps);
dev_kfree_skb_any(skb);
}
@@ -2131,6 +2138,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
tx->tstamps[idx].start = jiffies;
tx->tstamps[idx].skb = skb_get(skb);
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ ice_trace(tx_tstamp_request, skb, idx);
}
spin_unlock(&tx->lock);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index ec8450f034e6..6dff97d53d81 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -3251,6 +3251,37 @@ int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
}
/**
+ * ice_read_pca9575_reg_e810t
+ * @hw: pointer to the hw struct
+ * @offset: GPIO controller register offset
+ * @data: pointer to data to be read from the GPIO controller
+ *
+ * Read the register from the GPIO controller
+ */
+int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
+{
+ struct ice_aqc_link_topo_addr link_topo;
+ __le16 addr;
+ u16 handle;
+ int err;
+
+ memset(&link_topo, 0, sizeof(link_topo));
+
+ err = ice_get_pca9575_handle(hw, &handle);
+ if (err)
+ return err;
+
+ link_topo.handle = cpu_to_le16(handle);
+ link_topo.topo_params.node_type_ctx =
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
+ ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
+
+ addr = cpu_to_le16((u16)offset);
+
+ return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
+}
+
+/**
* ice_is_pca9575_present
* @hw: pointer to the hw struct
*
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 519e75462e67..1246e4ee4b5d 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -191,6 +191,7 @@ int ice_phy_exit_bypass_e822(struct ice_hw *hw, u8 port);
int ice_ptp_init_phy_e810(struct ice_hw *hw);
int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data);
int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data);
+int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data);
bool ice_is_pca9575_present(struct ice_hw *hw);
#define PFTSYN_SEM_BYTES 4
@@ -443,4 +444,10 @@ bool ice_is_pca9575_present(struct ice_hw *hw);
#define ICE_SMA_MAX_BIT_E810T 7
#define ICE_PCA9575_P1_OFFSET 8
+/* E810T PCA9575 IO controller registers */
+#define ICE_PCA9575_P0_IN 0x0
+
+/* E810T PCA9575 IO controller pin control */
+#define ICE_E810T_P0_GNSS_PRSNT_N BIT(4)
+
#endif /* _ICE_PTP_HW_H_ */
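For context, a hedged sketch of how the new register accessor and the P0_IN/GNSS_PRSNT_N definitions above could be combined to detect a GNSS module; the helper name and the active-low reading are assumptions, not part of this patch:

	/* hypothetical helper: the _N suffix suggests the pin is active-low,
	 * so a GNSS module is assumed present when the bit reads 0
	 */
	static bool ice_e810t_gnss_present(struct ice_hw *hw)
	{
		u8 data;

		if (ice_read_pca9575_reg_e810t(hw, ICE_PCA9575_P0_IN, &data))
			return false;

		return !(data & ICE_E810T_P0_GNSS_PRSNT_N);
	}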
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index dcc310e29300..848f2adea563 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -4,7 +4,7 @@
#include "ice.h"
#include "ice_eswitch.h"
#include "ice_devlink.h"
-#include "ice_virtchnl_pf.h"
+#include "ice_sriov.h"
#include "ice_tc_lib.h"
/**
@@ -142,6 +142,59 @@ ice_repr_get_devlink_port(struct net_device *netdev)
return &repr->vf->devlink_port;
}
+/**
+ * ice_repr_sp_stats64 - get slow path stats for port representor
+ * @dev: network interface device structure
+ * @stats: netlink stats structure
+ *
+ * RX/TX stats are swapped here to be consistent with VF stats. In the slow
+ * path, the port representor receives data when the corresponding VF is
+ * sending it (and vice versa), so TX and RX bytes/packets are effectively
+ * swapped on the port representor.
+ */
+static int
+ice_repr_sp_stats64(const struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ice_netdev_priv *np = netdev_priv(dev);
+ int vf_id = np->repr->vf->vf_id;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
+ u64 pkts, bytes;
+
+ tx_ring = np->vsi->tx_rings[vf_id];
+ ice_fetch_u64_stats_per_ring(&tx_ring->syncp, tx_ring->stats,
+ &pkts, &bytes);
+ stats->rx_packets = pkts;
+ stats->rx_bytes = bytes;
+
+ rx_ring = np->vsi->rx_rings[vf_id];
+ ice_fetch_u64_stats_per_ring(&rx_ring->syncp, rx_ring->stats,
+ &pkts, &bytes);
+ stats->tx_packets = pkts;
+ stats->tx_bytes = bytes;
+ stats->tx_dropped = rx_ring->rx_stats.alloc_page_failed +
+ rx_ring->rx_stats.alloc_buf_failed;
+
+ return 0;
+}
+
+static bool
+ice_repr_ndo_has_offload_stats(const struct net_device *dev, int attr_id)
+{
+ return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
+}
+
+static int
+ice_repr_ndo_get_offload_stats(int attr_id, const struct net_device *dev,
+ void *sp)
+{
+ if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
+ return ice_repr_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);
+
+ return -EINVAL;
+}
+
static int
ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
struct flow_cls_offload *flower)
@@ -199,6 +252,8 @@ static const struct net_device_ops ice_repr_netdev_ops = {
.ndo_start_xmit = ice_eswitch_port_start_xmit,
.ndo_get_devlink_port = ice_repr_get_devlink_port,
.ndo_setup_tc = ice_repr_setup_tc,
+ .ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
+ .ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
};
/**
@@ -284,6 +339,8 @@ static int ice_repr_add(struct ice_vf *vf)
devlink_port_type_eth_set(&vf->devlink_port, repr->netdev);
+ ice_virtchnl_set_repr_ops(vf);
+
return 0;
err_netdev:
@@ -311,6 +368,9 @@ err_alloc_rule:
*/
static void ice_repr_rem(struct ice_vf *vf)
{
+ if (!vf->repr)
+ return;
+
ice_devlink_destroy_vf_port(vf);
kfree(vf->repr->q_vector);
vf->repr->q_vector = NULL;
@@ -323,6 +383,23 @@ static void ice_repr_rem(struct ice_vf *vf)
#endif
kfree(vf->repr);
vf->repr = NULL;
+
+ ice_virtchnl_set_dflt_ops(vf);
+}
+
+/**
+ * ice_repr_rem_from_all_vfs - remove port representor for all VFs
+ * @pf: pointer to PF structure
+ */
+void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
+
+ ice_for_each_vf(pf, bkt, vf)
+ ice_repr_rem(vf);
}
/**
@@ -331,49 +408,27 @@ static void ice_repr_rem(struct ice_vf *vf)
*/
int ice_repr_add_for_all_vfs(struct ice_pf *pf)
{
+ struct ice_vf *vf;
+ unsigned int bkt;
int err;
- int i;
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
+ lockdep_assert_held(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
err = ice_repr_add(vf);
if (err)
goto err;
-
- ice_vc_change_ops_to_repr(&vf->vc_ops);
}
return 0;
err:
- for (i = i - 1; i >= 0; i--) {
- struct ice_vf *vf = &pf->vf[i];
-
- ice_repr_rem(vf);
- ice_vc_set_dflt_vf_ops(&vf->vc_ops);
- }
+ ice_repr_rem_from_all_vfs(pf);
return err;
}
/**
- * ice_repr_rem_from_all_vfs - remove port representor for all VFs
- * @pf: pointer to PF structure
- */
-void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
-{
- int i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- ice_repr_rem(vf);
- ice_vc_set_dflt_vf_ops(&vf->vc_ops);
- }
-}
-
-/**
* ice_repr_start_tx_queues - start Tx queues of port representor
* @repr: pointer to repr structure
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h
index 0c77ff050d15..378a45bfa256 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.h
+++ b/drivers/net/ethernet/intel/ice/ice_repr.h
@@ -5,7 +5,6 @@
#define _ICE_REPR_H_
#include <net/dst_metadata.h>
-#include "ice.h"
struct ice_repr {
struct ice_vsi *src_vsi;
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 52c6bac41bf7..8915a9d39e36 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -1,532 +1,1919 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */
-#include "ice_common.h"
-#include "ice_sriov.h"
+#include "ice.h"
+#include "ice_vf_lib_private.h"
+#include "ice_base.h"
+#include "ice_lib.h"
+#include "ice_fltr.h"
+#include "ice_dcb_lib.h"
+#include "ice_flow.h"
+#include "ice_eswitch.h"
+#include "ice_virtchnl_allowlist.h"
+#include "ice_flex_pipe.h"
+#include "ice_vf_vsi_vlan_ops.h"
+#include "ice_vlan.h"
/**
- * ice_aq_send_msg_to_vf
- * @hw: pointer to the hardware structure
- * @vfid: VF ID to send msg
- * @v_opcode: opcodes for VF-PF communication
- * @v_retval: return error code
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- * @cd: pointer to command details
+ * ice_free_vf_entries - Free all VF entries from the hash table
+ * @pf: pointer to the PF structure
*
- * Send message to VF driver (0x0802) using mailbox
- * queue and asynchronously sending message via
- * ice_sq_send_cmd() function
+ * Iterate over the VF hash table, removing and releasing all VF entries.
+ * Called during VF teardown or as cleanup during failed VF initialization.
*/
-int
-ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
- u8 *msg, u16 msglen, struct ice_sq_cd *cd)
+static void ice_free_vf_entries(struct ice_pf *pf)
{
- struct ice_aqc_pf_vf_msg *cmd;
- struct ice_aq_desc desc;
+ struct ice_vfs *vfs = &pf->vfs;
+ struct hlist_node *tmp;
+ struct ice_vf *vf;
+ unsigned int bkt;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
+ /* Remove all VFs from the hash table and release their main
+ * reference. Once all references to the VF are dropped, ice_put_vf()
+ * will call ice_release_vf which will remove the VF memory.
+ */
+ lockdep_assert_held(&vfs->table_lock);
- cmd = &desc.params.virt;
- cmd->id = cpu_to_le32(vfid);
+ hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
+ hash_del_rcu(&vf->entry);
+ ice_put_vf(vf);
+ }
+}
- desc.cookie_high = cpu_to_le32(v_opcode);
- desc.cookie_low = cpu_to_le32(v_retval);
+/**
+ * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
+ * @vf: invalidate this VF's VSI after freeing it
+ */
+static void ice_vf_vsi_release(struct ice_vf *vf)
+{
+ ice_vsi_release(ice_get_vf_vsi(vf));
+ ice_vf_invalidate_vsi(vf);
+}
- if (msglen)
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+/**
+ * ice_free_vf_res - Free a VF's resources
+ * @vf: pointer to the VF info
+ */
+static void ice_free_vf_res(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ int i, last_vector_idx;
+
+ /* First, disable VF's configuration API to prevent OS from
+ * accessing the VF's VSI after it's freed or invalidated.
+ */
+ clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+ ice_vf_fdir_exit(vf);
+ /* free VF control VSI */
+ if (vf->ctrl_vsi_idx != ICE_NO_VSI)
+ ice_vf_ctrl_vsi_release(vf);
+
+ /* free VSI and disconnect it from the parent uplink */
+ if (vf->lan_vsi_idx != ICE_NO_VSI) {
+ ice_vf_vsi_release(vf);
+ vf->num_mac = 0;
+ }
- return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
+ last_vector_idx = vf->first_vector_idx + pf->vfs.num_msix_per - 1;
+
+ /* clear VF MDD event information */
+ memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
+ memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
+
+ /* Disable interrupts so that VF starts in a known state */
+ for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
+ wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
+ ice_flush(&pf->hw);
+ }
+ /* reset some of the state variables keeping track of the resources */
+ clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}
/**
- * ice_conv_link_speed_to_virtchnl
- * @adv_link_support: determines the format of the returned link speed
- * @link_speed: variable containing the link_speed to be converted
+ * ice_dis_vf_mappings
+ * @vf: pointer to the VF structure
+ */
+static void ice_dis_vf_mappings(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ int first, last, v;
+ struct ice_hw *hw;
+
+ hw = &pf->hw;
+ vsi = ice_get_vf_vsi(vf);
+
+ dev = ice_pf_to_dev(pf);
+ wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
+ wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
+
+ first = vf->first_vector_idx;
+ last = first + pf->vfs.num_msix_per - 1;
+ for (v = first; v <= last; v++) {
+ u32 reg;
+
+ reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
+ GLINT_VECT2FUNC_IS_PF_M) |
+ ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
+ GLINT_VECT2FUNC_PF_NUM_M));
+ wr32(hw, GLINT_VECT2FUNC(v), reg);
+ }
+
+ if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
+ wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
+ else
+ dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
+
+ if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
+ wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
+ else
+ dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
+}
+
+/**
+ * ice_sriov_free_msix_res - Reset/free any used MSIX resources
+ * @pf: pointer to the PF structure
+ *
+ * Since no MSIX entries are taken from the pf->irq_tracker, just clear
+ * the pf->sriov_base_vector.
*
- * Convert link speed supported by HW to link speed supported by virtchnl.
- * If adv_link_support is true, then return link speed in Mbps. Else return
- * link speed as a VIRTCHNL_LINK_SPEED_* casted to a u32. Note that the caller
- * needs to cast back to an enum virtchnl_link_speed in the case where
- * adv_link_support is false, but when adv_link_support is true the caller can
- * expect the speed in Mbps.
+ * Returns 0 on success, and -EINVAL on error.
*/
-u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
+static int ice_sriov_free_msix_res(struct ice_pf *pf)
{
- u32 speed;
+ struct ice_res_tracker *res;
- if (adv_link_support)
- switch (link_speed) {
- case ICE_AQ_LINK_SPEED_10MB:
- speed = ICE_LINK_SPEED_10MBPS;
- break;
- case ICE_AQ_LINK_SPEED_100MB:
- speed = ICE_LINK_SPEED_100MBPS;
- break;
- case ICE_AQ_LINK_SPEED_1000MB:
- speed = ICE_LINK_SPEED_1000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_2500MB:
- speed = ICE_LINK_SPEED_2500MBPS;
- break;
- case ICE_AQ_LINK_SPEED_5GB:
- speed = ICE_LINK_SPEED_5000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_10GB:
- speed = ICE_LINK_SPEED_10000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_20GB:
- speed = ICE_LINK_SPEED_20000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_25GB:
- speed = ICE_LINK_SPEED_25000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_40GB:
- speed = ICE_LINK_SPEED_40000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_50GB:
- speed = ICE_LINK_SPEED_50000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_100GB:
- speed = ICE_LINK_SPEED_100000MBPS;
- break;
- default:
- speed = ICE_LINK_SPEED_UNKNOWN;
- break;
- }
+ if (!pf)
+ return -EINVAL;
+
+ res = pf->irq_tracker;
+ if (!res)
+ return -EINVAL;
+
+ /* give back irq_tracker resources used */
+ WARN_ON(pf->sriov_base_vector < res->num_entries);
+
+ pf->sriov_base_vector = 0;
+
+ return 0;
+}
+
+/**
+ * ice_free_vfs - Free all VFs
+ * @pf: pointer to the PF structure
+ */
+void ice_free_vfs(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vfs *vfs = &pf->vfs;
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ if (!ice_has_vfs(pf))
+ return;
+
+ while (test_and_set_bit(ICE_VF_DIS, pf->state))
+ usleep_range(1000, 2000);
+
+ /* Disable IOV before freeing resources. This lets any VF drivers
+ * running in the host get themselves cleaned up before we yank
+ * the carpet out from underneath their feet.
+ */
+ if (!pci_vfs_assigned(pf->pdev))
+ pci_disable_sriov(pf->pdev);
else
- /* Virtchnl speeds are not defined for every speed supported in
- * the hardware. To maintain compatibility with older AVF
- * drivers, while reporting the speed the new speed values are
- * resolved to the closest known virtchnl speeds
- */
- switch (link_speed) {
- case ICE_AQ_LINK_SPEED_10MB:
- case ICE_AQ_LINK_SPEED_100MB:
- speed = (u32)VIRTCHNL_LINK_SPEED_100MB;
- break;
- case ICE_AQ_LINK_SPEED_1000MB:
- case ICE_AQ_LINK_SPEED_2500MB:
- case ICE_AQ_LINK_SPEED_5GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_1GB;
- break;
- case ICE_AQ_LINK_SPEED_10GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_10GB;
- break;
- case ICE_AQ_LINK_SPEED_20GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_20GB;
- break;
- case ICE_AQ_LINK_SPEED_25GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
- break;
- case ICE_AQ_LINK_SPEED_40GB:
- case ICE_AQ_LINK_SPEED_50GB:
- case ICE_AQ_LINK_SPEED_100GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
- break;
- default:
- speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN;
- break;
+ dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
+
+ mutex_lock(&vfs->table_lock);
+
+ ice_eswitch_release(pf);
+
+ ice_for_each_vf(pf, bkt, vf) {
+ mutex_lock(&vf->cfg_lock);
+
+ ice_dis_vf_qs(vf);
+
+ if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ /* disable VF qp mappings and set VF disable state */
+ ice_dis_vf_mappings(vf);
+ set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ ice_free_vf_res(vf);
}
- return speed;
+ if (!pci_vfs_assigned(pf->pdev)) {
+ u32 reg_idx, bit_idx;
+
+ reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
+ wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+ }
+
+ /* clear malicious info since the VF is getting released */
+ if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
+ ICE_MAX_SRIOV_VFS, vf->vf_id))
+ dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
+ vf->vf_id);
+
+ mutex_unlock(&vf->cfg_lock);
+ }
+
+ if (ice_sriov_free_msix_res(pf))
+ dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
+
+ vfs->num_qps_per = 0;
+ ice_free_vf_entries(pf);
+
+ mutex_unlock(&vfs->table_lock);
+
+ clear_bit(ICE_VF_DIS, pf->state);
+ clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}
-/* The mailbox overflow detection algorithm helps to check if there
- * is a possibility of a malicious VF transmitting too many MBX messages to the
- * PF.
- * 1. The mailbox snapshot structure, ice_mbx_snapshot, is initialized during
- * driver initialization in ice_init_hw() using ice_mbx_init_snapshot().
- * The struct ice_mbx_snapshot helps to track and traverse a static window of
- * messages within the mailbox queue while looking for a malicious VF.
+/**
+ * ice_vf_vsi_setup - Set up a VF VSI
+ * @vf: VF to setup VSI for
*
- * 2. When the caller starts processing its mailbox queue in response to an
- * interrupt, the structure ice_mbx_snapshot is expected to be cleared before
- * the algorithm can be run for the first time for that interrupt. This can be
- * done via ice_mbx_reset_snapshot().
+ * Returns a pointer to the allocated VSI struct on success, otherwise NULL.
+ */
+static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
+{
+ struct ice_port_info *pi = ice_vf_get_port_info(vf);
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf, NULL);
+
+ if (!vsi) {
+ dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
+ ice_vf_invalidate_vsi(vf);
+ return NULL;
+ }
+
+ vf->lan_vsi_idx = vsi->idx;
+ vf->lan_vsi_num = vsi->vsi_num;
+
+ return vsi;
+}
+
+/**
+ * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
+ * @pf: pointer to PF structure
+ * @vf: pointer to VF that the first MSIX vector index is being calculated for
*
- * 3. For every message read by the caller from the MBX Queue, the caller must
- * call the detection algorithm's entry function ice_mbx_vf_state_handler().
- * Before every call to ice_mbx_vf_state_handler() the struct ice_mbx_data is
- * filled as it is required to be passed to the algorithm.
+ * This returns the first MSIX vector index in PF space that is used by this VF.
+ * This index is used when accessing PF relative registers such as
+ * GLINT_VECT2FUNC and GLINT_DYN_CTL.
+ * This will always be the OICR index in the AVF driver so any functionality
+ * using vf->first_vector_idx for queue configuration will have to increment by
+ * 1 to avoid meddling with the OICR index.
+ */
+static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
+{
+ return pf->sriov_base_vector + vf->vf_id * pf->vfs.num_msix_per;
+}
+
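A worked example of the index calculation above, using purely illustrative numbers:

	/* Assume pf->sriov_base_vector = 960 and pf->vfs.num_msix_per = 17.
	 * VF 0 then starts at PF-relative vector 960, VF 1 at 977, VF 2 at
	 * 994, and so on. Vector 0 of each range is the VF's OICR, so queue
	 * vectors begin at first_vector_idx + 1.
	 */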
+/**
+ * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
+ * @vf: VF to enable MSIX mappings for
*
- * 4. Every time a message is read from the MBX queue, a VFId is received which
- * is passed to the state handler. The boolean output is_malvf of the state
- * handler ice_mbx_vf_state_handler() serves as an indicator to the caller
- * whether this VF is malicious or not.
+ * Some of the registers need to be indexed/configured using hardware global
+ * device values, while other registers need 0-based values that are relative
+ * to the PF.
+ */
+static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
+{
+ int device_based_first_msix, device_based_last_msix;
+ int pf_based_first_msix, pf_based_last_msix, v;
+ struct ice_pf *pf = vf->pf;
+ int device_based_vf_id;
+ struct ice_hw *hw;
+ u32 reg;
+
+ hw = &pf->hw;
+ pf_based_first_msix = vf->first_vector_idx;
+ pf_based_last_msix = (pf_based_first_msix + pf->vfs.num_msix_per) - 1;
+
+ device_based_first_msix = pf_based_first_msix +
+ pf->hw.func_caps.common_cap.msix_vector_first_id;
+ device_based_last_msix =
+ (device_based_first_msix + pf->vfs.num_msix_per) - 1;
+ device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
+ VPINT_ALLOC_FIRST_M) |
+ ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
+ VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
+ wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
+
+ reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
+ & VPINT_ALLOC_PCI_FIRST_M) |
+ ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
+ VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
+ wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
+
+ /* map the interrupts to its functions */
+ for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
+ reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
+ GLINT_VECT2FUNC_VF_NUM_M) |
+ ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
+ GLINT_VECT2FUNC_PF_NUM_M));
+ wr32(hw, GLINT_VECT2FUNC(v), reg);
+ }
+
+ /* Map mailbox interrupt to VF MSI-X vector 0 */
+ wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
+}
+
+/**
+ * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
+ * @vf: VF to enable the mappings for
+ * @max_txq: max Tx queues allowed on the VF's VSI
+ * @max_rxq: max Rx queues allowed on the VF's VSI
+ */
+static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_hw *hw = &vf->pf->hw;
+ u32 reg;
+
+ /* set regardless of mapping mode */
+ wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
+
+ /* VF Tx queues allocation */
+ if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
+ /* set the VF PF Tx queue range
+ * VFNUMQ value should be set to (number of queues - 1). A value
+ * of 0 means 1 queue and a value of 255 means 256 queues
+ */
+ reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
+ VPLAN_TX_QBASE_VFFIRSTQ_M) |
+ (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
+ VPLAN_TX_QBASE_VFNUMQ_M));
+ wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
+ } else {
+ dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
+ }
+
+ /* set regardless of mapping mode */
+ wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
+
+ /* VF Rx queues allocation */
+ if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
+ /* set the VF PF Rx queue range
+ * VFNUMQ value should be set to (number of queues - 1). A value
+ * of 0 means 1 queue and a value of 255 means 256 queues
+ */
+ reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
+ VPLAN_RX_QBASE_VFFIRSTQ_M) |
+ (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
+ VPLAN_RX_QBASE_VFNUMQ_M));
+ wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
+ } else {
+ dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
+ }
+}
+
+/**
+ * ice_ena_vf_mappings - enable VF MSIX and queue mapping
+ * @vf: pointer to the VF structure
+ */
+static void ice_ena_vf_mappings(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ ice_ena_vf_msix_mappings(vf);
+ ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
+}
+
+/**
+ * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
+ * @vf: VF to calculate the register index for
+ * @q_vector: a q_vector associated to the VF
+ */
+int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
+{
+ struct ice_pf *pf;
+
+ if (!vf || !q_vector)
+ return -EINVAL;
+
+ pf = vf->pf;
+
+ /* always add one to account for the OICR being the first MSIX */
+ return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id +
+ q_vector->v_idx + 1;
+}
+
+/**
+ * ice_get_max_valid_res_idx - Get the max valid resource index
+ * @res: pointer to the resource to find the max valid index for
*
- * 5. When a VF is identified to be malicious, the caller can send a message
- * to the system administrator. The caller can invoke ice_mbx_report_malvf()
- * to help determine if a malicious VF is to be reported or not. This function
- * requires the caller to maintain a global bitmap to track all malicious VFs
- * and pass that to ice_mbx_report_malvf() along with the VFID which was identified
- * to be malicious by ice_mbx_vf_state_handler().
+ * Start from the end of the ice_res_tracker and return right when we find the
+ * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
+ * valid for SR-IOV because it is the only consumer that manipulates res->end,
+ * and it is always called when res->end is set to res->num_entries.
+ */
+static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
+{
+ int i;
+
+ if (!res)
+ return -EINVAL;
+
+ for (i = res->num_entries - 1; i >= 0; i--)
+ if (res->list[i] & ICE_RES_VALID_BIT)
+ return i;
+
+ return 0;
+}
+
+/**
+ * ice_sriov_set_msix_res - Set any used MSIX resources
+ * @pf: pointer to PF structure
+ * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
*
- * 6. The global bitmap maintained by PF can be cleared completely if PF is in
- * reset or the bit corresponding to a VF can be cleared if that VF is in reset.
- * When a VF is shut down and brought back up, we assume that the new VF
- * brought up is not malicious and hence report it if found malicious.
+ * This function allows SR-IOV resources to be taken from the end of the PF's
+ * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
+ * just set the pf->sriov_base_vector and return success.
*
- * 7. The function ice_mbx_reset_snapshot() is called to reset the information
- * in ice_mbx_snapshot for every new mailbox interrupt handled.
+ * If there are not enough resources available, return an error. This should
+ * always be caught by ice_set_per_vf_res().
*
- * 8. The memory allocated for variables in ice_mbx_snapshot is de-allocated
- * when driver is unloaded.
+ * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
+ * in the PF's space available for SR-IOV.
*/
-#define ICE_RQ_DATA_MASK(rq_data) ((rq_data) & PF_MBX_ARQH_ARQH_M)
-/* Using the highest value for an unsigned 16-bit value 0xFFFF to indicate that
- * the max messages check must be ignored in the algorithm
+static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
+{
+ u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
+ int vectors_used = pf->irq_tracker->num_entries;
+ int sriov_base_vector;
+
+ sriov_base_vector = total_vectors - num_msix_needed;
+
+ /* make sure we only grab irq_tracker entries from the list end and
+ * that we have enough available MSIX vectors
+ */
+ if (sriov_base_vector < vectors_used)
+ return -EINVAL;
+
+ pf->sriov_base_vector = sriov_base_vector;
+
+ return 0;
+}
+
+/**
+ * ice_set_per_vf_res - check if vectors and queues are available
+ * @pf: pointer to the PF structure
+ * @num_vfs: the number of SR-IOV VFs being configured
+ *
+ * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
+ * get more vectors and can enable more queues per VF. Note that this does not
+ * grab any vectors from the SW pool already allocated. Also note that all
+ * vector counts include one for each VF's miscellaneous interrupt vector
+ * (i.e. OICR).
+ *
+ * Minimum VFs - 2 vectors, 1 queue pair
+ * Small VFs - 5 vectors, 4 queue pairs
+ * Medium VFs - 17 vectors, 16 queue pairs
+ *
+ * Second, determine the number of queue pairs per VF by starting with a
+ * pre-defined maximum each VF supports. If this is not possible, then we
+ * adjust based on the queue pairs available on the device.
+ *
+ * Lastly, set queue and MSI-X VF variables tracked by the PF so they can be
+ * used by each VF during VF initialization and reset.
*/
-#define ICE_IGNORE_MAX_MSG_CNT 0xFFFF
+static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
+{
+ int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
+ u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
+ int msix_avail_per_vf, msix_avail_for_sriov;
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
+
+ if (!num_vfs)
+ return -EINVAL;
+
+ if (max_valid_res_idx < 0)
+ return -ENOSPC;
+
+ /* determine MSI-X resources per VF */
+ msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
+ pf->irq_tracker->num_entries;
+ msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
+ if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
+ num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
+ } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
+ num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
+ } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
+ num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
+ } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
+ num_msix_per_vf = ICE_MIN_INTR_PER_VF;
+ } else {
+ dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
+ msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
+ num_vfs);
+ return -ENOSPC;
+ }
+
+ num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
+ ICE_MAX_RSS_QS_PER_VF);
+ avail_qs = ice_get_avail_txq_count(pf) / num_vfs;
+ if (!avail_qs)
+ num_txq = 0;
+ else if (num_txq > avail_qs)
+ num_txq = rounddown_pow_of_two(avail_qs);
+
+ num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
+ ICE_MAX_RSS_QS_PER_VF);
+ avail_qs = ice_get_avail_rxq_count(pf) / num_vfs;
+ if (!avail_qs)
+ num_rxq = 0;
+ else if (num_rxq > avail_qs)
+ num_rxq = rounddown_pow_of_two(avail_qs);
+
+ if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) {
+ dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
+ ICE_MIN_QS_PER_VF, num_vfs);
+ return -ENOSPC;
+ }
+
+ err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs);
+ if (err) {
+ dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n",
+ num_vfs, err);
+ return err;
+ }
+
+ /* only allow equal Tx/Rx queue count (i.e. queue pairs) */
+ pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
+ pf->vfs.num_msix_per = num_msix_per_vf;
+ dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
+ num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per);
+
+ return 0;
+}
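To make the sizing above concrete, a hypothetical example (the vector counts are illustrative, not taken from the patch):

	/* If 512 MSI-X vectors remain for SR-IOV and 16 VFs are requested,
	 * 512 / 16 = 32 >= ICE_NUM_VF_MSIX_MED (17), so each VF is sized as a
	 * "medium" VF: 17 vectors (one of them the VF's OICR) and up to 16
	 * queue pairs, further limited by the Tx/Rx queues actually available
	 * on the device.
	 */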
/**
- * ice_mbx_traverse - Pass through mailbox snapshot
- * @hw: pointer to the HW struct
- * @new_state: new algorithm state
+ * ice_init_vf_vsi_res - initialize/setup VF VSI resources
+ * @vf: VF to initialize/setup the VSI for
*
- * Traversing the mailbox static snapshot without checking
- * for malicious VFs.
+ * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up
+ * the VF VSI's broadcast filter. It is only used during initial VF creation.
*/
-static void
-ice_mbx_traverse(struct ice_hw *hw,
- enum ice_mbx_snapshot_state *new_state)
+static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
- struct ice_mbx_snap_buffer_data *snap_buf;
- u32 num_iterations;
+ struct ice_vsi_vlan_ops *vlan_ops;
+ struct ice_pf *pf = vf->pf;
+ u8 broadcast[ETH_ALEN];
+ struct ice_vsi *vsi;
+ struct device *dev;
+ int err;
- snap_buf = &hw->mbx_snapshot.mbx_buf;
+ vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
- /* As mailbox buffer is circular, applying a mask
- * on the incremented iteration count.
- */
- num_iterations = ICE_RQ_DATA_MASK(++snap_buf->num_iterations);
-
- /* Checking either of the below conditions to exit snapshot traversal:
- * Condition-1: If the number of iterations in the mailbox is equal to
- * the mailbox head which would indicate that we have reached the end
- * of the static snapshot.
- * Condition-2: If the maximum messages serviced in the mailbox for a
- * given interrupt is the highest possible value then there is no need
- * to check if the number of messages processed is equal to it. If not
- * check if the number of messages processed is greater than or equal
- * to the maximum number of mailbox entries serviced in current work item.
+ dev = ice_pf_to_dev(pf);
+ vsi = ice_vf_vsi_setup(vf);
+ if (!vsi)
+ return -ENOMEM;
+
+ err = ice_vsi_add_vlan_zero(vsi);
+ if (err) {
+ dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
+ vf->vf_id);
+ goto release_vsi;
+ }
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ err = vlan_ops->ena_rx_filtering(vsi);
+ if (err) {
+ dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
+ vf->vf_id);
+ goto release_vsi;
+ }
+
+ eth_broadcast_addr(broadcast);
+ err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
+ if (err) {
+ dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n",
+ vf->vf_id, err);
+ goto release_vsi;
+ }
+
+ err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
+ if (err) {
+ dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
+ vf->vf_id);
+ goto release_vsi;
+ }
+
+ vf->num_mac = 1;
+
+ return 0;
+
+release_vsi:
+ ice_vf_vsi_release(vf);
+ return err;
+}
+
+/**
+ * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
+ * @pf: PF the VFs are associated with
+ */
+static int ice_start_vfs(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ unsigned int bkt, it_cnt;
+ struct ice_vf *vf;
+ int retval;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
+
+ it_cnt = 0;
+ ice_for_each_vf(pf, bkt, vf) {
+ vf->vf_ops->clear_reset_trigger(vf);
+
+ retval = ice_init_vf_vsi_res(vf);
+ if (retval) {
+ dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
+ vf->vf_id, retval);
+ goto teardown;
+ }
+
+ set_bit(ICE_VF_STATE_INIT, vf->vf_states);
+ ice_ena_vf_mappings(vf);
+ wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
+ it_cnt++;
+ }
+
+ ice_flush(hw);
+ return 0;
+
+teardown:
+ ice_for_each_vf(pf, bkt, vf) {
+ if (it_cnt == 0)
+ break;
+
+ ice_dis_vf_mappings(vf);
+ ice_vf_vsi_release(vf);
+ it_cnt--;
+ }
+
+ return retval;
+}
+
+/**
+ * ice_sriov_free_vf - Free VF memory after all references are dropped
+ * @vf: pointer to VF to free
+ *
+ * Called by ice_put_vf through ice_release_vf once the last reference to a VF
+ * structure has been dropped.
+ */
+static void ice_sriov_free_vf(struct ice_vf *vf)
+{
+ mutex_destroy(&vf->cfg_lock);
+
+ kfree_rcu(vf, rcu);
+}
+
+/**
+ * ice_sriov_clear_mbx_register - clears SRIOV VF's mailbox registers
+ * @vf: the vf to configure
+ */
+static void ice_sriov_clear_mbx_register(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ wr32(&pf->hw, VF_MBX_ARQLEN(vf->vf_id), 0);
+ wr32(&pf->hw, VF_MBX_ATQLEN(vf->vf_id), 0);
+}
+
+/**
+ * ice_sriov_trigger_reset_register - trigger VF reset for SRIOV VF
+ * @vf: pointer to VF structure
+ * @is_vflr: true if reset occurred due to VFLR
+ *
+ * Trigger and clean up after a VF reset for an SR-IOV VF.
+ */
+static void ice_sriov_trigger_reset_register(struct ice_vf *vf, bool is_vflr)
+{
+ struct ice_pf *pf = vf->pf;
+ u32 reg, reg_idx, bit_idx;
+ unsigned int vf_abs_id, i;
+ struct device *dev;
+ struct ice_hw *hw;
+
+ dev = ice_pf_to_dev(pf);
+ hw = &pf->hw;
+ vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ /* In the case of a VFLR, HW has already reset the VF and we just need
+ * to clean up. Otherwise we must first trigger the reset using the
+ * VFRTRIG register.
*/
- if (num_iterations == snap_buf->head ||
- (snap_buf->max_num_msgs_mbx < ICE_IGNORE_MAX_MSG_CNT &&
- ++snap_buf->num_msg_proc >= snap_buf->max_num_msgs_mbx))
- *new_state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+ if (!is_vflr) {
+ reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
+ reg |= VPGEN_VFRTRIG_VFSWR_M;
+ wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
+ }
+
+ /* clear the VFLR bit in GLGEN_VFLRSTAT */
+ reg_idx = (vf_abs_id) / 32;
+ bit_idx = (vf_abs_id) % 32;
+ wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+ ice_flush(hw);
+
+ wr32(hw, PF_PCI_CIAA,
+ VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
+ for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
+ reg = rd32(hw, PF_PCI_CIAD);
+ /* no transactions pending so stop polling */
+ if ((reg & VF_TRANS_PENDING_M) == 0)
+ break;
+
+ dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
+ udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
+ }
+}
+
+/**
+ * ice_sriov_poll_reset_status - poll SRIOV VF reset status
+ * @vf: pointer to VF structure
+ *
+ * Returns true when reset is successful, else returns false
+ */
+static bool ice_sriov_poll_reset_status(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ unsigned int i;
+ u32 reg;
+
+ for (i = 0; i < 10; i++) {
+ /* VF reset requires driver to first reset the VF and then
+ * poll the status register to make sure that the reset
+ * completed successfully.
+ */
+ reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id));
+ if (reg & VPGEN_VFRSTAT_VFRD_M)
+ return true;
+
+ /* only sleep if the reset is not done */
+ usleep_range(10, 20);
+ }
+ return false;
+}
+
+/**
+ * ice_sriov_clear_reset_trigger - enable VF to access hardware
+ * @vf: VF to enable hardware access for
+ */
+static void ice_sriov_clear_reset_trigger(struct ice_vf *vf)
+{
+ struct ice_hw *hw = &vf->pf->hw;
+ u32 reg;
+
+ reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
+ reg &= ~VPGEN_VFRTRIG_VFSWR_M;
+ wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
+ ice_flush(hw);
+}
+
+/**
+ * ice_sriov_vsi_rebuild - release and rebuild VF's VSI
+ * @vf: VF to release and setup the VSI for
+ *
+ * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF
+ * configuration change, etc.).
+ */
+static int ice_sriov_vsi_rebuild(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ ice_vf_vsi_release(vf);
+ if (!ice_vf_vsi_setup(vf)) {
+ dev_err(ice_pf_to_dev(pf),
+ "Failed to release and setup the VF%u's VSI\n",
+ vf->vf_id);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_sriov_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
+ * @vf: VF to perform tasks on
+ */
+static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf)
+{
+ ice_vf_rebuild_host_cfg(vf);
+ ice_vf_set_initialized(vf);
+ ice_ena_vf_mappings(vf);
+ wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
+}
+
+static const struct ice_vf_ops ice_sriov_vf_ops = {
+ .reset_type = ICE_VF_RESET,
+ .free = ice_sriov_free_vf,
+ .clear_mbx_register = ice_sriov_clear_mbx_register,
+ .trigger_reset_register = ice_sriov_trigger_reset_register,
+ .poll_reset_status = ice_sriov_poll_reset_status,
+ .clear_reset_trigger = ice_sriov_clear_reset_trigger,
+ .vsi_rebuild = ice_sriov_vsi_rebuild,
+ .post_vsi_rebuild = ice_sriov_post_vsi_rebuild,
+};
+
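The ops table above keeps the backend-specific register accesses out of the common VF reset code; a minimal sketch of how a generic caller could dispatch through it (the helper name and exact flow are assumptions, the real caller lives in the shared VF library):

	static void ice_vf_do_hw_reset(struct ice_vf *vf, bool is_vflr)
	{
		vf->vf_ops->clear_mbx_register(vf);
		vf->vf_ops->trigger_reset_register(vf, is_vflr);
		if (!vf->vf_ops->poll_reset_status(vf))
			dev_warn(ice_pf_to_dev(vf->pf),
				 "VF %u reset never completed\n", vf->vf_id);
		vf->vf_ops->clear_reset_trigger(vf);
	}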
+/**
+ * ice_create_vf_entries - Allocate and insert VF entries
+ * @pf: pointer to the PF structure
+ * @num_vfs: the number of VFs to allocate
+ *
+ * Allocate new VF entries and insert them into the hash table. Set some
+ * basic default fields for initializing the new VFs.
+ *
+ * After this function exits, the hash table will have num_vfs entries
+ * inserted.
+ *
+ * Returns 0 on success or an integer error code on failure.
+ */
+static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
+{
+ struct ice_vfs *vfs = &pf->vfs;
+ struct ice_vf *vf;
+ u16 vf_id;
+ int err;
+
+ lockdep_assert_held(&vfs->table_lock);
+
+ for (vf_id = 0; vf_id < num_vfs; vf_id++) {
+ vf = kzalloc(sizeof(*vf), GFP_KERNEL);
+ if (!vf) {
+ err = -ENOMEM;
+ goto err_free_entries;
+ }
+ kref_init(&vf->refcnt);
+
+ vf->pf = pf;
+ vf->vf_id = vf_id;
+
+ /* set sriov vf ops for VFs created during SRIOV flow */
+ vf->vf_ops = &ice_sriov_vf_ops;
+
+ vf->vf_sw_id = pf->first_sw;
+ /* assign default capabilities */
+ vf->spoofchk = true;
+ vf->num_vf_qs = pf->vfs.num_qps_per;
+ ice_vc_set_default_allowlist(vf);
+
+ /* ctrl_vsi_idx will be set to a valid value only when VF
+ * creates its first fdir rule.
+ */
+ ice_vf_ctrl_invalidate_vsi(vf);
+ ice_vf_fdir_init(vf);
+
+ ice_virtchnl_set_dflt_ops(vf);
+
+ mutex_init(&vf->cfg_lock);
+
+ hash_add_rcu(vfs->table, &vf->entry, vf_id);
+ }
+
+ return 0;
+
+err_free_entries:
+ ice_free_vf_entries(pf);
+ return err;
}
/**
- * ice_mbx_detect_malvf - Detect malicious VF in snapshot
- * @hw: pointer to the HW struct
- * @vf_id: relative virtual function ID
- * @new_state: new algorithm state
- * @is_malvf: boolean output to indicate if VF is malicious
+ * ice_ena_vfs - enable VFs so they are ready to be used
+ * @pf: pointer to the PF structure
+ * @num_vfs: number of VFs to enable
+ */
+static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ int ret;
+
+ /* Disable global interrupt 0 so we don't try to handle the VFLR. */
+ wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
+ ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
+ set_bit(ICE_OICR_INTR_DIS, pf->state);
+ ice_flush(hw);
+
+ ret = pci_enable_sriov(pf->pdev, num_vfs);
+ if (ret)
+ goto err_unroll_intr;
+
+ mutex_lock(&pf->vfs.table_lock);
+
+ ret = ice_set_per_vf_res(pf, num_vfs);
+ if (ret) {
+ dev_err(dev, "Not enough resources for %d VFs, err %d. Try with fewer number of VFs\n",
+ num_vfs, ret);
+ goto err_unroll_sriov;
+ }
+
+ ret = ice_create_vf_entries(pf, num_vfs);
+ if (ret) {
+ dev_err(dev, "Failed to allocate VF entries for %d VFs\n",
+ num_vfs);
+ goto err_unroll_sriov;
+ }
+
+ ret = ice_start_vfs(pf);
+ if (ret) {
+ dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);
+ ret = -EAGAIN;
+ goto err_unroll_vf_entries;
+ }
+
+ clear_bit(ICE_VF_DIS, pf->state);
+
+ ret = ice_eswitch_configure(pf);
+ if (ret) {
+ dev_err(dev, "Failed to configure eswitch, err %d\n", ret);
+ goto err_unroll_sriov;
+ }
+
+ /* rearm global interrupts */
+ if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
+ ice_irq_dynamic_ena(hw, NULL, NULL);
+
+ mutex_unlock(&pf->vfs.table_lock);
+
+ return 0;
+
+err_unroll_vf_entries:
+ ice_free_vf_entries(pf);
+err_unroll_sriov:
+ mutex_unlock(&pf->vfs.table_lock);
+ pci_disable_sriov(pf->pdev);
+err_unroll_intr:
+ /* rearm interrupts here */
+ ice_irq_dynamic_ena(hw, NULL, NULL);
+ clear_bit(ICE_OICR_INTR_DIS, pf->state);
+ return ret;
+}
+
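ice_ena_vfs() above follows the usual goto-unwind convention: each error label undoes only the steps that had already succeeded, in reverse order of setup. A generic skeleton of the pattern, with placeholder step names that are not part of the driver:

	int setup_example(void)
	{
		int err;

		err = do_step_a();		/* e.g. pci_enable_sriov() */
		if (err)
			return err;

		err = do_step_b();		/* e.g. ice_create_vf_entries() */
		if (err)
			goto err_undo_a;

		return 0;

	err_undo_a:
		undo_step_a();			/* e.g. pci_disable_sriov() */
		return err;
	}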
+/**
+ * ice_pci_sriov_ena - Enable or change number of VFs
+ * @pf: pointer to the PF structure
+ * @num_vfs: number of VFs to allocate
*
- * This function tracks the number of asynchronous messages
- * sent per VF and marks the VF as malicious if it exceeds
- * the permissible number of messages to send.
+ * Returns 0 on success and negative on failure
*/
-static int
-ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id,
- enum ice_mbx_snapshot_state *new_state,
- bool *is_malvf)
+static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
- struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+ int pre_existing_vfs = pci_num_vf(pf->pdev);
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
- if (vf_id >= snap->mbx_vf.vfcntr_len)
- return -EIO;
+ if (pre_existing_vfs && pre_existing_vfs != num_vfs)
+ ice_free_vfs(pf);
+ else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
+ return 0;
- /* increment the message count in the VF array */
- snap->mbx_vf.vf_cntr[vf_id]++;
+ if (num_vfs > pf->vfs.num_supported) {
+ dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
+ num_vfs, pf->vfs.num_supported);
+ return -EOPNOTSUPP;
+ }
- if (snap->mbx_vf.vf_cntr[vf_id] >= ICE_ASYNC_VF_MSG_THRESHOLD)
- *is_malvf = true;
+ dev_info(dev, "Enabling %d VFs\n", num_vfs);
+ err = ice_ena_vfs(pf, num_vfs);
+ if (err) {
+ dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
+ return err;
+ }
- /* continue to iterate through the mailbox snapshot */
- ice_mbx_traverse(hw, new_state);
+ set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
+ return 0;
+}
+
+/**
+ * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
+ * @pf: PF to enable SR-IOV on
+ */
+static int ice_check_sriov_allowed(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+
+ if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
+ dev_err(dev, "This device is not capable of SR-IOV\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ice_is_safe_mode(pf)) {
+ dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ice_pf_state_is_nominal(pf)) {
+ dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
+ return -EBUSY;
+ }
return 0;
}
/**
- * ice_mbx_reset_snapshot - Reset mailbox snapshot structure
- * @snap: pointer to mailbox snapshot structure in the ice_hw struct
+ * ice_sriov_configure - Enable or change number of VFs via sysfs
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of VFs to allocate or 0 to free VFs
+ *
+ * This function is called when the user updates the number of VFs in sysfs. On
+ * success return whatever num_vfs was set to by the caller. Return negative on
+ * failure.
+ */
+int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ err = ice_check_sriov_allowed(pf);
+ if (err)
+ return err;
+
+ if (!num_vfs) {
+ if (!pci_vfs_assigned(pdev)) {
+ ice_mbx_deinit_snapshot(&pf->hw);
+ ice_free_vfs(pf);
+ if (pf->lag)
+ ice_enable_lag(pf->lag);
+ return 0;
+ }
+
+ dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
+ return -EBUSY;
+ }
+
+ err = ice_mbx_init_snapshot(&pf->hw, num_vfs);
+ if (err)
+ return err;
+
+ err = ice_pci_sriov_ena(pf, num_vfs);
+ if (err) {
+ ice_mbx_deinit_snapshot(&pf->hw);
+ return err;
+ }
+
+ if (pf->lag)
+ ice_disable_lag(pf->lag);
+ return num_vfs;
+}
+
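ice_sriov_configure() is reached through the standard PCI sriov_numvfs sysfs attribute; a small userspace sketch of driving it (the PCI address below is only an example):

	#include <stdio.h>

	int main(void)
	{
		/* Writing N requests N VFs, writing 0 frees them again. */
		FILE *f = fopen("/sys/bus/pci/devices/0000:3b:00.0/sriov_numvfs", "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		fprintf(f, "4\n");
		fclose(f);
		return 0;
	}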
+/**
+ * ice_process_vflr_event - Free VF resources via IRQ calls
+ * @pf: pointer to the PF structure
*
- * Reset the mailbox snapshot structure and clear VF counter array.
+ * Called from the VFLR IRQ handler to free up VF resources and state
+ * variables
*/
-static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
+void ice_process_vflr_event(struct ice_pf *pf)
{
- u32 vfcntr_len;
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vf;
+ unsigned int bkt;
+ u32 reg;
- if (!snap || !snap->mbx_vf.vf_cntr)
+ if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
+ !ice_has_vfs(pf))
return;
- /* Clear VF counters. */
- vfcntr_len = snap->mbx_vf.vfcntr_len;
- if (vfcntr_len)
- memset(snap->mbx_vf.vf_cntr, 0,
- (vfcntr_len * sizeof(*snap->mbx_vf.vf_cntr)));
-
- /* Reset mailbox snapshot for a new capture. */
- memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
- snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
-}
-
-/**
- * ice_mbx_vf_state_handler - Handle states of the overflow algorithm
- * @hw: pointer to the HW struct
- * @mbx_data: pointer to structure containing mailbox data
- * @vf_id: relative virtual function (VF) ID
- * @is_malvf: boolean output to indicate if VF is malicious
- *
- * The function serves as an entry point for the malicious VF
- * detection algorithm by handling the different states and state
- * transitions of the algorithm:
- * New snapshot: This state is entered when creating a new static
- * snapshot. The data from any previous mailbox snapshot is
- * cleared and a new capture of the mailbox head and tail is
- * logged. This will be the new static snapshot to detect
- * asynchronous messages sent by VFs. On capturing the snapshot
- * and depending on whether the number of pending messages in that
- * snapshot exceed the watermark value, the state machine enters
- * traverse or detect states.
- * Traverse: If pending message count is below watermark then iterate
- * through the snapshot without any action on VF.
- * Detect: If pending message count exceeds watermark traverse
- * the static snapshot and look for a malicious VF.
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
+ u32 reg_idx, bit_idx;
+
+ reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
+ /* read GLGEN_VFLRSTAT register to find out the flr VFs */
+ reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
+ if (reg & BIT(bit_idx))
+ /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
+ ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK);
+ }
+ mutex_unlock(&pf->vfs.table_lock);
+}
+
+/**
+ * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
+ * @pf: PF used to index all VFs
+ * @pfq: queue index relative to the PF's function space
+ *
+ * If no VF is found who owns the pfq then return NULL, otherwise return a
+ * pointer to the VF who owns the pfq
+ *
+ * If this function returns non-NULL, it acquires a reference count of the VF
+ * structure. The caller is responsible for calling ice_put_vf() to drop this
+ * reference.
+ */
+static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf) {
+ struct ice_vsi *vsi;
+ u16 rxq_idx;
+
+ vsi = ice_get_vf_vsi(vf);
+
+ ice_for_each_rxq(vsi, rxq_idx)
+ if (vsi->rxq_map[rxq_idx] == pfq) {
+ struct ice_vf *found;
+
+ if (kref_get_unless_zero(&vf->refcnt))
+ found = vf;
+ else
+ found = NULL;
+ rcu_read_unlock();
+ return found;
+ }
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+/**
+ * ice_globalq_to_pfq - convert from global queue index to PF space queue index
+ * @pf: PF used for conversion
+ * @globalq: global queue index used to convert to PF space queue index
+ */
+static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
+{
+ return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
+}
+
+/**
+ * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
+ * @pf: PF that the LAN overflow event happened on
+ * @event: structure holding the event information for the LAN overflow event
+ *
+ * Determine if the LAN overflow event was caused by a VF queue. If it was not
+ * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a
+ * reset on the offending VF.
+ */
+void
+ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+ u32 gldcb_rtctq, queue;
+ struct ice_vf *vf;
+
+ gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
+ dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
+
+ /* event returns device global Rx queue number */
+ queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
+ GLDCB_RTCTQ_RXQNUM_S;
+
+ vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
+ if (!vf)
+ return;
+
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
+ ice_put_vf(vf);
+}
+
+/**
+ * ice_set_vf_spoofchk
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ena: flag to enable or disable feature
+ *
+ * Enable or disable VF spoof checking
+ */
+int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ struct ice_vsi *vf_vsi;
+ struct device *dev;
+ struct ice_vf *vf;
+ int ret;
+
+ dev = ice_pf_to_dev(pf);
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
+ return -EINVAL;
+
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
+
+ vf_vsi = ice_get_vf_vsi(vf);
+ if (!vf_vsi) {
+ netdev_err(netdev, "VSI %d for VF %d is null\n",
+ vf->lan_vsi_idx, vf->vf_id);
+ ret = -EINVAL;
+ goto out_put_vf;
+ }
+
+ if (vf_vsi->type != ICE_VSI_VF) {
+ netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
+ vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
+ ret = -ENODEV;
+ goto out_put_vf;
+ }
+
+ if (ena == vf->spoofchk) {
+ dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
+ ret = 0;
+ goto out_put_vf;
+ }
+
+ ret = ice_vsi_apply_spoofchk(vf_vsi, ena);
+ if (ret)
+ dev_err(dev, "Failed to set spoofchk %s for VF %d VSI %d\n error %d\n",
+ ena ? "ON" : "OFF", vf->vf_id, vf_vsi->vsi_num, ret);
+ else
+ vf->spoofchk = ena;
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_get_vf_cfg
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ivi: VF configuration structure
+ *
+ * return VF configuration
*/
int
-ice_mbx_vf_state_handler(struct ice_hw *hw,
- struct ice_mbx_data *mbx_data, u16 vf_id,
- bool *is_malvf)
+ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
- struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
- struct ice_mbx_snap_buffer_data *snap_buf;
- struct ice_ctl_q_info *cq = &hw->mailboxq;
- enum ice_mbx_snapshot_state new_state;
- int status = 0;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vf *vf;
+ int ret;
- if (!is_malvf || !mbx_data)
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
return -EINVAL;
- /* When entering the mailbox state machine assume that the VF
- * is not malicious until detected.
- */
- *is_malvf = false;
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
+
+ ivi->vf = vf_id;
+ ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr);
+
+ /* VF configuration for VLAN and applicable QoS */
+ ivi->vlan = ice_vf_get_port_vlan_id(vf);
+ ivi->qos = ice_vf_get_port_vlan_prio(vf);
+ if (ice_vf_is_port_vlan_ena(vf))
+ ivi->vlan_proto = cpu_to_be16(ice_vf_get_port_vlan_tpid(vf));
+
+ ivi->trusted = vf->trusted;
+ ivi->spoofchk = vf->spoofchk;
+ if (!vf->link_forced)
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ else if (vf->link_up)
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+ ivi->max_tx_rate = vf->max_tx_rate;
+ ivi->min_tx_rate = vf->min_tx_rate;
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
+ * @pf: PF used to reference the switch's rules
+ * @umac: unicast MAC to compare against existing switch rules
+ *
+ * Return true on the first/any match, else return false
+ */
+static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
+{
+ struct ice_sw_recipe *mac_recipe_list =
+ &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
+ struct ice_fltr_mgmt_list_entry *list_itr;
+ struct list_head *rule_head;
+ struct mutex *rule_lock; /* protect MAC filter list access */
+
+ rule_head = &mac_recipe_list->filt_rules;
+ rule_lock = &mac_recipe_list->filt_rule_lock;
- /* Checking if max messages allowed to be processed while servicing current
- * interrupt is not less than the defined AVF message threshold.
- */
- if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
+ mutex_lock(rule_lock);
+ list_for_each_entry(list_itr, rule_head, list_entry) {
+ u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
+
+ if (ether_addr_equal(existing_mac, umac)) {
+ mutex_unlock(rule_lock);
+ return true;
+ }
+ }
+
+ mutex_unlock(rule_lock);
+
+ return false;
+}
+
+/**
+ * ice_set_vf_mac
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @mac: MAC address
+ *
+ * program VF MAC address
+ */
+int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vf *vf;
+ int ret;
+
+ if (is_multicast_ether_addr(mac)) {
+ netdev_err(netdev, "%pM not a valid unicast address\n", mac);
return -EINVAL;
+ }
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
+ return -EINVAL;
+
+ /* nothing left to do, unicast MAC already set */
+ if (ether_addr_equal(vf->dev_lan_addr.addr, mac) &&
+ ether_addr_equal(vf->hw_lan_addr.addr, mac)) {
+ ret = 0;
+ goto out_put_vf;
+ }
+
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
- /* The watermark value should not be lesser than the threshold limit
- * set for the number of asynchronous messages a VF can send to mailbox
- * nor should it be greater than the maximum number of messages in the
- * mailbox serviced in current interrupt.
+ if (ice_unicast_mac_exists(pf, mac)) {
+ netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
+ mac, vf_id, mac);
+ ret = -EINVAL;
+ goto out_put_vf;
+ }
+
+ mutex_lock(&vf->cfg_lock);
+
+ /* VF is notified of its new MAC via the PF's response to the
+ * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
*/
- if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
- mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
+ ether_addr_copy(vf->dev_lan_addr.addr, mac);
+ ether_addr_copy(vf->hw_lan_addr.addr, mac);
+ if (is_zero_ether_addr(mac)) {
+ /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
+ vf->pf_set_mac = false;
+ netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
+ vf->vf_id);
+ } else {
+ /* PF will add MAC rule for the VF */
+ vf->pf_set_mac = true;
+ netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
+ mac, vf_id);
+ }
+
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
+ mutex_unlock(&vf->cfg_lock);
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_set_vf_trust
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @trusted: Boolean value to enable/disable trusted VF
+ *
+ * Enable or disable a given VF as trusted
+ */
+int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vf *vf;
+ int ret;
+
+ if (ice_is_eswitch_mode_switchdev(pf)) {
+ dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
return -EINVAL;
- new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
- snap_buf = &snap->mbx_buf;
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
- switch (snap_buf->state) {
- case ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT:
- /* Clear any previously held data in mailbox snapshot structure. */
- ice_mbx_reset_snapshot(snap);
+ /* Check if already trusted */
+ if (trusted == vf->trusted) {
+ ret = 0;
+ goto out_put_vf;
+ }
- /* Collect the pending ARQ count, number of messages processed and
- * the maximum number of messages allowed to be processed from the
- * Mailbox for current interrupt.
- */
- snap_buf->num_pending_arq = mbx_data->num_pending_arq;
- snap_buf->num_msg_proc = mbx_data->num_msg_proc;
- snap_buf->max_num_msgs_mbx = mbx_data->max_num_msgs_mbx;
+ mutex_lock(&vf->cfg_lock);
- /* Capture a new static snapshot of the mailbox by logging the
- * head and tail of snapshot and set num_iterations to the tail
- * value to mark the start of the iteration through the snapshot.
- */
- snap_buf->head = ICE_RQ_DATA_MASK(cq->rq.next_to_clean +
- mbx_data->num_pending_arq);
- snap_buf->tail = ICE_RQ_DATA_MASK(cq->rq.next_to_clean - 1);
- snap_buf->num_iterations = snap_buf->tail;
-
- /* Pending ARQ messages returned by ice_clean_rq_elem
- * is the difference between the head and tail of the
- * mailbox queue. Comparing this value against the watermark
- * helps to check if we potentially have malicious VFs.
- */
- if (snap_buf->num_pending_arq >=
- mbx_data->async_watermark_val) {
- new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
- status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
- } else {
- new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
- ice_mbx_traverse(hw, &new_state);
- }
- break;
+ vf->trusted = trusted;
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
+ dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
+ vf_id, trusted ? "" : "un");
- case ICE_MAL_VF_DETECT_STATE_TRAVERSE:
- new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
- ice_mbx_traverse(hw, &new_state);
- break;
+ mutex_unlock(&vf->cfg_lock);
- case ICE_MAL_VF_DETECT_STATE_DETECT:
- new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
- status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
- break;
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_set_vf_link_state
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @link_state: required link state
+ *
+ * Set VF's link state, irrespective of physical link state status
+ */
+int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vf *vf;
+ int ret;
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
+ return -EINVAL;
+
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
+ switch (link_state) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ vf->link_forced = false;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ vf->link_forced = true;
+ vf->link_up = true;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ vf->link_forced = true;
+ vf->link_up = false;
+ break;
default:
- new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
- status = -EIO;
+ ret = -EINVAL;
+ goto out_put_vf;
}
- snap_buf->state = new_state;
+ ice_vc_notify_vf_link_state(vf);
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
+ * @pf: PF associated with VFs
+ */
+static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+ int rate = 0;
- return status;
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf)
+ rate += vf->min_tx_rate;
+ rcu_read_unlock();
+
+ return rate;
}
/**
- * ice_mbx_report_malvf - Track and note malicious VF
- * @hw: pointer to the HW struct
- * @all_malvfs: all malicious VFs tracked by PF
- * @bitmap_len: length of bitmap in bits
- * @vf_id: relative virtual function ID of the malicious VF
- * @report_malvf: boolean to indicate if malicious VF must be reported
+ * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
+ * @vf: VF trying to configure min_tx_rate
+ * @min_tx_rate: min Tx rate in Mbps
+ *
+ * Check if the min_tx_rate being passed in will cause oversubscription of the
+ * total min_tx_rate based on the current link speed and all other VFs'
+ * configured min_tx_rate
*
- * This function will update a bitmap that keeps track of the malicious
- * VFs attached to the PF. A malicious VF must be reported only once if
- * discovered between VF resets or loading so the function checks
- * the input vf_id against the bitmap to verify if the VF has been
- * detected in any previous mailbox iterations.
+ * Return true if the passed min_tx_rate would cause oversubscription, else
+ * return false
+ */
+static bool
+ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
+{
+ int link_speed_mbps = ice_get_link_speed_mbps(ice_get_vf_vsi(vf));
+ int all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);
+
+ /* this VF's previous rate is being overwritten */
+ all_vfs_min_tx_rate -= vf->min_tx_rate;
+
+ if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) {
+ dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n",
+ min_tx_rate, vf->vf_id,
+ all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps,
+ link_speed_mbps);
+ return true;
+ }
+
+ return false;
+}
+
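A worked example of the check above with made-up numbers: on a 10000 Mbps link where the other VFs already guarantee 9000 Mbps, a request for 1500 Mbps oversubscribes the link by 500 Mbps (the driver subtracts the VF's own previous guarantee before doing this sum):

	#include <stdbool.h>
	#include <stdio.h>

	static bool min_tx_rate_oversubscribed(int link_mbps, int other_vfs_mbps,
					       int req_mbps)
	{
		return other_vfs_mbps + req_mbps > link_mbps;
	}

	int main(void)
	{
		/* prints "oversubscribed by 500 Mbps" */
		if (min_tx_rate_oversubscribed(10000, 9000, 1500))
			printf("oversubscribed by %d Mbps\n", 9000 + 1500 - 10000);
		return 0;
	}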
+/**
+ * ice_set_vf_bw - set min/max VF bandwidth
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @min_tx_rate: Minimum Tx rate in Mbps
+ * @max_tx_rate: Maximum Tx rate in Mbps
*/
int
-ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
- u16 bitmap_len, u16 vf_id, bool *report_malvf)
+ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate)
{
- if (!all_malvfs || !report_malvf)
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vsi *vsi;
+ struct device *dev;
+ struct ice_vf *vf;
+ int ret;
+
+ dev = ice_pf_to_dev(pf);
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
return -EINVAL;
- *report_malvf = false;
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
+
+ vsi = ice_get_vf_vsi(vf);
+
+ /* when max_tx_rate is zero that means no max Tx rate limiting, so only
+ * check if max_tx_rate is non-zero
+ */
+ if (max_tx_rate && min_tx_rate > max_tx_rate) {
+ dev_err(dev, "Cannot set min Tx rate %d Mbps greater than max Tx rate %d Mbps\n",
+ min_tx_rate, max_tx_rate);
+ ret = -EINVAL;
+ goto out_put_vf;
+ }
+
+ if (min_tx_rate && ice_is_dcb_active(pf)) {
+ dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
+ ret = -EOPNOTSUPP;
+ goto out_put_vf;
+ }
+
+ if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) {
+ ret = -EINVAL;
+ goto out_put_vf;
+ }
+
+ if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
+ ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
+ if (ret) {
+ dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
+ vf->vf_id);
+ goto out_put_vf;
+ }
+
+ vf->min_tx_rate = min_tx_rate;
+ }
+
+ if (vf->max_tx_rate != (unsigned int)max_tx_rate) {
+ ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000);
+ if (ret) {
+ dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
+ vf->vf_id);
+ goto out_put_vf;
+ }
+
+ vf->max_tx_rate = max_tx_rate;
+ }
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_get_vf_stats - populate some stats for the VF
+ * @netdev: the netdev of the PF
+ * @vf_id: the host OS identifier (0-255)
+ * @vf_stats: pointer to the OS memory to be initialized
+ */
+int ice_get_vf_stats(struct net_device *netdev, int vf_id,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_eth_stats *stats;
+ struct ice_vsi *vsi;
+ struct ice_vf *vf;
+ int ret;
- if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len)
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
return -EINVAL;
- if (vf_id >= bitmap_len)
- return -EIO;
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
- /* If the vf_id is found in the bitmap set bit and boolean to true */
- if (!test_and_set_bit(vf_id, all_malvfs))
- *report_malvf = true;
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ ret = -EINVAL;
+ goto out_put_vf;
+ }
- return 0;
+ ice_update_eth_stats(vsi);
+ stats = &vsi->eth_stats;
+
+ memset(vf_stats, 0, sizeof(*vf_stats));
+
+ vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
+ stats->rx_multicast;
+ vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
+ stats->tx_multicast;
+ vf_stats->rx_bytes = stats->rx_bytes;
+ vf_stats->tx_bytes = stats->tx_bytes;
+ vf_stats->broadcast = stats->rx_broadcast;
+ vf_stats->multicast = stats->rx_multicast;
+ vf_stats->rx_dropped = stats->rx_discards;
+ vf_stats->tx_dropped = stats->tx_discards;
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_is_supported_port_vlan_proto - make sure the vlan_proto is supported
+ * @hw: hardware structure used to check the VLAN mode
+ * @vlan_proto: VLAN TPID being checked
+ *
+ * If the device is configured in Double VLAN Mode (DVM), then both ETH_P_8021Q
+ * and ETH_P_8021AD are supported. If the device is configured in Single VLAN
+ * Mode (SVM), then only ETH_P_8021Q is supported.
+ */
+static bool
+ice_is_supported_port_vlan_proto(struct ice_hw *hw, u16 vlan_proto)
+{
+ bool is_supported = false;
+
+ switch (vlan_proto) {
+ case ETH_P_8021Q:
+ is_supported = true;
+ break;
+ case ETH_P_8021AD:
+ if (ice_is_dvm_ena(hw))
+ is_supported = true;
+ break;
+ }
+
+ return is_supported;
}
/**
- * ice_mbx_clear_malvf - Clear VF bitmap and counter for VF ID
- * @snap: pointer to the mailbox snapshot structure
- * @all_malvfs: all malicious VFs tracked by PF
- * @bitmap_len: length of bitmap in bits
- * @vf_id: relative virtual function ID of the malicious VF
+ * ice_set_vf_port_vlan
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @vlan_id: VLAN ID being set
+ * @qos: priority setting
+ * @vlan_proto: VLAN protocol
*
- * In case of a VF reset, this function can be called to clear
- * the bit corresponding to the VF ID in the bitmap tracking all
- * malicious VFs attached to the PF. The function also clears the
- * VF counter array at the index of the VF ID. This is to ensure
- * that the new VF loaded is not considered malicious before going
- * through the overflow detection algorithm.
+ * program VF Port VLAN ID and/or QoS
*/
int
-ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
- u16 bitmap_len, u16 vf_id)
+ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
+ __be16 vlan_proto)
{
- if (!snap || !all_malvfs)
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ u16 local_vlan_proto = ntohs(vlan_proto);
+ struct device *dev;
+ struct ice_vf *vf;
+ int ret;
+
+ dev = ice_pf_to_dev(pf);
+
+ if (vlan_id >= VLAN_N_VID || qos > 7) {
+ dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
+ vf_id, vlan_id, qos);
return -EINVAL;
+ }
- if (bitmap_len < snap->mbx_vf.vfcntr_len)
+ if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) {
+ dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n",
+ local_vlan_proto);
+ return -EPROTONOSUPPORT;
+ }
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
return -EINVAL;
- /* Ensure VF ID value is not larger than bitmap or VF counter length */
- if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len)
- return -EIO;
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
- /* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */
- clear_bit(vf_id, all_malvfs);
+ if (ice_vf_get_port_vlan_prio(vf) == qos &&
+ ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto &&
+ ice_vf_get_port_vlan_id(vf) == vlan_id) {
+ /* duplicate request, so just return success */
+ dev_dbg(dev, "Duplicate port VLAN %u, QoS %u, TPID 0x%04x request\n",
+ vlan_id, qos, local_vlan_proto);
+ ret = 0;
+ goto out_put_vf;
+ }
- /* Clear the VF counter in the mailbox snapshot structure for that VF ID.
- * This is to ensure that if a VF is unloaded and a new one brought back
- * up with the same VF ID for a snapshot currently in traversal or detect
- * state the counter for that VF ID does not increment on top of existing
- * values in the mailbox overflow detection algorithm.
- */
- snap->mbx_vf.vf_cntr[vf_id] = 0;
+ mutex_lock(&vf->cfg_lock);
- return 0;
+ vf->port_vlan_info = ICE_VLAN(local_vlan_proto, vlan_id, qos);
+ if (ice_vf_is_port_vlan_ena(vf))
+ dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n",
+ vlan_id, qos, local_vlan_proto, vf_id);
+ else
+ dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
+
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
+ mutex_unlock(&vf->cfg_lock);
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
}
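The ice_set_vf_*/ice_get_vf_* handlers above are exposed to "ip link set ... vf ..." and friends through net_device_ops; a hedged sketch of the hookup, which in the real driver sits in ice_main.c rather than in this file:

	static const struct net_device_ops ice_netdev_ops_sketch = {
		/* only the VF-related callbacks are shown */
		.ndo_set_vf_mac		= ice_set_vf_mac,
		.ndo_set_vf_vlan	= ice_set_vf_port_vlan,
		.ndo_set_vf_spoofchk	= ice_set_vf_spoofchk,
		.ndo_set_vf_trust	= ice_set_vf_trust,
		.ndo_set_vf_rate	= ice_set_vf_bw,
		.ndo_set_vf_link_state	= ice_set_vf_link_state,
		.ndo_get_vf_config	= ice_get_vf_cfg,
		.ndo_get_vf_stats	= ice_get_vf_stats,
	};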
/**
- * ice_mbx_init_snapshot - Initialize mailbox snapshot structure
- * @hw: pointer to the hardware structure
- * @vf_count: number of VFs allocated on a PF
- *
- * Clear the mailbox snapshot structure and allocate memory
- * for the VF counter array based on the number of VFs allocated
- * on that PF.
+ * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
+ * @vf: pointer to the VF structure
+ */
+void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(pf);
+
+ dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
+ vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
+ vf->dev_lan_addr.addr,
+ test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
+ ? "on" : "off");
+}
+
+/**
+ * ice_print_vfs_mdd_events - print VFs' malicious driver detect events
+ * @pf: pointer to the PF structure
*
- * Assumption: This function will assume ice_get_caps() has already been
- * called to ensure that the vf_count can be compared against the number
- * of VFs supported as defined in the functional capabilities of the device.
+ * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
*/
-int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
+void ice_print_vfs_mdd_events(struct ice_pf *pf)
{
- struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vf;
+ unsigned int bkt;
- /* Ensure that the number of VFs allocated is non-zero and
- * is not greater than the number of supported VFs defined in
- * the functional capabilities of the PF.
- */
- if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs)
- return -EINVAL;
+ /* check that there are pending MDD events to print */
+ if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
+ return;
- snap->mbx_vf.vf_cntr = devm_kcalloc(ice_hw_to_dev(hw), vf_count,
- sizeof(*snap->mbx_vf.vf_cntr),
- GFP_KERNEL);
- if (!snap->mbx_vf.vf_cntr)
- return -ENOMEM;
+ /* VF MDD event logs are rate limited to one second intervals */
+ if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1))
+ return;
- /* Setting the VF counter length to the number of allocated
- * VFs for given PF's functional capabilities.
- */
- snap->mbx_vf.vfcntr_len = vf_count;
+ pf->vfs.last_printed_mdd_jiffies = jiffies;
- /* Clear mbx_buf in the mailbox snaphot structure and setting the
- * mailbox snapshot state to a new capture.
- */
- memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
- snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
+ /* only print Rx MDD event message if there are new events */
+ if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
+ vf->mdd_rx_events.last_printed =
+ vf->mdd_rx_events.count;
+ ice_print_vf_rx_mdd_event(vf);
+ }
- return 0;
+ /* only print Tx MDD event message if there are new events */
+ if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
+ vf->mdd_tx_events.last_printed =
+ vf->mdd_tx_events.count;
+
+ dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
+ vf->mdd_tx_events.count, hw->pf_id, vf->vf_id,
+ vf->dev_lan_addr.addr);
+ }
+ }
+ mutex_unlock(&pf->vfs.table_lock);
}
/**
- * ice_mbx_deinit_snapshot - Free mailbox snapshot structure
- * @hw: pointer to the hardware structure
+ * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
+ * @pdev: pointer to a pci_dev structure
*
- * Clear the mailbox snapshot structure and free the VF counter array.
+ * Called when recovering from a PF FLR to restore interrupt capability to
+ * the VFs.
+ */
+void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
+{
+ u16 vf_id;
+ int pos;
+
+ if (!pci_num_vf(pdev))
+ return;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ struct pci_dev *vfdev;
+
+ pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
+ &vf_id);
+ vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
+ while (vfdev) {
+ if (vfdev->is_virtfn && vfdev->physfn == pdev)
+ pci_restore_msi_state(vfdev);
+ vfdev = pci_get_device(pdev->vendor, vf_id,
+ vfdev);
+ }
+ }
+}
+
+/**
+ * ice_is_malicious_vf - helper function to detect a malicious VF
+ * @pf: ptr to struct ice_pf
+ * @event: pointer to the AQ event
+ * @num_msg_proc: the number of messages processed so far
+ * @num_msg_pending: the number of messages pending in the admin queue
*/
-void ice_mbx_deinit_snapshot(struct ice_hw *hw)
+bool
+ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
+ u16 num_msg_proc, u16 num_msg_pending)
{
- struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+ s16 vf_id = le16_to_cpu(event->desc.retval);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_mbx_data mbxdata;
+ bool malvf = false;
+ struct ice_vf *vf;
+ int status;
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
+ return false;
- /* Free VF counter array and reset VF counter length */
- devm_kfree(ice_hw_to_dev(hw), snap->mbx_vf.vf_cntr);
- snap->mbx_vf.vfcntr_len = 0;
+ if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
+ goto out_put_vf;
+
+ mbxdata.num_msg_proc = num_msg_proc;
+ mbxdata.num_pending_arq = num_msg_pending;
+ mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries;
+#define ICE_MBX_OVERFLOW_WATERMARK 64
+ mbxdata.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
+
+ /* check to see if we have a malicious VF */
+ status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf);
+ if (status)
+ goto out_put_vf;
+
+ if (malvf) {
+ bool report_vf = false;
+
+ /* if the VF is malicious and we haven't let the user
+ * know about it, then let them know now
+ */
+ status = ice_mbx_report_malvf(&pf->hw, pf->vfs.malvfs,
+ ICE_MAX_SRIOV_VFS, vf_id,
+ &report_vf);
+ if (status)
+ dev_dbg(dev, "Error reporting malicious VF\n");
+
+ if (report_vf) {
+ struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
+
+ if (pf_vsi)
+ dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
+ &vf->dev_lan_addr.addr[0],
+ pf_vsi->netdev->dev_addr);
+ }
+ }
- /* Clear mbx_buf in the mailbox snaphot structure */
- memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
+out_put_vf:
+ ice_put_vf(vf);
+ return malvf;
}
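ice_is_malicious_vf() is meant to gate the normal virtchnl dispatch in the admin-queue service loop; a hedged sketch of that caller, which lives in ice_main.c and is not part of this hunk:

	static void ice_handle_mbx_msg(struct ice_pf *pf,
				       struct ice_rq_event_info *event,
				       u16 msgs_done, u16 msgs_pending)
	{
		/* drop the message instead of processing it when the VF is
		 * flagged as overflowing the mailbox
		 */
		if (ice_is_malicious_vf(pf, event, msgs_done, msgs_pending))
			return;

		ice_vc_process_vf_msg(pf, event);
	}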
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
index 68686a3fd7e8..955ab810a198 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.h
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -3,50 +3,159 @@
#ifndef _ICE_SRIOV_H_
#define _ICE_SRIOV_H_
+#include "ice_virtchnl_fdir.h"
+#include "ice_vf_lib.h"
+#include "ice_virtchnl.h"
-#include "ice_type.h"
-#include "ice_controlq.h"
+/* Static VF transaction/status register def */
+#define VF_DEVICE_STATUS 0xAA
+#define VF_TRANS_PENDING_M 0x20
-/* Defining the mailbox message threshold as 63 asynchronous
- * pending messages. Normal VF functionality does not require
- * sending more than 63 asynchronous pending message.
- */
-#define ICE_ASYNC_VF_MSG_THRESHOLD 63
+/* wait defines for polling PF_PCI_CIAD register status */
+#define ICE_PCI_CIAD_WAIT_COUNT 100
+#define ICE_PCI_CIAD_WAIT_DELAY_US 1
+
+/* VF resource constraints */
+#define ICE_MIN_QS_PER_VF 1
+#define ICE_NONQ_VECS_VF 1
+#define ICE_NUM_VF_MSIX_MED 17
+#define ICE_NUM_VF_MSIX_SMALL 5
+#define ICE_NUM_VF_MSIX_MULTIQ_MIN 3
+#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
+#define ICE_MAX_VF_RESET_TRIES 40
+#define ICE_MAX_VF_RESET_SLEEP_MS 20
#ifdef CONFIG_PCI_IOV
+void ice_process_vflr_event(struct ice_pf *pf);
+int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
+int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
int
-ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
- u8 *msg, u16 msglen, struct ice_sq_cd *cd);
+ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi);
+
+void ice_free_vfs(struct ice_pf *pf);
+void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
+void ice_restore_all_vfs_msi_state(struct pci_dev *pdev);
+bool
+ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
+ u16 num_msg_proc, u16 num_msg_pending);
-u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
int
-ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
- u16 vf_id, bool *is_mal_vf);
+ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
+ __be16 vlan_proto);
+
int
-ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
- u16 bitmap_len, u16 vf_id);
-int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count);
-void ice_mbx_deinit_snapshot(struct ice_hw *hw);
+ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate);
+
+int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
+
+int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
+
+int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
+
+int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
+
int
-ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
- u16 bitmap_len, u16 vf_id, bool *report_malvf);
+ice_get_vf_stats(struct net_device *netdev, int vf_id,
+ struct ifla_vf_stats *vf_stats);
+void
+ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
+void ice_print_vfs_mdd_events(struct ice_pf *pf);
+void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
+bool
+ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto);
#else /* CONFIG_PCI_IOV */
+static inline void ice_process_vflr_event(struct ice_pf *pf) { }
+static inline void ice_free_vfs(struct ice_pf *pf) { }
+static inline
+void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) { }
+static inline
+void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { }
+static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { }
+static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { }
+static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { }
+
+static inline bool
+ice_is_malicious_vf(struct ice_pf __always_unused *pf,
+ struct ice_rq_event_info __always_unused *event,
+ u16 __always_unused num_msg_proc,
+ u16 __always_unused num_msg_pending)
+{
+ return false;
+}
+
static inline int
-ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw,
- u16 __always_unused vfid, u32 __always_unused v_opcode,
- u32 __always_unused v_retval, u8 __always_unused *msg,
- u16 __always_unused msglen,
- struct ice_sq_cd __always_unused *cd)
+ice_sriov_configure(struct pci_dev __always_unused *pdev,
+ int __always_unused num_vfs)
{
- return 0;
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_mac(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, u8 __always_unused *mac)
+{
+ return -EOPNOTSUPP;
}
-static inline u32
-ice_conv_link_speed_to_virtchnl(bool __always_unused adv_link_support,
- u16 __always_unused link_speed)
+static inline int
+ice_get_vf_cfg(struct net_device __always_unused *netdev,
+ int __always_unused vf_id,
+ struct ifla_vf_info __always_unused *ivi)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_trust(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, bool __always_unused trusted)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_port_vlan(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, u16 __always_unused vid,
+ u8 __always_unused qos, __be16 __always_unused v_proto)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_spoofchk(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, bool __always_unused ena)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_link_state(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, int __always_unused link_state)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_bw(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, int __always_unused min_tx_rate,
+ int __always_unused max_tx_rate)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf,
+ struct ice_q_vector __always_unused *q_vector)
{
return 0;
}
+static inline int
+ice_get_vf_stats(struct net_device __always_unused *netdev,
+ int __always_unused vf_id,
+ struct ifla_vf_stats __always_unused *vf_stats)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_SRIOV_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 475ec2afa210..25b8f6f726eb 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -41,6 +41,7 @@ static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
{ ICE_IPV4_OFOS, 14 },
{ ICE_NVGRE, 34 },
{ ICE_MAC_IL, 42 },
+ { ICE_ETYPE_IL, 54 },
{ ICE_IPV4_IL, 56 },
{ ICE_TCP_IL, 76 },
{ ICE_PROTOCOL_LAST, 0 },
@@ -65,7 +66,8 @@ static const u8 dummy_gre_tcp_packet[] = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x08, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_IL 54 */
0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
0x00, 0x00, 0x00, 0x00,
@@ -86,6 +88,7 @@ static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
{ ICE_IPV4_OFOS, 14 },
{ ICE_NVGRE, 34 },
{ ICE_MAC_IL, 42 },
+ { ICE_ETYPE_IL, 54 },
{ ICE_IPV4_IL, 56 },
{ ICE_UDP_ILOS, 76 },
{ ICE_PROTOCOL_LAST, 0 },
@@ -110,7 +113,8 @@ static const u8 dummy_gre_udp_packet[] = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x08, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_IL 54 */
0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
0x00, 0x00, 0x00, 0x00,
@@ -131,6 +135,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
{ ICE_GENEVE, 42 },
{ ICE_VXLAN_GPE, 42 },
{ ICE_MAC_IL, 50 },
+ { ICE_ETYPE_IL, 62 },
{ ICE_IPV4_IL, 64 },
{ ICE_TCP_IL, 84 },
{ ICE_PROTOCOL_LAST, 0 },
@@ -158,7 +163,8 @@ static const u8 dummy_udp_tun_tcp_packet[] = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x08, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_IL 62 */
0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
0x00, 0x01, 0x00, 0x00,
@@ -182,6 +188,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
{ ICE_GENEVE, 42 },
{ ICE_VXLAN_GPE, 42 },
{ ICE_MAC_IL, 50 },
+ { ICE_ETYPE_IL, 62 },
{ ICE_IPV4_IL, 64 },
{ ICE_UDP_ILOS, 84 },
{ ICE_PROTOCOL_LAST, 0 },
@@ -209,7 +216,8 @@ static const u8 dummy_udp_tun_udp_packet[] = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x08, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_IL 62 */
0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
0x00, 0x01, 0x00, 0x00,
@@ -221,6 +229,224 @@ static const u8 dummy_udp_tun_udp_packet[] = {
0x00, 0x08, 0x00, 0x00,
};
+static const struct ice_dummy_pkt_offsets
+dummy_gre_ipv6_tcp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_NVGRE, 34 },
+ { ICE_MAC_IL, 42 },
+ { ICE_ETYPE_IL, 54 },
+ { ICE_IPV6_IL, 56 },
+ { ICE_TCP_IL, 96 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_gre_ipv6_tcp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x66, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x2F, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xdd, /* ICE_ETYPE_IL 54 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
+ 0x00, 0x08, 0x06, 0x40,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 96 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x02, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+};
+
+static const struct ice_dummy_pkt_offsets
+dummy_gre_ipv6_udp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_NVGRE, 34 },
+ { ICE_MAC_IL, 42 },
+ { ICE_ETYPE_IL, 54 },
+ { ICE_IPV6_IL, 56 },
+ { ICE_UDP_ILOS, 96 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_gre_ipv6_udp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x2F, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xdd, /* ICE_ETYPE_IL 54 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
+ 0x00, 0x08, 0x11, 0x40,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 96 */
+ 0x00, 0x08, 0x00, 0x00,
+};
+
+static const struct ice_dummy_pkt_offsets
+dummy_udp_tun_ipv6_tcp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_VXLAN, 42 },
+ { ICE_GENEVE, 42 },
+ { ICE_VXLAN_GPE, 42 },
+ { ICE_MAC_IL, 50 },
+ { ICE_ETYPE_IL, 62 },
+ { ICE_IPV6_IL, 64 },
+ { ICE_TCP_IL, 104 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_udp_tun_ipv6_tcp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x6e, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x40, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
+ 0x00, 0x5a, 0x00, 0x00,
+
+ 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xdd, /* ICE_ETYPE_IL 62 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
+ 0x00, 0x08, 0x06, 0x40,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 104 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x02, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+};
+
+static const struct ice_dummy_pkt_offsets
+dummy_udp_tun_ipv6_udp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_VXLAN, 42 },
+ { ICE_GENEVE, 42 },
+ { ICE_VXLAN_GPE, 42 },
+ { ICE_MAC_IL, 50 },
+ { ICE_ETYPE_IL, 62 },
+ { ICE_IPV6_IL, 64 },
+ { ICE_UDP_ILOS, 104 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_udp_tun_ipv6_udp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x62, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
+ 0x00, 0x4e, 0x00, 0x00,
+
+ 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xdd, /* ICE_ETYPE_IL 62 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
+ 0x00, 0x08, 0x11, 0x40,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 104 */
+ 0x00, 0x08, 0x00, 0x00,
+};
+
/* offset info for MAC + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
{ ICE_MAC_OFOS, 0 },
@@ -500,6 +726,495 @@ static const u8 dummy_vlan_udp_ipv6_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
+/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
+static const
+struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_GTP, 42 },
+ { ICE_IPV4_IL, 62 },
+ { ICE_TCP_IL, 82 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x58, /* IP 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
+ 0x00, 0x44, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x45, 0x00, 0x00, 0x28, /* IP 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* TCP 82 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
+static const
+struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_GTP, 42 },
+ { ICE_IPV4_IL, 62 },
+ { ICE_UDP_ILOS, 82 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x4c, /* IP 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
+ 0x00, 0x38, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x45, 0x00, 0x00, 0x1c, /* IP 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* UDP 82 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
+static const
+struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_GTP, 42 },
+ { ICE_IPV6_IL, 62 },
+ { ICE_TCP_IL, 102 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x6c, /* IP 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
+ 0x00, 0x58, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
+ 0x00, 0x14, 0x06, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+static const
+struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_GTP, 42 },
+ { ICE_IPV6_IL, 62 },
+ { ICE_UDP_ILOS, 102 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x60, /* IP 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
+ 0x00, 0x4c, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
+ 0x00, 0x08, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+static const
+struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_OF, 54 },
+ { ICE_GTP, 62 },
+ { ICE_IPV4_IL, 82 },
+ { ICE_TCP_IL, 102 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x86, 0xdd,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
+ 0x00, 0x44, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
+ 0x00, 0x44, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x45, 0x00, 0x00, 0x28, /* IP 82 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+static const
+struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_OF, 54 },
+ { ICE_GTP, 62 },
+ { ICE_IPV4_IL, 82 },
+ { ICE_UDP_ILOS, 102 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x86, 0xdd,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
+ 0x00, 0x38, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
+ 0x00, 0x38, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x45, 0x00, 0x00, 0x1c, /* IP 82 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+static const
+struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_OF, 54 },
+ { ICE_GTP, 62 },
+ { ICE_IPV6_IL, 82 },
+ { ICE_TCP_IL, 122 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x86, 0xdd,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
+ 0x00, 0x58, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
+ 0x00, 0x58, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
+ 0x00, 0x14, 0x06, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* TCP 122 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+static const
+struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_OF, 54 },
+ { ICE_GTP, 62 },
+ { ICE_IPV6_IL, 82 },
+ { ICE_UDP_ILOS, 122 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x86, 0xdd,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
+ 0x00, 0x4c, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
+ 0x00, 0x4c, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
+ 0x00, 0x08, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* UDP 122 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+static const u8 dummy_ipv4_gtpu_ipv4_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x00, 0x40, 0x00,
+ 0x40, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
+ 0x00, 0x00, 0x40, 0x00,
+ 0x40, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+static const
+struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_GTP_NO_PAY, 42 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const
+struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_OF, 54 },
+ { ICE_GTP_NO_PAY, 62 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_ipv6_gtp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x86, 0xdd,
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+	0x00, 0x6c, 0x11, 0x00, /* Next header UDP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x30, 0x00, 0x00, 0x28, /* ICE_GTP 62 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00,
+};
+
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
(DUMMY_ETH_HDR_LEN * \
@@ -1097,6 +1812,64 @@ ice_aq_get_recipe(struct ice_hw *hw,
}
/**
+ * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
+ * @hw: pointer to the HW struct
+ * @params: parameters used to update the default recipe
+ *
+ * This function only supports updating default recipes, and only a single
+ * lookup index (lkup_idx) within a recipe at a time.
+ *
+ * This is done as a read-modify-write operation. First, get the current recipe
+ * contents based on the recipe's ID. Then modify the field vector index and
+ * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
+ * the pre-existing recipe with the modifications.
+ */
+int
+ice_update_recipe_lkup_idx(struct ice_hw *hw,
+ struct ice_update_recipe_lkup_idx_params *params)
+{
+ struct ice_aqc_recipe_data_elem *rcp_list;
+ u16 num_recps = ICE_MAX_NUM_RECIPES;
+ int status;
+
+ rcp_list = kcalloc(num_recps, sizeof(*rcp_list), GFP_KERNEL);
+ if (!rcp_list)
+ return -ENOMEM;
+
+ /* read current recipe list from firmware */
+ rcp_list->recipe_indx = params->rid;
+ status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
+ params->rid, status);
+ goto error_out;
+ }
+
+	/* only modify the existing recipe's lkup_idx, and its mask if valid,
+	 * while leaving all other fields the same, then update the recipe in
+	 * firmware
+	 */
+ rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
+ if (params->mask_valid)
+ rcp_list->content.mask[params->lkup_idx] =
+ cpu_to_le16(params->mask);
+
+ if (params->ignore_valid)
+ rcp_list->content.lkup_indx[params->lkup_idx] |=
+ ICE_AQ_RECIPE_LKUP_IGNORE;
+
+ status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
+ if (status)
+ ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
+ params->rid, params->lkup_idx, params->fv_idx,
+ params->mask, params->mask_valid ? "true" : "false",
+ status);
+
+error_out:
+ kfree(rcp_list);
+ return status;
+}
+
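As an illustration of the read-modify-write flow described above, a caller could fill the parameter structure declared later in this patch (ice_switch.h) roughly as below. This is a sketch only: the recipe ID, lookup slot, field vector index and mask are placeholders, not values taken from this series, and 'hw' is assumed to be in scope.

	/* Illustrative values only; rid, lkup_idx, fv_idx and mask are
	 * placeholders, not taken from this series.
	 */
	struct ice_update_recipe_lkup_idx_params params = {
		.rid = 3,		/* default recipe to modify */
		.lkup_idx = 2,		/* lookup slot within that recipe */
		.fv_idx = 8,		/* new field vector index for the slot */
		.mask = 0xffff,		/* mask to program at lkup_idx */
		.mask_valid = true,
		.ignore_valid = false,	/* don't set ICE_AQ_RECIPE_LKUP_IGNORE */
	};
	int err = ice_update_recipe_lkup_idx(hw, &params);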
+/**
* ice_aq_map_recipe_to_profile - Map recipe to packet profile
* @hw: pointer to the HW struct
* @profile_id: package profile ID to associate the recipe with
@@ -1539,6 +2312,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
u16 vlan_id = ICE_MAX_VLAN_ID + 1;
+ u16 vlan_tpid = ETH_P_8021Q;
void *daddr = NULL;
u16 eth_hdr_sz;
u8 *eth_hdr;
@@ -1611,6 +2385,8 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
break;
case ICE_SW_LKUP_VLAN:
vlan_id = f_info->l_data.vlan.vlan_id;
+ if (f_info->l_data.vlan.tpid_valid)
+ vlan_tpid = f_info->l_data.vlan.tpid;
if (f_info->fltr_act == ICE_FWD_TO_VSI ||
f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
act |= ICE_SINGLE_ACT_PRUNE;
@@ -1653,6 +2429,8 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
if (!(vlan_id > ICE_MAX_VLAN_ID)) {
off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
*off = cpu_to_be16(vlan_id);
+ off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
+ *off = cpu_to_be16(vlan_tpid);
}
/* Create the switch rule with the final dummy Ethernet header */
@@ -3755,6 +4533,7 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
{ ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
{ ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
{ ICE_ETYPE_OL, { 0 } },
+ { ICE_ETYPE_IL, { 0 } },
{ ICE_VLAN_OFOS, { 2, 0 } },
{ ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
{ ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
@@ -3767,13 +4546,16 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
{ ICE_UDP_ILOS, { 0, 2 } },
{ ICE_VXLAN, { 8, 10, 12, 14 } },
{ ICE_GENEVE, { 8, 10, 12, 14 } },
- { ICE_NVGRE, { 0, 2, 4, 6 } },
+ { ICE_NVGRE, { 0, 2, 4, 6 } },
+ { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
+ { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
};
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
{ ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
{ ICE_MAC_IL, ICE_MAC_IL_HW },
{ ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
+ { ICE_ETYPE_IL, ICE_ETYPE_IL_HW },
{ ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
{ ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
{ ICE_IPV4_IL, ICE_IPV4_IL_HW },
@@ -3784,7 +4566,9 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
{ ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
{ ICE_VXLAN, ICE_UDP_OF_HW },
{ ICE_GENEVE, ICE_UDP_OF_HW },
- { ICE_NVGRE, ICE_GRE_OF_HW },
+ { ICE_NVGRE, ICE_GRE_OF_HW },
+ { ICE_GTP, ICE_UDP_OF_HW },
+ { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
};
/**
@@ -3868,6 +4652,23 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
}
/**
+ * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
+ *
+ * As the protocol ID for the outer VLAN is different in DVM and SVM, if DVM
+ * is supported then the protocol array record for the outer VLAN has to be
+ * modified to reflect the proper value for DVM.
+ */
+void ice_change_proto_id_to_dvm(void)
+{
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
+ if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
+ ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
+ ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
+}
+
+/**
* ice_prot_type_to_id - get protocol ID from protocol type
* @type: protocol type
* @id: pointer to variable that will receive the ID
@@ -4427,41 +5228,6 @@ ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
}
/**
- * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
- * @hw: pointer to hardware structure
- * @lkups: lookup elements or match criteria for the advanced recipe, one
- * structure per protocol header
- * @lkups_cnt: number of protocols
- * @bm: bitmap of field vectors to consider
- * @fv_list: pointer to a list that holds the returned field vectors
- */
-static int
-ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
- unsigned long *bm, struct list_head *fv_list)
-{
- u8 *prot_ids;
- int status;
- u16 i;
-
- prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL);
- if (!prot_ids)
- return -ENOMEM;
-
- for (i = 0; i < lkups_cnt; i++)
- if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
- status = -EIO;
- goto free_mem;
- }
-
- /* Find field vectors that include all specified protocol types */
- status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
-
-free_mem:
- kfree(prot_ids);
- return status;
-}
-
-/**
* ice_tun_type_match_word - determine if tun type needs a match mask
* @tun_type: tunnel type
* @mask: mask to be used for the tunnel
@@ -4472,6 +5238,8 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
case ICE_SW_TUN_GENEVE:
case ICE_SW_TUN_VXLAN:
case ICE_SW_TUN_NVGRE:
+ case ICE_SW_TUN_GTPU:
+ case ICE_SW_TUN_GTPC:
*mask = ICE_TUN_FLAG_MASK;
return true;
@@ -4537,6 +5305,12 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
case ICE_SW_TUN_NVGRE:
prof_type = ICE_PROF_TUN_GRE;
break;
+ case ICE_SW_TUN_GTPU:
+ prof_type = ICE_PROF_TUN_GTPU;
+ break;
+ case ICE_SW_TUN_GTPC:
+ prof_type = ICE_PROF_TUN_GTPC;
+ break;
case ICE_SW_TUN_AND_NON_TUN:
default:
prof_type = ICE_PROF_ALL;
@@ -4609,11 +5383,11 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
/* Get bitmap of field vectors (profiles) that are compatible with the
* rule request; only these will be searched in the subsequent call to
- * ice_get_fv.
+ * ice_get_sw_fv_list.
*/
ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
- status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
+ status = ice_get_sw_fv_list(hw, lkup_exts, fv_bitmap, &rm->fv_list);
if (status)
goto err_unroll;
@@ -4737,34 +5511,126 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
const u8 **pkt, u16 *pkt_len,
const struct ice_dummy_pkt_offsets **offsets)
{
- bool tcp = false, udp = false, ipv6 = false, vlan = false;
+ bool inner_tcp = false, inner_udp = false, outer_ipv6 = false;
+ bool vlan = false, inner_ipv6 = false, gtp_no_pay = false;
u16 i;
for (i = 0; i < lkups_cnt; i++) {
if (lkups[i].type == ICE_UDP_ILOS)
- udp = true;
+ inner_udp = true;
else if (lkups[i].type == ICE_TCP_IL)
- tcp = true;
+ inner_tcp = true;
else if (lkups[i].type == ICE_IPV6_OFOS)
- ipv6 = true;
+ outer_ipv6 = true;
else if (lkups[i].type == ICE_VLAN_OFOS)
vlan = true;
else if (lkups[i].type == ICE_ETYPE_OL &&
lkups[i].h_u.ethertype.ethtype_id ==
cpu_to_be16(ICE_IPV6_ETHER_ID) &&
lkups[i].m_u.ethertype.ethtype_id ==
- cpu_to_be16(0xFFFF))
- ipv6 = true;
+ cpu_to_be16(0xFFFF))
+ outer_ipv6 = true;
+ else if (lkups[i].type == ICE_ETYPE_IL &&
+ lkups[i].h_u.ethertype.ethtype_id ==
+ cpu_to_be16(ICE_IPV6_ETHER_ID) &&
+ lkups[i].m_u.ethertype.ethtype_id ==
+ cpu_to_be16(0xFFFF))
+ inner_ipv6 = true;
+ else if (lkups[i].type == ICE_IPV6_IL)
+ inner_ipv6 = true;
+ else if (lkups[i].type == ICE_GTP_NO_PAY)
+ gtp_no_pay = true;
+ }
+
+ if (tun_type == ICE_SW_TUN_GTPU) {
+ if (outer_ipv6) {
+ if (gtp_no_pay) {
+ *pkt = dummy_ipv6_gtp_packet;
+ *pkt_len = sizeof(dummy_ipv6_gtp_packet);
+ *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
+ } else if (inner_ipv6) {
+ if (inner_udp) {
+ *pkt = dummy_ipv6_gtpu_ipv6_udp_packet;
+ *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_udp_packet);
+ *offsets = dummy_ipv6_gtpu_ipv6_udp_packet_offsets;
+ } else {
+ *pkt = dummy_ipv6_gtpu_ipv6_tcp_packet;
+ *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_tcp_packet);
+ *offsets = dummy_ipv6_gtpu_ipv6_tcp_packet_offsets;
+ }
+ } else {
+ if (inner_udp) {
+ *pkt = dummy_ipv6_gtpu_ipv4_udp_packet;
+ *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_udp_packet);
+ *offsets = dummy_ipv6_gtpu_ipv4_udp_packet_offsets;
+ } else {
+ *pkt = dummy_ipv6_gtpu_ipv4_tcp_packet;
+ *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_tcp_packet);
+ *offsets = dummy_ipv6_gtpu_ipv4_tcp_packet_offsets;
+ }
+ }
+ } else {
+ if (gtp_no_pay) {
+ *pkt = dummy_ipv4_gtpu_ipv4_packet;
+ *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
+ *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
+ } else if (inner_ipv6) {
+ if (inner_udp) {
+ *pkt = dummy_ipv4_gtpu_ipv6_udp_packet;
+ *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_udp_packet);
+ *offsets = dummy_ipv4_gtpu_ipv6_udp_packet_offsets;
+ } else {
+ *pkt = dummy_ipv4_gtpu_ipv6_tcp_packet;
+ *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_tcp_packet);
+ *offsets = dummy_ipv4_gtpu_ipv6_tcp_packet_offsets;
+ }
+ } else {
+ if (inner_udp) {
+ *pkt = dummy_ipv4_gtpu_ipv4_udp_packet;
+ *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_udp_packet);
+ *offsets = dummy_ipv4_gtpu_ipv4_udp_packet_offsets;
+ } else {
+ *pkt = dummy_ipv4_gtpu_ipv4_tcp_packet;
+ *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_tcp_packet);
+ *offsets = dummy_ipv4_gtpu_ipv4_tcp_packet_offsets;
+ }
+ }
+ }
+ return;
+ }
+
+ if (tun_type == ICE_SW_TUN_GTPC) {
+ if (outer_ipv6) {
+ *pkt = dummy_ipv6_gtp_packet;
+ *pkt_len = sizeof(dummy_ipv6_gtp_packet);
+ *offsets = dummy_ipv6_gtp_no_pay_packet_offsets;
+ } else {
+ *pkt = dummy_ipv4_gtpu_ipv4_packet;
+ *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet);
+ *offsets = dummy_ipv4_gtp_no_pay_packet_offsets;
+ }
+ return;
}
if (tun_type == ICE_SW_TUN_NVGRE) {
- if (tcp) {
+ if (inner_tcp && inner_ipv6) {
+ *pkt = dummy_gre_ipv6_tcp_packet;
+ *pkt_len = sizeof(dummy_gre_ipv6_tcp_packet);
+ *offsets = dummy_gre_ipv6_tcp_packet_offsets;
+ return;
+ }
+ if (inner_tcp) {
*pkt = dummy_gre_tcp_packet;
*pkt_len = sizeof(dummy_gre_tcp_packet);
*offsets = dummy_gre_tcp_packet_offsets;
return;
}
-
+ if (inner_ipv6) {
+ *pkt = dummy_gre_ipv6_udp_packet;
+ *pkt_len = sizeof(dummy_gre_ipv6_udp_packet);
+ *offsets = dummy_gre_ipv6_udp_packet_offsets;
+ return;
+ }
*pkt = dummy_gre_udp_packet;
*pkt_len = sizeof(dummy_gre_udp_packet);
*offsets = dummy_gre_udp_packet_offsets;
@@ -4773,20 +5639,31 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
if (tun_type == ICE_SW_TUN_VXLAN ||
tun_type == ICE_SW_TUN_GENEVE) {
- if (tcp) {
+ if (inner_tcp && inner_ipv6) {
+ *pkt = dummy_udp_tun_ipv6_tcp_packet;
+ *pkt_len = sizeof(dummy_udp_tun_ipv6_tcp_packet);
+ *offsets = dummy_udp_tun_ipv6_tcp_packet_offsets;
+ return;
+ }
+ if (inner_tcp) {
*pkt = dummy_udp_tun_tcp_packet;
*pkt_len = sizeof(dummy_udp_tun_tcp_packet);
*offsets = dummy_udp_tun_tcp_packet_offsets;
return;
}
-
+ if (inner_ipv6) {
+ *pkt = dummy_udp_tun_ipv6_udp_packet;
+ *pkt_len = sizeof(dummy_udp_tun_ipv6_udp_packet);
+ *offsets = dummy_udp_tun_ipv6_udp_packet_offsets;
+ return;
+ }
*pkt = dummy_udp_tun_udp_packet;
*pkt_len = sizeof(dummy_udp_tun_udp_packet);
*offsets = dummy_udp_tun_udp_packet_offsets;
return;
}
- if (udp && !ipv6) {
+ if (inner_udp && !outer_ipv6) {
if (vlan) {
*pkt = dummy_vlan_udp_packet;
*pkt_len = sizeof(dummy_vlan_udp_packet);
@@ -4797,7 +5674,7 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
*pkt_len = sizeof(dummy_udp_packet);
*offsets = dummy_udp_packet_offsets;
return;
- } else if (udp && ipv6) {
+ } else if (inner_udp && outer_ipv6) {
if (vlan) {
*pkt = dummy_vlan_udp_ipv6_packet;
*pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
@@ -4808,7 +5685,7 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
*pkt_len = sizeof(dummy_udp_ipv6_packet);
*offsets = dummy_udp_ipv6_packet_offsets;
return;
- } else if ((tcp && ipv6) || ipv6) {
+ } else if ((inner_tcp && outer_ipv6) || outer_ipv6) {
if (vlan) {
*pkt = dummy_vlan_tcp_ipv6_packet;
*pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
@@ -4885,6 +5762,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
len = sizeof(struct ice_ether_hdr);
break;
case ICE_ETYPE_OL:
+ case ICE_ETYPE_IL:
len = sizeof(struct ice_ethtype_hdr);
break;
case ICE_VLAN_OFOS:
@@ -4913,6 +5791,10 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
case ICE_GENEVE:
len = sizeof(struct ice_udp_tnl_hdr);
break;
+ case ICE_GTP_NO_PAY:
+ case ICE_GTP:
+ len = sizeof(struct ice_udp_gtp_hdr);
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index d8334beaaa8a..ed3d1d03befa 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -14,6 +14,15 @@
#define ICE_VSI_INVAL_ID 0xffff
#define ICE_INVAL_Q_HANDLE 0xFFFF
+/* Switch Profile IDs for Profile related switch rules */
+#define ICE_PROFID_IPV4_GTPC_TEID 41
+#define ICE_PROFID_IPV4_GTPC_NO_TEID 42
+#define ICE_PROFID_IPV4_GTPU_TEID 43
+#define ICE_PROFID_IPV6_GTPC_TEID 44
+#define ICE_PROFID_IPV6_GTPC_NO_TEID 45
+#define ICE_PROFID_IPV6_GTPU_TEID 46
+#define ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER 70
+
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
@@ -33,15 +42,6 @@ struct ice_vsi_ctx {
struct ice_q_ctx *rdma_q_ctx[ICE_MAX_TRAFFIC_CLASS];
};
-enum ice_sw_fwd_act_type {
- ICE_FWD_TO_VSI = 0,
- ICE_FWD_TO_VSI_LIST, /* Do not use this when adding filter */
- ICE_FWD_TO_Q,
- ICE_FWD_TO_QGRP,
- ICE_DROP_PACKET,
- ICE_INVAL_ACT
-};
-
/* Switch recipe ID enum values are specific to hardware */
enum ice_sw_lkup_type {
ICE_SW_LKUP_ETHERTYPE = 0,
@@ -86,6 +86,8 @@ struct ice_fltr_info {
} mac_vlan;
struct {
u16 vlan_id;
+ u16 tpid;
+ u8 tpid_valid;
} vlan;
/* Set lkup_type as ICE_SW_LKUP_ETHERTYPE
* if just using ethertype as filter. Set lkup_type as
@@ -125,6 +127,15 @@ struct ice_fltr_info {
u8 lan_en; /* Indicate if packet can be forwarded to the uplink */
};
+struct ice_update_recipe_lkup_idx_params {
+ u16 rid;
+ u16 fv_idx;
+ bool ignore_valid;
+ u16 mask;
+ bool mask_valid;
+ u8 lkup_idx;
+};
+
struct ice_adv_lkup_elem {
enum ice_protocol_type type;
union ice_prot_hdr h_u; /* Header values */
@@ -367,4 +378,8 @@ void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw);
int
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd);
+int
+ice_update_recipe_lkup_idx(struct ice_hw *hw,
+ struct ice_update_recipe_lkup_idx_params *params);
+void ice_change_proto_id_to_dvm(void);
#endif /* _ICE_SWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index 65cf32eb4046..3acd9f921c44 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -24,6 +24,12 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
lkups_cnt++;
+ if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)
+ lkups_cnt++;
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS)
+ lkups_cnt++;
+
if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
@@ -33,9 +39,7 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT)
lkups_cnt++;
- /* currently inner etype filter isn't supported */
- if ((flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) &&
- fltr->tunnel_type == TNL_LAST)
+ if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID)
lkups_cnt++;
/* are MAC fields specified? */
@@ -64,6 +68,11 @@ static enum ice_protocol_type ice_proto_type_from_mac(bool inner)
return inner ? ICE_MAC_IL : ICE_MAC_OFOS;
}
+static enum ice_protocol_type ice_proto_type_from_etype(bool inner)
+{
+ return inner ? ICE_ETYPE_IL : ICE_ETYPE_OL;
+}
+
static enum ice_protocol_type ice_proto_type_from_ipv4(bool inner)
{
return inner ? ICE_IPV4_IL : ICE_IPV4_OFOS;
@@ -96,6 +105,11 @@ ice_proto_type_from_tunnel(enum ice_tunnel_type type)
return ICE_GENEVE;
case TNL_GRETAP:
return ICE_NVGRE;
+ case TNL_GTPU:
+ /* NO_PAY profiles will not work with GTP-U */
+ return ICE_GTP;
+ case TNL_GTPC:
+ return ICE_GTP_NO_PAY;
default:
return 0;
}
@@ -111,6 +125,10 @@ ice_sw_type_from_tunnel(enum ice_tunnel_type type)
return ICE_SW_TUN_GENEVE;
case TNL_GRETAP:
return ICE_SW_TUN_NVGRE;
+ case TNL_GTPU:
+ return ICE_SW_TUN_GTPU;
+ case TNL_GTPC:
+ return ICE_SW_TUN_GTPC;
default:
return ICE_NON_TUN;
}
@@ -137,7 +155,15 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
break;
case TNL_GRETAP:
list[i].h_u.nvgre_hdr.tni_flow = fltr->tenant_id;
- memcpy(&list[i].m_u.nvgre_hdr.tni_flow, "\xff\xff\xff\xff", 4);
+ memcpy(&list[i].m_u.nvgre_hdr.tni_flow,
+ "\xff\xff\xff\xff", 4);
+ i++;
+ break;
+ case TNL_GTPC:
+ case TNL_GTPU:
+ list[i].h_u.gtp_hdr.teid = fltr->tenant_id;
+ memcpy(&list[i].m_u.gtp_hdr.teid,
+ "\xff\xff\xff\xff", 4);
i++;
break;
default:
@@ -145,6 +171,33 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
}
}
+ if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC) {
+ list[i].type = ice_proto_type_from_mac(false);
+ ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
+ hdr->l2_key.dst_mac);
+ ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
+ hdr->l2_mask.dst_mac);
+ i++;
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS &&
+ (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
+ list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
+
+ if (fltr->gtp_pdu_info_masks.pdu_type) {
+ list[i].h_u.gtp_hdr.pdu_type =
+ fltr->gtp_pdu_info_keys.pdu_type << 4;
+ memcpy(&list[i].m_u.gtp_hdr.pdu_type, "\xf0", 1);
+ }
+
+ if (fltr->gtp_pdu_info_masks.qfi) {
+ list[i].h_u.gtp_hdr.qfi = fltr->gtp_pdu_info_keys.qfi;
+ memcpy(&list[i].m_u.gtp_hdr.qfi, "\x3f", 1);
+ }
+
+ i++;
+ }
+
if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
list[i].type = ice_proto_type_from_ipv4(false);
@@ -224,8 +277,10 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
headers = &tc_fltr->inner_headers;
inner = true;
- } else if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
- list[i].type = ICE_ETYPE_OL;
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
+ list[i].type = ice_proto_type_from_etype(inner);
list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto;
list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto;
i++;
@@ -344,6 +399,12 @@ static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
if (netif_is_gretap(tunnel_dev) ||
netif_is_ip6gretap(tunnel_dev))
return TNL_GRETAP;
+
+ /* Assume GTP-U by default in case of GTP netdev.
+ * GTP-C may be selected later, based on enc_dst_port.
+ */
+ if (netif_is_gtp(tunnel_dev))
+ return TNL_GTPU;
return TNL_LAST;
}
@@ -743,6 +804,40 @@ ice_get_tunnel_device(struct net_device *dev, struct flow_rule *rule)
return NULL;
}
+/**
+ * ice_parse_gtp_type - Sets GTP tunnel type to GTP-U or GTP-C
+ * @match: Flow match structure
+ * @fltr: Pointer to filter structure
+ *
+ * GTP-C/GTP-U is selected based on the destination port number (enc_dst_port).
+ * Before calling this function, fltr->tunnel_type should be set to TNL_GTPU,
+ * making GTP-U the default choice when the destination port number is not
+ * specified.
+ */
+static int
+ice_parse_gtp_type(struct flow_match_ports match,
+ struct ice_tc_flower_fltr *fltr)
+{
+ u16 dst_port;
+
+ if (match.key->dst) {
+ dst_port = be16_to_cpu(match.key->dst);
+
+		switch (dst_port) {
+		case 2152:
+			/* well-known GTP-U port; keep the TNL_GTPU default */
+			break;
+		case 2123:
+			/* well-known GTP-C port */
+			fltr->tunnel_type = TNL_GTPC;
+			break;
+ default:
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported GTP port number");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int
ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
struct ice_tc_flower_fltr *fltr)
@@ -798,8 +893,28 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
struct flow_match_ports match;
flow_rule_match_enc_ports(rule, &match);
- if (ice_tc_set_port(match, fltr, headers, true))
- return -EINVAL;
+
+ if (fltr->tunnel_type != TNL_GTPU) {
+ if (ice_tc_set_port(match, fltr, headers, true))
+ return -EINVAL;
+ } else {
+ if (ice_parse_gtp_type(match, fltr))
+ return -EINVAL;
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+ struct flow_match_enc_opts match;
+
+ flow_rule_match_enc_opts(rule, &match);
+
+ memcpy(&fltr->gtp_pdu_info_keys, &match.key->data[0],
+ sizeof(struct gtp_pdu_session_info));
+
+ memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0],
+ sizeof(struct gtp_pdu_session_info));
+
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_OPTS;
}
return 0;
@@ -837,6 +952,7 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
BIT(FLOW_DISSECTOR_KEY_PORTS))) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
@@ -1059,12 +1175,24 @@ ice_handle_tclass_action(struct ice_vsi *vsi,
* this code won't do anything
* 2. For non-tunnel, if user didn't specify MAC address, add implicit
* dest MAC to be lower netdev's active unicast MAC address
+	 * 3. For tunnel, as of now the TC filter via the flower classifier has
+	 * no provision for the user to specify the outer DMAC, hence the driver
+	 * implicitly adds the outer dest MAC as the lower netdev's active
+	 * unicast MAC address.
*/
- if (!(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC)) {
- ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
- main_vsi->netdev->dev_addr);
- eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
+ if (fltr->tunnel_type != TNL_LAST &&
+ !(fltr->flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC))
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DST_MAC;
+
+ if (fltr->tunnel_type == TNL_LAST &&
+ !(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC))
fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
+
+ if (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
+ ICE_TC_FLWR_FIELD_ENC_DST_MAC)) {
+ ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
+ vsi->netdev->dev_addr);
+ memset(fltr->outer_headers.l2_mask.dst_mac, 0xff, ETH_ALEN);
}
/* validate specified dest MAC address, make sure either it belongs to
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
index 319049477959..e25e958f4396 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -22,6 +22,7 @@
#define ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT BIT(15)
#define ICE_TC_FLWR_FIELD_ENC_DST_MAC BIT(16)
#define ICE_TC_FLWR_FIELD_ETH_TYPE_ID BIT(17)
+#define ICE_TC_FLWR_FIELD_ENC_OPTS BIT(18)
#define ICE_TC_FLOWER_MASK_32 0xFFFFFFFF
@@ -119,6 +120,8 @@ struct ice_tc_flower_fltr {
struct ice_tc_flower_lyr_2_4_hdrs inner_headers;
struct ice_vsi *src_vsi;
__be32 tenant_id;
+ struct gtp_pdu_session_info gtp_pdu_info_keys;
+ struct gtp_pdu_session_info gtp_pdu_info_masks;
u32 flags;
u8 tunnel_type;
struct ice_tc_flower_action action;
diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h
index cf685247c07a..ae98d5a8ff60 100644
--- a/drivers/net/ethernet/intel/ice/ice_trace.h
+++ b/drivers/net/ethernet/intel/ice/ice_trace.h
@@ -216,6 +216,30 @@ DEFINE_EVENT(ice_xmit_template, name, \
DEFINE_XMIT_TEMPLATE_OP_EVENT(ice_xmit_frame_ring);
DEFINE_XMIT_TEMPLATE_OP_EVENT(ice_xmit_frame_ring_drop);
+DECLARE_EVENT_CLASS(ice_tx_tstamp_template,
+ TP_PROTO(struct sk_buff *skb, int idx),
+
+ TP_ARGS(skb, idx),
+
+ TP_STRUCT__entry(__field(void *, skb)
+ __field(int, idx)),
+
+ TP_fast_assign(__entry->skb = skb;
+ __entry->idx = idx;),
+
+ TP_printk("skb %pK idx %d",
+ __entry->skb, __entry->idx)
+);
+#define DEFINE_TX_TSTAMP_OP_EVENT(name) \
+DEFINE_EVENT(ice_tx_tstamp_template, name, \
+ TP_PROTO(struct sk_buff *skb, int idx), \
+ TP_ARGS(skb, idx))
+
+DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_request);
+DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_fw_req);
+DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_fw_done);
+DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_complete);
+
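These class-based tracepoints are intended for the Tx timestamp path; a minimal sketch of how one of them could be emitted is below. The surrounding function and the source of 'idx' are hypothetical; DEFINE_EVENT() above is what generates the trace_ice_tx_tstamp_request() symbol used here.

	/* Hypothetical call site: record that a Tx timestamp was requested for
	 * an skb using tracker slot 'idx'.
	 */
	static void example_mark_tstamp_request(struct sk_buff *skb, int idx)
	{
		trace_ice_tx_tstamp_request(skb, idx);
	}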
/* End tracepoints */
#endif /* _ICE_TRACE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 3e38695f1c9d..f9bf008471c9 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -173,6 +173,8 @@ tx_skip_free:
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
+ tx_ring->next_dd = ICE_RING_QUARTER(tx_ring) - 1;
+ tx_ring->next_rs = ICE_RING_QUARTER(tx_ring) - 1;
if (!tx_ring->netdev)
return;
@@ -221,8 +223,7 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
struct ice_tx_buf *tx_buf;
/* get the bql data ready */
- if (!ice_ring_is_xdp(tx_ring))
- netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));
+ netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));
tx_buf = &tx_ring->tx_buf[i];
tx_desc = ICE_TX_DESC(tx_ring, i);
@@ -311,10 +312,6 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
tx_ring->next_to_clean = i;
ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
-
- if (ice_ring_is_xdp(tx_ring))
- return !!budget;
-
netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);
#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
@@ -983,15 +980,17 @@ static struct sk_buff *
ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct xdp_buff *xdp)
{
+ unsigned int metasize = xdp->data - xdp->data_meta;
unsigned int size = xdp->data_end - xdp->data;
unsigned int headlen;
struct sk_buff *skb;
/* prefetch first cache line of first page */
- net_prefetch(xdp->data);
+ net_prefetch(xdp->data_meta);
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+ ICE_RX_HDR_SIZE + metasize,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
@@ -1003,8 +1002,13 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
- sizeof(long)));
+ memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
+ ALIGN(headlen + metasize, sizeof(long)));
+
+ if (metasize) {
+ skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
/* if we exhaust the linear part then add what is left as a frag */
size -= headlen;
@@ -1080,7 +1084,7 @@ ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
{
/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
- if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
+ if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
return false;
rx_ring->rx_stats.non_eop_descs++;
@@ -1142,7 +1146,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
* hardware wrote DD then it will be non-zero
*/
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
- if (!ice_test_staterr(rx_desc, stat_err_bits))
+ if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
break;
/* This memory barrier is needed to keep us from reading
@@ -1156,7 +1160,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
struct ice_vsi *ctrl_vsi = rx_ring->vsi;
if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
- ctrl_vsi->vf_id != ICE_INVAL_VFID)
+ ctrl_vsi->vf)
ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
ice_put_rx_buf(rx_ring, NULL, 0);
cleaned_count++;
@@ -1228,14 +1232,13 @@ construct_skb:
continue;
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
- if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
+ if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
+ stat_err_bits))) {
dev_kfree_skb_any(skb);
continue;
}
- stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
- if (ice_test_staterr(rx_desc, stat_err_bits))
- vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
+ vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
/* pad the skb if needed, to make a valid ethernet frame */
if (eth_skb_pad(skb)) {
@@ -1460,7 +1463,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
bool wd;
if (tx_ring->xsk_pool)
- wd = ice_clean_tx_irq_zc(tx_ring, budget);
+ wd = ice_xmit_zc(tx_ring, ICE_DESC_UNUSED(tx_ring), budget);
else if (ice_ring_is_xdp(tx_ring))
wd = true;
else
@@ -1513,7 +1516,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
/* Exit the polling mode, but don't re-enable interrupts if stack might
* poll us due to busy-polling
*/
- if (likely(napi_complete_done(napi, work_done))) {
+ if (napi_complete_done(napi, work_done)) {
ice_net_dim(q_vector);
ice_enable_interrupt(q_vector);
} else {
@@ -1917,12 +1920,16 @@ ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
return;
- /* currently, we always assume 802.1Q for VLAN insertion as VLAN
- * insertion for 802.1AD is not supported
+	/* the VLAN ethertype/tpid is determined by VSI configuration and netdev
+	 * feature flags; the driver allows only 802.1Q or 802.1ad VLAN offloads
+	 * at a time, so only the VLAN ID matters here
*/
if (skb_vlan_tag_present(skb)) {
first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
- first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
+ if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
+ first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
+ else
+ first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
}
ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
@@ -2295,6 +2302,13 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
/* prepare the VLAN tagging flags for Tx */
ice_tx_prepare_vlan_flags(tx_ring, first);
+ if (first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
+ offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
+ (ICE_TX_CTX_DESC_IL2TAG2 <<
+ ICE_TXD_CTX_QW1_CMD_S));
+ offload.cd_l2tag2 = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
+ ICE_TX_FLAGS_VLAN_S;
+ }
/* set up TSO offload */
tso = ice_tso(first, &offload);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index b7b3bd4816f0..cead3eb149bd 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -13,7 +13,6 @@
#define ICE_MAX_CHAINED_RX_BUFS 5
#define ICE_MAX_BUF_TXD 8
#define ICE_MIN_TX_LEN 17
-#define ICE_TX_THRESH 32
/* The size limit for a transmit buffer in a descriptor is (16K - 1).
* In order to align with the read requests we will align the value to
@@ -111,6 +110,8 @@ static inline int ice_skb_pad(void)
(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
+#define ICE_RING_QUARTER(R) ((R)->count >> 2)
+
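For example, with a 512-descriptor ring ICE_RING_QUARTER() yields a threshold of 128 descriptors, so the XDP cleanup and RS thresholds now scale with the ring size instead of using the fixed ICE_TX_THRESH of 32 removed earlier in this file.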
#define ICE_TX_FLAGS_TSO BIT(0)
#define ICE_TX_FLAGS_HW_VLAN BIT(1)
#define ICE_TX_FLAGS_SW_VLAN BIT(2)
@@ -122,6 +123,7 @@ static inline int ice_skb_pad(void)
#define ICE_TX_FLAGS_IPV4 BIT(5)
#define ICE_TX_FLAGS_IPV6 BIT(6)
#define ICE_TX_FLAGS_TUNNEL BIT(7)
+#define ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN BIT(8)
#define ICE_TX_FLAGS_VLAN_M 0xffff0000
#define ICE_TX_FLAGS_VLAN_PR_M 0xe0000000
#define ICE_TX_FLAGS_VLAN_PR_S 29
@@ -321,18 +323,21 @@ struct ice_tx_ring {
u16 count; /* Number of descriptors */
u16 q_index; /* Queue number of ring */
/* stats structs */
- struct ice_q_stats stats;
- struct u64_stats_sync syncp;
struct ice_txq_stats tx_stats;
-
/* CL3 - 3rd cacheline starts here */
+ struct ice_q_stats stats;
+ struct u64_stats_sync syncp;
struct rcu_head rcu; /* to avoid race on free */
DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */
struct ice_channel *ch;
struct ice_ptp_tx *tx_tstamps;
spinlock_t tx_lock;
u32 txq_teid; /* Added Tx queue TEID */
+ /* CL4 - 4th cacheline starts here */
+ u16 xdp_tx_active;
#define ICE_TX_FLAGS_RING_XDP BIT(0)
+#define ICE_TX_FLAGS_RING_VLAN_L2TAG1 BIT(1)
+#define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2)
u8 flags;
u8 dcb_tc; /* Traffic class of ring */
u8 ptp_tx;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 0e87b98e0966..7ee38d02d1e5 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -209,9 +209,14 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring,
void
ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
- if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- (vlan_tag & VLAN_VID_MASK))
+ netdev_features_t features = rx_ring->netdev->features;
+ bool non_zero_vlan = !!(vlan_tag & VLAN_VID_MASK);
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_RX) && non_zero_vlan)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+ else if ((features & NETIF_F_HW_VLAN_STAG_RX) && non_zero_vlan)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);
+
napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
@@ -222,6 +227,7 @@ ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
{
unsigned int total_bytes = 0, total_pkts = 0;
+ u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
u16 ntc = xdp_ring->next_to_clean;
struct ice_tx_desc *next_dd_desc;
u16 next_dd = xdp_ring->next_dd;
@@ -233,7 +239,7 @@ static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
return;
- for (i = 0; i < ICE_TX_THRESH; i++) {
+ for (i = 0; i < tx_thresh; i++) {
tx_buf = &xdp_ring->tx_buf[ntc];
total_bytes += tx_buf->bytecount;
@@ -254,9 +260,9 @@ static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
}
next_dd_desc->cmd_type_offset_bsz = 0;
- xdp_ring->next_dd = xdp_ring->next_dd + ICE_TX_THRESH;
+ xdp_ring->next_dd = xdp_ring->next_dd + tx_thresh;
if (xdp_ring->next_dd > xdp_ring->count)
- xdp_ring->next_dd = ICE_TX_THRESH - 1;
+ xdp_ring->next_dd = tx_thresh - 1;
xdp_ring->next_to_clean = ntc;
ice_update_tx_ring_stats(xdp_ring, total_pkts, total_bytes);
}
@@ -269,12 +275,13 @@ static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
*/
int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
{
+ u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
u16 i = xdp_ring->next_to_use;
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
dma_addr_t dma;
- if (ICE_DESC_UNUSED(xdp_ring) < ICE_TX_THRESH)
+ if (ICE_DESC_UNUSED(xdp_ring) < tx_thresh)
ice_clean_xdp_irq(xdp_ring);
if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {
@@ -300,13 +307,14 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0,
size, 0);
+ xdp_ring->xdp_tx_active++;
i++;
if (i == xdp_ring->count) {
i = 0;
tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
tx_desc->cmd_type_offset_bsz |=
cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
- xdp_ring->next_rs = ICE_TX_THRESH - 1;
+ xdp_ring->next_rs = tx_thresh - 1;
}
xdp_ring->next_to_use = i;
@@ -314,7 +322,7 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
tx_desc->cmd_type_offset_bsz |=
cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
- xdp_ring->next_rs += ICE_TX_THRESH;
+ xdp_ring->next_rs += tx_thresh;
}
return ICE_XDP_TX;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index 11b6c1601986..c7d2954dc9ea 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -7,7 +7,7 @@
/**
* ice_test_staterr - tests bits in Rx descriptor status and error fields
- * @rx_desc: pointer to receive descriptor (in le64 format)
+ * @status_err_n: Rx descriptor status_error0 or status_error1 bits
* @stat_err_bits: value to mask
*
* This function does some fast chicanery in order to return the
@@ -16,9 +16,9 @@
* at offset zero.
*/
static inline bool
-ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
+ice_test_staterr(__le16 status_err_n, const u16 stat_err_bits)
{
- return !!(rx_desc->wb.status_error0 & cpu_to_le16(stat_err_bits));
+ return !!(status_err_n & cpu_to_le16(stat_err_bits));
}
static inline __le64
@@ -32,6 +32,30 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
}
/**
+ * ice_get_vlan_tag_from_rx_desc - get VLAN from Rx flex descriptor
+ * @rx_desc: Rx 32b flex descriptor with RXDID=2
+ *
+ * The OS and current PF implementation only support stripping a single VLAN tag
+ * at a time, so there should only ever be 0 or 1 tags in the l2tag* fields. If
+ * one is found, return the tag; else return 0 to mean no VLAN tag was found.
+ */
+static inline u16
+ice_get_vlan_tag_from_rx_desc(union ice_32b_rx_flex_desc *rx_desc)
+{
+ u16 stat_err_bits;
+
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
+ if (ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
+ return le16_to_cpu(rx_desc->wb.l2tag1);
+
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S);
+ if (ice_test_staterr(rx_desc->wb.status_error1, stat_err_bits))
+ return le16_to_cpu(rx_desc->wb.l2tag2_2nd);
+
+ return 0;
+}
+
+/**
* ice_xdp_ring_update_tail - Updates the XDP Tx ring tail register
* @xdp_ring: XDP Tx ring
*
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 546145dd1f02..f2a518a1fd94 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -9,12 +9,14 @@
#define ICE_CHNL_MAX_TC 16
#include "ice_hw_autogen.h"
+#include "ice_devids.h"
#include "ice_osdep.h"
#include "ice_controlq.h"
#include "ice_lan_tx_rx.h"
#include "ice_flex_type.h"
#include "ice_protocol_type.h"
#include "ice_sbq_cmd.h"
+#include "ice_vlan_mode.h"
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
@@ -54,6 +56,11 @@ static inline u32 ice_round_to_num(u32 N, u32 R)
#define ICE_DBG_AQ_DESC BIT_ULL(25)
#define ICE_DBG_AQ_DESC_BUF BIT_ULL(26)
#define ICE_DBG_AQ_CMD BIT_ULL(27)
+#define ICE_DBG_AQ (ICE_DBG_AQ_MSG | \
+ ICE_DBG_AQ_DESC | \
+ ICE_DBG_AQ_DESC_BUF | \
+ ICE_DBG_AQ_CMD)
+
#define ICE_DBG_USER BIT_ULL(31)
enum ice_aq_res_ids {
@@ -920,6 +927,9 @@ struct ice_hw {
struct udp_tunnel_nic_shared udp_tunnel_shared;
struct udp_tunnel_nic_info udp_tunnel_nic;
+ /* dvm boost update information */
+ struct ice_dvm_table dvm_upd;
+
/* HW block tables */
struct ice_blk_info blk[ICE_BLK_COUNT];
struct mutex fl_profs_locks[ICE_BLK_COUNT]; /* lock fltr profiles */
@@ -943,6 +953,7 @@ struct ice_hw {
struct list_head rss_list_head;
struct ice_mbx_snapshot mbx_snapshot;
DECLARE_BITMAP(hw_ptype, ICE_FLOW_PTYPE_MAX);
+ u8 dvm_ena;
u16 io_expander_handle;
};
@@ -1008,6 +1019,15 @@ struct ice_hw_port_stats {
u64 fd_sb_match;
};
+enum ice_sw_fwd_act_type {
+ ICE_FWD_TO_VSI = 0,
+ ICE_FWD_TO_VSI_LIST, /* Do not use this when adding filter */
+ ICE_FWD_TO_Q,
+ ICE_FWD_TO_QGRP,
+ ICE_DROP_PACKET,
+ ICE_INVAL_ACT
+};
+
struct ice_aq_get_set_rss_lut_params {
u16 vsi_handle; /* software VSI handle */
u16 lut_size; /* size of the LUT buffer */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
new file mode 100644
index 000000000000..6578059d9479
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -0,0 +1,1029 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022, Intel Corporation. */
+
+#include "ice_vf_lib_private.h"
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_fltr.h"
+#include "ice_virtchnl_allowlist.h"
+
+/* Public functions which may be accessed by all driver files */
+
+/**
+ * ice_get_vf_by_id - Get pointer to VF by ID
+ * @pf: the PF private structure
+ * @vf_id: the VF ID to locate
+ *
+ * Locate and return a pointer to the VF structure associated with a given ID.
+ * Returns NULL if the ID does not have a valid VF structure associated with
+ * it.
+ *
+ * This function takes a reference to the VF, which must be released by
+ * calling ice_put_vf() once the caller is finished accessing the VF structure
+ * returned.
+ */
+struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
+{
+ struct ice_vf *vf;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
+ if (vf->vf_id == vf_id) {
+ struct ice_vf *found;
+
+ if (kref_get_unless_zero(&vf->refcnt))
+ found = vf;
+ else
+ found = NULL;
+
+ rcu_read_unlock();
+ return found;
+ }
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+
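A minimal sketch of the lookup/release contract described in the comment above; the VF ID and error code are illustrative only, and 'pf' is assumed to be in scope.

	/* Hypothetical caller: look up VF 5, use it, then drop the reference. */
	struct ice_vf *vf;

	vf = ice_get_vf_by_id(pf, 5);
	if (!vf)
		return -ENOENT;

	/* ... operate on the VF while holding the reference ... */

	ice_put_vf(vf);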
+/**
+ * ice_release_vf - Release VF associated with a refcount
+ * @ref: the kref decremented to zero
+ *
+ * Callback function for kref_put to release a VF once its reference count has
+ * hit zero.
+ */
+static void ice_release_vf(struct kref *ref)
+{
+ struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);
+
+ vf->vf_ops->free(vf);
+}
+
+/**
+ * ice_put_vf - Release a reference to a VF
+ * @vf: the VF structure to decrease reference count on
+ *
+ * Decrease the reference count for a VF, and free the entry if it is no
+ * longer in use.
+ *
+ * This must be called after ice_get_vf_by_id() once the reference to the VF
+ * structure is no longer used. Otherwise, the VF structure will never be
+ * freed.
+ */
+void ice_put_vf(struct ice_vf *vf)
+{
+ kref_put(&vf->refcnt, ice_release_vf);
+}
+
+/**
+ * ice_has_vfs - Return true if the PF has any associated VFs
+ * @pf: the PF private structure
+ *
+ * Return whether or not the PF has any allocated VFs.
+ *
+ * Note that this function only reports whether any VFs exist at the moment it
+ * is called. It does not guarantee that no more VFs will be added afterwards.
+ */
+bool ice_has_vfs(struct ice_pf *pf)
+{
+ /* A simple check that the hash table is not empty does not require
+ * the mutex or rcu_read_lock.
+ */
+ return !hash_empty(pf->vfs.table);
+}
+
+/**
+ * ice_get_num_vfs - Get number of allocated VFs
+ * @pf: the PF private structure
+ *
+ * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed
+ * to be contiguous. Do not assume that a VF ID is guaranteed to be less than
+ * the output of this function.
+ */
+u16 ice_get_num_vfs(struct ice_pf *pf)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+ u16 num_vfs = 0;
+
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf)
+ num_vfs++;
+ rcu_read_unlock();
+
+ return num_vfs;
+}
+
+/**
+ * ice_get_vf_vsi - get VF's VSI based on the stored index
+ * @vf: VF used to get VSI
+ */
+struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
+{
+ if (vf->lan_vsi_idx == ICE_NO_VSI)
+ return NULL;
+
+ return vf->pf->vsi[vf->lan_vsi_idx];
+}
+
+/**
+ * ice_is_vf_disabled - check if the VF or its PF is currently disabled
+ * @vf: pointer to the VF info
+ *
+ * If the PF has been disabled, there is no need to reset the VF until the PF
+ * is active again. Similarly, if the VF has been disabled, this means
+ * something else is resetting the VF, so we shouldn't continue.
+ *
+ * Returns true if the caller should consider the VF as disabled, whether
+ * because that single VF is explicitly disabled or because the PF is
+ * currently disabled.
+ */
+bool ice_is_vf_disabled(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ return (test_bit(ICE_VF_DIS, pf->state) ||
+ test_bit(ICE_VF_STATE_DIS, vf->vf_states));
+}
+
+/**
+ * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
+ * @vf: the VF being reset
+ *
+ * The maximum poll time is ~800ms, which is about how long it takes for a VF
+ * to be reset and/or a VF driver to be removed.
+ */
+static void ice_wait_on_vf_reset(struct ice_vf *vf)
+{
+ int i;
+
+ for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
+ if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
+ break;
+ msleep(ICE_MAX_VF_RESET_SLEEP_MS);
+ }
+}
+
+/**
+ * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
+ * @vf: VF to check if it's ready to be configured/queried
+ *
+ * The purpose of this function is to make sure the VF is not in reset, not
+ * disabled, and initialized so it can be configured and/or queried by a host
+ * administrator.
+ */
+int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
+{
+ ice_wait_on_vf_reset(vf);
+
+ if (ice_is_vf_disabled(vf))
+ return -EINVAL;
+
+ if (ice_check_vf_init(vf))
+ return -EBUSY;
+
+ return 0;
+}
+
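A sketch of how a configuration path might combine the helpers above; the function name and error codes are hypothetical, but the get/check/put sequence follows the documented contract of ice_get_vf_by_id() and ice_check_vf_ready_for_cfg().

	/* Hypothetical configuration path: acquire the VF, make sure it is
	 * ready, then release the reference.
	 */
	static int example_cfg_vf(struct ice_pf *pf, u16 vf_id)
	{
		struct ice_vf *vf;
		int ret;

		vf = ice_get_vf_by_id(pf, vf_id);
		if (!vf)
			return -EINVAL;

		ret = ice_check_vf_ready_for_cfg(vf);
		if (ret)
			goto out_put;

		/* ... apply the requested configuration here ... */

	out_put:
		ice_put_vf(vf);
		return ret;
	}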
+/**
+ * ice_trigger_vf_reset - Reset a VF on HW
+ * @vf: pointer to the VF structure
+ * @is_vflr: true if VFLR was issued, false if not
+ * @is_pfr: true if the reset was triggered due to a previous PFR
+ *
+ * Trigger hardware to start a reset for a particular VF. Expects the caller
+ * to wait the proper amount of time to allow hardware to reset the VF before
+ * it cleans up and restores VF functionality.
+ */
+static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
+{
+ /* Inform VF that it is no longer active, as a warning */
+ clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+
+ /* Disable VF's configuration API during reset. The flag is re-enabled
+ * when it's safe again to access VF's VSI.
+ */
+ clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+
+ /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
+ * needs to clear them in the case of VFR/VFLR. If this is done for
+ * PFR, it can mess up VF resets because the VF driver may already
+ * have started cleanup by the time we get here.
+ */
+ if (!is_pfr)
+ vf->vf_ops->clear_mbx_register(vf);
+
+ vf->vf_ops->trigger_reset_register(vf, is_vflr);
+}
+
+static void ice_vf_clear_counters(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ vf->num_mac = 0;
+ vsi->num_vlan = 0;
+ memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
+ memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
+}
+
+/**
+ * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
+ * @vf: VF to perform pre VSI rebuild tasks
+ *
+ * These tasks are items that don't need to be amortized since they are most
+ * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
+ */
+static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
+{
+ ice_vf_clear_counters(vf);
+ vf->vf_ops->clear_reset_trigger(vf);
+}
+
+/**
+ * ice_vf_rebuild_vsi - rebuild the VF's VSI
+ * @vf: VF to rebuild the VSI for
+ *
+ * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
+ * host, PFR, CORER, etc.).
+ */
+static int ice_vf_rebuild_vsi(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_pf *pf = vf->pf;
+
+ if (ice_vsi_rebuild(vsi, true)) {
+ dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
+ vf->vf_id);
+ return -EIO;
+ }
+ /* vsi->idx will remain the same in this case so don't update
+ * vf->lan_vsi_idx
+ */
+ vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
+ vf->lan_vsi_num = vsi->vsi_num;
+
+ return 0;
+}
+
+/**
+ * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
+ * @pf: PF structure for accessing VF(s)
+ *
+ * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
+ * else return true
+ */
+bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
+{
+ bool is_vf_promisc = false;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf) {
+ /* found a VF that has promiscuous mode configured */
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
+ test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
+ is_vf_promisc = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return is_vf_promisc;
+}
+
+/**
+ * ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI
+ * @vf: the VF to configure
+ * @vsi: the VF's VSI
+ * @promisc_m: the promiscuous mode to enable
+ */
+int
+ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ int status;
+
+ if (ice_vf_is_port_vlan_ena(vf))
+ status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
+ ice_vf_get_port_vlan_id(vf));
+ else if (ice_vsi_has_non_zero_vlans(vsi))
+ status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
+ else
+ status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
+
+ if (status && status != -EEXIST) {
+ dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
+ vf->vf_id, status);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vf_clear_vsi_promisc - Disable promiscuous mode for a VF VSI
+ * @vf: the VF to configure
+ * @vsi: the VF's VSI
+ * @promisc_m: the promiscuous mode to disable
+ */
+int
+ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ int status;
+
+ if (ice_vf_is_port_vlan_ena(vf))
+ status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
+ ice_vf_get_port_vlan_id(vf));
+ else if (ice_vsi_has_non_zero_vlans(vsi))
+ status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
+ else
+ status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
+
+ if (status && status != -ENOENT) {
+ dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
+ vf->vf_id, status);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_reset_all_vfs - reset all allocated VFs in one go
+ * @pf: pointer to the PF structure
+ *
+ * First, tell the hardware to reset each VF, then do all the waiting in one
+ * chunk, and finally finish restoring each VF after the wait. This is useful
+ * during PF routines which need to reset all VFs, as otherwise the driver
+ * must perform these resets in a serialized fashion.
+ */
+void ice_reset_all_vfs(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ /* If we don't have any VFs, then there is nothing to reset */
+ if (!ice_has_vfs(pf))
+ return;
+
+ mutex_lock(&pf->vfs.table_lock);
+
+ /* clear all malicious info if the VFs are getting reset */
+ ice_for_each_vf(pf, bkt, vf)
+ if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
+ ICE_MAX_SRIOV_VFS, vf->vf_id))
+ dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
+ vf->vf_id);
+
+ /* If VFs have been disabled, there is no need to reset */
+ if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
+ mutex_unlock(&pf->vfs.table_lock);
+ return;
+ }
+
+ /* Begin reset on all VFs at once */
+ ice_for_each_vf(pf, bkt, vf)
+ ice_trigger_vf_reset(vf, true, true);
+
+ /* HW requires some time to make sure it can flush the FIFO for a VF
+ * when it resets it. Now that we've triggered all of the VFs, iterate
+ * the table again and wait for each VF to complete.
+ */
+ ice_for_each_vf(pf, bkt, vf) {
+ if (!vf->vf_ops->poll_reset_status(vf)) {
+ /* Display a warning if at least one VF didn't manage
+ * to reset in time, but continue on with the
+ * operation.
+ */
+ dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id);
+ break;
+ }
+ }
+
+ /* free VF resources to begin resetting the VSI state */
+ ice_for_each_vf(pf, bkt, vf) {
+ mutex_lock(&vf->cfg_lock);
+
+ vf->driver_caps = 0;
+ ice_vc_set_default_allowlist(vf);
+
+ ice_vf_fdir_exit(vf);
+ ice_vf_fdir_init(vf);
+ /* clean VF control VSI when resetting VFs since it should be
+ * set up only when the VF creates its first FDIR rule.
+ */
+ if (vf->ctrl_vsi_idx != ICE_NO_VSI)
+ ice_vf_ctrl_invalidate_vsi(vf);
+
+ ice_vf_pre_vsi_rebuild(vf);
+ ice_vf_rebuild_vsi(vf);
+ vf->vf_ops->post_vsi_rebuild(vf);
+
+ mutex_unlock(&vf->cfg_lock);
+ }
+
+ if (ice_is_eswitch_mode_switchdev(pf))
+ if (ice_eswitch_rebuild(pf))
+ dev_warn(dev, "eswitch rebuild failed\n");
+
+ ice_flush(hw);
+ clear_bit(ICE_VF_DIS, pf->state);
+
+ mutex_unlock(&pf->vfs.table_lock);
+}
+
+/**
+ * ice_notify_vf_reset - Notify VF of a reset event
+ * @vf: pointer to the VF structure
+ */
+static void ice_notify_vf_reset(struct ice_vf *vf)
+{
+ struct ice_hw *hw = &vf->pf->hw;
+ struct virtchnl_pf_event pfe;
+
+ /* Bail out if the VF is in the disabled state or is neither initialized
+ * nor active; otherwise proceed with the notification
+ */
+ if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
+ test_bit(ICE_VF_STATE_DIS, vf->vf_states))
+ return;
+
+ pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
+ NULL);
+}
+
+/**
+ * ice_reset_vf - Reset a particular VF
+ * @vf: pointer to the VF structure
+ * @flags: flags controlling behavior of the reset
+ *
+ * Flags:
+ * ICE_VF_RESET_VFLR - Indicates a reset is due to VFLR event
+ * ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset
+ * ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting
+ *
+ * Returns 0 if the VF is currently in reset, if the resets are disabled, or
+ * if the VF resets successfully. Returns an error code if the VF fails to
+ * rebuild.
+ */
+int ice_reset_vf(struct ice_vf *vf, u32 flags)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ struct ice_hw *hw;
+ u8 promisc_m;
+ int err = 0;
+ bool rsd;
+
+ dev = ice_pf_to_dev(pf);
+ hw = &pf->hw;
+
+ if (flags & ICE_VF_RESET_NOTIFY)
+ ice_notify_vf_reset(vf);
+
+ if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
+ dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
+ vf->vf_id);
+ return 0;
+ }
+
+ if (ice_is_vf_disabled(vf)) {
+ dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
+ vf->vf_id);
+ return 0;
+ }
+
+ if (flags & ICE_VF_RESET_LOCK)
+ mutex_lock(&vf->cfg_lock);
+ else
+ lockdep_assert_held(&vf->cfg_lock);
+
+ /* Set VF disable bit state here, before triggering reset */
+ set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false);
+
+ vsi = ice_get_vf_vsi(vf);
+
+ ice_dis_vf_qs(vf);
+
+ /* Call Disable LAN Tx queue AQ whether or not queues are
+ * enabled. This is needed for successful completion of VFR.
+ */
+ ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
+ NULL, vf->vf_ops->reset_type, vf->vf_id, NULL);
+
+ /* poll VPGEN_VFRSTAT reg to make sure
+ * that reset is complete
+ */
+ rsd = vf->vf_ops->poll_reset_status(vf);
+
+ /* Display a warning if the VF didn't manage to reset in time, but
+ * continue on with the operation.
+ */
+ if (!rsd)
+ dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
+
+ vf->driver_caps = 0;
+ ice_vc_set_default_allowlist(vf);
+
+ /* disable promiscuous modes in case they were enabled
+ * ignore any error if disabling process failed
+ */
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
+ test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
+ if (ice_vf_is_port_vlan_ena(vf) || vsi->num_vlan)
+ promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
+ else
+ promisc_m = ICE_UCAST_PROMISC_BITS;
+
+ if (ice_vf_clear_vsi_promisc(vf, vsi, promisc_m))
+ dev_err(dev, "disabling promiscuous mode failed\n");
+ }
+
+ ice_eswitch_del_vf_mac_rule(vf);
+
+ ice_vf_fdir_exit(vf);
+ ice_vf_fdir_init(vf);
+ /* clean VF control VSI when resetting the VF, since it should be set
+ * up only when the VF creates its first FDIR rule.
+ */
+ if (vf->ctrl_vsi_idx != ICE_NO_VSI)
+ ice_vf_ctrl_vsi_release(vf);
+
+ ice_vf_pre_vsi_rebuild(vf);
+
+ if (vf->vf_ops->vsi_rebuild(vf)) {
+ dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
+ vf->vf_id);
+ err = -EFAULT;
+ goto out_unlock;
+ }
+
+ vf->vf_ops->post_vsi_rebuild(vf);
+ vsi = ice_get_vf_vsi(vf);
+ ice_eswitch_update_repr(vsi);
+ ice_eswitch_replay_vf_mac_rule(vf);
+
+ /* if the VF has been reset allow it to come up again */
+ if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
+ ICE_MAX_SRIOV_VFS, vf->vf_id))
+ dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
+ vf->vf_id);
+
+out_unlock:
+ if (flags & ICE_VF_RESET_LOCK)
+ mutex_unlock(&vf->cfg_lock);
+
+ return err;
+}
+
+/**
+ * ice_set_vf_state_qs_dis - Set VF queues state to disabled
+ * @vf: pointer to the VF structure
+ */
+void ice_set_vf_state_qs_dis(struct ice_vf *vf)
+{
+ /* Clear Rx/Tx enabled queues flag */
+ bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
+ bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+ clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+}
+
+/* Private functions only accessed from other virtualization files */
+
+/**
+ * ice_dis_vf_qs - Disable the VF queues
+ * @vf: pointer to the VF structure
+ */
+void ice_dis_vf_qs(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
+ ice_vsi_stop_all_rx_rings(vsi);
+ ice_set_vf_state_qs_dis(vf);
+}
+
+/**
+ * ice_check_vf_init - helper to check if VF init complete
+ * @vf: the pointer to the VF to check
+ */
+int ice_check_vf_init(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
+ vf->vf_id);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/**
+ * ice_vf_get_port_info - Get the VF's port info structure
+ * @vf: VF used to get the port info structure for
+ */
+struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
+{
+ return vf->pf->hw.port_info;
+}
+
+static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
+{
+ struct ice_vsi_ctx *ctx;
+ int err;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->info.sec_flags = vsi->info.sec_flags;
+ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+
+ if (enable)
+ ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
+ else
+ ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
+
+ err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n",
+ enable ? "ON" : "OFF", vsi->vsi_num, err);
+ else
+ vsi->info.sec_flags = ctx->info.sec_flags;
+
+ kfree(ctx);
+
+ return err;
+}
+
+/**
+ * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI
+ * @vsi: VSI to enable Tx spoof checking for
+ */
+static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ int err;
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+
+ err = vlan_ops->ena_tx_filtering(vsi);
+ if (err)
+ return err;
+
+ return ice_cfg_mac_antispoof(vsi, true);
+}
+
+/**
+ * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI
+ * @vsi: VSI to disable Tx spoof checking for
+ */
+static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ int err;
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+
+ err = vlan_ops->dis_tx_filtering(vsi);
+ if (err)
+ return err;
+
+ return ice_cfg_mac_antispoof(vsi, false);
+}
+
+/**
+ * ice_vsi_apply_spoofchk - Apply Tx spoof checking setting to a VSI
+ * @vsi: VSI associated to the VF
+ * @enable: whether to enable or disable the spoof checking
+ */
+int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable)
+{
+ int err;
+
+ if (enable)
+ err = ice_vsi_ena_spoofchk(vsi);
+ else
+ err = ice_vsi_dis_spoofchk(vsi);
+
+ return err;
+}
+
+/**
+ * ice_is_vf_trusted
+ * @vf: pointer to the VF info
+ */
+bool ice_is_vf_trusted(struct ice_vf *vf)
+{
+ return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+}
+
+/**
+ * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
+ * @vf: the VF to check
+ *
+ * Returns true if the VF has no Rx and no Tx queues enabled and returns false
+ * otherwise
+ */
+bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
+{
+ return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
+ !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
+}
+
+/**
+ * ice_is_vf_link_up - check if the VF's link is up
+ * @vf: VF to check if link is up
+ */
+bool ice_is_vf_link_up(struct ice_vf *vf)
+{
+ struct ice_port_info *pi = ice_vf_get_port_info(vf);
+
+ if (ice_check_vf_init(vf))
+ return false;
+
+ if (ice_vf_has_no_qs_ena(vf))
+ return false;
+ else if (vf->link_forced)
+ return vf->link_up;
+ else
+ return pi->phy.link_info.link_info &
+ ICE_AQ_LINK_UP;
+}
+
+/**
+ * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
+ * @vf: VF to configure trust setting for
+ */
+static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
+{
+ if (vf->trusted)
+ set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+ else
+ clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+}
+
+/**
+ * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
+ * @vf: VF to add MAC filters for
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
+ */
+static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ u8 broadcast[ETH_ALEN];
+ int status;
+
+ if (ice_is_eswitch_mode_switchdev(vf->pf))
+ return 0;
+
+ eth_broadcast_addr(broadcast);
+ status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
+ if (status) {
+ dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
+ vf->vf_id, status);
+ return status;
+ }
+
+ vf->num_mac++;
+
+ if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
+ status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+ if (status) {
+ dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
+ &vf->hw_lan_addr.addr[0], vf->vf_id,
+ status);
+ return status;
+ }
+ vf->num_mac++;
+
+ ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
+ * @vf: VF to add VLAN filters for
+ * @vsi: Pointer to VSI
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * always re-adds either a VLAN 0 or port VLAN based filter after reset.
+ */
+static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ int err;
+
+ if (ice_vf_is_port_vlan_ena(vf)) {
+ err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
+ if (err) {
+ dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
+ vf->vf_id, err);
+ return err;
+ }
+
+ err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
+ } else {
+ err = ice_vsi_add_vlan_zero(vsi);
+ }
+
+ if (err) {
+ dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
+ ice_vf_is_port_vlan_ena(vf) ?
+ ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
+ return err;
+ }
+
+ err = vlan_ops->ena_rx_filtering(vsi);
+ if (err)
+ dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
+ vf->vf_id, vsi->idx, err);
+
+ return 0;
+}
+
+/**
+ * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
+ * @vf: VF to re-apply the configuration for
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * needs to re-apply the host configured Tx rate limiting configuration.
+ */
+static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ int err;
+
+ if (vf->min_tx_rate) {
+ err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
+ if (err) {
+ dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
+ vf->min_tx_rate, vf->vf_id, err);
+ return err;
+ }
+ }
+
+ if (vf->max_tx_rate) {
+ err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
+ if (err) {
+ dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
+ vf->max_tx_rate, vf->vf_id, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
+ * @vsi: Pointer to VSI
+ *
+ * This function moves VSI into corresponding scheduler aggregator node
+ * based on cached value of "aggregator node info" per VSI
+ */
+static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ int status;
+
+ if (!vsi->agg_node)
+ return;
+
+ dev = ice_pf_to_dev(pf);
+ if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
+ dev_dbg(dev,
+ "agg_id %u already has reached max_num_vsis %u\n",
+ vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
+ return;
+ }
+
+ status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
+ vsi->idx, vsi->tc_cfg.ena_tc);
+ if (status)
+ dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
+ vsi->idx, vsi->agg_node->agg_id);
+ else
+ vsi->agg_node->num_vsis++;
+}
+
+/**
+ * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
+ * @vf: VF to rebuild host configuration on
+ */
+void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ ice_vf_set_host_trust_cfg(vf);
+
+ if (ice_vf_rebuild_host_mac_cfg(vf))
+ dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
+ vf->vf_id);
+
+ if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
+ dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
+ vf->vf_id);
+
+ if (ice_vf_rebuild_host_tx_rate_cfg(vf))
+ dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
+ vf->vf_id);
+
+ if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
+ dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
+ vf->vf_id);
+
+ /* rebuild aggregator node config for main VF VSI */
+ ice_vf_rebuild_aggregator_node_cfg(vsi);
+}
+
+/**
+ * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
+ * @vf: VF that control VSI is being invalidated on
+ */
+void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
+{
+ vf->ctrl_vsi_idx = ICE_NO_VSI;
+}
+
+/**
+ * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
+ * @vf: VF that control VSI is being released on
+ */
+void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
+{
+ ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
+ ice_vf_ctrl_invalidate_vsi(vf);
+}
+
+/**
+ * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
+ * @vf: VF to setup control VSI for
+ *
+ * Returns pointer to the successfully allocated VSI struct on success,
+ * otherwise returns NULL on failure.
+ */
+struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
+{
+ struct ice_port_info *pi = ice_vf_get_port_info(vf);
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf, NULL);
+ if (!vsi) {
+ dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
+ ice_vf_ctrl_invalidate_vsi(vf);
+ }
+
+ return vsi;
+}
+
+/**
+ * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
+ * @vf: VF to remove access to VSI for
+ */
+void ice_vf_invalidate_vsi(struct ice_vf *vf)
+{
+ vf->lan_vsi_idx = ICE_NO_VSI;
+ vf->lan_vsi_num = ICE_NO_VSI;
+}
+
+/**
+ * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
+ * @vf: VF to set in initialized state
+ *
+ * After this function the VF will be ready to receive/handle the
+ * VIRTCHNL_OP_GET_VF_RESOURCES message
+ */
+void ice_vf_set_initialized(struct ice_vf *vf)
+{
+ ice_set_vf_state_qs_dis(vf);
+ clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+ clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ set_bit(ICE_VF_STATE_INIT, vf->vf_states);
+ memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
new file mode 100644
index 000000000000..831b667dc5b2
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+#ifndef _ICE_VF_LIB_H_
+#define _ICE_VF_LIB_H_
+
+#include <linux/types.h>
+#include <linux/hashtable.h>
+#include <linux/bitmap.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <net/devlink.h>
+#include <linux/avf/virtchnl.h>
+#include "ice_type.h"
+#include "ice_virtchnl_fdir.h"
+#include "ice_vsi_vlan_ops.h"
+
+#define ICE_MAX_SRIOV_VFS 256
+
+/* VF resource constraints */
+#define ICE_MAX_RSS_QS_PER_VF 16
+
+struct ice_pf;
+struct ice_vf;
+struct ice_virtchnl_ops;
+
+/* VF capabilities */
+enum ice_virtchnl_cap {
+ ICE_VIRTCHNL_VF_CAP_PRIVILEGE = 0,
+};
+
+/* Specific VF states */
+enum ice_vf_states {
+ ICE_VF_STATE_INIT = 0, /* PF is initializing VF */
+ ICE_VF_STATE_ACTIVE, /* VF resources are allocated for use */
+ ICE_VF_STATE_QS_ENA, /* VF queue(s) enabled */
+ ICE_VF_STATE_DIS,
+ ICE_VF_STATE_MC_PROMISC,
+ ICE_VF_STATE_UC_PROMISC,
+ ICE_VF_STATES_NBITS
+};
+
+struct ice_time_mac {
+ unsigned long time_modified;
+ u8 addr[ETH_ALEN];
+};
+
+/* VF MDD events print structure */
+struct ice_mdd_vf_events {
+ u16 count; /* total count of Rx|Tx events */
+ /* count number of the last printed event */
+ u16 last_printed;
+};
+
+/* VF operations */
+struct ice_vf_ops {
+ enum ice_disq_rst_src reset_type;
+ void (*free)(struct ice_vf *vf);
+ void (*clear_mbx_register)(struct ice_vf *vf);
+ void (*trigger_reset_register)(struct ice_vf *vf, bool is_vflr);
+ bool (*poll_reset_status)(struct ice_vf *vf);
+ void (*clear_reset_trigger)(struct ice_vf *vf);
+ int (*vsi_rebuild)(struct ice_vf *vf);
+ void (*post_vsi_rebuild)(struct ice_vf *vf);
+};
+
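+/* Illustrative only: a reset-specific backend (for example the SR-IOV code)
+ * is expected to provide a static instance of these ops and assign it to
+ * vf->vf_ops when the VF is created. The names below are hypothetical, and
+ * the reset source assumes the ICE_VF_RESET value from ice_type.h:
+ *
+ *	static const struct ice_vf_ops example_vf_ops = {
+ *		.reset_type = ICE_VF_RESET,
+ *		.free = example_free_vf,
+ *		.clear_mbx_register = example_clear_mbx_register,
+ *		.trigger_reset_register = example_trigger_reset_register,
+ *		.poll_reset_status = example_poll_reset_status,
+ *		.clear_reset_trigger = example_clear_reset_trigger,
+ *		.vsi_rebuild = example_vsi_rebuild,
+ *		.post_vsi_rebuild = example_post_vsi_rebuild,
+ *	};
+ */
+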
+/* Virtchnl/SR-IOV config info */
+struct ice_vfs {
+ DECLARE_HASHTABLE(table, 8); /* table of VF entries */
+ struct mutex table_lock; /* Lock for protecting the hash table */
+ u16 num_supported; /* max supported VFs on this PF */
+ u16 num_qps_per; /* number of queue pairs per VF */
+ u16 num_msix_per; /* number of MSI-X vectors per VF */
+ unsigned long last_printed_mdd_jiffies; /* MDD message rate limit */
+ DECLARE_BITMAP(malvfs, ICE_MAX_SRIOV_VFS); /* malicious VF indicator */
+};
+
+/* VF information structure */
+struct ice_vf {
+ struct hlist_node entry;
+ struct rcu_head rcu;
+ struct kref refcnt;
+ struct ice_pf *pf;
+
+ /* Used during virtchnl message handling and NDO ops against the VF
+ * that will trigger a VFR
+ */
+ struct mutex cfg_lock;
+
+ u16 vf_id; /* VF ID in the PF space */
+ u16 lan_vsi_idx; /* index into PF struct */
+ u16 ctrl_vsi_idx;
+ struct ice_vf_fdir fdir;
+ /* first vector index of this VF in the PF space */
+ int first_vector_idx;
+ struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
+ struct virtchnl_version_info vf_ver;
+ u32 driver_caps; /* reported by VF driver */
+ struct virtchnl_ether_addr dev_lan_addr;
+ struct virtchnl_ether_addr hw_lan_addr;
+ struct ice_time_mac legacy_last_added_umac;
+ DECLARE_BITMAP(txq_ena, ICE_MAX_RSS_QS_PER_VF);
+ DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+ struct ice_vlan port_vlan_info; /* Port VLAN ID, QoS, and TPID */
+ struct virtchnl_vlan_caps vlan_v2_caps;
+ u8 pf_set_mac:1; /* VF MAC address set by VMM admin */
+ u8 trusted:1;
+ u8 spoofchk:1;
+ u8 link_forced:1;
+ u8 link_up:1; /* only valid if VF link is forced */
+ /* VSI indices - actual VSI pointers are maintained in the PF structure
+ * When assigned, these will be non-zero, because VSI 0 is always
+ * the main LAN VSI for the PF.
+ */
+ u16 lan_vsi_num; /* ID as used by firmware */
+ unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */
+ unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
+ DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
+
+ unsigned long vf_caps; /* VF's adv. capabilities */
+ u8 num_req_qs; /* num of queue pairs requested by VF */
+ u16 num_mac;
+ u16 num_vf_qs; /* num of queue configured per VF */
+ struct ice_mdd_vf_events mdd_rx_events;
+ struct ice_mdd_vf_events mdd_tx_events;
+ DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX);
+
+ struct ice_repr *repr;
+ const struct ice_virtchnl_ops *virtchnl_ops;
+ const struct ice_vf_ops *vf_ops;
+
+ /* devlink port data */
+ struct devlink_port devlink_port;
+};
+
+/* Flags for controlling behavior of ice_reset_vf */
+enum ice_vf_reset_flags {
+ ICE_VF_RESET_VFLR = BIT(0), /* Indicate a VFLR reset */
+ ICE_VF_RESET_NOTIFY = BIT(1), /* Notify VF prior to reset */
+ ICE_VF_RESET_LOCK = BIT(2), /* Acquire the VF cfg_lock */
+};
+
+static inline u16 ice_vf_get_port_vlan_id(struct ice_vf *vf)
+{
+ return vf->port_vlan_info.vid;
+}
+
+static inline u8 ice_vf_get_port_vlan_prio(struct ice_vf *vf)
+{
+ return vf->port_vlan_info.prio;
+}
+
+static inline bool ice_vf_is_port_vlan_ena(struct ice_vf *vf)
+{
+ return (ice_vf_get_port_vlan_id(vf) || ice_vf_get_port_vlan_prio(vf));
+}
+
+static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf)
+{
+ return vf->port_vlan_info.tpid;
+}
+
+/* VF Hash Table access functions
+ *
+ * These functions provide abstraction for interacting with the VF hash table.
+ * In general, direct access to the hash table should be avoided outside of
+ * these functions where possible.
+ *
+ * The VF entries in the hash table are protected by reference counting to
+ * track lifetime of accesses from the table. The ice_get_vf_by_id() function
+ * obtains a reference to the VF structure which must be dropped by using
+ * ice_put_vf().
+ */
+
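+/* Illustrative example only: a host-side callback that configures a single
+ * VF is expected to follow the pattern below. The vf_id variable and the
+ * configuration step are placeholders:
+ *
+ *	struct ice_vf *vf = ice_get_vf_by_id(pf, vf_id);
+ *	int err;
+ *
+ *	if (!vf)
+ *		return -EINVAL;
+ *
+ *	err = ice_check_vf_ready_for_cfg(vf);
+ *	if (!err) {
+ *		(configure or query the VF here)
+ *	}
+ *
+ *	ice_put_vf(vf);
+ *	return err;
+ */
+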
+/**
+ * ice_for_each_vf - Iterate over each VF entry
+ * @pf: pointer to the PF private structure
+ * @bkt: bucket index used for iteration
+ * @vf: pointer to the VF entry currently being processed in the loop.
+ *
+ * The bkt variable is an unsigned integer iterator used to traverse the VF
+ * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is.
+ * Use vf->vf_id to get the id number if needed.
+ *
+ * The caller is expected to be under the table_lock mutex for the entire
+ * loop. Use this iterator if your loop is long or if it might sleep.
+ */
+#define ice_for_each_vf(pf, bkt, vf) \
+ hash_for_each((pf)->vfs.table, (bkt), (vf), entry)
+
+/**
+ * ice_for_each_vf_rcu - Iterate over each VF entry protected by RCU
+ * @pf: pointer to the PF private structure
+ * @bkt: bucket index used for iteration
+ * @vf: pointer to the VF entry currently being processed in the loop.
+ *
+ * The bkt variable is an unsigned integer iterator used to traverse the VF
+ * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is.
+ * Use vf->vf_id to get the id number if needed.
+ *
+ * The caller is expected to be under rcu_read_lock() for the entire loop.
+ * Only use this iterator if your loop is short and you can guarantee it does
+ * not sleep.
+ */
+#define ice_for_each_vf_rcu(pf, bkt, vf) \
+ hash_for_each_rcu((pf)->vfs.table, (bkt), (vf), entry)
+
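+/* Illustrative example only: resetting every VF while holding the table_lock
+ * mutex, using the iterator above. The RCU variant follows the same pattern
+ * under rcu_read_lock() for short, non-sleeping loops:
+ *
+ *	struct ice_vf *vf;
+ *	unsigned int bkt;
+ *
+ *	mutex_lock(&pf->vfs.table_lock);
+ *	ice_for_each_vf(pf, bkt, vf)
+ *		ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
+ *	mutex_unlock(&pf->vfs.table_lock);
+ */
+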
+#ifdef CONFIG_PCI_IOV
+struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id);
+void ice_put_vf(struct ice_vf *vf);
+bool ice_has_vfs(struct ice_pf *pf);
+u16 ice_get_num_vfs(struct ice_pf *pf);
+struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
+bool ice_is_vf_disabled(struct ice_vf *vf);
+int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
+void ice_set_vf_state_qs_dis(struct ice_vf *vf);
+bool ice_is_any_vf_in_promisc(struct ice_pf *pf);
+int
+ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m);
+int
+ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m);
+int ice_reset_vf(struct ice_vf *vf, u32 flags);
+void ice_reset_all_vfs(struct ice_pf *pf);
+#else /* CONFIG_PCI_IOV */
+static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
+{
+ return NULL;
+}
+
+static inline void ice_put_vf(struct ice_vf *vf)
+{
+}
+
+static inline bool ice_has_vfs(struct ice_pf *pf)
+{
+ return false;
+}
+
+static inline u16 ice_get_num_vfs(struct ice_pf *pf)
+{
+ return 0;
+}
+
+static inline struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
+{
+ return NULL;
+}
+
+static inline bool ice_is_vf_disabled(struct ice_vf *vf)
+{
+ return true;
+}
+
+static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf)
+{
+}
+
+static inline bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
+{
+ return false;
+}
+
+static inline int
+ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ice_reset_vf(struct ice_vf *vf, u32 flags)
+{
+ return 0;
+}
+
+static inline void ice_reset_all_vfs(struct ice_pf *pf)
+{
+}
+#endif /* !CONFIG_PCI_IOV */
+
+#endif /* _ICE_VF_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
new file mode 100644
index 000000000000..15887e772c76
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+#ifndef _ICE_VF_LIB_PRIVATE_H_
+#define _ICE_VF_LIB_PRIVATE_H_
+
+#include "ice_vf_lib.h"
+
+/* This header file is for exposing functions in ice_vf_lib.c to other files
+ * which are also conditionally compiled depending on CONFIG_PCI_IOV.
+ * Functions which may be used by other files should be exposed as part of
+ * ice_vf_lib.h
+ *
+ * Functions in this file are exposed only when CONFIG_PCI_IOV is enabled, and
+ * thus this header must not be included by .c files which may be compiled
+ * with CONFIG_PCI_IOV disabled.
+ *
+ * To avoid this, only include this header file directly within .c files that
+ * are conditionally enabled in the "ice-$(CONFIG_PCI_IOV)" block.
+ */
+
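+/* For illustration only (the exact object list is defined in the ice Makefile
+ * and may differ), such a conditional build rule has the form:
+ *
+ *	ice-$(CONFIG_PCI_IOV) += ice_vf_lib.o ice_vf_mbx.o
+ */
+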
+#ifndef CONFIG_PCI_IOV
+#warning "Only include ice_vf_lib_private.h in CONFIG_PCI_IOV virtualization files"
+#endif
+
+void ice_dis_vf_qs(struct ice_vf *vf);
+int ice_check_vf_init(struct ice_vf *vf);
+struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf);
+int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable);
+bool ice_is_vf_trusted(struct ice_vf *vf);
+bool ice_vf_has_no_qs_ena(struct ice_vf *vf);
+bool ice_is_vf_link_up(struct ice_vf *vf);
+void ice_vf_rebuild_host_cfg(struct ice_vf *vf);
+void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf);
+void ice_vf_ctrl_vsi_release(struct ice_vf *vf);
+struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf);
+void ice_vf_invalidate_vsi(struct ice_vf *vf);
+void ice_vf_set_initialized(struct ice_vf *vf);
+
+#endif /* _ICE_VF_LIB_PRIVATE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
new file mode 100644
index 000000000000..fc8c93fa4455
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
@@ -0,0 +1,532 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+#include "ice_common.h"
+#include "ice_vf_mbx.h"
+
+/**
+ * ice_aq_send_msg_to_vf
+ * @hw: pointer to the hardware structure
+ * @vfid: VF ID to send msg
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cd: pointer to command details
+ *
+ * Send a message to the VF driver (0x0802) using the mailbox
+ * queue. The message is sent asynchronously via the
+ * ice_sq_send_cmd() function.
+ */
+int
+ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+ u8 *msg, u16 msglen, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_pf_vf_msg *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
+
+ cmd = &desc.params.virt;
+ cmd->id = cpu_to_le32(vfid);
+
+ desc.cookie_high = cpu_to_le32(v_opcode);
+ desc.cookie_low = cpu_to_le32(v_retval);
+
+ if (msglen)
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
+}
+
+/**
+ * ice_conv_link_speed_to_virtchnl
+ * @adv_link_support: determines the format of the returned link speed
+ * @link_speed: variable containing the link_speed to be converted
+ *
+ * Convert link speed supported by HW to link speed supported by virtchnl.
+ * If adv_link_support is true, then return link speed in Mbps. Else return
+ * link speed as a VIRTCHNL_LINK_SPEED_* casted to a u32. Note that the caller
+ * needs to cast back to an enum virtchnl_link_speed in the case where
+ * adv_link_support is false, but when adv_link_support is true the caller can
+ * expect the speed in Mbps.
+ */
+u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
+{
+ u32 speed;
+
+ if (adv_link_support)
+ switch (link_speed) {
+ case ICE_AQ_LINK_SPEED_10MB:
+ speed = ICE_LINK_SPEED_10MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_100MB:
+ speed = ICE_LINK_SPEED_100MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ speed = ICE_LINK_SPEED_1000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_2500MB:
+ speed = ICE_LINK_SPEED_2500MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_5GB:
+ speed = ICE_LINK_SPEED_5000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_10GB:
+ speed = ICE_LINK_SPEED_10000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_20GB:
+ speed = ICE_LINK_SPEED_20000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_25GB:
+ speed = ICE_LINK_SPEED_25000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_40GB:
+ speed = ICE_LINK_SPEED_40000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_50GB:
+ speed = ICE_LINK_SPEED_50000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_100GB:
+ speed = ICE_LINK_SPEED_100000MBPS;
+ break;
+ default:
+ speed = ICE_LINK_SPEED_UNKNOWN;
+ break;
+ }
+ else
+ /* Virtchnl speeds are not defined for every speed supported in
+ * the hardware. To maintain compatibility with older AVF
+ * drivers, the new speed values are resolved to the closest
+ * known virtchnl speed when reporting.
+ */
+ switch (link_speed) {
+ case ICE_AQ_LINK_SPEED_10MB:
+ case ICE_AQ_LINK_SPEED_100MB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_100MB;
+ break;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ case ICE_AQ_LINK_SPEED_2500MB:
+ case ICE_AQ_LINK_SPEED_5GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_1GB;
+ break;
+ case ICE_AQ_LINK_SPEED_10GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_10GB;
+ break;
+ case ICE_AQ_LINK_SPEED_20GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_20GB;
+ break;
+ case ICE_AQ_LINK_SPEED_25GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
+ break;
+ case ICE_AQ_LINK_SPEED_40GB:
+ case ICE_AQ_LINK_SPEED_50GB:
+ case ICE_AQ_LINK_SPEED_100GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
+ break;
+ default:
+ speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN;
+ break;
+ }
+
+ return speed;
+}
+
+/* The mailbox overflow detection algorithm helps to check if there
+ * is a possibility of a malicious VF transmitting too many MBX messages to the
+ * PF.
+ * 1. The mailbox snapshot structure, ice_mbx_snapshot, is initialized during
+ * driver initialization in ice_init_hw() using ice_mbx_init_snapshot().
+ * The struct ice_mbx_snapshot helps to track and traverse a static window of
+ * messages within the mailbox queue while looking for a malicious VF.
+ *
+ * 2. When the caller starts processing its mailbox queue in response to an
+ * interrupt, the structure ice_mbx_snapshot is expected to be cleared before
+ * the algorithm can be run for the first time for that interrupt. This can be
+ * done via ice_mbx_reset_snapshot().
+ *
+ * 3. For every message read by the caller from the MBX Queue, the caller must
+ * call the detection algorithm's entry function ice_mbx_vf_state_handler().
+ * Before every call to ice_mbx_vf_state_handler() the struct ice_mbx_data is
+ * filled as it is required to be passed to the algorithm.
+ *
+ * 4. Every time a message is read from the MBX queue, a VFId is received which
+ * is passed to the state handler. The boolean output is_malvf of the state
+ * handler ice_mbx_vf_state_handler() serves as an indicator to the caller
+ * whether this VF is malicious or not.
+ *
+ * 5. When a VF is identified to be malicious, the caller can send a message
+ * to the system administrator. The caller can invoke ice_mbx_report_malvf()
+ * to help determine if a malicious VF is to be reported or not. This function
+ * requires the caller to maintain a global bitmap to track all malicious VFs
+ * and pass that to ice_mbx_report_malvf() along with the VFID which was identified
+ * to be malicious by ice_mbx_vf_state_handler().
+ *
+ * 6. The global bitmap maintained by PF can be cleared completely if PF is in
+ * reset or the bit corresponding to a VF can be cleared if that VF is in reset.
+ * When a VF is shut down and brought back up, we assume that the new VF
+ * brought up is not malicious and hence report it again if it is found to be
+ * malicious.
+ *
+ * 7. The function ice_mbx_reset_snapshot() is called to reset the information
+ * in ice_mbx_snapshot for every new mailbox interrupt handled.
+ *
+ * 8. The memory allocated for variables in ice_mbx_snapshot is de-allocated
+ * when driver is unloaded.
+ */
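+
+/* Illustrative sketch only (not part of the driver): a caller servicing the
+ * mailbox interrupt is expected to drive the algorithm roughly as below. The
+ * pending, num_processed, mbx_size and watermark variables are hypothetical
+ * placeholders that the caller tracks itself:
+ *
+ *	struct ice_mbx_data mbx_data;
+ *	bool is_malvf, report_malvf;
+ *
+ *	mbx_data.num_pending_arq = pending;
+ *	mbx_data.num_msg_proc = num_processed;
+ *	mbx_data.max_num_msgs_mbx = mbx_size;
+ *	mbx_data.async_watermark_val = watermark;
+ *
+ *	if (!ice_mbx_vf_state_handler(&pf->hw, &mbx_data, vf_id, &is_malvf) &&
+ *	    is_malvf) {
+ *		ice_mbx_report_malvf(&pf->hw, pf->vfs.malvfs,
+ *				     ICE_MAX_SRIOV_VFS, vf_id, &report_malvf);
+ *		if (report_malvf)
+ *			(log or otherwise handle the malicious VF here)
+ *	}
+ */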
+#define ICE_RQ_DATA_MASK(rq_data) ((rq_data) & PF_MBX_ARQH_ARQH_M)
+/* The highest value for an unsigned 16-bit value, 0xFFFF, is used to indicate
+ * that the max messages check must be ignored in the algorithm
+ */
+#define ICE_IGNORE_MAX_MSG_CNT 0xFFFF
+
+/**
+ * ice_mbx_traverse - Pass through mailbox snapshot
+ * @hw: pointer to the HW struct
+ * @new_state: new algorithm state
+ *
+ * Traversing the mailbox static snapshot without checking
+ * for malicious VFs.
+ */
+static void
+ice_mbx_traverse(struct ice_hw *hw,
+ enum ice_mbx_snapshot_state *new_state)
+{
+ struct ice_mbx_snap_buffer_data *snap_buf;
+ u32 num_iterations;
+
+ snap_buf = &hw->mbx_snapshot.mbx_buf;
+
+ /* The mailbox buffer is circular, so apply a mask to
+ * the incremented iteration count.
+ */
+ num_iterations = ICE_RQ_DATA_MASK(++snap_buf->num_iterations);
+
+ /* Exit snapshot traversal if either of the below conditions is met:
+ * Condition-1: The number of iterations in the mailbox equals the
+ * mailbox head, which indicates that we have reached the end of the
+ * static snapshot.
+ * Condition-2: The maximum number of messages serviced in the mailbox
+ * for a given interrupt is not the ignore-all sentinel, and the number
+ * of messages processed is greater than or equal to the maximum number
+ * of mailbox entries serviced in the current work item.
+ */
+ if (num_iterations == snap_buf->head ||
+ (snap_buf->max_num_msgs_mbx < ICE_IGNORE_MAX_MSG_CNT &&
+ ++snap_buf->num_msg_proc >= snap_buf->max_num_msgs_mbx))
+ *new_state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+}
+
+/**
+ * ice_mbx_detect_malvf - Detect malicious VF in snapshot
+ * @hw: pointer to the HW struct
+ * @vf_id: relative virtual function ID
+ * @new_state: new algorithm state
+ * @is_malvf: boolean output to indicate if VF is malicious
+ *
+ * This function tracks the number of asynchronous messages
+ * sent per VF and marks the VF as malicious if it exceeds
+ * the permissible number of messages to send.
+ */
+static int
+ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id,
+ enum ice_mbx_snapshot_state *new_state,
+ bool *is_malvf)
+{
+ struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+
+ if (vf_id >= snap->mbx_vf.vfcntr_len)
+ return -EIO;
+
+ /* increment the message count in the VF array */
+ snap->mbx_vf.vf_cntr[vf_id]++;
+
+ if (snap->mbx_vf.vf_cntr[vf_id] >= ICE_ASYNC_VF_MSG_THRESHOLD)
+ *is_malvf = true;
+
+ /* continue to iterate through the mailbox snapshot */
+ ice_mbx_traverse(hw, new_state);
+
+ return 0;
+}
+
+/**
+ * ice_mbx_reset_snapshot - Reset mailbox snapshot structure
+ * @snap: pointer to mailbox snapshot structure in the ice_hw struct
+ *
+ * Reset the mailbox snapshot structure and clear VF counter array.
+ */
+static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
+{
+ u32 vfcntr_len;
+
+ if (!snap || !snap->mbx_vf.vf_cntr)
+ return;
+
+ /* Clear VF counters. */
+ vfcntr_len = snap->mbx_vf.vfcntr_len;
+ if (vfcntr_len)
+ memset(snap->mbx_vf.vf_cntr, 0,
+ (vfcntr_len * sizeof(*snap->mbx_vf.vf_cntr)));
+
+ /* Reset mailbox snapshot for a new capture. */
+ memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
+ snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+}
+
+/**
+ * ice_mbx_vf_state_handler - Handle states of the overflow algorithm
+ * @hw: pointer to the HW struct
+ * @mbx_data: pointer to structure containing mailbox data
+ * @vf_id: relative virtual function (VF) ID
+ * @is_malvf: boolean output to indicate if VF is malicious
+ *
+ * The function serves as an entry point for the malicious VF
+ * detection algorithm by handling the different states and state
+ * transitions of the algorithm:
+ * New snapshot: This state is entered when creating a new static
+ * snapshot. The data from any previous mailbox snapshot is
+ * cleared and a new capture of the mailbox head and tail is
+ * logged. This will be the new static snapshot to detect
+ * asynchronous messages sent by VFs. On capturing the snapshot
+ * and depending on whether the number of pending messages in that
+ * snapshot exceed the watermark value, the state machine enters
+ * traverse or detect states.
+ * Traverse: If pending message count is below watermark then iterate
+ * through the snapshot without any action on VF.
+ * Detect: If pending message count exceeds watermark traverse
+ * the static snapshot and look for a malicious VF.
+ */
+int
+ice_mbx_vf_state_handler(struct ice_hw *hw,
+ struct ice_mbx_data *mbx_data, u16 vf_id,
+ bool *is_malvf)
+{
+ struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+ struct ice_mbx_snap_buffer_data *snap_buf;
+ struct ice_ctl_q_info *cq = &hw->mailboxq;
+ enum ice_mbx_snapshot_state new_state;
+ int status = 0;
+
+ if (!is_malvf || !mbx_data)
+ return -EINVAL;
+
+ /* When entering the mailbox state machine assume that the VF
+ * is not malicious until detected.
+ */
+ *is_malvf = false;
+
+ /* The maximum number of messages that can be processed while servicing
+ * the current interrupt must be greater than the defined AVF message
+ * threshold.
+ */
+ if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
+ return -EINVAL;
+
+ /* The watermark value should not be less than the threshold limit
+ * set for the number of asynchronous messages a VF can send to the
+ * mailbox, nor should it be greater than the maximum number of messages
+ * in the mailbox serviced in the current interrupt.
+ */
+ if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
+ mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
+ return -EINVAL;
+
+ new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
+ snap_buf = &snap->mbx_buf;
+
+ switch (snap_buf->state) {
+ case ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT:
+ /* Clear any previously held data in mailbox snapshot structure. */
+ ice_mbx_reset_snapshot(snap);
+
+ /* Collect the pending ARQ count, number of messages processed and
+ * the maximum number of messages allowed to be processed from the
+ * Mailbox for current interrupt.
+ */
+ snap_buf->num_pending_arq = mbx_data->num_pending_arq;
+ snap_buf->num_msg_proc = mbx_data->num_msg_proc;
+ snap_buf->max_num_msgs_mbx = mbx_data->max_num_msgs_mbx;
+
+ /* Capture a new static snapshot of the mailbox by logging the
+ * head and tail of snapshot and set num_iterations to the tail
+ * value to mark the start of the iteration through the snapshot.
+ */
+ snap_buf->head = ICE_RQ_DATA_MASK(cq->rq.next_to_clean +
+ mbx_data->num_pending_arq);
+ snap_buf->tail = ICE_RQ_DATA_MASK(cq->rq.next_to_clean - 1);
+ snap_buf->num_iterations = snap_buf->tail;
+
+ /* Pending ARQ messages returned by ice_clean_rq_elem
+ * is the difference between the head and tail of the
+ * mailbox queue. Comparing this value against the watermark
+ * helps to check if we potentially have malicious VFs.
+ */
+ if (snap_buf->num_pending_arq >=
+ mbx_data->async_watermark_val) {
+ new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
+ status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
+ } else {
+ new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
+ ice_mbx_traverse(hw, &new_state);
+ }
+ break;
+
+ case ICE_MAL_VF_DETECT_STATE_TRAVERSE:
+ new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
+ ice_mbx_traverse(hw, &new_state);
+ break;
+
+ case ICE_MAL_VF_DETECT_STATE_DETECT:
+ new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
+ status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
+ break;
+
+ default:
+ new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
+ status = -EIO;
+ }
+
+ snap_buf->state = new_state;
+
+ return status;
+}
+
+/**
+ * ice_mbx_report_malvf - Track and note malicious VF
+ * @hw: pointer to the HW struct
+ * @all_malvfs: all malicious VFs tracked by PF
+ * @bitmap_len: length of bitmap in bits
+ * @vf_id: relative virtual function ID of the malicious VF
+ * @report_malvf: boolean to indicate if malicious VF must be reported
+ *
+ * This function will update a bitmap that keeps track of the malicious
+ * VFs attached to the PF. A malicious VF must be reported only once if
+ * discovered between VF resets or loading so the function checks
+ * the input vf_id against the bitmap to verify if the VF has been
+ * detected in any previous mailbox iterations.
+ */
+int
+ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
+ u16 bitmap_len, u16 vf_id, bool *report_malvf)
+{
+ if (!all_malvfs || !report_malvf)
+ return -EINVAL;
+
+ *report_malvf = false;
+
+ if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len)
+ return -EINVAL;
+
+ if (vf_id >= bitmap_len)
+ return -EIO;
+
+ /* Set the bit for this VF; report it only if the bit was not already set */
+ if (!test_and_set_bit(vf_id, all_malvfs))
+ *report_malvf = true;
+
+ return 0;
+}
+
+/**
+ * ice_mbx_clear_malvf - Clear VF bitmap and counter for VF ID
+ * @snap: pointer to the mailbox snapshot structure
+ * @all_malvfs: all malicious VFs tracked by PF
+ * @bitmap_len: length of bitmap in bits
+ * @vf_id: relative virtual function ID of the malicious VF
+ *
+ * In case of a VF reset, this function can be called to clear
+ * the bit corresponding to the VF ID in the bitmap tracking all
+ * malicious VFs attached to the PF. The function also clears the
+ * VF counter array at the index of the VF ID. This is to ensure
+ * that the new VF loaded is not considered malicious before going
+ * through the overflow detection algorithm.
+ */
+int
+ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
+ u16 bitmap_len, u16 vf_id)
+{
+ if (!snap || !all_malvfs)
+ return -EINVAL;
+
+ if (bitmap_len < snap->mbx_vf.vfcntr_len)
+ return -EINVAL;
+
+ /* Ensure the VF ID value is less than the bitmap and VF counter lengths */
+ if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len)
+ return -EIO;
+
+ /* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */
+ clear_bit(vf_id, all_malvfs);
+
+ /* Clear the VF counter in the mailbox snapshot structure for that VF ID.
+ * This is to ensure that if a VF is unloaded and a new one brought back
+ * up with the same VF ID for a snapshot currently in traversal or detect
+ * state the counter for that VF ID does not increment on top of existing
+ * values in the mailbox overflow detection algorithm.
+ */
+ snap->mbx_vf.vf_cntr[vf_id] = 0;
+
+ return 0;
+}
+
+/**
+ * ice_mbx_init_snapshot - Initialize mailbox snapshot structure
+ * @hw: pointer to the hardware structure
+ * @vf_count: number of VFs allocated on a PF
+ *
+ * Clear the mailbox snapshot structure and allocate memory
+ * for the VF counter array based on the number of VFs allocated
+ * on that PF.
+ *
+ * Assumption: This function will assume ice_get_caps() has already been
+ * called to ensure that the vf_count can be compared against the number
+ * of VFs supported as defined in the functional capabilities of the device.
+ */
+int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
+{
+ struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+
+ /* Ensure that the number of VFs allocated is non-zero and
+ * is not greater than the number of supported VFs defined in
+ * the functional capabilities of the PF.
+ */
+ if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs)
+ return -EINVAL;
+
+ snap->mbx_vf.vf_cntr = devm_kcalloc(ice_hw_to_dev(hw), vf_count,
+ sizeof(*snap->mbx_vf.vf_cntr),
+ GFP_KERNEL);
+ if (!snap->mbx_vf.vf_cntr)
+ return -ENOMEM;
+
+ /* Set the VF counter length to the number of VFs allocated
+ * on the given PF, per its functional capabilities.
+ */
+ snap->mbx_vf.vfcntr_len = vf_count;
+
+ /* Clear mbx_buf in the mailbox snapshot structure and set the
+ * mailbox snapshot state to a new capture.
+ */
+ memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
+ snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+
+ return 0;
+}
+
+/**
+ * ice_mbx_deinit_snapshot - Free mailbox snapshot structure
+ * @hw: pointer to the hardware structure
+ *
+ * Clear the mailbox snapshot structure and free the VF counter array.
+ */
+void ice_mbx_deinit_snapshot(struct ice_hw *hw)
+{
+ struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+
+ /* Free VF counter array and reset VF counter length */
+ devm_kfree(ice_hw_to_dev(hw), snap->mbx_vf.vf_cntr);
+ snap->mbx_vf.vfcntr_len = 0;
+
+ /* Clear mbx_buf in the mailbox snapshot structure */
+ memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.h b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
new file mode 100644
index 000000000000..582716e6d5f9
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_VF_MBX_H_
+#define _ICE_VF_MBX_H_
+
+#include "ice_type.h"
+#include "ice_controlq.h"
+
+/* Defining the mailbox message threshold as 63 asynchronous
+ * pending messages. Normal VF functionality does not require
+ * sending more than 63 asynchronous pending messages.
+ */
+#define ICE_ASYNC_VF_MSG_THRESHOLD 63
+
+#ifdef CONFIG_PCI_IOV
+int
+ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+ u8 *msg, u16 msglen, struct ice_sq_cd *cd);
+
+u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
+int
+ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
+ u16 vf_id, bool *is_mal_vf);
+int
+ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
+ u16 bitmap_len, u16 vf_id);
+int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count);
+void ice_mbx_deinit_snapshot(struct ice_hw *hw);
+int
+ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
+ u16 bitmap_len, u16 vf_id, bool *report_malvf);
+#else /* CONFIG_PCI_IOV */
+static inline int
+ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw,
+ u16 __always_unused vfid, u32 __always_unused v_opcode,
+ u32 __always_unused v_retval, u8 __always_unused *msg,
+ u16 __always_unused msglen,
+ struct ice_sq_cd __always_unused *cd)
+{
+ return 0;
+}
+
+static inline u32
+ice_conv_link_speed_to_virtchnl(bool __always_unused adv_link_support,
+ u16 __always_unused link_speed)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PCI_IOV */
+#endif /* _ICE_VF_MBX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
new file mode 100644
index 000000000000..5ecc0ee9a78e
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice_vsi_vlan_ops.h"
+#include "ice_vsi_vlan_lib.h"
+#include "ice_vlan_mode.h"
+#include "ice.h"
+#include "ice_vf_vsi_vlan_ops.h"
+#include "ice_sriov.h"
+
+static int
+noop_vlan_arg(struct ice_vsi __always_unused *vsi,
+ struct ice_vlan __always_unused *vlan)
+{
+ return 0;
+}
+
+static int
+noop_vlan(struct ice_vsi __always_unused *vsi)
+{
+ return 0;
+}
+
+/**
+ * ice_vf_vsi_init_vlan_ops - Initialize default VSI VLAN ops for VF VSI
+ * @vsi: VF's VSI being configured
+ *
+ * If Double VLAN Mode (DVM) is enabled, assume that the VF supports the new
+ * VIRTCHNL_VF_OFFLOAD_VLAN_V2 capability and set up the VLAN ops accordingly.
+ * If SVM is enabled, maintain the same level of VLAN support as prior to
+ * VIRTCHNL_VF_OFFLOAD_VLAN_V2.
+ */
+void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf = vsi->vf;
+
+ if (WARN_ON(!vf))
+ return;
+
+ if (ice_is_dvm_ena(&pf->hw)) {
+ vlan_ops = &vsi->outer_vlan_ops;
+
+ /* outer VLAN ops regardless of port VLAN config */
+ vlan_ops->add_vlan = ice_vsi_add_vlan;
+ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+ vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
+ vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
+
+ if (ice_vf_is_port_vlan_ena(vf)) {
+ /* setup outer VLAN ops */
+ vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
+ vlan_ops->ena_rx_filtering =
+ ice_vsi_ena_rx_vlan_filtering;
+
+ /* setup inner VLAN ops */
+ vlan_ops = &vsi->inner_vlan_ops;
+ vlan_ops->add_vlan = noop_vlan_arg;
+ vlan_ops->del_vlan = noop_vlan_arg;
+ vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+ } else {
+ if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
+ vlan_ops->ena_rx_filtering = noop_vlan;
+ else
+ vlan_ops->ena_rx_filtering =
+ ice_vsi_ena_rx_vlan_filtering;
+
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+ vlan_ops->ena_stripping = ice_vsi_ena_outer_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_outer_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_outer_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_outer_insertion;
+
+ /* setup inner VLAN ops */
+ vlan_ops = &vsi->inner_vlan_ops;
+
+ vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+ }
+ } else {
+ vlan_ops = &vsi->inner_vlan_ops;
+
+ /* inner VLAN ops regardless of port VLAN config */
+ vlan_ops->add_vlan = ice_vsi_add_vlan;
+ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+ vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
+ vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
+
+ if (ice_vf_is_port_vlan_ena(vf)) {
+ vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
+ vlan_ops->ena_rx_filtering =
+ ice_vsi_ena_rx_vlan_filtering;
+ } else {
+ if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
+ vlan_ops->ena_rx_filtering = noop_vlan;
+ else
+ vlan_ops->ena_rx_filtering =
+ ice_vsi_ena_rx_vlan_filtering;
+
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+ vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+ }
+ }
+}
+
+/**
+ * ice_vf_vsi_cfg_dvm_legacy_vlan_mode - Config VLAN mode for old VFs in DVM
+ * @vsi: VF's VSI being configured
+ *
+ * This should only be called when Double VLAN Mode (DVM) is enabled, no port
+ * VLAN is enabled on this VF, and the VF negotiates
+ * VIRTCHNL_VF_OFFLOAD_VLAN.
+ *
+ * This function sets up the VF VSI's inner and outer ice_vsi_vlan_ops and also
+ * initializes a software-only VLAN mode (i.e. allow all VLANs). It also installs
+ * no-op implementations for any ops that may be called during the lifetime of
+ * the VF, so those methods do nothing and succeed.
+ */
+void ice_vf_vsi_cfg_dvm_legacy_vlan_mode(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ struct ice_vf *vf = vsi->vf;
+ struct device *dev;
+
+ if (WARN_ON(!vf))
+ return;
+
+ dev = ice_pf_to_dev(vf->pf);
+
+ if (!ice_is_dvm_ena(&vsi->back->hw) || ice_vf_is_port_vlan_ena(vf))
+ return;
+
+ vlan_ops = &vsi->outer_vlan_ops;
+
+ /* Rx VLAN filtering always disabled to allow software offloaded VLANs
+ * for VFs that only support VIRTCHNL_VF_OFFLOAD_VLAN and don't have a
+ * port VLAN configured
+ */
+ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+ /* Don't fail when attempting to enable Rx VLAN filtering */
+ vlan_ops->ena_rx_filtering = noop_vlan;
+
+ /* Tx VLAN filtering always disabled to allow software offloaded VLANs
+ * for VFs that only support VIRTCHNL_VF_OFFLOAD_VLAN and don't have a
+ * port VLAN configured
+ */
+ vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
+ /* Don't fail when attempting to enable Tx VLAN filtering */
+ vlan_ops->ena_tx_filtering = noop_vlan;
+
+ if (vlan_ops->dis_rx_filtering(vsi))
+ dev_dbg(dev, "Failed to disable Rx VLAN filtering for old VF without VIRTCHNL_VF_OFFLOAD_VLAN_V2 support\n");
+ if (vlan_ops->dis_tx_filtering(vsi))
+ dev_dbg(dev, "Failed to disable Tx VLAN filtering for old VF without VIRTHCNL_VF_OFFLOAD_VLAN_V2 support\n");
+
+ /* All outer VLAN offloads must be disabled */
+ vlan_ops->dis_stripping = ice_vsi_dis_outer_stripping;
+ vlan_ops->dis_insertion = ice_vsi_dis_outer_insertion;
+
+ if (vlan_ops->dis_stripping(vsi))
+ dev_dbg(dev, "Failed to disable outer VLAN stripping for old VF without VIRTCHNL_VF_OFFLOAD_VLAN_V2 support\n");
+
+ if (vlan_ops->dis_insertion(vsi))
+ dev_dbg(dev, "Failed to disable outer VLAN insertion for old VF without VIRTCHNL_VF_OFFLOAD_VLAN_V2 support\n");
+
+ /* All inner VLAN offloads must be disabled */
+ vlan_ops = &vsi->inner_vlan_ops;
+
+ vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+ vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+
+ if (vlan_ops->dis_stripping(vsi))
+ dev_dbg(dev, "Failed to disable inner VLAN stripping for old VF without VIRTCHNL_VF_OFFLOAD_VLAN_V2 support\n");
+
+ if (vlan_ops->dis_insertion(vsi))
+ dev_dbg(dev, "Failed to disable inner VLAN insertion for old VF without VIRTCHNL_VF_OFFLOAD_VLAN_V2 support\n");
+}
+
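The "no-op implementations that do nothing and succeed" idea from the comment above can be shown with a small stand-alone sketch (demo_* names are hypothetical, not ice APIs): the call site and its error handling stay identical whether the installed op really programs hardware or is a harmless stub.

	/* Stand-alone sketch: one call site, two possible ops. */
	#include <stdio.h>

	struct demo_vsi { const char *name; };

	static int demo_noop(struct demo_vsi *vsi)
	{
		(void)vsi;
		return 0;		/* nothing to do, but report success */
	}

	static int demo_ena_rx_filtering_real(struct demo_vsi *vsi)
	{
		printf("%s: Rx VLAN filtering enabled in hardware\n", vsi->name);
		return 0;
	}

	static int demo_request_rx_filtering(struct demo_vsi *vsi,
					     int (*ena_rx_filtering)(struct demo_vsi *))
	{
		/* identical call site for legacy (no-op) and V2-capable (real) VFs */
		if (ena_rx_filtering(vsi)) {
			fprintf(stderr, "%s: enable Rx VLAN filtering failed\n",
				vsi->name);
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		struct demo_vsi legacy = { "legacy-vf" }, v2 = { "v2-vf" };

		demo_request_rx_filtering(&legacy, demo_noop);
		demo_request_rx_filtering(&v2, demo_ena_rx_filtering_real);
		return 0;
	}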
+/**
+ * ice_vf_vsi_cfg_svm_legacy_vlan_mode - Config VLAN mode for old VFs in SVM
+ * @vsi: VF's VSI being configured
+ *
+ * This should only be called when Single VLAN Mode (SVM) is enabled, no port
+ * VLAN is enabled on this VF, and the VF negotiates
+ * VIRTCHNL_VF_OFFLOAD_VLAN.
+ *
+ * All of the normal SVM VLAN ops are identical for this case. However, Rx VLAN
+ * filtering should be disabled by default in this case.
+ */
+void ice_vf_vsi_cfg_svm_legacy_vlan_mode(struct ice_vsi *vsi)
+{
+ struct ice_vf *vf = vsi->vf;
+
+ if (WARN_ON(!vf))
+ return;
+
+ if (ice_is_dvm_ena(&vsi->back->hw) || ice_vf_is_port_vlan_ena(vf))
+ return;
+
+ if (vsi->inner_vlan_ops.dis_rx_filtering(vsi))
+ dev_dbg(ice_pf_to_dev(vf->pf), "Failed to disable Rx VLAN filtering for old VF with VIRTCHNL_VF_OFFLOAD_VLAN support\n");
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h
new file mode 100644
index 000000000000..875a4e615f39
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_VF_VSI_VLAN_OPS_H_
+#define _ICE_VF_VSI_VLAN_OPS_H_
+
+#include "ice_vsi_vlan_ops.h"
+
+struct ice_vsi;
+
+void ice_vf_vsi_cfg_dvm_legacy_vlan_mode(struct ice_vsi *vsi);
+void ice_vf_vsi_cfg_svm_legacy_vlan_mode(struct ice_vsi *vsi);
+
+#ifdef CONFIG_PCI_IOV
+void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi);
+#else
+static inline void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi) { }
+#endif /* CONFIG_PCI_IOV */
+#endif /* _ICE_VF_VSI_VLAN_OPS_H_ */
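The header's CONFIG_PCI_IOV guard is the usual compile-out pattern: when the option is disabled, a static inline stub replaces the real function so call sites build unchanged. A minimal stand-alone sketch of the same idea (DEMO_FEATURE and demo_* names are made up for illustration):

	#include <stdio.h>

	#define DEMO_FEATURE 1			/* flip to 0 to compile the stub instead */

	struct demo_vsi { int initialized; };

	#if DEMO_FEATURE
	static void demo_init(struct demo_vsi *vsi)
	{
		vsi->initialized = 1;		/* real work when the feature is built in */
	}
	#else
	static inline void demo_init(struct demo_vsi *vsi) { }	/* no-op stub */
	#endif

	int main(void)
	{
		struct demo_vsi vsi = { 0 };

		demo_init(&vsi);		/* call site is the same either way */
		printf("initialized=%d\n", vsi.initialized);
		return 0;
	}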
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 1be3cd4b2bef..3f1a63815bac 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -1,15 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2018, Intel Corporation. */
+/* Copyright (C) 2022, Intel Corporation. */
+#include "ice_virtchnl.h"
+#include "ice_vf_lib_private.h"
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
-#include "ice_dcb_lib.h"
-#include "ice_flow.h"
-#include "ice_eswitch.h"
#include "ice_virtchnl_allowlist.h"
+#include "ice_vf_vsi_vlan_ops.h"
+#include "ice_vlan.h"
#include "ice_flex_pipe.h"
+#include "ice_dcb_lib.h"
#define FIELD_SELECTOR(proto_hdr_field) \
BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
@@ -164,45 +166,6 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
};
/**
- * ice_get_vf_vsi - get VF's VSI based on the stored index
- * @vf: VF used to get VSI
- */
-struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
-{
- return vf->pf->vsi[vf->lan_vsi_idx];
-}
-
-/**
- * ice_validate_vf_id - helper to check if VF ID is valid
- * @pf: pointer to the PF structure
- * @vf_id: the ID of the VF to check
- */
-static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
-{
- /* vf_id range is only valid for 0-255, and should always be unsigned */
- if (vf_id >= pf->num_alloc_vfs) {
- dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id);
- return -EINVAL;
- }
- return 0;
-}
-
-/**
- * ice_check_vf_init - helper to check if VF init complete
- * @pf: pointer to the PF structure
- * @vf: the pointer to the VF to check
- */
-static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
-{
- if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
- dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
- vf->vf_id);
- return -EBUSY;
- }
- return 0;
-}
-
-/**
* ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
* @pf: pointer to the PF structure
* @v_opcode: operation code
@@ -215,11 +178,11 @@ ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
struct ice_hw *hw = &pf->hw;
- unsigned int i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
+ struct ice_vf *vf;
+ unsigned int bkt;
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
/* Not all vfs are enabled so skip the ones that are not */
if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
@@ -231,6 +194,7 @@ ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
msglen, NULL);
}
+ mutex_unlock(&pf->vfs.table_lock);
}
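The hunk above is part of the conversion from a flat pf->vf array indexed by VF ID to a bucketed table protected by pf->vfs.table_lock, which is why the iterator now takes a bucket variable and the whole walk is wrapped in a mutex. A stand-alone sketch of that pattern, with hypothetical demo_* names and a plain pthread mutex standing in for the kernel primitives:

	#include <pthread.h>
	#include <stdio.h>

	#define DEMO_BUCKETS 4

	struct demo_vf {
		int id;
		struct demo_vf *next;		/* next entry in the same bucket */
	};

	struct demo_vf_table {
		pthread_mutex_t lock;
		struct demo_vf *bucket[DEMO_BUCKETS];
	};

	static void demo_broadcast(struct demo_vf_table *tbl, const char *msg)
	{
		/* hold the table lock across the full walk so entries cannot
		 * be added or removed while the message is being sent
		 */
		pthread_mutex_lock(&tbl->lock);
		for (int bkt = 0; bkt < DEMO_BUCKETS; bkt++)
			for (struct demo_vf *vf = tbl->bucket[bkt]; vf; vf = vf->next)
				printf("VF %d: %s\n", vf->id, msg);
		pthread_mutex_unlock(&tbl->lock);
	}

	int main(void)
	{
		struct demo_vf vf0 = { .id = 0 }, vf1 = { .id = 1 };
		struct demo_vf_table tbl = { .lock = PTHREAD_MUTEX_INITIALIZER };

		tbl.bucket[0] = &vf0;
		tbl.bucket[1] = &vf1;
		demo_broadcast(&tbl, "link state changed");
		return 0;
	}

Iterating buckets rather than VF IDs also means the walk no longer depends on IDs being contiguous, at the cost of needing the lock around every traversal.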
/**
@@ -259,39 +223,6 @@ ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
}
/**
- * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
- * @vf: the VF to check
- *
- * Returns true if the VF has no Rx and no Tx queues enabled and returns false
- * otherwise
- */
-static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
-{
- return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
- !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
-}
-
-/**
- * ice_is_vf_link_up - check if the VF's link is up
- * @vf: VF to check if link is up
- */
-static bool ice_is_vf_link_up(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
-
- if (ice_check_vf_init(pf, vf))
- return false;
-
- if (ice_vf_has_no_qs_ena(vf))
- return false;
- else if (vf->link_forced)
- return vf->link_up;
- else
- return pf->hw.port_info->phy.link_info.link_info &
- ICE_AQ_LINK_UP;
-}
-
-/**
* ice_vc_notify_vf_link_state - Inform a VF of link status
* @vf: pointer to the VF structure
*
@@ -317,1367 +248,18 @@ void ice_vc_notify_vf_link_state(struct ice_vf *vf)
}
/**
- * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
- * @vf: VF to remove access to VSI for
- */
-static void ice_vf_invalidate_vsi(struct ice_vf *vf)
-{
- vf->lan_vsi_idx = ICE_NO_VSI;
- vf->lan_vsi_num = ICE_NO_VSI;
-}
-
-/**
- * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
- * @vf: invalidate this VF's VSI after freeing it
- */
-static void ice_vf_vsi_release(struct ice_vf *vf)
-{
- ice_vsi_release(ice_get_vf_vsi(vf));
- ice_vf_invalidate_vsi(vf);
-}
-
-/**
- * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
- * @vf: VF that control VSI is being invalidated on
- */
-static void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
-{
- vf->ctrl_vsi_idx = ICE_NO_VSI;
-}
-
-/**
- * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
- * @vf: VF that control VSI is being released on
- */
-static void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
-{
- ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
- ice_vf_ctrl_invalidate_vsi(vf);
-}
-
-/**
- * ice_free_vf_res - Free a VF's resources
- * @vf: pointer to the VF info
- */
-static void ice_free_vf_res(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
- int i, last_vector_idx;
-
- /* First, disable VF's configuration API to prevent OS from
- * accessing the VF's VSI after it's freed or invalidated.
- */
- clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
- ice_vf_fdir_exit(vf);
- /* free VF control VSI */
- if (vf->ctrl_vsi_idx != ICE_NO_VSI)
- ice_vf_ctrl_vsi_release(vf);
-
- /* free VSI and disconnect it from the parent uplink */
- if (vf->lan_vsi_idx != ICE_NO_VSI) {
- ice_vf_vsi_release(vf);
- vf->num_mac = 0;
- }
-
- last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;
-
- /* clear VF MDD event information */
- memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
- memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
-
- /* Disable interrupts so that VF starts in a known state */
- for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
- wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
- ice_flush(&pf->hw);
- }
- /* reset some of the state variables keeping track of the resources */
- clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
- clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
-}
-
-/**
- * ice_dis_vf_mappings
- * @vf: pointer to the VF structure
- */
-static void ice_dis_vf_mappings(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
- struct device *dev;
- int first, last, v;
- struct ice_hw *hw;
-
- hw = &pf->hw;
- vsi = ice_get_vf_vsi(vf);
-
- dev = ice_pf_to_dev(pf);
- wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
- wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
-
- first = vf->first_vector_idx;
- last = first + pf->num_msix_per_vf - 1;
- for (v = first; v <= last; v++) {
- u32 reg;
-
- reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
- GLINT_VECT2FUNC_IS_PF_M) |
- ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
- GLINT_VECT2FUNC_PF_NUM_M));
- wr32(hw, GLINT_VECT2FUNC(v), reg);
- }
-
- if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
- wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
- else
- dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
-
- if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
- wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
- else
- dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
-}
-
-/**
- * ice_sriov_free_msix_res - Reset/free any used MSIX resources
- * @pf: pointer to the PF structure
- *
- * Since no MSIX entries are taken from the pf->irq_tracker then just clear
- * the pf->sriov_base_vector.
- *
- * Returns 0 on success, and -EINVAL on error.
- */
-static int ice_sriov_free_msix_res(struct ice_pf *pf)
-{
- struct ice_res_tracker *res;
-
- if (!pf)
- return -EINVAL;
-
- res = pf->irq_tracker;
- if (!res)
- return -EINVAL;
-
- /* give back irq_tracker resources used */
- WARN_ON(pf->sriov_base_vector < res->num_entries);
-
- pf->sriov_base_vector = 0;
-
- return 0;
-}
-
-/**
- * ice_set_vf_state_qs_dis - Set VF queues state to disabled
- * @vf: pointer to the VF structure
- */
-void ice_set_vf_state_qs_dis(struct ice_vf *vf)
-{
- /* Clear Rx/Tx enabled queues flag */
- bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
- bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
- clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
-}
-
-/**
- * ice_dis_vf_qs - Disable the VF queues
- * @vf: pointer to the VF structure
- */
-static void ice_dis_vf_qs(struct ice_vf *vf)
-{
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
- ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
- ice_vsi_stop_all_rx_rings(vsi);
- ice_set_vf_state_qs_dis(vf);
-}
-
-/**
- * ice_free_vfs - Free all VFs
- * @pf: pointer to the PF structure
- */
-void ice_free_vfs(struct ice_pf *pf)
-{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- unsigned int tmp, i;
-
- if (!pf->vf)
- return;
-
- ice_eswitch_release(pf);
-
- while (test_and_set_bit(ICE_VF_DIS, pf->state))
- usleep_range(1000, 2000);
-
- /* Disable IOV before freeing resources. This lets any VF drivers
- * running in the host get themselves cleaned up before we yank
- * the carpet out from underneath their feet.
- */
- if (!pci_vfs_assigned(pf->pdev))
- pci_disable_sriov(pf->pdev);
- else
- dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
-
- tmp = pf->num_alloc_vfs;
- pf->num_qps_per_vf = 0;
- pf->num_alloc_vfs = 0;
- for (i = 0; i < tmp; i++) {
- struct ice_vf *vf = &pf->vf[i];
-
- mutex_lock(&vf->cfg_lock);
-
- ice_dis_vf_qs(vf);
-
- if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
- /* disable VF qp mappings and set VF disable state */
- ice_dis_vf_mappings(vf);
- set_bit(ICE_VF_STATE_DIS, vf->vf_states);
- ice_free_vf_res(vf);
- }
-
- mutex_unlock(&vf->cfg_lock);
-
- mutex_destroy(&vf->cfg_lock);
- }
-
- if (ice_sriov_free_msix_res(pf))
- dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
-
- devm_kfree(dev, pf->vf);
- pf->vf = NULL;
-
- /* This check is for when the driver is unloaded while VFs are
- * assigned. Setting the number of VFs to 0 through sysfs is caught
- * before this function ever gets called.
- */
- if (!pci_vfs_assigned(pf->pdev)) {
- unsigned int vf_id;
-
- /* Acknowledge VFLR for all VFs. Without this, VFs will fail to
- * work correctly when SR-IOV gets re-enabled.
- */
- for (vf_id = 0; vf_id < tmp; vf_id++) {
- u32 reg_idx, bit_idx;
-
- reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
- bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
- wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
- }
- }
-
- /* clear malicious info if the VFs are getting released */
- for (i = 0; i < tmp; i++)
- if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs,
- ICE_MAX_VF_COUNT, i))
- dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
- i);
-
- clear_bit(ICE_VF_DIS, pf->state);
- clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
-}
-
-/**
- * ice_trigger_vf_reset - Reset a VF on HW
- * @vf: pointer to the VF structure
- * @is_vflr: true if VFLR was issued, false if not
- * @is_pfr: true if the reset was triggered due to a previous PFR
- *
- * Trigger hardware to start a reset for a particular VF. Expects the caller
- * to wait the proper amount of time to allow hardware to reset the VF before
- * it cleans up and restores VF functionality.
- */
-static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
-{
- struct ice_pf *pf = vf->pf;
- u32 reg, reg_idx, bit_idx;
- unsigned int vf_abs_id, i;
- struct device *dev;
- struct ice_hw *hw;
-
- dev = ice_pf_to_dev(pf);
- hw = &pf->hw;
- vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
-
- /* Inform VF that it is no longer active, as a warning */
- clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
-
- /* Disable VF's configuration API during reset. The flag is re-enabled
- * when it's safe again to access VF's VSI.
- */
- clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
-
- /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
- * needs to clear them in the case of VFR/VFLR. If this is done for
- * PFR, it can mess up VF resets because the VF driver may already
- * have started cleanup by the time we get here.
- */
- if (!is_pfr) {
- wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
- wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
- }
-
- /* In the case of a VFLR, the HW has already reset the VF and we
- * just need to clean up, so don't hit the VFRTRIG register.
- */
- if (!is_vflr) {
- /* reset VF using VPGEN_VFRTRIG reg */
- reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
- reg |= VPGEN_VFRTRIG_VFSWR_M;
- wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
- }
- /* clear the VFLR bit in GLGEN_VFLRSTAT */
- reg_idx = (vf_abs_id) / 32;
- bit_idx = (vf_abs_id) % 32;
- wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
- ice_flush(hw);
-
- wr32(hw, PF_PCI_CIAA,
- VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
- for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
- reg = rd32(hw, PF_PCI_CIAD);
- /* no transactions pending so stop polling */
- if ((reg & VF_TRANS_PENDING_M) == 0)
- break;
-
- dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
- udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
- }
-}
-
-/**
- * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
- * @vsi: the VSI to update
- * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
- * @enable: true for enable PVID false for disable
- */
-static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
-{
- struct ice_hw *hw = &vsi->back->hw;
- struct ice_aqc_vsi_props *info;
- struct ice_vsi_ctx *ctxt;
- int ret;
-
- ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
- if (!ctxt)
- return -ENOMEM;
-
- ctxt->info = vsi->info;
- info = &ctxt->info;
- if (enable) {
- info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
- ICE_AQ_VSI_PVLAN_INSERT_PVID |
- ICE_AQ_VSI_VLAN_EMOD_STR;
- info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- } else {
- info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING |
- ICE_AQ_VSI_VLAN_MODE_ALL;
- info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- }
-
- info->pvid = cpu_to_le16(pvid_info);
- info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
- ICE_AQ_VSI_PROP_SW_VALID);
-
- ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
- if (ret) {
- dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n",
- ret, ice_aq_str(hw->adminq.sq_last_status));
- goto out;
- }
-
- vsi->info.vlan_flags = info->vlan_flags;
- vsi->info.sw_flags2 = info->sw_flags2;
- vsi->info.pvid = info->pvid;
-out:
- kfree(ctxt);
- return ret;
-}
-
-/**
- * ice_vf_get_port_info - Get the VF's port info structure
- * @vf: VF used to get the port info structure for
- */
-static struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
-{
- return vf->pf->hw.port_info;
-}
-
-/**
- * ice_vf_vsi_setup - Set up a VF VSI
- * @vf: VF to setup VSI for
- *
- * Returns pointer to the successfully allocated VSI struct on success,
- * otherwise returns NULL on failure.
- */
-static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
-{
- struct ice_port_info *pi = ice_vf_get_port_info(vf);
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
-
- vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id, NULL);
-
- if (!vsi) {
- dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
- ice_vf_invalidate_vsi(vf);
- return NULL;
- }
-
- vf->lan_vsi_idx = vsi->idx;
- vf->lan_vsi_num = vsi->vsi_num;
-
- return vsi;
-}
-
-/**
- * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
- * @vf: VF to setup control VSI for
- *
- * Returns pointer to the successfully allocated VSI struct on success,
- * otherwise returns NULL on failure.
- */
-struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
-{
- struct ice_port_info *pi = ice_vf_get_port_info(vf);
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
-
- vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id, NULL);
- if (!vsi) {
- dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
- ice_vf_ctrl_invalidate_vsi(vf);
- }
-
- return vsi;
-}
-
-/**
- * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
- * @pf: pointer to PF structure
- * @vf: pointer to VF that the first MSIX vector index is being calculated for
- *
- * This returns the first MSIX vector index in PF space that is used by this VF.
- * This index is used when accessing PF relative registers such as
- * GLINT_VECT2FUNC and GLINT_DYN_CTL.
- * This will always be the OICR index in the AVF driver so any functionality
- * using vf->first_vector_idx for queue configuration will have to increment by
- * 1 to avoid meddling with the OICR index.
- */
-static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
-{
- return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
-}
-
-/**
- * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
- * @vf: VF to re-apply the configuration for
- *
- * Called after a VF VSI has been re-added/rebuild during reset. The PF driver
- * needs to re-apply the host configured Tx rate limiting configuration.
- */
-static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- int err;
-
- if (vf->min_tx_rate) {
- err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
- if (err) {
- dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
- vf->min_tx_rate, vf->vf_id, err);
- return err;
- }
- }
-
- if (vf->max_tx_rate) {
- err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
- if (err) {
- dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
- vf->max_tx_rate, vf->vf_id, err);
- return err;
- }
- }
-
- return 0;
-}
-
-/**
- * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
- * @vf: VF to add MAC filters for
- *
- * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
- * always re-adds either a VLAN 0 or port VLAN based filter after reset.
- */
-static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- u16 vlan_id = 0;
- int err;
-
- if (vf->port_vlan_info) {
- err = ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
- if (err) {
- dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
- vf->vf_id, err);
- return err;
- }
-
- vlan_id = vf->port_vlan_info & VLAN_VID_MASK;
- }
-
- /* vlan_id will either be 0 or the port VLAN number */
- err = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
- if (err) {
- dev_err(dev, "failed to add %s VLAN %u filter for VF %u, error %d\n",
- vf->port_vlan_info ? "port" : "", vlan_id, vf->vf_id,
- err);
- return err;
- }
-
- return 0;
-}
-
-/**
- * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
- * @vf: VF to add MAC filters for
- *
- * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
- * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
- */
-static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- u8 broadcast[ETH_ALEN];
- int status;
-
- if (ice_is_eswitch_mode_switchdev(vf->pf))
- return 0;
-
- eth_broadcast_addr(broadcast);
- status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
- if (status) {
- dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
- vf->vf_id, status);
- return status;
- }
-
- vf->num_mac++;
-
- if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
- status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr,
- ICE_FWD_TO_VSI);
- if (status) {
- dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
- &vf->hw_lan_addr.addr[0], vf->vf_id,
- status);
- return status;
- }
- vf->num_mac++;
-
- ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
- }
-
- return 0;
-}
-
-/**
- * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
- * @vf: VF to configure trust setting for
- */
-static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
-{
- if (vf->trusted)
- set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
- else
- clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
-}
-
-/**
- * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
- * @vf: VF to enable MSIX mappings for
- *
- * Some of the registers need to be indexed/configured using hardware global
- * device values and other registers need 0-based values, which represent PF
- * based values.
- */
-static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
-{
- int device_based_first_msix, device_based_last_msix;
- int pf_based_first_msix, pf_based_last_msix, v;
- struct ice_pf *pf = vf->pf;
- int device_based_vf_id;
- struct ice_hw *hw;
- u32 reg;
-
- hw = &pf->hw;
- pf_based_first_msix = vf->first_vector_idx;
- pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;
-
- device_based_first_msix = pf_based_first_msix +
- pf->hw.func_caps.common_cap.msix_vector_first_id;
- device_based_last_msix =
- (device_based_first_msix + pf->num_msix_per_vf) - 1;
- device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
-
- reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
- VPINT_ALLOC_FIRST_M) |
- ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
- VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
- wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
-
- reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
- & VPINT_ALLOC_PCI_FIRST_M) |
- ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
- VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
- wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
-
- /* map the interrupts to its functions */
- for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
- reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
- GLINT_VECT2FUNC_VF_NUM_M) |
- ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
- GLINT_VECT2FUNC_PF_NUM_M));
- wr32(hw, GLINT_VECT2FUNC(v), reg);
- }
-
- /* Map mailbox interrupt to VF MSI-X vector 0 */
- wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
-}
-
-/**
- * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
- * @vf: VF to enable the mappings for
- * @max_txq: max Tx queues allowed on the VF's VSI
- * @max_rxq: max Rx queues allowed on the VF's VSI
- */
-static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- struct ice_hw *hw = &vf->pf->hw;
- u32 reg;
-
- /* set regardless of mapping mode */
- wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
-
- /* VF Tx queues allocation */
- if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
- /* set the VF PF Tx queue range
- * VFNUMQ value should be set to (number of queues - 1). A value
- * of 0 means 1 queue and a value of 255 means 256 queues
- */
- reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
- VPLAN_TX_QBASE_VFFIRSTQ_M) |
- (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
- VPLAN_TX_QBASE_VFNUMQ_M));
- wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
- } else {
- dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
- }
-
- /* set regardless of mapping mode */
- wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
-
- /* VF Rx queues allocation */
- if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
- /* set the VF PF Rx queue range
- * VFNUMQ value should be set to (number of queues - 1). A value
- * of 0 means 1 queue and a value of 255 means 256 queues
- */
- reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
- VPLAN_RX_QBASE_VFFIRSTQ_M) |
- (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
- VPLAN_RX_QBASE_VFNUMQ_M));
- wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
- } else {
- dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
- }
-}
-
-/**
- * ice_ena_vf_mappings - enable VF MSIX and queue mapping
- * @vf: pointer to the VF structure
- */
-static void ice_ena_vf_mappings(struct ice_vf *vf)
-{
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
- ice_ena_vf_msix_mappings(vf);
- ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
-}
-
-/**
- * ice_determine_res
- * @pf: pointer to the PF structure
- * @avail_res: available resources in the PF structure
- * @max_res: maximum resources that can be given per VF
- * @min_res: minimum resources that can be given per VF
- *
- * Returns non-zero value if resources (queues/vectors) are available or
- * returns zero if PF cannot accommodate for all num_alloc_vfs.
- */
-static int
-ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
-{
- bool checked_min_res = false;
- int res;
-
- /* start by checking if PF can assign max number of resources for
- * all num_alloc_vfs.
- * if yes, return number per VF
- * If no, divide by 2 and roundup, check again
- * repeat the loop till we reach a point where even minimum resources
- * are not available, in that case return 0
- */
- res = max_res;
- while ((res >= min_res) && !checked_min_res) {
- int num_all_res;
-
- num_all_res = pf->num_alloc_vfs * res;
- if (num_all_res <= avail_res)
- return res;
-
- if (res == min_res)
- checked_min_res = true;
-
- res = DIV_ROUND_UP(res, 2);
- }
- return 0;
-}
-
-/**
- * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
- * @vf: VF to calculate the register index for
- * @q_vector: a q_vector associated to the VF
- */
-int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
-{
- struct ice_pf *pf;
-
- if (!vf || !q_vector)
- return -EINVAL;
-
- pf = vf->pf;
-
- /* always add one to account for the OICR being the first MSIX */
- return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
- q_vector->v_idx + 1;
-}
-
-/**
- * ice_get_max_valid_res_idx - Get the max valid resource index
- * @res: pointer to the resource to find the max valid index for
- *
- * Start from the end of the ice_res_tracker and return right when we find the
- * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
- * valid for SR-IOV because it is the only consumer that manipulates the
- * res->end and this is always called when res->end is set to res->num_entries.
- */
-static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
-{
- int i;
-
- if (!res)
- return -EINVAL;
-
- for (i = res->num_entries - 1; i >= 0; i--)
- if (res->list[i] & ICE_RES_VALID_BIT)
- return i;
-
- return 0;
-}
-
-/**
- * ice_sriov_set_msix_res - Set any used MSIX resources
- * @pf: pointer to PF structure
- * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
- *
- * This function allows SR-IOV resources to be taken from the end of the PF's
- * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
- * just set the pf->sriov_base_vector and return success.
- *
- * If there are not enough resources available, return an error. This should
- * always be caught by ice_set_per_vf_res().
- *
- * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
- * in the PF's space available for SR-IOV.
- */
-static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
-{
- u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
- int vectors_used = pf->irq_tracker->num_entries;
- int sriov_base_vector;
-
- sriov_base_vector = total_vectors - num_msix_needed;
-
- /* make sure we only grab irq_tracker entries from the list end and
- * that we have enough available MSIX vectors
- */
- if (sriov_base_vector < vectors_used)
- return -EINVAL;
-
- pf->sriov_base_vector = sriov_base_vector;
-
- return 0;
-}
-
-/**
- * ice_set_per_vf_res - check if vectors and queues are available
- * @pf: pointer to the PF structure
- *
- * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
- * get more vectors and can enable more queues per VF. Note that this does not
- * grab any vectors from the SW pool already allocated. Also note, that all
- * vector counts include one for each VF's miscellaneous interrupt vector
- * (i.e. OICR).
- *
- * Minimum VFs - 2 vectors, 1 queue pair
- * Small VFs - 5 vectors, 4 queue pairs
- * Medium VFs - 17 vectors, 16 queue pairs
- *
- * Second, determine number of queue pairs per VF by starting with a pre-defined
- * maximum each VF supports. If this is not possible, then we adjust based on
- * queue pairs available on the device.
- *
- * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
- * by each VF during VF initialization and reset.
- */
-static int ice_set_per_vf_res(struct ice_pf *pf)
-{
- int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
- int msix_avail_per_vf, msix_avail_for_sriov;
- struct device *dev = ice_pf_to_dev(pf);
- u16 num_msix_per_vf, num_txq, num_rxq;
-
- if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
- return -EINVAL;
-
- /* determine MSI-X resources per VF */
- msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
- pf->irq_tracker->num_entries;
- msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
- if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
- num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
- } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
- num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
- } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
- num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
- } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
- num_msix_per_vf = ICE_MIN_INTR_PER_VF;
- } else {
- dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
- msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
- pf->num_alloc_vfs);
- return -EIO;
- }
-
- /* determine queue resources per VF */
- num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
- min_t(u16,
- num_msix_per_vf - ICE_NONQ_VECS_VF,
- ICE_MAX_RSS_QS_PER_VF),
- ICE_MIN_QS_PER_VF);
-
- num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
- min_t(u16,
- num_msix_per_vf - ICE_NONQ_VECS_VF,
- ICE_MAX_RSS_QS_PER_VF),
- ICE_MIN_QS_PER_VF);
-
- if (!num_txq || !num_rxq) {
- dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
- ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
- return -EIO;
- }
-
- if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
- dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
- pf->num_alloc_vfs);
- return -EINVAL;
- }
-
- /* only allow equal Tx/Rx queue count (i.e. queue pairs) */
- pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
- pf->num_msix_per_vf = num_msix_per_vf;
- dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
- pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);
-
- return 0;
-}
-
-/**
- * ice_clear_vf_reset_trigger - enable VF to access hardware
- * @vf: VF to enabled hardware access for
- */
-static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
-{
- struct ice_hw *hw = &vf->pf->hw;
- u32 reg;
-
- reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
- reg &= ~VPGEN_VFRTRIG_VFSWR_M;
- wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
- ice_flush(hw);
-}
-
-static int
-ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
-{
- struct ice_hw *hw = &vsi->back->hw;
- int status;
-
- if (vf->port_vlan_info)
- status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
- vf->port_vlan_info & VLAN_VID_MASK);
- else if (vsi->num_vlan > 1)
- status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
- else
- status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
-
- if (status && status != -EEXIST) {
- dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
- vf->vf_id, status);
- return status;
- }
-
- return 0;
-}
-
-static int
-ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
-{
- struct ice_hw *hw = &vsi->back->hw;
- int status;
-
- if (vf->port_vlan_info)
- status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
- vf->port_vlan_info & VLAN_VID_MASK);
- else if (vsi->num_vlan > 1)
- status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
- else
- status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
-
- if (status && status != -ENOENT) {
- dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
- vf->vf_id, status);
- return status;
- }
-
- return 0;
-}
-
-static void ice_vf_clear_counters(struct ice_vf *vf)
-{
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
- vf->num_mac = 0;
- vsi->num_vlan = 0;
- memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
- memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
-}
-
-/**
- * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
- * @vf: VF to perform pre VSI rebuild tasks
- *
- * These tasks are items that don't need to be amortized since they are most
- * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
- */
-static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
-{
- ice_vf_clear_counters(vf);
- ice_clear_vf_reset_trigger(vf);
-}
-
-/**
- * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
- * @vsi: Pointer to VSI
- *
- * This function moves VSI into corresponding scheduler aggregator node
- * based on cached value of "aggregator node info" per VSI
- */
-static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- struct device *dev;
- int status;
-
- if (!vsi->agg_node)
- return;
-
- dev = ice_pf_to_dev(pf);
- if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
- dev_dbg(dev,
- "agg_id %u already has reached max_num_vsis %u\n",
- vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
- return;
- }
-
- status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
- vsi->idx, vsi->tc_cfg.ena_tc);
- if (status)
- dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
- vsi->idx, vsi->agg_node->agg_id);
- else
- vsi->agg_node->num_vsis++;
-}
-
-/**
- * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
- * @vf: VF to rebuild host configuration on
- */
-static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
- ice_vf_set_host_trust_cfg(vf);
-
- if (ice_vf_rebuild_host_mac_cfg(vf))
- dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
- vf->vf_id);
-
- if (ice_vf_rebuild_host_vlan_cfg(vf))
- dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
- vf->vf_id);
-
- if (ice_vf_rebuild_host_tx_rate_cfg(vf))
- dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
- vf->vf_id);
-
- /* rebuild aggregator node config for main VF VSI */
- ice_vf_rebuild_aggregator_node_cfg(vsi);
-}
-
-/**
- * ice_vf_rebuild_vsi_with_release - release and setup the VF's VSI
- * @vf: VF to release and setup the VSI for
- *
- * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF
- * configuration change, etc.).
- */
-static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf)
-{
- ice_vf_vsi_release(vf);
- if (!ice_vf_vsi_setup(vf))
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * ice_vf_rebuild_vsi - rebuild the VF's VSI
- * @vf: VF to rebuild the VSI for
- *
- * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
- * host, PFR, CORER, etc.).
- */
-static int ice_vf_rebuild_vsi(struct ice_vf *vf)
-{
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- struct ice_pf *pf = vf->pf;
-
- if (ice_vsi_rebuild(vsi, true)) {
- dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
- vf->vf_id);
- return -EIO;
- }
- /* vsi->idx will remain the same in this case so don't update
- * vf->lan_vsi_idx
- */
- vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
- vf->lan_vsi_num = vsi->vsi_num;
-
- return 0;
-}
-
-/**
- * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
- * @vf: VF to set in initialized state
- *
- * After this function the VF will be ready to receive/handle the
- * VIRTCHNL_OP_GET_VF_RESOURCES message
- */
-static void ice_vf_set_initialized(struct ice_vf *vf)
-{
- ice_set_vf_state_qs_dis(vf);
- clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
- clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
- clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
- set_bit(ICE_VF_STATE_INIT, vf->vf_states);
-}
-
-/**
- * ice_vf_post_vsi_rebuild - tasks to do after the VF's VSI have been rebuilt
- * @vf: VF to perform tasks on
- */
-static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
- struct ice_hw *hw;
-
- hw = &pf->hw;
-
- ice_vf_rebuild_host_cfg(vf);
-
- ice_vf_set_initialized(vf);
- ice_ena_vf_mappings(vf);
- wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
-}
-
-/**
- * ice_reset_all_vfs - reset all allocated VFs in one go
- * @pf: pointer to the PF structure
- * @is_vflr: true if VFLR was issued, false if not
- *
- * First, tell the hardware to reset each VF, then do all the waiting in one
- * chunk, and finally finish restoring each VF after the wait. This is useful
- * during PF routines which need to reset all VFs, as otherwise it must perform
- * these resets in a serialized fashion.
- *
- * Returns true if any VFs were reset, and false otherwise.
- */
-bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
-{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- struct ice_vf *vf;
- int v, i;
-
- /* If we don't have any VFs, then there is nothing to reset */
- if (!pf->num_alloc_vfs)
- return false;
-
- /* clear all malicious info if the VFs are getting reset */
- ice_for_each_vf(pf, i)
- if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, i))
- dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i);
-
- /* If VFs have been disabled, there is no need to reset */
- if (test_and_set_bit(ICE_VF_DIS, pf->state))
- return false;
-
- /* Begin reset on all VFs at once */
- ice_for_each_vf(pf, v)
- ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);
-
- /* HW requires some time to make sure it can flush the FIFO for a VF
- * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
- * sequence to make sure that it has completed. We'll keep track of
- * the VFs using a simple iterator that increments once that VF has
- * finished resetting.
- */
- for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
- /* Check each VF in sequence */
- while (v < pf->num_alloc_vfs) {
- u32 reg;
-
- vf = &pf->vf[v];
- reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
- if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
- /* only delay if the check failed */
- usleep_range(10, 20);
- break;
- }
-
- /* If the current VF has finished resetting, move on
- * to the next VF in sequence.
- */
- v++;
- }
- }
-
- /* Display a warning if at least one VF didn't manage to reset in
- * time, but continue on with the operation.
- */
- if (v < pf->num_alloc_vfs)
- dev_warn(dev, "VF reset check timeout\n");
-
- /* free VF resources to begin resetting the VSI state */
- ice_for_each_vf(pf, v) {
- vf = &pf->vf[v];
-
- mutex_lock(&vf->cfg_lock);
-
- vf->driver_caps = 0;
- ice_vc_set_default_allowlist(vf);
-
- ice_vf_fdir_exit(vf);
- ice_vf_fdir_init(vf);
- /* clean VF control VSI when resetting VFs since it should be
- * setup only when VF creates its first FDIR rule.
- */
- if (vf->ctrl_vsi_idx != ICE_NO_VSI)
- ice_vf_ctrl_invalidate_vsi(vf);
-
- ice_vf_pre_vsi_rebuild(vf);
- ice_vf_rebuild_vsi(vf);
- ice_vf_post_vsi_rebuild(vf);
-
- mutex_unlock(&vf->cfg_lock);
- }
-
- if (ice_is_eswitch_mode_switchdev(pf))
- if (ice_eswitch_rebuild(pf))
- dev_warn(dev, "eswitch rebuild failed\n");
-
- ice_flush(hw);
- clear_bit(ICE_VF_DIS, pf->state);
-
- return true;
-}
-
-/**
- * ice_is_vf_disabled
- * @vf: pointer to the VF info
- *
- * Returns true if the PF or VF is disabled, false otherwise.
- */
-bool ice_is_vf_disabled(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
-
- /* If the PF has been disabled, there is no need resetting VF until
- * PF is active again. Similarly, if the VF has been disabled, this
- * means something else is resetting the VF, so we shouldn't continue.
- * Otherwise, set disable VF state bit for actual reset, and continue.
- */
- return (test_bit(ICE_VF_DIS, pf->state) ||
- test_bit(ICE_VF_STATE_DIS, vf->vf_states));
-}
-
-/**
- * ice_reset_vf - Reset a particular VF
- * @vf: pointer to the VF structure
- * @is_vflr: true if VFLR was issued, false if not
- *
- * Returns true if the VF is currently in reset, resets successfully, or resets
- * are disabled and false otherwise.
- */
-bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
-{
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
- struct device *dev;
- struct ice_hw *hw;
- bool rsd = false;
- u8 promisc_m;
- u32 reg;
- int i;
-
- lockdep_assert_held(&vf->cfg_lock);
-
- dev = ice_pf_to_dev(pf);
-
- if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
- dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
- vf->vf_id);
- return true;
- }
-
- if (ice_is_vf_disabled(vf)) {
- dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
- vf->vf_id);
- return true;
- }
-
- /* Set VF disable bit state here, before triggering reset */
- set_bit(ICE_VF_STATE_DIS, vf->vf_states);
- ice_trigger_vf_reset(vf, is_vflr, false);
-
- vsi = ice_get_vf_vsi(vf);
-
- ice_dis_vf_qs(vf);
-
- /* Call Disable LAN Tx queue AQ whether or not queues are
- * enabled. This is needed for successful completion of VFR.
- */
- ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
- NULL, ICE_VF_RESET, vf->vf_id, NULL);
-
- hw = &pf->hw;
- /* poll VPGEN_VFRSTAT reg to make sure
- * that reset is complete
- */
- for (i = 0; i < 10; i++) {
- /* VF reset requires driver to first reset the VF and then
- * poll the status register to make sure that the reset
- * completed successfully.
- */
- reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
- if (reg & VPGEN_VFRSTAT_VFRD_M) {
- rsd = true;
- break;
- }
-
- /* only sleep if the reset is not done */
- usleep_range(10, 20);
- }
-
- vf->driver_caps = 0;
- ice_vc_set_default_allowlist(vf);
-
- /* Display a warning if VF didn't manage to reset in time, but need to
- * continue on with the operation.
- */
- if (!rsd)
- dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
-
- /* disable promiscuous modes in case they were enabled
- * ignore any error if disabling process failed
- */
- if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
- test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
- if (vf->port_vlan_info || vsi->num_vlan)
- promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
- else
- promisc_m = ICE_UCAST_PROMISC_BITS;
-
- if (ice_vf_clear_vsi_promisc(vf, vsi, promisc_m))
- dev_err(dev, "disabling promiscuous mode failed\n");
- }
-
- ice_eswitch_del_vf_mac_rule(vf);
-
- ice_vf_fdir_exit(vf);
- ice_vf_fdir_init(vf);
- /* clean VF control VSI when resetting VF since it should be setup
- * only when VF creates its first FDIR rule.
- */
- if (vf->ctrl_vsi_idx != ICE_NO_VSI)
- ice_vf_ctrl_vsi_release(vf);
-
- ice_vf_pre_vsi_rebuild(vf);
-
- if (ice_vf_rebuild_vsi_with_release(vf)) {
- dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id);
- return false;
- }
-
- ice_vf_post_vsi_rebuild(vf);
- vsi = ice_get_vf_vsi(vf);
- ice_eswitch_update_repr(vsi);
- ice_eswitch_replay_vf_mac_rule(vf);
-
- /* if the VF has been reset allow it to come up again */
- if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, vf->vf_id))
- dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i);
-
- return true;
-}
-
-/**
* ice_vc_notify_link_state - Inform all VFs on a PF of link status
* @pf: pointer to the PF structure
*/
void ice_vc_notify_link_state(struct ice_pf *pf)
{
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
- ice_for_each_vf(pf, i)
- ice_vc_notify_vf_link_state(&pf->vf[i]);
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf)
+ ice_vc_notify_vf_link_state(vf);
+ mutex_unlock(&pf->vfs.table_lock);
}
/**
@@ -1690,7 +272,7 @@ void ice_vc_notify_reset(struct ice_pf *pf)
{
struct virtchnl_pf_event pfe;
- if (!pf->num_alloc_vfs)
+ if (!ice_has_vfs(pf))
return;
pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
@@ -1700,462 +282,6 @@ void ice_vc_notify_reset(struct ice_pf *pf)
}
/**
- * ice_vc_notify_vf_reset - Notify VF of a reset event
- * @vf: pointer to the VF structure
- */
-static void ice_vc_notify_vf_reset(struct ice_vf *vf)
-{
- struct virtchnl_pf_event pfe;
- struct ice_pf *pf;
-
- if (!vf)
- return;
-
- pf = vf->pf;
- if (ice_validate_vf_id(pf, vf->vf_id))
- return;
-
- /* Bail out if VF is in disabled state, neither initialized, nor active
- * state - otherwise proceed with notifications
- */
- if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
- !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
- test_bit(ICE_VF_STATE_DIS, vf->vf_states))
- return;
-
- pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
- pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
- ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
- VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
- NULL);
-}
-
-/**
- * ice_init_vf_vsi_res - initialize/setup VF VSI resources
- * @vf: VF to initialize/setup the VSI for
- *
- * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up the
- * VF VSI's broadcast filter and is only used during initial VF creation.
- */
-static int ice_init_vf_vsi_res(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
- u8 broadcast[ETH_ALEN];
- struct ice_vsi *vsi;
- struct device *dev;
- int err;
-
- vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
-
- dev = ice_pf_to_dev(pf);
- vsi = ice_vf_vsi_setup(vf);
- if (!vsi)
- return -ENOMEM;
-
- err = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
- if (err) {
- dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
- vf->vf_id);
- goto release_vsi;
- }
-
- eth_broadcast_addr(broadcast);
- err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
- if (err) {
- dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n",
- vf->vf_id, err);
- goto release_vsi;
- }
-
- vf->num_mac = 1;
-
- return 0;
-
-release_vsi:
- ice_vf_vsi_release(vf);
- return err;
-}
-
-/**
- * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
- * @pf: PF the VFs are associated with
- */
-static int ice_start_vfs(struct ice_pf *pf)
-{
- struct ice_hw *hw = &pf->hw;
- int retval, i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- ice_clear_vf_reset_trigger(vf);
-
- retval = ice_init_vf_vsi_res(vf);
- if (retval) {
- dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
- vf->vf_id, retval);
- goto teardown;
- }
-
- set_bit(ICE_VF_STATE_INIT, vf->vf_states);
- ice_ena_vf_mappings(vf);
- wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
- }
-
- ice_flush(hw);
- return 0;
-
-teardown:
- for (i = i - 1; i >= 0; i--) {
- struct ice_vf *vf = &pf->vf[i];
-
- ice_dis_vf_mappings(vf);
- ice_vf_vsi_release(vf);
- }
-
- return retval;
-}
-
-/**
- * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation
- * @pf: PF holding reference to all VFs for default configuration
- */
-static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
-{
- int i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- vf->pf = pf;
- vf->vf_id = i;
- vf->vf_sw_id = pf->first_sw;
- /* assign default capabilities */
- set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
- vf->spoofchk = true;
- vf->num_vf_qs = pf->num_qps_per_vf;
- ice_vc_set_default_allowlist(vf);
-
- /* ctrl_vsi_idx will be set to a valid value only when VF
- * creates its first fdir rule.
- */
- ice_vf_ctrl_invalidate_vsi(vf);
- ice_vf_fdir_init(vf);
-
- ice_vc_set_dflt_vf_ops(&vf->vc_ops);
-
- mutex_init(&vf->cfg_lock);
- }
-}
-
-/**
- * ice_alloc_vfs - allocate num_vfs in the PF structure
- * @pf: PF to store the allocated VFs in
- * @num_vfs: number of VFs to allocate
- */
-static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs)
-{
- struct ice_vf *vfs;
-
- vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs),
- GFP_KERNEL);
- if (!vfs)
- return -ENOMEM;
-
- pf->vf = vfs;
- pf->num_alloc_vfs = num_vfs;
-
- return 0;
-}
-
-/**
- * ice_ena_vfs - enable VFs so they are ready to be used
- * @pf: pointer to the PF structure
- * @num_vfs: number of VFs to enable
- */
-static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
-{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- int ret;
-
- /* Disable global interrupt 0 so we don't try to handle the VFLR. */
- wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
- ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
- set_bit(ICE_OICR_INTR_DIS, pf->state);
- ice_flush(hw);
-
- ret = pci_enable_sriov(pf->pdev, num_vfs);
- if (ret) {
- pf->num_alloc_vfs = 0;
- goto err_unroll_intr;
- }
-
- ret = ice_alloc_vfs(pf, num_vfs);
- if (ret)
- goto err_pci_disable_sriov;
-
- if (ice_set_per_vf_res(pf)) {
- dev_err(dev, "Not enough resources for %d VFs, try with fewer number of VFs\n",
- num_vfs);
- ret = -ENOSPC;
- goto err_unroll_sriov;
- }
-
- ice_set_dflt_settings_vfs(pf);
-
- if (ice_start_vfs(pf)) {
- dev_err(dev, "Failed to start VF(s)\n");
- ret = -EAGAIN;
- goto err_unroll_sriov;
- }
-
- clear_bit(ICE_VF_DIS, pf->state);
-
- ret = ice_eswitch_configure(pf);
- if (ret)
- goto err_unroll_sriov;
-
- /* rearm global interrupts */
- if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
- ice_irq_dynamic_ena(hw, NULL, NULL);
-
- return 0;
-
-err_unroll_sriov:
- devm_kfree(dev, pf->vf);
- pf->vf = NULL;
- pf->num_alloc_vfs = 0;
-err_pci_disable_sriov:
- pci_disable_sriov(pf->pdev);
-err_unroll_intr:
- /* rearm interrupts here */
- ice_irq_dynamic_ena(hw, NULL, NULL);
- clear_bit(ICE_OICR_INTR_DIS, pf->state);
- return ret;
-}
-
-/**
- * ice_pci_sriov_ena - Enable or change number of VFs
- * @pf: pointer to the PF structure
- * @num_vfs: number of VFs to allocate
- *
- * Returns 0 on success and negative on failure
- */
-static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
-{
- int pre_existing_vfs = pci_num_vf(pf->pdev);
- struct device *dev = ice_pf_to_dev(pf);
- int err;
-
- if (pre_existing_vfs && pre_existing_vfs != num_vfs)
- ice_free_vfs(pf);
- else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
- return 0;
-
- if (num_vfs > pf->num_vfs_supported) {
- dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
- num_vfs, pf->num_vfs_supported);
- return -EOPNOTSUPP;
- }
-
- dev_info(dev, "Enabling %d VFs\n", num_vfs);
- err = ice_ena_vfs(pf, num_vfs);
- if (err) {
- dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
- return err;
- }
-
- set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
- return 0;
-}
-
-/**
- * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
- * @pf: PF to enabled SR-IOV on
- */
-static int ice_check_sriov_allowed(struct ice_pf *pf)
-{
- struct device *dev = ice_pf_to_dev(pf);
-
- if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
- dev_err(dev, "This device is not capable of SR-IOV\n");
- return -EOPNOTSUPP;
- }
-
- if (ice_is_safe_mode(pf)) {
- dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
- return -EOPNOTSUPP;
- }
-
- if (!ice_pf_state_is_nominal(pf)) {
- dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
- return -EBUSY;
- }
-
- return 0;
-}
-
-/**
- * ice_sriov_configure - Enable or change number of VFs via sysfs
- * @pdev: pointer to a pci_dev structure
- * @num_vfs: number of VFs to allocate or 0 to free VFs
- *
- * This function is called when the user updates the number of VFs in sysfs. On
- * success return whatever num_vfs was set to by the caller. Return negative on
- * failure.
- */
-int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
-{
- struct ice_pf *pf = pci_get_drvdata(pdev);
- struct device *dev = ice_pf_to_dev(pf);
- int err;
-
- err = ice_check_sriov_allowed(pf);
- if (err)
- return err;
-
- if (!num_vfs) {
- if (!pci_vfs_assigned(pdev)) {
- ice_mbx_deinit_snapshot(&pf->hw);
- ice_free_vfs(pf);
- if (pf->lag)
- ice_enable_lag(pf->lag);
- return 0;
- }
-
- dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
- return -EBUSY;
- }
-
- err = ice_mbx_init_snapshot(&pf->hw, num_vfs);
- if (err)
- return err;
-
- err = ice_pci_sriov_ena(pf, num_vfs);
- if (err) {
- ice_mbx_deinit_snapshot(&pf->hw);
- return err;
- }
-
- if (pf->lag)
- ice_disable_lag(pf->lag);
- return num_vfs;
-}
-
-/**
- * ice_process_vflr_event - Free VF resources via IRQ calls
- * @pf: pointer to the PF structure
- *
- * called from the VFLR IRQ handler to
- * free up VF resources and state variables
- */
-void ice_process_vflr_event(struct ice_pf *pf)
-{
- struct ice_hw *hw = &pf->hw;
- unsigned int vf_id;
- u32 reg;
-
- if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
- !pf->num_alloc_vfs)
- return;
-
- ice_for_each_vf(pf, vf_id) {
- struct ice_vf *vf = &pf->vf[vf_id];
- u32 reg_idx, bit_idx;
-
- reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
- bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
- /* read GLGEN_VFLRSTAT register to find out the flr VFs */
- reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
- if (reg & BIT(bit_idx)) {
- /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
- mutex_lock(&vf->cfg_lock);
- ice_reset_vf(vf, true);
- mutex_unlock(&vf->cfg_lock);
- }
- }
-}
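
The VFLR scan above hinges on a simple index split: each 32-bit GLGEN_VFLRSTAT register tracks 32 VFs, so the absolute VF number selects a register with /32 and a bit within it with %32. A minimal userspace sketch of that math follows; the register array and the vf_base_id value are invented for illustration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t vflrstat[8] = { 0 };   /* stand-in for GLGEN_VFLRSTAT[] */
        unsigned int vf_base_id = 64;   /* hypothetical absolute VF base */
        unsigned int vf_id = 5;         /* VF index relative to this PF */
        unsigned int abs_vf = vf_base_id + vf_id;
        unsigned int reg_idx = abs_vf / 32;
        unsigned int bit_idx = abs_vf % 32;

        vflrstat[reg_idx] |= 1u << bit_idx;     /* pretend this VF saw an FLR */

        if (vflrstat[reg_idx] & (1u << bit_idx))
                printf("VF %u pending FLR (reg %u, bit %u)\n",
                       vf_id, reg_idx, bit_idx);
        return 0;
}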
-
-/**
- * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
- * @vf: pointer to the VF info
- */
-static void ice_vc_reset_vf(struct ice_vf *vf)
-{
- ice_vc_notify_vf_reset(vf);
- ice_reset_vf(vf, false);
-}
-
-/**
- * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
- * @pf: PF used to index all VFs
- * @pfq: queue index relative to the PF's function space
- *
- * If no VF is found who owns the pfq then return NULL, otherwise return a
- * pointer to the VF who owns the pfq
- */
-static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
-{
- unsigned int vf_id;
-
- ice_for_each_vf(pf, vf_id) {
- struct ice_vf *vf = &pf->vf[vf_id];
- struct ice_vsi *vsi;
- u16 rxq_idx;
-
- vsi = ice_get_vf_vsi(vf);
-
- ice_for_each_rxq(vsi, rxq_idx)
- if (vsi->rxq_map[rxq_idx] == pfq)
- return vf;
- }
-
- return NULL;
-}
-
-/**
- * ice_globalq_to_pfq - convert from global queue index to PF space queue index
- * @pf: PF used for conversion
- * @globalq: global queue index used to convert to PF space queue index
- */
-static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
-{
- return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
-}
-
-/**
- * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
- * @pf: PF that the LAN overflow event happened on
- * @event: structure holding the event information for the LAN overflow event
- *
- * Determine if the LAN overflow event was caused by a VF queue. If it was not
- * caused by a VF, do nothing. If a VF caused this LAN overflow event, trigger a
- * reset on the offending VF.
- */
-void
-ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
-{
- u32 gldcb_rtctq, queue;
- struct ice_vf *vf;
-
- gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
- dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
-
- /* event returns device global Rx queue number */
- queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
- GLDCB_RTCTQ_RXQNUM_S;
-
- vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
- if (!vf)
- return;
-
- mutex_lock(&vf->cfg_lock);
- ice_vc_reset_vf(vf);
- mutex_unlock(&vf->cfg_lock);
-}
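
The handler above does two conversions: a mask-and-shift to pull the Rx queue number out of the event word, then a subtraction of the PF's first global queue ID to land in PF queue space. A small standalone sketch, where the mask, shift and base values are made up rather than the real GLDCB_RTCTQ definitions:

#include <stdint.h>
#include <stdio.h>

#define RXQNUM_M        0x00007fffu     /* hypothetical field mask */
#define RXQNUM_S        0               /* hypothetical field shift */

int main(void)
{
        uint32_t gldcb_rtctq = 0x00000123;      /* made-up event payload */
        uint32_t rxq_first_id = 0x100;          /* made-up PF queue base */
        uint32_t global_q = (gldcb_rtctq & RXQNUM_M) >> RXQNUM_S;
        uint32_t pf_q = global_q - rxq_first_id;

        printf("global rxq %u -> pf rxq %u\n", global_q, pf_q);
        return 0;
}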
-
-/**
* ice_vc_send_msg_to_vf - Send message to VF
* @vf: pointer to the VF info
* @v_opcode: virtual channel opcode
@@ -2173,13 +299,7 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
struct ice_pf *pf;
int aq_ret;
- if (!vf)
- return -EINVAL;
-
pf = vf->pf;
- if (ice_validate_vf_id(pf, vf->vf_id))
- return -EINVAL;
-
dev = ice_pf_to_dev(pf);
aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
@@ -2233,7 +353,7 @@ static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
max_frame_size = pi->phy.link_info.max_frame_size;
- if (vf->port_vlan_info)
+ if (ice_vf_is_port_vlan_ena(vf))
max_frame_size -= VLAN_HLEN;
return max_frame_size;
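
The helper above shrinks the frame size advertised to the VF by one VLAN header when a port VLAN is active, because the PF inserts that tag on the VF's behalf. A tiny sketch of the adjustment; the link maximum used here is only an example value:

#include <stdbool.h>
#include <stdio.h>

#define VLAN_HLEN       4       /* 802.1Q tag length, as in the kernel */

static unsigned int vf_max_frame(unsigned int link_max, bool port_vlan_ena)
{
        /* the PF inserts the port VLAN tag, so the VF loses 4 bytes */
        return port_vlan_ena ? link_max - VLAN_HLEN : link_max;
}

int main(void)
{
        printf("%u\n", vf_max_frame(9728, true));       /* prints 9724 */
        return 0;
}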
@@ -2250,12 +370,12 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
{
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_vf_resource *vfres = NULL;
- struct ice_pf *pf = vf->pf;
+ struct ice_hw *hw = &vf->pf->hw;
struct ice_vsi *vsi;
int len = 0;
int ret;
- if (ice_check_vf_init(pf, vf)) {
+ if (ice_check_vf_init(vf)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto err;
}
@@ -2282,8 +402,33 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
goto err;
}
- if (!vsi->info.pvid)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+ /* VLAN offloads based on current device configuration */
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN_V2;
+ } else if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN) {
+ /* allow VF to negotiate VIRTCHNL_VF_OFFLOAD explicitly for
+ * these two conditions, which amounts to guest VLAN filtering
+ * and offloads being based on the inner VLAN or the
+ * inner/single VLAN respectively and don't allow VF to
+ * negotiate VIRTCHNL_VF_OFFLOAD in any other cases
+ */
+ if (ice_is_dvm_ena(hw) && ice_vf_is_port_vlan_ena(vf)) {
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
+ } else if (!ice_is_dvm_ena(hw) &&
+ !ice_vf_is_port_vlan_ena(vf)) {
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
+ /* configure backward compatible support for VFs that
+ * only support VIRTCHNL_VF_OFFLOAD_VLAN, the PF is
+ * configured in SVM, and no port VLAN is configured
+ */
+ ice_vf_vsi_cfg_svm_legacy_vlan_mode(vsi);
+ } else if (ice_is_dvm_ena(hw)) {
+ /* configure software offloaded VLAN support when DVM
+ * is enabled, but no port VLAN is enabled
+ */
+ ice_vf_vsi_cfg_dvm_legacy_vlan_mode(vsi);
+ }
+ }
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
@@ -2327,7 +472,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->num_vsis = 1;
/* Tx and Rx queue are equal for VF */
vfres->num_queue_pairs = vsi->num_txq;
- vfres->max_vectors = pf->num_msix_per_vf;
+ vfres->max_vectors = vf->pf->vfs.num_msix_per;
vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
vfres->max_mtu = ice_vc_get_max_frame_size(vf);
@@ -2366,7 +511,7 @@ err:
static void ice_vc_reset_vf_msg(struct ice_vf *vf)
{
if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
- ice_reset_vf(vf, false);
+ ice_reset_vf(vf, 0);
}
/**
@@ -2401,7 +546,7 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
vsi = ice_find_vsi_from_id(pf, vsi_id);
- return (vsi && (vsi->vf_id == vf->vf_id));
+ return (vsi && (vsi->vf == vf));
}
/**
@@ -2833,150 +978,6 @@ error_param:
}
/**
- * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
- * @vf: The VF being reset
- *
- * The max poll time is ~800ms, which is about the maximum time it takes
- * for a VF to be reset and/or a VF driver to be removed.
- */
-static void ice_wait_on_vf_reset(struct ice_vf *vf)
-{
- int i;
-
- for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
- if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
- break;
- msleep(ICE_MAX_VF_RESET_SLEEP_MS);
- }
-}
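
The wait helper above is a bounded poll: retry a readiness test a fixed number of times with a short sleep between attempts, so the worst case is tries x sleep (~800ms per the comment). A standalone sketch with illustrative counts, since the ICE_MAX_VF_RESET_* values are not shown here:

#include <stdbool.h>
#include <unistd.h>

#define MAX_TRIES       40      /* illustrative; 40 * 20ms = ~800ms bound */
#define SLEEP_MS        20

static bool vf_is_ready(void)
{
        return true;    /* stand-in for test_bit(ICE_VF_STATE_INIT, ...) */
}

static void wait_for_vf_ready(void)
{
        for (int i = 0; i < MAX_TRIES; i++) {
                if (vf_is_ready())
                        break;
                usleep(SLEEP_MS * 1000);
        }
}

int main(void)
{
        wait_for_vf_ready();
        return 0;
}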
-
-/**
- * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
- * @vf: VF to check if it's ready to be configured/queried
- *
- * The purpose of this function is to make sure the VF is not in reset, not
- * disabled, and initialized so it can be configured and/or queried by a host
- * administrator.
- */
-int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
-{
- struct ice_pf *pf;
-
- ice_wait_on_vf_reset(vf);
-
- if (ice_is_vf_disabled(vf))
- return -EINVAL;
-
- pf = vf->pf;
- if (ice_check_vf_init(pf, vf))
- return -EBUSY;
-
- return 0;
-}
-
-/**
- * ice_set_vf_spoofchk
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @ena: flag to enable or disable feature
- *
- * Enable or disable VF spoof checking
- */
-int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
-{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
- struct ice_vsi_ctx *ctx;
- struct ice_vsi *vf_vsi;
- struct device *dev;
- struct ice_vf *vf;
- int ret;
-
- dev = ice_pf_to_dev(pf);
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
-
- vf_vsi = ice_get_vf_vsi(vf);
- if (!vf_vsi) {
- netdev_err(netdev, "VSI %d for VF %d is null\n",
- vf->lan_vsi_idx, vf->vf_id);
- return -EINVAL;
- }
-
- if (vf_vsi->type != ICE_VSI_VF) {
- netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
- vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
- return -ENODEV;
- }
-
- if (ena == vf->spoofchk) {
- dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
- return 0;
- }
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
- ctx->info.sec_flags = vf_vsi->info.sec_flags;
- ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
- if (ena) {
- ctx->info.sec_flags |=
- ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
- (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
- ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
- } else {
- ctx->info.sec_flags &=
- ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
- (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
- ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
- }
-
- ret = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
- if (ret) {
- dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d\n",
- ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, ret);
- goto out;
- }
-
- /* only update spoofchk state and VSI context on success */
- vf_vsi->info.sec_flags = ctx->info.sec_flags;
- vf->spoofchk = ena;
-
-out:
- kfree(ctx);
- return ret;
-}
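
The spoof-check path above is a read-modify-write of the VSI security flags: the same pair of bits (MAC anti-spoof plus Tx VLAN pruning) is either OR-ed in or masked out before the context update is issued. A sketch of that flag handling, using made-up bit positions rather than the real ICE_AQ_VSI_SEC_* encodings:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SEC_FLAG_MAC_ANTI_SPOOF (1u << 0)       /* made-up bit positions */
#define SEC_FLAG_TX_VLAN_PRUNE  (1u << 4)

static uint8_t update_sec_flags(uint8_t cur, bool ena)
{
        uint8_t bits = SEC_FLAG_MAC_ANTI_SPOOF | SEC_FLAG_TX_VLAN_PRUNE;

        return ena ? (cur | bits) : (cur & ~bits);
}

int main(void)
{
        printf("0x%02x\n", update_sec_flags(0x00, true));       /* 0x11 */
        printf("0x%02x\n", update_sec_flags(0x11, false));      /* 0x00 */
        return 0;
}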
-
-/**
- * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
- * @pf: PF structure for accessing VF(s)
- *
- * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
- * else return true
- */
-bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
-{
- int vf_idx;
-
- ice_for_each_vf(pf, vf_idx) {
- struct ice_vf *vf = &pf->vf[vf_idx];
-
- /* found a VF that has promiscuous mode configured */
- if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
- test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
- return true;
- }
-
- return false;
-}
-
-/**
* ice_vc_cfg_promiscuous_mode_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2989,6 +990,7 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
bool rm_promisc, alluni = false, allmulti = false;
struct virtchnl_promisc_info *info =
(struct virtchnl_promisc_info *)msg;
+ struct ice_vsi_vlan_ops *vlan_ops;
int mcast_err = 0, ucast_err = 0;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
@@ -3012,7 +1014,7 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
}
dev = ice_pf_to_dev(pf);
- if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ if (!ice_is_vf_trusted(vf)) {
dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
vf->vf_id);
/* Leave v_ret alone, lie to the VF on purpose. */
@@ -3027,16 +1029,15 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
rm_promisc = !allmulti && !alluni;
- if (vsi->num_vlan || vf->port_vlan_info) {
- if (rm_promisc)
- ret = ice_cfg_vlan_pruning(vsi, true);
- else
- ret = ice_cfg_vlan_pruning(vsi, false);
- if (ret) {
- dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ if (rm_promisc)
+ ret = vlan_ops->ena_rx_filtering(vsi);
+ else
+ ret = vlan_ops->dis_rx_filtering(vsi);
+ if (ret) {
+ dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
}
if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
@@ -3063,7 +1064,8 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
} else {
u8 mcast_m, ucast_m;
- if (vf->port_vlan_info || vsi->num_vlan > 1) {
+ if (ice_vf_is_port_vlan_ena(vf) ||
+ ice_vsi_has_non_zero_vlans(vsi)) {
mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
} else {
@@ -3090,16 +1092,21 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
!test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
dev_info(dev, "VF %u successfully set multicast promiscuous mode\n",
vf->vf_id);
- else if (!allmulti && test_and_clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
+ else if (!allmulti &&
+ test_and_clear_bit(ICE_VF_STATE_MC_PROMISC,
+ vf->vf_states))
dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n",
vf->vf_id);
}
if (!ucast_err) {
- if (alluni && !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
+ if (alluni &&
+ !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
dev_info(dev, "VF %u successfully set unicast promiscuous mode\n",
vf->vf_id);
- else if (!alluni && test_and_clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
+ else if (!alluni &&
+ test_and_clear_bit(ICE_VF_STATE_UC_PROMISC,
+ vf->vf_states))
dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n",
vf->vf_id);
}
@@ -3492,7 +1499,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
* there is actually at least a single VF queue vector mapped
*/
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- pf->num_msix_per_vf < num_q_vectors_mapped ||
+ pf->vfs.num_msix_per < num_q_vectors_mapped ||
!num_q_vectors_mapped) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -3514,7 +1521,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
/* vector_id is always 0-based for each VF, and can never be
* larger than or equal to the max allowed interrupts per VF
*/
- if (!(vector_id < pf->num_msix_per_vf) ||
+ if (!(vector_id < pf->vfs.num_msix_per) ||
!ice_vc_isvalid_vsi_id(vf, vsi_id) ||
(!vector_id && (map->rxq_map || map->txq_map))) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -3643,10 +1650,11 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
}
vsi->max_frame = qpi->rxq.max_pkt_size;
- /* add space for the port VLAN since the VF driver is not
- * expected to account for it in the MTU calculation
+ /* add space for the port VLAN since the VF driver is
+ * not expected to account for it in the MTU
+ * calculation
*/
- if (vf->port_vlan_info)
+ if (ice_vf_is_port_vlan_ena(vf))
vsi->max_frame += VLAN_HLEN;
if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
@@ -3663,15 +1671,6 @@ error_param:
}
/**
- * ice_is_vf_trusted
- * @vf: pointer to the VF info
- */
-static bool ice_is_vf_trusted(struct ice_vf *vf)
-{
- return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
-}
-
-/**
* ice_can_vf_change_mac
* @vf: pointer to the VF info
*
@@ -4045,7 +2044,7 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
} else {
/* request is successful, then reset VF */
vf->num_req_qs = req_queues;
- ice_vc_reset_vf(vf);
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
dev_info(dev, "VF %d granted request of %u queues.\n",
vf->vf_id, req_queues);
return 0;
@@ -4058,78 +2057,91 @@ error_param:
}
/**
- * ice_set_vf_port_vlan
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @vlan_id: VLAN ID being set
- * @qos: priority setting
- * @vlan_proto: VLAN protocol
+ * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
+ * @caps: VF driver negotiated capabilities
*
- * program VF Port VLAN ID and/or QoS
+ * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
*/
-int
-ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
- __be16 vlan_proto)
+static bool ice_vf_vlan_offload_ena(u32 caps)
{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct device *dev;
- struct ice_vf *vf;
- u16 vlanprio;
- int ret;
-
- dev = ice_pf_to_dev(pf);
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- if (vlan_id >= VLAN_N_VID || qos > 7) {
- dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
- vf_id, vlan_id, qos);
- return -EINVAL;
- }
-
- if (vlan_proto != htons(ETH_P_8021Q)) {
- dev_err(dev, "VF VLAN protocol is not supported\n");
- return -EPROTONOSUPPORT;
- }
+ return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
+}
- vf = &pf->vf[vf_id];
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
+/**
+ * ice_is_vlan_promisc_allowed - check if VLAN promiscuous config is allowed
+ * @vf: VF used to determine if VLAN promiscuous config is allowed
+ */
+static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
+{
+ if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
+ test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
+ test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, vf->pf->flags))
+ return true;
- vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);
+ return false;
+}
- if (vf->port_vlan_info == vlanprio) {
- /* duplicate request, so just return success */
- dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
- return 0;
- }
+/**
+ * ice_vf_ena_vlan_promisc - Enable Tx/Rx VLAN promiscuous for the VLAN
+ * @vsi: VF's VSI used to enable VLAN promiscuous mode
+ * @vlan: VLAN used to enable VLAN promiscuous
+ *
+ * This function should only be called if VLAN promiscuous mode is allowed,
+ * which can be determined via ice_is_vlan_promisc_allowed().
+ */
+static int ice_vf_ena_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
+ int status;
- mutex_lock(&vf->cfg_lock);
+ status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
+ vlan->vid);
+ if (status && status != -EEXIST)
+ return status;
- vf->port_vlan_info = vlanprio;
+ return 0;
+}
- if (vf->port_vlan_info)
- dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
- vlan_id, qos, vf_id);
- else
- dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
+/**
+ * ice_vf_dis_vlan_promisc - Disable Tx/Rx VLAN promiscuous for the VLAN
+ * @vsi: VF's VSI used to disable VLAN promiscuous mode for
+ * @vlan: VLAN used to disable VLAN promiscuous
+ *
+ * This function should only be called if VLAN promiscuous mode is allowed,
+ * which can be determined via ice_is_vlan_promisc_allowed().
+ */
+static int ice_vf_dis_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
+ int status;
- ice_vc_reset_vf(vf);
- mutex_unlock(&vf->cfg_lock);
+ status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
+ vlan->vid);
+ if (status && status != -ENOENT)
+ return status;
return 0;
}
/**
- * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
- * @caps: VF driver negotiated capabilities
+ * ice_vf_has_max_vlans - check if VF already has the max allowed VLAN filters
+ * @vf: VF to check against
+ * @vsi: VF's VSI
*
- * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
+ * If the VF is trusted then the VF is allowed to add as many VLANs as it
+ * wants to, so return false.
+ *
+ * When the VF is untrusted compare the number of non-zero VLANs + 1 to the max
+ * allowed VLANs for an untrusted VF. Return the result of this comparison.
*/
-static bool ice_vf_vlan_offload_ena(u32 caps)
+static bool ice_vf_has_max_vlans(struct ice_vf *vf, struct ice_vsi *vsi)
{
- return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
+ if (ice_is_vf_trusted(vf))
+ return false;
+
+#define ICE_VF_ADDED_VLAN_ZERO_FLTRS 1
+ return ((ice_vsi_num_non_zero_vlans(vsi) +
+ ICE_VF_ADDED_VLAN_ZERO_FLTRS) >= ICE_MAX_VLAN_PER_VF);
}
/**
@@ -4149,9 +2161,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
bool vlan_promisc = false;
struct ice_vsi *vsi;
struct device *dev;
- struct ice_hw *hw;
int status = 0;
- u8 promisc_m;
int i;
dev = ice_pf_to_dev(pf);
@@ -4179,15 +2189,13 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
}
}
- hw = &pf->hw;
vsi = ice_get_vf_vsi(vf);
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- if (add_v && !ice_is_vf_trusted(vf) &&
- vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
+ if (add_v && ice_vf_has_max_vlans(vf, vsi)) {
dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
vf->vf_id);
/* There is no need to let VF know about being not trusted,
@@ -4196,22 +2204,28 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
- if (vsi->info.pvid) {
+ /* in DVM a VF can add/delete inner VLAN filters when
+ * VIRTCHNL_VF_OFFLOAD_VLAN is negotiated, so only reject in SVM
+ */
+ if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&pf->hw)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
- test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
- test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags))
- vlan_promisc = true;
+ /* in DVM VLAN promiscuous is based on the outer VLAN, which would be
+ * the port VLAN if VIRTCHNL_VF_OFFLOAD_VLAN was negotiated, so only
+ * allow vlan_promisc = true in SVM and if no port VLAN is configured
+ */
+ vlan_promisc = ice_is_vlan_promisc_allowed(vf) &&
+ !ice_is_dvm_ena(&pf->hw) &&
+ !ice_vf_is_port_vlan_ena(vf);
if (add_v) {
for (i = 0; i < vfl->num_elements; i++) {
u16 vid = vfl->vlan_id[i];
+ struct ice_vlan vlan;
- if (!ice_is_vf_trusted(vf) &&
- vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
+ if (ice_vf_has_max_vlans(vf, vsi)) {
dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
vf->vf_id);
/* There is no need to let VF know about being
@@ -4228,29 +2242,23 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
if (!vid)
continue;
- status = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
+ vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
+ status = vsi->inner_vlan_ops.add_vlan(vsi, &vlan);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- /* Enable VLAN pruning when non-zero VLAN is added */
- if (!vlan_promisc && vid &&
- !ice_vsi_is_vlan_pruning_ena(vsi)) {
- status = ice_cfg_vlan_pruning(vsi, true);
- if (status) {
+ /* Enable VLAN filtering on first non-zero VLAN */
+ if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) {
+ if (vsi->inner_vlan_ops.ena_rx_filtering(vsi)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
vid, status);
goto error_param;
}
} else if (vlan_promisc) {
- /* Enable Ucast/Mcast VLAN promiscuous mode */
- promisc_m = ICE_PROMISC_VLAN_TX |
- ICE_PROMISC_VLAN_RX;
-
- status = ice_set_vsi_promisc(hw, vsi->idx,
- promisc_m, vid);
+ status = ice_vf_ena_vlan_promisc(vsi, &vlan);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
@@ -4271,6 +2279,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
num_vf_vlan = vsi->num_vlan;
for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
u16 vid = vfl->vlan_id[i];
+ struct ice_vlan vlan;
/* we add VLAN 0 by default for each VF so we can enable
* Tx VLAN anti-spoof without triggering MDD events so
@@ -4279,28 +2288,19 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
if (!vid)
continue;
- /* Make sure ice_vsi_kill_vlan is successful before
- * updating VLAN information
- */
- status = ice_vsi_kill_vlan(vsi, vid);
+ vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
+ status = vsi->inner_vlan_ops.del_vlan(vsi, &vlan);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- /* Disable VLAN pruning when only VLAN 0 is left */
- if (vsi->num_vlan == 1 &&
- ice_vsi_is_vlan_pruning_ena(vsi))
- ice_cfg_vlan_pruning(vsi, false);
-
- /* Disable Unicast/Multicast VLAN promiscuous mode */
- if (vlan_promisc) {
- promisc_m = ICE_PROMISC_VLAN_TX |
- ICE_PROMISC_VLAN_RX;
+ /* Disable VLAN filtering when only VLAN 0 is left */
+ if (!ice_vsi_has_non_zero_vlans(vsi))
+ vsi->inner_vlan_ops.dis_rx_filtering(vsi);
- ice_clear_vsi_promisc(hw, vsi->idx,
- promisc_m, vid);
- }
+ if (vlan_promisc)
+ ice_vf_dis_vlan_promisc(vsi, &vlan);
}
}
@@ -4360,7 +2360,7 @@ static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
}
vsi = ice_get_vf_vsi(vf);
- if (ice_vsi_manage_vlan_stripping(vsi, true))
+ if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q))
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
error_param:
@@ -4395,7 +2395,7 @@ static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
goto error_param;
}
- if (ice_vsi_manage_vlan_stripping(vsi, false))
+ if (vsi->inner_vlan_ops.dis_stripping(vsi))
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
error_param:
@@ -4407,11 +2407,8 @@ error_param:
* ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
* @vf: VF to enable/disable VLAN stripping for on initialization
*
- * If the VIRTCHNL_VF_OFFLOAD_VLAN flag is set enable VLAN stripping, else if
- * the flag is cleared then we want to disable stripping. For example, the flag
- * will be cleared when port VLANs are configured by the administrator before
- * passing the VF to the guest or if the AVF driver doesn't support VLAN
- * offloads.
+ * Set the default for VLAN stripping based on whether a port VLAN is configured
+ * and the current VLAN mode of the device.
*/
static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
{
@@ -4420,17 +2417,977 @@ static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
if (!vsi)
return -EINVAL;
- /* don't modify stripping if port VLAN is configured */
- if (vsi->info.pvid)
+ /* don't modify stripping if port VLAN is configured in SVM since the
+ * port VLAN is based on the inner/single VLAN in SVM
+ */
+ if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&vsi->back->hw))
return 0;
if (ice_vf_vlan_offload_ena(vf->driver_caps))
- return ice_vsi_manage_vlan_stripping(vsi, true);
+ return vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q);
+ else
+ return vsi->inner_vlan_ops.dis_stripping(vsi);
+}
+
+static u16 ice_vc_get_max_vlan_fltrs(struct ice_vf *vf)
+{
+ if (vf->trusted)
+ return VLAN_N_VID;
+ else
+ return ICE_MAX_VLAN_PER_VF;
+}
+
+/**
+ * ice_vf_outer_vlan_not_allowed - check if outer VLAN can be used
+ * @vf: VF being checked
+ *
+ * When the device is in double VLAN mode, check whether or not the outer VLAN
+ * is allowed.
+ */
+static bool ice_vf_outer_vlan_not_allowed(struct ice_vf *vf)
+{
+ if (ice_vf_is_port_vlan_ena(vf))
+ return true;
+
+ return false;
+}
+
+/**
+ * ice_vc_set_dvm_caps - set VLAN capabilities when the device is in DVM
+ * @vf: VF that capabilities are being set for
+ * @caps: VLAN capabilities to populate
+ *
+ * Determine VLAN capabilities support based on whether a port VLAN is
+ * configured. If a port VLAN is configured then the VF should use the inner
+ * filtering/offload capabilities since the port VLAN is using the outer VLAN
+ * capabilities.
+ */
+static void
+ice_vc_set_dvm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
+{
+ struct virtchnl_vlan_supported_caps *supported_caps;
+
+ if (ice_vf_outer_vlan_not_allowed(vf)) {
+ /* until support for inner VLAN filtering is added when a port
+ * VLAN is configured, only support software offloaded inner
+ * VLANs when a port VLAN is configured in DVM
+ */
+ supported_caps = &caps->filtering.filtering_support;
+ supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ supported_caps = &caps->offloads.stripping_support;
+ supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ supported_caps = &caps->offloads.insertion_support;
+ supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
+ caps->offloads.ethertype_match =
+ VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
+ } else {
+ supported_caps = &caps->filtering.filtering_support;
+ supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
+ supported_caps->outer = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ VIRTCHNL_VLAN_ETHERTYPE_9100 |
+ VIRTCHNL_VLAN_ETHERTYPE_AND;
+ caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ VIRTCHNL_VLAN_ETHERTYPE_9100;
+
+ supported_caps = &caps->offloads.stripping_support;
+ supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+ supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ VIRTCHNL_VLAN_ETHERTYPE_9100 |
+ VIRTCHNL_VLAN_ETHERTYPE_XOR |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2;
+
+ supported_caps = &caps->offloads.insertion_support;
+ supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+ supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ VIRTCHNL_VLAN_ETHERTYPE_9100 |
+ VIRTCHNL_VLAN_ETHERTYPE_XOR |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2;
+
+ caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
+
+ caps->offloads.ethertype_match =
+ VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
+ }
+
+ caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
+}
+
+/**
+ * ice_vc_set_svm_caps - set VLAN capabilities when the device is in SVM
+ * @vf: VF that capabilities are being set for
+ * @caps: VLAN capabilities to populate
+ *
+ * Determine VLAN capabilities support based on whether a port VLAN is
+ * configured. If a port VLAN is configured then the VF does not have any VLAN
+ * filtering or offload capabilities since the port VLAN is using the inner VLAN
+ * capabilities in single VLAN mode (SVM). Otherwise allow the VF to use inner
+ * VLAN filtering and offload capabilities.
+ */
+static void
+ice_vc_set_svm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
+{
+ struct virtchnl_vlan_supported_caps *supported_caps;
+
+ if (ice_vf_is_port_vlan_ena(vf)) {
+ supported_caps = &caps->filtering.filtering_support;
+ supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ supported_caps = &caps->offloads.stripping_support;
+ supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ supported_caps = &caps->offloads.insertion_support;
+ supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ caps->offloads.ethertype_init = VIRTCHNL_VLAN_UNSUPPORTED;
+ caps->offloads.ethertype_match = VIRTCHNL_VLAN_UNSUPPORTED;
+ caps->filtering.max_filters = 0;
+ } else {
+ supported_caps = &caps->filtering.filtering_support;
+ supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+ caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
+
+ supported_caps = &caps->offloads.stripping_support;
+ supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ supported_caps = &caps->offloads.insertion_support;
+ supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
+ caps->offloads.ethertype_match =
+ VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
+ caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
+ }
+}
+
+/**
+ * ice_vc_get_offload_vlan_v2_caps - determine VF's VLAN capabilities
+ * @vf: VF to determine VLAN capabilities for
+ *
+ * This will only be called if the VF and PF successfully negotiated
+ * VIRTCHNL_VF_OFFLOAD_VLAN_V2.
+ *
+ * Set VLAN capabilities based on the current VLAN mode and whether a port VLAN
+ * is configured or not.
+ */
+static int ice_vc_get_offload_vlan_v2_caps(struct ice_vf *vf)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vlan_caps *caps = NULL;
+ int err, len = 0;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ caps = kzalloc(sizeof(*caps), GFP_KERNEL);
+ if (!caps) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ goto out;
+ }
+ len = sizeof(*caps);
+
+ if (ice_is_dvm_ena(&vf->pf->hw))
+ ice_vc_set_dvm_caps(vf, caps);
else
- return ice_vsi_manage_vlan_stripping(vsi, false);
+ ice_vc_set_svm_caps(vf, caps);
+
+ /* store negotiated caps to prevent invalid VF messages */
+ memcpy(&vf->vlan_v2_caps, caps, sizeof(*caps));
+
+out:
+ err = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
+ v_ret, (u8 *)caps, len);
+ kfree(caps);
+ return err;
}
-static struct ice_vc_vf_ops ice_vc_vf_dflt_ops = {
+/**
+ * ice_vc_validate_vlan_tpid - validate VLAN TPID
+ * @filtering_caps: negotiated/supported VLAN filtering capabilities
+ * @tpid: VLAN TPID used for validation
+ *
+ * Convert the VLAN TPID to a VIRTCHNL_VLAN_ETHERTYPE_* and then compare against
+ * the negotiated/supported filtering caps to see if the VLAN TPID is valid.
+ */
+static bool ice_vc_validate_vlan_tpid(u16 filtering_caps, u16 tpid)
+{
+ enum virtchnl_vlan_support vlan_ethertype = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ switch (tpid) {
+ case ETH_P_8021Q:
+ vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100;
+ break;
+ case ETH_P_8021AD:
+ vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ break;
+ case ETH_P_QINQ1:
+ vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_9100;
+ break;
+ }
+
+ if (!(filtering_caps & vlan_ethertype))
+ return false;
+
+ return true;
+}
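
The validation above maps a TPID to exactly one capability flag and then requires that flag to be present in the negotiated filtering caps; an unknown TPID maps to no flag and is rejected. A userspace sketch with invented flag values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CAP_ETHERTYPE_8100      (1u << 0)       /* illustrative flag values */
#define CAP_ETHERTYPE_88A8      (1u << 1)
#define CAP_ETHERTYPE_9100      (1u << 2)

static bool tpid_allowed(uint16_t caps, uint16_t tpid)
{
        uint16_t flag = 0;

        switch (tpid) {
        case 0x8100: flag = CAP_ETHERTYPE_8100; break;
        case 0x88A8: flag = CAP_ETHERTYPE_88A8; break;
        case 0x9100: flag = CAP_ETHERTYPE_9100; break;
        }

        /* unknown TPID (flag == 0) or un-negotiated ethertype is rejected */
        return flag && (caps & flag);
}

int main(void)
{
        uint16_t caps = CAP_ETHERTYPE_8100;     /* e.g. 802.1Q only */

        printf("%d %d\n", tpid_allowed(caps, 0x8100),   /* 1 */
               tpid_allowed(caps, 0x88A8));             /* 0 */
        return 0;
}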
+
+/**
+ * ice_vc_is_valid_vlan - validate the virtchnl_vlan
+ * @vc_vlan: virtchnl_vlan to validate
+ *
+ * If the VLAN TCI or the VLAN TPID is 0, then this filter is invalid, so
+ * return false. Otherwise return true.
+ */
+static bool ice_vc_is_valid_vlan(struct virtchnl_vlan *vc_vlan)
+{
+ if (!vc_vlan->tci || !vc_vlan->tpid)
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_vc_validate_vlan_filter_list - validate the filter list from the VF
+ * @vfc: negotiated/supported VLAN filtering capabilities
+ * @vfl: VLAN filter list from VF to validate
+ *
+ * Validate all of the filters in the VLAN filter list from the VF. If any of
+ * the checks fail then return false. Otherwise return true.
+ */
+static bool
+ice_vc_validate_vlan_filter_list(struct virtchnl_vlan_filtering_caps *vfc,
+ struct virtchnl_vlan_filter_list_v2 *vfl)
+{
+ u16 i;
+
+ if (!vfl->num_elements)
+ return false;
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ struct virtchnl_vlan_supported_caps *filtering_support =
+ &vfc->filtering_support;
+ struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
+ struct virtchnl_vlan *outer = &vlan_fltr->outer;
+ struct virtchnl_vlan *inner = &vlan_fltr->inner;
+
+ if ((ice_vc_is_valid_vlan(outer) &&
+ filtering_support->outer == VIRTCHNL_VLAN_UNSUPPORTED) ||
+ (ice_vc_is_valid_vlan(inner) &&
+ filtering_support->inner == VIRTCHNL_VLAN_UNSUPPORTED))
+ return false;
+
+ if ((outer->tci_mask &&
+ !(filtering_support->outer & VIRTCHNL_VLAN_FILTER_MASK)) ||
+ (inner->tci_mask &&
+ !(filtering_support->inner & VIRTCHNL_VLAN_FILTER_MASK)))
+ return false;
+
+ if (((outer->tci & VLAN_PRIO_MASK) &&
+ !(filtering_support->outer & VIRTCHNL_VLAN_PRIO)) ||
+ ((inner->tci & VLAN_PRIO_MASK) &&
+ !(filtering_support->inner & VIRTCHNL_VLAN_PRIO)))
+ return false;
+
+ if ((ice_vc_is_valid_vlan(outer) &&
+ !ice_vc_validate_vlan_tpid(filtering_support->outer,
+ outer->tpid)) ||
+ (ice_vc_is_valid_vlan(inner) &&
+ !ice_vc_validate_vlan_tpid(filtering_support->inner,
+ inner->tpid)))
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ice_vc_to_vlan - transform from struct virtchnl_vlan to struct ice_vlan
+ * @vc_vlan: struct virtchnl_vlan to transform
+ */
+static struct ice_vlan ice_vc_to_vlan(struct virtchnl_vlan *vc_vlan)
+{
+ struct ice_vlan vlan = { 0 };
+
+ vlan.prio = (vc_vlan->tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ vlan.vid = vc_vlan->tci & VLAN_VID_MASK;
+ vlan.tpid = vc_vlan->tpid;
+
+ return vlan;
+}
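
The conversion above simply unpacks the 16-bit TCI: the top three bits carry the priority and the low twelve bits carry the VLAN ID, which is what VLAN_PRIO_MASK/VLAN_PRIO_SHIFT and VLAN_VID_MASK encode in the kernel. A short worked example:

#include <stdint.h>
#include <stdio.h>

#define PRIO_MASK       0xe000  /* same value as VLAN_PRIO_MASK */
#define PRIO_SHIFT      13      /* VLAN_PRIO_SHIFT */
#define VID_MASK        0x0fff  /* VLAN_VID_MASK */

int main(void)
{
        uint16_t tci = 0xa064;  /* priority 5, DEI 0, VLAN ID 100 */

        printf("prio=%u vid=%u\n",
               (unsigned)((tci & PRIO_MASK) >> PRIO_SHIFT),
               (unsigned)(tci & VID_MASK));
        return 0;
}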
+
+/**
+ * ice_vc_vlan_action - action to perform on the virtchnl_vlan
+ * @vsi: VF's VSI used to perform the action
+ * @vlan_action: function to perform the action with (i.e. add/del)
+ * @vlan: VLAN filter to perform the action with
+ */
+static int
+ice_vc_vlan_action(struct ice_vsi *vsi,
+ int (*vlan_action)(struct ice_vsi *, struct ice_vlan *),
+ struct ice_vlan *vlan)
+{
+ int err;
+
+ err = vlan_action(vsi, vlan);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_vc_del_vlans - delete VLAN(s) from the virtchnl filter list
+ * @vf: VF used to delete the VLAN(s)
+ * @vsi: VF's VSI used to delete the VLAN(s)
+ * @vfl: virtchnl filter list used to delete the filters
+ */
+static int
+ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct virtchnl_vlan_filter_list_v2 *vfl)
+{
+ bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
+ int err;
+ u16 i;
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
+ struct virtchnl_vlan *vc_vlan;
+
+ vc_vlan = &vlan_fltr->outer;
+ if (ice_vc_is_valid_vlan(vc_vlan)) {
+ struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
+
+ err = ice_vc_vlan_action(vsi,
+ vsi->outer_vlan_ops.del_vlan,
+ &vlan);
+ if (err)
+ return err;
+
+ if (vlan_promisc)
+ ice_vf_dis_vlan_promisc(vsi, &vlan);
+ }
+
+ vc_vlan = &vlan_fltr->inner;
+ if (ice_vc_is_valid_vlan(vc_vlan)) {
+ struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
+
+ err = ice_vc_vlan_action(vsi,
+ vsi->inner_vlan_ops.del_vlan,
+ &vlan);
+ if (err)
+ return err;
+
+ /* no support for VLAN promiscuous on inner VLAN unless
+ * we are in Single VLAN Mode (SVM)
+ */
+ if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc)
+ ice_vf_dis_vlan_promisc(vsi, &vlan);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vc_remove_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_DEL_VLAN_V2
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ */
+static int ice_vc_remove_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_vlan_filter_list_v2 *vfl =
+ (struct virtchnl_vlan_filter_list_v2 *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_vsi *vsi;
+
+ if (!ice_vc_validate_vlan_filter_list(&vf->vlan_v2_caps.filtering,
+ vfl)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (ice_vc_del_vlans(vf, vsi, vfl))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+
+out:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN_V2, v_ret, NULL,
+ 0);
+}
+
+/**
+ * ice_vc_add_vlans - add VLAN(s) from the virtchnl filter list
+ * @vf: VF used to add the VLAN(s)
+ * @vsi: VF's VSI used to add the VLAN(s)
+ * @vfl: virtchnl filter list used to add the filters
+ */
+static int
+ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct virtchnl_vlan_filter_list_v2 *vfl)
+{
+ bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
+ int err;
+ u16 i;
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
+ struct virtchnl_vlan *vc_vlan;
+
+ vc_vlan = &vlan_fltr->outer;
+ if (ice_vc_is_valid_vlan(vc_vlan)) {
+ struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
+
+ err = ice_vc_vlan_action(vsi,
+ vsi->outer_vlan_ops.add_vlan,
+ &vlan);
+ if (err)
+ return err;
+
+ if (vlan_promisc) {
+ err = ice_vf_ena_vlan_promisc(vsi, &vlan);
+ if (err)
+ return err;
+ }
+ }
+
+ vc_vlan = &vlan_fltr->inner;
+ if (ice_vc_is_valid_vlan(vc_vlan)) {
+ struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
+
+ err = ice_vc_vlan_action(vsi,
+ vsi->inner_vlan_ops.add_vlan,
+ &vlan);
+ if (err)
+ return err;
+
+ /* no support for VLAN promiscuous on inner VLAN unless
+ * we are in Single VLAN Mode (SVM)
+ */
+ if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc) {
+ err = ice_vf_ena_vlan_promisc(vsi, &vlan);
+ if (err)
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vc_validate_add_vlan_filter_list - validate add filter list from the VF
+ * @vsi: VF VSI used to get number of existing VLAN filters
+ * @vfc: negotiated/supported VLAN filtering capabilities
+ * @vfl: VLAN filter list from VF to validate
+ *
+ * Validate all of the filters in the VLAN filter list from the VF during the
+ * VIRTCHNL_OP_ADD_VLAN_V2 opcode. If any of the checks fail then return false.
+ * Otherwise return true.
+ */
+static bool
+ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi,
+ struct virtchnl_vlan_filtering_caps *vfc,
+ struct virtchnl_vlan_filter_list_v2 *vfl)
+{
+ u16 num_requested_filters = vsi->num_vlan + vfl->num_elements;
+
+ if (num_requested_filters > vfc->max_filters)
+ return false;
+
+ return ice_vc_validate_vlan_filter_list(vfc, vfl);
+}
+
+/**
+ * ice_vc_add_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_ADD_VLAN_V2
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ */
+static int ice_vc_add_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vlan_filter_list_v2 *vfl =
+ (struct virtchnl_vlan_filter_list_v2 *)msg;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_validate_add_vlan_filter_list(vsi,
+ &vf->vlan_v2_caps.filtering,
+ vfl)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (ice_vc_add_vlans(vf, vsi, vfl))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+
+out:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN_V2, v_ret, NULL,
+ 0);
+}
+
+/**
+ * ice_vc_valid_vlan_setting - validate VLAN setting
+ * @negotiated_settings: negotiated VLAN settings during VF init
+ * @ethertype_setting: ethertype(s) requested for the VLAN setting
+ */
+static bool
+ice_vc_valid_vlan_setting(u32 negotiated_settings, u32 ethertype_setting)
+{
+ if (ethertype_setting && !(negotiated_settings & ethertype_setting))
+ return false;
+
+ /* only allow a single VIRTCHNL_VLAN_ETHERTYPE if
+ * VIRTCHNL_VLAN_ETHERTYPE_AND is not negotiated/supported
+ */
+ if (!(negotiated_settings & VIRTCHNL_VLAN_ETHERTYPE_AND) &&
+ hweight32(ethertype_setting) > 1)
+ return false;
+
+ /* ability to modify the VLAN setting was not negotiated */
+ if (!(negotiated_settings & VIRTCHNL_VLAN_TOGGLE))
+ return false;
+
+ return true;
+}
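
One rule above is easy to miss: requesting more than one ethertype in a single setting is only legal when the AND capability was negotiated, which is what the hweight32() check enforces. A sketch of that rule alone, with invented flag values and __builtin_popcount() standing in for hweight32():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETHERTYPE_8100  (1u << 0)       /* illustrative flag values */
#define ETHERTYPE_88A8  (1u << 1)
#define ETHERTYPE_AND   (1u << 7)

static bool setting_valid(uint32_t negotiated, uint32_t requested)
{
        if (requested && !(negotiated & requested))
                return false;

        /* more than one ethertype requires the AND capability */
        if (!(negotiated & ETHERTYPE_AND) &&
            __builtin_popcount(requested) > 1)
                return false;

        return true;
}

int main(void)
{
        printf("%d\n", setting_valid(ETHERTYPE_8100,
                                     ETHERTYPE_8100 | ETHERTYPE_88A8)); /* 0 */
        return 0;
}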
+
+/**
+ * ice_vc_valid_vlan_setting_msg - validate the VLAN setting message
+ * @caps: negotiated VLAN settings during VF init
+ * @msg: message to validate
+ *
+ * Used to validate any VLAN virtchnl message sent as a
+ * virtchnl_vlan_setting structure. Validates the message against the
+ * negotiated/supported caps during VF driver init.
+ */
+static bool
+ice_vc_valid_vlan_setting_msg(struct virtchnl_vlan_supported_caps *caps,
+ struct virtchnl_vlan_setting *msg)
+{
+ if ((!msg->outer_ethertype_setting &&
+ !msg->inner_ethertype_setting) ||
+ (!caps->outer && !caps->inner))
+ return false;
+
+ if (msg->outer_ethertype_setting &&
+ !ice_vc_valid_vlan_setting(caps->outer,
+ msg->outer_ethertype_setting))
+ return false;
+
+ if (msg->inner_ethertype_setting &&
+ !ice_vc_valid_vlan_setting(caps->inner,
+ msg->inner_ethertype_setting))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_vc_get_tpid - transform from VIRTCHNL_VLAN_ETHERTYPE_* to VLAN TPID
+ * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* used to get VLAN TPID
+ * @tpid: VLAN TPID to populate
+ */
+static int ice_vc_get_tpid(u32 ethertype_setting, u16 *tpid)
+{
+ switch (ethertype_setting) {
+ case VIRTCHNL_VLAN_ETHERTYPE_8100:
+ *tpid = ETH_P_8021Q;
+ break;
+ case VIRTCHNL_VLAN_ETHERTYPE_88A8:
+ *tpid = ETH_P_8021AD;
+ break;
+ case VIRTCHNL_VLAN_ETHERTYPE_9100:
+ *tpid = ETH_P_QINQ1;
+ break;
+ default:
+ *tpid = 0;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vc_ena_vlan_offload - enable VLAN offload based on the ethertype_setting
+ * @vsi: VF's VSI used to enable the VLAN offload
+ * @ena_offload: function used to enable the VLAN offload
+ * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* to enable offloads for
+ */
+static int
+ice_vc_ena_vlan_offload(struct ice_vsi *vsi,
+ int (*ena_offload)(struct ice_vsi *vsi, u16 tpid),
+ u32 ethertype_setting)
+{
+ u16 tpid;
+ int err;
+
+ err = ice_vc_get_tpid(ethertype_setting, &tpid);
+ if (err)
+ return err;
+
+ err = ena_offload(vsi, tpid);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+#define ICE_L2TSEL_QRX_CONTEXT_REG_IDX 3
+#define ICE_L2TSEL_BIT_OFFSET 23
+enum ice_l2tsel {
+ ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND,
+ ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1,
+};
+
+/**
+ * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI
+ * @vsi: VSI used to update l2tsel on
+ * @l2tsel: l2tsel setting requested
+ *
+ * Use the l2tsel setting to update all of the Rx queue context bits for l2tsel.
+ * This will modify which descriptor field the first offloaded VLAN will be
+ * stripped into.
+ */
+static void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 l2tsel_bit;
+ int i;
+
+ if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND)
+ l2tsel_bit = 0;
+ else
+ l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET);
+
+ for (i = 0; i < vsi->alloc_rxq; i++) {
+ u16 pfq = vsi->rxq_map[i];
+ u32 qrx_context_offset;
+ u32 regval;
+
+ qrx_context_offset =
+ QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq);
+
+ regval = rd32(hw, qrx_context_offset);
+ regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
+ regval |= l2tsel_bit;
+ wr32(hw, qrx_context_offset, regval);
+ }
+}
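
The loop above is a per-queue read-modify-write of a single bit: read the queue context word, clear bit 23, OR in the requested l2tsel value and write it back. A sketch of that update, with an array standing in for the register file:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define L2TSEL_BIT      23      /* matches ICE_L2TSEL_BIT_OFFSET above */

static void set_l2tsel(uint32_t *regs, unsigned int q, bool first_tag_in_l2tag1)
{
        uint32_t val = regs[q];                 /* rd32() stand-in */

        val &= ~(1u << L2TSEL_BIT);
        if (first_tag_in_l2tag1)
                val |= 1u << L2TSEL_BIT;
        regs[q] = val;                          /* wr32() stand-in */
}

int main(void)
{
        uint32_t regs[4] = { 0 };

        set_l2tsel(regs, 0, true);
        printf("0x%08x\n", regs[0]);    /* 0x00800000 */
        return 0;
}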
+
+/**
+ * ice_vc_ena_vlan_stripping_v2_msg
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ *
+ * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
+ */
+static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vlan_supported_caps *stripping_support;
+ struct virtchnl_vlan_setting *strip_msg =
+ (struct virtchnl_vlan_setting *)msg;
+ u32 ethertype_setting;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
+ if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ ethertype_setting = strip_msg->outer_ethertype_setting;
+ if (ethertype_setting) {
+ if (ice_vc_ena_vlan_offload(vsi,
+ vsi->outer_vlan_ops.ena_stripping,
+ ethertype_setting)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ } else {
+ enum ice_l2tsel l2tsel =
+ ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND;
+
+ /* PF tells the VF that the outer VLAN tag is always
+ * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
+ * inner is always extracted to
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
+ * support outer stripping so the first tag always ends
+ * up in L2TAG2_2ND and the second/inner tag, if
+ * enabled, is extracted in L2TAG1.
+ */
+ ice_vsi_update_l2tsel(vsi, l2tsel);
+ }
+ }
+
+ ethertype_setting = strip_msg->inner_ethertype_setting;
+ if (ethertype_setting &&
+ ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_stripping,
+ ethertype_setting)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+out:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_dis_vlan_stripping_v2_msg
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ *
+ * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
+ */
+static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vlan_supported_caps *stripping_support;
+ struct virtchnl_vlan_setting *strip_msg =
+ (struct virtchnl_vlan_setting *)msg;
+ u32 ethertype_setting;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
+ if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ ethertype_setting = strip_msg->outer_ethertype_setting;
+ if (ethertype_setting) {
+ if (vsi->outer_vlan_ops.dis_stripping(vsi)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ } else {
+ enum ice_l2tsel l2tsel =
+ ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1;
+
+ /* PF tells the VF that the outer VLAN tag is always
+ * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
+ * inner is always extracted to
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
+ * support inner stripping while outer stripping is
+ * disabled so that the first and only tag is extracted
+ * in L2TAG1.
+ */
+ ice_vsi_update_l2tsel(vsi, l2tsel);
+ }
+ }
+
+ ethertype_setting = strip_msg->inner_ethertype_setting;
+ if (ethertype_setting && vsi->inner_vlan_ops.dis_stripping(vsi)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+out:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_ena_vlan_insertion_v2_msg
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ *
+ * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
+ */
+static int ice_vc_ena_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vlan_supported_caps *insertion_support;
+ struct virtchnl_vlan_setting *insertion_msg =
+ (struct virtchnl_vlan_setting *)msg;
+ u32 ethertype_setting;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
+ if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ ethertype_setting = insertion_msg->outer_ethertype_setting;
+ if (ethertype_setting &&
+ ice_vc_ena_vlan_offload(vsi, vsi->outer_vlan_ops.ena_insertion,
+ ethertype_setting)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ ethertype_setting = insertion_msg->inner_ethertype_setting;
+ if (ethertype_setting &&
+ ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_insertion,
+ ethertype_setting)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+out:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_dis_vlan_insertion_v2_msg
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ *
+ * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
+ */
+static int ice_vc_dis_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vlan_supported_caps *insertion_support;
+ struct virtchnl_vlan_setting *insertion_msg =
+ (struct virtchnl_vlan_setting *)msg;
+ u32 ethertype_setting;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
+ if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ ethertype_setting = insertion_msg->outer_ethertype_setting;
+ if (ethertype_setting && vsi->outer_vlan_ops.dis_insertion(vsi)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ ethertype_setting = insertion_msg->inner_ethertype_setting;
+ if (ethertype_setting && vsi->inner_vlan_ops.dis_insertion(vsi)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+out:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2,
+ v_ret, NULL, 0);
+}
+
+static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.get_ver_msg = ice_vc_get_ver_msg,
.get_vf_res_msg = ice_vc_get_vf_res_msg,
.reset_vf = ice_vc_reset_vf_msg,
@@ -4452,11 +3409,22 @@ static struct ice_vc_vf_ops ice_vc_vf_dflt_ops = {
.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
.add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
.del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
+ .get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
+ .add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
+ .remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
+ .ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
+ .dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
+ .ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
+ .dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
};
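
The table above, together with ice_virtchnl_set_dflt_ops()/ice_virtchnl_set_repr_ops() further down, is the familiar const-ops-table pattern: each VF holds a pointer to a handler table, and switching to representor behaviour is just repointing it. A minimal sketch with invented types and handlers:

#include <stdio.h>

struct vf;

struct virtchnl_ops {
        int (*add_mac)(struct vf *vf);
};

struct vf {
        const struct virtchnl_ops *ops;
};

static int dflt_add_mac(struct vf *vf) { (void)vf; puts("default add_mac"); return 0; }
static int repr_add_mac(struct vf *vf) { (void)vf; puts("representor add_mac"); return 0; }

static const struct virtchnl_ops dflt_ops = { .add_mac = dflt_add_mac };
static const struct virtchnl_ops repr_ops = { .add_mac = repr_add_mac };

int main(void)
{
        struct vf vf = { .ops = &dflt_ops };

        vf.ops->add_mac(&vf);   /* dispatch through the current table */
        vf.ops = &repr_ops;     /* switchdev mode: swap the whole table */
        vf.ops->add_mac(&vf);
        return 0;
}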
-void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops)
+/**
+ * ice_virtchnl_set_dflt_ops - Switch to default virtchnl ops
+ * @vf: the VF to switch ops
+ */
+void ice_virtchnl_set_dflt_ops(struct ice_vf *vf)
{
- *ops = ice_vc_vf_dflt_ops;
+ vf->virtchnl_ops = &ice_virtchnl_dflt_ops;
}
/**
@@ -4589,15 +3557,44 @@ ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg)
NULL, 0);
}
-void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops)
+static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
+ .get_ver_msg = ice_vc_get_ver_msg,
+ .get_vf_res_msg = ice_vc_get_vf_res_msg,
+ .reset_vf = ice_vc_reset_vf_msg,
+ .add_mac_addr_msg = ice_vc_repr_add_mac,
+ .del_mac_addr_msg = ice_vc_repr_del_mac,
+ .cfg_qs_msg = ice_vc_cfg_qs_msg,
+ .ena_qs_msg = ice_vc_ena_qs_msg,
+ .dis_qs_msg = ice_vc_dis_qs_msg,
+ .request_qs_msg = ice_vc_request_qs_msg,
+ .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
+ .config_rss_key = ice_vc_config_rss_key,
+ .config_rss_lut = ice_vc_config_rss_lut,
+ .get_stats_msg = ice_vc_get_stats_msg,
+ .cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode,
+ .add_vlan_msg = ice_vc_repr_add_vlan,
+ .remove_vlan_msg = ice_vc_repr_del_vlan,
+ .ena_vlan_stripping = ice_vc_repr_ena_vlan_stripping,
+ .dis_vlan_stripping = ice_vc_repr_dis_vlan_stripping,
+ .handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
+ .add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
+ .del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
+ .get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
+ .add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
+ .remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
+ .ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
+ .dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
+ .ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
+ .dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
+};
+
+/**
+ * ice_virtchnl_set_repr_ops - Switch to representor virtchnl ops
+ * @vf: the VF to switch ops
+ */
+void ice_virtchnl_set_repr_ops(struct ice_vf *vf)
{
- ops->add_mac_addr_msg = ice_vc_repr_add_mac;
- ops->del_mac_addr_msg = ice_vc_repr_del_mac;
- ops->add_vlan_msg = ice_vc_repr_add_vlan;
- ops->remove_vlan_msg = ice_vc_repr_del_vlan;
- ops->ena_vlan_stripping = ice_vc_repr_ena_vlan_stripping;
- ops->dis_vlan_stripping = ice_vc_repr_dis_vlan_stripping;
- ops->cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode;
+ vf->virtchnl_ops = &ice_virtchnl_repr_ops;
}
/**
@@ -4612,20 +3609,21 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
{
u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
s16 vf_id = le16_to_cpu(event->desc.retval);
+ const struct ice_virtchnl_ops *ops;
u16 msglen = event->msg_len;
- struct ice_vc_vf_ops *ops;
u8 *msg = event->msg_buf;
struct ice_vf *vf = NULL;
struct device *dev;
int err = 0;
dev = ice_pf_to_dev(pf);
- if (ice_validate_vf_id(pf, vf_id)) {
- err = -EINVAL;
- goto error_handler;
- }
- vf = &pf->vf[vf_id];
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf) {
+ dev_err(dev, "Unable to locate VF for message from VF ID %d, opcode %d, len %d\n",
+ vf_id, v_opcode, msglen);
+ return;
+ }
/* Check if VF is disabled. */
if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
@@ -4633,7 +3631,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
goto error_handler;
}
- ops = &vf->vc_ops;
+ ops = vf->virtchnl_ops;
/* Perform basic checks on the msg */
err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
@@ -4648,6 +3646,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
ice_vc_send_msg_to_vf(vf, v_opcode,
VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
0);
+ ice_put_vf(vf);
return;
}
@@ -4657,6 +3656,7 @@ error_handler:
NULL, 0);
dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
vf_id, v_opcode, msglen, err);
+ ice_put_vf(vf);
return;
}
@@ -4666,6 +3666,7 @@ error_handler:
if (!mutex_trylock(&vf->cfg_lock)) {
dev_info(dev, "VF %u is being configured in another context that will trigger a VFR, so there is no need to handle this message\n",
vf->vf_id);
+ ice_put_vf(vf);
return;
}
@@ -4676,7 +3677,7 @@ error_handler:
case VIRTCHNL_OP_GET_VF_RESOURCES:
err = ops->get_vf_res_msg(vf, msg);
if (ice_vf_init_vlan_stripping(vf))
- dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
+ dev_dbg(dev, "Failed to initialize VLAN stripping for VF %d\n",
vf->vf_id);
ice_vc_notify_vf_link_state(vf);
break;
@@ -4741,6 +3742,27 @@ error_handler:
case VIRTCHNL_OP_DEL_RSS_CFG:
err = ops->handle_rss_cfg_msg(vf, msg, false);
break;
+ case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
+ err = ops->get_offload_vlan_v2_caps(vf);
+ break;
+ case VIRTCHNL_OP_ADD_VLAN_V2:
+ err = ops->add_vlan_v2_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_VLAN_V2:
+ err = ops->remove_vlan_v2_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
+ err = ops->ena_vlan_stripping_v2_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
+ err = ops->dis_vlan_stripping_v2_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
+ err = ops->ena_vlan_insertion_v2_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
+ err = ops->dis_vlan_insertion_v2_msg(vf, msg);
+ break;
case VIRTCHNL_OP_UNKNOWN:
default:
dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
@@ -4759,549 +3781,5 @@ error_handler:
}
mutex_unlock(&vf->cfg_lock);
-}
-
-/**
- * ice_get_vf_cfg
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @ivi: VF configuration structure
- *
- * return VF configuration
- */
-int
-ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vf *vf;
-
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
-
- if (ice_check_vf_init(pf, vf))
- return -EBUSY;
-
- ivi->vf = vf_id;
- ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr);
-
- /* VF configuration for VLAN and applicable QoS */
- ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
- ivi->qos = (vf->port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
-
- ivi->trusted = vf->trusted;
- ivi->spoofchk = vf->spoofchk;
- if (!vf->link_forced)
- ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
- else if (vf->link_up)
- ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
- else
- ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
- ivi->max_tx_rate = vf->max_tx_rate;
- ivi->min_tx_rate = vf->min_tx_rate;
- return 0;
-}
-
-/**
- * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
- * @pf: PF used to reference the switch's rules
- * @umac: unicast MAC to compare against existing switch rules
- *
- * Return true on the first/any match, else return false
- */
-static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
-{
- struct ice_sw_recipe *mac_recipe_list =
- &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
- struct ice_fltr_mgmt_list_entry *list_itr;
- struct list_head *rule_head;
- struct mutex *rule_lock; /* protect MAC filter list access */
-
- rule_head = &mac_recipe_list->filt_rules;
- rule_lock = &mac_recipe_list->filt_rule_lock;
-
- mutex_lock(rule_lock);
- list_for_each_entry(list_itr, rule_head, list_entry) {
- u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
-
- if (ether_addr_equal(existing_mac, umac)) {
- mutex_unlock(rule_lock);
- return true;
- }
- }
-
- mutex_unlock(rule_lock);
-
- return false;
-}
-
-/**
- * ice_set_vf_mac
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @mac: MAC address
- *
- * program VF MAC address
- */
-int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vf *vf;
- int ret;
-
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- if (is_multicast_ether_addr(mac)) {
- netdev_err(netdev, "%pM not a valid unicast address\n", mac);
- return -EINVAL;
- }
-
- vf = &pf->vf[vf_id];
- /* nothing left to do, unicast MAC already set */
- if (ether_addr_equal(vf->dev_lan_addr.addr, mac) &&
- ether_addr_equal(vf->hw_lan_addr.addr, mac))
- return 0;
-
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
-
- if (ice_unicast_mac_exists(pf, mac)) {
- netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
- mac, vf_id, mac);
- return -EINVAL;
- }
-
- mutex_lock(&vf->cfg_lock);
-
- /* VF is notified of its new MAC via the PF's response to the
- * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
- */
- ether_addr_copy(vf->dev_lan_addr.addr, mac);
- ether_addr_copy(vf->hw_lan_addr.addr, mac);
- if (is_zero_ether_addr(mac)) {
- /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
- vf->pf_set_mac = false;
- netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
- vf->vf_id);
- } else {
- /* PF will add MAC rule for the VF */
- vf->pf_set_mac = true;
- netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
- mac, vf_id);
- }
-
- ice_vc_reset_vf(vf);
- mutex_unlock(&vf->cfg_lock);
- return 0;
-}
-
-/**
- * ice_set_vf_trust
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @trusted: Boolean value to enable/disable trusted VF
- *
- * Enable or disable a given VF as trusted
- */
-int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vf *vf;
- int ret;
-
- if (ice_is_eswitch_mode_switchdev(pf)) {
- dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
- return -EOPNOTSUPP;
- }
-
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
-
- /* Check if already trusted */
- if (trusted == vf->trusted)
- return 0;
-
- mutex_lock(&vf->cfg_lock);
-
- vf->trusted = trusted;
- ice_vc_reset_vf(vf);
- dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
- vf_id, trusted ? "" : "un");
-
- mutex_unlock(&vf->cfg_lock);
-
- return 0;
-}
-
-/**
- * ice_set_vf_link_state
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @link_state: required link state
- *
- * Set VF's link state, irrespective of physical link state status
- */
-int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vf *vf;
- int ret;
-
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
-
- switch (link_state) {
- case IFLA_VF_LINK_STATE_AUTO:
- vf->link_forced = false;
- break;
- case IFLA_VF_LINK_STATE_ENABLE:
- vf->link_forced = true;
- vf->link_up = true;
- break;
- case IFLA_VF_LINK_STATE_DISABLE:
- vf->link_forced = true;
- vf->link_up = false;
- break;
- default:
- return -EINVAL;
- }
-
- ice_vc_notify_vf_link_state(vf);
-
- return 0;
-}
-
-/**
- * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
- * @pf: PF associated with VFs
- */
-static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
-{
- int rate = 0, i;
-
- ice_for_each_vf(pf, i)
- rate += pf->vf[i].min_tx_rate;
-
- return rate;
-}
-
-/**
- * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
- * @vf: VF trying to configure min_tx_rate
- * @min_tx_rate: min Tx rate in Mbps
- *
- * Check if the min_tx_rate being passed in will cause oversubscription of total
- * min_tx_rate based on the current link speed and all other VFs configured
- * min_tx_rate
- *
- * Return true if the passed min_tx_rate would cause oversubscription, else
- * return false
- */
-static bool
-ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
-{
- int link_speed_mbps = ice_get_link_speed_mbps(ice_get_vf_vsi(vf));
- int all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);
-
- /* this VF's previous rate is being overwritten */
- all_vfs_min_tx_rate -= vf->min_tx_rate;
-
- if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) {
- dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n",
- min_tx_rate, vf->vf_id,
- all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps,
- link_speed_mbps);
- return true;
- }
-
- return false;
-}
-
-/**
- * ice_set_vf_bw - set min/max VF bandwidth
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @min_tx_rate: Minimum Tx rate in Mbps
- * @max_tx_rate: Maximum Tx rate in Mbps
- */
-int
-ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
- int max_tx_rate)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vsi *vsi;
- struct device *dev;
- struct ice_vf *vf;
- int ret;
-
- dev = ice_pf_to_dev(pf);
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
-
- vsi = ice_get_vf_vsi(vf);
-
- /* when max_tx_rate is zero that means no max Tx rate limiting, so only
- * check if max_tx_rate is non-zero
- */
- if (max_tx_rate && min_tx_rate > max_tx_rate) {
- dev_err(dev, "Cannot set min Tx rate %d Mbps greater than max Tx rate %d Mbps\n",
- min_tx_rate, max_tx_rate);
- return -EINVAL;
- }
-
- if (min_tx_rate && ice_is_dcb_active(pf)) {
- dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
- return -EOPNOTSUPP;
- }
-
- if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate))
- return -EINVAL;
-
- if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
- ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
- if (ret) {
- dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
- vf->vf_id);
- return ret;
- }
-
- vf->min_tx_rate = min_tx_rate;
- }
-
- if (vf->max_tx_rate != (unsigned int)max_tx_rate) {
- ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000);
- if (ret) {
- dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
- vf->vf_id);
- return ret;
- }
-
- vf->max_tx_rate = max_tx_rate;
- }
-
- return 0;
-}
-
-/**
- * ice_get_vf_stats - populate some stats for the VF
- * @netdev: the netdev of the PF
- * @vf_id: the host OS identifier (0-255)
- * @vf_stats: pointer to the OS memory to be initialized
- */
-int ice_get_vf_stats(struct net_device *netdev, int vf_id,
- struct ifla_vf_stats *vf_stats)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_eth_stats *stats;
- struct ice_vsi *vsi;
- struct ice_vf *vf;
- int ret;
-
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi)
- return -EINVAL;
-
- ice_update_eth_stats(vsi);
- stats = &vsi->eth_stats;
-
- memset(vf_stats, 0, sizeof(*vf_stats));
-
- vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
- stats->rx_multicast;
- vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
- stats->tx_multicast;
- vf_stats->rx_bytes = stats->rx_bytes;
- vf_stats->tx_bytes = stats->tx_bytes;
- vf_stats->broadcast = stats->rx_broadcast;
- vf_stats->multicast = stats->rx_multicast;
- vf_stats->rx_dropped = stats->rx_discards;
- vf_stats->tx_dropped = stats->tx_discards;
-
- return 0;
-}
-
-/**
- * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
- * @vf: pointer to the VF structure
- */
-void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
- struct device *dev;
-
- dev = ice_pf_to_dev(pf);
-
- dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
- vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
- vf->dev_lan_addr.addr,
- test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
- ? "on" : "off");
-}
-
-/**
- * ice_print_vfs_mdd_events - print VFs malicious driver detect event
- * @pf: pointer to the PF structure
- *
- * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
- */
-void ice_print_vfs_mdd_events(struct ice_pf *pf)
-{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- int i;
-
- /* check that there are pending MDD events to print */
- if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
- return;
-
- /* VF MDD event logs are rate limited to one second intervals */
- if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
- return;
-
- pf->last_printed_mdd_jiffies = jiffies;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- /* only print Rx MDD event message if there are new events */
- if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
- vf->mdd_rx_events.last_printed =
- vf->mdd_rx_events.count;
- ice_print_vf_rx_mdd_event(vf);
- }
-
- /* only print Tx MDD event message if there are new events */
- if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
- vf->mdd_tx_events.last_printed =
- vf->mdd_tx_events.count;
-
- dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
- vf->mdd_tx_events.count, hw->pf_id, i,
- vf->dev_lan_addr.addr);
- }
- }
-}
-
-/**
- * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
- * @pdev: pointer to a pci_dev structure
- *
- * Called when recovering from a PF FLR to restore interrupt capability to
- * the VFs.
- */
-void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
-{
- u16 vf_id;
- int pos;
-
- if (!pci_num_vf(pdev))
- return;
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
- if (pos) {
- struct pci_dev *vfdev;
-
- pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
- &vf_id);
- vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
- while (vfdev) {
- if (vfdev->is_virtfn && vfdev->physfn == pdev)
- pci_restore_msi_state(vfdev);
- vfdev = pci_get_device(pdev->vendor, vf_id,
- vfdev);
- }
- }
-}
-
-/**
- * ice_is_malicious_vf - helper function to detect a malicious VF
- * @pf: ptr to struct ice_pf
- * @event: pointer to the AQ event
- * @num_msg_proc: the number of messages processed so far
- * @num_msg_pending: the number of messages peinding in admin queue
- */
-bool
-ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
- u16 num_msg_proc, u16 num_msg_pending)
-{
- s16 vf_id = le16_to_cpu(event->desc.retval);
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_mbx_data mbxdata;
- bool malvf = false;
- struct ice_vf *vf;
- int status;
-
- if (ice_validate_vf_id(pf, vf_id))
- return false;
-
- vf = &pf->vf[vf_id];
- /* Check if VF is disabled. */
- if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
- return false;
-
- mbxdata.num_msg_proc = num_msg_proc;
- mbxdata.num_pending_arq = num_msg_pending;
- mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries;
-#define ICE_MBX_OVERFLOW_WATERMARK 64
- mbxdata.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
-
- /* check to see if we have a malicious VF */
- status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf);
- if (status)
- return false;
-
- if (malvf) {
- bool report_vf = false;
-
- /* if the VF is malicious and we haven't let the user
- * know about it, then let them know now
- */
- status = ice_mbx_report_malvf(&pf->hw, pf->malvfs,
- ICE_MAX_VF_COUNT, vf_id,
- &report_vf);
- if (status)
- dev_dbg(dev, "Error reporting malicious VF\n");
-
- if (report_vf) {
- struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
-
- if (pf_vsi)
- dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
- &vf->dev_lan_addr.addr[0],
- pf_vsi->netdev->dev_addr);
- }
-
- return true;
- }
-
- /* if there was an error in detection or the VF is not malicious then
- * return false
- */
- return false;
+ ice_put_vf(vf);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
new file mode 100644
index 000000000000..b5a3fd8adbb4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022, Intel Corporation. */
+
+#ifndef _ICE_VIRTCHNL_H_
+#define _ICE_VIRTCHNL_H_
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/if_ether.h>
+#include <linux/avf/virtchnl.h>
+#include "ice_vf_lib.h"
+
+/* Restrict the number of MAC addresses and VLANs that a non-trusted VF can program */
+#define ICE_MAX_VLAN_PER_VF 8
+
+/* MAC filters: 1 is reserved for the VF's default/perm_addr/LAA MAC, 1 for
+ * broadcast, and 16 for additional unicast/multicast filters
+ */
+#define ICE_MAX_MACADDR_PER_VF 18
+
+struct ice_virtchnl_ops {
+ int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
+ int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg);
+ void (*reset_vf)(struct ice_vf *vf);
+ int (*add_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*del_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*ena_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*dis_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*request_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg);
+ int (*config_rss_key)(struct ice_vf *vf, u8 *msg);
+ int (*config_rss_lut)(struct ice_vf *vf, u8 *msg);
+ int (*get_stats_msg)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg);
+ int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);
+ int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg);
+ int (*ena_vlan_stripping)(struct ice_vf *vf);
+ int (*dis_vlan_stripping)(struct ice_vf *vf);
+ int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add);
+ int (*add_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*del_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*get_offload_vlan_v2_caps)(struct ice_vf *vf);
+ int (*add_vlan_v2_msg)(struct ice_vf *vf, u8 *msg);
+ int (*remove_vlan_v2_msg)(struct ice_vf *vf, u8 *msg);
+ int (*ena_vlan_stripping_v2_msg)(struct ice_vf *vf, u8 *msg);
+ int (*dis_vlan_stripping_v2_msg)(struct ice_vf *vf, u8 *msg);
+ int (*ena_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg);
+ int (*dis_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg);
+};
+
+#ifdef CONFIG_PCI_IOV
+void ice_virtchnl_set_dflt_ops(struct ice_vf *vf);
+void ice_virtchnl_set_repr_ops(struct ice_vf *vf);
+void ice_vc_notify_vf_link_state(struct ice_vf *vf);
+void ice_vc_notify_link_state(struct ice_pf *pf);
+void ice_vc_notify_reset(struct ice_pf *pf);
+int
+ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
+ enum virtchnl_status_code v_retval, u8 *msg, u16 msglen);
+bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id);
+#else /* CONFIG_PCI_IOV */
+static inline void ice_virtchnl_set_dflt_ops(struct ice_vf *vf) { }
+static inline void ice_virtchnl_set_repr_ops(struct ice_vf *vf) { }
+static inline void ice_vc_notify_vf_link_state(struct ice_vf *vf) { }
+static inline void ice_vc_notify_link_state(struct ice_pf *pf) { }
+static inline void ice_vc_notify_reset(struct ice_pf *pf) { }
+
+static inline int
+ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
+ enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
+{
+ return false;
+}
+#endif /* !CONFIG_PCI_IOV */
+
+#endif /* _ICE_VIRTCHNL_H_ */
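To make the indirection above concrete, here is a minimal sketch of how the two ops tables are meant to be used; the example_* helpers and the switchdev flag are assumptions for illustration, while ice_virtchnl_set_dflt_ops(), ice_virtchnl_set_repr_ops() and the vf->virtchnl_ops pointer come from this patch.

/* Illustrative only: the example_* helpers are not part of the driver. */
static void example_install_ops(struct ice_vf *vf, bool switchdev)
{
	if (switchdev)
		ice_virtchnl_set_repr_ops(vf);	/* MAC/VLAN/promisc handled as port representor */
	else
		ice_virtchnl_set_dflt_ops(vf);	/* full legacy virtchnl behavior */
}

static void example_handle_reset(struct ice_vf *vf)
{
	const struct ice_virtchnl_ops *ops = vf->virtchnl_ops;

	ops->reset_vf(vf);	/* dispatch through whichever table is installed */
}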
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
index 9feebe5f556c..5a82216e7d03 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
@@ -55,6 +55,15 @@ static const u32 vlan_allowlist_opcodes[] = {
VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
};
+/* VIRTCHNL_VF_OFFLOAD_VLAN_V2 */
+static const u32 vlan_v2_allowlist_opcodes[] = {
+ VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS, VIRTCHNL_OP_ADD_VLAN_V2,
+ VIRTCHNL_OP_DEL_VLAN_V2, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2,
+ VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2,
+ VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2,
+};
+
/* VIRTCHNL_VF_OFFLOAD_RSS_PF */
static const u32 rss_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_CONFIG_RSS_KEY, VIRTCHNL_OP_CONFIG_RSS_LUT,
@@ -89,6 +98,7 @@ static const struct allowlist_opcode_info allowlist_opcodes[] = {
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_RSS_PF, rss_pf_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF, adv_rss_pf_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_FDIR_PF, fdir_pf_allowlist_opcodes),
+ ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_VLAN_V2, vlan_v2_allowlist_opcodes),
};
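For context, a rough sketch of how an allowlist entry like the new VLAN_V2 item is consumed: opcodes whose VIRTCHNL_VF_OFFLOAD_* capability was negotiated get their bit set in the VF's opcodes_allowlist bitmap, and anything else is rejected before dispatch. Only the bitmap and VIRTCHNL_OP_MAX are taken from the driver; the helper name below is illustrative.

/* Illustrative sketch; example_opcode_allowed() is not a real driver function. */
static bool example_opcode_allowed(struct ice_vf *vf, u32 v_opcode)
{
	if (v_opcode >= VIRTCHNL_OP_MAX)
		return false;

	/* bit is set when the matching VIRTCHNL_VF_OFFLOAD_* capability is negotiated */
	return test_bit(v_opcode, vf->opcodes_allowlist);
}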
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index d64df81d4893..8e38ee2faf58 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -5,6 +5,7 @@
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_flow.h"
+#include "ice_vf_lib_private.h"
#define to_fltr_conf_from_desc(p) \
container_of(p, struct virtchnl_fdir_fltr_conf, input)
@@ -1288,15 +1289,16 @@ ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
union ice_32b_rx_flex_desc *rx_desc)
{
struct ice_pf *pf = ctrl_vsi->back;
+ struct ice_vf *vf = ctrl_vsi->vf;
struct ice_vf_fdir_ctx *ctx_done;
struct ice_vf_fdir_ctx *ctx_irq;
struct ice_vf_fdir *fdir;
unsigned long flags;
struct device *dev;
- struct ice_vf *vf;
int ret;
- vf = &pf->vf[ctrl_vsi->vf_id];
+ if (WARN_ON(!vf))
+ return;
fdir = &vf->fdir;
ctx_done = &fdir->ctx_done;
@@ -1571,15 +1573,16 @@ err_exit:
*/
void ice_flush_fdir_ctx(struct ice_pf *pf)
{
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
return;
- ice_for_each_vf(pf, i) {
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
struct device *dev = ice_pf_to_dev(pf);
enum virtchnl_fdir_prgm_status status;
- struct ice_vf *vf = &pf->vf[i];
struct ice_vf_fdir_ctx *ctx;
unsigned long flags;
int ret;
@@ -1633,6 +1636,7 @@ err_exit:
ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
}
+ mutex_unlock(&pf->vfs.table_lock);
}
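The conversion above follows the pattern this series uses throughout: either iterate over all VFs while holding pf->vfs.table_lock, or look up a single VF by ID and drop the reference when done. A condensed sketch of both access patterns, with error handling elided and work() standing in for the per-VF processing:

/* Sketch only; work() is a placeholder, not a real driver function. */
static void example_for_all_vfs(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf)
		work(vf);
	mutex_unlock(&pf->vfs.table_lock);
}

static void example_one_vf(struct ice_pf *pf, u16 vf_id)
{
	struct ice_vf *vf = ice_get_vf_by_id(pf, vf_id);

	if (!vf)
		return;
	work(vf);
	ice_put_vf(vf);	/* release the reference taken by ice_get_vf_by_id() */
}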
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
index f4e629f4c09b..c5bcc8d7481c 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
@@ -6,6 +6,7 @@
struct ice_vf;
struct ice_pf;
+struct ice_vsi;
enum ice_fdir_ctx_stat {
ICE_FDIR_CTX_READY,
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
deleted file mode 100644
index 8f27255cc0cc..000000000000
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ /dev/null
@@ -1,343 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018, Intel Corporation. */
-
-#ifndef _ICE_VIRTCHNL_PF_H_
-#define _ICE_VIRTCHNL_PF_H_
-#include "ice.h"
-#include "ice_virtchnl_fdir.h"
-
-/* Restrict number of MAC Addr and VLAN that non-trusted VF can programmed */
-#define ICE_MAX_VLAN_PER_VF 8
-/* MAC filters: 1 is reserved for the VF's default/perm_addr/LAA MAC, 1 for
- * broadcast, and 16 for additional unicast/multicast filters
- */
-#define ICE_MAX_MACADDR_PER_VF 18
-
-/* Malicious Driver Detection */
-#define ICE_MDD_EVENTS_THRESHOLD 30
-
-/* Static VF transaction/status register def */
-#define VF_DEVICE_STATUS 0xAA
-#define VF_TRANS_PENDING_M 0x20
-
-/* wait defines for polling PF_PCI_CIAD register status */
-#define ICE_PCI_CIAD_WAIT_COUNT 100
-#define ICE_PCI_CIAD_WAIT_DELAY_US 1
-
-/* VF resource constraints */
-#define ICE_MAX_VF_COUNT 256
-#define ICE_MIN_QS_PER_VF 1
-#define ICE_NONQ_VECS_VF 1
-#define ICE_MAX_SCATTER_QS_PER_VF 16
-#define ICE_MAX_RSS_QS_PER_VF 16
-#define ICE_NUM_VF_MSIX_MED 17
-#define ICE_NUM_VF_MSIX_SMALL 5
-#define ICE_NUM_VF_MSIX_MULTIQ_MIN 3
-#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
-#define ICE_MAX_VF_RESET_TRIES 40
-#define ICE_MAX_VF_RESET_SLEEP_MS 20
-
-#define ice_for_each_vf(pf, i) \
- for ((i) = 0; (i) < (pf)->num_alloc_vfs; (i)++)
-
-/* Specific VF states */
-enum ice_vf_states {
- ICE_VF_STATE_INIT = 0, /* PF is initializing VF */
- ICE_VF_STATE_ACTIVE, /* VF resources are allocated for use */
- ICE_VF_STATE_QS_ENA, /* VF queue(s) enabled */
- ICE_VF_STATE_DIS,
- ICE_VF_STATE_MC_PROMISC,
- ICE_VF_STATE_UC_PROMISC,
- ICE_VF_STATES_NBITS
-};
-
-/* VF capabilities */
-enum ice_virtchnl_cap {
- ICE_VIRTCHNL_VF_CAP_L2 = 0,
- ICE_VIRTCHNL_VF_CAP_PRIVILEGE,
-};
-
-struct ice_time_mac {
- unsigned long time_modified;
- u8 addr[ETH_ALEN];
-};
-
-/* VF MDD events print structure */
-struct ice_mdd_vf_events {
- u16 count; /* total count of Rx|Tx events */
- /* count number of the last printed event */
- u16 last_printed;
-};
-
-struct ice_vf;
-
-struct ice_vc_vf_ops {
- int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
- int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg);
- void (*reset_vf)(struct ice_vf *vf);
- int (*add_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
- int (*del_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
- int (*cfg_qs_msg)(struct ice_vf *vf, u8 *msg);
- int (*ena_qs_msg)(struct ice_vf *vf, u8 *msg);
- int (*dis_qs_msg)(struct ice_vf *vf, u8 *msg);
- int (*request_qs_msg)(struct ice_vf *vf, u8 *msg);
- int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg);
- int (*config_rss_key)(struct ice_vf *vf, u8 *msg);
- int (*config_rss_lut)(struct ice_vf *vf, u8 *msg);
- int (*get_stats_msg)(struct ice_vf *vf, u8 *msg);
- int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg);
- int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);
- int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg);
- int (*ena_vlan_stripping)(struct ice_vf *vf);
- int (*dis_vlan_stripping)(struct ice_vf *vf);
- int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add);
- int (*add_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
- int (*del_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
-};
-
-/* VF information structure */
-struct ice_vf {
- struct ice_pf *pf;
-
- /* Used during virtchnl message handling and NDO ops against the VF
- * that will trigger a VFR
- */
- struct mutex cfg_lock;
-
- u16 vf_id; /* VF ID in the PF space */
- u16 lan_vsi_idx; /* index into PF struct */
- u16 ctrl_vsi_idx;
- struct ice_vf_fdir fdir;
- /* first vector index of this VF in the PF space */
- int first_vector_idx;
- struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
- struct virtchnl_version_info vf_ver;
- u32 driver_caps; /* reported by VF driver */
- struct virtchnl_ether_addr dev_lan_addr;
- struct virtchnl_ether_addr hw_lan_addr;
- struct ice_time_mac legacy_last_added_umac;
- DECLARE_BITMAP(txq_ena, ICE_MAX_RSS_QS_PER_VF);
- DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF);
- u16 port_vlan_info; /* Port VLAN ID and QoS */
- u8 pf_set_mac:1; /* VF MAC address set by VMM admin */
- u8 trusted:1;
- u8 spoofchk:1;
- u8 link_forced:1;
- u8 link_up:1; /* only valid if VF link is forced */
- /* VSI indices - actual VSI pointers are maintained in the PF structure
- * When assigned, these will be non-zero, because VSI 0 is always
- * the main LAN VSI for the PF.
- */
- u16 lan_vsi_num; /* ID as used by firmware */
- unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */
- unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
- DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
-
- unsigned long vf_caps; /* VF's adv. capabilities */
- u8 num_req_qs; /* num of queue pairs requested by VF */
- u16 num_mac;
- u16 num_vf_qs; /* num of queue configured per VF */
- struct ice_mdd_vf_events mdd_rx_events;
- struct ice_mdd_vf_events mdd_tx_events;
- DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX);
-
- struct ice_repr *repr;
-
- struct ice_vc_vf_ops vc_ops;
-
- /* devlink port data */
- struct devlink_port devlink_port;
-};
-
-#ifdef CONFIG_PCI_IOV
-struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
-void ice_process_vflr_event(struct ice_pf *pf);
-int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
-int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
-int
-ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi);
-
-void ice_free_vfs(struct ice_pf *pf);
-void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
-void ice_vc_notify_link_state(struct ice_pf *pf);
-void ice_vc_notify_reset(struct ice_pf *pf);
-void ice_vc_notify_vf_link_state(struct ice_vf *vf);
-void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops);
-void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops);
-bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
-bool ice_reset_vf(struct ice_vf *vf, bool is_vflr);
-void ice_restore_all_vfs_msi_state(struct pci_dev *pdev);
-bool
-ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
- u16 num_msg_proc, u16 num_msg_pending);
-
-int
-ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
- __be16 vlan_proto);
-
-int
-ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
- int max_tx_rate);
-
-int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
-
-int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
-
-int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
-
-bool ice_is_vf_disabled(struct ice_vf *vf);
-
-int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
-
-int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
-
-void ice_set_vf_state_qs_dis(struct ice_vf *vf);
-int
-ice_get_vf_stats(struct net_device *netdev, int vf_id,
- struct ifla_vf_stats *vf_stats);
-bool ice_is_any_vf_in_promisc(struct ice_pf *pf);
-void
-ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
-void ice_print_vfs_mdd_events(struct ice_pf *pf);
-void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
-bool
-ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto);
-struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf);
-int
-ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
- enum virtchnl_status_code v_retval, u8 *msg, u16 msglen);
-bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id);
-#else /* CONFIG_PCI_IOV */
-static inline void ice_process_vflr_event(struct ice_pf *pf) { }
-static inline void ice_free_vfs(struct ice_pf *pf) { }
-static inline
-void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) { }
-static inline void ice_vc_notify_link_state(struct ice_pf *pf) { }
-static inline void ice_vc_notify_reset(struct ice_pf *pf) { }
-static inline void ice_vc_notify_vf_link_state(struct ice_vf *vf) { }
-static inline void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops) { }
-static inline void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops) { }
-static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf) { }
-static inline
-void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { }
-static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { }
-static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { }
-static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { }
-
-static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
-{
- return -EOPNOTSUPP;
-}
-
-static inline bool ice_is_vf_disabled(struct ice_vf *vf)
-{
- return true;
-}
-
-static inline struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
-{
- return NULL;
-}
-
-static inline bool
-ice_is_malicious_vf(struct ice_pf __always_unused *pf,
- struct ice_rq_event_info __always_unused *event,
- u16 __always_unused num_msg_proc,
- u16 __always_unused num_msg_pending)
-{
- return false;
-}
-
-static inline bool
-ice_reset_all_vfs(struct ice_pf __always_unused *pf,
- bool __always_unused is_vflr)
-{
- return true;
-}
-
-static inline bool
-ice_reset_vf(struct ice_vf __always_unused *vf, bool __always_unused is_vflr)
-{
- return true;
-}
-
-static inline int
-ice_sriov_configure(struct pci_dev __always_unused *pdev,
- int __always_unused num_vfs)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_mac(struct net_device __always_unused *netdev,
- int __always_unused vf_id, u8 __always_unused *mac)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_get_vf_cfg(struct net_device __always_unused *netdev,
- int __always_unused vf_id,
- struct ifla_vf_info __always_unused *ivi)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_trust(struct net_device __always_unused *netdev,
- int __always_unused vf_id, bool __always_unused trusted)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_port_vlan(struct net_device __always_unused *netdev,
- int __always_unused vf_id, u16 __always_unused vid,
- u8 __always_unused qos, __be16 __always_unused v_proto)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_spoofchk(struct net_device __always_unused *netdev,
- int __always_unused vf_id, bool __always_unused ena)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_link_state(struct net_device __always_unused *netdev,
- int __always_unused vf_id, int __always_unused link_state)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_bw(struct net_device __always_unused *netdev,
- int __always_unused vf_id, int __always_unused min_tx_rate,
- int __always_unused max_tx_rate)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf,
- struct ice_q_vector __always_unused *q_vector)
-{
- return 0;
-}
-
-static inline int
-ice_get_vf_stats(struct net_device __always_unused *netdev,
- int __always_unused vf_id,
- struct ifla_vf_stats __always_unused *vf_stats)
-{
- return -EOPNOTSUPP;
-}
-
-static inline bool ice_is_any_vf_in_promisc(struct ice_pf __always_unused *pf)
-{
- return false;
-}
-#endif /* CONFIG_PCI_IOV */
-#endif /* _ICE_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vlan.h b/drivers/net/ethernet/intel/ice/ice_vlan.h
new file mode 100644
index 000000000000..bc4550a03173
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vlan.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_VLAN_H_
+#define _ICE_VLAN_H_
+
+#include <linux/types.h>
+#include "ice_type.h"
+
+struct ice_vlan {
+ u16 tpid;
+ u16 vid;
+ u8 prio;
+};
+
+#define ICE_VLAN(tpid, vid, prio) ((struct ice_vlan){ tpid, vid, prio })
+
+#endif /* _ICE_VLAN_H_ */
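The compound-literal macro is intended to make call sites read naturally. A minimal usage sketch follows; the VSI pointer and the VID/priority values are placeholders, and ice_vsi_add_vlan() is the default add implementation introduced later in this series.

/* Sketch only: vsi and the VLAN values are placeholders. */
static int example_add_ctag_100(struct ice_vsi *vsi)
{
	struct ice_vlan vlan = ICE_VLAN(ETH_P_8021Q, 100, 0);

	return ice_vsi_add_vlan(vsi, &vlan);	/* default add path from ice_vsi_vlan_lib.c */
}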
diff --git a/drivers/net/ethernet/intel/ice/ice_vlan_mode.c b/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
new file mode 100644
index 000000000000..1b618de592b7
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
@@ -0,0 +1,439 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice_common.h"
+
+/**
+ * ice_pkg_get_supported_vlan_mode - determine if DDP supports Double VLAN mode
+ * @hw: pointer to the HW struct
+ * @dvm: output variable to determine if DDP supports DVM (true) or SVM (false)
+ */
+static int
+ice_pkg_get_supported_vlan_mode(struct ice_hw *hw, bool *dvm)
+{
+ u16 meta_init_size = sizeof(struct ice_meta_init_section);
+ struct ice_meta_init_section *sect;
+ struct ice_buf_build *bld;
+ int status;
+
+ /* if anything fails, we assume there is no DVM support */
+ *dvm = false;
+
+ bld = ice_pkg_buf_alloc_single_section(hw,
+ ICE_SID_RXPARSER_METADATA_INIT,
+ meta_init_size, (void **)&sect);
+ if (!bld)
+ return -ENOMEM;
+
+ /* only need to read a single section */
+ sect->count = cpu_to_le16(1);
+ sect->offset = cpu_to_le16(ICE_META_VLAN_MODE_ENTRY);
+
+ status = ice_aq_upload_section(hw,
+ (struct ice_buf_hdr *)ice_pkg_buf(bld),
+ ICE_PKG_BUF_SIZE, NULL);
+ if (!status) {
+ DECLARE_BITMAP(entry, ICE_META_INIT_BITS);
+ u32 arr[ICE_META_INIT_DW_CNT];
+ u16 i;
+
+ /* convert to host bitmap format */
+ for (i = 0; i < ICE_META_INIT_DW_CNT; i++)
+ arr[i] = le32_to_cpu(sect->entry.bm[i]);
+
+ bitmap_from_arr32(entry, arr, (u16)ICE_META_INIT_BITS);
+
+ /* check if DVM is supported */
+ *dvm = test_bit(ICE_META_VLAN_MODE_BIT, entry);
+ }
+
+ ice_pkg_buf_free(hw, bld);
+
+ return status;
+}
+
+/**
+ * ice_aq_get_vlan_mode - get the VLAN mode of the device
+ * @hw: pointer to the HW structure
+ * @get_params: structure FW fills in based on the current VLAN mode config
+ *
+ * Get VLAN Mode Parameters (0x020D)
+ */
+static int
+ice_aq_get_vlan_mode(struct ice_hw *hw,
+ struct ice_aqc_get_vlan_mode *get_params)
+{
+ struct ice_aq_desc desc;
+
+ if (!get_params)
+ return -EINVAL;
+
+ ice_fill_dflt_direct_cmd_desc(&desc,
+ ice_aqc_opc_get_vlan_mode_parameters);
+
+ return ice_aq_send_cmd(hw, &desc, get_params, sizeof(*get_params),
+ NULL);
+}
+
+/**
+ * ice_aq_is_dvm_ena - query FW to check if double VLAN mode is enabled
+ * @hw: pointer to the HW structure
+ *
+ * Return true if the hardware/firmware is configured in double VLAN mode,
+ * and false if it is configured in single VLAN mode.
+ *
+ * Also return false if this call fails for any reason (e.g. the firmware
+ * doesn't support this AQ call).
+ */
+static bool ice_aq_is_dvm_ena(struct ice_hw *hw)
+{
+ struct ice_aqc_get_vlan_mode get_params = { 0 };
+ int status;
+
+ status = ice_aq_get_vlan_mode(hw, &get_params);
+ if (status) {
+ ice_debug(hw, ICE_DBG_AQ, "Failed to get VLAN mode, status %d\n",
+ status);
+ return false;
+ }
+
+ return (get_params.vlan_mode & ICE_AQ_VLAN_MODE_DVM_ENA);
+}
+
+/**
+ * ice_is_dvm_ena - check if double VLAN mode is enabled
+ * @hw: pointer to the HW structure
+ *
+ * The device is configured in single or double VLAN mode on initialization and
+ * this cannot be dynamically changed during runtime. Based on this there is no
+ * need to make an AQ call every time the driver needs to know the VLAN mode.
+ * Instead, use the cached VLAN mode.
+ */
+bool ice_is_dvm_ena(struct ice_hw *hw)
+{
+ return hw->dvm_ena;
+}
+
+/**
+ * ice_cache_vlan_mode - cache VLAN mode after DDP is downloaded
+ * @hw: pointer to the HW structure
+ *
+ * This is only called after downloading the DDP and after the global
+ * configuration lock has been released because all ports on a device need to
+ * cache the VLAN mode.
+ */
+static void ice_cache_vlan_mode(struct ice_hw *hw)
+{
+ hw->dvm_ena = ice_aq_is_dvm_ena(hw);
+}
+
+/**
+ * ice_pkg_supports_dvm - find out if DDP supports DVM
+ * @hw: pointer to the HW structure
+ */
+static bool ice_pkg_supports_dvm(struct ice_hw *hw)
+{
+ bool pkg_supports_dvm;
+ int status;
+
+ status = ice_pkg_get_supported_vlan_mode(hw, &pkg_supports_dvm);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG, "Failed to get supported VLAN mode, status %d\n",
+ status);
+ return false;
+ }
+
+ return pkg_supports_dvm;
+}
+
+/**
+ * ice_fw_supports_dvm - find out if FW supports DVM
+ * @hw: pointer to the HW structure
+ */
+static bool ice_fw_supports_dvm(struct ice_hw *hw)
+{
+ struct ice_aqc_get_vlan_mode get_vlan_mode = { 0 };
+ int status;
+
+ /* If firmware returns success, then it supports DVM, else it only
+ * supports SVM
+ */
+ status = ice_aq_get_vlan_mode(hw, &get_vlan_mode);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to get VLAN mode, status %d\n",
+ status);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ice_is_dvm_supported - check if Double VLAN Mode is supported
+ * @hw: pointer to the hardware structure
+ *
+ * Returns true if Double VLAN Mode (DVM) is supported and false if only Single
+ * VLAN Mode (SVM) is supported. In order for DVM to be supported the DDP and
+ * firmware must support it, otherwise only SVM is supported. This function
+ * should only be called while the global config lock is held and after the
+ * package has been successfully downloaded.
+ */
+static bool ice_is_dvm_supported(struct ice_hw *hw)
+{
+ if (!ice_pkg_supports_dvm(hw)) {
+ ice_debug(hw, ICE_DBG_PKG, "DDP doesn't support DVM\n");
+ return false;
+ }
+
+ if (!ice_fw_supports_dvm(hw)) {
+ ice_debug(hw, ICE_DBG_PKG, "FW doesn't support DVM\n");
+ return false;
+ }
+
+ return true;
+}
+
+#define ICE_EXTERNAL_VLAN_ID_FV_IDX 11
+#define ICE_SW_LKUP_VLAN_LOC_LKUP_IDX 1
+#define ICE_SW_LKUP_VLAN_PKT_FLAGS_LKUP_IDX 2
+#define ICE_SW_LKUP_PROMISC_VLAN_LOC_LKUP_IDX 2
+#define ICE_PKT_FLAGS_0_TO_15_FV_IDX 1
+#define ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK 0xD000
+static struct ice_update_recipe_lkup_idx_params ice_dvm_dflt_recipes[] = {
+ {
+ /* Update recipe ICE_SW_LKUP_VLAN to filter based on the
+ * outer/single VLAN in DVM
+ */
+ .rid = ICE_SW_LKUP_VLAN,
+ .fv_idx = ICE_EXTERNAL_VLAN_ID_FV_IDX,
+ .ignore_valid = true,
+ .mask = 0,
+ .mask_valid = false, /* use pre-existing mask */
+ .lkup_idx = ICE_SW_LKUP_VLAN_LOC_LKUP_IDX,
+ },
+ {
+ /* Update recipe ICE_SW_LKUP_VLAN to filter based on the VLAN
+ * packet flags to support VLAN filtering on multiple VLAN
+ * ethertypes (i.e. 0x8100 and 0x88a8) in DVM
+ */
+ .rid = ICE_SW_LKUP_VLAN,
+ .fv_idx = ICE_PKT_FLAGS_0_TO_15_FV_IDX,
+ .ignore_valid = false,
+ .mask = ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK,
+ .mask_valid = true,
+ .lkup_idx = ICE_SW_LKUP_VLAN_PKT_FLAGS_LKUP_IDX,
+ },
+ {
+ /* Update recipe ICE_SW_LKUP_PROMISC_VLAN to filter based on the
+ * outer/single VLAN in DVM
+ */
+ .rid = ICE_SW_LKUP_PROMISC_VLAN,
+ .fv_idx = ICE_EXTERNAL_VLAN_ID_FV_IDX,
+ .ignore_valid = true,
+ .mask = 0,
+ .mask_valid = false, /* use pre-existing mask */
+ .lkup_idx = ICE_SW_LKUP_PROMISC_VLAN_LOC_LKUP_IDX,
+ },
+};
+
+/**
+ * ice_dvm_update_dflt_recipes - update default switch recipes in DVM
+ * @hw: hardware structure used to update the recipes
+ */
+static int ice_dvm_update_dflt_recipes(struct ice_hw *hw)
+{
+ unsigned long i;
+
+ for (i = 0; i < ARRAY_SIZE(ice_dvm_dflt_recipes); i++) {
+ struct ice_update_recipe_lkup_idx_params *params;
+ int status;
+
+ params = &ice_dvm_dflt_recipes[i];
+
+ status = ice_update_recipe_lkup_idx(hw, params);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to update RID %d lkup_idx %d fv_idx %d mask_valid %s mask 0x%04x\n",
+ params->rid, params->lkup_idx, params->fv_idx,
+ params->mask_valid ? "true" : "false",
+ params->mask);
+ return status;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_aq_set_vlan_mode - set the VLAN mode of the device
+ * @hw: pointer to the HW structure
+ * @set_params: requested VLAN mode configuration
+ *
+ * Set VLAN Mode Parameters (0x020C)
+ */
+static int
+ice_aq_set_vlan_mode(struct ice_hw *hw,
+ struct ice_aqc_set_vlan_mode *set_params)
+{
+ u8 rdma_packet, mng_vlan_prot_id;
+ struct ice_aq_desc desc;
+
+ if (!set_params)
+ return -EINVAL;
+
+ if (set_params->l2tag_prio_tagging > ICE_AQ_VLAN_PRIO_TAG_MAX)
+ return -EINVAL;
+
+ rdma_packet = set_params->rdma_packet;
+ if (rdma_packet != ICE_AQ_SVM_VLAN_RDMA_PKT_FLAG_SETTING &&
+ rdma_packet != ICE_AQ_DVM_VLAN_RDMA_PKT_FLAG_SETTING)
+ return -EINVAL;
+
+ mng_vlan_prot_id = set_params->mng_vlan_prot_id;
+ if (mng_vlan_prot_id != ICE_AQ_VLAN_MNG_PROTOCOL_ID_OUTER &&
+ mng_vlan_prot_id != ICE_AQ_VLAN_MNG_PROTOCOL_ID_INNER)
+ return -EINVAL;
+
+ ice_fill_dflt_direct_cmd_desc(&desc,
+ ice_aqc_opc_set_vlan_mode_parameters);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ return ice_aq_send_cmd(hw, &desc, set_params, sizeof(*set_params),
+ NULL);
+}
+
+/**
+ * ice_set_dvm - sets up software and hardware for double VLAN mode
+ * @hw: pointer to the hardware structure
+ */
+static int ice_set_dvm(struct ice_hw *hw)
+{
+ struct ice_aqc_set_vlan_mode params = { 0 };
+ int status;
+
+ params.l2tag_prio_tagging = ICE_AQ_VLAN_PRIO_TAG_OUTER_CTAG;
+ params.rdma_packet = ICE_AQ_DVM_VLAN_RDMA_PKT_FLAG_SETTING;
+ params.mng_vlan_prot_id = ICE_AQ_VLAN_MNG_PROTOCOL_ID_OUTER;
+
+ status = ice_aq_set_vlan_mode(hw, &params);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to set double VLAN mode parameters, status %d\n",
+ status);
+ return status;
+ }
+
+ status = ice_dvm_update_dflt_recipes(hw);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to update default recipes for double VLAN mode, status %d\n",
+ status);
+ return status;
+ }
+
+ status = ice_aq_set_port_params(hw->port_info, true, NULL);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to set port in double VLAN mode, status %d\n",
+ status);
+ return status;
+ }
+
+ status = ice_set_dvm_boost_entries(hw);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to set boost TCAM entries for double VLAN mode, status %d\n",
+ status);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_set_svm - set single VLAN mode
+ * @hw: pointer to the HW structure
+ */
+static int ice_set_svm(struct ice_hw *hw)
+{
+ struct ice_aqc_set_vlan_mode *set_params;
+ int status;
+
+ status = ice_aq_set_port_params(hw->port_info, false, NULL);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to set port parameters for single VLAN mode\n");
+ return status;
+ }
+
+ set_params = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*set_params),
+ GFP_KERNEL);
+ if (!set_params)
+ return -ENOMEM;
+
+ /* default configuration for single VLAN mode (SVM) */
+ set_params->l2tag_prio_tagging = ICE_AQ_VLAN_PRIO_TAG_INNER_CTAG;
+ set_params->rdma_packet = ICE_AQ_SVM_VLAN_RDMA_PKT_FLAG_SETTING;
+ set_params->mng_vlan_prot_id = ICE_AQ_VLAN_MNG_PROTOCOL_ID_INNER;
+
+ status = ice_aq_set_vlan_mode(hw, set_params);
+ if (status)
+ ice_debug(hw, ICE_DBG_INIT, "Failed to configure port in single VLAN mode\n");
+
+ devm_kfree(ice_hw_to_dev(hw), set_params);
+ return status;
+}
+
+/**
+ * ice_set_vlan_mode - configure single or double VLAN mode for the device
+ * @hw: pointer to the HW structure
+ */
+int ice_set_vlan_mode(struct ice_hw *hw)
+{
+ if (!ice_is_dvm_supported(hw))
+ return 0;
+
+ if (!ice_set_dvm(hw))
+ return 0;
+
+ return ice_set_svm(hw);
+}
+
+/**
+ * ice_print_dvm_not_supported - print if DDP and/or FW doesn't support DVM
+ * @hw: pointer to the HW structure
+ *
+ * The purpose of this function is to print that QinQ is not supported due to
+ * an incompatible DDP and/or FW. This gives the user a hint to update one or
+ * both components if they expect QinQ functionality.
+ */
+static void ice_print_dvm_not_supported(struct ice_hw *hw)
+{
+ bool pkg_supports_dvm = ice_pkg_supports_dvm(hw);
+ bool fw_supports_dvm = ice_fw_supports_dvm(hw);
+
+ if (!fw_supports_dvm && !pkg_supports_dvm)
+ dev_info(ice_hw_to_dev(hw), "QinQ functionality cannot be enabled on this device. Update your DDP package and NVM to versions that support QinQ.\n");
+ else if (!pkg_supports_dvm)
+ dev_info(ice_hw_to_dev(hw), "QinQ functionality cannot be enabled on this device. Update your DDP package to a version that supports QinQ.\n");
+ else if (!fw_supports_dvm)
+ dev_info(ice_hw_to_dev(hw), "QinQ functionality cannot be enabled on this device. Update your NVM to a version that supports QinQ.\n");
+}
+
+/**
+ * ice_post_pkg_dwnld_vlan_mode_cfg - configure VLAN mode after DDP download
+ * @hw: pointer to the HW structure
+ *
+ * This function is meant to configure any VLAN mode specific functionality
+ * after the global configuration lock has been released and the DDP has been
+ * downloaded.
+ *
+ * Since only one PF downloads the DDP and configures the VLAN mode there needs
+ * to be a way to configure the other PFs after the DDP has been downloaded and
+ * the global configuration lock has been released. All such code should go in
+ * this function.
+ */
+void ice_post_pkg_dwnld_vlan_mode_cfg(struct ice_hw *hw)
+{
+ ice_cache_vlan_mode(hw);
+
+ if (ice_is_dvm_ena(hw))
+ ice_change_proto_id_to_dvm();
+ else
+ ice_print_dvm_not_supported(hw);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vlan_mode.h b/drivers/net/ethernet/intel/ice/ice_vlan_mode.h
new file mode 100644
index 000000000000..a0fb743d08e2
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vlan_mode.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_VLAN_MODE_H_
+#define _ICE_VLAN_MODE_H_
+
+struct ice_hw;
+
+bool ice_is_dvm_ena(struct ice_hw *hw);
+int ice_set_vlan_mode(struct ice_hw *hw);
+void ice_post_pkg_dwnld_vlan_mode_cfg(struct ice_hw *hw);
+
+#endif /* _ICE_VLAN_MODE_H_ */
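As the comments in ice_vlan_mode.c spell out, the three exported entry points have a fixed ordering relative to the DDP download. A hedged sketch of the expected flow; the surrounding helper is hypothetical and only the three ice_* calls come from this patch.

/* Hypothetical init flow; only the three ice_* calls below are real. */
static bool example_setup_vlan_mode(struct ice_hw *hw)
{
	/* while holding the global config lock, right after downloading the DDP */
	if (ice_set_vlan_mode(hw))
		return false;

	/* after the global config lock has been released, on every PF */
	ice_post_pkg_dwnld_vlan_mode_cfg(hw);

	/* from here on, querying the mode is a cached-flag read, not an AQ call */
	return ice_is_dvm_ena(hw);
}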
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
new file mode 100644
index 000000000000..5b4a0abb4607
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
@@ -0,0 +1,707 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice_vsi_vlan_lib.h"
+#include "ice_lib.h"
+#include "ice_fltr.h"
+#include "ice.h"
+
+static void print_invalid_tpid(struct ice_vsi *vsi, u16 tpid)
+{
+ dev_err(ice_pf_to_dev(vsi->back), "%s %d specified invalid VLAN tpid 0x%04x\n",
+ ice_vsi_type_str(vsi->type), vsi->idx, tpid);
+}
+
+/**
+ * validate_vlan - check if the ice_vlan passed in is valid
+ * @vsi: VSI used for printing error message
+ * @vlan: ice_vlan structure to validate
+ *
+ * Return true if the VLAN TPID is valid, or if both the VLAN TPID and the VLAN
+ * VID are 0. The former allows adding non-zero VLAN filters with the specified
+ * VLAN TPID, while the latter allows untagged VLAN 0 filters to be added to the
+ * prune list.
+ */
+static bool validate_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ if (vlan->tpid != ETH_P_8021Q && vlan->tpid != ETH_P_8021AD &&
+ vlan->tpid != ETH_P_QINQ1 && (vlan->tpid || vlan->vid)) {
+ print_invalid_tpid(vsi, vlan->tpid);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ice_vsi_add_vlan - default add VLAN implementation for all VSI types
+ * @vsi: VSI being configured
+ * @vlan: VLAN filter to add
+ */
+int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ int err;
+
+ if (!validate_vlan(vsi, vlan))
+ return -EINVAL;
+
+ err = ice_fltr_add_vlan(vsi, vlan);
+ if (err && err != -EEXIST) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failure Adding VLAN %d on VSI %i, status %d\n",
+ vlan->vid, vsi->vsi_num, err);
+ return err;
+ }
+
+ vsi->num_vlan++;
+ return 0;
+}
+
+/**
+ * ice_vsi_del_vlan - default del VLAN implementation for all VSI types
+ * @vsi: VSI being configured
+ * @vlan: VLAN filter to delete
+ */
+int ice_vsi_del_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ int err;
+
+ if (!validate_vlan(vsi, vlan))
+ return -EINVAL;
+
+ dev = ice_pf_to_dev(pf);
+
+ err = ice_fltr_remove_vlan(vsi, vlan);
+ if (!err)
+ vsi->num_vlan--;
+ else if (err == -ENOENT || err == -EBUSY)
+ err = 0;
+ else
+ dev_err(dev, "Error removing VLAN %d on VSI %i error: %d\n",
+ vlan->vid, vsi->vsi_num, err);
+
+ return err;
+}
+
+/**
+ * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
+ * @vsi: the VSI being changed
+ */
+static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ int err;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ /* Here we are configuring the VSI to let the driver add VLAN tags by
+ * setting inner_vlan_flags to ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL. The actual VLAN tag
+ * insertion happens in the Tx hot path, in ice_tx_map.
+ */
+ ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
+
+ /* Preserve existing VLAN strip setting */
+ ctxt->info.inner_vlan_flags |= (vsi->info.inner_vlan_flags &
+ ICE_AQ_VSI_INNER_VLAN_EMODE_M);
+
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ goto out;
+ }
+
+ vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
+out:
+ kfree(ctxt);
+ return err;
+}
+
+/**
+ * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
+ * @vsi: the VSI being changed
+ * @ena: boolean value indicating if this is an enable or disable request
+ */
+static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ int err;
+
+ /* do not allow modifying VLAN stripping when a port VLAN is configured
+ * on this VSI
+ */
+ if (vsi->info.port_based_inner_vlan)
+ return 0;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ /* Here we are configuring what the VSI should do with the VLAN tag in
+ * the Rx packet. We can either leave the tag in the packet or put it in
+ * the Rx descriptor.
+ */
+ if (ena)
+ /* Strip VLAN tag from Rx packet and put it in the desc */
+ ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH;
+ else
+ /* Disable stripping. Leave tag in packet */
+ ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
+
+ /* Allow all packets untagged/tagged */
+ ctxt->info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
+
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %s\n",
+ ena, err, ice_aq_str(hw->adminq.sq_last_status));
+ goto out;
+ }
+
+ vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
+out:
+ kfree(ctxt);
+ return err;
+}
+
+int ice_vsi_ena_inner_stripping(struct ice_vsi *vsi, const u16 tpid)
+{
+ if (tpid != ETH_P_8021Q) {
+ print_invalid_tpid(vsi, tpid);
+ return -EINVAL;
+ }
+
+ return ice_vsi_manage_vlan_stripping(vsi, true);
+}
+
+int ice_vsi_dis_inner_stripping(struct ice_vsi *vsi)
+{
+ return ice_vsi_manage_vlan_stripping(vsi, false);
+}
+
+int ice_vsi_ena_inner_insertion(struct ice_vsi *vsi, const u16 tpid)
+{
+ if (tpid != ETH_P_8021Q) {
+ print_invalid_tpid(vsi, tpid);
+ return -EINVAL;
+ }
+
+ return ice_vsi_manage_vlan_insertion(vsi);
+}
+
+int ice_vsi_dis_inner_insertion(struct ice_vsi *vsi)
+{
+ return ice_vsi_manage_vlan_insertion(vsi);
+}
+
+/**
+ * __ice_vsi_set_inner_port_vlan - set port VLAN VSI context settings to enable a port VLAN
+ * @vsi: the VSI to update
+ * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
+ */
+static int __ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, u16 pvid_info)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_aqc_vsi_props *info;
+ struct ice_vsi_ctx *ctxt;
+ int ret;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info = vsi->info;
+ info = &ctxt->info;
+ info->inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED |
+ ICE_AQ_VSI_INNER_VLAN_INSERT_PVID |
+ ICE_AQ_VSI_INNER_VLAN_EMODE_STR;
+ info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+
+ info->port_based_inner_vlan = cpu_to_le16(pvid_info);
+ info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+
+ ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (ret) {
+ dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status));
+ goto out;
+ }
+
+ vsi->info.inner_vlan_flags = info->inner_vlan_flags;
+ vsi->info.sw_flags2 = info->sw_flags2;
+ vsi->info.port_based_inner_vlan = info->port_based_inner_vlan;
+out:
+ kfree(ctxt);
+ return ret;
+}
+
+int ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ u16 port_vlan_info;
+
+ if (vlan->tpid != ETH_P_8021Q)
+ return -EINVAL;
+
+ if (vlan->prio > 7)
+ return -EINVAL;
+
+ port_vlan_info = vlan->vid | (vlan->prio << VLAN_PRIO_SHIFT);
+
+ return __ice_vsi_set_inner_port_vlan(vsi, port_vlan_info);
+}
+
+/**
+ * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
+ * @vsi: VSI to enable or disable VLAN pruning on
+ * @ena: set to true to enable VLAN pruning and false to disable it
+ *
+ * returns 0 if VSI is updated, negative otherwise
+ */
+static int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
+{
+ struct ice_vsi_ctx *ctxt;
+ struct ice_pf *pf;
+ int status;
+
+ if (!vsi)
+ return -EINVAL;
+
+ /* Don't enable VLAN pruning if the netdev is currently in promiscuous
+ * mode. VLAN pruning will be enabled when the interface exits
+ * promiscuous mode if any VLAN filters are active.
+ */
+ if (vsi->netdev && vsi->netdev->flags & IFF_PROMISC && ena)
+ return 0;
+
+ pf = vsi->back;
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info = vsi->info;
+
+ if (ena)
+ ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ else
+ ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
+
+ status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
+ if (status) {
+ netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %s\n",
+ ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
+ ice_aq_str(pf->hw.adminq.sq_last_status));
+ goto err_out;
+ }
+
+ vsi->info.sw_flags2 = ctxt->info.sw_flags2;
+
+ kfree(ctxt);
+ return 0;
+
+err_out:
+ kfree(ctxt);
+ return status;
+}
+
+int ice_vsi_ena_rx_vlan_filtering(struct ice_vsi *vsi)
+{
+ return ice_cfg_vlan_pruning(vsi, true);
+}
+
+int ice_vsi_dis_rx_vlan_filtering(struct ice_vsi *vsi)
+{
+ return ice_cfg_vlan_pruning(vsi, false);
+}
+
+static int ice_cfg_vlan_antispoof(struct ice_vsi *vsi, bool enable)
+{
+ struct ice_vsi_ctx *ctx;
+ int err;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->info.sec_flags = vsi->info.sec_flags;
+ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+
+ if (enable)
+ ctx->info.sec_flags |= ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
+ else
+ ctx->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+
+ err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx VLAN anti-spoof %s for VSI %d, error %d\n",
+ enable ? "ON" : "OFF", vsi->vsi_num, err);
+ else
+ vsi->info.sec_flags = ctx->info.sec_flags;
+
+ kfree(ctx);
+
+ return err;
+}
+
+int ice_vsi_ena_tx_vlan_filtering(struct ice_vsi *vsi)
+{
+ return ice_cfg_vlan_antispoof(vsi, true);
+}
+
+int ice_vsi_dis_tx_vlan_filtering(struct ice_vsi *vsi)
+{
+ return ice_cfg_vlan_antispoof(vsi, false);
+}
+
+/**
+ * tpid_to_vsi_outer_vlan_type - convert from TPID to VSI context based tag_type
+ * @tpid: tpid used to translate into VSI context based tag_type
+ * @tag_type: output variable to hold the VSI context based tag type
+ */
+static int tpid_to_vsi_outer_vlan_type(u16 tpid, u8 *tag_type)
+{
+ switch (tpid) {
+ case ETH_P_8021Q:
+ *tag_type = ICE_AQ_VSI_OUTER_TAG_VLAN_8100;
+ break;
+ case ETH_P_8021AD:
+ *tag_type = ICE_AQ_VSI_OUTER_TAG_STAG;
+ break;
+ case ETH_P_QINQ1:
+ *tag_type = ICE_AQ_VSI_OUTER_TAG_VLAN_9100;
+ break;
+ default:
+ *tag_type = 0;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vsi_ena_outer_stripping - enable outer VLAN stripping
+ * @vsi: VSI to configure
+ * @tpid: TPID to enable outer VLAN stripping for
+ *
+ * Enable outer VLAN stripping via VSI context. This function should only be
+ * used if DVM is supported. Also, this function should never be called directly
+ * as it should be part of ice_vsi_vlan_ops if it's needed.
+ *
+ * Since the VSI context only supports a single TPID for insertion and
+ * stripping, setting the TPID for stripping will affect the TPID for insertion.
+ * Callers need to be aware of this limitation.
+ *
+ * Only modify outer VLAN stripping settings and the VLAN TPID. Outer VLAN
+ * insertion settings are unmodified.
+ *
+ * This enables hardware to strip a VLAN tag with the specified TPID to be
+ * stripped from the packet and placed in the receive descriptor.
+ */
+int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi, u16 tpid)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ u8 tag_type;
+ int err;
+
+ /* do not allow modifying VLAN stripping when a port VLAN is configured
+ * on this VSI
+ */
+ if (vsi->info.port_based_outer_vlan)
+ return 0;
+
+ if (tpid_to_vsi_outer_vlan_type(tpid, &tag_type))
+ return -EINVAL;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
+ /* clear current outer VLAN strip settings */
+ ctxt->info.outer_vlan_flags = vsi->info.outer_vlan_flags &
+ ~(ICE_AQ_VSI_OUTER_VLAN_EMODE_M | ICE_AQ_VSI_OUTER_TAG_TYPE_M);
+ ctxt->info.outer_vlan_flags |=
+ ((ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH <<
+ ICE_AQ_VSI_OUTER_VLAN_EMODE_S) |
+ ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
+ ICE_AQ_VSI_OUTER_TAG_TYPE_M));
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for enabling outer VLAN stripping failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ else
+ vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
+
+ kfree(ctxt);
+ return err;
+}
+
+/**
+ * ice_vsi_dis_outer_stripping - disable outer VLAN stripping
+ * @vsi: VSI to configure
+ *
+ * Disable outer VLAN stripping via VSI context. This function should only be
+ * used if DVM is supported. Also, this function should never be called directly
+ * as it should be part of ice_vsi_vlan_ops if it's needed.
+ *
+ * Only modify the outer VLAN stripping settings. The VLAN TPID and outer VLAN
+ * insertion settings are unmodified.
+ *
+ * This tells the hardware to not strip any VLAN tagged packets, thus leaving
+ * them in the packet. This enables software offloaded VLAN stripping and
+ * disables hardware offloaded VLAN stripping.
+ */
+int ice_vsi_dis_outer_stripping(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ int err;
+
+ if (vsi->info.port_based_outer_vlan)
+ return 0;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
+ /* clear current outer VLAN strip settings */
+ ctxt->info.outer_vlan_flags = vsi->info.outer_vlan_flags &
+ ~ICE_AQ_VSI_OUTER_VLAN_EMODE_M;
+ ctxt->info.outer_vlan_flags |= ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING <<
+ ICE_AQ_VSI_OUTER_VLAN_EMODE_S;
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for disabling outer VLAN stripping failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ else
+ vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
+
+ kfree(ctxt);
+ return err;
+}
+
+/**
+ * ice_vsi_ena_outer_insertion - enable outer VLAN insertion
+ * @vsi: VSI to configure
+ * @tpid: TPID to enable outer VLAN insertion for
+ *
+ * Enable outer VLAN insertion via VSI context. This function should only be
+ * used if DVM is supported. Also, this function should never be called directly
+ * as it should be part of ice_vsi_vlan_ops if it's needed.
+ *
+ * Since the VSI context only supports a single TPID for insertion and
+ * stripping, setting the TPID for insertion will affect the TPID for stripping.
+ * Callers need to be aware of this limitation.
+ *
+ * Only modify outer VLAN insertion settings and the VLAN TPID. Outer VLAN
+ * stripping settings are unmodified.
+ *
+ * This allows a VLAN tag with the specified TPID to be inserted in the transmit
+ * descriptor.
+ */
+int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ u8 tag_type;
+ int err;
+
+ if (vsi->info.port_based_outer_vlan)
+ return 0;
+
+ if (tpid_to_vsi_outer_vlan_type(tpid, &tag_type))
+ return -EINVAL;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
+ /* clear current outer VLAN insertion settings */
+ ctxt->info.outer_vlan_flags = vsi->info.outer_vlan_flags &
+ ~(ICE_AQ_VSI_OUTER_VLAN_PORT_BASED_INSERT |
+ ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC |
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M |
+ ICE_AQ_VSI_OUTER_TAG_TYPE_M);
+ ctxt->info.outer_vlan_flags |=
+ ((ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M) |
+ ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
+ ICE_AQ_VSI_OUTER_TAG_TYPE_M);
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for enabling outer VLAN insertion failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ else
+ vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
+
+ kfree(ctxt);
+ return err;
+}
+
+/**
+ * ice_vsi_dis_outer_insertion - disable outer VLAN insertion
+ * @vsi: VSI to configure
+ *
+ * Disable outer VLAN insertion via VSI context. This function should only be
+ * used if DVM is supported. Also, this function should never be called directly
+ * as it should be part of ice_vsi_vlan_ops if it's needed.
+ *
+ * Only modify the outer VLAN insertion settings. The VLAN TPID and outer VLAN
+ * settings are unmodified.
+ *
+ * This tells the hardware to not allow any VLAN tagged packets in the transmit
+ * descriptor. This enables software offloaded VLAN insertion and disables
+ * hardware offloaded VLAN insertion.
+ */
+int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ int err;
+
+ if (vsi->info.port_based_outer_vlan)
+ return 0;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
+ /* clear current outer VLAN insertion settings */
+ ctxt->info.outer_vlan_flags = vsi->info.outer_vlan_flags &
+ ~(ICE_AQ_VSI_OUTER_VLAN_PORT_BASED_INSERT |
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M);
+ ctxt->info.outer_vlan_flags |=
+ ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC |
+ ((ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M);
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for disabling outer VLAN insertion failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ else
+ vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
+
+ kfree(ctxt);
+ return err;
+}
+
+/**
+ * __ice_vsi_set_outer_port_vlan - set the outer port VLAN and related settings
+ * @vsi: VSI to configure
+ * @vlan_info: packed u16 that contains the VLAN prio and ID
+ * @tpid: TPID of the port VLAN
+ *
+ * Set the port VLAN prio, ID, and TPID.
+ *
+ * Enable VLAN pruning so the VSI doesn't receive any traffic that doesn't match
+ * a VLAN prune rule. The caller should take care to add a VLAN prune rule that
+ * matches the port VLAN ID and TPID.
+ *
+ * Tell hardware to strip outer VLAN tagged packets on receive and don't put
+ * them in the receive descriptor. VSI(s) in port VLANs should not be aware of
+ * the port VLAN ID or TPID they are assigned to.
+ *
+ * Tell hardware to prevent outer VLAN tag insertion on transmit and only allow
+ * untagged outer packets from the transmit descriptor.
+ *
+ * Also, tell the hardware to insert the port VLAN on transmit.
+ */
+static int
+__ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, u16 vlan_info, u16 tpid)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ u8 tag_type;
+ int err;
+
+ if (tpid_to_vsi_outer_vlan_type(tpid, &tag_type))
+ return -EINVAL;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info = vsi->info;
+
+ ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+
+ ctxt->info.port_based_outer_vlan = cpu_to_le16(vlan_info);
+ ctxt->info.outer_vlan_flags =
+ (ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW <<
+ ICE_AQ_VSI_OUTER_VLAN_EMODE_S) |
+ ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
+ ICE_AQ_VSI_OUTER_TAG_TYPE_M) |
+ ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC |
+ (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ACCEPTUNTAGGED <<
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) |
+ ICE_AQ_VSI_OUTER_VLAN_PORT_BASED_INSERT;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for setting outer port based VLAN failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ } else {
+ vsi->info.port_based_outer_vlan = ctxt->info.port_based_outer_vlan;
+ vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
+ vsi->info.sw_flags2 = ctxt->info.sw_flags2;
+ }
+
+ kfree(ctxt);
+ return err;
+}
+
+/**
+ * ice_vsi_set_outer_port_vlan - public version of __ice_vsi_set_outer_port_vlan
+ * @vsi: VSI to configure
+ * @vlan: ice_vlan structure used to set the port VLAN
+ *
+ * Set the outer port VLAN via VSI context. This function should only be
+ * used if DVM is supported. Also, this function should never be called directly
+ * as it should be part of ice_vsi_vlan_ops if it's needed.
+ *
+ * This function does not support clearing the port VLAN as there is currently
+ * no use case for this.
+ *
+ * Use the ice_vlan structure passed in to set this VSI in a port VLAN.
+ */
+int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ u16 port_vlan_info;
+
+ if (vlan->prio > (VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT))
+ return -EINVAL;
+
+ port_vlan_info = vlan->vid | (vlan->prio << VLAN_PRIO_SHIFT);
+
+ return __ice_vsi_set_outer_port_vlan(vsi, port_vlan_info, vlan->tpid);
+}
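
The helpers introduced above all share one VSI-context read/modify/write pattern: allocate a scratch context, seed it from the cached vsi->info, mark the sections being changed in valid_sections, issue ice_update_vsi(), and copy the result back into the cache only on success. A minimal sketch of that pattern, assuming only the ice_update_vsi() call and the struct ice_vsi_ctx / ice_aqc_vsi_props types already used in this file (the generic wrapper itself is hypothetical, not part of the patch):

static int ice_vsi_update_props(struct ice_vsi *vsi, u16 valid_sections,
				void (*modify)(struct ice_aqc_vsi_props *info))
{
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_vsi_ctx *ctxt;
	int err;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	/* start from the cached copy so unrelated fields are preserved */
	ctxt->info = vsi->info;
	modify(&ctxt->info);
	ctxt->info.valid_sections = cpu_to_le16(valid_sections);

	err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (!err)
		/* keep the software cache in sync with hardware */
		vsi->info = ctxt->info;

	kfree(ctxt);
	return err;
}
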
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h
new file mode 100644
index 000000000000..f459909490ec
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_VSI_VLAN_LIB_H_
+#define _ICE_VSI_VLAN_LIB_H_
+
+#include <linux/types.h>
+#include "ice_vlan.h"
+
+struct ice_vsi;
+
+int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+int ice_vsi_del_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+
+int ice_vsi_ena_inner_stripping(struct ice_vsi *vsi, u16 tpid);
+int ice_vsi_dis_inner_stripping(struct ice_vsi *vsi);
+int ice_vsi_ena_inner_insertion(struct ice_vsi *vsi, u16 tpid);
+int ice_vsi_dis_inner_insertion(struct ice_vsi *vsi);
+int ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+
+int ice_vsi_ena_rx_vlan_filtering(struct ice_vsi *vsi);
+int ice_vsi_dis_rx_vlan_filtering(struct ice_vsi *vsi);
+int ice_vsi_ena_tx_vlan_filtering(struct ice_vsi *vsi);
+int ice_vsi_dis_tx_vlan_filtering(struct ice_vsi *vsi);
+
+int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi, u16 tpid);
+int ice_vsi_dis_outer_stripping(struct ice_vsi *vsi);
+int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid);
+int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi);
+int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+
+#endif /* _ICE_VSI_VLAN_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
new file mode 100644
index 000000000000..4a6c850d83ac
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice_pf_vsi_vlan_ops.h"
+#include "ice_vf_vsi_vlan_ops.h"
+#include "ice_lib.h"
+#include "ice.h"
+
+static int
+op_unsupported_vlan_arg(struct ice_vsi * __always_unused vsi,
+ struct ice_vlan * __always_unused vlan)
+{
+ return -EOPNOTSUPP;
+}
+
+static int
+op_unsupported_tpid_arg(struct ice_vsi *__always_unused vsi,
+ u16 __always_unused tpid)
+{
+ return -EOPNOTSUPP;
+}
+
+static int op_unsupported(struct ice_vsi *__always_unused vsi)
+{
+ return -EOPNOTSUPP;
+}
+
+/* If any new ops are added to the VSI VLAN ops interface then an unsupported
+ * implementation should be set here.
+ */
+static struct ice_vsi_vlan_ops ops_unsupported = {
+ .add_vlan = op_unsupported_vlan_arg,
+ .del_vlan = op_unsupported_vlan_arg,
+ .ena_stripping = op_unsupported_tpid_arg,
+ .dis_stripping = op_unsupported,
+ .ena_insertion = op_unsupported_tpid_arg,
+ .dis_insertion = op_unsupported,
+ .ena_rx_filtering = op_unsupported,
+ .dis_rx_filtering = op_unsupported,
+ .ena_tx_filtering = op_unsupported,
+ .dis_tx_filtering = op_unsupported,
+ .set_port_vlan = op_unsupported_vlan_arg,
+};
+
+/**
+ * ice_vsi_init_unsupported_vlan_ops - init all VSI VLAN ops to unsupported
+ * @vsi: VSI to initialize VSI VLAN ops to unsupported for
+ *
+ * By default all inner and outer VSI VLAN ops return -EOPNOTSUPP. This was done
+ * as opposed to leaving the ops null to prevent unexpected crashes. Instead if
+ * an unsupported VSI VLAN op is called it will just return -EOPNOTSUPP.
+ *
+ */
+static void ice_vsi_init_unsupported_vlan_ops(struct ice_vsi *vsi)
+{
+ vsi->outer_vlan_ops = ops_unsupported;
+ vsi->inner_vlan_ops = ops_unsupported;
+}
+
+/**
+ * ice_vsi_init_vlan_ops - initialize type specific VSI VLAN ops
+ * @vsi: VSI to initialize ops for
+ *
+ * If any VSI types are added and/or require different ops than the PF or VF VSI
+ * then they will have to add a case here to handle that. Also, VSI type
+ * specific files should be added in the same manner that was done for PF VSI.
+ */
+void ice_vsi_init_vlan_ops(struct ice_vsi *vsi)
+{
+ /* Initialize all VSI types to have unsupported VSI VLAN ops */
+ ice_vsi_init_unsupported_vlan_ops(vsi);
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ case ICE_VSI_SWITCHDEV_CTRL:
+ ice_pf_vsi_init_vlan_ops(vsi);
+ break;
+ case ICE_VSI_VF:
+ ice_vf_vsi_init_vlan_ops(vsi);
+ break;
+ default:
+ dev_dbg(ice_pf_to_dev(vsi->back), "%s does not support VLAN operations\n",
+ ice_vsi_type_str(vsi->type));
+ break;
+ }
+}
+
+/**
+ * ice_get_compat_vsi_vlan_ops - Get VSI VLAN ops based on VLAN mode
+ * @vsi: VSI used to get the VSI VLAN ops
+ *
+ * This function is meant to be used when the caller doesn't know which VLAN ops
+ * to use (i.e. inner or outer). This allows backward compatibility for VLANs
+ * since most of the outer VSI VLAN functions are not supported when
+ * the device is configured in Single VLAN Mode (SVM).
+ */
+struct ice_vsi_vlan_ops *ice_get_compat_vsi_vlan_ops(struct ice_vsi *vsi)
+{
+ if (ice_is_dvm_ena(&vsi->back->hw))
+ return &vsi->outer_vlan_ops;
+ else
+ return &vsi->inner_vlan_ops;
+}
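
Callers are not expected to invoke the ice_vsi_vlan_lib.c helpers directly; they go through the per-VSI ops table, and ice_get_compat_vsi_vlan_ops() selects the outer ops when Double VLAN Mode (DVM) is enabled and the inner ops otherwise. A sketch of a typical call site, assuming struct ice_vlan carries tpid/vid/prio fields as declared in ice_vlan.h (the wrapper function is hypothetical):

static int example_vsi_add_vid(struct ice_vsi *vsi, u16 vid)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct ice_vlan vlan = {
		.tpid = ETH_P_8021Q,
		.vid = vid,
		.prio = 0,
	};

	/* dispatches to inner ops in SVM and outer ops in DVM */
	return vlan_ops->add_vlan(vsi, &vlan);
}
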

diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h
new file mode 100644
index 000000000000..5b47568f6256
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_VSI_VLAN_OPS_H_
+#define _ICE_VSI_VLAN_OPS_H_
+
+#include "ice_type.h"
+#include "ice_vsi_vlan_lib.h"
+
+struct ice_vsi;
+
+struct ice_vsi_vlan_ops {
+ int (*add_vlan)(struct ice_vsi *vsi, struct ice_vlan *vlan);
+ int (*del_vlan)(struct ice_vsi *vsi, struct ice_vlan *vlan);
+ int (*ena_stripping)(struct ice_vsi *vsi, const u16 tpid);
+ int (*dis_stripping)(struct ice_vsi *vsi);
+ int (*ena_insertion)(struct ice_vsi *vsi, const u16 tpid);
+ int (*dis_insertion)(struct ice_vsi *vsi);
+ int (*ena_rx_filtering)(struct ice_vsi *vsi);
+ int (*dis_rx_filtering)(struct ice_vsi *vsi);
+ int (*ena_tx_filtering)(struct ice_vsi *vsi);
+ int (*dis_tx_filtering)(struct ice_vsi *vsi);
+ int (*set_port_vlan)(struct ice_vsi *vsi, struct ice_vlan *vlan);
+};
+
+void ice_vsi_init_vlan_ops(struct ice_vsi *vsi);
+struct ice_vsi_vlan_ops *ice_get_compat_vsi_vlan_ops(struct ice_vsi *vsi);
+
+#endif /* _ICE_VSI_VLAN_OPS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 2388837d6d6c..88853a6ed931 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -327,6 +327,13 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
bool if_running, pool_present = !!pool;
int ret = 0, pool_failure = 0;
+ if (!is_power_of_2(vsi->rx_rings[qid]->count) ||
+ !is_power_of_2(vsi->tx_rings[qid]->count)) {
+ netdev_err(vsi->netdev, "Please align ring sizes to power of 2\n");
+ pool_failure = -EINVAL;
+ goto failure;
+ }
+
if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
if (if_running) {
@@ -349,6 +356,7 @@ xsk_pool_if_up:
netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
}
+failure:
if (pool_failure) {
netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
pool_present ? "en" : "dis", pool_failure);
@@ -359,33 +367,28 @@ xsk_pool_if_up:
}
/**
- * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
- * @rx_ring: Rx ring
+ * ice_fill_rx_descs - pick buffers from XSK buffer pool and use them
+ * @pool: XSK Buffer pool to pull the buffers from
+ * @xdp: SW ring of xdp_buff that will hold the buffers
+ * @rx_desc: Pointer to Rx descriptors that will be filled
* @count: The number of buffers to allocate
*
* This function allocates a number of Rx buffers from the fill ring
* or the internal recycle mechanism and places them on the Rx ring.
*
- * Returns true if all allocations were successful, false if any fail.
+ * Note that ring wrap should be handled by caller of this function.
+ *
+ * Returns the number of allocated Rx descriptors
*/
-bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
+ union ice_32b_rx_flex_desc *rx_desc, u16 count)
{
- union ice_32b_rx_flex_desc *rx_desc;
- u16 ntu = rx_ring->next_to_use;
- struct xdp_buff **xdp;
- u32 nb_buffs, i;
dma_addr_t dma;
+ u16 buffs;
+ int i;
- rx_desc = ICE_RX_DESC(rx_ring, ntu);
- xdp = ice_xdp_buf(rx_ring, ntu);
-
- nb_buffs = min_t(u16, count, rx_ring->count - ntu);
- nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
- if (!nb_buffs)
- return false;
-
- i = nb_buffs;
- while (i--) {
+ buffs = xsk_buff_alloc_batch(pool, xdp, count);
+ for (i = 0; i < buffs; i++) {
dma = xsk_buff_xdp_get_dma(*xdp);
rx_desc->read.pkt_addr = cpu_to_le64(dma);
rx_desc->wb.status_error0 = 0;
@@ -394,13 +397,77 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
xdp++;
}
+ return buffs;
+}
+
+/**
+ * __ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
+ * @rx_ring: Rx ring
+ * @count: The number of buffers to allocate
+ *
+ * Place @count descriptors onto the Rx ring. Handle the ring wrap
+ * for case where space from next_to_use up to the end of ring is less
+ * than @count. Finally do a tail bump.
+ *
+ * Returns true if all allocations were successful, false if any fail.
+ */
+static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+{
+ union ice_32b_rx_flex_desc *rx_desc;
+ u32 nb_buffs_extra = 0, nb_buffs;
+ u16 ntu = rx_ring->next_to_use;
+ u16 total_count = count;
+ struct xdp_buff **xdp;
+
+ rx_desc = ICE_RX_DESC(rx_ring, ntu);
+ xdp = ice_xdp_buf(rx_ring, ntu);
+
+ if (ntu + count >= rx_ring->count) {
+ nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,
+ rx_desc,
+ rx_ring->count - ntu);
+ rx_desc = ICE_RX_DESC(rx_ring, 0);
+ xdp = ice_xdp_buf(rx_ring, 0);
+ ntu = 0;
+ count -= nb_buffs_extra;
+ ice_release_rx_desc(rx_ring, 0);
+ }
+
+ nb_buffs = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, count);
+
ntu += nb_buffs;
if (ntu == rx_ring->count)
ntu = 0;
- ice_release_rx_desc(rx_ring, ntu);
+ if (rx_ring->next_to_use != ntu)
+ ice_release_rx_desc(rx_ring, ntu);
+
+ return total_count == (nb_buffs_extra + nb_buffs);
+}
+
+/**
+ * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
+ * @rx_ring: Rx ring
+ * @count: The number of buffers to allocate
+ *
+ * Wrapper for internal allocation routine; figure out how many tail
+ * bumps should take place based on the given threshold
+ *
+ * Returns true if all calls to internal alloc routine succeeded
+ */
+bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+{
+ u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
+ u16 batched, leftover, i, tail_bumps;
+
+ batched = ALIGN_DOWN(count, rx_thresh);
+ tail_bumps = batched / rx_thresh;
+ leftover = count & (rx_thresh - 1);
- return count == nb_buffs;
+ for (i = 0; i < tail_bumps; i++)
+ if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
+ return false;
+ return __ice_alloc_rx_bufs_zc(rx_ring, leftover);
}
/**
@@ -428,20 +495,24 @@ static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
static struct sk_buff *
ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
- unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
+ unsigned int totalsize = xdp->data_end - xdp->data_meta;
unsigned int metasize = xdp->data - xdp->data_meta;
- unsigned int datasize = xdp->data_end - xdp->data;
struct sk_buff *skb;
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
+ net_prefetch(xdp->data_meta);
+
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, xdp->data - xdp->data_hard_start);
- memcpy(__skb_put(skb, datasize), xdp->data, datasize);
- if (metasize)
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+ ALIGN(totalsize, sizeof(long)));
+
+ if (metasize) {
skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
xsk_buff_free(xdp);
return skb;
@@ -528,7 +599,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
- if (!ice_test_staterr(rx_desc, stat_err_bits))
+ if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
break;
/* This memory barrier is needed to keep us from reading
@@ -583,9 +654,7 @@ construct_skb:
total_rx_bytes += skb->len;
total_rx_packets++;
- stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
- if (ice_test_staterr(rx_desc, stat_err_bits))
- vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
+ vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
ICE_RX_FLEX_DESC_PTYPE_M;
@@ -612,134 +681,221 @@ construct_skb:
}
/**
- * ice_xmit_zc - Completes AF_XDP entries, and cleans XDP entries
+ * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
* @xdp_ring: XDP Tx ring
- * @budget: max number of frames to xmit
+ * @tx_buf: Tx buffer to clean
+ */
+static void
+ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
+{
+ xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
+ xdp_ring->xdp_tx_active--;
+ dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+}
+
+/**
+ * ice_clean_xdp_irq_zc - Reclaim resources after transmit completes on XDP ring
+ * @xdp_ring: XDP ring to clean
+ * @napi_budget: number of descriptors that NAPI allows us to clean
*
- * Returns true if cleanup/transmission is done.
+ * Returns count of cleaned descriptors
*/
-static bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, int budget)
+static u16 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring, int napi_budget)
{
- struct ice_tx_desc *tx_desc = NULL;
- bool work_done = true;
- struct xdp_desc desc;
- dma_addr_t dma;
+ u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
+ int budget = napi_budget / tx_thresh;
+ u16 next_dd = xdp_ring->next_dd;
+ u16 ntc, cleared_dds = 0;
- while (likely(budget-- > 0)) {
+ do {
+ struct ice_tx_desc *next_dd_desc;
+ u16 desc_cnt = xdp_ring->count;
struct ice_tx_buf *tx_buf;
+ u32 xsk_frames;
+ u16 i;
- if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
- xdp_ring->tx_stats.tx_busy++;
- work_done = false;
+ next_dd_desc = ICE_TX_DESC(xdp_ring, next_dd);
+ if (!(next_dd_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
break;
- }
- tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
-
- if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
- break;
-
- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
- desc.len);
-
- tx_buf->bytecount = desc.len;
+ cleared_dds++;
+ xsk_frames = 0;
+ if (likely(!xdp_ring->xdp_tx_active)) {
+ xsk_frames = tx_thresh;
+ goto skip;
+ }
- tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
- tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz =
- ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0, desc.len, 0);
+ ntc = xdp_ring->next_to_clean;
- xdp_ring->next_to_use++;
- if (xdp_ring->next_to_use == xdp_ring->count)
- xdp_ring->next_to_use = 0;
- }
+ for (i = 0; i < tx_thresh; i++) {
+ tx_buf = &xdp_ring->tx_buf[ntc];
- if (tx_desc) {
- ice_xdp_ring_update_tail(xdp_ring);
- xsk_tx_release(xdp_ring->xsk_pool);
- }
+ if (tx_buf->raw_buf) {
+ ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
+ tx_buf->raw_buf = NULL;
+ } else {
+ xsk_frames++;
+ }
- return budget > 0 && work_done;
+ ntc++;
+ if (ntc >= xdp_ring->count)
+ ntc = 0;
+ }
+skip:
+ xdp_ring->next_to_clean += tx_thresh;
+ if (xdp_ring->next_to_clean >= desc_cnt)
+ xdp_ring->next_to_clean -= desc_cnt;
+ if (xsk_frames)
+ xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
+ next_dd_desc->cmd_type_offset_bsz = 0;
+ next_dd = next_dd + tx_thresh;
+ if (next_dd >= desc_cnt)
+ next_dd = tx_thresh - 1;
+ } while (budget--);
+
+ xdp_ring->next_dd = next_dd;
+
+ return cleared_dds * tx_thresh;
}
/**
- * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
- * @xdp_ring: XDP Tx ring
- * @tx_buf: Tx buffer to clean
+ * ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor
+ * @xdp_ring: XDP ring to produce the HW Tx descriptor on
+ * @desc: AF_XDP descriptor to pull the DMA address and length from
+ * @total_bytes: bytes accumulator that will be used for stats update
*/
-static void
-ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
+static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
+ unsigned int *total_bytes)
{
- xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
- dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
+ struct ice_tx_desc *tx_desc;
+ dma_addr_t dma;
+
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);
+
+ tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
+ 0, desc->len, 0);
+
+ *total_bytes += desc->len;
}
/**
- * ice_clean_tx_irq_zc - Completes AF_XDP entries, and cleans XDP entries
- * @xdp_ring: XDP Tx ring
- * @budget: NAPI budget
- *
- * Returns true if cleanup/tranmission is done.
+ * ice_xmit_pkt_batch - produce a batch of HW Tx descriptors out of AF_XDP descriptors
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
+ * @total_bytes: bytes accumulator that will be used for stats update
*/
-bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget)
+static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
+ unsigned int *total_bytes)
{
- int total_packets = 0, total_bytes = 0;
- s16 ntc = xdp_ring->next_to_clean;
+ u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
+ u16 ntu = xdp_ring->next_to_use;
struct ice_tx_desc *tx_desc;
- struct ice_tx_buf *tx_buf;
- u32 xsk_frames = 0;
- bool xmit_done;
+ u32 i;
- tx_desc = ICE_TX_DESC(xdp_ring, ntc);
- tx_buf = &xdp_ring->tx_buf[ntc];
- ntc -= xdp_ring->count;
+ loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
+ dma_addr_t dma;
- do {
- if (!(tx_desc->cmd_type_offset_bsz &
- cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
- break;
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, descs[i].addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, descs[i].len);
- total_bytes += tx_buf->bytecount;
- total_packets++;
+ tx_desc = ICE_TX_DESC(xdp_ring, ntu++);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
+ 0, descs[i].len, 0);
- if (tx_buf->raw_buf) {
- ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
- tx_buf->raw_buf = NULL;
- } else {
- xsk_frames++;
- }
+ *total_bytes += descs[i].len;
+ }
- tx_desc->cmd_type_offset_bsz = 0;
- tx_buf++;
- tx_desc++;
- ntc++;
+ xdp_ring->next_to_use = ntu;
- if (unlikely(!ntc)) {
- ntc -= xdp_ring->count;
- tx_buf = xdp_ring->tx_buf;
- tx_desc = ICE_TX_DESC(xdp_ring, 0);
- }
+ if (xdp_ring->next_to_use > xdp_ring->next_rs) {
+ tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
+ xdp_ring->next_rs += tx_thresh;
+ }
+}
- prefetch(tx_desc);
+/**
+ * ice_fill_tx_hw_ring - produce the number of Tx descriptors onto ring
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
+ * @nb_pkts: count of packets to be sent
+ * @total_bytes: bytes accumulator that will be used for stats update
+ */
+static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
+ u32 nb_pkts, unsigned int *total_bytes)
+{
+ u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
+ u32 batched, leftover, i;
+
+ batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
+ leftover = nb_pkts & (PKTS_PER_BATCH - 1);
+ for (i = 0; i < batched; i += PKTS_PER_BATCH)
+ ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
+ for (; i < batched + leftover; i++)
+ ice_xmit_pkt(xdp_ring, &descs[i], total_bytes);
+
+ if (xdp_ring->next_to_use > xdp_ring->next_rs) {
+ struct ice_tx_desc *tx_desc;
+
+ tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
+ xdp_ring->next_rs += tx_thresh;
+ }
+}
- } while (likely(--budget));
+/**
+ * ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @budget: number of free descriptors on HW Tx ring that can be used
+ * @napi_budget: number of descriptors that NAPI allows us to clean
+ *
+ * Returns true if there is no more work that needs to be done, false otherwise
+ */
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, u32 budget, int napi_budget)
+{
+ struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
+ u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
+ u32 nb_pkts, nb_processed = 0;
+ unsigned int total_bytes = 0;
+
+ if (budget < tx_thresh)
+ budget += ice_clean_xdp_irq_zc(xdp_ring, napi_budget);
+
+ nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
+ if (!nb_pkts)
+ return true;
+
+ if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
+ struct ice_tx_desc *tx_desc;
+
+ nb_processed = xdp_ring->count - xdp_ring->next_to_use;
+ ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
+ tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
+ xdp_ring->next_rs = tx_thresh - 1;
+ xdp_ring->next_to_use = 0;
+ }
- ntc += xdp_ring->count;
- xdp_ring->next_to_clean = ntc;
+ ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
+ &total_bytes);
- if (xsk_frames)
- xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
+ ice_xdp_ring_update_tail(xdp_ring);
+ ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);
if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);
- ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
- xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
-
- return budget > 0 && xmit_done;
+ return nb_pkts < budget;
}
/**
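
The new is_power_of_2() check in ice_xsk_pool_setup() is what makes the threshold arithmetic in ice_alloc_rx_bufs_zc() valid: ICE_RING_QUARTER() of a power-of-two ring is itself a power of two, so "count & (rx_thresh - 1)" equals "count % rx_thresh". A standalone sketch of that split (the example function and its pr_info() output are illustrative only, not part of the patch):

static void example_rx_alloc_split(u16 ring_count, u16 count)
{
	u16 rx_thresh = ring_count / 4;			/* ICE_RING_QUARTER() */
	u16 batched = ALIGN_DOWN(count, rx_thresh);	/* full-threshold chunks */
	u16 tail_bumps = batched / rx_thresh;
	u16 leftover = count & (rx_thresh - 1);		/* == count % rx_thresh */

	/* e.g. ring_count = 512, count = 300:
	 * rx_thresh = 128, batched = 256, tail_bumps = 2, leftover = 44,
	 * so the tail register is bumped twice for full batches and once
	 * more for the 44 leftover buffers.
	 */
	pr_info("%u bumps of %u buffers, then %u leftover\n",
		tail_bumps, rx_thresh, leftover);
}
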
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 4c7bd8e9dfc4..21faec8e97db 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -4,7 +4,16 @@
#ifndef _ICE_XSK_H_
#define _ICE_XSK_H_
#include "ice_txrx.h"
-#include "ice.h"
+
+#define PKTS_PER_BATCH 8
+
+#ifdef __clang__
+#define loop_unrolled_for _Pragma("clang loop unroll_count(8)") for
+#elif __GNUC__ >= 8
+#define loop_unrolled_for _Pragma("GCC unroll 8") for
+#else
+#define loop_unrolled_for for
+#endif
struct ice_vsi;
@@ -12,13 +21,21 @@ struct ice_vsi;
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
u16 qid);
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget);
-bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, u32 budget, int napi_budget);
#else
+static inline bool
+ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
+ u32 __always_unused budget,
+ int __always_unused napi_budget)
+{
+ return false;
+}
+
static inline int
ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
struct xsk_buff_pool __always_unused *pool,
@@ -35,13 +52,6 @@ ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
}
static inline bool
-ice_clean_tx_irq_zc(struct ice_tx_ring __always_unused *xdp_ring,
- int __always_unused budget)
-{
- return false;
-}
-
-static inline bool
ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring,
u16 __always_unused count)
{
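
The loop_unrolled_for helper added to ice_xsk.h is simply a compiler-specific unroll hint placed in front of an ordinary for statement, sized to match PKTS_PER_BATCH so the batch loop in ice_xmit_pkt_batch() can be fully unrolled. A small usage sketch; only the macro and PKTS_PER_BATCH come from the header, the function itself is hypothetical:

static u32 example_batch_sum(const u32 *lens)
{
	u32 sum = 0;
	u32 i;

	/* body is emitted up to 8 times when the compiler honors the hint */
	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++)
		sum += lens[i];

	return sum;
}
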
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 51a2dcaf553d..2a5782063f4c 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -965,10 +965,6 @@ static int igb_set_ringparam(struct net_device *netdev,
memcpy(&temp_ring[i], adapter->rx_ring[i],
sizeof(struct igb_ring));
- /* Clear copied XDP RX-queue info */
- memset(&temp_ring[i].xdp_rxq, 0,
- sizeof(temp_ring[i].xdp_rxq));
-
temp_ring[i].count = new_rx_count;
err = igb_setup_rx_resources(&temp_ring[i]);
if (err) {
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 38ba92022cd4..34b33b21e0dc 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3164,8 +3164,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
s32 ret_val;
static int global_quad_port_a; /* global quad port a indication */
const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
- int err, pci_using_dac;
u8 part_str[E1000_PBANUM_LENGTH];
+ int err;
/* Catch broken hardware that put the wrong VF device ID in
* the PCIe SR-IOV capability.
@@ -3180,17 +3180,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- pci_using_dac = 0;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
err = pci_request_mem_regions(pdev, igb_driver_name);
@@ -3306,8 +3300,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (hw->mac.type >= e1000_i350)
netdev->hw_features |= NETIF_F_NTUPLE;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_HW_CSUM;
@@ -4352,7 +4345,18 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
struct igb_adapter *adapter = netdev_priv(rx_ring->netdev);
struct device *dev = rx_ring->dev;
- int size;
+ int size, res;
+
+ /* XDP RX-queue info */
+ if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
+ rx_ring->queue_index, 0);
+ if (res < 0) {
+ dev_err(dev, "Failed to register xdp_rxq index %u\n",
+ rx_ring->queue_index);
+ return res;
+ }
size = sizeof(struct igb_rx_buffer) * rx_ring->count;
@@ -4375,14 +4379,10 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
rx_ring->xdp_prog = adapter->xdp_prog;
- /* XDP RX-queue info */
- if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
- rx_ring->queue_index, 0) < 0)
- goto err;
-
return 0;
err:
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 6580fcddb4be..02fec948ce64 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -165,23 +165,21 @@ static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,
unsigned long flags;
u64 ns;
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+
switch (adapter->hw.mac.type) {
case e1000_82576:
case e1000_82580:
case e1000_i354:
case e1000_i350:
spin_lock_irqsave(&adapter->tmreg_lock, flags);
-
ns = timecounter_cyc2time(&adapter->tc, systim);
-
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
- memset(hwtstamps, 0, sizeof(*hwtstamps));
hwtstamps->hwtstamp = ns_to_ktime(ns);
break;
case e1000_i210:
case e1000_i211:
- memset(hwtstamps, 0, sizeof(*hwtstamps));
/* Upper 32 bits contain s, lower 32 bits contain ns. */
hwtstamps->hwtstamp = ktime_set(systim >> 32,
systim & 0xFFFFFFFF);
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index b78407289741..43ced78c3a2e 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2684,25 +2684,18 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct igbvf_adapter *adapter;
struct e1000_hw *hw;
const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];
-
static int cards_found;
- int err, pci_using_dac;
+ int err;
err = pci_enable_device_mem(pdev);
if (err)
return err;
- pci_using_dac = 0;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
err = pci_request_regions(pdev, igbvf_driver_name);
@@ -2783,10 +2776,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_GSO_PARTIAL |
IGBVF_GSO_PARTIAL_FEATURES;
- netdev->features = netdev->hw_features;
-
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features = netdev->hw_features | NETIF_F_HIGHDMA;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_HW_CSUM;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 2f17f36e94fd..74b2c590ed5d 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -505,6 +505,9 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring)
u8 index = rx_ring->queue_index;
int size, desc_len, res;
+ /* XDP RX-queue info */
+ if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index,
rx_ring->q_vector->napi.napi_id);
if (res < 0) {
@@ -2446,19 +2449,20 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
struct xdp_buff *xdp)
{
+ unsigned int totalsize = xdp->data_end - xdp->data_meta;
unsigned int metasize = xdp->data - xdp->data_meta;
- unsigned int datasize = xdp->data_end - xdp->data;
- unsigned int totalsize = metasize + datasize;
struct sk_buff *skb;
- skb = __napi_alloc_skb(&ring->q_vector->napi,
- xdp->data_end - xdp->data_hard_start,
+ net_prefetch(xdp->data_meta);
+
+ skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
- memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize);
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+ ALIGN(totalsize, sizeof(long)));
+
if (metasize) {
skb_metadata_set(skb, metasize);
__skb_pull(skb, metasize);
@@ -6251,23 +6255,17 @@ static int igc_probe(struct pci_dev *pdev,
struct net_device *netdev;
struct igc_hw *hw;
const struct igc_info *ei = igc_info_tbl[ent->driver_data];
- int err, pci_using_dac;
+ int err;
err = pci_enable_device_mem(pdev);
if (err)
return err;
- pci_using_dac = 0;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
err = pci_request_mem_regions(pdev, igc_driver_name);
@@ -6367,8 +6365,7 @@ static int igc_probe(struct pci_dev *pdev,
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
netdev->hw_features |= netdev->features;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_HW_CSUM;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 99d481904ce6..affdefcca7e3 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -361,7 +361,6 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *netdev = NULL;
struct ixgb_adapter *adapter;
static int cards_found = 0;
- int pci_using_dac;
u8 addr[ETH_ALEN];
int i;
int err;
@@ -370,16 +369,10 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- pci_using_dac = 0;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- pr_err("No usable DMA configuration, aborting\n");
- goto err_dma_mask;
- }
+ if (err) {
+ pr_err("No usable DMA configuration, aborting\n");
+ goto err_dma_mask;
}
err = pci_request_regions(pdev, ixgb_driver_name);
@@ -444,10 +437,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_RXCSUM;
- if (pci_using_dac) {
- netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_HIGHDMA;
- }
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
/* MTU range: 68 - 16114 */
netdev->min_mtu = ETH_MIN_MTU;
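
The igb, igbvf, igc, and ixgb probe changes above all drop the same boilerplate: a failed 64-bit dma_set_mask_and_coherent() is now treated as fatal rather than retried with a 32-bit mask, so the pci_using_dac bookkeeping disappears and NETIF_F_HIGHDMA is advertised unconditionally. A condensed sketch of the resulting probe pattern (the helper is hypothetical; the error string mirrors the drivers above):

static int example_probe_dma(struct pci_dev *pdev, struct net_device *netdev)
{
	int err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		return err;
	}

	/* no pci_using_dac flag any more - 64-bit DMA implies HIGHDMA */
	netdev->features |= NETIF_F_HIGHDMA;
	return 0;
}
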
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 4a69823e6abd..921a4d977d65 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -177,11 +177,14 @@ struct vf_data_storage {
u16 pf_vlan; /* When set, guest VLAN config not allowed. */
u16 pf_qos;
u16 tx_rate;
+ int link_enable;
+ int link_state;
u8 spoofchk_enabled;
bool rss_query_enabled;
u8 trusted;
int xcast_mode;
unsigned int vf_api;
+ u8 primary_abort_count;
};
enum ixgbevf_xcast_modes {
@@ -556,6 +559,8 @@ struct ixgbe_mac_addr {
#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
#define IXGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */
+#define IXGBE_PRIMARY_ABORT_LIMIT 5
+
/* board specific private data structure */
struct ixgbe_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -614,6 +619,7 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_RX_LEGACY BIT(16)
#define IXGBE_FLAG2_IPSEC_ENABLED BIT(17)
#define IXGBE_FLAG2_VF_IPSEC_ENABLED BIT(18)
+#define IXGBE_FLAG2_AUTO_DISABLE_VF BIT(19)
/* Tx fast path data */
int num_tx_queues;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index e90b5047e695..4c26c4b92f07 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -30,7 +30,7 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
u16 offset);
-static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
+static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw);
/* Base table for registers values that change by MAC */
const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = {
@@ -746,10 +746,10 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
usleep_range(1000, 2000);
/*
- * Prevent the PCI-E bus from from hanging by disabling PCI-E master
+ * Prevent the PCI-E bus from hanging by disabling PCI-E primary
* access and verify no pending requests
*/
- return ixgbe_disable_pcie_master(hw);
+ return ixgbe_disable_pcie_primary(hw);
}
/**
@@ -2506,15 +2506,15 @@ static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
}
/**
- * ixgbe_disable_pcie_master - Disable PCI-express master access
+ * ixgbe_disable_pcie_primary - Disable PCI-express primary access
* @hw: pointer to hardware structure
*
- * Disables PCI-Express master access and verifies there are no pending
- * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
- * bit hasn't caused the master requests to be disabled, else 0
- * is returned signifying master requests disabled.
+ * Disables PCI-Express primary access and verifies there are no pending
+ * requests. IXGBE_ERR_PRIMARY_REQUESTS_PENDING is returned if primary disable
+ * bit hasn't caused the primary requests to be disabled, else 0
+ * is returned signifying primary requests disabled.
**/
-static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
+static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw)
{
u32 i, poll;
u16 value;
@@ -2523,23 +2523,23 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
/* Poll for bit to read as set */
- for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+ for (i = 0; i < IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT; i++) {
if (IXGBE_READ_REG(hw, IXGBE_CTRL) & IXGBE_CTRL_GIO_DIS)
break;
usleep_range(100, 120);
}
- if (i >= IXGBE_PCI_MASTER_DISABLE_TIMEOUT) {
+ if (i >= IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT) {
hw_dbg(hw, "GIO disable did not set - requesting resets\n");
goto gio_disable_fail;
}
- /* Exit if master requests are blocked */
+ /* Exit if primary requests are blocked */
if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
ixgbe_removed(hw->hw_addr))
return 0;
- /* Poll for master request bit to clear */
- for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+ /* Poll for primary request bit to clear */
+ for (i = 0; i < IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT; i++) {
udelay(100);
if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
return 0;
@@ -2547,13 +2547,13 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
/*
* Two consecutive resets are required via CTRL.RST per datasheet
- * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
- * of this need. The first reset prevents new master requests from
+ * 5.2.5.3.2 Primary Disable. We set a flag to inform the reset routine
+ * of this need. The first reset prevents new primary requests from
* being issued by our device. We then must wait 1usec or more for any
* remaining completions from the PCIe bus to trickle in, and then reset
* again to clear out any effects they may have had on our device.
*/
- hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
+ hw_dbg(hw, "GIO Primary Disable bit didn't clear - requesting resets\n");
gio_disable_fail:
hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
@@ -2575,7 +2575,7 @@ gio_disable_fail:
}
hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
- return IXGBE_ERR_MASTER_REQUESTS_PENDING;
+ return IXGBE_ERR_PRIMARY_REQUESTS_PENDING;
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index f70967c32116..628d0eb0599f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -138,6 +138,8 @@ static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
"legacy-rx",
#define IXGBE_PRIV_FLAGS_VF_IPSEC_EN BIT(1)
"vf-ipsec",
+#define IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF BIT(2)
+ "mdd-disable-vf",
};
#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
@@ -3510,6 +3512,9 @@ static u32 ixgbe_get_priv_flags(struct net_device *netdev)
if (adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)
priv_flags |= IXGBE_PRIV_FLAGS_VF_IPSEC_EN;
+ if (adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF)
+ priv_flags |= IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF;
+
return priv_flags;
}
@@ -3517,6 +3522,7 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
unsigned int flags2 = adapter->flags2;
+ unsigned int i;
flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
@@ -3526,6 +3532,21 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
if (priv_flags & IXGBE_PRIV_FLAGS_VF_IPSEC_EN)
flags2 |= IXGBE_FLAG2_VF_IPSEC_ENABLED;
+ flags2 &= ~IXGBE_FLAG2_AUTO_DISABLE_VF;
+ if (priv_flags & IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF) {
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ /* Reset primary abort counter */
+ for (i = 0; i < adapter->num_vfs; i++)
+ adapter->vfinfo[i].primary_abort_count = 0;
+
+ flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF;
+ } else {
+ e_info(probe,
+ "Cannot set private flags: Operation not supported\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
if (flags2 != adapter->flags2) {
adapter->flags2 = flags2;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 89b467006291..c4a4954aa317 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -5687,6 +5687,9 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+
+ /* update Rx/Tx settings for all active VFs */
+ ixgbe_set_all_vfs(adapter);
}
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
@@ -5948,8 +5951,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
case IXGBE_ERR_SFP_NOT_PRESENT:
case IXGBE_ERR_SFP_NOT_SUPPORTED:
break;
- case IXGBE_ERR_MASTER_REQUESTS_PENDING:
- e_dev_err("master disable timed out\n");
+ case IXGBE_ERR_PRIMARY_REQUESTS_PENDING:
+ e_dev_err("primary disable timed out\n");
break;
case IXGBE_ERR_EEPROM_VERSION:
/* We are running on a pre-production device, log a warning */
@@ -6144,11 +6147,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
for (i = 0 ; i < adapter->num_vfs; i++)
adapter->vfinfo[i].clear_to_send = false;
- /* ping all the active vfs to let them know we are going down */
- ixgbe_ping_all_vfs(adapter);
-
- /* Disable all VFTE/VFRE TX/RX */
- ixgbe_disable_tx_rx(adapter);
+ /* update Rx/Tx settings for all active VFs */
+ ixgbe_set_all_vfs(adapter);
}
/* disable transmits in the hardware now that interrupts are off */
@@ -7613,6 +7613,27 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
}
#ifdef CONFIG_PCI_IOV
+static void ixgbe_bad_vf_abort(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
+ adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF) {
+ adapter->vfinfo[vf].primary_abort_count++;
+ if (adapter->vfinfo[vf].primary_abort_count ==
+ IXGBE_PRIMARY_ABORT_LIMIT) {
+ ixgbe_set_vf_link_state(adapter, vf,
+ IFLA_VF_LINK_STATE_DISABLE);
+ adapter->vfinfo[vf].primary_abort_count = 0;
+
+ e_info(drv,
+ "Malicious Driver Detection event detected on PF %d VF %d MAC: %pM mdd-disable-vf=on",
+ hw->bus.func, vf,
+ adapter->vfinfo[vf].vf_mac_addresses);
+ }
+ }
+}
+
static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -7644,8 +7665,10 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
continue;
pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
- status_reg & PCI_STATUS_REC_MASTER_ABORT)
+ status_reg & PCI_STATUS_REC_MASTER_ABORT) {
+ ixgbe_bad_vf_abort(adapter, vf);
pcie_flr(vfdev);
+ }
}
}
@@ -10284,6 +10307,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
.ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
.ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
+ .ndo_set_vf_link_state = ixgbe_ndo_set_vf_link_state,
.ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
.ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
@@ -10632,9 +10656,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ixgbe_adapter *adapter = NULL;
struct ixgbe_hw *hw;
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
- int i, err, pci_using_dac, expected_gts;
unsigned int indices = MAX_TX_QUEUES;
u8 part_str[IXGBE_PBANUM_LENGTH];
+ int i, err, expected_gts;
bool disable_dev = false;
#ifdef IXGBE_FCOE
u16 device_caps;
@@ -10654,16 +10678,11 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
- pci_using_dac = 0;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
err = pci_request_mem_regions(pdev, ixgbe_driver_name);
@@ -10750,6 +10769,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ adapter->flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF;
+
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
@@ -10861,8 +10883,7 @@ skip_sriov:
netdev->hw_features |= NETIF_F_NTUPLE |
NETIF_F_HW_TC;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->hw_enc_features |= netdev->vlan_features;
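For context on the new auto-disable path in ixgbe_main.c: the PF periodically reads each VF's PCI status word and treats a received master abort as a sign of a malfunctioning VF. A minimal sketch of that detection step, under the assumption of a generic VF pci_dev (the helper name and the all-ones sentinel are illustrative, not the driver's exact code):

#include <linux/pci.h>

/* Illustrative only: did this VF function record a received master abort? */
static bool vf_saw_master_abort(struct pci_dev *vfdev)
{
	u16 status;

	pci_read_config_word(vfdev, PCI_STATUS, &status);

	/* A failed config read returns all-ones; ignore that case. */
	if (status == 0xffff)
		return false;

	return status & PCI_STATUS_REC_MASTER_ABORT;
}

In the hunk above the same check feeds a per-VF abort counter, and once the counter reaches IXGBE_PRIMARY_ABORT_LIMIT the VF link is forced to IFLA_VF_LINK_STATE_DISABLE before the function-level reset is issued.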
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index a148534d7256..8f4316b19278 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -85,6 +85,8 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_IPSEC_ADD 0x0d
#define IXGBE_VF_IPSEC_DEL 0x0e
+#define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 214a38de3f41..7f11c0a8e7a9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -96,6 +96,7 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
for (i = 0; i < num_vfs; i++) {
/* enable spoof checking for all VFs */
adapter->vfinfo[i].spoofchk_enabled = true;
+ adapter->vfinfo[i].link_enable = true;
/* We support VF RSS querying only for 82599 and x540
* devices at the moment. These devices share RSS
@@ -820,6 +821,57 @@ static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf,
}
}
+/**
+ * ixgbe_set_vf_rx_tx - Set VF Rx/Tx enable state
+ * @adapter: Pointer to adapter struct
+ * @vf: VF identifier
+ *
+ * Enable or disable transmit and receive for the given VF, based on its link state
+ **/
+static void ixgbe_set_vf_rx_tx(struct ixgbe_adapter *adapter, int vf)
+{
+ u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg_offset, vf_shift;
+
+ vf_shift = vf % 32;
+ reg_offset = vf / 32;
+
+ reg_cur_tx = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
+ reg_cur_rx = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
+
+ if (adapter->vfinfo[vf].link_enable) {
+ reg_req_tx = reg_cur_tx | 1 << vf_shift;
+ reg_req_rx = reg_cur_rx | 1 << vf_shift;
+ } else {
+ reg_req_tx = reg_cur_tx & ~(1 << vf_shift);
+ reg_req_rx = reg_cur_rx & ~(1 << vf_shift);
+ }
+
+ /* The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
+ * For more info take a look at ixgbe_set_vf_lpe
+ */
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ struct net_device *dev = adapter->netdev;
+ int pf_max_frame = dev->mtu + ETH_HLEN;
+
+#if IS_ENABLED(CONFIG_FCOE)
+ if (dev->features & NETIF_F_FCOE_MTU)
+ pf_max_frame = max_t(int, pf_max_frame,
+ IXGBE_FCOE_JUMBO_FRAME_SIZE);
+#endif /* CONFIG_FCOE */
+
+ if (pf_max_frame > ETH_FRAME_LEN)
+ reg_req_rx = reg_cur_rx & ~(1 << vf_shift);
+ }
+
+ /* Enable/Disable particular VF */
+ if (reg_cur_tx != reg_req_tx)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg_req_tx);
+ if (reg_cur_rx != reg_req_rx)
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg_req_rx);
+}
+
static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
@@ -845,11 +897,6 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
vf_shift = vf % 32;
reg_offset = vf / 32;
- /* enable transmit for vf */
- reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
- reg |= BIT(vf_shift);
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
-
/* force drop enable for all VF Rx queues */
reg = IXGBE_QDE_ENABLE;
if (adapter->vfinfo[vf].pf_vlan)
@@ -857,27 +904,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
ixgbe_write_qde(adapter, vf, reg);
- /* enable receive for vf */
- reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
- reg |= BIT(vf_shift);
- /*
- * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
- * For more info take a look at ixgbe_set_vf_lpe
- */
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- struct net_device *dev = adapter->netdev;
- int pf_max_frame = dev->mtu + ETH_HLEN;
-
-#ifdef CONFIG_FCOE
- if (dev->features & NETIF_F_FCOE_MTU)
- pf_max_frame = max_t(int, pf_max_frame,
- IXGBE_FCOE_JUMBO_FRAME_SIZE);
-
-#endif /* CONFIG_FCOE */
- if (pf_max_frame > ETH_FRAME_LEN)
- reg &= ~BIT(vf_shift);
- }
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
+ ixgbe_set_vf_rx_tx(adapter, vf);
/* enable VF mailbox for further messages */
adapter->vfinfo[vf].clear_to_send = true;
@@ -1202,6 +1229,26 @@ out:
return 0;
}
+static int ixgbe_get_vf_link_state(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ u32 *link_state = &msgbuf[1];
+
+ /* verify the PF is supporting the correct API */
+ switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_12:
+ case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_14:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ *link_state = adapter->vfinfo[vf].link_enable;
+
+ return 0;
+}
+
static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
@@ -1267,6 +1314,9 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
case IXGBE_VF_UPDATE_XCAST_MODE:
retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf);
break;
+ case IXGBE_VF_GET_LINK_STATE:
+ retval = ixgbe_get_vf_link_state(adapter, msgbuf, vf);
+ break;
case IXGBE_VF_IPSEC_ADD:
retval = ixgbe_ipsec_vf_add_sa(adapter, msgbuf, vf);
break;
@@ -1322,18 +1372,6 @@ void ixgbe_msg_task(struct ixgbe_adapter *adapter)
}
}
-void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
-
- /* disable transmit and receive for all vfs */
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
-
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
-}
-
static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -1359,6 +1397,21 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
}
}
+/**
+ * ixgbe_set_all_vfs - update Rx/Tx settings for all VFs
+ * @adapter: Pointer to adapter struct
+ *
+ * Re-apply the transmit and receive enable settings for all active VFs
+ **/
+void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter)
+{
+ int i;
+
+ for (i = 0 ; i < adapter->num_vfs; i++)
+ ixgbe_set_vf_link_state(adapter, i,
+ adapter->vfinfo[i].link_state);
+}
+
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -1656,6 +1709,84 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
return 0;
}
+/**
+ * ixgbe_set_vf_link_state - Set link state
+ * @adapter: Pointer to adapter struct
+ * @vf: VF identifier
+ * @state: required link state
+ *
+ * Force the link state on or off for a single VF
+ **/
+void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state)
+{
+ adapter->vfinfo[vf].link_state = state;
+
+ switch (state) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ if (test_bit(__IXGBE_DOWN, &adapter->state))
+ adapter->vfinfo[vf].link_enable = false;
+ else
+ adapter->vfinfo[vf].link_enable = true;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ adapter->vfinfo[vf].link_enable = true;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ adapter->vfinfo[vf].link_enable = false;
+ break;
+ }
+
+ ixgbe_set_vf_rx_tx(adapter, vf);
+
+ /* restart the VF */
+ adapter->vfinfo[vf].clear_to_send = false;
+ ixgbe_ping_vf(adapter, vf);
+}
+
+/**
+ * ixgbe_ndo_set_vf_link_state - Set link state
+ * @netdev: network interface device structure
+ * @vf: VF identifier
+ * @state: required link state
+ *
+ * Set the link state of a specified VF, regardless of physical link state
+ **/
+int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int ret = 0;
+
+ if (vf < 0 || vf >= adapter->num_vfs) {
+ dev_err(&adapter->pdev->dev,
+ "NDO set VF link - invalid VF identifier %d\n", vf);
+ return -EINVAL;
+ }
+
+ switch (state) {
+ case IFLA_VF_LINK_STATE_ENABLE:
+ dev_info(&adapter->pdev->dev,
+ "NDO set VF %d link state %d - not supported\n",
+ vf, state);
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ dev_info(&adapter->pdev->dev,
+ "NDO set VF %d link state disable\n", vf);
+ ixgbe_set_vf_link_state(adapter, vf, state);
+ break;
+ case IFLA_VF_LINK_STATE_AUTO:
+ dev_info(&adapter->pdev->dev,
+ "NDO set VF %d link state auto\n", vf);
+ ixgbe_set_vf_link_state(adapter, vf, state);
+ break;
+ default:
+ dev_err(&adapter->pdev->dev,
+ "NDO set VF %d - invalid link state %d\n", vf, state);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
bool setting)
{
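The VFRE/VFTE pool-enable registers manipulated in ixgbe_set_vf_rx_tx() hold one enable bit per VF, 32 VFs per 32-bit register, which is why the code derives a register offset and a bit position from the VF number. A minimal sketch of that mapping (the helper name is illustrative; only the arithmetic is the driver's):

/* Illustrative: which enable register and which bit correspond to a VF. */
static void vf_enable_bit(unsigned int vf, u32 *reg_offset, u32 *mask)
{
	*reg_offset = vf / 32;		/* register 0 covers VFs 0-31, register 1 covers 32-63 */
	*mask = 1U << (vf % 32);	/* bit within that register */
}

For example, VF 40 lands in VFRE(1)/VFTE(1) with bit 8 set or cleared depending on link_enable.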
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 3ec21923c89c..0690ecb8dfa3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -17,8 +17,8 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
#endif
void ixgbe_msg_task(struct ixgbe_adapter *adapter);
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
-void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
+void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
u8 qos, __be16 vlan_proto);
@@ -31,7 +31,9 @@ int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi);
+int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state);
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
+void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state);
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 2647937f7f4d..6da9880d766a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1247,7 +1247,7 @@ struct ixgbe_nvm_version {
#define IXGBE_PSRTYPE_RQPL_SHIFT 29
/* CTRL Bit Masks */
-#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */
+#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Primary Disable bit */
#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */
#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)
@@ -1811,7 +1811,7 @@ enum {
/* STATUS Bit Masks */
#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */
#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/
-#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Enable Status */
+#define IXGBE_STATUS_GIO 0x00080000 /* GIO Primary Enable Status */
#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */
#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */
@@ -2193,8 +2193,8 @@ enum {
#define IXGBE_PCIDEVCTRL2_4_8s 0xd
#define IXGBE_PCIDEVCTRL2_17_34s 0xe
-/* Number of 100 microseconds we wait for PCI Express master disable */
-#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
+/* Number of 100 microseconds we wait for PCI Express primary disable */
+#define IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT 800
/* RAH */
#define IXGBE_RAH_VIND_MASK 0x003C0000
@@ -3671,7 +3671,7 @@ struct ixgbe_info {
#define IXGBE_ERR_ADAPTER_STOPPED -9
#define IXGBE_ERR_INVALID_MAC_ADDR -10
#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11
-#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12
+#define IXGBE_ERR_PRIMARY_REQUESTS_PENDING -12
#define IXGBE_ERR_INVALID_LINK_SETTINGS -13
#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14
#define IXGBE_ERR_RESET_FAILED -15
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 6a5e9cf6b5da..dd7ff66d422f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -207,26 +207,28 @@ bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
}
static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
- struct ixgbe_rx_buffer *bi)
+ const struct xdp_buff *xdp)
{
- unsigned int metasize = bi->xdp->data - bi->xdp->data_meta;
- unsigned int datasize = bi->xdp->data_end - bi->xdp->data;
+ unsigned int totalsize = xdp->data_end - xdp->data_meta;
+ unsigned int metasize = xdp->data - xdp->data_meta;
struct sk_buff *skb;
+ net_prefetch(xdp->data_meta);
+
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- bi->xdp->data_end - bi->xdp->data_hard_start,
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, bi->xdp->data - bi->xdp->data_hard_start);
- memcpy(__skb_put(skb, datasize), bi->xdp->data, datasize);
- if (metasize)
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+ ALIGN(totalsize, sizeof(long)));
+
+ if (metasize) {
skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
- xsk_buff_free(bi->xdp);
- bi->xdp = NULL;
return skb;
}
@@ -317,12 +319,15 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
}
/* XDP_PASS path */
- skb = ixgbe_construct_skb_zc(rx_ring, bi);
+ skb = ixgbe_construct_skb_zc(rx_ring, bi->xdp);
if (!skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
break;
}
+ xsk_buff_free(bi->xdp);
+ bi->xdp = NULL;
+
cleaned_count++;
ixgbe_inc_ntc(rx_ring);
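A worked example may help with the copy-then-pull scheme in the new ixgbe_construct_skb_zc(): the whole region from data_meta to data_end is copied into the skb, the metadata length is recorded, and then pulled so skb->data lands on the frame itself. The numbers below are purely illustrative and assume a 64-bit long:

/* Illustrative sizes: 8 bytes of XDP metadata in front of a 60-byte frame. */
static void zc_skb_layout_example(void)
{
	unsigned int metasize  = 8;		/* xdp->data - xdp->data_meta */
	unsigned int totalsize = 8 + 60;	/* xdp->data_end - xdp->data_meta */

	/* The memcpy length is rounded up to a long boundary:
	 * ALIGN(68, sizeof(long)) == 72 here.  After skb_put(skb, 68),
	 * skb_metadata_set(skb, 8) and __skb_pull(skb, 8), skb->data points
	 * at the frame, skb->len == 60, and the metadata remains in the
	 * 8 bytes immediately before skb->data. */
	(void)metasize;
	(void)totalsize;
}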
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index e257390a4f6a..149c733fcc2b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -387,6 +387,8 @@ struct ixgbevf_adapter {
u32 *rss_key;
u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE];
u32 flags;
+ bool link_state;
+
#define IXGBEVF_FLAGS_LEGACY_RX BIT(1)
#ifdef CONFIG_XFRM
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 0f293acd17e8..55b87bc3a938 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2298,7 +2298,9 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
struct ixgbe_hw *hw = &adapter->hw;
+ bool state;
ixgbevf_configure_msix(adapter);
@@ -2311,6 +2313,11 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
spin_unlock_bh(&adapter->mbx_lock);
+ state = adapter->link_state;
+ hw->mac.ops.get_link_state(hw, &adapter->link_state);
+ if (state && state != adapter->link_state)
+ dev_info(&pdev->dev, "VF is administratively disabled\n");
+
smp_mb__before_atomic();
clear_bit(__IXGBEVF_DOWN, &adapter->state);
ixgbevf_napi_enable_all(adapter);
@@ -2753,7 +2760,7 @@ static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
ring->reg_idx = reg_idx;
/* assign ring to adapter */
- adapter->tx_ring[txr_idx] = ring;
+ adapter->tx_ring[txr_idx] = ring;
/* update count and index */
txr_count--;
@@ -3081,6 +3088,8 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
+ adapter->link_state = true;
+
set_bit(__IXGBEVF_DOWN, &adapter->state);
return 0;
@@ -3313,7 +3322,7 @@ static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
ixgbevf_watchdog_update_link(adapter);
- if (adapter->link_up)
+ if (adapter->link_up && adapter->link_state)
ixgbevf_watchdog_link_is_up(adapter);
else
ixgbevf_watchdog_link_is_down(adapter);
@@ -4512,22 +4521,17 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ixgbevf_adapter *adapter = NULL;
struct ixgbe_hw *hw = NULL;
const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
- int err, pci_using_dac;
bool disable_dev = false;
+ int err;
err = pci_enable_device(pdev);
if (err)
return err;
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
- pci_using_dac = 0;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
err = pci_request_regions(pdev, ixgbevf_driver_name);
@@ -4607,10 +4611,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_GSO_PARTIAL |
IXGBEVF_GSO_PARTIAL_FEATURES;
- netdev->features = netdev->hw_features;
-
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features = netdev->hw_features | NETIF_F_HIGHDMA;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_SG |
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 7346ccf014a5..835bbcc5cc8e 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -100,6 +100,8 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_IPSEC_ADD 0x0d
#define IXGBE_VF_IPSEC_DEL 0x0e
+#define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 61d8970c6d1d..68fc32e36e88 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -585,6 +585,46 @@ static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
}
/**
+ * ixgbevf_get_link_state_vf - Get VF link state from PF
+ * @hw: pointer to the HW structure
+ * @link_state: link state storage
+ *
+ * Returns 0 on success or an error code on failure.
+ */
+static s32 ixgbevf_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
+{
+ u32 msgbuf[2];
+ s32 ret_val;
+ s32 err;
+
+ msgbuf[0] = IXGBE_VF_GET_LINK_STATE;
+ msgbuf[1] = 0x0;
+
+ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+
+ if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
+ ret_val = IXGBE_ERR_MBX;
+ } else {
+ ret_val = 0;
+ *link_state = msgbuf[1];
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_hv_get_link_state_vf - Hyper-V variant - just a stub.
+ * @hw: unused
+ * @link_state: unused
+ *
+ * Hyper-V variant; there is no mailbox communication.
+ */
+static s32 ixgbevf_hv_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
* ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
* @hw: pointer to the HW structure
* @vlan: 12 bit VLAN ID
@@ -968,6 +1008,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
.set_rar = ixgbevf_set_rar_vf,
.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
.update_xcast_mode = ixgbevf_update_xcast_mode,
+ .get_link_state = ixgbevf_get_link_state_vf,
.set_uc_addr = ixgbevf_set_uc_addr_vf,
.set_vfta = ixgbevf_set_vfta_vf,
.set_rlpml = ixgbevf_set_rlpml_vf,
@@ -985,6 +1026,7 @@ static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
.set_rar = ixgbevf_hv_set_rar_vf,
.update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf,
.update_xcast_mode = ixgbevf_hv_update_xcast_mode,
+ .get_link_state = ixgbevf_hv_get_link_state_vf,
.set_uc_addr = ixgbevf_hv_set_uc_addr_vf,
.set_vfta = ixgbevf_hv_set_vfta_vf,
.set_rlpml = ixgbevf_hv_set_rlpml_vf,
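On the VF side, the new mailbox exchange is a two-word request/response: word 0 carries the IXGBE_VF_GET_LINK_STATE opcode and word 1 comes back holding the PF's link_enable value (or the failure bit is set when the negotiated mailbox API is too old). A minimal sketch of a caller, assuming an ixgbevf hw/adapter pair as in ixgbevf_up_complete() above, not the driver's exact call site:

/* Illustrative: ask the PF whether this VF is administratively disabled. */
bool link_enabled = true;

if (hw->mac.ops.get_link_state &&
    !hw->mac.ops.get_link_state(hw, &link_enabled) && !link_enabled)
	dev_info(&adapter->pdev->dev, "VF is administratively disabled\n");

On Hyper-V the stub returns -EOPNOTSUPP, so the check above simply falls through without changing behaviour.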
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 54158dac8707..b4eef5b6c172 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -39,6 +39,7 @@ struct ixgbe_mac_operations {
s32 (*init_rx_addrs)(struct ixgbe_hw *);
s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
s32 (*update_xcast_mode)(struct ixgbe_hw *, int);
+ s32 (*get_link_state)(struct ixgbe_hw *hw, bool *link_state);
s32 (*enable_mc)(struct ixgbe_hw *);
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 439674fc9765..b6c5122da995 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -28,6 +28,7 @@
#include <linux/udp.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
+#include <linux/jiffies.h>
#include <net/ip6_checksum.h>
#include "jme.h"
@@ -2179,7 +2180,7 @@ jme_stop_queue_if_full(struct jme_adapter *jme)
}
if (unlikely(txbi->start_xmit &&
- (jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
+ time_is_before_eq_jiffies(txbi->start_xmit + TX_TIMEOUT) &&
txbi->skb)) {
netif_stop_queue(jme->dev);
netif_info(jme, tx_queued, jme->dev,
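The jme change swaps an open-coded jiffies subtraction for the time_is_* helper: time_is_before_eq_jiffies(a) is true once the timestamp a is no longer in the future, so the new test reads as "the deadline start_xmit + TX_TIMEOUT has passed", which is the same condition the subtraction expressed, with wraparound handled by the jiffies macros. A minimal sketch of the pattern (the timeout constant here is illustrative):

#include <linux/jiffies.h>

#define EXAMPLE_TIMEOUT (HZ / 2)	/* illustrative: half a second */

static bool deadline_passed(unsigned long start)
{
	/* true once "start + EXAMPLE_TIMEOUT" is now or in the past;
	 * the jiffies helpers handle counter wraparound internally. */
	return time_is_before_eq_jiffies(start + EXAMPLE_TIMEOUT);
}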
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 143ca8be5eb5..5f9ab1842d49 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1661,7 +1661,7 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er,
if (er->rx_mini_pending || er->rx_jumbo_pending)
return -EINVAL;
- mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
+ mp->rx_ring_size = min(er->rx_pending, 4096U);
mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending,
MV643XX_MAX_SKB_DESCS * 2, 4096);
if (mp->tx_ring_size != er->tx_pending)
@@ -3092,8 +3092,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
struct mv643xx_eth_private *mp;
struct net_device *dev;
struct phy_device *phydev = NULL;
- struct resource *res;
- int err;
+ int err, irq;
pd = dev_get_platdata(&pdev->dev);
if (pd == NULL) {
@@ -3189,9 +3188,12 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
timer_setup(&mp->rx_oom, oom_timer_wrapper, 0);
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- BUG_ON(!res);
- dev->irq = res->start;
+ irq = platform_get_irq(pdev, 0);
+ if (WARN_ON(irq < 0)) {
+ err = irq;
+ goto out;
+ }
+ dev->irq = irq;
dev->netdev_ops = &mv643xx_eth_netdev_ops;
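The mv643xx_eth probe change replaces the platform_get_resource(IORESOURCE_IRQ) + BUG_ON() pair with platform_get_irq(), which returns a negative errno (possibly -EPROBE_DEFER) instead of crashing when no interrupt is available. A minimal sketch of the preferred probe-time pattern, with illustrative names:

static int example_probe(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* may be -EPROBE_DEFER; just propagate it */

	/* ... request_irq(irq, ...) or store irq for later use ... */
	return 0;
}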
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 83c8908f0cc7..934f6dd90992 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -76,6 +76,8 @@
#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE 0x2290
+#define MVNETA_AC5_CNM_DDR_TARGET 0x2
+#define MVNETA_AC5_CNM_DDR_ATTR 0xb
#define MVNETA_ACCESS_PROTECT_ENABLE 0x2294
#define MVNETA_PORT_CONFIG 0x2400
#define MVNETA_UNI_PROMISC_MODE BIT(0)
@@ -544,6 +546,7 @@ struct mvneta_port {
/* Flags for special SoC configurations */
bool neta_armada3700;
+ bool neta_ac5;
u16 rx_offset_correction;
const struct mbus_dram_target_info *dram_target_info;
};
@@ -1884,8 +1887,8 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
bytes_compl += buf->skb->len;
pkts_compl++;
dev_kfree_skb_any(buf->skb);
- } else if (buf->type == MVNETA_TYPE_XDP_TX ||
- buf->type == MVNETA_TYPE_XDP_NDO) {
+ } else if ((buf->type == MVNETA_TYPE_XDP_TX ||
+ buf->type == MVNETA_TYPE_XDP_NDO) && buf->xdpf) {
if (napi && buf->type == MVNETA_TYPE_XDP_TX)
xdp_return_frame_rx_napi(buf->xdpf);
else
@@ -2060,61 +2063,104 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
static void
mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
- struct xdp_buff *xdp, struct skb_shared_info *sinfo,
- int sync_len)
+ struct xdp_buff *xdp, int sync_len)
{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
int i;
+ if (likely(!xdp_buff_has_frags(xdp)))
+ goto out;
+
for (i = 0; i < sinfo->nr_frags; i++)
page_pool_put_full_page(rxq->page_pool,
skb_frag_page(&sinfo->frags[i]), true);
+
+out:
page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
sync_len, true);
}
static int
mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
- struct xdp_frame *xdpf, bool dma_map)
+ struct xdp_frame *xdpf, int *nxmit_byte, bool dma_map)
{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ struct device *dev = pp->dev->dev.parent;
struct mvneta_tx_desc *tx_desc;
- struct mvneta_tx_buf *buf;
- dma_addr_t dma_addr;
+ int i, num_frames = 1;
+ struct page *page;
+
+ if (unlikely(xdp_frame_has_frags(xdpf)))
+ num_frames += sinfo->nr_frags;
- if (txq->count >= txq->tx_stop_threshold)
+ if (txq->count + num_frames >= txq->size)
return MVNETA_XDP_DROPPED;
- tx_desc = mvneta_txq_next_desc_get(txq);
+ for (i = 0; i < num_frames; i++) {
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+ skb_frag_t *frag = NULL;
+ int len = xdpf->len;
+ dma_addr_t dma_addr;
- buf = &txq->buf[txq->txq_put_index];
- if (dma_map) {
- /* ndo_xdp_xmit */
- dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data,
- xdpf->len, DMA_TO_DEVICE);
- if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) {
- mvneta_txq_desc_put(txq);
- return MVNETA_XDP_DROPPED;
+ if (unlikely(i)) { /* paged area */
+ frag = &sinfo->frags[i - 1];
+ len = skb_frag_size(frag);
}
- buf->type = MVNETA_TYPE_XDP_NDO;
- } else {
- struct page *page = virt_to_page(xdpf->data);
- dma_addr = page_pool_get_dma_addr(page) +
- sizeof(*xdpf) + xdpf->headroom;
- dma_sync_single_for_device(pp->dev->dev.parent, dma_addr,
- xdpf->len, DMA_BIDIRECTIONAL);
- buf->type = MVNETA_TYPE_XDP_TX;
- }
- buf->xdpf = xdpf;
+ tx_desc = mvneta_txq_next_desc_get(txq);
+ if (dma_map) {
+ /* ndo_xdp_xmit */
+ void *data;
+
+ data = unlikely(frag) ? skb_frag_address(frag)
+ : xdpf->data;
+ dma_addr = dma_map_single(dev, data, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ mvneta_txq_desc_put(txq);
+ goto unmap;
+ }
- tx_desc->command = MVNETA_TXD_FLZ_DESC;
- tx_desc->buf_phys_addr = dma_addr;
- tx_desc->data_size = xdpf->len;
+ buf->type = MVNETA_TYPE_XDP_NDO;
+ } else {
+ page = unlikely(frag) ? skb_frag_page(frag)
+ : virt_to_page(xdpf->data);
+ dma_addr = page_pool_get_dma_addr(page);
+ if (unlikely(frag))
+ dma_addr += skb_frag_off(frag);
+ else
+ dma_addr += sizeof(*xdpf) + xdpf->headroom;
+ dma_sync_single_for_device(dev, dma_addr, len,
+ DMA_BIDIRECTIONAL);
+ buf->type = MVNETA_TYPE_XDP_TX;
+ }
+ buf->xdpf = unlikely(i) ? NULL : xdpf;
- mvneta_txq_inc_put(txq);
- txq->pending++;
- txq->count++;
+ tx_desc->command = unlikely(i) ? 0 : MVNETA_TXD_F_DESC;
+ tx_desc->buf_phys_addr = dma_addr;
+ tx_desc->data_size = len;
+ *nxmit_byte += len;
+
+ mvneta_txq_inc_put(txq);
+ }
+ /* last descriptor */
+ tx_desc->command |= MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
+
+ txq->pending += num_frames;
+ txq->count += num_frames;
return MVNETA_XDP_TX;
+
+unmap:
+ for (i--; i >= 0; i--) {
+ mvneta_txq_desc_put(txq);
+ tx_desc = txq->descs + txq->next_desc_to_proc;
+ dma_unmap_single(dev, tx_desc->buf_phys_addr,
+ tx_desc->data_size,
+ DMA_TO_DEVICE);
+ }
+
+ return MVNETA_XDP_DROPPED;
}
static int
@@ -2123,8 +2169,8 @@ mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
struct mvneta_tx_queue *txq;
struct netdev_queue *nq;
+ int cpu, nxmit_byte = 0;
struct xdp_frame *xdpf;
- int cpu;
u32 ret;
xdpf = xdp_convert_buff_to_frame(xdp);
@@ -2136,10 +2182,10 @@ mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
nq = netdev_get_tx_queue(pp->dev, txq->id);
__netif_tx_lock(nq, cpu);
- ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false);
+ ret = mvneta_xdp_submit_frame(pp, txq, xdpf, &nxmit_byte, false);
if (ret == MVNETA_XDP_TX) {
u64_stats_update_begin(&stats->syncp);
- stats->es.ps.tx_bytes += xdpf->len;
+ stats->es.ps.tx_bytes += nxmit_byte;
stats->es.ps.tx_packets++;
stats->es.ps.xdp_tx++;
u64_stats_update_end(&stats->syncp);
@@ -2178,11 +2224,11 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,
__netif_tx_lock(nq, cpu);
for (i = 0; i < num_frame; i++) {
- ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
+ ret = mvneta_xdp_submit_frame(pp, txq, frames[i], &nxmit_byte,
+ true);
if (ret != MVNETA_XDP_TX)
break;
- nxmit_byte += frames[i]->len;
nxmit++;
}
@@ -2205,7 +2251,6 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
struct bpf_prog *prog, struct xdp_buff *xdp,
u32 frame_sz, struct mvneta_stats *stats)
{
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
unsigned int len, data_len, sync;
u32 ret, act;
@@ -2226,7 +2271,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
err = xdp_do_redirect(pp->dev, xdp, prog);
if (unlikely(err)) {
- mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
+ mvneta_xdp_put_buff(pp, rxq, xdp, sync);
ret = MVNETA_XDP_DROPPED;
} else {
ret = MVNETA_XDP_REDIR;
@@ -2237,7 +2282,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
case XDP_TX:
ret = mvneta_xdp_xmit_back(pp, xdp);
if (ret != MVNETA_XDP_TX)
- mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
+ mvneta_xdp_put_buff(pp, rxq, xdp, sync);
break;
default:
bpf_warn_invalid_xdp_action(pp->dev, prog, act);
@@ -2246,7 +2291,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
trace_xdp_exception(pp->dev, prog, act);
fallthrough;
case XDP_DROP:
- mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
+ mvneta_xdp_put_buff(pp, rxq, xdp, sync);
ret = MVNETA_XDP_DROPPED;
stats->xdp_drop++;
break;
@@ -2269,7 +2314,6 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
int data_len = -MVNETA_MH_SIZE, len;
struct net_device *dev = pp->dev;
enum dma_data_direction dma_dir;
- struct skb_shared_info *sinfo;
if (*size > MVNETA_MAX_RX_BUF_SIZE) {
len = MVNETA_MAX_RX_BUF_SIZE;
@@ -2289,11 +2333,9 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
/* Prefetch header */
prefetch(data);
+ xdp_buff_clear_frags_flag(xdp);
xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
data_len, false);
-
- sinfo = xdp_get_shared_info_from_buff(xdp);
- sinfo->nr_frags = 0;
}
static void
@@ -2301,9 +2343,9 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
struct mvneta_rx_desc *rx_desc,
struct mvneta_rx_queue *rxq,
struct xdp_buff *xdp, int *size,
- struct skb_shared_info *xdp_sinfo,
struct page *page)
{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
struct net_device *dev = pp->dev;
enum dma_data_direction dma_dir;
int data_len, len;
@@ -2321,25 +2363,25 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
len, dma_dir);
rx_desc->buf_phys_addr = 0;
- if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) {
- skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++];
+ if (!xdp_buff_has_frags(xdp))
+ sinfo->nr_frags = 0;
+
+ if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
+ skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags++];
skb_frag_off_set(frag, pp->rx_offset_correction);
skb_frag_size_set(frag, data_len);
__skb_frag_set_page(frag, page);
+
+ if (!xdp_buff_has_frags(xdp)) {
+ sinfo->xdp_frags_size = *size;
+ xdp_buff_set_frags_flag(xdp);
+ }
+ if (page_is_pfmemalloc(page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
} else {
page_pool_put_full_page(rxq->page_pool, page, true);
}
-
- /* last fragment */
- if (len == *size) {
- struct skb_shared_info *sinfo;
-
- sinfo = xdp_get_shared_info_from_buff(xdp);
- sinfo->nr_frags = xdp_sinfo->nr_frags;
- memcpy(sinfo->frags, xdp_sinfo->frags,
- sinfo->nr_frags * sizeof(skb_frag_t));
- }
*size -= len;
}
@@ -2348,8 +2390,11 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
struct xdp_buff *xdp, u32 desc_status)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
- int i, num_frags = sinfo->nr_frags;
struct sk_buff *skb;
+ u8 num_frags;
+
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ num_frags = sinfo->nr_frags;
skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
if (!skb)
@@ -2361,13 +2406,11 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
skb_put(skb, xdp->data_end - xdp->data);
skb->ip_summed = mvneta_rx_csum(pp, desc_status);
- for (i = 0; i < num_frags; i++) {
- skb_frag_t *frag = &sinfo->frags[i];
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- skb_frag_page(frag), skb_frag_off(frag),
- skb_frag_size(frag), PAGE_SIZE);
- }
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ xdp_update_skb_shared_info(skb, num_frags,
+ sinfo->xdp_frags_size,
+ num_frags * xdp->frame_sz,
+ xdp_buff_is_frag_pfmemalloc(xdp));
return skb;
}
@@ -2379,7 +2422,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
{
int rx_proc = 0, rx_todo, refill, size = 0;
struct net_device *dev = pp->dev;
- struct skb_shared_info sinfo;
struct mvneta_stats ps = {};
struct bpf_prog *xdp_prog;
u32 desc_status, frame_sz;
@@ -2388,8 +2430,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq);
xdp_buf.data_hard_start = NULL;
- sinfo.nr_frags = 0;
-
/* Get number of received packets */
rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
@@ -2431,7 +2471,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
}
mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
- &size, &sinfo, page);
+ &size, page);
} /* Middle or Last descriptor */
if (!(rx_status & MVNETA_RXD_LAST_DESC))
@@ -2439,7 +2479,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
continue;
if (size) {
- mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
+ mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
goto next;
}
@@ -2451,7 +2491,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
if (IS_ERR(skb)) {
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
- mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
+ mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
u64_stats_update_begin(&stats->syncp);
stats->es.skb_alloc_error++;
@@ -2468,11 +2508,10 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
napi_gro_receive(napi, skb);
next:
xdp_buf.data_hard_start = NULL;
- sinfo.nr_frags = 0;
}
if (xdp_buf.data_hard_start)
- mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
+ mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
if (ps.xdp_redirect)
xdp_do_flush_map();
@@ -3260,7 +3299,8 @@ static int mvneta_create_page_pool(struct mvneta_port *pp,
return err;
}
- err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0);
+ err = __xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0,
+ PAGE_SIZE);
if (err < 0)
goto err_free_pp;
@@ -3740,6 +3780,7 @@ static void mvneta_percpu_disable(void *arg)
static int mvneta_change_mtu(struct net_device *dev, int mtu)
{
struct mvneta_port *pp = netdev_priv(dev);
+ struct bpf_prog *prog = pp->xdp_prog;
int ret;
if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
@@ -3748,8 +3789,11 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
}
- if (pp->xdp_prog && mtu > MVNETA_MAX_RX_BUF_SIZE) {
- netdev_info(dev, "Illegal MTU value %d for XDP mode\n", mtu);
+ if (prog && !prog->aux->xdp_has_frags &&
+ mtu > MVNETA_MAX_RX_BUF_SIZE) {
+ netdev_info(dev, "Illegal MTU %d for XDP prog without frags\n",
+ mtu);
+
return -EINVAL;
}
@@ -3969,6 +4013,15 @@ static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = {
.pcs_an_restart = mvneta_pcs_an_restart,
};
+static struct phylink_pcs *mvneta_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct mvneta_port *pp = netdev_priv(ndev);
+
+ return &pp->phylink_pcs;
+}
+
static int mvneta_mac_prepare(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
@@ -4169,13 +4222,14 @@ static void mvneta_mac_link_up(struct phylink_config *config,
mvneta_port_up(pp);
if (phy && pp->eee_enabled) {
- pp->eee_active = phy_init_eee(phy, 0) >= 0;
+ pp->eee_active = phy_init_eee(phy, false) >= 0;
mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
}
}
static const struct phylink_mac_ops mvneta_phylink_ops = {
.validate = phylink_generic_validate,
+ .mac_select_pcs = mvneta_mac_select_pcs,
.mac_prepare = mvneta_mac_prepare,
.mac_config = mvneta_mac_config,
.mac_finish = mvneta_mac_finish,
@@ -4490,8 +4544,9 @@ static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
struct mvneta_port *pp = netdev_priv(dev);
struct bpf_prog *old_prog;
- if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
- NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
+ if (prog && !prog->aux->xdp_has_frags &&
+ dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
+ NL_SET_ERR_MSG_MOD(extack, "prog does not support XDP frags");
return -EOPNOTSUPP;
}
@@ -5272,6 +5327,10 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
win_protect |= 3 << (2 * i);
}
} else {
+ if (pp->neta_ac5)
+ mvreg_write(pp, MVNETA_WIN_BASE(0),
+ (MVNETA_AC5_CNM_DDR_ATTR << 8) |
+ MVNETA_AC5_CNM_DDR_TARGET);
/* For Armada3700 open default 4GB Mbus window, leaving
* arbitration of target/attribute to a different layer
* of configuration.
@@ -5321,26 +5380,66 @@ static int mvneta_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
- dev->irq = irq_of_parse_and_map(dn, 0);
- if (dev->irq == 0)
- return -EINVAL;
+ dev->tx_queue_len = MVNETA_MAX_TXD;
+ dev->watchdog_timeo = 5 * HZ;
+ dev->netdev_ops = &mvneta_netdev_ops;
+ dev->ethtool_ops = &mvneta_eth_tool_ops;
+
+ pp = netdev_priv(dev);
+ spin_lock_init(&pp->lock);
+ pp->dn = dn;
+
+ pp->rxq_def = rxq_def;
+ pp->indir[0] = rxq_def;
err = of_get_phy_mode(dn, &phy_mode);
if (err) {
dev_err(&pdev->dev, "incorrect phy-mode\n");
- goto err_free_irq;
+ return err;
}
+ pp->phy_interface = phy_mode;
+
comphy = devm_of_phy_get(&pdev->dev, dn, NULL);
- if (comphy == ERR_PTR(-EPROBE_DEFER)) {
- err = -EPROBE_DEFER;
- goto err_free_irq;
- } else if (IS_ERR(comphy)) {
+ if (comphy == ERR_PTR(-EPROBE_DEFER))
+ return -EPROBE_DEFER;
+
+ if (IS_ERR(comphy))
comphy = NULL;
+
+ pp->comphy = comphy;
+
+ pp->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pp->base))
+ return PTR_ERR(pp->base);
+
+ /* Get special SoC configurations */
+ if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
+ pp->neta_armada3700 = true;
+ if (of_device_is_compatible(dn, "marvell,armada-ac5-neta")) {
+ pp->neta_armada3700 = true;
+ pp->neta_ac5 = true;
}
- pp = netdev_priv(dev);
- spin_lock_init(&pp->lock);
+ dev->irq = irq_of_parse_and_map(dn, 0);
+ if (dev->irq == 0)
+ return -EINVAL;
+
+ pp->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(pp->clk))
+ pp->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pp->clk)) {
+ err = PTR_ERR(pp->clk);
+ goto err_free_irq;
+ }
+
+ clk_prepare_enable(pp->clk);
+
+ pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
+ if (!IS_ERR(pp->clk_bus))
+ clk_prepare_enable(pp->clk_bus);
+
+ pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops;
pp->phylink_config.dev = &dev->dev;
pp->phylink_config.type = PHYLINK_NETDEV;
@@ -5377,55 +5476,16 @@ static int mvneta_probe(struct platform_device *pdev)
phy_mode, &mvneta_phylink_ops);
if (IS_ERR(phylink)) {
err = PTR_ERR(phylink);
- goto err_free_irq;
- }
-
- dev->tx_queue_len = MVNETA_MAX_TXD;
- dev->watchdog_timeo = 5 * HZ;
- dev->netdev_ops = &mvneta_netdev_ops;
-
- dev->ethtool_ops = &mvneta_eth_tool_ops;
-
- pp->phylink = phylink;
- pp->comphy = comphy;
- pp->phy_interface = phy_mode;
- pp->dn = dn;
-
- pp->rxq_def = rxq_def;
- pp->indir[0] = rxq_def;
-
- /* Get special SoC configurations */
- if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
- pp->neta_armada3700 = true;
-
- pp->clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(pp->clk))
- pp->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(pp->clk)) {
- err = PTR_ERR(pp->clk);
- goto err_free_phylink;
- }
-
- clk_prepare_enable(pp->clk);
-
- pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
- if (!IS_ERR(pp->clk_bus))
- clk_prepare_enable(pp->clk_bus);
-
- pp->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(pp->base)) {
- err = PTR_ERR(pp->base);
goto err_clk;
}
- pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops;
- phylink_set_pcs(phylink, &pp->phylink_pcs);
+ pp->phylink = phylink;
/* Alloc per-cpu port structure */
pp->ports = alloc_percpu(struct mvneta_pcpu_port);
if (!pp->ports) {
err = -ENOMEM;
- goto err_clk;
+ goto err_free_phylink;
}
/* Alloc per-cpu stats */
@@ -5569,12 +5629,12 @@ err_netdev:
free_percpu(pp->stats);
err_free_ports:
free_percpu(pp->ports);
-err_clk:
- clk_disable_unprepare(pp->clk_bus);
- clk_disable_unprepare(pp->clk);
err_free_phylink:
if (pp->phylink)
phylink_destroy(pp->phylink);
+err_clk:
+ clk_disable_unprepare(pp->clk_bus);
+ clk_disable_unprepare(pp->clk);
err_free_irq:
irq_dispose_mapping(dev->irq);
return err;
@@ -5720,6 +5780,7 @@ static const struct of_device_id mvneta_match[] = {
{ .compatible = "marvell,armada-370-neta" },
{ .compatible = "marvell,armada-xp-neta" },
{ .compatible = "marvell,armada-3700-neta" },
+ { .compatible = "marvell,armada-ac5-neta" },
{ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);
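The mvneta rework above moves multi-buffer state out of a caller-owned skb_shared_info and into the xdp_buff itself: fragments are appended directly to the buffer's shared info, xdp_buff_set_frags_flag() marks the buffer, and consumers test xdp_buff_has_frags() before touching nr_frags. A minimal sketch of releasing such a buffer, assuming page_pool-backed pages as in mvneta_xdp_put_buff():

#include <linux/mm.h>
#include <linux/skbuff.h>
#include <net/page_pool.h>
#include <net/xdp.h>

/* Illustrative: free every fragment page, then the head page. */
static void example_put_xdp_buff(struct page_pool *pool, struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
	int i;

	if (xdp_buff_has_frags(xdp)) {
		for (i = 0; i < sinfo->nr_frags; i++)
			page_pool_put_full_page(pool,
						skb_frag_page(&sinfo->frags[i]),
						true);
	}

	/* finally the head buffer itself */
	page_pool_put_full_page(pool, virt_to_head_page(xdp->data), true);
}

The same flag also gates the relaxed MTU/attach checks: an XDP program that declares xdp_has_frags may now run with an MTU above MVNETA_MAX_RX_BUF_SIZE.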
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 3631d612aaca..25491edc35ce 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -578,31 +578,78 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
}
}
+static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (is_dev_rpm(cgx))
+ return 0;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ *rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ *tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
+ return 0;
+}
+
/* Enable or disable forwarding received pause frames to Tx block */
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
{
struct cgx *cgx = cgxd;
+ u8 rx_pause, tx_pause;
+ bool is_pfc_enabled;
+ struct lmac *lmac;
u64 cfg;
if (!cgx)
return;
- if (enable) {
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return;
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ /* Pause frames are not enabled, just return */
+ if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
+ return;
+
+ cgx_lmac_get_pause_frm_status(cgx, lmac_id, &rx_pause, &tx_pause);
+ is_pfc_enabled = rx_pause ? false : true;
+
+ if (enable) {
+ if (!is_pfc_enabled) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ cfg |= CGXX_SMUX_CBFC_CTL_BCK_EN;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+ }
} else {
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ if (!is_pfc_enabled) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ cfg &= ~CGXX_SMUX_CBFC_CTL_BCK_EN;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+ }
}
}
@@ -722,26 +769,6 @@ int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
return !!(last & DATA_PKT_TX_EN);
}
-static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
- u8 *tx_pause, u8 *rx_pause)
-{
- struct cgx *cgx = cgxd;
- u64 cfg;
-
- if (is_dev_rpm(cgx))
- return 0;
-
- if (!is_lmac_valid(cgx, lmac_id))
- return -ENODEV;
-
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- *rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);
-
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
- *tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
- return 0;
-}
-
static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
u8 tx_pause, u8 rx_pause)
{
@@ -782,21 +809,8 @@ static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
if (!is_lmac_valid(cgx, lmac_id))
return;
- if (enable) {
- /* Enable receive pause frames */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
-
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
-
- /* Enable pause frames transmission */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
- cfg |= CGX_SMUX_TX_CTL_L2P_BP_CONV;
- cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+ if (enable) {
/* Set pause time and interval */
cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
DEFAULT_PAUSE_TIME);
@@ -813,21 +827,120 @@ static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
cfg &= ~0xFFFFULL;
cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
cfg | (DEFAULT_PAUSE_TIME / 2));
- } else {
- /* ALL pause frames received are completely ignored */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ }
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+ /* ALL pause frames received are completely ignored */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ /* Disable pause frames transmission */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+
+ cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
+ cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
+ cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
+ cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
+}
+
+int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ int pfvf_idx)
+{
+ struct cgx *cgx = cgxd;
+ struct lmac *lmac;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return -ENODEV;
+
+ if (!rx_pause)
+ clear_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
+ else
+ set_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
+
+ if (!tx_pause)
+ clear_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
+ else
+ set_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
+
+ /* check if other pfvfs are using flow control */
+ if (!rx_pause && bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) {
+ dev_warn(&cgx->pdev->dev,
+ "Receive Flow control disable not permitted as its used by other PFVFs\n");
+ return -EPERM;
+ }
+
+ if (!tx_pause && bitmap_weight(lmac->tx_fc_pfvf_bmap.bmap, lmac->tx_fc_pfvf_bmap.max)) {
+ dev_warn(&cgx->pdev->dev,
+ "Transmit Flow control disable not permitted as its used by other PFVFs\n");
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause,
+ u8 rx_pause, u16 pfc_en)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ /* Return as no traffic classes are requested */
+ if (tx_pause && !pfc_en)
+ return 0;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
- /* Disable pause frames transmission */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
- cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
- cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+ if (rx_pause) {
+ cfg |= (CGXX_SMUX_CBFC_CTL_RX_EN |
+ CGXX_SMUX_CBFC_CTL_BCK_EN |
+ CGXX_SMUX_CBFC_CTL_DRP_EN);
+ } else {
+ cfg &= ~(CGXX_SMUX_CBFC_CTL_RX_EN |
+ CGXX_SMUX_CBFC_CTL_BCK_EN |
+ CGXX_SMUX_CBFC_CTL_DRP_EN);
}
+
+ if (tx_pause)
+ cfg |= CGXX_SMUX_CBFC_CTL_TX_EN;
+ else
+ cfg &= ~CGXX_SMUX_CBFC_CTL_TX_EN;
+
+ cfg = FIELD_SET(CGX_PFC_CLASS_MASK, pfc_en, cfg);
+
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+
+ /* Write source MAC address which will be filled into PFC packet */
+ cfg = cgx_lmac_addr_get(cgx->cgx_id, lmac_id);
+ cgx_write(cgx, lmac_id, CGXX_SMUX_SMAC, cfg);
+
+ return 0;
+}
+
+int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+
+ *rx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_RX_EN);
+ *tx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_TX_EN);
+
+ return 0;
}
void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
@@ -1489,6 +1602,16 @@ static int cgx_lmac_init(struct cgx *cgx)
/* Reserve first entry for default MAC address */
set_bit(0, lmac->mac_to_index_bmap.bmap);
+ lmac->rx_fc_pfvf_bmap.max = 128;
+ err = rvu_alloc_bitmap(&lmac->rx_fc_pfvf_bmap);
+ if (err)
+ goto err_dmac_bmap_free;
+
+ lmac->tx_fc_pfvf_bmap.max = 128;
+ err = rvu_alloc_bitmap(&lmac->tx_fc_pfvf_bmap);
+ if (err)
+ goto err_rx_fc_bmap_free;
+
init_waitqueue_head(&lmac->wq_cmd_cmplt);
mutex_init(&lmac->cmd_lock);
spin_lock_init(&lmac->event_cb_lock);
@@ -1505,6 +1628,10 @@ static int cgx_lmac_init(struct cgx *cgx)
return cgx_lmac_verify_fwi_version(cgx);
err_bitmap_free:
+ rvu_free_bitmap(&lmac->tx_fc_pfvf_bmap);
+err_rx_fc_bmap_free:
+ rvu_free_bitmap(&lmac->rx_fc_pfvf_bmap);
+err_dmac_bmap_free:
rvu_free_bitmap(&lmac->mac_to_index_bmap);
err_name_free:
kfree(lmac->name);
@@ -1572,6 +1699,8 @@ static struct mac_ops cgx_mac_ops = {
.mac_enadis_ptp_config = cgx_lmac_ptp_config,
.mac_rx_tx_enable = cgx_lmac_rx_tx_enable,
.mac_tx_enable = cgx_lmac_tx_enable,
+ .pfc_config = cgx_lmac_pfc_config,
+ .mac_get_pfc_frm_cfg = cgx_lmac_get_pfc_frm_cfg,
};
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
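The per-LMAC rx_fc_pfvf_bmap/tx_fc_pfvf_bmap bitmaps added above act as reference counts: every PF/VF that wants pause frames sets its bit, and a disable request is refused while any other bit remains set. A minimal sketch of that bookkeeping idea, with illustrative names and a fixed bitmap size matching the 128 entries allocated in cgx_lmac_init():

#include <linux/bitmap.h>
#include <linux/errno.h>

#define EXAMPLE_MAX_PFVF 128

static DECLARE_BITMAP(rx_pause_users, EXAMPLE_MAX_PFVF);

/* Illustrative: mirror of the gate used by verify_lmac_fc_cfg(). */
static int example_set_rx_pause(int pfvf_idx, bool enable)
{
	if (enable) {
		set_bit(pfvf_idx, rx_pause_users);
		return 0;
	}

	clear_bit(pfvf_idx, rx_pause_users);
	/* someone else still depends on pause frames: refuse the disable */
	if (bitmap_weight(rx_pause_users, EXAMPLE_MAX_PFVF))
		return -EPERM;

	return 0;
}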
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index ab1e4abdea38..bd2f33a26eee 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -76,6 +76,13 @@
#define CGXX_SMUX_TX_CTL 0x20178
#define CGXX_SMUX_TX_PAUSE_PKT_TIME 0x20110
#define CGXX_SMUX_TX_PAUSE_PKT_INTERVAL 0x20120
+#define CGXX_SMUX_SMAC 0x20108
+#define CGXX_SMUX_CBFC_CTL 0x20218
+#define CGXX_SMUX_CBFC_CTL_RX_EN BIT_ULL(0)
+#define CGXX_SMUX_CBFC_CTL_TX_EN BIT_ULL(1)
+#define CGXX_SMUX_CBFC_CTL_DRP_EN BIT_ULL(2)
+#define CGXX_SMUX_CBFC_CTL_BCK_EN BIT_ULL(3)
+#define CGX_PFC_CLASS_MASK GENMASK_ULL(47, 32)
#define CGXX_GMP_GMI_TX_PAUSE_PKT_TIME 0x38230
#define CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL 0x38248
#define CGX_SMUX_TX_CTL_L2P_BP_CONV BIT_ULL(7)
@@ -172,4 +179,10 @@ u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset);
int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index);
u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id);
u64 cgx_read_dmac_entry(void *cgxd, int index);
+int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause);
+int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ int pfvf_idx);
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index b33e7d1d0851..f30581bf0688 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -17,6 +17,8 @@
* @resp: command response
* @link_info: link related information
* @mac_to_index_bmap: Mac address to CGX table index mapping
+ * @rx_fc_pfvf_bmap: Receive flow control enabled netdev mapping
+ * @tx_fc_pfvf_bmap: Transmit flow control enabled netdev mapping
* @event_cb: callback for linkchange events
* @event_cb_lock: lock for serializing callback with unregister
* @cgx: parent cgx port
@@ -33,6 +35,8 @@ struct lmac {
u64 resp;
struct cgx_link_user_info link_info;
struct rsrc_bmap mac_to_index_bmap;
+ struct rsrc_bmap rx_fc_pfvf_bmap;
+ struct rsrc_bmap tx_fc_pfvf_bmap;
struct cgx_event_cb event_cb;
/* lock for serializing callback with unregister */
spinlock_t event_cb_lock;
@@ -110,6 +114,12 @@ struct mac_ops {
int (*mac_rx_tx_enable)(void *cgxd, int lmac_id, bool enable);
int (*mac_tx_enable)(void *cgxd, int lmac_id, bool enable);
+ int (*pfc_config)(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause, u16 pfc_en);
+
+ int (*mac_get_pfc_frm_cfg)(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause);
+
};
struct cgx {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 58e2aeebc14f..550cb11197bf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -172,6 +172,8 @@ M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \
M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, msg_req, msg_rsp) \
M(CGX_MAC_ADDR_UPDATE, 0x21E, cgx_mac_addr_update, cgx_mac_addr_update_req, \
msg_rsp) \
+M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg, \
+ cgx_pfc_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
npa_lf_alloc_req, npa_lf_alloc_rsp) \
@@ -609,6 +611,21 @@ struct rpm_stats_rsp {
u64 tx_stats[RPM_TX_STATS_COUNT];
};
+struct cgx_pfc_cfg {
+ struct mbox_msghdr hdr;
+ u8 rx_pause;
+ u8 tx_pause;
+ u16 pfc_en; /* bitmap indicating pfc enabled traffic classes */
+};
+
+struct cgx_pfc_rsp {
+ struct mbox_msghdr hdr;
+ u8 rx_pause;
+ u8 tx_pause;
+};
+
+ /* NPA mbox message formats */
+
struct npc_set_pkind {
struct mbox_msghdr hdr;
#define OTX2_PRIV_FLAGS_DEFAULT BIT_ULL(0)
@@ -1603,6 +1620,8 @@ enum cgx_af_status {
LMAC_AF_ERR_INVALID_PARAM = -1101,
LMAC_AF_ERR_PF_NOT_MAPPED = -1102,
LMAC_AF_ERR_PERM_DENIED = -1103,
+ LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED = -1104,
+ LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED = -1105,
};
#endif /* MBOX_H */
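The new CGX_PRIO_FLOW_CTRL_CFG mailbox message carries, besides the usual rx/tx pause flags, a pfc_en word with one bit per traffic class. A minimal sketch of filling the request for classes 3 and 4 only; the mbox header and the allocation/dispatch through the driver's mailbox helpers are omitted here:

#include <linux/bits.h>

/* Illustrative request body; .hdr is filled in by the mbox machinery. */
struct cgx_pfc_cfg req = {
	.rx_pause = 1,
	.tx_pause = 1,
	.pfc_en   = BIT(3) | BIT(4),	/* one bit per PFC traffic class */
};

The AF-side handler eventually lands in cgx_lmac_pfc_config(), which programs CGXX_SMUX_CBFC_CTL and writes the class bitmap into CGX_PFC_CLASS_MASK.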
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index e682b7bfde64..67a6821d2dff 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -25,6 +25,9 @@
#define PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP 0xB600
#define PCI_DEVID_OCTEONTX2_RST 0xA085
#define PCI_DEVID_CN10K_PTP 0xA09E
+#define PCI_SUBSYS_DEVID_CN10K_A_PTP 0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_A_PTP 0xBA00
+#define PCI_SUBSYS_DEVID_CNF10K_B_PTP 0xBC00
#define PCI_PTP_BAR_NO 0
@@ -46,10 +49,105 @@
#define PTP_CLOCK_HI 0xF10ULL
#define PTP_CLOCK_COMP 0xF18ULL
#define PTP_TIMESTAMP 0xF20ULL
+#define PTP_CLOCK_SEC 0xFD0ULL
+
+#define CYCLE_MULT 1000
static struct ptp *first_ptp_block;
static const struct pci_device_id ptp_id_table[];
+static bool cn10k_ptp_errata(struct ptp *ptp)
+{
+ if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP ||
+ ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP)
+ return true;
+ return false;
+}
+
+static bool is_ptp_tsfmt_sec_nsec(struct ptp *ptp)
+{
+ if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP ||
+ ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP)
+ return true;
+ return false;
+}
+
+static u64 read_ptp_tstmp_sec_nsec(struct ptp *ptp)
+{
+ u64 sec, sec1, nsec;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ptp->ptp_lock, flags);
+ sec = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL;
+ nsec = readq(ptp->reg_base + PTP_CLOCK_HI);
+ sec1 = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL;
+ /* check nsec rollover */
+ if (sec1 > sec) {
+ nsec = readq(ptp->reg_base + PTP_CLOCK_HI);
+ sec = sec1;
+ }
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+
+ return sec * NSEC_PER_SEC + nsec;
+}
+
+static u64 read_ptp_tstmp_nsec(struct ptp *ptp)
+{
+ return readq(ptp->reg_base + PTP_CLOCK_HI);
+}
+
+static u64 ptp_calc_adjusted_comp(u64 ptp_clock_freq)
+{
+ u64 comp, adj = 0, cycles_per_sec, ns_drift = 0;
+ u32 ptp_clock_nsec, cycle_time;
+ int cycle;
+
+ /* Errata:
+ * Issue #1: At the time of 1 sec rollover of the nano-second counter,
+ * the nano-second counter is set to 0. However, it should be set to
+ * (existing counter_value - 10^9).
+ *
+ * Issue #2: The nano-second counter rolls over at 0x3B9A_C9FF.
+ * It should roll over at 0x3B9A_CA00.
+ */
+
+ /* calculate ptp_clock_comp value */
+ comp = ((u64)1000000000ULL << 32) / ptp_clock_freq;
+ /* use CYCLE_MULT to avoid accuracy loss due to integer arithmetic */
+ cycle_time = NSEC_PER_SEC * CYCLE_MULT / ptp_clock_freq;
+ /* cycles per sec */
+ cycles_per_sec = ptp_clock_freq;
+
+ /* check whether ptp nanosecond counter rolls over early */
+ cycle = cycles_per_sec - 1;
+ ptp_clock_nsec = (cycle * comp) >> 32;
+ while (ptp_clock_nsec < NSEC_PER_SEC) {
+ if (ptp_clock_nsec == 0x3B9AC9FF)
+ goto calc_adj_comp;
+ cycle++;
+ ptp_clock_nsec = (cycle * comp) >> 32;
+ }
+ /* compute nanoseconds lost per second when nsec counter rolls over */
+ ns_drift = ptp_clock_nsec - NSEC_PER_SEC;
+ /* calculate ptp_clock_comp adjustment */
+ if (ns_drift > 0) {
+ adj = comp * ns_drift;
+ adj = adj / 1000000000ULL;
+ }
+ /* speed up the ptp clock to account for nanoseconds lost */
+ comp += adj;
+ return comp;
+
+calc_adj_comp:
+ /* slow down the ptp clock so it does not roll over early */
+ adj = comp * cycle_time;
+ adj = adj / 1000000000ULL;
+ adj = adj / CYCLE_MULT;
+ comp -= adj;
+
+ return comp;
+}
+
struct ptp *ptp_get(void)
{
struct ptp *ptp = first_ptp_block;
@@ -77,8 +175,8 @@ void ptp_put(struct ptp *ptp)
static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
{
bool neg_adj = false;
- u64 comp;
- u64 adj;
+ u32 freq, freq_adj;
+ u64 comp, adj;
s64 ppb;
if (scaled_ppm < 0) {
@@ -100,15 +198,22 @@ static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
* where tbase is the basic compensation value calculated
* initialy in the probe function.
*/
- comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
/* convert scaled_ppm to ppb */
ppb = 1 + scaled_ppm;
ppb *= 125;
ppb >>= 13;
- adj = comp * ppb;
- adj = div_u64(adj, 1000000000ull);
- comp = neg_adj ? comp - adj : comp + adj;
+ if (cn10k_ptp_errata(ptp)) {
+ /* calculate the new frequency based on ppb */
+ freq_adj = (ptp->clock_rate * ppb) / 1000000000ULL;
+ freq = neg_adj ? ptp->clock_rate + freq_adj : ptp->clock_rate - freq_adj;
+ comp = ptp_calc_adjusted_comp(freq);
+ } else {
+ comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ adj = comp * ppb;
+ adj = div_u64(adj, 1000000000ull);
+ comp = neg_adj ? comp - adj : comp + adj;
+ }
writeq(comp, ptp->reg_base + PTP_CLOCK_COMP);
return 0;
@@ -117,7 +222,7 @@ static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
static int ptp_get_clock(struct ptp *ptp, u64 *clk)
{
/* Return the current PTP clock */
- *clk = readq(ptp->reg_base + PTP_CLOCK_HI);
+ *clk = ptp->read_ptp_tstmp(ptp);
return 0;
}
@@ -166,7 +271,11 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR);
writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR);
- clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ if (cn10k_ptp_errata(ptp))
+ clock_comp = ptp_calc_adjusted_comp(ptp->clock_rate);
+ else
+ clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+
/* Initial compensation value to start the nanosecs counter */
writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
}
@@ -214,6 +323,12 @@ static int ptp_probe(struct pci_dev *pdev,
if (!first_ptp_block)
first_ptp_block = ptp;
+ spin_lock_init(&ptp->ptp_lock);
+ if (is_ptp_tsfmt_sec_nsec(ptp))
+ ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec;
+ else
+ ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
+
return 0;
error_free:
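The value programmed into PTP_CLOCK_COMP above is a 32.32 fixed-point count of nanoseconds added to the clock on every coprocessor cycle (comp = 2^32 * 10^9 / freq), and ptp_adjfine() scales it by the requested parts-per-billion correction; on the errata'd cn10k parts the same correction is instead folded into an adjusted frequency and passed to ptp_calc_adjusted_comp(). A minimal stand-alone sketch of the non-errata arithmetic, with illustrative names that are not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* 32.32 fixed-point nanoseconds added to the counter per PTP clock cycle */
static uint64_t base_comp(uint64_t clock_freq_hz)
{
	return (1000000000ULL << 32) / clock_freq_hz;
}

/* apply a parts-per-billion correction the way ptp_adjfine() does */
static uint64_t adjust_comp(uint64_t comp, int64_t ppb)
{
	uint64_t adj = comp * (uint64_t)(ppb < 0 ? -ppb : ppb) / 1000000000ULL;

	return ppb < 0 ? comp - adj : comp + adj;
}

int main(void)
{
	uint64_t comp = base_comp(900000000ULL);	/* assumed 900 MHz PTP clock */

	printf("base comp         = 0x%016llx\n", (unsigned long long)comp);
	printf("comp after +5 ppm = 0x%016llx\n",
	       (unsigned long long)adjust_comp(comp, 5000));
	return 0;
}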
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
index 1b81a0493cd3..95a955159f40 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
@@ -15,6 +15,8 @@
struct ptp {
struct pci_dev *pdev;
void __iomem *reg_base;
+ u64 (*read_ptp_tstmp)(struct ptp *ptp);
+ spinlock_t ptp_lock; /* lock */
u32 clock_rate;
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index 9ea2f6ac38ec..47e83d7a5804 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -32,6 +32,8 @@ static struct mac_ops rpm_mac_ops = {
.mac_enadis_ptp_config = rpm_lmac_ptp_config,
.mac_rx_tx_enable = rpm_lmac_rx_tx_enable,
.mac_tx_enable = rpm_lmac_tx_enable,
+ .pfc_config = rpm_lmac_pfc_config,
+ .mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
};
struct mac_ops *rpm_get_mac_ops(void)
@@ -96,11 +98,20 @@ int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable)
void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable)
{
rpm_t *rpm = rpmd;
+ struct lmac *lmac;
u64 cfg;
if (!rpm)
return;
+ lmac = lmac_pdata(lmac_id, rpm);
+ if (!lmac)
+ return;
+
+ /* Pause frames are not enabled, just return */
+ if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
+ return;
+
if (enable) {
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
@@ -122,13 +133,94 @@ int rpm_lmac_get_pause_frm_status(void *rpmd, int lmac_id,
return -ENODEV;
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE);
+ if (!(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE)) {
+ *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE);
+ *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE);
+ }
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE);
return 0;
}
+static void rpm_cfg_pfc_quanta_thresh(rpm_t *rpm, int lmac_id,
+ unsigned long pfc_en,
+ bool enable)
+{
+ u64 quanta_offset = 0, quanta_thresh = 0, cfg;
+ int i, shift;
+
+ /* Set pause time and interval */
+ for_each_set_bit(i, &pfc_en, 16) {
+ switch (i) {
+ case 0:
+ case 1:
+ quanta_offset = RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL01_QUANTA_THRESH;
+ break;
+ case 2:
+ case 3:
+ quanta_offset = RPMX_MTI_MAC100X_CL23_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL23_QUANTA_THRESH;
+ break;
+ case 4:
+ case 5:
+ quanta_offset = RPMX_MTI_MAC100X_CL45_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL45_QUANTA_THRESH;
+ break;
+ case 6:
+ case 7:
+ quanta_offset = RPMX_MTI_MAC100X_CL67_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL67_QUANTA_THRESH;
+ break;
+ case 8:
+ case 9:
+ quanta_offset = RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL89_QUANTA_THRESH;
+ break;
+ case 10:
+ case 11:
+ quanta_offset = RPMX_MTI_MAC100X_CL1011_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1011_QUANTA_THRESH;
+ break;
+ case 12:
+ case 13:
+ quanta_offset = RPMX_MTI_MAC100X_CL1213_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1213_QUANTA_THRESH;
+ break;
+ case 14:
+ case 15:
+ quanta_offset = RPMX_MTI_MAC100X_CL1415_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1415_QUANTA_THRESH;
+ break;
+ }
+
+ if (!quanta_offset || !quanta_thresh)
+ continue;
+
+ shift = (i % 2) ? 1 : 0;
+ cfg = rpm_read(rpm, lmac_id, quanta_offset);
+ if (enable) {
+ cfg |= ((u64)RPM_DEFAULT_PAUSE_TIME << shift * 16);
+ } else {
+ if (!shift)
+ cfg &= ~GENMASK_ULL(15, 0);
+ else
+ cfg &= ~GENMASK_ULL(31, 16);
+ }
+ rpm_write(rpm, lmac_id, quanta_offset, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, quanta_thresh);
+ if (enable) {
+ cfg |= ((u64)(RPM_DEFAULT_PAUSE_TIME / 2) << shift * 16);
+ } else {
+ if (!shift)
+ cfg &= ~GENMASK_ULL(15, 0);
+ else
+ cfg &= ~GENMASK_ULL(31, 16);
+ }
+ rpm_write(rpm, lmac_id, quanta_thresh, cfg);
+ }
+}
+
int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
u8 rx_pause)
{
@@ -152,8 +244,12 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
cfg = rpm_read(rpm, 0, RPMX_CMR_RX_OVR_BP);
if (tx_pause) {
+ /* Configure CL0 Pause Quanta & threshold for 802.3X frames */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 1, true);
cfg &= ~RPMX_CMR_RX_OVR_BP_EN(lmac_id);
} else {
+ /* Disable all Pause Quanta & threshold values */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
cfg |= RPMX_CMR_RX_OVR_BP_EN(lmac_id);
cfg &= ~RPMX_CMR_RX_OVR_BP_BP(lmac_id);
}
@@ -166,56 +262,20 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
rpm_t *rpm = rpmd;
u64 cfg;
- if (enable) {
- /* Enable 802.3 pause frame mode */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
-
- /* Enable receive pause frames */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
-
- /* Enable forward pause to TX block */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
-
- /* Enable pause frames transmission */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
-
- /* Set pause time and interval */
- cfg = rpm_read(rpm, lmac_id,
- RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA);
- cfg &= ~0xFFFFULL;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA,
- cfg | RPM_DEFAULT_PAUSE_TIME);
- /* Set pause interval as the hardware default is too short */
- cfg = rpm_read(rpm, lmac_id,
- RPMX_MTI_MAC100X_CL01_QUANTA_THRESH);
- cfg &= ~0xFFFFULL;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_CL01_QUANTA_THRESH,
- cfg | (RPM_DEFAULT_PAUSE_TIME / 2));
-
- } else {
- /* ALL pause frames received are completely ignored */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ /* ALL pause frames received are completely ignored */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
- /* Disable forward pause to TX block */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ /* Disable forward pause to TX block */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
- /* Disable pause frames transmission */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
- }
+ /* Disable pause frames transmission */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
}
int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat)
@@ -323,3 +383,65 @@ void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable)
cfg &= ~RPMX_RX_TS_PREPEND;
rpm_write(rpm, lmac_id, RPMX_CMRX_CFG, cfg);
}
+
+int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 pfc_en)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ /* reset PFC class quanta and threshold */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+
+ if (rx_pause) {
+ cfg &= ~(RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
+ } else {
+ cfg |= (RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
+ }
+
+ if (tx_pause) {
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, pfc_en, true);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ } else {
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ }
+
+ if (!rx_pause && !tx_pause)
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
+ else
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
+
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
+ cfg = FIELD_SET(RPM_PFC_CLASS_MASK, pfc_en, cfg);
+ rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, cfg);
+
+ return 0;
+}
+
+int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause, u8 *rx_pause)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ if (cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE) {
+ *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE);
+ *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE);
+ }
+
+ return 0;
+}
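rpm_cfg_pfc_quanta_thresh() above programs one PAUSE_QUANTA/QUANTA_THRESH register pair per two PFC classes: even classes land in bits 15:0 and odd classes in bits 31:16, with classes 0-7 and 8-15 in two blocks of registers spaced 8 bytes apart. A small stand-alone sketch of that class-to-register mapping, reusing the offsets from the defines added to rpm.h below (the helper name is illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

#define CL01_PAUSE_QUANTA	0x80A8ULL	/* classes 0/1; 2/3..6/7 follow at +8 */
#define CL89_PAUSE_QUANTA	0x8108ULL	/* classes 8/9; 10/11..14/15 follow at +8 */

static void pfc_class_to_reg(int cls, uint64_t *offset, int *shift)
{
	if (cls < 8)
		*offset = CL01_PAUSE_QUANTA + (cls / 2) * 8;
	else
		*offset = CL89_PAUSE_QUANTA + ((cls - 8) / 2) * 8;
	*shift = (cls & 1) ? 16 : 0;	/* odd classes use bits 31:16 */
}

int main(void)
{
	uint64_t offset;
	int shift;

	pfc_class_to_reg(13, &offset, &shift);
	printf("class 13 -> reg 0x%llx, shift %d\n",
	       (unsigned long long)offset, shift);	/* 0x8118, 16 */
	return 0;
}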
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
index ff580311edd0..9ab8d49dd180 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -33,7 +33,21 @@
#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE BIT_ULL(8)
#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE BIT_ULL(19)
#define RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA 0x80A8
+#define RPMX_MTI_MAC100X_CL23_PAUSE_QUANTA 0x80B0
+#define RPMX_MTI_MAC100X_CL45_PAUSE_QUANTA 0x80B8
+#define RPMX_MTI_MAC100X_CL67_PAUSE_QUANTA 0x80C0
#define RPMX_MTI_MAC100X_CL01_QUANTA_THRESH 0x80C8
+#define RPMX_MTI_MAC100X_CL23_QUANTA_THRESH 0x80D0
+#define RPMX_MTI_MAC100X_CL45_QUANTA_THRESH 0x80D8
+#define RPMX_MTI_MAC100X_CL67_QUANTA_THRESH 0x80E0
+#define RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA 0x8108
+#define RPMX_MTI_MAC100X_CL1011_PAUSE_QUANTA 0x8110
+#define RPMX_MTI_MAC100X_CL1213_PAUSE_QUANTA 0x8118
+#define RPMX_MTI_MAC100X_CL1415_PAUSE_QUANTA 0x8120
+#define RPMX_MTI_MAC100X_CL89_QUANTA_THRESH 0x8128
+#define RPMX_MTI_MAC100X_CL1011_QUANTA_THRESH 0x8130
+#define RPMX_MTI_MAC100X_CL1213_QUANTA_THRESH 0x8138
+#define RPMX_MTI_MAC100X_CL1415_QUANTA_THRESH 0x8140
#define RPM_DEFAULT_PAUSE_TIME 0xFFFF
#define RPMX_CMR_RX_OVR_BP 0x4120
#define RPMX_CMR_RX_OVR_BP_EN(x) BIT_ULL((x) + 8)
@@ -45,6 +59,18 @@
#define RPM_LMAC_FWI 0xa
#define RPM_TX_EN BIT_ULL(0)
#define RPM_RX_EN BIT_ULL(1)
+#define RPMX_CMRX_PRT_CBFC_CTL 0x5B08
+#define RPMX_CMRX_PRT_CBFC_CTL_LOGL_EN_RX_SHIFT 33
+#define RPMX_CMRX_PRT_CBFC_CTL_PHYS_BP_SHIFT 16
+#define RPMX_CMRX_PRT_CBFC_CTL_LOGL_EN_TX_SHIFT 0
+#define RPM_PFC_CLASS_MASK GENMASK_ULL(48, 33)
+#define RPMX_MTI_MAC100X_CL89_QUANTA_THRESH 0x8128
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_PAD_EN BIT_ULL(11)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE BIT_ULL(8)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD BIT_ULL(7)
+#define RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA 0x80A8
+#define RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA 0x8108
+#define RPM_DEFAULT_PAUSE_TIME 0xFFFF
/* Function Declarations */
int rpm_get_nr_lmacs(void *rpmd);
@@ -61,4 +87,8 @@ int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat);
void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable);
int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable);
int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause);
#endif /* RPM_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 5ed94cfb47d2..513b43ecd5be 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -807,6 +807,9 @@ u32 rvu_cgx_get_fifolen(struct rvu *rvu);
void *rvu_first_cgx_pdata(struct rvu *rvu);
int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable);
+int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause);
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
int type);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 8a7ac5a8b821..9ffe99830e34 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -863,6 +863,45 @@ int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 rx_pfc = 0, tx_pfc = 0;
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
+ return 0;
+
+ /* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
+ * if received from other PF/VF simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &tx_pfc, &rx_pfc);
+ if (tx_pfc || rx_pfc) {
+ dev_warn(rvu->dev,
+ "Can not configure 802.3X flow control as PFC frames are enabled");
+ return LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
+ pcifunc & RVU_PFVF_FUNC_MASK)) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return LMAC_AF_ERR_PERM_DENIED;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return mac_ops->mac_enadis_pause_frm(cgxd, lmac_id, tx_pause, rx_pause);
+}
+
int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
struct cgx_pause_frm_cfg *req,
struct cgx_pause_frm_cfg *rsp)
@@ -870,11 +909,9 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
int pf = rvu_get_pf(req->hdr.pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
+ int err = 0;
void *cgxd;
- if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
- return 0;
-
/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
* if received from other PF/VF simply ACK, nothing to do.
*/
@@ -886,13 +923,11 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
mac_ops = get_mac_ops(cgxd);
if (req->set)
- mac_ops->mac_enadis_pause_frm(cgxd, lmac_id,
- req->tx_pause, req->rx_pause);
+ err = rvu_cgx_cfg_pause_frm(rvu, req->hdr.pcifunc, req->tx_pause, req->rx_pause);
else
- mac_ops->mac_get_pause_frm_status(cgxd, lmac_id,
- &rsp->tx_pause,
- &rsp->rx_pause);
- return 0;
+ mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
+
+ return err;
}
int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
@@ -1079,3 +1114,67 @@ int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
}
+
+int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause,
+ u8 rx_pause, u16 pfc_en)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 rx_8023 = 0, tx_8023 = 0;
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ /* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
+ * if received from other PF/VF simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &tx_8023, &rx_8023);
+ if (tx_8023 || rx_8023) {
+ dev_warn(rvu->dev,
+ "Can not configure PFC as 802.3X pause frames are enabled");
+ return LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
+ pcifunc & RVU_PFVF_FUNC_MASK)) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return LMAC_AF_ERR_PERM_DENIED;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return mac_ops->pfc_config(cgxd, lmac_id, tx_pause, rx_pause, pfc_en);
+}
+
+int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
+ struct cgx_pfc_cfg *req,
+ struct cgx_pfc_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+ int err;
+
+ /* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
+ * if received from other PF/VF simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ err = rvu_cgx_prio_flow_ctrl_cfg(rvu, req->hdr.pcifunc, req->tx_pause,
+ req->rx_pause, req->pfc_en);
+
+ mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
+ return err;
+}
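The two AF paths above make 802.3x pause and PFC mutually exclusive per LMAC: rvu_cgx_cfg_pause_frm() returns LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED while PFC frames are enabled, and rvu_cgx_prio_flow_ctrl_cfg() returns LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED while 802.3x pause is active. A condensed sketch of that policy, using a hypothetical helper rather than the driver's API:

/* hypothetical condensation of the permission checks in
 * rvu_cgx_cfg_pause_frm() and rvu_cgx_prio_flow_ctrl_cfg();
 * "requested" is the mode being (re)configured, whether it is
 * being enabled or disabled
 */
enum fc_mode { FC_NONE, FC_8023X, FC_PFC };

static int fc_request_allowed(enum fc_mode active, enum fc_mode requested)
{
	/* an LMAC may only be (re)configured in a flow-control mode when
	 * the other mode is not currently enabled
	 */
	return active == FC_NONE || active == requested;
}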
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 97fb61915379..0fa625e2528e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -296,7 +296,6 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
struct rvu_hwinfo *hw = rvu->hw;
struct sdp_node_info *sdp_info;
int pkind, pf, vf, lbkid, vfid;
- struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
bool from_vf;
int err;
@@ -326,13 +325,6 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
rvu_npc_set_pkind(rvu, pkind, pfvf);
- mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
-
- /* By default we enable pause frames */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
- mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
- rvu),
- lmac_id, true, true);
break;
case NIX_INTF_TYPE_LBK:
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
@@ -533,7 +525,7 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
*/
switch (type) {
case NIX_INTF_TYPE_CGX:
- if ((req->chan_base + req->chan_cnt) > 15)
+ if ((req->chan_base + req->chan_cnt) > 16)
return -EINVAL;
rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
/* Assign bpid based on cgx, lmac and chan id */
@@ -4578,6 +4570,12 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
pfvf->hw_rx_tstamp_en = false;
}
+ /* reset priority flow control config */
+ rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0);
+
+ /* reset 802.3x flow control config */
+ rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0);
+
nix_ctx_free(rvu, pfvf);
nix_free_all_bandprof(rvu, pcifunc);
@@ -5314,6 +5312,7 @@ int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
aq_req.op = NIX_AQ_INSTOP_WRITE;
memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
+ memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
/* Clear higher layer enable bit in the mid profile, just in case */
aq_req.prof.hl_en = 0;
aq_req.prof_mask.hl_en = 1;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 0048b5946712..d463dc72d80a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -11,4 +11,7 @@ rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
otx2_devlink.o
rvu_nicvf-y := otx2_vf.o otx2_devlink.o
+rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
+rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
+
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 66da31f30d3e..b9d7601138ca 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -222,8 +222,11 @@ EXPORT_SYMBOL(otx2_set_mac_address);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
{
struct nix_frs_cfg *req;
+ u16 maxlen;
int err;
+ maxlen = otx2_get_max_mtu(pfvf) + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+
mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
if (!req) {
@@ -233,6 +236,10 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
req->maxlen = pfvf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+ /* Use max receive length supported by hardware for loopback devices */
+ if (is_otx2_lbkvf(pfvf->pdev))
+ req->maxlen = maxlen;
+
err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
return err;
@@ -262,6 +269,7 @@ unlock:
mutex_unlock(&pfvf->mbox.lock);
return err;
}
+EXPORT_SYMBOL(otx2_config_pause_frm);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
{
@@ -931,7 +939,11 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
if (!is_otx2_lbkvf(pfvf->pdev)) {
/* Enable receive CQ backpressure */
aq->cq.bp_ena = 1;
+#ifdef CONFIG_DCB
+ aq->cq.bpid = pfvf->bpid[pfvf->queue_to_pfc_map[qidx]];
+#else
aq->cq.bpid = pfvf->bpid[0];
+#endif
/* Set backpressure level is same as cq pass level */
aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
@@ -1036,7 +1048,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
struct nix_lf_alloc_rsp *rsp;
int err;
- pfvf->qset.xqe_size = NIX_XQESZ_W16 ? 128 : 512;
+ pfvf->qset.xqe_size = pfvf->hw.xqe_size;
/* Get memory to put this msg */
nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox);
@@ -1049,7 +1061,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
nixlf->cq_cnt = pfvf->qset.cq_cnt;
nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
nixlf->rss_grps = MAX_RSS_GROUPS;
- nixlf->xqe_sz = NIX_XQESZ_W16;
+ nixlf->xqe_sz = pfvf->hw.xqe_size == 128 ? NIX_XQESZ_W16 : NIX_XQESZ_W64;
/* We don't know absolute NPA LF idx attached.
* AF will replace 'RVU_DEFAULT_PF_FUNC' with
* NPA LF attached to this RVU PF/VF.
@@ -1211,7 +1223,11 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
*/
if (pfvf->nix_blkaddr == BLKADDR_NIX1)
aq->aura.bp_ena = 1;
+#ifdef CONFIG_DCB
+ aq->aura.nix0_bpid = pfvf->bpid[pfvf->queue_to_pfc_map[aura_id]];
+#else
aq->aura.nix0_bpid = pfvf->bpid[0];
+#endif
/* Set backpressure level for RQ's Aura */
aq->aura.bp = RQ_BP_LVL_AURA;
@@ -1538,11 +1554,18 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
return -ENOMEM;
req->chan_base = 0;
- req->chan_cnt = 1;
+#ifdef CONFIG_DCB
+ req->chan_cnt = pfvf->pfc_en ? IEEE_8021QAZ_MAX_TCS : 1;
+ req->bpid_per_chan = pfvf->pfc_en ? 1 : 0;
+#else
+ req->chan_cnt = 1;
req->bpid_per_chan = 0;
+#endif
+
return otx2_sync_mbox_msg(&pfvf->mbox);
}
+EXPORT_SYMBOL(otx2_nix_config_bp);
/* Mbox message handlers */
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
@@ -1704,6 +1727,56 @@ out:
}
EXPORT_SYMBOL(otx2_get_max_mtu);
+int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t features)
+{
+ netdev_features_t changed = features ^ netdev->features;
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ bool ntuple = !!(features & NETIF_F_NTUPLE);
+ bool tc = !!(features & NETIF_F_HW_TC);
+
+ if ((changed & NETIF_F_NTUPLE) && !ntuple)
+ otx2_destroy_ntuple_flows(pfvf);
+
+ if ((changed & NETIF_F_NTUPLE) && ntuple) {
+ if (!pfvf->flow_cfg->max_flows) {
+ netdev_err(netdev,
+ "Can't enable NTUPLE, MCAM entries not allocated\n");
+ return -EINVAL;
+ }
+ }
+
+ if ((changed & NETIF_F_HW_TC) && tc) {
+ if (!pfvf->flow_cfg->max_flows) {
+ netdev_err(netdev,
+ "Can't enable TC, MCAM entries not allocated\n");
+ return -EINVAL;
+ }
+ }
+
+ if ((changed & NETIF_F_HW_TC) && !tc &&
+ pfvf->flow_cfg && pfvf->flow_cfg->nr_flows) {
+ netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
+ return -EBUSY;
+ }
+
+ if ((changed & NETIF_F_NTUPLE) && ntuple &&
+ (netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) {
+ netdev_err(netdev,
+ "Can't enable NTUPLE when TC is active, disable TC and retry\n");
+ return -EINVAL;
+ }
+
+ if ((changed & NETIF_F_HW_TC) && tc &&
+ (netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) {
+ netdev_err(netdev,
+ "Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2_handle_ntuple_tc_features);
+
#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
int __weak \
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 14509fc64cce..c587c14ac2a3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -17,6 +17,7 @@
#include <linux/soc/marvell/octeontx2/asm.h>
#include <net/pkt_cls.h>
#include <net/devlink.h>
+#include <linux/time64.h>
#include <mbox.h>
#include <npc.h>
@@ -178,6 +179,10 @@ struct otx2_hw {
u16 rqpool_cnt;
u16 sqpool_cnt;
+#define OTX2_DEFAULT_RBUF_LEN 2048
+ u16 rbuf_len;
+ u32 xqe_size;
+
/* NPA */
u32 stack_pg_ptrs; /* No of ptrs per stack page */
u32 stack_pg_bytes; /* Size of stack page */
@@ -272,6 +277,8 @@ struct otx2_ptp {
u64 thresh;
struct ptp_pin_desc extts_config;
+ u64 (*convert_rx_ptp_tstmp)(u64 timestamp);
+ u64 (*convert_tx_ptp_tstmp)(u64 timestamp);
};
#define OTX2_HW_TIMESTAMP_LEN 8
@@ -396,6 +403,11 @@ struct otx2_nic {
/* Devlink */
struct otx2_devlink *dl;
+#ifdef CONFIG_DCB
+ /* PFC */
+ u8 pfc_en;
+ u8 *queue_to_pfc_map;
+#endif
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -863,6 +875,8 @@ int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
+int otx2_handle_ntuple_tc_features(struct net_device *netdev,
+ netdev_features_t features);
/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
@@ -876,4 +890,11 @@ int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos);
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);
+
+#ifdef CONFIG_DCB
+/* DCB support*/
+void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, bool pfc_enable);
+int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf);
+int otx2_dcbnl_set_ops(struct net_device *dev);
+#endif
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
new file mode 100644
index 000000000000..723d2506d309
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include "otx2_common.h"
+
+int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
+{
+ struct cgx_pfc_cfg *req;
+ struct cgx_pfc_rsp *rsp;
+ int err = 0;
+
+ if (is_otx2_lbkvf(pfvf->pdev))
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(&pfvf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ if (pfvf->pfc_en) {
+ req->rx_pause = true;
+ req->tx_pause = true;
+ } else {
+ req->rx_pause = false;
+ req->tx_pause = false;
+ }
+ req->pfc_en = pfvf->pfc_en;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
+ rsp = (struct cgx_pfc_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (req->rx_pause != rsp->rx_pause || req->tx_pause != rsp->tx_pause) {
+ dev_warn(pfvf->dev,
+ "Failed to config PFC\n");
+ err = -EPERM;
+ }
+ }
+unlock:
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,
+ bool pfc_enable)
+{
+ bool if_up = netif_running(pfvf->netdev);
+ struct npa_aq_enq_req *npa_aq;
+ struct nix_aq_enq_req *aq;
+ int err = 0;
+
+ if (pfvf->queue_to_pfc_map[qidx] && pfc_enable) {
+ dev_warn(pfvf->dev,
+ "PFC enable not permitted as Priority %d already mapped to Queue %d\n",
+ pfvf->queue_to_pfc_map[qidx], qidx);
+ return;
+ }
+
+ if (if_up) {
+ netif_tx_stop_all_queues(pfvf->netdev);
+ netif_carrier_off(pfvf->netdev);
+ }
+
+ pfvf->queue_to_pfc_map[qidx] = vlan_prio;
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ aq->cq.bpid = pfvf->bpid[vlan_prio];
+ aq->cq_mask.bpid = GENMASK(8, 0);
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_CQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ otx2_sync_mbox_msg(&pfvf->mbox);
+
+ npa_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!npa_aq) {
+ err = -ENOMEM;
+ goto out;
+ }
+ npa_aq->aura.nix0_bpid = pfvf->bpid[vlan_prio];
+ npa_aq->aura_mask.nix0_bpid = GENMASK(8, 0);
+
+ /* Fill NPA AQ info */
+ npa_aq->aura_id = qidx;
+ npa_aq->ctype = NPA_AQ_CTYPE_AURA;
+ npa_aq->op = NPA_AQ_INSTOP_WRITE;
+ otx2_sync_mbox_msg(&pfvf->mbox);
+
+out:
+ if (if_up) {
+ netif_carrier_on(pfvf->netdev);
+ netif_tx_start_all_queues(pfvf->netdev);
+ }
+
+ if (err)
+ dev_warn(pfvf->dev,
+ "Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n",
+ qidx, err);
+}
+
+static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
+ pfc->pfc_en = pfvf->pfc_en;
+
+ return 0;
+}
+
+static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ int err;
+
+ /* Save PFC configuration to interface */
+ pfvf->pfc_en = pfc->pfc_en;
+
+ err = otx2_config_priority_flow_ctrl(pfvf);
+ if (err)
+ return err;
+
+ /* Request Per channel Bpids */
+ if (pfc->pfc_en)
+ otx2_nix_config_bp(pfvf, true);
+
+ return 0;
+}
+
+static u8 otx2_dcbnl_getdcbx(struct net_device __always_unused *dev)
+{
+ return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+}
+
+static u8 otx2_dcbnl_setdcbx(struct net_device __always_unused *dev, u8 mode)
+{
+ return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0;
+}
+
+static const struct dcbnl_rtnl_ops otx2_dcbnl_ops = {
+ .ieee_getpfc = otx2_dcbnl_ieee_getpfc,
+ .ieee_setpfc = otx2_dcbnl_ieee_setpfc,
+ .getdcbx = otx2_dcbnl_getdcbx,
+ .setdcbx = otx2_dcbnl_setdcbx,
+};
+
+int otx2_dcbnl_set_ops(struct net_device *dev)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ pfvf->queue_to_pfc_map = devm_kzalloc(pfvf->dev, pfvf->hw.rx_queues,
+ GFP_KERNEL);
+ if (!pfvf->queue_to_pfc_map)
+ return -ENOMEM;
+ dev->dcbnl_ops = &otx2_dcbnl_ops;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index d85db90632d6..fc328de5345e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -371,6 +371,8 @@ static void otx2_get_ringparam(struct net_device *netdev,
ring->rx_pending = qs->rqe_cnt ? qs->rqe_cnt : Q_COUNT(Q_SIZE_256);
ring->tx_max_pending = Q_COUNT(Q_SIZE_MAX);
ring->tx_pending = qs->sqe_cnt ? qs->sqe_cnt : Q_COUNT(Q_SIZE_4K);
+ kernel_ring->rx_buf_len = pfvf->hw.rbuf_len;
+ kernel_ring->cqe_size = pfvf->hw.xqe_size;
}
static int otx2_set_ringparam(struct net_device *netdev,
@@ -379,6 +381,9 @@ static int otx2_set_ringparam(struct net_device *netdev,
struct netlink_ext_ack *extack)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
+ u32 rx_buf_len = kernel_ring->rx_buf_len;
+ u32 old_rx_buf_len = pfvf->hw.rbuf_len;
+ u32 xqe_size = kernel_ring->cqe_size;
bool if_up = netif_running(netdev);
struct otx2_qset *qs = &pfvf->qset;
u32 rx_count, tx_count;
@@ -386,6 +391,21 @@ static int otx2_set_ringparam(struct net_device *netdev,
if (ring->rx_mini_pending || ring->rx_jumbo_pending)
return -EINVAL;
+ /* Hardware supports max size of 32k for a receive buffer
+ * and 1536 is typical ethernet frame size.
+ */
+ if (rx_buf_len && (rx_buf_len < 1536 || rx_buf_len > 32768)) {
+ netdev_err(netdev,
+ "Receive buffer range is 1536 - 32768");
+ return -EINVAL;
+ }
+
+ if (xqe_size != 128 && xqe_size != 512) {
+ netdev_err(netdev,
+ "Completion event size must be 128 or 512");
+ return -EINVAL;
+ }
+
/* Permitted lengths are 16 64 256 1K 4K 16K 64K 256K 1M */
rx_count = ring->rx_pending;
/* On some silicon variants a skid or reserved CQEs are
@@ -403,7 +423,8 @@ static int otx2_set_ringparam(struct net_device *netdev,
Q_COUNT(Q_SIZE_4K), Q_COUNT(Q_SIZE_MAX));
tx_count = Q_COUNT(Q_SIZE(tx_count, 3));
- if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt)
+ if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt &&
+ rx_buf_len == old_rx_buf_len && xqe_size == pfvf->hw.xqe_size)
return 0;
if (if_up)
@@ -413,6 +434,9 @@ static int otx2_set_ringparam(struct net_device *netdev,
qs->sqe_cnt = tx_count;
qs->rqe_cnt = rx_count;
+ pfvf->hw.rbuf_len = rx_buf_len;
+ pfvf->hw.xqe_size = xqe_size;
+
if (if_up)
return netdev->netdev_ops->ndo_open(netdev);
@@ -1207,6 +1231,8 @@ end:
static const struct ethtool_ops otx2_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
+ .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN |
+ ETHTOOL_RING_USE_CQE_SIZE,
.get_link = otx2_get_link,
.get_drvinfo = otx2_get_drvinfo,
.get_strings = otx2_get_strings,
@@ -1326,6 +1352,8 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev,
static const struct ethtool_ops otx2vf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
+ .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN |
+ ETHTOOL_RING_USE_CQE_SIZE,
.get_link = otx2_get_link,
.get_drvinfo = otx2vf_get_drvinfo,
.get_strings = otx2vf_get_strings,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 77a13fb555fb..54f235c216a9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -21,8 +21,10 @@ struct otx2_flow {
u16 entry;
bool is_vf;
u8 rss_ctx_id;
+#define DMAC_FILTER_RULE BIT(0)
+#define PFC_FLOWCTRL_RULE BIT(1)
+ u16 rule_type;
int vf;
- bool dmac_filter;
};
enum dmac_req {
@@ -899,6 +901,9 @@ static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
u64 ring_cookie = flow->flow_spec.ring_cookie;
+#ifdef CONFIG_DCB
+ int vlan_prio, qidx, pfc_rule = 0;
+#endif
struct npc_install_flow_req *req;
int err, vf = 0;
@@ -940,6 +945,24 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
mutex_unlock(&pfvf->mbox.lock);
return -EINVAL;
}
+
+#ifdef CONFIG_DCB
+ /* Identify PFC rule if PFC enabled and ntuple rule is vlan */
+ if (!vf && (req->features & BIT_ULL(NPC_OUTER_VID)) &&
+ pfvf->pfc_en && req->op != NIX_RX_ACTIONOP_RSS) {
+ vlan_prio = ntohs(req->packet.vlan_tci) &
+ ntohs(req->mask.vlan_tci);
+
+ /* Get the priority */
+ vlan_prio >>= 13;
+ flow->rule_type |= PFC_FLOWCTRL_RULE;
+ /* Check if PFC enabled for this priority */
+ if (pfvf->pfc_en & BIT(vlan_prio)) {
+ pfc_rule = true;
+ qidx = req->index;
+ }
+ }
+#endif
}
/* ethtool ring_cookie has (VF + 1) for VF */
@@ -951,6 +974,12 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
/* Send message to AF */
err = otx2_sync_mbox_msg(&pfvf->mbox);
+
+#ifdef CONFIG_DCB
+ if (!err && pfc_rule)
+ otx2_update_bpid_in_rqctx(pfvf, vlan_prio, qidx, true);
+#endif
+
mutex_unlock(&pfvf->mbox.lock);
return err;
}
@@ -966,7 +995,7 @@ static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
return -ENOMEM;
pf_mac->entry = 0;
- pf_mac->dmac_filter = true;
+ pf_mac->rule_type |= DMAC_FILTER_RULE;
pf_mac->location = pfvf->flow_cfg->max_flows;
memcpy(&pf_mac->flow_spec, &flow->flow_spec,
sizeof(struct ethtool_rx_flow_spec));
@@ -1031,7 +1060,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
eth_hdr = &flow->flow_spec.h_u.ether_spec;
/* Sync dmac filter table with updated fields */
- if (flow->dmac_filter)
+ if (flow->rule_type & DMAC_FILTER_RULE)
return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
flow->entry);
@@ -1052,7 +1081,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
if (!test_bit(0, &flow_cfg->dmacflt_bmap))
otx2_add_flow_with_pfmac(pfvf, flow);
- flow->dmac_filter = true;
+ flow->rule_type |= DMAC_FILTER_RULE;
flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows);
fsp->location = flow_cfg->max_flows + flow->entry;
@@ -1120,7 +1149,7 @@ static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
bool found = false;
list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
- if (iter->dmac_filter && iter->entry == 0) {
+ if ((iter->rule_type & DMAC_FILTER_RULE) && iter->entry == 0) {
eth_hdr = &iter->flow_spec.h_u.ether_spec;
if (req == DMAC_ADDR_DEL) {
otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
@@ -1156,7 +1185,7 @@ int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
if (!flow)
return -ENOENT;
- if (flow->dmac_filter) {
+ if (flow->rule_type & DMAC_FILTER_RULE) {
struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;
/* user not allowed to remove dmac filter with interface mac */
@@ -1174,6 +1203,13 @@ int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
flow_cfg->dmacflt_max_flows) == 1)
otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
} else {
+#ifdef CONFIG_DCB
+ if (flow->rule_type & PFC_FLOWCTRL_RULE)
+ otx2_update_bpid_in_rqctx(pfvf, 0,
+ flow->flow_spec.ring_cookie,
+ false);
+#endif
+
err = otx2_remove_flow_msg(pfvf, flow->entry, false);
}
@@ -1383,7 +1419,7 @@ void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
struct ethhdr *eth_hdr;
list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
- if (iter->dmac_filter) {
+ if (iter->rule_type & DMAC_FILTER_RULE) {
eth_hdr = &iter->flow_spec.h_u.ether_spec;
otx2_dmacflt_add(pf, eth_hdr->h_dest,
iter->entry);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index d39341e4ab37..441aafc26a08 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1311,6 +1311,9 @@ static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
int total_size;
int rbuf_size;
+ if (pf->hw.rbuf_len)
+ return ALIGN(pf->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM;
+
/* The data transferred by NIX to memory consists of actual packet
* plus additional data which has timestamp and/or EDSA/HIGIG2
* headers if interface is configured in corresponding modes.
@@ -1694,9 +1697,6 @@ int otx2_open(struct net_device *netdev)
if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
otx2_handle_link_event(pf);
- /* Restore pause frame settings */
- otx2_config_pause_frm(pf);
-
/* Install DMAC Filters */
if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
otx2_dmacflt_reinstall_flows(pf);
@@ -1863,9 +1863,7 @@ static int otx2_set_features(struct net_device *netdev,
netdev_features_t features)
{
netdev_features_t changed = features ^ netdev->features;
- bool ntuple = !!(features & NETIF_F_NTUPLE);
struct otx2_nic *pf = netdev_priv(netdev);
- bool tc = !!(features & NETIF_F_HW_TC);
if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
return otx2_cgx_config_loopback(pf,
@@ -1875,46 +1873,7 @@ static int otx2_set_features(struct net_device *netdev,
return otx2_enable_rxvlan(pf,
features & NETIF_F_HW_VLAN_CTAG_RX);
- if ((changed & NETIF_F_NTUPLE) && !ntuple)
- otx2_destroy_ntuple_flows(pf);
-
- if ((changed & NETIF_F_NTUPLE) && ntuple) {
- if (!pf->flow_cfg->max_flows) {
- netdev_err(netdev,
- "Can't enable NTUPLE, MCAM entries not allocated\n");
- return -EINVAL;
- }
- }
-
- if ((changed & NETIF_F_HW_TC) && tc) {
- if (!pf->flow_cfg->max_flows) {
- netdev_err(netdev,
- "Can't enable TC, MCAM entries not allocated\n");
- return -EINVAL;
- }
- }
-
- if ((changed & NETIF_F_HW_TC) && !tc &&
- pf->flow_cfg && pf->flow_cfg->nr_flows) {
- netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
- return -EBUSY;
- }
-
- if ((changed & NETIF_F_NTUPLE) && ntuple &&
- (netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) {
- netdev_err(netdev,
- "Can't enable NTUPLE when TC is active, disable TC and retry\n");
- return -EINVAL;
- }
-
- if ((changed & NETIF_F_HW_TC) && tc &&
- (netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) {
- netdev_err(netdev,
- "Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n");
- return -EINVAL;
- }
-
- return 0;
+ return otx2_handle_ntuple_tc_features(netdev, features);
}
static void otx2_reset_task(struct work_struct *work)
@@ -2625,6 +2584,9 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->tx_queues = qcount;
hw->tot_tx_queues = qcount;
hw->max_queues = qcount;
+ hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
+ /* Use CQE of 128 byte descriptor size by default */
+ hw->xqe_size = 128;
num_vec = pci_msix_vec_count(pdev);
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
@@ -2778,9 +2740,11 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Enable link notifications */
otx2_cgx_config_linkevents(pf, true);
- /* Enable pause frames by default */
- pf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
- pf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+#ifdef CONFIG_DCB
+ err = otx2_dcbnl_set_ops(netdev);
+ if (err)
+ goto err_pf_sriov_init;
+#endif
return 0;
@@ -2925,6 +2889,21 @@ static void otx2_remove(struct pci_dev *pdev)
if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
otx2_config_hw_rx_tstamp(pf, false);
+ /* Disable 802.3x pause frames */
+ if (pf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
+ (pf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
+ pf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
+ pf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
+ otx2_config_pause_frm(pf);
+ }
+
+#ifdef CONFIG_DCB
+ /* Disable PFC config */
+ if (pf->pfc_en) {
+ pf->pfc_en = 0;
+ otx2_config_priority_flow_ctrl(pf);
+ }
+#endif
cancel_work_sync(&pf->reset_task);
/* Disable link notifications */
otx2_cgx_config_linkevents(pf, false);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index 61c20907315f..fdc2c9315b91 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -294,6 +294,14 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
goto error;
}
+ if (is_dev_otx2(pfvf->pdev)) {
+ ptp_ptr->convert_rx_ptp_tstmp = &otx2_ptp_convert_rx_timestamp;
+ ptp_ptr->convert_tx_ptp_tstmp = &otx2_ptp_convert_tx_timestamp;
+ } else {
+ ptp_ptr->convert_rx_ptp_tstmp = &cn10k_ptp_convert_timestamp;
+ ptp_ptr->convert_tx_ptp_tstmp = &cn10k_ptp_convert_timestamp;
+ }
+
pfvf->ptp = ptp_ptr;
error:
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
index 6ff284211d7b..7ff41927ceaf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
@@ -8,6 +8,21 @@
#ifndef OTX2_PTP_H
#define OTX2_PTP_H
+static inline u64 otx2_ptp_convert_rx_timestamp(u64 timestamp)
+{
+ return be64_to_cpu(*(__be64 *)&timestamp);
+}
+
+static inline u64 otx2_ptp_convert_tx_timestamp(u64 timestamp)
+{
+ return timestamp;
+}
+
+static inline u64 cn10k_ptp_convert_timestamp(u64 timestamp)
+{
+ return ((timestamp >> 32) * NSEC_PER_SEC) + (timestamp & 0xFFFFFFFFUL);
+}
+
int otx2_ptp_init(struct otx2_nic *pfvf);
void otx2_ptp_destroy(struct otx2_nic *pfvf);
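cn10k_ptp_convert_timestamp() above unpacks a raw timestamp that carries seconds in the upper 32 bits and nanoseconds in the lower 32 bits, returning a flat nanosecond count. A quick stand-alone check of that arithmetic (illustrative only, not driver code):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static uint64_t cn10k_convert(uint64_t ts)
{
	return ((ts >> 32) * NSEC_PER_SEC) + (ts & 0xFFFFFFFFULL);
}

int main(void)
{
	uint64_t raw = (5ULL << 32) | 123456789ULL;	/* 5 s, 123456789 ns */

	printf("%llu\n", (unsigned long long)cn10k_convert(raw));	/* 5123456789 */
	return 0;
}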
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 626961a41089..28b19945d716 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -58,7 +58,7 @@ int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
{
struct otx2_tc_info *tc = &nic->tc_info;
- if (!nic->flow_cfg->max_flows || is_otx2_vf(nic->pcifunc))
+ if (!nic->flow_cfg->max_flows)
return 0;
/* Max flows changed, free the existing bitmap */
@@ -190,6 +190,40 @@ static int otx2_tc_validate_flow(struct otx2_nic *nic,
return 0;
}
+static int otx2_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
struct tc_cls_matchall_offload *cls)
{
@@ -212,6 +246,10 @@ static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
entry = &cls->rule->action.entries[0];
switch (entry->id) {
case FLOW_ACTION_POLICE:
+ err = otx2_policer_validate(&cls->rule->action, entry, extack);
+ if (err)
+ return err;
+
if (entry->police.rate_pkt_ps) {
NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
return -EOPNOTSUPP;
@@ -315,6 +353,7 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
u8 nr_police = 0;
bool pps = false;
u64 rate;
+ int err;
int i;
if (!flow_action_has_entries(flow_action)) {
@@ -355,6 +394,10 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
return -EOPNOTSUPP;
}
+ err = otx2_policer_validate(flow_action, act, extack);
+ if (err)
+ return err;
+
if (act->police.rate_bytes_ps > 0) {
rate = act->police.rate_bytes_ps * 8;
burst = act->police.burst;
@@ -1023,6 +1066,7 @@ int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
return -EOPNOTSUPP;
}
}
+EXPORT_SYMBOL(otx2_setup_tc);
static const struct rhashtable_params tc_flow_ht_params = {
.head_offset = offsetof(struct otx2_tc_flow, node),
@@ -1052,6 +1096,7 @@ int otx2_init_tc(struct otx2_nic *nic)
tc->flow_ht_params = tc_flow_ht_params;
return rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
}
+EXPORT_SYMBOL(otx2_init_tc);
void otx2_shutdown_tc(struct otx2_nic *nic)
{
@@ -1060,3 +1105,4 @@ void otx2_shutdown_tc(struct otx2_nic *nic)
kfree(tc->tc_entries_bitmap);
rhashtable_destroy(&tc->flow_table);
}
+EXPORT_SYMBOL(otx2_shutdown_tc);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 7c4068c5d1ac..c26de15b2ac3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -148,6 +148,7 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id];
if (timestamp != 1) {
+ timestamp = pfvf->ptp->convert_tx_ptp_tstmp(timestamp);
err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
if (!err) {
memset(&ts, 0, sizeof(ts));
@@ -167,14 +168,15 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
struct sk_buff *skb, void *data)
{
- u64 tsns;
+ u64 timestamp, tsns;
int err;
if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
return;
+ timestamp = pfvf->ptp->convert_rx_ptp_tstmp(*(u64 *)data);
/* The first 8 bytes is the timestamp */
- err = otx2_ptp_tstamp2time(pfvf, be64_to_cpu(*(__be64 *)data), &tsns);
+ err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
if (err)
return;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 925b74ebb8b0..9e87836ed8bf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -472,23 +472,7 @@ static void otx2vf_reset_task(struct work_struct *work)
static int otx2vf_set_features(struct net_device *netdev,
netdev_features_t features)
{
- netdev_features_t changed = features ^ netdev->features;
- bool ntuple_enabled = !!(features & NETIF_F_NTUPLE);
- struct otx2_nic *vf = netdev_priv(netdev);
-
- if (changed & NETIF_F_NTUPLE) {
- if (!ntuple_enabled) {
- otx2_mcam_flow_del(vf);
- return 0;
- }
-
- if (!otx2_get_maxflows(vf->flow_cfg)) {
- netdev_err(netdev,
- "Can't enable NTUPLE, MCAM entries not allocated\n");
- return -EINVAL;
- }
- }
- return 0;
+ return otx2_handle_ntuple_tc_features(netdev, features);
}
static const struct net_device_ops otx2vf_netdev_ops = {
@@ -502,6 +486,7 @@ static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_get_stats64 = otx2_get_stats64,
.ndo_tx_timeout = otx2_tx_timeout,
.ndo_eth_ioctl = otx2_ioctl,
+ .ndo_setup_tc = otx2_setup_tc,
};
static int otx2_wq_init(struct otx2_nic *vf)
@@ -586,6 +571,9 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->tx_queues = qcount;
hw->max_queues = qcount;
hw->tot_tx_queues = qcount;
+ hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
+ /* Use CQE of 128 byte descriptor size by default */
+ hw->xqe_size = 128;
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
GFP_KERNEL);
@@ -662,6 +650,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->hw_features |= NETIF_F_NTUPLE;
netdev->hw_features |= NETIF_F_RXALL;
+ netdev->hw_features |= NETIF_F_HW_TC;
netif_set_gso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
@@ -697,16 +686,24 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_unreg_netdev;
- err = otx2_register_dl(vf);
+ err = otx2_init_tc(vf);
if (err)
goto err_unreg_netdev;
- /* Enable pause frames by default */
- vf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
- vf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+ err = otx2_register_dl(vf);
+ if (err)
+ goto err_shutdown_tc;
+
+#ifdef CONFIG_DCB
+ err = otx2_dcbnl_set_ops(netdev);
+ if (err)
+ goto err_shutdown_tc;
+#endif
return 0;
+err_shutdown_tc:
+ otx2_shutdown_tc(vf);
err_unreg_netdev:
unregister_netdev(netdev);
err_ptp_destroy:
@@ -739,6 +736,22 @@ static void otx2vf_remove(struct pci_dev *pdev)
vf = netdev_priv(netdev);
+ /* Disable 802.3x pause frames */
+ if (vf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
+ (vf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
+ vf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
+ vf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
+ otx2_config_pause_frm(vf);
+ }
+
+#ifdef CONFIG_DCB
+ /* Disable PFC config */
+ if (vf->pfc_en) {
+ vf->pfc_en = 0;
+ otx2_config_priority_flow_ctrl(vf);
+ }
+#endif
+
cancel_work_sync(&vf->reset_task);
otx2_unregister_dl(vf);
unregister_netdev(netdev);
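The VF probe path above now has three late setup stages (otx2_init_tc(), otx2_register_dl(), and the DCB ops under CONFIG_DCB) and a matching intermediate unwind label, err_shutdown_tc, so a late failure tears down only what was already set up before falling through to the pre-existing labels. A minimal sketch of that goto-unwind idiom with placeholder resource names:

#include <stdio.h>

static int setup_a(void)    { puts("setup a"); return 0; }
static int setup_b(void)    { puts("setup b"); return -1; } /* simulate failure */
static void teardown_a(void){ puts("teardown a"); }

/* Later setup steps unwind earlier ones, in reverse order, via goto labels. */
static int probe(void)
{
        int err;

        err = setup_a();
        if (err)
                return err;

        err = setup_b();
        if (err)
                goto err_teardown_a;

        return 0;

err_teardown_a:
        teardown_a();
        return err;
}

int main(void)
{
        return probe() ? 1 : 0;
}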
diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h
index 2fd9ef2fe5d6..6f754ae2a584 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera.h
@@ -281,8 +281,11 @@ struct prestera_router {
struct prestera_switch *sw;
struct list_head vr_list;
struct list_head rif_entry_list;
+ struct rhashtable fib_ht;
+ struct rhashtable kern_fib_cache_ht;
struct notifier_block inetaddr_nb;
struct notifier_block inetaddr_valid_nb;
+ struct notifier_block fib_nb;
};
struct prestera_rxtx_params {
@@ -325,6 +328,8 @@ int prestera_port_cfg_mac_write(struct prestera_port *port,
struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev);
+void prestera_queue_work(struct work_struct *work);
+
int prestera_port_pvid_set(struct prestera_port *port, u16 vid);
bool prestera_netdev_check(const struct net_device *dev);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
index f0d9f592173b..47c899c08951 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
@@ -22,6 +22,7 @@ struct prestera_acl {
struct prestera_acl_ruleset_ht_key {
struct prestera_flow_block *block;
+ u32 chain_index;
};
struct prestera_acl_rule_entry {
@@ -34,6 +35,10 @@ struct prestera_acl_rule_entry {
u8 valid:1;
} accept, drop, trap;
struct {
+ struct prestera_acl_action_jump i;
+ u8 valid:1;
+ } jump;
+ struct {
u32 id;
struct prestera_counter_block *block;
} counter;
@@ -49,6 +54,7 @@ struct prestera_acl_ruleset {
refcount_t refcount;
void *keymask;
u32 vtcam_id;
+ u32 index;
u16 pcl_id;
bool offload;
};
@@ -83,20 +89,45 @@ static const struct rhashtable_params __prestera_acl_rule_entry_ht_params = {
.automatic_shrinking = true,
};
+int prestera_acl_chain_to_client(u32 chain_index, u32 *client)
+{
+ static const u32 client_map[] = {
+ PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0,
+ PRESTERA_HW_COUNTER_CLIENT_LOOKUP_1,
+ PRESTERA_HW_COUNTER_CLIENT_LOOKUP_2
+ };
+
+ if (chain_index >= ARRAY_SIZE(client_map))
+ return -EINVAL;
+
+ *client = client_map[chain_index];
+ return 0;
+}
+
+static bool prestera_acl_chain_is_supported(u32 chain_index)
+{
+ return (chain_index & ~PRESTERA_ACL_CHAIN_MASK) == 0;
+}
+
static struct prestera_acl_ruleset *
prestera_acl_ruleset_create(struct prestera_acl *acl,
- struct prestera_flow_block *block)
+ struct prestera_flow_block *block,
+ u32 chain_index)
{
struct prestera_acl_ruleset *ruleset;
u32 uid = 0;
int err;
+ if (!prestera_acl_chain_is_supported(chain_index))
+ return ERR_PTR(-EINVAL);
+
ruleset = kzalloc(sizeof(*ruleset), GFP_KERNEL);
if (!ruleset)
return ERR_PTR(-ENOMEM);
ruleset->acl = acl;
ruleset->ht_key.block = block;
+ ruleset->ht_key.chain_index = chain_index;
refcount_set(&ruleset->refcount, 1);
err = rhashtable_init(&ruleset->rule_ht, &prestera_acl_rule_ht_params);
@@ -108,7 +139,9 @@ prestera_acl_ruleset_create(struct prestera_acl *acl,
goto err_ruleset_create;
/* make pcl-id based on uid */
- ruleset->pcl_id = (u8)uid;
+ ruleset->pcl_id = PRESTERA_ACL_PCL_ID_MAKE((u8)uid, chain_index);
+ ruleset->index = uid;
+
err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
prestera_acl_ruleset_ht_params);
if (err)
@@ -133,35 +166,64 @@ void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset)
{
+ struct prestera_acl_iface iface;
u32 vtcam_id;
int err;
if (ruleset->offload)
return -EEXIST;
- err = prestera_acl_vtcam_id_get(ruleset->acl, 0,
+ err = prestera_acl_vtcam_id_get(ruleset->acl,
+ ruleset->ht_key.chain_index,
ruleset->keymask, &vtcam_id);
if (err)
- return err;
+ goto err_vtcam_create;
+
+ if (ruleset->ht_key.chain_index) {
+ /* for chain > 0, bind iface index to pcl-id to be able
+ * to jump from any other ruleset to this one using the index.
+ */
+ iface.index = ruleset->index;
+ iface.type = PRESTERA_ACL_IFACE_TYPE_INDEX;
+ err = prestera_hw_vtcam_iface_bind(ruleset->acl->sw, &iface,
+ vtcam_id, ruleset->pcl_id);
+ if (err)
+ goto err_ruleset_bind;
+ }
ruleset->vtcam_id = vtcam_id;
ruleset->offload = true;
return 0;
+
+err_ruleset_bind:
+ prestera_acl_vtcam_id_put(ruleset->acl, ruleset->vtcam_id);
+err_vtcam_create:
+ return err;
}
static void prestera_acl_ruleset_destroy(struct prestera_acl_ruleset *ruleset)
{
struct prestera_acl *acl = ruleset->acl;
u8 uid = ruleset->pcl_id & PRESTERA_ACL_KEYMASK_PCL_ID_USER;
+ int err;
rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
prestera_acl_ruleset_ht_params);
- if (ruleset->offload)
+ if (ruleset->offload) {
+ if (ruleset->ht_key.chain_index) {
+ struct prestera_acl_iface iface = {
+ .type = PRESTERA_ACL_IFACE_TYPE_INDEX,
+ .index = ruleset->index
+ };
+ err = prestera_hw_vtcam_iface_unbind(acl->sw, &iface,
+ ruleset->vtcam_id);
+ WARN_ON(err);
+ }
WARN_ON(prestera_acl_vtcam_id_put(acl, ruleset->vtcam_id));
+ }
idr_remove(&acl->uid, uid);
-
rhashtable_destroy(&ruleset->rule_ht);
kfree(ruleset->keymask);
kfree(ruleset);
@@ -169,23 +231,26 @@ static void prestera_acl_ruleset_destroy(struct prestera_acl_ruleset *ruleset)
static struct prestera_acl_ruleset *
__prestera_acl_ruleset_lookup(struct prestera_acl *acl,
- struct prestera_flow_block *block)
+ struct prestera_flow_block *block,
+ u32 chain_index)
{
struct prestera_acl_ruleset_ht_key ht_key;
memset(&ht_key, 0, sizeof(ht_key));
ht_key.block = block;
+ ht_key.chain_index = chain_index;
return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
prestera_acl_ruleset_ht_params);
}
struct prestera_acl_ruleset *
prestera_acl_ruleset_lookup(struct prestera_acl *acl,
- struct prestera_flow_block *block)
+ struct prestera_flow_block *block,
+ u32 chain_index)
{
struct prestera_acl_ruleset *ruleset;
- ruleset = __prestera_acl_ruleset_lookup(acl, block);
+ ruleset = __prestera_acl_ruleset_lookup(acl, block, chain_index);
if (!ruleset)
return ERR_PTR(-ENOENT);
@@ -195,17 +260,18 @@ prestera_acl_ruleset_lookup(struct prestera_acl *acl,
struct prestera_acl_ruleset *
prestera_acl_ruleset_get(struct prestera_acl *acl,
- struct prestera_flow_block *block)
+ struct prestera_flow_block *block,
+ u32 chain_index)
{
struct prestera_acl_ruleset *ruleset;
- ruleset = __prestera_acl_ruleset_lookup(acl, block);
+ ruleset = __prestera_acl_ruleset_lookup(acl, block, chain_index);
if (ruleset) {
refcount_inc(&ruleset->refcount);
return ruleset;
}
- return prestera_acl_ruleset_create(acl, block);
+ return prestera_acl_ruleset_create(acl, block, chain_index);
}
void prestera_acl_ruleset_put(struct prestera_acl_ruleset *ruleset)
@@ -293,6 +359,11 @@ prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset,
prestera_acl_rule_ht_params);
}
+u32 prestera_acl_ruleset_index_get(const struct prestera_acl_ruleset *ruleset)
+{
+ return ruleset->index;
+}
+
bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset)
{
return ruleset->offload;
@@ -300,7 +371,7 @@ bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset)
struct prestera_acl_rule *
prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset,
- unsigned long cookie)
+ unsigned long cookie, u32 chain_index)
{
struct prestera_acl_rule *rule;
@@ -310,6 +381,7 @@ prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset,
rule->ruleset = ruleset;
rule->cookie = cookie;
+ rule->chain_index = chain_index;
refcount_inc(&ruleset->refcount);
@@ -324,6 +396,10 @@ void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule,
void prestera_acl_rule_destroy(struct prestera_acl_rule *rule)
{
+ if (rule->jump_ruleset)
+ /* release ruleset kept by jump action */
+ prestera_acl_ruleset_put(rule->jump_ruleset);
+
prestera_acl_ruleset_put(rule->ruleset);
kfree(rule);
}
@@ -347,7 +423,10 @@ int prestera_acl_rule_add(struct prestera_switch *sw,
/* setup counter */
rule->re_arg.count.valid = true;
- rule->re_arg.count.client = PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0;
+ err = prestera_acl_chain_to_client(ruleset->ht_key.chain_index,
+ &rule->re_arg.count.client);
+ if (err)
+ goto err_rule_add;
rule->re = prestera_acl_rule_entry_find(sw->acl, &rule->re_key);
err = WARN_ON(rule->re) ? -EEXIST : 0;
@@ -360,8 +439,10 @@ int prestera_acl_rule_add(struct prestera_switch *sw,
if (err)
goto err_rule_add;
- /* bind the block (all ports) to chain index 0 */
- if (!ruleset->rule_count) {
+ /* bind the block (all ports) to chain index 0; the rest of
+ * the chains are bound via the goto action
+ */
+ if (!ruleset->ht_key.chain_index && !ruleset->rule_count) {
err = prestera_acl_ruleset_block_bind(ruleset, block);
if (err)
goto err_acl_block_bind;
@@ -395,7 +476,7 @@ void prestera_acl_rule_del(struct prestera_switch *sw,
prestera_acl_rule_entry_destroy(sw->acl, rule->re);
/* unbind block (all ports) */
- if (!ruleset->rule_count)
+ if (!ruleset->ht_key.chain_index && !ruleset->rule_count)
prestera_acl_ruleset_block_unbind(ruleset, block);
}
@@ -459,6 +540,12 @@ static int __prestera_acl_rule_entry2hw_add(struct prestera_switch *sw,
act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_TRAP;
act_num++;
}
+ /* jump */
+ if (e->jump.valid) {
+ act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_JUMP;
+ act_hw[act_num].jump = e->jump.i;
+ act_num++;
+ }
/* counter */
if (e->counter.block) {
act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_COUNT;
@@ -505,6 +592,9 @@ __prestera_acl_rule_entry_act_construct(struct prestera_switch *sw,
e->drop.valid = arg->drop.valid;
/* trap */
e->trap.valid = arg->trap.valid;
+ /* jump */
+ e->jump.valid = arg->jump.valid;
+ e->jump.i = arg->jump.i;
/* counter */
if (arg->count.valid) {
int err;
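prestera_acl_chain_to_client() gives each ACL chain its own hardware counter client by indexing a small bounds-checked table, which is why prestera_acl_rule_add() no longer hard-codes PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0. A standalone sketch of the same mapping, using illustrative constants in place of the PRESTERA_HW_COUNTER_CLIENT_LOOKUP_* ids:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

/* Illustrative stand-ins for the per-lookup counter client ids. */
enum { CLIENT_LOOKUP_0, CLIENT_LOOKUP_1, CLIENT_LOOKUP_2 };

static int chain_to_client(uint32_t chain_index, uint32_t *client)
{
        static const uint32_t client_map[] = {
                CLIENT_LOOKUP_0, CLIENT_LOOKUP_1, CLIENT_LOOKUP_2
        };

        if (chain_index >= sizeof(client_map) / sizeof(client_map[0]))
                return -EINVAL;

        *client = client_map[chain_index];
        return 0;
}

int main(void)
{
        uint32_t client;

        for (uint32_t chain = 0; chain < 4; chain++) {
                if (chain_to_client(chain, &client))
                        printf("chain %u: no counter client\n", chain);
                else
                        printf("chain %u -> counter client %u\n", chain, client);
        }
        return 0;
}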
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.h b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
index 40f6c1d961fa..6d2ad27682d1 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
@@ -10,6 +10,14 @@
#define PRESTERA_ACL_KEYMASK_PCL_ID 0x3FF
#define PRESTERA_ACL_KEYMASK_PCL_ID_USER \
(PRESTERA_ACL_KEYMASK_PCL_ID & 0x00FF)
+#define PRESTERA_ACL_KEYMASK_PCL_ID_CHAIN \
+ (PRESTERA_ACL_KEYMASK_PCL_ID & 0xFF00)
+#define PRESTERA_ACL_CHAIN_MASK \
+ (PRESTERA_ACL_KEYMASK_PCL_ID >> 8)
+
+#define PRESTERA_ACL_PCL_ID_MAKE(uid, chain_id) \
+ (((uid) & PRESTERA_ACL_KEYMASK_PCL_ID_USER) | \
+ (((chain_id) << 8) & PRESTERA_ACL_KEYMASK_PCL_ID_CHAIN))
#define rule_match_set_n(match_p, type, val_p, size) \
memcpy(&(match_p)[PRESTERA_ACL_RULE_MATCH_TYPE_##type], \
@@ -46,6 +54,7 @@ enum prestera_acl_rule_action {
PRESTERA_ACL_RULE_ACTION_ACCEPT = 0,
PRESTERA_ACL_RULE_ACTION_DROP = 1,
PRESTERA_ACL_RULE_ACTION_TRAP = 2,
+ PRESTERA_ACL_RULE_ACTION_JUMP = 5,
PRESTERA_ACL_RULE_ACTION_COUNT = 7,
PRESTERA_ACL_RULE_ACTION_MAX
@@ -61,6 +70,10 @@ struct prestera_acl_match {
__be32 mask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
};
+struct prestera_acl_action_jump {
+ u32 index;
+};
+
struct prestera_acl_action_count {
u32 id;
};
@@ -74,6 +87,7 @@ struct prestera_acl_hw_action_info {
enum prestera_acl_rule_action id;
union {
struct prestera_acl_action_count count;
+ struct prestera_acl_action_jump jump;
};
};
@@ -88,6 +102,10 @@ struct prestera_acl_rule_entry_arg {
u8 valid:1;
} accept, drop, trap;
struct {
+ struct prestera_acl_action_jump i;
+ u8 valid:1;
+ } jump;
+ struct {
u8 valid:1;
u32 client;
} count;
@@ -98,7 +116,9 @@ struct prestera_acl_rule {
struct rhash_head ht_node; /* Member of acl HT */
struct list_head list;
struct prestera_acl_ruleset *ruleset;
+ struct prestera_acl_ruleset *jump_ruleset;
unsigned long cookie;
+ u32 chain_index;
u32 priority;
struct prestera_acl_rule_entry_key re_key;
struct prestera_acl_rule_entry_arg re_arg;
@@ -122,7 +142,7 @@ void prestera_acl_fini(struct prestera_switch *sw);
struct prestera_acl_rule *
prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset,
- unsigned long cookie);
+ unsigned long cookie, u32 chain_index);
void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule,
u32 priority);
void prestera_acl_rule_destroy(struct prestera_acl_rule *rule);
@@ -147,10 +167,12 @@ prestera_acl_rule_entry_create(struct prestera_acl *acl,
struct prestera_acl_rule_entry_arg *arg);
struct prestera_acl_ruleset *
prestera_acl_ruleset_get(struct prestera_acl *acl,
- struct prestera_flow_block *block);
+ struct prestera_flow_block *block,
+ u32 chain_index);
struct prestera_acl_ruleset *
prestera_acl_ruleset_lookup(struct prestera_acl *acl,
- struct prestera_flow_block *block);
+ struct prestera_flow_block *block,
+ u32 chain_index);
void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
void *keymask);
bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset);
@@ -160,6 +182,7 @@ int prestera_acl_ruleset_bind(struct prestera_acl_ruleset *ruleset,
struct prestera_port *port);
int prestera_acl_ruleset_unbind(struct prestera_acl_ruleset *ruleset,
struct prestera_port *port);
+u32 prestera_acl_ruleset_index_get(const struct prestera_acl_ruleset *ruleset);
void
prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule,
u16 pcl_id);
@@ -167,5 +190,6 @@ prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule,
int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
void *keymask, u32 *vtcam_id);
int prestera_acl_vtcam_id_put(struct prestera_acl *acl, u32 vtcam_id);
+int prestera_acl_chain_to_client(u32 chain_index, u32 *client);
#endif /* _PRESTERA_ACL_H_ */
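With this change the 10-bit pcl-id is split in two: the low byte carries the per-ruleset uid and bits 8-9 carry the chain index, so only chains 0-3 can be encoded (PRESTERA_ACL_CHAIN_MASK is 0x3). A small worked example of the packing, using local copies of the macros above:

#include <stdio.h>
#include <stdint.h>

#define ACL_KEYMASK_PCL_ID        0x3FF
#define ACL_KEYMASK_PCL_ID_USER   (ACL_KEYMASK_PCL_ID & 0x00FF)
#define ACL_KEYMASK_PCL_ID_CHAIN  (ACL_KEYMASK_PCL_ID & 0xFF00)
#define ACL_CHAIN_MASK            (ACL_KEYMASK_PCL_ID >> 8)

#define ACL_PCL_ID_MAKE(uid, chain_id)                 \
        (((uid) & ACL_KEYMASK_PCL_ID_USER) |           \
         (((chain_id) << 8) & ACL_KEYMASK_PCL_ID_CHAIN))

int main(void)
{
        /* uid 0x2A (allocated per ruleset), chain 2 -> pcl-id 0x022A */
        uint16_t pcl_id = ACL_PCL_ID_MAKE(0x2A, 2);

        printf("pcl_id     = 0x%04x\n", pcl_id);
        printf("user part  = 0x%02x\n", pcl_id & ACL_KEYMASK_PCL_ID_USER);
        printf("chain part = %u\n", (pcl_id & ACL_KEYMASK_PCL_ID_CHAIN) >> 8);
        printf("chain mask = 0x%x (chains 0..%u)\n",
               ACL_CHAIN_MASK, ACL_CHAIN_MASK);
        return 0;
}

The uid keeps rulesets on the same chain distinct, while the iface-index binding done in prestera_acl_ruleset_offload() is what lets a jump action in a lower chain target a chain > 0 ruleset by its index.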
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.c b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
index d849f046ece7..05c3ad98eba9 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flow.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
@@ -29,9 +29,6 @@ static int prestera_flow_block_mall_cb(struct prestera_flow_block *block,
static int prestera_flow_block_flower_cb(struct prestera_flow_block *block,
struct flow_cls_offload *f)
{
- if (f->common.chain_index != 0)
- return -EOPNOTSUPP;
-
switch (f->command) {
case FLOW_CLS_REPLACE:
return prestera_flower_replace(block, f);
@@ -71,6 +68,7 @@ static void prestera_flow_block_destroy(void *cb_priv)
prestera_flower_template_cleanup(block);
+ WARN_ON(!list_empty(&block->template_list));
WARN_ON(!list_empty(&block->binding_list));
kfree(block);
@@ -86,6 +84,7 @@ prestera_flow_block_create(struct prestera_switch *sw, struct net *net)
return NULL;
INIT_LIST_HEAD(&block->binding_list);
+ INIT_LIST_HEAD(&block->template_list);
block->net = net;
block->sw = sw;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.h b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
index 1ea5b745bf72..6550278b166a 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flow.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
@@ -8,7 +8,6 @@
struct prestera_port;
struct prestera_switch;
-struct prestera_flower_template;
struct prestera_flow_block_binding {
struct list_head list;
@@ -22,7 +21,7 @@ struct prestera_flow_block {
struct net *net;
struct prestera_acl_ruleset *ruleset_zero;
struct flow_block_cb *block_cb;
- struct prestera_flower_template *tmplt;
+ struct list_head template_list;
unsigned int rule_count;
};
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
index 19c1417fd05f..921959a980ee 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
@@ -8,26 +8,63 @@
struct prestera_flower_template {
struct prestera_acl_ruleset *ruleset;
+ struct list_head list;
+ u32 chain_index;
};
+static void
+prestera_flower_template_free(struct prestera_flower_template *template)
+{
+ prestera_acl_ruleset_put(template->ruleset);
+ list_del(&template->list);
+ kfree(template);
+}
+
void prestera_flower_template_cleanup(struct prestera_flow_block *block)
{
- if (block->tmplt) {
- /* put the reference to the ruleset kept in create */
- prestera_acl_ruleset_put(block->tmplt->ruleset);
- kfree(block->tmplt);
- block->tmplt = NULL;
- return;
- }
+ struct prestera_flower_template *template, *tmp;
+
+ /* put the reference to all rulesets kept in tmpl create */
+ list_for_each_entry_safe(template, tmp, &block->template_list, list)
+ prestera_flower_template_free(template);
+}
+
+static int
+prestera_flower_parse_goto_action(struct prestera_flow_block *block,
+ struct prestera_acl_rule *rule,
+ u32 chain_index,
+ const struct flow_action_entry *act)
+{
+ struct prestera_acl_ruleset *ruleset;
+
+ if (act->chain_index <= chain_index)
+ /* we can jump only forward */
+ return -EINVAL;
+
+ if (rule->re_arg.jump.valid)
+ return -EEXIST;
+
+ ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
+ act->chain_index);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ rule->re_arg.jump.valid = 1;
+ rule->re_arg.jump.i.index = prestera_acl_ruleset_index_get(ruleset);
+
+ rule->jump_ruleset = ruleset;
+
+ return 0;
}
static int prestera_flower_parse_actions(struct prestera_flow_block *block,
struct prestera_acl_rule *rule,
struct flow_action *flow_action,
+ u32 chain_index,
struct netlink_ext_ack *extack)
{
const struct flow_action_entry *act;
- int i;
+ int err, i;
/* whole struct (rule->re_arg) must be initialized with 0 */
if (!flow_action_has_entries(flow_action))
@@ -53,6 +90,13 @@ static int prestera_flower_parse_actions(struct prestera_flow_block *block,
rule->re_arg.trap.valid = 1;
break;
+ case FLOW_ACTION_GOTO:
+ err = prestera_flower_parse_goto_action(block, rule,
+ chain_index,
+ act);
+ if (err)
+ return err;
+ break;
default:
NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
pr_err("Unsupported action\n");
@@ -259,6 +303,7 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
}
return prestera_flower_parse_actions(block, rule, &f->rule->action,
+ f->common.chain_index,
f->common.extack);
}
@@ -270,12 +315,13 @@ int prestera_flower_replace(struct prestera_flow_block *block,
struct prestera_acl_rule *rule;
int err;
- ruleset = prestera_acl_ruleset_get(acl, block);
+ ruleset = prestera_acl_ruleset_get(acl, block, f->common.chain_index);
if (IS_ERR(ruleset))
return PTR_ERR(ruleset);
/* increments the ruleset reference */
- rule = prestera_acl_rule_create(ruleset, f->cookie);
+ rule = prestera_acl_rule_create(ruleset, f->cookie,
+ f->common.chain_index);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
goto err_rule_create;
@@ -312,7 +358,8 @@ void prestera_flower_destroy(struct prestera_flow_block *block,
struct prestera_acl_ruleset *ruleset;
struct prestera_acl_rule *rule;
- ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block);
+ ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
+ f->common.chain_index);
if (IS_ERR(ruleset))
return;
@@ -345,7 +392,8 @@ int prestera_flower_tmplt_create(struct prestera_flow_block *block,
}
prestera_acl_rule_keymask_pcl_id_set(&rule, 0);
- ruleset = prestera_acl_ruleset_get(block->sw->acl, block);
+ ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
+ f->common.chain_index);
if (IS_ERR_OR_NULL(ruleset)) {
err = -EINVAL;
goto err_ruleset_get;
@@ -364,7 +412,8 @@ int prestera_flower_tmplt_create(struct prestera_flow_block *block,
/* keep the reference to the ruleset */
template->ruleset = ruleset;
- block->tmplt = template;
+ template->chain_index = f->common.chain_index;
+ list_add_rcu(&template->list, &block->template_list);
return 0;
err_ruleset_get:
@@ -377,7 +426,14 @@ err_malloc:
void prestera_flower_tmplt_destroy(struct prestera_flow_block *block,
struct flow_cls_offload *f)
{
- prestera_flower_template_cleanup(block);
+ struct prestera_flower_template *template, *tmp;
+
+ list_for_each_entry_safe(template, tmp, &block->template_list, list)
+ if (template->chain_index == f->common.chain_index) {
+ /* put the reference to the ruleset kept in create */
+ prestera_flower_template_free(template);
+ return;
+ }
}
int prestera_flower_stats(struct prestera_flow_block *block,
@@ -390,7 +446,8 @@ int prestera_flower_stats(struct prestera_flow_block *block,
u64 bytes;
int err;
- ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block);
+ ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
+ f->common.chain_index);
if (IS_ERR(ruleset))
return PTR_ERR(ruleset);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.h b/drivers/net/ethernet/marvell/prestera/prestera_flower.h
index dc3aa4280e9f..495f151e6fa9 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.h
@@ -6,7 +6,6 @@
#include <net/pkt_cls.h>
-struct prestera_switch;
struct prestera_flow_block;
int prestera_flower_replace(struct prestera_flow_block *block,
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
index e6bfadc874c5..c66cc929c820 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
@@ -55,6 +55,8 @@ enum prestera_cmd_type_t {
PRESTERA_CMD_TYPE_ROUTER_RIF_CREATE = 0x600,
PRESTERA_CMD_TYPE_ROUTER_RIF_DELETE = 0x601,
+ PRESTERA_CMD_TYPE_ROUTER_LPM_ADD = 0x610,
+ PRESTERA_CMD_TYPE_ROUTER_LPM_DELETE = 0x611,
PRESTERA_CMD_TYPE_ROUTER_VR_CREATE = 0x630,
PRESTERA_CMD_TYPE_ROUTER_VR_DELETE = 0x631,
@@ -424,6 +426,9 @@ struct prestera_msg_acl_action {
__le32 __reserved;
union {
struct {
+ __le32 index;
+ } jump;
+ struct {
__le32 id;
} count;
__le32 reserved[6];
@@ -499,6 +504,15 @@ struct prestera_msg_iface {
u8 __pad[3];
};
+struct prestera_msg_ip_addr {
+ union {
+ __be32 ipv4;
+ __be32 ipv6[4];
+ } u;
+ u8 v; /* e.g. PRESTERA_IPV4 */
+ u8 __pad[3];
+};
+
struct prestera_msg_rif_req {
struct prestera_msg_cmd cmd;
struct prestera_msg_iface iif;
@@ -515,6 +529,15 @@ struct prestera_msg_rif_resp {
u8 __pad[2];
};
+struct prestera_msg_lpm_req {
+ struct prestera_msg_cmd cmd;
+ struct prestera_msg_ip_addr dst;
+ __le32 grp_id;
+ __le32 dst_len;
+ __le16 vr_id;
+ u8 __pad[2];
+};
+
struct prestera_msg_vr_req {
struct prestera_msg_cmd cmd;
__le16 vr_id;
@@ -598,9 +621,11 @@ static void prestera_hw_build_tests(void)
BUILD_BUG_ON(sizeof(struct prestera_msg_counter_stats) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_rif_req) != 36);
BUILD_BUG_ON(sizeof(struct prestera_msg_vr_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_lpm_req) != 36);
/* structure that are part of req/resp fw messages */
BUILD_BUG_ON(sizeof(struct prestera_msg_iface) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_ip_addr) != 20);
/* check responses */
BUILD_BUG_ON(sizeof(struct prestera_msg_common_resp) != 8);
@@ -1164,6 +1189,9 @@ prestera_acl_rule_add_put_action(struct prestera_msg_acl_action *action,
case PRESTERA_ACL_RULE_ACTION_TRAP:
/* just rule action id, no specific data */
break;
+ case PRESTERA_ACL_RULE_ACTION_JUMP:
+ action->jump.index = __cpu_to_le32(info->jump.index);
+ break;
case PRESTERA_ACL_RULE_ACTION_COUNT:
action->count.id = __cpu_to_le32(info->count.id);
break;
@@ -1891,6 +1919,33 @@ int prestera_hw_vr_delete(struct prestera_switch *sw, u16 vr_id)
sizeof(req));
}
+int prestera_hw_lpm_add(struct prestera_switch *sw, u16 vr_id,
+ __be32 dst, u32 dst_len, u32 grp_id)
+{
+ struct prestera_msg_lpm_req req = {
+ .dst_len = __cpu_to_le32(dst_len),
+ .vr_id = __cpu_to_le16(vr_id),
+ .grp_id = __cpu_to_le32(grp_id),
+ .dst.u.ipv4 = dst
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_LPM_ADD, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_lpm_del(struct prestera_switch *sw, u16 vr_id,
+ __be32 dst, u32 dst_len)
+{
+ struct prestera_msg_lpm_req req = {
+ .dst_len = __cpu_to_le32(dst_len),
+ .vr_id = __cpu_to_le16(vr_id),
+ .dst.u.ipv4 = dst
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_LPM_DELETE, &req.cmd,
+ sizeof(req));
+}
+
int prestera_hw_rxtx_init(struct prestera_switch *sw,
struct prestera_rxtx_params *params)
{
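The new LPM request follows the driver's convention of pinning wire-format sizes at build time: prestera_msg_ip_addr must stay 20 bytes (16-byte address union, family byte, 3 bytes of padding) and prestera_msg_lpm_req 36 bytes, as asserted in prestera_hw_build_tests(). A userspace sketch of the same compile-time layout check; the 4-byte command header is an assumption for the sketch, since struct prestera_msg_cmd itself is not shown in this hunk:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct msg_cmd {                        /* assumed 4-byte command header */
        uint32_t type;
};

struct msg_ip_addr {
        union {
                uint32_t ipv4;          /* big-endian on the wire */
                uint32_t ipv6[4];
        } u;
        uint8_t v;                      /* address family selector */
        uint8_t pad[3];
};

struct msg_lpm_req {
        struct msg_cmd cmd;
        struct msg_ip_addr dst;
        uint32_t grp_id;
        uint32_t dst_len;
        uint16_t vr_id;
        uint8_t pad[2];
};

/* Catch silent ABI breakage the same way the driver's build tests do. */
static_assert(sizeof(struct msg_ip_addr) == 20, "ip_addr layout changed");
static_assert(sizeof(struct msg_lpm_req) == 36, "lpm_req layout changed");

int main(void)
{
        printf("ip_addr=%zu lpm_req=%zu\n",
               sizeof(struct msg_ip_addr), sizeof(struct msg_lpm_req));
        return 0;
}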
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
index 3ff12bae5909..fd896a8838bb 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
@@ -249,6 +249,12 @@ int prestera_hw_rif_delete(struct prestera_switch *sw, u16 rif_id,
int prestera_hw_vr_create(struct prestera_switch *sw, u16 *vr_id);
int prestera_hw_vr_delete(struct prestera_switch *sw, u16 vr_id);
+/* LPM PI */
+int prestera_hw_lpm_add(struct prestera_switch *sw, u16 vr_id,
+ __be32 dst, u32 dst_len, u32 grp_id);
+int prestera_hw_lpm_del(struct prestera_switch *sw, u16 vr_id,
+ __be32 dst, u32 dst_len);
+
/* Event handlers */
int prestera_hw_event_handler_register(struct prestera_switch *sw,
enum prestera_event_type type,
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 73cd0a4b7291..3952fdcc9240 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -28,6 +28,12 @@
#define PRESTERA_MAC_ADDR_NUM_MAX 255
static struct workqueue_struct *prestera_wq;
+static struct workqueue_struct *prestera_owq;
+
+void prestera_queue_work(struct work_struct *work)
+{
+ queue_work(prestera_owq, work);
+}
int prestera_port_pvid_set(struct prestera_port *port, u16 vid)
{
@@ -1025,12 +1031,19 @@ static int __init prestera_module_init(void)
if (!prestera_wq)
return -ENOMEM;
+ prestera_owq = alloc_ordered_workqueue("prestera_ordered", 0);
+ if (!prestera_owq) {
+ destroy_workqueue(prestera_wq);
+ return -ENOMEM;
+ }
+
return 0;
}
static void __exit prestera_module_exit(void)
{
destroy_workqueue(prestera_wq);
+ destroy_workqueue(prestera_owq);
}
module_init(prestera_module_init);
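prestera_queue_work() funnels deferred router work onto a single ordered workqueue, so items run strictly in the order they were queued while the existing prestera_wq keeps its parallelism; that way, for example, a FIB delete queued after a replace cannot overtake it. A minimal kernel-module sketch of the alloc_ordered_workqueue()/queue_work() pattern used above (demo names are illustrative):

// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

static struct workqueue_struct *owq;

struct demo_work {
        struct work_struct work;
        int seq;
};

static void demo_work_fn(struct work_struct *work)
{
        struct demo_work *dw = container_of(work, struct demo_work, work);

        /* Ordered workqueue: items run one at a time, in queueing order. */
        pr_info("processing item %d\n", dw->seq);
        kfree(dw);
}

static int __init demo_init(void)
{
        int i;

        owq = alloc_ordered_workqueue("demo_ordered", 0);
        if (!owq)
                return -ENOMEM;

        for (i = 0; i < 3; i++) {
                struct demo_work *dw = kzalloc(sizeof(*dw), GFP_KERNEL);

                if (!dw)
                        break;
                dw->seq = i;
                INIT_WORK(&dw->work, demo_work_fn);
                queue_work(owq, &dw->work);
        }
        return 0;
}

static void __exit demo_exit(void)
{
        destroy_workqueue(owq); /* drains pending work before freeing */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");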
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c
index 6ef4d32b8fdd..6c5618cf4f08 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c
@@ -5,10 +5,39 @@
#include <linux/types.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
+#include <linux/rhashtable.h>
#include "prestera.h"
#include "prestera_router_hw.h"
+struct prestera_kern_fib_cache_key {
+ struct prestera_ip_addr addr;
+ u32 prefix_len;
+ u32 kern_tb_id; /* tb_id from kernel (not fixed) */
+};
+
+/* Subscribing on neighbours in kernel */
+struct prestera_kern_fib_cache {
+ struct prestera_kern_fib_cache_key key;
+ struct {
+ struct prestera_fib_key fib_key;
+ enum prestera_fib_type fib_type;
+ } lpm_info; /* holds prepared lpm info */
+ struct rhash_head ht_node; /* node of prestera_router */
+ struct fib_info *fi;
+ u8 kern_tos;
+ u8 kern_type;
+ /* Indicates whether the route is not overlapped by another table */
+ bool reachable;
+};
+
+static const struct rhashtable_params __prestera_kern_fib_cache_ht_params = {
+ .key_offset = offsetof(struct prestera_kern_fib_cache, key),
+ .head_offset = offsetof(struct prestera_kern_fib_cache, ht_node),
+ .key_len = sizeof(struct prestera_kern_fib_cache_key),
+ .automatic_shrinking = true,
+};
+
/* This util is used to convert kernel rules for the default vr into hw_vr */
static u32 prestera_fix_tb_id(u32 tb_id)
{
@@ -20,6 +49,290 @@ static u32 prestera_fix_tb_id(u32 tb_id)
return tb_id;
}
+static void
+prestera_util_fen_info2fib_cache_key(struct fib_entry_notifier_info *fen_info,
+ struct prestera_kern_fib_cache_key *key)
+{
+ memset(key, 0, sizeof(*key));
+ key->addr.u.ipv4 = cpu_to_be32(fen_info->dst);
+ key->prefix_len = fen_info->dst_len;
+ key->kern_tb_id = fen_info->tb_id;
+}
+
+static struct prestera_kern_fib_cache *
+prestera_kern_fib_cache_find(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache_key *key)
+{
+ struct prestera_kern_fib_cache *fib_cache;
+
+ fib_cache =
+ rhashtable_lookup_fast(&sw->router->kern_fib_cache_ht, key,
+ __prestera_kern_fib_cache_ht_params);
+ return fib_cache;
+}
+
+static void
+prestera_kern_fib_cache_destroy(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fib_cache)
+{
+ fib_info_put(fib_cache->fi);
+ rhashtable_remove_fast(&sw->router->kern_fib_cache_ht,
+ &fib_cache->ht_node,
+ __prestera_kern_fib_cache_ht_params);
+ kfree(fib_cache);
+}
+
+/* Operations on fi (offload, etc.) must be wrapped in utils.
+ * This function just creates storage.
+ */
+static struct prestera_kern_fib_cache *
+prestera_kern_fib_cache_create(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache_key *key,
+ struct fib_info *fi, u8 tos, u8 type)
+{
+ struct prestera_kern_fib_cache *fib_cache;
+ int err;
+
+ fib_cache = kzalloc(sizeof(*fib_cache), GFP_KERNEL);
+ if (!fib_cache)
+ goto err_kzalloc;
+
+ memcpy(&fib_cache->key, key, sizeof(*key));
+ fib_info_hold(fi);
+ fib_cache->fi = fi;
+ fib_cache->kern_tos = tos;
+ fib_cache->kern_type = type;
+
+ err = rhashtable_insert_fast(&sw->router->kern_fib_cache_ht,
+ &fib_cache->ht_node,
+ __prestera_kern_fib_cache_ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ return fib_cache;
+
+err_ht_insert:
+ fib_info_put(fi);
+ kfree(fib_cache);
+err_kzalloc:
+ return NULL;
+}
+
+static void
+__prestera_k_arb_fib_lpm_offload_set(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc,
+ bool fail, bool offload, bool trap)
+{
+ struct fib_rt_info fri;
+
+ if (fc->key.addr.v != PRESTERA_IPV4)
+ return;
+
+ fri.fi = fc->fi;
+ fri.tb_id = fc->key.kern_tb_id;
+ fri.dst = fc->key.addr.u.ipv4;
+ fri.dst_len = fc->key.prefix_len;
+ fri.tos = fc->kern_tos;
+ fri.type = fc->kern_type;
+ /* flags begin */
+ fri.offload = offload;
+ fri.trap = trap;
+ fri.offload_failed = fail;
+ /* flags end */
+ fib_alias_hw_flags_set(&init_net, &fri);
+}
+
+static int
+__prestera_pr_k_arb_fc_lpm_info_calc(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ memset(&fc->lpm_info, 0, sizeof(fc->lpm_info));
+
+ switch (fc->fi->fib_type) {
+ case RTN_UNICAST:
+ fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_TRAP;
+ break;
+ /* Unsupported. Leave it for kernel: */
+ case RTN_BROADCAST:
+ case RTN_MULTICAST:
+ /* Routes we must trap by design: */
+ case RTN_LOCAL:
+ case RTN_UNREACHABLE:
+ case RTN_PROHIBIT:
+ fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_TRAP;
+ break;
+ case RTN_BLACKHOLE:
+ fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_DROP;
+ break;
+ default:
+ dev_err(sw->dev->dev, "Unsupported fib_type");
+ return -EOPNOTSUPP;
+ }
+
+ fc->lpm_info.fib_key.addr = fc->key.addr;
+ fc->lpm_info.fib_key.prefix_len = fc->key.prefix_len;
+ fc->lpm_info.fib_key.tb_id = prestera_fix_tb_id(fc->key.kern_tb_id);
+
+ return 0;
+}
+
+static int __prestera_k_arb_f_lpm_set(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc,
+ bool enabled)
+{
+ struct prestera_fib_node *fib_node;
+
+ fib_node = prestera_fib_node_find(sw, &fc->lpm_info.fib_key);
+ if (fib_node)
+ prestera_fib_node_destroy(sw, fib_node);
+
+ if (!enabled)
+ return 0;
+
+ fib_node = prestera_fib_node_create(sw, &fc->lpm_info.fib_key,
+ fc->lpm_info.fib_type);
+
+ if (!fib_node) {
+ dev_err(sw->dev->dev, "fib_node=NULL %pI4n/%d kern_tb_id = %d",
+ &fc->key.addr.u.ipv4, fc->key.prefix_len,
+ fc->key.kern_tb_id);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static int __prestera_k_arb_fc_apply(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ int err;
+
+ err = __prestera_pr_k_arb_fc_lpm_info_calc(sw, fc);
+ if (err)
+ return err;
+
+ err = __prestera_k_arb_f_lpm_set(sw, fc, fc->reachable);
+ if (err) {
+ __prestera_k_arb_fib_lpm_offload_set(sw, fc,
+ true, false, false);
+ return err;
+ }
+
+ switch (fc->lpm_info.fib_type) {
+ case PRESTERA_FIB_TYPE_TRAP:
+ __prestera_k_arb_fib_lpm_offload_set(sw, fc, false,
+ false, fc->reachable);
+ break;
+ case PRESTERA_FIB_TYPE_DROP:
+ __prestera_k_arb_fib_lpm_offload_set(sw, fc, false, true,
+ fc->reachable);
+ break;
+ case PRESTERA_FIB_TYPE_INVALID:
+ break;
+ }
+
+ return 0;
+}
+
+static struct prestera_kern_fib_cache *
+__prestera_k_arb_util_fib_overlaps(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ struct prestera_kern_fib_cache_key fc_key;
+ struct prestera_kern_fib_cache *rfc;
+
+ /* TODO: parse kernel rules */
+ rfc = NULL;
+ if (fc->key.kern_tb_id == RT_TABLE_LOCAL) {
+ memcpy(&fc_key, &fc->key, sizeof(fc_key));
+ fc_key.kern_tb_id = RT_TABLE_MAIN;
+ rfc = prestera_kern_fib_cache_find(sw, &fc_key);
+ }
+
+ return rfc;
+}
+
+static struct prestera_kern_fib_cache *
+__prestera_k_arb_util_fib_overlapped(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ struct prestera_kern_fib_cache_key fc_key;
+ struct prestera_kern_fib_cache *rfc;
+
+ /* TODO: parse kernel rules */
+ rfc = NULL;
+ if (fc->key.kern_tb_id == RT_TABLE_MAIN) {
+ memcpy(&fc_key, &fc->key, sizeof(fc_key));
+ fc_key.kern_tb_id = RT_TABLE_LOCAL;
+ rfc = prestera_kern_fib_cache_find(sw, &fc_key);
+ }
+
+ return rfc;
+}
+
+static int
+prestera_k_arb_fib_evt(struct prestera_switch *sw,
+ bool replace, /* replace or del */
+ struct fib_entry_notifier_info *fen_info)
+{
+ struct prestera_kern_fib_cache *tfib_cache, *bfib_cache; /* top/btm */
+ struct prestera_kern_fib_cache_key fc_key;
+ struct prestera_kern_fib_cache *fib_cache;
+ int err;
+
+ prestera_util_fen_info2fib_cache_key(fen_info, &fc_key);
+ fib_cache = prestera_kern_fib_cache_find(sw, &fc_key);
+ if (fib_cache) {
+ fib_cache->reachable = false;
+ err = __prestera_k_arb_fc_apply(sw, fib_cache);
+ if (err)
+ dev_err(sw->dev->dev,
+ "Applying destroyed fib_cache failed");
+
+ bfib_cache = __prestera_k_arb_util_fib_overlaps(sw, fib_cache);
+ tfib_cache = __prestera_k_arb_util_fib_overlapped(sw, fib_cache);
+ if (!tfib_cache && bfib_cache) {
+ bfib_cache->reachable = true;
+ err = __prestera_k_arb_fc_apply(sw, bfib_cache);
+ if (err)
+ dev_err(sw->dev->dev,
+ "Applying fib_cache btm failed");
+ }
+
+ prestera_kern_fib_cache_destroy(sw, fib_cache);
+ }
+
+ if (replace) {
+ fib_cache = prestera_kern_fib_cache_create(sw, &fc_key,
+ fen_info->fi,
+ fen_info->tos,
+ fen_info->type);
+ if (!fib_cache) {
+ dev_err(sw->dev->dev, "fib_cache == NULL");
+ return -ENOENT;
+ }
+
+ bfib_cache = __prestera_k_arb_util_fib_overlaps(sw, fib_cache);
+ tfib_cache = __prestera_k_arb_util_fib_overlapped(sw, fib_cache);
+ if (!tfib_cache)
+ fib_cache->reachable = true;
+
+ if (bfib_cache) {
+ bfib_cache->reachable = false;
+ err = __prestera_k_arb_fc_apply(sw, bfib_cache);
+ if (err)
+ dev_err(sw->dev->dev,
+ "Applying fib_cache btm failed");
+ }
+
+ err = __prestera_k_arb_fc_apply(sw, fib_cache);
+ if (err)
+ dev_err(sw->dev->dev, "Applying fib_cache failed");
+ }
+
+ return 0;
+}
+
static int __prestera_inetaddr_port_event(struct net_device *port_dev,
unsigned long event,
struct netlink_ext_ack *extack)
@@ -137,6 +450,89 @@ out:
return notifier_from_errno(err);
}
+struct prestera_fib_event_work {
+ struct work_struct work;
+ struct prestera_switch *sw;
+ struct fib_entry_notifier_info fen_info;
+ unsigned long event;
+};
+
+static void __prestera_router_fib_event_work(struct work_struct *work)
+{
+ struct prestera_fib_event_work *fib_work =
+ container_of(work, struct prestera_fib_event_work, work);
+ struct prestera_switch *sw = fib_work->sw;
+ int err;
+
+ rtnl_lock();
+
+ switch (fib_work->event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ err = prestera_k_arb_fib_evt(sw, true, &fib_work->fen_info);
+ if (err)
+ goto err_out;
+
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ err = prestera_k_arb_fib_evt(sw, false, &fib_work->fen_info);
+ if (err)
+ goto err_out;
+
+ break;
+ }
+
+ goto out;
+
+err_out:
+ dev_err(sw->dev->dev, "Error when processing %pI4h/%d",
+ &fib_work->fen_info.dst,
+ fib_work->fen_info.dst_len);
+out:
+ fib_info_put(fib_work->fen_info.fi);
+ rtnl_unlock();
+ kfree(fib_work);
+}
+
+/* Called with rcu_read_lock() */
+static int __prestera_router_fib_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct prestera_fib_event_work *fib_work;
+ struct fib_entry_notifier_info *fen_info;
+ struct fib_notifier_info *info = ptr;
+ struct prestera_router *router;
+
+ if (info->family != AF_INET)
+ return NOTIFY_DONE;
+
+ router = container_of(nb, struct prestera_router, fib_nb);
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ case FIB_EVENT_ENTRY_DEL:
+ fen_info = container_of(info, struct fib_entry_notifier_info,
+ info);
+ if (!fen_info->fi)
+ return NOTIFY_DONE;
+
+ fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
+ if (WARN_ON(!fib_work))
+ return NOTIFY_BAD;
+
+ fib_info_hold(fen_info->fi);
+ fib_work->fen_info = *fen_info;
+ fib_work->event = event;
+ fib_work->sw = router->sw;
+ INIT_WORK(&fib_work->work, __prestera_router_fib_event_work);
+ prestera_queue_work(&fib_work->work);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_DONE;
+}
+
int prestera_router_init(struct prestera_switch *sw)
{
struct prestera_router *router;
@@ -153,6 +549,11 @@ int prestera_router_init(struct prestera_switch *sw)
if (err)
goto err_router_lib_init;
+ err = rhashtable_init(&router->kern_fib_cache_ht,
+ &__prestera_kern_fib_cache_ht_params);
+ if (err)
+ goto err_kern_fib_cache_ht_init;
+
router->inetaddr_valid_nb.notifier_call = __prestera_inetaddr_valid_cb;
err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
if (err)
@@ -163,11 +564,21 @@ int prestera_router_init(struct prestera_switch *sw)
if (err)
goto err_register_inetaddr_notifier;
+ router->fib_nb.notifier_call = __prestera_router_fib_event;
+ err = register_fib_notifier(&init_net, &router->fib_nb,
+ /* TODO: flush fib entries */ NULL, NULL);
+ if (err)
+ goto err_register_fib_notifier;
+
return 0;
+err_register_fib_notifier:
+ unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
err_register_inetaddr_validator_notifier:
+ rhashtable_destroy(&router->kern_fib_cache_ht);
+err_kern_fib_cache_ht_init:
prestera_router_hw_fini(sw);
err_router_lib_init:
kfree(sw->router);
@@ -178,6 +589,7 @@ void prestera_router_fini(struct prestera_switch *sw)
{
unregister_inetaddr_notifier(&sw->router->inetaddr_nb);
unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb);
+ rhashtable_destroy(&sw->router->kern_fib_cache_ht);
prestera_router_hw_fini(sw);
kfree(sw->router);
sw->router = NULL;
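__prestera_router_fib_event() runs under rcu_read_lock(), so it only snapshots fen_info, takes a fib_info reference, and queues a work item; the parts that may sleep (rtnl_lock(), programming the LPM) happen later in __prestera_router_fib_event_work(). A stripped-down sketch of that defer-from-atomic-context pattern; the names are illustrative and the fib_info reference counting is omitted here:

// SPDX-License-Identifier: GPL-2.0
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct evt_work {
        struct work_struct work;
        unsigned long event;            /* snapshot taken in atomic context */
};

static void evt_work_fn(struct work_struct *work)
{
        struct evt_work *w = container_of(work, struct evt_work, work);

        /* Process context: sleeping locks such as rtnl_lock() are fine here. */
        rtnl_lock();
        /* ... act on w->event, program hardware ... */
        rtnl_unlock();
        kfree(w);
}

static int evt_notifier_cb(struct notifier_block *nb,
                           unsigned long event, void *ptr)
{
        struct evt_work *w;

        /* Notifier/RCU context: no sleeping, so allocate atomically and defer. */
        w = kzalloc(sizeof(*w), GFP_ATOMIC);
        if (!w)
                return NOTIFY_BAD;

        w->event = event;
        INIT_WORK(&w->work, evt_work_fn);
        schedule_work(&w->work);
        return NOTIFY_DONE;
}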
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
index e5592b69ad37..5b0cf3be9a9e 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
@@ -9,23 +9,41 @@
#include "prestera_acl.h"
/* +--+
- * +------->|vr|
- * | +--+
- * |
- * +-+-------+
- * |rif_entry|
- * +---------+
- * Rif is
+ * +------->|vr|<-+
+ * | +--+ |
+ * | |
+ * +-+-------+ +--+---+-+
+ * |rif_entry| |fib_node|
+ * +---------+ +--------+
+ * Rif is Fib - is exit point
* used as
* entry point
* for vr in hw
*/
+#define PRESTERA_NHGR_UNUSED (0)
+#define PRESTERA_NHGR_DROP (0xFFFFFFFF)
+
+static const struct rhashtable_params __prestera_fib_ht_params = {
+ .key_offset = offsetof(struct prestera_fib_node, key),
+ .head_offset = offsetof(struct prestera_fib_node, ht_node),
+ .key_len = sizeof(struct prestera_fib_key),
+ .automatic_shrinking = true,
+};
+
int prestera_router_hw_init(struct prestera_switch *sw)
{
+ int err;
+
+ err = rhashtable_init(&sw->router->fib_ht,
+ &__prestera_fib_ht_params);
+ if (err)
+ goto err_fib_ht_init;
+
INIT_LIST_HEAD(&sw->router->vr_list);
INIT_LIST_HEAD(&sw->router->rif_entry_list);
+err_fib_ht_init:
return 0;
}
@@ -33,6 +51,7 @@ void prestera_router_hw_fini(struct prestera_switch *sw)
{
WARN_ON(!list_empty(&sw->router->vr_list));
WARN_ON(!list_empty(&sw->router->rif_entry_list));
+ rhashtable_destroy(&sw->router->fib_ht);
}
static struct prestera_vr *__prestera_vr_find(struct prestera_switch *sw,
@@ -212,3 +231,102 @@ err_key_copy:
err_kzalloc:
return NULL;
}
+
+struct prestera_fib_node *
+prestera_fib_node_find(struct prestera_switch *sw, struct prestera_fib_key *key)
+{
+ struct prestera_fib_node *fib_node;
+
+ fib_node = rhashtable_lookup_fast(&sw->router->fib_ht, key,
+ __prestera_fib_ht_params);
+ return fib_node;
+}
+
+static void __prestera_fib_node_destruct(struct prestera_switch *sw,
+ struct prestera_fib_node *fib_node)
+{
+ struct prestera_vr *vr;
+
+ vr = fib_node->info.vr;
+ prestera_hw_lpm_del(sw, vr->hw_vr_id, fib_node->key.addr.u.ipv4,
+ fib_node->key.prefix_len);
+ switch (fib_node->info.type) {
+ case PRESTERA_FIB_TYPE_TRAP:
+ break;
+ case PRESTERA_FIB_TYPE_DROP:
+ break;
+ default:
+ pr_err("Unknown fib_node->info.type = %d",
+ fib_node->info.type);
+ }
+
+ prestera_vr_put(sw, vr);
+}
+
+void prestera_fib_node_destroy(struct prestera_switch *sw,
+ struct prestera_fib_node *fib_node)
+{
+ __prestera_fib_node_destruct(sw, fib_node);
+ rhashtable_remove_fast(&sw->router->fib_ht, &fib_node->ht_node,
+ __prestera_fib_ht_params);
+ kfree(fib_node);
+}
+
+struct prestera_fib_node *
+prestera_fib_node_create(struct prestera_switch *sw,
+ struct prestera_fib_key *key,
+ enum prestera_fib_type fib_type)
+{
+ struct prestera_fib_node *fib_node;
+ u32 grp_id;
+ struct prestera_vr *vr;
+ int err;
+
+ fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
+ if (!fib_node)
+ goto err_kzalloc;
+
+ memcpy(&fib_node->key, key, sizeof(*key));
+ fib_node->info.type = fib_type;
+
+ vr = prestera_vr_get(sw, key->tb_id, NULL);
+ if (IS_ERR(vr))
+ goto err_vr_get;
+
+ fib_node->info.vr = vr;
+
+ switch (fib_type) {
+ case PRESTERA_FIB_TYPE_TRAP:
+ grp_id = PRESTERA_NHGR_UNUSED;
+ break;
+ case PRESTERA_FIB_TYPE_DROP:
+ grp_id = PRESTERA_NHGR_DROP;
+ break;
+ default:
+ pr_err("Unsupported fib_type %d", fib_type);
+ goto err_nh_grp_get;
+ }
+
+ err = prestera_hw_lpm_add(sw, vr->hw_vr_id, key->addr.u.ipv4,
+ key->prefix_len, grp_id);
+ if (err)
+ goto err_lpm_add;
+
+ err = rhashtable_insert_fast(&sw->router->fib_ht, &fib_node->ht_node,
+ __prestera_fib_ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ return fib_node;
+
+err_ht_insert:
+ prestera_hw_lpm_del(sw, vr->hw_vr_id, key->addr.u.ipv4,
+ key->prefix_len);
+err_lpm_add:
+err_nh_grp_get:
+ prestera_vr_put(sw, vr);
+err_vr_get:
+ kfree(fib_node);
+err_kzalloc:
+ return NULL;
+}
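fib_ht is an rhashtable keyed by the whole struct prestera_fib_key: with key_len set, the key is hashed and compared bytewise, which is why callers build lookup keys in fully zero-initialized storage. A condensed sketch of the same rhashtable_params trio and the fast lookup/insert helpers, with a simplified key (demo names are illustrative):

// SPDX-License-Identifier: GPL-2.0
#include <linux/rhashtable.h>
#include <linux/slab.h>
#include <linux/stddef.h>

struct demo_key {
        u32 addr;
        u32 prefix_len;
        u32 tb_id;
};

struct demo_node {
        struct rhash_head ht_node;
        struct demo_key key;
};

static const struct rhashtable_params demo_ht_params = {
        .key_offset = offsetof(struct demo_node, key),
        .head_offset = offsetof(struct demo_node, ht_node),
        .key_len = sizeof(struct demo_key),  /* key hashed/compared bytewise */
        .automatic_shrinking = true,
};

static struct demo_node *demo_find(struct rhashtable *ht, struct demo_key *key)
{
        return rhashtable_lookup_fast(ht, key, demo_ht_params);
}

static int demo_add(struct rhashtable *ht, struct demo_node *node)
{
        /* Zero-init nodes and keys: any padding takes part in the compare. */
        return rhashtable_insert_fast(ht, &node->ht_node, demo_ht_params);
}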
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h
index b6b028551868..67dbb49c8bd4 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h
@@ -22,6 +22,42 @@ struct prestera_rif_entry {
struct list_head router_node; /* ht */
};
+struct prestera_ip_addr {
+ union {
+ __be32 ipv4;
+ struct in6_addr ipv6;
+ } u;
+ enum {
+ PRESTERA_IPV4 = 0,
+ PRESTERA_IPV6
+ } v;
+};
+
+struct prestera_fib_key {
+ struct prestera_ip_addr addr;
+ u32 prefix_len;
+ u32 tb_id;
+};
+
+struct prestera_fib_info {
+ struct prestera_vr *vr;
+ struct list_head vr_node;
+ enum prestera_fib_type {
+ PRESTERA_FIB_TYPE_INVALID = 0,
+ /* It can be a connected route
+ * and will be overlapped by neighbours
+ */
+ PRESTERA_FIB_TYPE_TRAP,
+ PRESTERA_FIB_TYPE_DROP
+ } type;
+};
+
+struct prestera_fib_node {
+ struct rhash_head ht_node; /* node of prestera_vr */
+ struct prestera_fib_key key;
+ struct prestera_fib_info info; /* action related info */
+};
+
struct prestera_rif_entry *
prestera_rif_entry_find(const struct prestera_switch *sw,
const struct prestera_rif_entry_key *k);
@@ -31,6 +67,14 @@ struct prestera_rif_entry *
prestera_rif_entry_create(struct prestera_switch *sw,
struct prestera_rif_entry_key *k,
u32 tb_id, const unsigned char *addr);
+struct prestera_fib_node *prestera_fib_node_find(struct prestera_switch *sw,
+ struct prestera_fib_key *key);
+void prestera_fib_node_destroy(struct prestera_switch *sw,
+ struct prestera_fib_node *fib_node);
+struct prestera_fib_node *
+prestera_fib_node_create(struct prestera_switch *sw,
+ struct prestera_fib_key *key,
+ enum prestera_fib_type fib_type);
int prestera_router_hw_init(struct prestera_switch *sw);
void prestera_router_hw_fini(struct prestera_switch *sw);
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 89ca7960b225..4cd0747edaff 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -1556,6 +1556,7 @@ static int mtk_star_probe(struct platform_device *pdev)
return devm_register_netdev(dev, ndev);
}
+#ifdef CONFIG_OF
static const struct of_device_id mtk_star_of_match[] = {
{ .compatible = "mediatek,mt8516-eth", },
{ .compatible = "mediatek,mt8518-eth", },
@@ -1563,6 +1564,7 @@ static const struct of_device_id mtk_star_of_match[] = {
{ }
};
MODULE_DEVICE_TABLE(of, mtk_star_of_match);
+#endif
static SIMPLE_DEV_PM_OPS(mtk_star_pm_ops,
mtk_star_suspend, mtk_star_resume);
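Guarding the of_device_id table with CONFIG_OF avoids a defined-but-unused warning on builds without device-tree support. The usual companion is of_match_ptr() at the platform_driver definition, so the pointer degrades to NULL when OF is off; the mtk_star driver's actual platform_driver is not part of this hunk, so the sketch below only illustrates the general pattern with made-up names:

// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#ifdef CONFIG_OF
static const struct of_device_id demo_of_match[] = {
        { .compatible = "vendor,demo-eth", },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, demo_of_match);
#endif

static struct platform_driver demo_driver = {
        .driver = {
                .name = "demo-eth",
                /* of_match_ptr() collapses to NULL when CONFIG_OF is off,
                 * so the guarded table is never referenced there.
                 */
                .of_match_table = of_match_ptr(demo_of_match),
        },
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL");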
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 8cfc649f226b..8f762fc170b3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -1067,7 +1067,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
struct mlx4_qp_context *context;
int err = 0;
- context = kmalloc(sizeof(*context), GFP_KERNEL);
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return -ENOMEM;
@@ -1078,7 +1078,6 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
}
qp->event = mlx4_en_sqp_event;
- memset(context, 0, sizeof(*context));
mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
qpn, ring->cqn, -1, context);
context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 817f4154b86d..f777151d226f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -42,7 +42,6 @@
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
-#include <linux/moduleparam.h>
#include <linux/indirect_call_wrapper.h>
#include "mlx4_en.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index fcfd38fa9e6c..4bc666714a35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -28,7 +28,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
- en/qos.o en/trap.o en/fs_tt_redirect.o
+ en/qos.o en/trap.o en/fs_tt_redirect.o en/selq.o
#
# Netdev extra
@@ -55,7 +55,11 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/a
en/tc/act/ct.o en/tc/act/sample.o en/tc/act/ptype.o \
en/tc/act/redirect_ingress.o
-mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
+ifneq ($(CONFIG_MLX5_TC_CT),)
+ mlx5_core-y += en/tc_ct.o en/tc/ct_fs_dmfs.o
+ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += en/tc/ct_fs_smfs.o
+endif
+
mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
#
@@ -103,9 +107,10 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
steering/dr_icm_pool.o steering/dr_buddy.o \
steering/dr_ste.o steering/dr_send.o \
steering/dr_ste_v0.o steering/dr_ste_v1.o \
+ steering/dr_ste_v2.o \
steering/dr_cmd.o steering/dr_fw.o \
steering/dr_action.o steering/fs_dr.o \
- steering/dr_dbg.o
+ steering/dr_dbg.o lib/smfs.o
#
# SF device
#
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 291e427e9e4f..e52b0bac09da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -71,53 +71,6 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
return cpu_handle;
}
-static int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
- struct mlx5_frag_buf *buf, int node)
-{
- dma_addr_t t;
-
- buf->size = size;
- buf->npages = 1;
- buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
-
- buf->frags = kzalloc(sizeof(*buf->frags), GFP_KERNEL);
- if (!buf->frags)
- return -ENOMEM;
-
- buf->frags->buf = mlx5_dma_zalloc_coherent_node(dev, size,
- &t, node);
- if (!buf->frags->buf)
- goto err_out;
-
- buf->frags->map = t;
-
- while (t & ((1 << buf->page_shift) - 1)) {
- --buf->page_shift;
- buf->npages *= 2;
- }
-
- return 0;
-err_out:
- kfree(buf->frags);
- return -ENOMEM;
-}
-
-int mlx5_buf_alloc(struct mlx5_core_dev *dev,
- int size, struct mlx5_frag_buf *buf)
-{
- return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
-}
-EXPORT_SYMBOL(mlx5_buf_alloc);
-
-void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
-{
- dma_free_coherent(mlx5_core_dma_dev(dev), buf->size, buf->frags->buf,
- buf->frags->map);
-
- kfree(buf->frags);
-}
-EXPORT_SYMBOL_GPL(mlx5_buf_free);
-
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_frag_buf *buf, int node)
{
@@ -183,11 +136,11 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
u32 db_per_page = PAGE_SIZE / cache_line_size();
struct mlx5_db_pgdir *pgdir;
- pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
+ pgdir = kzalloc_node(sizeof(*pgdir), GFP_KERNEL, node);
if (!pgdir)
return NULL;
- pgdir->bitmap = bitmap_zalloc(db_per_page, GFP_KERNEL);
+ pgdir->bitmap = bitmap_zalloc_node(db_per_page, GFP_KERNEL, node);
if (!pgdir->bitmap) {
kfree(pgdir);
return NULL;
@@ -286,19 +239,6 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
}
EXPORT_SYMBOL_GPL(mlx5_db_free);
-void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas)
-{
- u64 addr;
- int i;
-
- for (i = 0; i < buf->npages; i++) {
- addr = buf->frags->map + (i << buf->page_shift);
-
- pas[i] = cpu_to_be64(addr);
- }
-}
-EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
-
void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm)
{
int i;
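The doorbell-page directory and its bitmap are now allocated with kzalloc_node()/bitmap_zalloc_node() on the device's NUMA node rather than wherever the caller happens to run, keeping that metadata local to the NIC. A small kernel-style sketch of the pattern; the helper name and its caller are illustrative:

// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/bitmap.h>

/* Allocate per-device metadata and its bitmap on the device's NUMA node. */
static void *alloc_meta_on_node(size_t size, unsigned int nbits, int node,
                                unsigned long **bitmap_out)
{
        void *meta = kzalloc_node(size, GFP_KERNEL, node);

        if (!meta)
                return NULL;

        *bitmap_out = bitmap_zalloc_node(nbits, GFP_KERNEL, node);
        if (!*bitmap_out) {
                kfree(meta);
                return NULL;
        }
        return meta;
}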
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index fc19a0762af2..26ba94cb432e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -31,7 +31,6 @@
*/
#include <linux/highmem.h>
-#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
@@ -191,10 +190,10 @@ static int verify_block_sig(struct mlx5_cmd_prot_block *block)
int xor_len = sizeof(*block) - sizeof(block->data) - 1;
if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
- return -EINVAL;
+ return -EHWPOISON;
if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
- return -EINVAL;
+ return -EHWPOISON;
return 0;
}
@@ -260,12 +259,12 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent)
sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
if (sig != 0xff)
- return -EINVAL;
+ return -EHWPOISON;
for (i = 0; i < n && next; i++) {
err = verify_block_sig(next->buf);
if (err)
- return err;
+ return -EHWPOISON;
next = next->next;
}
@@ -485,7 +484,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_LOAD_VHCA_STATE:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
- return -EIO;
+ return -ENOLINK;
default:
mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
return -EINVAL;
@@ -771,44 +770,72 @@ struct mlx5_ifc_mbox_in_bits {
u8 reserved_at_40[0x40];
};
-void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
+void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out)
{
- *status = MLX5_GET(mbox_out, out, status);
- *syndrome = MLX5_GET(mbox_out, out, syndrome);
+ u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
+ u8 status = MLX5_GET(mbox_out, out, status);
+
+ mlx5_core_err_rl(dev,
+ "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n",
+ mlx5_command_str(opcode), opcode, op_mod,
+ cmd_status_str(status), status, syndrome, cmd_status_to_err(status));
}
+EXPORT_SYMBOL(mlx5_cmd_out_err);
-static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
+static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
{
+ u16 opcode, op_mod;
u32 syndrome;
u8 status;
- u16 opcode;
- u16 op_mod;
u16 uid;
+ int err;
- mlx5_cmd_mbox_status(out, &status, &syndrome);
- if (!status)
- return 0;
+ syndrome = MLX5_GET(mbox_out, out, syndrome);
+ status = MLX5_GET(mbox_out, out, status);
opcode = MLX5_GET(mbox_in, in, opcode);
op_mod = MLX5_GET(mbox_in, in, op_mod);
uid = MLX5_GET(mbox_in, in, uid);
+ err = cmd_status_to_err(status);
+
if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
- mlx5_core_err_rl(dev,
- "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
- mlx5_command_str(opcode), opcode, op_mod,
- cmd_status_str(status), status, syndrome);
+ mlx5_cmd_out_err(dev, opcode, op_mod, out);
else
mlx5_core_dbg(dev,
- "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
- mlx5_command_str(opcode),
- opcode, op_mod,
- cmd_status_str(status),
- status,
- syndrome);
+ "%s(0x%x) op_mod(0x%x) uid(%d) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n",
+ mlx5_command_str(opcode), opcode, op_mod, uid,
+ cmd_status_str(status), status, syndrome, err);
+}
- return cmd_status_to_err(status);
+int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out)
+{
+ /* aborted due to PCI error or via reset flow mlx5_cmd_trigger_completions() */
+ if (err == -ENXIO) {
+ u16 opcode = MLX5_GET(mbox_in, in, opcode);
+ u32 syndrome;
+ u8 status;
+
+ /* PCI Error, emulate command return status, for smooth reset */
+ err = mlx5_internal_err_ret_value(dev, opcode, &syndrome, &status);
+ MLX5_SET(mbox_out, out, status, status);
+ MLX5_SET(mbox_out, out, syndrome, syndrome);
+ if (!err)
+ return 0;
+ }
+
+ /* driver or FW delivery error */
+ if (err != -EREMOTEIO && err)
+ return err;
+
+ /* check outbox status */
+ err = cmd_status_to_err(MLX5_GET(mbox_out, out, status));
+ if (err)
+ cmd_status_print(dev, in, out);
+
+ return err;
}
+EXPORT_SYMBOL(mlx5_cmd_check);
static void dump_command(struct mlx5_core_dev *dev,
struct mlx5_cmd_work_ent *ent, int input)
@@ -991,13 +1018,7 @@ static void cmd_work_handler(struct work_struct *work)
/* Skip sending command to fw if internal error */
if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) {
- u8 status = 0;
- u32 drv_synd;
-
- ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
- MLX5_SET(mbox_out, ent->out, status, status);
- MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
-
+ ent->ret = -ENXIO;
mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
return;
}
@@ -1016,6 +1037,31 @@ static void cmd_work_handler(struct work_struct *work)
}
}
+static int deliv_status_to_err(u8 status)
+{
+ switch (status) {
+ case MLX5_CMD_DELIVERY_STAT_OK:
+ case MLX5_DRIVER_STATUS_ABORTED:
+ return 0;
+ case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
+ case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
+ return -EBADR;
+ case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
+ case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
+ case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
+ return -EFAULT; /* Bad address */
+ case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
+ case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
+ case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
+ case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
+ return -ENOMSG;
+ case MLX5_CMD_DELIVERY_STAT_FW_ERR:
+ return -EIO;
+ default:
+ return -EINVAL;
+ }
+}
+
static const char *deliv_status_to_str(u8 status)
{
switch (status) {
@@ -1112,16 +1158,27 @@ out_err:
/* Notes:
* 1. Callback functions may not sleep
* 2. page queue commands do not support asynchronous completion
+ *
+ * return value in case (!callback):
+ * ret < 0 : Command execution couldn't be submitted by driver
+ * ret > 0 : Command execution couldn't be performed by firmware
+ * ret == 0: Command was executed by FW; the caller must check the FW outbox status.
+ *
+ * return value in case (callback):
+ * ret < 0 : Command execution couldn't be submitted by driver
+ * ret == 0: Command will be submitted to FW for execution
+ * and the callback will be called for further status updates
*/
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
struct mlx5_cmd_msg *out, void *uout, int uout_size,
mlx5_cmd_cbk_t callback,
- void *context, int page_queue, u8 *status,
+ void *context, int page_queue,
u8 token, bool force_polling)
{
struct mlx5_cmd *cmd = &dev->cmd;
struct mlx5_cmd_work_ent *ent;
struct mlx5_cmd_stats *stats;
+ u8 status = 0;
int err = 0;
s64 ds;
u16 op;
@@ -1152,12 +1209,12 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
cmd_work_handler(&ent->work);
} else if (!queue_work(cmd->wq, &ent->work)) {
mlx5_core_warn(dev, "failed to queue work\n");
- err = -ENOMEM;
+ err = -EALREADY;
goto out_free;
}
if (callback)
- goto out; /* mlx5_cmd_comp_handler() will put(ent) */
+ return 0; /* mlx5_cmd_comp_handler() will put(ent) */
err = wait_func(dev, ent);
if (err == -ETIMEDOUT || err == -ECANCELED)
@@ -1175,12 +1232,11 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
"fw exec time for %s is %lld nsec\n",
mlx5_command_str(op), ds);
- *status = ent->status;
out_free:
+ status = ent->status;
cmd_ent_put(ent);
-out:
- return err;
+ return err ? : status;
}
static ssize_t dbg_write(struct file *filp, const char __user *buf,
@@ -1497,7 +1553,7 @@ static void create_debugfs_files(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
- dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
+ dbg->dbg_root = debugfs_create_dir("cmd", mlx5_debugfs_get_dev_root(dev));
debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops);
debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops);
@@ -1623,15 +1679,15 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
ent->ts2 = ktime_get_ns();
memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
dump_command(dev, ent, 0);
- if (!ent->ret) {
+
+ if (vec & MLX5_TRIGGERED_CMD_COMP)
+ ent->ret = -ENXIO;
+
+ if (!ent->ret) { /* Command completed by FW */
if (!cmd->checksum_disabled)
ent->ret = verify_signature(ent);
- else
- ent->ret = 0;
- if (vec & MLX5_TRIGGERED_CMD_COMP)
- ent->status = MLX5_DRIVER_STATUS_ABORTED;
- else
- ent->status = ent->lay->status_own >> 1;
+
+ ent->status = ent->lay->status_own >> 1;
mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
ent->ret, deliv_status_to_str(ent->status), ent->status);
@@ -1649,21 +1705,18 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
callback = ent->callback;
context = ent->context;
- err = ent->ret;
- if (!err) {
+ err = ent->ret ? : ent->status;
+ if (err > 0) /* Failed in FW, command didn't execute */
+ err = deliv_status_to_err(err);
+
+ if (!err)
err = mlx5_copy_from_msg(ent->uout,
ent->out,
ent->uout_size);
- err = err ? err : mlx5_cmd_check(dev,
- ent->in->first.data,
- ent->uout);
- }
-
mlx5_free_cmd_msg(dev, ent->out);
free_msg(dev, ent->in);
- err = err ? err : ent->status;
/* final consumer is done, release ent */
cmd_ent_put(ent);
callback(err, context);
@@ -1677,7 +1730,7 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
}
}
-void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
+static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
{
struct mlx5_cmd *cmd = &dev->cmd;
unsigned long bitmask;
@@ -1730,31 +1783,6 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev)
up(&cmd->sem);
}
-static int status_to_err(u8 status)
-{
- switch (status) {
- case MLX5_CMD_DELIVERY_STAT_OK:
- case MLX5_DRIVER_STATUS_ABORTED:
- return 0;
- case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
- case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
- return -EBADR;
- case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
- case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
- case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
- return -EFAULT; /* Bad address */
- case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
- case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
- case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
- case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
- return -ENOMSG;
- case MLX5_CMD_DELIVERY_STAT_FW_ERR:
- return -EIO;
- default:
- return -EINVAL;
- }
-}
-
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
gfp_t gfp)
{
@@ -1798,27 +1826,23 @@ static int is_manage_pages(void *in)
return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}
+/* Notes:
+ * 1. Callback functions may not sleep
+ * 2. Page queue commands do not support asynchronous completion
+ */
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size, mlx5_cmd_cbk_t callback, void *context,
bool force_polling)
{
- struct mlx5_cmd_msg *inb;
- struct mlx5_cmd_msg *outb;
+ u16 opcode = MLX5_GET(mbox_in, in, opcode);
+ struct mlx5_cmd_msg *inb, *outb;
int pages_queue;
gfp_t gfp;
- int err;
- u8 status = 0;
- u32 drv_synd;
- u16 opcode;
u8 token;
+ int err;
- opcode = MLX5_GET(mbox_in, in, opcode);
- if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode)) {
- err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
- MLX5_SET(mbox_out, out, status, status);
- MLX5_SET(mbox_out, out, syndrome, drv_synd);
- return err;
- }
+ if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode))
+ return -ENXIO;
pages_queue = is_manage_pages(in);
gfp = callback ? GFP_ATOMIC : GFP_KERNEL;
@@ -1844,39 +1868,133 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
}
err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
- pages_queue, &status, token, force_polling);
+ pages_queue, token, force_polling);
+ if (callback)
+ return err;
+
+ if (err > 0) /* Failed in FW, command didn't execute */
+ err = deliv_status_to_err(err);
+
if (err)
goto out_out;
- mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
- if (status) {
- err = status_to_err(status);
- goto out_out;
+ /* command completed by FW */
+ err = mlx5_copy_from_msg(out, outb, out_size);
+out_out:
+ mlx5_free_cmd_msg(dev, outb);
+out_in:
+ free_msg(dev, inb);
+ return err;
+}
+
+static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status, int err)
+{
+ struct mlx5_cmd_stats *stats;
+
+ if (!err)
+ return;
+
+ stats = &dev->cmd.stats[opcode];
+ spin_lock_irq(&stats->lock);
+ stats->failed++;
+ if (err < 0)
+ stats->last_failed_errno = -err;
+ if (err == -EREMOTEIO) {
+ stats->failed_mbox_status++;
+ stats->last_failed_mbox_status = status;
}
+ spin_unlock_irq(&stats->lock);
+}
- if (!callback)
- err = mlx5_copy_from_msg(out, outb, out_size);
+/* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
+static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void *out)
+{
+ u8 status = MLX5_GET(mbox_out, out, status);
-out_out:
- if (!callback)
- mlx5_free_cmd_msg(dev, outb);
+	if (err == -EREMOTEIO) /* -EREMOTEIO is reserved for outbox.status != OK below */
+ err = -EIO;
-out_in:
- if (!callback)
- free_msg(dev, inb);
+ if (!err && status != MLX5_CMD_STAT_OK)
+ err = -EREMOTEIO;
+
+ cmd_status_log(dev, opcode, status, err);
return err;
}
+/**
+ * mlx5_cmd_do - Executes a fw command, waits for completion.
+ * Unlike mlx5_cmd_exec, this function will not translate or intercept
+ * outbox.status and will return -EREMOTEIO when
+ * outbox.status != MLX5_CMD_STAT_OK
+ *
+ * @dev: mlx5 core device
+ * @in: inbox mlx5_ifc command buffer
+ * @in_size: inbox buffer size
+ * @out: outbox mlx5_ifc buffer
+ * @out_size: outbox size
+ *
+ * @return:
+ * -EREMOTEIO : Command executed by FW, outbox.status != MLX5_CMD_STAT_OK.
+ * Caller must check FW outbox status.
+ * 0 : Command execution successful, outbox.status == MLX5_CMD_STAT_OK.
+ * < 0 : Command execution couldn't be performed by firmware or driver
+ */
+int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size)
+{
+ int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
+ u16 opcode = MLX5_GET(mbox_in, in, opcode);
+
+ err = cmd_status_err(dev, err, opcode, out);
+ return err;
+}
+EXPORT_SYMBOL(mlx5_cmd_do);
+
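For illustration only (not part of this patch): a minimal caller sketch of the new contract, assuming the enable_hca command layout from mlx5_ifc.h; the wrapper name and warning text are made up for the example. mlx5_cmd_do() hands back -EREMOTEIO together with a valid outbox, whereas mlx5_cmd_exec() below translates it through mlx5_cmd_check().

static int example_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
	int err;

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	MLX5_SET(enable_hca_in, in, function_id, func_id);

	err = mlx5_cmd_do(dev, in, sizeof(in), out, sizeof(out));
	if (err == -EREMOTEIO) {
		/* FW executed the command and flagged an error: the outbox
		 * status and syndrome are valid and can be reported as-is.
		 */
		mlx5_core_warn(dev, "ENABLE_HCA status 0x%x syndrome 0x%x\n",
			       MLX5_GET(mbox_out, out, status),
			       MLX5_GET(mbox_out, out, syndrome));
	}
	return err;
}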
+/**
+ * mlx5_cmd_exec - Executes a fw command, waits for completion
+ *
+ * @dev: mlx5 core device
+ * @in: inbox mlx5_ifc command buffer
+ * @in_size: inbox buffer size
+ * @out: outbox mlx5_ifc buffer
+ * @out_size: outbox size
+ *
+ * @return: 0 if no error, FW command execution was successful
+ * and outbox status is ok.
+ */
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size)
{
- int err;
+ int err = mlx5_cmd_do(dev, in, in_size, out, out_size);
- err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
- return err ? : mlx5_cmd_check(dev, in, out);
+ return mlx5_cmd_check(dev, err, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec);
+/**
+ * mlx5_cmd_exec_polling - Executes a fw command, polls for completion
+ * Needed for driver force teardown, when command completion EQ
+ * will not be available to complete the command
+ *
+ * @dev: mlx5 core device
+ * @in: inbox mlx5_ifc command buffer
+ * @in_size: inbox buffer size
+ * @out: outbox mlx5_ifc buffer
+ * @out_size: outbox size
+ *
+ * @return: 0 if no error, FW command execution was successful
+ * and outbox status is ok.
+ */
+int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
+ void *out, int out_size)
+{
+ int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
+ u16 opcode = MLX5_GET(mbox_in, in, opcode);
+
+ err = cmd_status_err(dev, err, opcode, out);
+ return mlx5_cmd_check(dev, err, in, out);
+}
+EXPORT_SYMBOL(mlx5_cmd_exec_polling);
+
void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
struct mlx5_async_ctx *ctx)
{
@@ -1905,8 +2023,10 @@ EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);
static void mlx5_cmd_exec_cb_handler(int status, void *_work)
{
struct mlx5_async_work *work = _work;
- struct mlx5_async_ctx *ctx = work->ctx;
+ struct mlx5_async_ctx *ctx;
+ ctx = work->ctx;
+ status = cmd_status_err(ctx->dev, status, work->opcode, work->out);
work->user_callback(status, work);
if (atomic_dec_and_test(&ctx->num_inflight))
wake_up(&ctx->wait);
@@ -1920,6 +2040,8 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
work->ctx = ctx;
work->user_callback = callback;
+ work->opcode = MLX5_GET(mbox_in, in, opcode);
+ work->out = out;
if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
return -EIO;
ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
@@ -1931,17 +2053,6 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);
-int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
- void *out, int out_size)
-{
- int err;
-
- err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
-
- return err ? : mlx5_cmd_check(dev, in, out);
-}
-EXPORT_SYMBOL(mlx5_cmd_exec_polling);
-
static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
struct cmd_msg_cache *ch;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 5371ad0a12eb..4caa1b6f40ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
#include <rdma/ib_verbs.h>
@@ -86,8 +85,9 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
}
-int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- u32 *in, int inlen, u32 *out, int outlen)
+/* Callers must verify outbox status in case of err */
+int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ u32 *in, int inlen, u32 *out, int outlen)
{
int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
c_eqn_or_apu_element);
@@ -101,7 +101,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
memset(out, 0, outlen);
MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
- err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
+ err = mlx5_cmd_do(dev, in, inlen, out, outlen);
if (err)
return err;
@@ -148,6 +148,16 @@ err_cmd:
mlx5_cmd_exec_in(dev, destroy_cq, din);
return err;
}
+EXPORT_SYMBOL(mlx5_create_cq);
+
+/* outbox status is checked and the err value is normalized */
+int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ u32 *in, int inlen, u32 *out, int outlen)
+{
+ int err = mlx5_create_cq(dev, cq, in, inlen, out, outlen);
+
+ return mlx5_cmd_check(dev, err, in, out);
+}
EXPORT_SYMBOL(mlx5_core_create_cq);
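A sketch of how the split might be consumed (an assumption based on the comments above, not an excerpt from a real caller): a user that wants the raw FW syndrome, e.g. to forward it to user space, calls mlx5_create_cq() and reads the generic mbox_out fields, while everyone else keeps the checking wrapper.

/* Hypothetical helper, for illustration only. */
static int example_create_cq_raw(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
				 u32 *in, int inlen, u32 *out, int outlen,
				 u32 *syndrome)
{
	int err = mlx5_create_cq(dev, cq, in, inlen, out, outlen);

	if (err == -EREMOTEIO) /* FW returned a bad status; outbox is valid */
		*syndrome = MLX5_GET(mbox_out, out, syndrome);
	return err;
}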
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 10d195042ab5..3d3e55a5cb11 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
@@ -99,26 +98,32 @@ void mlx5_unregister_debugfs(void)
debugfs_remove(mlx5_debugfs_root);
}
+struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev)
+{
+ return dev->priv.dbg.dbg_root;
+}
+EXPORT_SYMBOL(mlx5_debugfs_get_dev_root);
+
void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
{
- dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root);
+ dev->priv.dbg.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg.dbg_root);
}
EXPORT_SYMBOL(mlx5_qp_debugfs_init);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- debugfs_remove_recursive(dev->priv.qp_debugfs);
+ debugfs_remove_recursive(dev->priv.dbg.qp_debugfs);
}
EXPORT_SYMBOL(mlx5_qp_debugfs_cleanup);
void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
{
- dev->priv.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg_root);
+ dev->priv.dbg.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg.dbg_root);
}
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- debugfs_remove_recursive(dev->priv.eq_debugfs);
+ debugfs_remove_recursive(dev->priv.dbg.eq_debugfs);
}
static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
@@ -168,8 +173,8 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
const char *namep;
int i;
- cmd = &dev->priv.cmdif_debugfs;
- *cmd = debugfs_create_dir("commands", dev->priv.dbg_root);
+ cmd = &dev->priv.dbg.cmdif_debugfs;
+ *cmd = debugfs_create_dir("commands", dev->priv.dbg.dbg_root);
for (i = 0; i < MLX5_CMD_OP_MAX; i++) {
stats = &dev->cmd.stats[i];
@@ -180,23 +185,51 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
debugfs_create_file("average", 0400, stats->root, stats,
&stats_fops);
debugfs_create_u64("n", 0400, stats->root, &stats->n);
+ debugfs_create_u64("failed", 0400, stats->root, &stats->failed);
+ debugfs_create_u64("failed_mbox_status", 0400, stats->root,
+ &stats->failed_mbox_status);
+ debugfs_create_u32("last_failed_errno", 0400, stats->root,
+ &stats->last_failed_errno);
+ debugfs_create_u8("last_failed_mbox_status", 0400, stats->root,
+ &stats->last_failed_mbox_status);
}
}
}
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- debugfs_remove_recursive(dev->priv.cmdif_debugfs);
+ debugfs_remove_recursive(dev->priv.dbg.cmdif_debugfs);
}
void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
{
- dev->priv.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg_root);
+ dev->priv.dbg.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg.dbg_root);
}
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- debugfs_remove_recursive(dev->priv.cq_debugfs);
+ debugfs_remove_recursive(dev->priv.dbg.cq_debugfs);
+}
+
+void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev)
+{
+ struct dentry *pages;
+
+ dev->priv.dbg.pages_debugfs = debugfs_create_dir("pages", dev->priv.dbg.dbg_root);
+ pages = dev->priv.dbg.pages_debugfs;
+
+ debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages);
+ debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.vfs_pages);
+ debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.host_pf_pages);
+ debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed);
+ debugfs_create_u32("fw_pages_give_dropped", 0400, pages, &dev->priv.give_pages_dropped);
+ debugfs_create_u32("fw_pages_reclaim_discard", 0400, pages,
+ &dev->priv.reclaim_pages_discard);
+}
+
+void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev)
+{
+ debugfs_remove_recursive(dev->priv.dbg.pages_debugfs);
}
static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
@@ -441,7 +474,7 @@ int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
if (!mlx5_debugfs_root)
return 0;
- err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.qp_debugfs,
+ err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.dbg.qp_debugfs,
&qp->dbg, qp->qpn, qp_fields,
ARRAY_SIZE(qp_fields), qp);
if (err)
@@ -468,7 +501,7 @@ int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
if (!mlx5_debugfs_root)
return 0;
- err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.eq_debugfs,
+ err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.dbg.eq_debugfs,
&eq->dbg, eq->eqn, eq_fields,
ARRAY_SIZE(eq_fields), eq);
if (err)
@@ -493,7 +526,7 @@ int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
if (!mlx5_debugfs_root)
return 0;
- err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.cq_debugfs,
+ err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.dbg.cq_debugfs,
&cq->dbg, cq->cqn, cq_fields,
ARRAY_SIZE(cq_fields), cq);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index d1093bb2d436..057dde6f4417 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -100,15 +100,11 @@ static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netli
}
net_port_alive = !!(reset_type & MLX5_MFRL_REG_RESET_TYPE_NET_PORT_ALIVE);
- err = mlx5_fw_reset_set_reset_sync(dev, net_port_alive);
+ err = mlx5_fw_reset_set_reset_sync(dev, net_port_alive, extack);
if (err)
- goto out;
+ return err;
- err = mlx5_fw_reset_wait_reset_done(dev);
-out:
- if (err)
- NL_SET_ERR_MSG_MOD(extack, "FW activate command failed");
- return err;
+ return mlx5_fw_reset_wait_reset_done(dev);
}
static int mlx5_devlink_trigger_fw_live_patch(struct devlink *devlink,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index c14e06ca64d8..8653ac0fd865 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -59,6 +59,7 @@
#include "lib/hv_vhca.h"
#include "lib/clock.h"
#include "en/rx_res.h"
+#include "en/selq.h"
extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;
@@ -172,8 +173,9 @@ struct page_pool;
#define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\
ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)
-#define MLX5E_MAX_KLM_PER_WQE \
- MLX5E_KLM_ENTRIES_PER_WQE(MLX5E_TX_MPW_MAX_NUM_DS << MLX5_MKEY_BSF_OCTO_SIZE)
+#define MLX5E_MAX_KLM_PER_WQE(mdev) \
+ MLX5E_KLM_ENTRIES_PER_WQE(mlx5e_get_sw_max_sq_mpw_wqebbs(mlx5e_get_max_sq_wqebbs(mdev)) \
+ << MLX5_MKEY_BSF_OCTO_SIZE)
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
@@ -221,6 +223,32 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
}
+/* The maximum WQE size can be retrieved from max_wqe_sz_sq, in units of
+ * bytes. The driver hardens the limitation to 1 KB (16 WQEBBs), unless
+ * the firmware capability is stricter.
+ */
+static inline u16 mlx5e_get_max_sq_wqebbs(struct mlx5_core_dev *mdev)
+{
+ return min_t(u16, MLX5_SEND_WQE_MAX_WQEBBS,
+ MLX5_CAP_GEN(mdev, max_wqe_sz_sq) / MLX5_SEND_WQE_BB);
+}
+
+static inline u16 mlx5e_get_sw_max_sq_mpw_wqebbs(u16 max_sq_wqebbs)
+{
+/* The return value will be multiplied by MLX5_SEND_WQEBB_NUM_DS.
+ * Since max_sq_wqebbs may be up to MLX5_SEND_WQE_MAX_WQEBBS == 16,
+ * see mlx5e_get_max_sq_wqebbs(), the multiplication (16 * 4 == 64)
+ * overflows the 6-bit DS field of Ctrl Segment. Use a bound lower
+ * than MLX5_SEND_WQE_MAX_WQEBBS to let a full-session WQE be
+ * cache-aligned.
+ */
+#if L1_CACHE_BYTES < 128
+ return min_t(u16, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);
+#else
+ return min_t(u16, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 2);
+#endif
+}
+
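A short worked example of the bound above (the 64-byte WQEBB and 16-byte DS segment sizes are restated assumptions, not new facts introduced by this patch):

/* MLX5_SEND_WQEBB_NUM_DS = 64 / 16 = 4 DS entries per WQEBB.
 * 16 WQEBBs * 4 = 64 DS entries -> overflows the 6-bit DS field (max 63).
 * 15 WQEBBs * 4 = 60 DS entries -> fits; 15 * 64 B = 960 B is a multiple of
 * a 64 B cacheline but not of 128 B, hence the further drop to 14 WQEBBs
 * (896 B = 7 * 128 B) when L1_CACHE_BYTES >= 128.
 */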
struct mlx5e_tx_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_eth_seg eth;
@@ -377,6 +405,7 @@ enum {
MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
MLX5E_SQ_STATE_PENDING_XSK_TX,
MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
+ MLX5E_SQ_STATE_XDP_MULTIBUF,
};
struct mlx5e_tx_mpwqe {
@@ -427,12 +456,12 @@ struct mlx5e_txqsq {
struct netdev_queue *txq;
u32 sqn;
u16 stop_room;
+ u16 max_sq_mpw_wqebbs;
u8 min_inline_mode;
struct device *pdev;
__be32 mkey_be;
unsigned long state;
unsigned int hw_mtu;
- struct hwtstamp_config *tstamp;
struct mlx5_clock *clock;
struct net_device *netdev;
struct mlx5_core_dev *mdev;
@@ -446,6 +475,7 @@ struct mlx5e_txqsq {
struct work_struct recover_work;
struct mlx5e_ptpsq *ptpsq;
cqe_ts_to_ns ptp_cyc2time;
+ u16 max_sq_wqebbs;
} ____cacheline_aligned_in_smp;
struct mlx5e_dma_info {
@@ -486,7 +516,7 @@ struct mlx5e_xdp_info {
} frame;
struct {
struct mlx5e_rq *rq;
- struct mlx5e_dma_info di;
+ struct page *page;
} page;
};
};
@@ -508,7 +538,7 @@ struct mlx5e_xdpsq;
typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
struct mlx5e_xmit_data *,
- struct mlx5e_xdp_info *,
+ struct skb_shared_info *,
int);
struct mlx5e_xdpsq {
@@ -540,6 +570,8 @@ struct mlx5e_xdpsq {
u32 sqn;
struct device *pdev;
__be32 mkey_be;
+ u16 stop_room;
+ u16 max_sq_mpw_wqebbs;
u8 min_inline_mode;
unsigned long state;
unsigned int hw_mtu;
@@ -547,6 +579,7 @@ struct mlx5e_xdpsq {
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
struct mlx5e_channel *channel;
+ u16 max_sq_wqebbs;
} ____cacheline_aligned_in_smp;
struct mlx5e_ktls_resync_resp;
@@ -575,6 +608,7 @@ struct mlx5e_icosq {
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
struct mlx5e_channel *channel;
+ u16 max_sq_wqebbs;
struct work_struct recover_work;
} ____cacheline_aligned_in_smp;
@@ -681,6 +715,7 @@ struct mlx5e_rq {
u8 umr_in_progress;
u8 umr_last_bulk;
u8 umr_completed;
+ u8 min_wqe_bulk;
struct mlx5e_shampo_hd *shampo;
} mpwqe;
};
@@ -876,9 +911,8 @@ struct mlx5e_trap;
struct mlx5e_priv {
/* priv data path fields - start */
+ struct mlx5e_selq selq;
struct mlx5e_txqsq **txq2sq;
- int **channel_tc2realtxq;
- int port_ptp_tc2realtxq[MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx_dp dcbx_dp;
#endif
@@ -921,7 +955,6 @@ struct mlx5e_priv {
u16 drop_rq_q_counter;
struct notifier_block events_nb;
struct notifier_block blocking_events_nb;
- int num_tc_x_num_ch;
struct udp_tunnel_nic_info nic_info;
#ifdef CONFIG_MLX5_CORE_EN_DCB
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 66180ffb4606..08fd1370a8b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -178,16 +178,28 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
}
+u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz)
+{
+#define UMR_WQE_BULK (2)
+ return min_t(unsigned int, UMR_WQE_BULK, wq_sz / 2 - 1);
+}
+
u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
- bool is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ?
- mlx5e_rx_is_linear_skb(params, xsk) :
- mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);
+ u16 linear_headroom = mlx5e_get_linear_rq_headroom(params, xsk);
+
+ if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
+ return linear_headroom;
+
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
+ return linear_headroom;
+
+ if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
+ return linear_headroom;
- return is_linear_skb || params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO ?
- mlx5e_get_linear_rq_headroom(params, xsk) : 0;
+ return 0;
}
u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
@@ -196,13 +208,13 @@ u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par
u16 stop_room;
stop_room = mlx5e_tls_get_stop_room(mdev, params);
- stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+ stop_room += mlx5e_stop_room_for_max_wqe(mdev);
if (is_mpwqe)
/* A MPWQE can take up to the maximum-sized WQE + all the normal
* stop room can be taken if a new packet breaks the active
* MPWQE session and allocates its WQEs right away.
*/
- stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+ stop_room += mlx5e_stop_room_for_max_wqe(mdev);
return stop_room;
}
@@ -359,12 +371,13 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
{
/* Prefer Striding RQ, unless any of the following holds:
* - Striding RQ configuration is not possible/supported.
- * - Slow PCI heuristic.
+ * - CQE compression is ON, and stride_index mini_cqe layout is not supported.
* - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
*
* No XSK params: checking the availability of striding RQ in general.
*/
- if (!slow_pci_heuristic(mdev) &&
+ if ((!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ||
+ MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) &&
mlx5e_striding_rq_possible(mdev, params) &&
(mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
!mlx5e_rx_is_linear_skb(params, NULL)))
@@ -385,16 +398,29 @@ void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e
};
}
+static int mlx5e_max_nonlinear_mtu(int first_frag_size, int frag_size, bool xdp)
+{
+ if (xdp)
+ /* XDP requires all fragments to be of the same size. */
+ return first_frag_size + (MLX5E_MAX_RX_FRAGS - 1) * frag_size;
+
+ /* Optimization for small packets: the last fragment is bigger than the others. */
+ return first_frag_size + (MLX5E_MAX_RX_FRAGS - 2) * frag_size + PAGE_SIZE;
+}
+
#define DEFAULT_FRAG_SIZE (2048)
-static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
- struct mlx5e_rq_frags_info *info)
+static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
+ struct mlx5e_rq_frags_info *info)
{
u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
int frag_size_max = DEFAULT_FRAG_SIZE;
+ int first_frag_size_max;
u32 buf_size = 0;
+ u16 headroom;
+ int max_mtu;
int i;
if (mlx5_fpga_is_ipsec_device(mdev))
@@ -413,21 +439,48 @@ static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
goto out;
}
- if (byte_count > PAGE_SIZE +
- (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max)
+ headroom = mlx5e_get_linear_rq_headroom(params, xsk);
+ first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);
+
+ max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
+ params->xdp_prog);
+ if (byte_count > max_mtu || params->xdp_prog) {
frag_size_max = PAGE_SIZE;
+ first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);
+
+ max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max,
+ params->xdp_prog);
+ if (byte_count > max_mtu) {
+ mlx5_core_err(mdev, "MTU %u is too big for non-linear legacy RQ (max %d)\n",
+ params->sw_mtu, max_mtu);
+ return -EINVAL;
+ }
+ }
i = 0;
while (buf_size < byte_count) {
int frag_size = byte_count - buf_size;
- if (i < MLX5E_MAX_RX_FRAGS - 1)
+ if (i == 0)
+ frag_size = min(frag_size, first_frag_size_max);
+ else if (i < MLX5E_MAX_RX_FRAGS - 1)
frag_size = min(frag_size, frag_size_max);
info->arr[i].frag_size = frag_size;
- info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
-
buf_size += frag_size;
+
+ if (params->xdp_prog) {
+ /* XDP multi buffer expects fragments of the same size. */
+ info->arr[i].frag_stride = frag_size_max;
+ } else {
+ if (i == 0) {
+ /* Ensure that headroom and tailroom are included. */
+ frag_size += headroom;
+ frag_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ }
+ info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
+ }
+
i++;
}
info->num_frags = i;
@@ -437,6 +490,8 @@ static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
out:
info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
info->log_num_frags = order_base_2(info->num_frags);
+
+ return 0;
}
static u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
@@ -533,6 +588,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
int ndsegs = 1;
+ int err;
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
@@ -572,7 +628,9 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
}
default: /* MLX5_WQ_TYPE_CYCLIC */
MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
- mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
+ err = mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
+ if (err)
+ return err;
ndsegs = param->frags_info.num_frags;
}
@@ -717,7 +775,7 @@ static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
u32 wqebbs;
- max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE;
+ max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev);
max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr;
rest = max_hd_per_wqe % max_klm_per_umr;
@@ -774,10 +832,10 @@ static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
mlx5e_build_sq_param_common(mdev, param);
- param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */
+ param->stop_room = mlx5e_stop_room_for_wqe(mdev, 1); /* for XSK NOP */
param->is_tls = mlx5e_accel_is_ktls_rx(mdev);
if (param->is_tls)
- param->stop_room += mlx5e_stop_room_for_wqe(1); /* for TLS RX resync NOP */
+ param->stop_room += mlx5e_stop_room_for_wqe(mdev, 1); /* for TLS RX resync NOP */
MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
@@ -785,6 +843,7 @@ static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
@@ -793,6 +852,7 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
+ param->is_xdp_mb = !mlx5e_rx_is_linear_skb(params, xsk);
mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}
@@ -812,7 +872,7 @@ int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);
mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
- mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
+ mlx5e_build_xdpsq_param(mdev, params, NULL, &cparam->xdp_sq);
mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 433e6967692d..f5c46e78eebc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -31,6 +31,7 @@ struct mlx5e_sq_param {
struct mlx5_wq_param wq;
bool is_mpw;
bool is_tls;
+ bool is_xdp_mb;
u16 stop_room;
};
@@ -129,6 +130,7 @@ u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
+u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz);
u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
@@ -154,6 +156,7 @@ void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
struct mlx5e_cq_param *param);
void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
struct mlx5e_sq_param *param);
int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 82baafd3c00c..335b20b6383b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -195,7 +195,6 @@ static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
int node;
sq->pdev = c->pdev;
- sq->tstamp = c->tstamp;
sq->clock = &mdev->clock;
sq->mkey_be = c->mkey_be;
sq->netdev = c->netdev;
@@ -449,7 +448,7 @@ static void mlx5e_ptp_build_sq_param(struct mlx5_core_dev *mdev,
wq = MLX5_ADDR_OF(sqc, sqc, wq);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
- param->stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+ param->stop_room = mlx5e_stop_room_for_max_wqe(mdev);
mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index c1e07496c89c..9db677e9ca9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -50,7 +50,6 @@ static int mlx5e_find_unused_qos_qid(struct mlx5e_priv *priv)
struct mlx5e_qos_node {
struct hlist_node hnode;
- struct rcu_head rcu;
struct mlx5e_qos_node *parent;
u64 rate;
u32 bw_share;
@@ -132,7 +131,11 @@ static void mlx5e_sw_node_delete(struct mlx5e_priv *priv, struct mlx5e_qos_node
__clear_bit(node->qid, priv->htb.qos_used_qids);
mlx5e_update_tx_netdev_queues(priv);
}
- kfree_rcu(node, rcu);
+ /* Make sure this qid is no longer selected by mlx5e_select_queue, so
+ * that mlx5e_reactivate_qos_sq can safely restart the netdev TX queue.
+ */
+ synchronize_net();
+ kfree(node);
}
/* TX datapath API */
@@ -273,10 +276,18 @@ err_free_sq:
static void mlx5e_activate_qos_sq(struct mlx5e_priv *priv, struct mlx5e_qos_node *node)
{
struct mlx5e_txqsq *sq;
+ u16 qid;
sq = mlx5e_get_qos_sq(priv, node->qid);
- WRITE_ONCE(priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, node->qid)], sq);
+ qid = mlx5e_qid_from_qos(&priv->channels, node->qid);
+
+ /* If it's a new queue, it will be marked as started at this point.
+ * Stop it before updating txq2sq.
+ */
+ mlx5e_tx_disable_queue(netdev_get_tx_queue(priv->netdev, qid));
+
+ priv->txq2sq[qid] = sq;
/* Make the change to txq2sq visible before the queue is started.
* As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
@@ -299,8 +310,13 @@ static void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
qos_dbg(priv->mdev, "Deactivate QoS SQ qid %u\n", qid);
mlx5e_deactivate_txqsq(sq);
- /* The queue is disabled, no synchronization with datapath is needed. */
priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL;
+
+ /* Make the change to txq2sq visible before the queue is started again.
+ * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
+ * which pairs with this barrier.
+ */
+ smp_wmb();
}
static void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid)
@@ -485,9 +501,11 @@ int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls,
opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
if (opened) {
+ mlx5e_selq_prepare(&priv->selq, &priv->channels.params, true);
+
err = mlx5e_qos_alloc_queues(priv, &priv->channels);
if (err)
- return err;
+ goto err_cancel_selq;
}
root = mlx5e_sw_node_create_root(priv);
@@ -508,6 +526,9 @@ int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls,
*/
smp_store_release(&priv->htb.maj_id, htb_maj_id);
+ if (opened)
+ mlx5e_selq_apply(&priv->selq);
+
return 0;
err_sw_node_delete:
@@ -516,6 +537,8 @@ err_sw_node_delete:
err_free_queues:
if (opened)
mlx5e_qos_close_all_queues(&priv->channels);
+err_cancel_selq:
+ mlx5e_selq_cancel(&priv->selq);
return err;
}
@@ -526,8 +549,15 @@ int mlx5e_htb_root_del(struct mlx5e_priv *priv)
qos_dbg(priv->mdev, "TC_HTB_DESTROY\n");
+ /* Wait until real_num_tx_queues is updated for mlx5e_select_queue,
+ * so that we can safely switch to its non-HTB non-PTP fastpath.
+ */
+ synchronize_net();
+
+ mlx5e_selq_prepare(&priv->selq, &priv->channels.params, false);
+ mlx5e_selq_apply(&priv->selq);
+
WRITE_ONCE(priv->htb.maj_id, 0);
- synchronize_rcu(); /* Sync with mlx5e_select_htb_queue and TX data path. */
root = mlx5e_sw_node_find(priv, MLX5E_HTB_CLASSID_ROOT);
if (!root) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
index b7558907ba20..5d9bd91d86c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
@@ -18,7 +18,6 @@ int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv);
/* TX datapath API */
int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid);
-struct mlx5e_txqsq *mlx5e_get_sq(struct mlx5e_priv *priv, int qid);
/* SQ lifecycle */
int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 0991345c4ae5..86fa0bdbee36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -263,14 +263,14 @@ int mlx5e_rep_tc_init(struct mlx5e_rep_priv *rpriv)
INIT_LIST_HEAD(&uplink_priv->unready_flows);
/* init shared tc flow table */
- err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
+ err = mlx5e_tc_esw_init(uplink_priv);
return err;
}
void mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv)
{
/* delete shared tc flow table */
- mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
+ mlx5e_tc_esw_cleanup(&rpriv->uplink_priv);
mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
index c1cdd8c2e37a..7f93426b88b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
@@ -442,7 +442,7 @@ int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
goto inner_tir;
err = mlx5e_tir_modify(tir, builder);
if (err) {
- mlx5e_rss_warn(rss->mdev, "Failed to update LRO state of indirect TIR %#x for traffic type %d: err = %d\n",
+ mlx5e_rss_warn(rss->mdev, "Failed to update packet merge state of indirect TIR %#x for traffic type %d: err = %d\n",
mlx5e_tir_get_tirn(tir), tt, err);
if (!final_err)
final_err = err;
@@ -457,7 +457,7 @@ inner_tir:
continue;
err = mlx5e_tir_modify(tir, builder);
if (err) {
- mlx5e_rss_warn(rss->mdev, "Failed to update LRO state of inner indirect TIR %#x for traffic type %d: err = %d\n",
+ mlx5e_rss_warn(rss->mdev, "Failed to update packet merge state of inner indirect TIR %#x for traffic type %d: err = %d\n",
mlx5e_tir_get_tirn(tir), tt, err);
if (!final_err)
final_err = err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
new file mode 100644
index 000000000000..d98a277eb7f8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "selq.h"
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/rcupdate.h>
+#include "en.h"
+#include "en/ptp.h"
+
+struct mlx5e_selq_params {
+ unsigned int num_regular_queues;
+ unsigned int num_channels;
+ unsigned int num_tcs;
+ union {
+ u8 is_special_queues;
+ struct {
+ bool is_htb : 1;
+ bool is_ptp : 1;
+ };
+ };
+};
+
+int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
+{
+ struct mlx5e_selq_params *init_params;
+
+ selq->state_lock = state_lock;
+
+ selq->standby = kvzalloc(sizeof(*selq->standby), GFP_KERNEL);
+ if (!selq->standby)
+ return -ENOMEM;
+
+ init_params = kvzalloc(sizeof(*selq->active), GFP_KERNEL);
+ if (!init_params) {
+ kvfree(selq->standby);
+ selq->standby = NULL;
+ return -ENOMEM;
+ }
+ /* Assign dummy values, so that mlx5e_select_queue won't crash. */
+ *init_params = (struct mlx5e_selq_params) {
+ .num_regular_queues = 1,
+ .num_channels = 1,
+ .num_tcs = 1,
+ .is_htb = false,
+ .is_ptp = false,
+ };
+ rcu_assign_pointer(selq->active, init_params);
+
+ return 0;
+}
+
+void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
+{
+ WARN_ON_ONCE(selq->is_prepared);
+
+ kvfree(selq->standby);
+ selq->standby = NULL;
+ selq->is_prepared = true;
+
+ mlx5e_selq_apply(selq);
+
+ kvfree(selq->standby);
+ selq->standby = NULL;
+}
+
+void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bool htb)
+{
+ lockdep_assert_held(selq->state_lock);
+ WARN_ON_ONCE(selq->is_prepared);
+
+ selq->is_prepared = true;
+
+ selq->standby->num_channels = params->num_channels;
+ selq->standby->num_tcs = mlx5e_get_dcb_num_tc(params);
+ selq->standby->num_regular_queues =
+ selq->standby->num_channels * selq->standby->num_tcs;
+ selq->standby->is_htb = htb;
+ selq->standby->is_ptp = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS);
+}
+
+void mlx5e_selq_apply(struct mlx5e_selq *selq)
+{
+ struct mlx5e_selq_params *old_params;
+
+ WARN_ON_ONCE(!selq->is_prepared);
+
+ selq->is_prepared = false;
+
+ old_params = rcu_replace_pointer(selq->active, selq->standby,
+ lockdep_is_held(selq->state_lock));
+ synchronize_net(); /* Wait until ndo_select_queue starts emitting correct values. */
+ selq->standby = old_params;
+}
+
+void mlx5e_selq_cancel(struct mlx5e_selq *selq)
+{
+ lockdep_assert_held(selq->state_lock);
+ WARN_ON_ONCE(!selq->is_prepared);
+
+ selq->is_prepared = false;
+}
+
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+static int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
+{
+ int dscp_cp = 0;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+
+ return priv->dcbx_dp.dscp2prio[dscp_cp];
+}
+#endif
+
+static int mlx5e_get_up(struct mlx5e_priv *priv, struct sk_buff *skb)
+{
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ if (READ_ONCE(priv->dcbx_dp.trust_state) == MLX5_QPTS_TRUST_DSCP)
+ return mlx5e_get_dscp_up(priv, skb);
+#endif
+ if (skb_vlan_tag_present(skb))
+ return skb_vlan_tag_get_prio(skb);
+ return 0;
+}
+
+static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb,
+ struct mlx5e_selq_params *selq)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int up;
+
+ up = selq->num_tcs > 1 ? mlx5e_get_up(priv, skb) : 0;
+
+ return selq->num_regular_queues + up;
+}
+
+static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb)
+{
+ u16 classid;
+
+ /* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
+ if ((TC_H_MAJ(skb->priority) >> 16) == smp_load_acquire(&priv->htb.maj_id))
+ classid = TC_H_MIN(skb->priority);
+ else
+ classid = READ_ONCE(priv->htb.defcls);
+
+ if (!classid)
+ return 0;
+
+ return mlx5e_get_txq_by_classid(priv, classid);
+}
+
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_selq_params *selq;
+ int txq_ix, up;
+
+ selq = rcu_dereference_bh(priv->selq.active);
+
+ /* This is a workaround needed only for the mlx5e_netdev_change_profile
+ * flow that zeroes out the whole priv without unregistering the netdev
+ * and without preventing ndo_select_queue from being called.
+ */
+ if (unlikely(!selq))
+ return 0;
+
+ if (likely(!selq->is_special_queues)) {
+ /* No special queues, netdev_pick_tx returns one of the regular ones. */
+
+ txq_ix = netdev_pick_tx(dev, skb, NULL);
+
+ if (selq->num_tcs <= 1)
+ return txq_ix;
+
+ up = mlx5e_get_up(priv, skb);
+
+ /* Normalize any picked txq_ix to [0, num_channels),
+		 * so we can return a txq_ix that matches the channel and
+ * packet UP.
+ */
+ return mlx5e_txq_to_ch_ix(txq_ix, selq->num_channels) +
+ up * selq->num_channels;
+ }
+
+ if (unlikely(selq->is_htb)) {
+ /* num_tcs == 1, shortcut for PTP */
+
+ txq_ix = mlx5e_select_htb_queue(priv, skb);
+ if (txq_ix > 0)
+ return txq_ix;
+
+ if (unlikely(selq->is_ptp && mlx5e_use_ptpsq(skb)))
+ return selq->num_channels;
+
+ txq_ix = netdev_pick_tx(dev, skb, NULL);
+
+		/* netdev_pick_tx() must not land on the ptp_channel or HTB txqs.
+		 * If it does, fall back to a regular queue: these queues are
+		 * selected only via mlx5e_select_ptpsq() and
+		 * mlx5e_select_htb_queue().
+		 */
+ return mlx5e_txq_to_ch_ix_htb(txq_ix, selq->num_channels);
+ }
+
+ /* PTP is enabled */
+
+ if (mlx5e_use_ptpsq(skb))
+ return mlx5e_select_ptpsq(dev, skb, selq);
+
+ txq_ix = netdev_pick_tx(dev, skb, NULL);
+
+ /* Normalize any picked txq_ix to [0, num_channels). Queues in range
+ * [0, num_regular_queues) will be mapped to the corresponding channel
+ * index, so that we can apply the packet's UP (if num_tcs > 1).
+ * If netdev_pick_tx() picks ptp_channel, switch to a regular queue,
+ * because driver should select the PTP only at mlx5e_select_ptpsq().
+ */
+ txq_ix = mlx5e_txq_to_ch_ix(txq_ix, selq->num_channels);
+
+ if (selq->num_tcs <= 1)
+ return txq_ix;
+
+ up = mlx5e_get_up(priv, skb);
+
+ return txq_ix + up * selq->num_channels;
+}
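The update flow this API implies, as a sketch under assumptions (do_switch_channels() is a hypothetical stand-in for the actual channel reconfiguration, and the selq is assumed to have been initialized with &priv->state_lock):

	mutex_lock(&priv->state_lock);
	mlx5e_selq_prepare(&priv->selq, &new_params, /* htb */ false);

	err = do_switch_channels(priv, &new_params);	/* hypothetical step */
	if (err)
		mlx5e_selq_cancel(&priv->selq);	/* drop the staged params */
	else
		mlx5e_selq_apply(&priv->selq);	/* RCU-publish to mlx5e_select_queue() */
	mutex_unlock(&priv->state_lock);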
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.h b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.h
new file mode 100644
index 000000000000..6c070141d8f1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_SELQ_H__
+#define __MLX5_EN_SELQ_H__
+
+#include <linux/kernel.h>
+
+struct mlx5e_selq_params;
+
+struct mlx5e_selq {
+ struct mlx5e_selq_params __rcu *active;
+ struct mlx5e_selq_params *standby;
+ struct mutex *state_lock; /* points to priv->state_lock */
+ bool is_prepared;
+};
+
+struct mlx5e_params;
+struct net_device;
+struct sk_buff;
+
+int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock);
+void mlx5e_selq_cleanup(struct mlx5e_selq *selq);
+void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bool htb);
+void mlx5e_selq_apply(struct mlx5e_selq *selq);
+void mlx5e_selq_cancel(struct mlx5e_selq *selq);
+
+static inline u16 mlx5e_txq_to_ch_ix(u16 txq, u16 num_channels)
+{
+ while (unlikely(txq >= num_channels))
+ txq -= num_channels;
+ return txq;
+}
+
+static inline u16 mlx5e_txq_to_ch_ix_htb(u16 txq, u16 num_channels)
+{
+ if (unlikely(txq >= num_channels)) {
+ if (unlikely(txq >= num_channels << 3))
+ txq %= num_channels;
+ else
+ do
+ txq -= num_channels;
+ while (txq >= num_channels);
+ }
+ return txq;
+}
+
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev);
+
+#endif /* __MLX5_EN_SELQ_H__ */
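A worked example of the folding helpers (numbers chosen only for illustration):

/* With num_channels = 6 and num_tcs = 3 there are 18 regular txqs.
 * netdev_pick_tx() may return any of them, e.g. 14:
 *   mlx5e_txq_to_ch_ix(14, 6) == 2	(14 - 6 - 6)
 *   final txq = 2 + up * 6		(re-apply the packet's UP)
 * mlx5e_txq_to_ch_ix_htb() behaves the same but falls back to '%' once the
 * picked index exceeds 8 * num_channels, since HTB may create many txqs.
 */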
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c
index b0de6b999675..21aab96357b5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c
@@ -7,7 +7,8 @@
static bool
tc_act_can_offload_accept(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
return true;
}
@@ -18,9 +19,8 @@ tc_act_parse_accept(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- attr->flags |= MLX5_ESW_ATTR_FLAG_ACCEPT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ attr->flags |= MLX5_ATTR_FLAG_ACCEPT;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
index e600924e30ea..af37a8d247a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
@@ -2,6 +2,7 @@
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
+#include "en/tc/post_act.h"
#include "en/tc_priv.h"
#include "mlx5_core.h"
@@ -34,6 +35,13 @@ static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = {
NULL, /* FLOW_ACTION_CT_METADATA, */
&mlx5e_tc_act_mpls_push,
&mlx5e_tc_act_mpls_pop,
+ NULL, /* FLOW_ACTION_MPLS_MANGLE, */
+ NULL, /* FLOW_ACTION_GATE, */
+ NULL, /* FLOW_ACTION_PPPOE_PUSH, */
+ NULL, /* FLOW_ACTION_JUMP, */
+ NULL, /* FLOW_ACTION_PIPE, */
+ &mlx5e_tc_act_vlan,
+ &mlx5e_tc_act_vlan,
};
/* Must be aligned with enum flow_action_id. */
@@ -101,3 +109,75 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
parse_state->num_actions = flow_action->num_entries;
parse_state->extack = extack;
}
+
+void
+mlx5e_tc_act_reorder_flow_actions(struct flow_action *flow_action,
+ struct mlx5e_tc_flow_action *flow_action_reorder)
+{
+ struct flow_action_entry *act;
+ int i, j = 0;
+
+ flow_action_for_each(i, act, flow_action) {
+		/* Put CT actions first. */
+ if (act->id == FLOW_ACTION_CT)
+ flow_action_reorder->entries[j++] = act;
+ }
+
+ flow_action_for_each(i, act, flow_action) {
+ if (act->id == FLOW_ACTION_CT)
+ continue;
+ flow_action_reorder->entries[j++] = act;
+ }
+}
+
+int
+mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
+ struct flow_action *flow_action,
+ struct mlx5_flow_attr *attr,
+ enum mlx5_flow_namespace_type ns_type)
+{
+ struct flow_action_entry *act;
+ struct mlx5e_tc_act *tc_act;
+ struct mlx5e_priv *priv;
+ int err = 0, i;
+
+ priv = parse_state->flow->priv;
+
+ flow_action_for_each(i, act, flow_action) {
+ tc_act = mlx5e_tc_act_get(act->id, ns_type);
+ if (!tc_act || !tc_act->post_parse ||
+ !tc_act->can_offload(parse_state, act, i, attr))
+ continue;
+
+ err = tc_act->post_parse(parse_state, priv, attr);
+ if (err)
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+int
+mlx5e_tc_act_set_next_post_act(struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_attr *next_attr)
+{
+ struct mlx5_core_dev *mdev = flow->priv->mdev;
+ struct mlx5e_tc_mod_hdr_acts *mod_acts;
+ int err;
+
+ mod_acts = &attr->parse_attr->mod_hdr_acts;
+
+ /* Set handle on current post act rule to next post act rule. */
+ err = mlx5e_tc_post_act_set_handle(mdev, next_attr->post_act_handle, mod_acts);
+ if (err) {
+ mlx5_core_warn(mdev, "Failed setting post action handle");
+ return err;
+ }
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
index 9cc844bd00f5..f34714c5ddd4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -16,14 +16,17 @@ struct mlx5e_tc_act_parse_state {
unsigned int num_actions;
struct mlx5e_tc_flow *flow;
struct netlink_ext_ack *extack;
+ u32 actions;
+ bool ct;
bool ct_clear;
bool encap;
bool decap;
bool mpls_push;
+ bool eth_push;
+ bool eth_pop;
bool ptype_host;
const struct ip_tunnel_info *tun_info;
struct mlx5e_mpls_info mpls_info;
- struct pedit_headers_action hdrs[__PEDIT_CMD_MAX];
int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
int if_count;
struct mlx5_tc_ct_priv *ct_priv;
@@ -32,7 +35,8 @@ struct mlx5e_tc_act_parse_state {
struct mlx5e_tc_act {
bool (*can_offload)(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index);
+ int act_index,
+ struct mlx5_flow_attr *attr);
int (*parse_action)(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
@@ -42,6 +46,15 @@ struct mlx5e_tc_act {
int (*post_parse)(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr);
+
+ bool (*is_multi_table_act)(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act,
+ struct mlx5_flow_attr *attr);
+};
+
+struct mlx5e_tc_flow_action {
+ unsigned int num_entries;
+ struct flow_action_entry **entries;
};
extern struct mlx5e_tc_act mlx5e_tc_act_drop;
@@ -74,4 +87,19 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
struct flow_action *flow_action,
struct netlink_ext_ack *extack);
+void
+mlx5e_tc_act_reorder_flow_actions(struct flow_action *flow_action,
+ struct mlx5e_tc_flow_action *flow_action_reorder);
+
+int
+mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
+ struct flow_action *flow_action,
+ struct mlx5_flow_attr *attr,
+ enum mlx5_flow_namespace_type ns_type);
+
+int
+mlx5e_tc_act_set_next_post_act(struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_attr *next_attr);
+
#endif /* __MLX5_EN_TC_ACT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c
index 29920ef0180a..c0f08ae6a57f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c
@@ -38,11 +38,12 @@ csum_offload_supported(struct mlx5e_priv *priv,
static bool
tc_act_can_offload_csum(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct mlx5e_tc_flow *flow = parse_state->flow;
- return csum_offload_supported(flow->priv, flow->attr->action,
+ return csum_offload_supported(flow->priv, attr->action,
act->csum_flags, parse_state->extack);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
index 58cc33f1363d..b9d38fe807df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
@@ -8,13 +8,14 @@
static bool
tc_act_can_offload_ct(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
+ bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR;
struct netlink_ext_ack *extack = parse_state->extack;
- if (flow_flag_test(parse_state->flow, SAMPLE)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Sample action with connection tracking is not supported");
+ if (parse_state->ct && !clear_action) {
+ NL_SET_ERR_MSG_MOD(extack, "Multiple CT actions are not supported");
return false;
}
@@ -40,18 +41,34 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
if (err)
return err;
- flow_flag_set(parse_state->flow, CT);
if (mlx5e_is_eswitch_flow(parse_state->flow))
attr->esw_attr->split_count = attr->esw_attr->out_count;
+ if (!clear_action) {
+ attr->flags |= MLX5_ATTR_FLAG_CT;
+ flow_flag_set(parse_state->flow, CT);
+ parse_state->ct = true;
+ }
parse_state->ct_clear = clear_action;
return 0;
}
+static bool
+tc_act_is_multi_table_act_ct(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act,
+ struct mlx5_flow_attr *attr)
+{
+ if (act->ct.action & TCA_CT_ACT_CLEAR)
+ return false;
+
+ return true;
+}
+
struct mlx5e_tc_act mlx5e_tc_act_ct = {
.can_offload = tc_act_can_offload_ct,
.parse_action = tc_act_parse_ct,
+ .is_multi_table_act = tc_act_is_multi_table_act_ct,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c
index 2e29a23bed12..dd025a95c439 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c
@@ -7,7 +7,8 @@
static bool
tc_act_can_offload_drop(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
return true;
}
@@ -18,8 +19,7 @@ tc_act_parse_drop(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
index f44515061228..4726bcb46eec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
@@ -8,6 +8,7 @@
static int
validate_goto_chain(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
@@ -32,7 +33,7 @@ validate_goto_chain(struct mlx5e_priv *priv,
}
if (!mlx5_chains_backwards_supported(chains) &&
- dest_chain <= flow->attr->chain) {
+ dest_chain <= attr->chain) {
NL_SET_ERR_MSG_MOD(extack, "Goto lower numbered chain isn't supported");
return -EOPNOTSUPP;
}
@@ -43,8 +44,8 @@ validate_goto_chain(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (flow->attr->action & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
- MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
+ if (attr->action & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
+ MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
!reformat_and_fwd) {
NL_SET_ERR_MSG_MOD(extack,
"Goto chain is not allowed if action has reformat or decap");
@@ -57,12 +58,13 @@ validate_goto_chain(struct mlx5e_priv *priv,
static bool
tc_act_can_offload_goto(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_tc_flow *flow = parse_state->flow;
- if (validate_goto_chain(flow->priv, flow, act, extack))
+ if (validate_goto_chain(flow->priv, flow, attr, act, extack))
return false;
return true;
@@ -74,8 +76,7 @@ tc_act_parse_goto(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
attr->dest_chain = act->chain_index;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c
index d775c3d9edf3..e8d227595b3e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c
@@ -7,7 +7,8 @@
static bool
tc_act_can_offload_mark(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
if (act->mark & ~MLX5E_TC_FLOW_ID_MASK) {
NL_SET_ERR_MSG_MOD(parse_state->extack, "Bad flow mark, only 16 bit supported");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
index 2e615e0ba972..2b002c6a2e73 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
@@ -99,7 +99,8 @@ get_fdb_out_dev(struct net_device *uplink_dev, struct net_device *out_dev)
static bool
tc_act_can_offload_mirred(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_tc_flow *flow = parse_state->flow;
@@ -108,8 +109,8 @@ tc_act_can_offload_mirred(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv = flow->priv;
struct mlx5_esw_flow_attr *esw_attr;
- parse_attr = flow->attr->parse_attr;
- esw_attr = flow->attr->esw_attr;
+ parse_attr = attr->parse_attr;
+ esw_attr = attr->esw_attr;
if (!out_dev) {
/* out_dev is NULL when filters with
@@ -124,6 +125,16 @@ tc_act_can_offload_mirred(struct mlx5e_tc_act_parse_state *parse_state,
return false;
}
+ if (parse_state->eth_pop && !parse_state->mpls_push) {
+ NL_SET_ERR_MSG_MOD(extack, "vlan pop eth is supported only with mpls push");
+ return false;
+ }
+
+ if (flow_flag_test(parse_state->flow, L3_TO_L2_DECAP) && !parse_state->eth_push) {
+ NL_SET_ERR_MSG_MOD(extack, "mpls pop is only supported with vlan eth push");
+ return false;
+ }
+
if (mlx5e_is_ft_flow(flow) && out_dev == priv->netdev) {
/* Ignore forward to self rules generated
* by adding both mlx5 devs to the flow table
@@ -301,8 +312,7 @@ tc_act_parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
if (err)
return err;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c
index 2c74567b6d25..90b4c1b34776 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c
@@ -7,7 +7,8 @@
static bool
tc_act_can_offload_mirred_nic(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_tc_flow *flow = parse_state->flow;
@@ -39,8 +40,7 @@ tc_act_parse_mirred_nic(struct mlx5e_tc_act_parse_state *parse_state,
{
attr->parse_attr->mirred_ifindex[0] = act->dev->ifindex;
flow_flag_set(parse_state->flow, HAIRPIN);
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
index 89ca88c78840..f106190bf37c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
@@ -8,7 +8,8 @@
static bool
tc_act_can_offload_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_priv *priv = parse_state->flow->priv;
@@ -47,21 +48,22 @@ tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
static bool
tc_act_can_offload_mpls_pop(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
- struct mlx5e_tc_flow *flow = parse_state->flow;
struct net_device *filter_dev;
- filter_dev = flow->attr->parse_attr->filter_dev;
+ filter_dev = attr->parse_attr->filter_dev;
/* we only support mpls pop if it is the first action
+ * or the second action after tunnel key unset
* and the filter net device is bareudp. Subsequent
* actions can be pedit and the last can be mirred
* egress redirect.
*/
- if (act_index) {
- NL_SET_ERR_MSG_MOD(extack, "mpls pop supported only as first action");
+ if ((act_index == 1 && !parse_state->decap) || act_index > 1) {
+ NL_SET_ERR_MSG_MOD(extack, "mpls pop supported only as first action or with decap");
return false;
}
@@ -79,7 +81,7 @@ tc_act_parse_mpls_pop(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
- attr->parse_attr->eth.h_proto = act->mpls_pop.proto;
+ attr->esw_attr->eth.h_proto = act->mpls_pop.proto;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
flow_flag_set(parse_state->flow, L3_TO_L2_DECAP);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
index 79addbbef087..47597c524e59 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
@@ -42,12 +42,11 @@ out_err:
return -EOPNOTSUPP;
}
-static int
-parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
- const struct flow_action_entry *act, int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
- struct netlink_ext_ack *extack)
+int
+mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act, int namespace,
+ struct pedit_headers_action *hdrs,
+ struct netlink_ext_ack *extack)
{
u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
u8 htype = act->mangle.htype;
@@ -79,51 +78,11 @@ out_err:
return err;
}
-static int
-parse_pedit_to_reformat(const struct flow_action_entry *act,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct netlink_ext_ack *extack)
-{
- u32 mask, val, offset;
- u32 *p;
-
- if (act->id != FLOW_ACTION_MANGLE) {
- NL_SET_ERR_MSG_MOD(extack, "Unsupported action id");
- return -EOPNOTSUPP;
- }
-
- if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
- NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
- return -EOPNOTSUPP;
- }
-
- mask = ~act->mangle.mask;
- val = act->mangle.val;
- offset = act->mangle.offset;
- p = (u32 *)&parse_attr->eth;
- *(p + (offset >> 2)) |= (val & mask);
-
- return 0;
-}
-
-int
-mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv,
- const struct flow_action_entry *act, int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
- struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack)
-{
- if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
- return parse_pedit_to_reformat(act, parse_attr, extack);
-
- return parse_pedit_to_modify_hdr(priv, act, namespace, parse_attr, hdrs, extack);
-}
-
static bool
tc_act_can_offload_pedit(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
return true;
}
@@ -141,21 +100,16 @@ tc_act_parse_pedit(struct mlx5e_tc_act_parse_state *parse_state,
ns_type = mlx5e_get_flow_namespace(flow);
- err = mlx5e_tc_act_pedit_parse_action(flow->priv, act, ns_type,
- attr->parse_attr, parse_state->hdrs,
- flow, parse_state->extack);
+ err = mlx5e_tc_act_pedit_parse_action(flow->priv, act, ns_type, attr->parse_attr->hdrs,
+ parse_state->extack);
if (err)
return err;
- if (flow_flag_test(flow, L3_TO_L2_DECAP))
- goto out;
-
attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
esw_attr->split_count = esw_attr->out_count;
-out:
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h
index da8ab03af58f..434c8bd710a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h
@@ -24,9 +24,7 @@ struct pedit_headers_action {
int
mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv,
const struct flow_action_entry *act, int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
- struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack);
#endif /* __MLX5_EN_TC_ACT_PEDIT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c
index 0819110193dc..6454b031ff7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c
@@ -7,7 +7,8 @@
static bool
tc_act_can_offload_ptype(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
return true;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
index 1c32e24e528d..ad09a8a5f36e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
@@ -7,16 +7,16 @@
static bool
tc_act_can_offload_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
- struct mlx5e_tc_flow *flow = parse_state->flow;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct net_device *out_dev = act->dev;
struct mlx5_esw_flow_attr *esw_attr;
- parse_attr = flow->attr->parse_attr;
- esw_attr = flow->attr->esw_attr;
+ parse_attr = attr->parse_attr;
+ esw_attr = attr->esw_attr;
if (!out_dev)
return false;
@@ -58,8 +58,7 @@ tc_act_parse_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state,
struct net_device *out_dev = act->dev;
int err;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex,
MLX5E_TC_INT_PORT_INGRESS,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c
index 6699bdf5cf01..2c0196431302 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c
@@ -4,17 +4,21 @@
#include <net/psample.h>
#include "act.h"
#include "en/tc_priv.h"
+#include "en/tc/act/sample.h"
static bool
tc_act_can_offload_sample(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
+ bool ct_nat;
- if (flow_flag_test(parse_state->flow, CT)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Sample action with connection tracking is not supported");
+ ct_nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;
+
+ if (flow_flag_test(parse_state->flow, CT) && ct_nat) {
+ NL_SET_ERR_MSG_MOD(extack, "Sample action with CT NAT is not supported");
return false;
}
@@ -27,11 +31,7 @@ tc_act_parse_sample(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
- struct mlx5e_sample_attr *sample_attr;
-
- sample_attr = kzalloc(sizeof(*attr->sample_attr), GFP_KERNEL);
- if (!sample_attr)
- return -ENOMEM;
+ struct mlx5e_sample_attr *sample_attr = &attr->sample_attr;
sample_attr->rate = act->sample.rate;
sample_attr->group_num = act->sample.psample_group->group_num;
@@ -39,13 +39,33 @@ tc_act_parse_sample(struct mlx5e_tc_act_parse_state *parse_state,
if (act->sample.truncate)
sample_attr->trunc_size = act->sample.trunc_size;
- attr->sample_attr = sample_attr;
+ attr->flags |= MLX5_ATTR_FLAG_SAMPLE;
flow_flag_set(parse_state->flow, SAMPLE);
return 0;
}
+bool
+mlx5e_tc_act_sample_is_multi_table(struct mlx5_core_dev *mdev,
+ struct mlx5_flow_attr *attr)
+{
+ if (MLX5_CAP_GEN(mdev, reg_c_preserve) ||
+ attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
+ return true;
+
+ return false;
+}
+
+static bool
+tc_act_is_multi_table_act_sample(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act,
+ struct mlx5_flow_attr *attr)
+{
+ return mlx5e_tc_act_sample_is_multi_table(priv->mdev, attr);
+}
+
struct mlx5e_tc_act mlx5e_tc_act_sample = {
.can_offload = tc_act_can_offload_sample,
.parse_action = tc_act_parse_sample,
+ .is_multi_table_act = tc_act_is_multi_table_act_sample,
};
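is_multi_table_act is a plain function pointer in struct mlx5e_tc_act, so the parse loop can probe it per action. The dispatch code is not part of this hunk; the helper below is a hypothetical sketch of how a caller might consult it.

/* Hypothetical helper; only the callback's signature comes from this patch. */
static bool act_needs_new_attr(struct mlx5e_priv *priv, struct mlx5e_tc_act *tc_act,
			       const struct flow_action_entry *act,
			       struct mlx5_flow_attr *attr)
{
	return tc_act->is_multi_table_act &&
	       tc_act->is_multi_table_act(priv, act, attr);
}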
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.h
new file mode 100644
index 000000000000..3efb3a15c5d2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_TC_ACT_SAMPLE_H__
+#define __MLX5_EN_TC_ACT_SAMPLE_H__
+
+#include <net/flow_offload.h>
+#include "en/tc_priv.h"
+
+bool
+mlx5e_tc_act_sample_is_multi_table(struct mlx5_core_dev *mdev,
+ struct mlx5_flow_attr *attr);
+
+#endif /* __MLX5_EN_TC_ACT_SAMPLE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
index 046b64c2cec4..a7d9eab19e4a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
@@ -7,7 +7,8 @@
static bool
tc_act_can_offload_trap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
@@ -25,9 +26,8 @@ tc_act_parse_trap(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c
index 6f4a2cf46afd..b4fa2de9711d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c
@@ -8,7 +8,8 @@
static bool
tc_act_can_offload_tun_encap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
if (!act->tunnel) {
NL_SET_ERR_MSG_MOD(parse_state->extack,
@@ -34,7 +35,8 @@ tc_act_parse_tun_encap(struct mlx5e_tc_act_parse_state *parse_state,
static bool
tc_act_can_offload_tun_decap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
return true;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
index 70fc0c2d8813..b86ac604d0c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
@@ -9,7 +9,6 @@
static int
add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
u32 *action, struct netlink_ext_ack *extack)
{
const struct flow_action_entry prio_tag_act = {
@@ -26,7 +25,7 @@ add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
};
return mlx5e_tc_act_vlan_add_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
- &prio_tag_act, parse_attr, hdrs, action,
+ &prio_tag_act, parse_attr, action,
extack);
}
@@ -35,7 +34,8 @@ parse_tc_vlan_action(struct mlx5e_priv *priv,
const struct flow_action_entry *act,
struct mlx5_esw_flow_attr *attr,
u32 *action,
- struct netlink_ext_ack *extack)
+ struct netlink_ext_ack *extack,
+ struct mlx5e_tc_act_parse_state *parse_state)
{
u8 vlan_idx = attr->total_vlan;
@@ -85,6 +85,16 @@ parse_tc_vlan_action(struct mlx5e_priv *priv,
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
}
break;
+ case FLOW_ACTION_VLAN_POP_ETH:
+ parse_state->eth_pop = true;
+ break;
+ case FLOW_ACTION_VLAN_PUSH_ETH:
+ if (!flow_flag_test(parse_state->flow, L3_TO_L2_DECAP))
+ return -EOPNOTSUPP;
+ parse_state->eth_push = true;
+ memcpy(attr->eth.h_dest, act->vlan_push_eth.dst, ETH_ALEN);
+ memcpy(attr->eth.h_source, act->vlan_push_eth.src, ETH_ALEN);
+ break;
default:
NL_SET_ERR_MSG_MOD(extack, "Unexpected action id for VLAN");
return -EINVAL;
@@ -110,7 +120,7 @@ mlx5e_tc_act_vlan_add_push_action(struct mlx5e_priv *priv,
};
int err;
- err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action, extack);
+ err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action, extack, NULL);
if (err)
return err;
@@ -140,7 +150,7 @@ mlx5e_tc_act_vlan_add_pop_action(struct mlx5e_priv *priv,
priv->netdev->lower_level;
while (nest_level--) {
err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action,
- extack);
+ extack, NULL);
if (err)
return err;
}
@@ -151,7 +161,8 @@ mlx5e_tc_act_vlan_add_pop_action(struct mlx5e_priv *priv,
static bool
tc_act_can_offload_vlan(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
return true;
}
@@ -170,11 +181,11 @@ tc_act_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
/* Replace vlan pop+push with vlan modify */
attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
err = mlx5e_tc_act_vlan_add_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB, act,
- attr->parse_attr, parse_state->hdrs,
- &attr->action, parse_state->extack);
+ attr->parse_attr, &attr->action,
+ parse_state->extack);
} else {
err = parse_tc_vlan_action(priv, act, esw_attr, &attr->action,
- parse_state->extack);
+ parse_state->extack, parse_state);
}
if (err)
@@ -191,7 +202,6 @@ tc_act_post_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5_flow_attr *attr)
{
struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
- struct pedit_headers_action *hdrs = parse_state->hdrs;
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int err;
@@ -202,7 +212,7 @@ tc_act_post_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
* tag rewrite.
*/
attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
- err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
+ err = add_vlan_prio_tag_rewrite_action(priv, parse_attr,
&attr->action, extack);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h
index 3d62f13ab61f..2fa58c6f44eb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h
@@ -24,7 +24,6 @@ int
mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
const struct flow_action_entry *act,
struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
u32 *action, struct netlink_ext_ack *extack);
#endif /* __MLX5_EN_TC_ACT_VLAN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
index 63e36e7f53e3..9a8a1a6bd99e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
@@ -12,7 +12,6 @@ int
mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
const struct flow_action_entry *act,
struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
u32 *action, struct netlink_ext_ack *extack)
{
u16 mask16 = VLAN_VID_MASK;
@@ -44,8 +43,8 @@ mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
return -EOPNOTSUPP;
}
- err = mlx5e_tc_act_pedit_parse_action(priv, &pedit_act, namespace, parse_attr, hdrs,
- NULL, extack);
+ err = mlx5e_tc_act_pedit_parse_action(priv, &pedit_act, namespace, parse_attr->hdrs,
+ extack);
*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
return err;
@@ -54,7 +53,8 @@ mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
static bool
tc_act_can_offload_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
- int act_index)
+ int act_index,
+ struct mlx5_flow_attr *attr)
{
return true;
}
@@ -69,8 +69,7 @@ tc_act_parse_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
int err;
ns_type = mlx5e_get_flow_namespace(parse_state->flow);
- err = mlx5e_tc_act_vlan_add_rewrite_action(priv, ns_type, act,
- attr->parse_attr, parse_state->hdrs,
+ err = mlx5e_tc_act_vlan_add_rewrite_action(priv, ns_type, act, attr->parse_attr,
&attr->action, parse_state->extack);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
new file mode 100644
index 000000000000..bb6b1a979ba1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __MLX5_EN_TC_CT_FS_H__
+#define __MLX5_EN_TC_CT_FS_H__
+
+struct mlx5_ct_fs {
+ const struct net_device *netdev;
+ struct mlx5_core_dev *dev;
+
+ /* private data */
+ void *priv_data[];
+};
+
+struct mlx5_ct_fs_rule {
+};
+
+struct mlx5_ct_fs_ops {
+ int (*init)(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct,
+ struct mlx5_flow_table *ct_nat, struct mlx5_flow_table *post_ct);
+ void (*destroy)(struct mlx5_ct_fs *fs);
+
+ struct mlx5_ct_fs_rule * (*ct_rule_add)(struct mlx5_ct_fs *fs,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr,
+ struct flow_rule *flow_rule);
+ void (*ct_rule_del)(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule);
+
+ size_t priv_size;
+};
+
+static inline void *mlx5_ct_fs_priv(struct mlx5_ct_fs *fs)
+{
+ return &fs->priv_data;
+}
+
+struct mlx5_ct_fs_ops *mlx5_ct_fs_dmfs_ops_get(void);
+
+#if IS_ENABLED(CONFIG_MLX5_SW_STEERING)
+struct mlx5_ct_fs_ops *mlx5_ct_fs_smfs_ops_get(void);
+#else
+static inline struct mlx5_ct_fs_ops *
+mlx5_ct_fs_smfs_ops_get(void)
+{
+ return NULL;
+}
+#endif /* IS_ENABLED(CONFIG_MLX5_SW_STEERING) */
+
+#endif /* __MLX5_EN_TC_CT_FS_H__ */
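The backend's private state lives in the priv_data[] flexible array and is sized by ops->priv_size, so one allocation covers both the common and backend-specific parts. The allocation site is not part of this hunk; the helper below is a hypothetical sketch of that pattern.

/* Hypothetical allocation helper illustrating the priv_size/priv_data[] contract. */
static struct mlx5_ct_fs *
ct_fs_alloc_example(struct mlx5_core_dev *dev, const struct net_device *netdev,
		    const struct mlx5_ct_fs_ops *ops)
{
	struct mlx5_ct_fs *fs;

	/* ops->priv_size extra bytes become fs->priv_data[], see mlx5_ct_fs_priv(). */
	fs = kzalloc(sizeof(*fs) + ops->priv_size, GFP_KERNEL);
	if (!fs)
		return NULL;

	fs->dev = dev;
	fs->netdev = netdev;
	return fs;
}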
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c
new file mode 100644
index 000000000000..ae4f55be48ce
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#include "en_tc.h"
+#include "en/tc_ct.h"
+#include "en/tc/ct_fs.h"
+
+#define ct_dbg(fmt, args...)\
+ netdev_dbg(fs->netdev, "ct_fs_dmfs debug: " fmt "\n", ##args)
+
+struct mlx5_ct_fs_dmfs_rule {
+ struct mlx5_ct_fs_rule fs_rule;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_attr *attr;
+};
+
+static int
+mlx5_ct_fs_dmfs_init(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct,
+ struct mlx5_flow_table *ct_nat, struct mlx5_flow_table *post_ct)
+{
+ return 0;
+}
+
+static void
+mlx5_ct_fs_dmfs_destroy(struct mlx5_ct_fs *fs)
+{
+}
+
+static struct mlx5_ct_fs_rule *
+mlx5_ct_fs_dmfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)
+{
+ struct mlx5e_priv *priv = netdev_priv(fs->netdev);
+ struct mlx5_ct_fs_dmfs_rule *dmfs_rule;
+ int err;
+
+ dmfs_rule = kzalloc(sizeof(*dmfs_rule), GFP_KERNEL);
+ if (!dmfs_rule)
+ return ERR_PTR(-ENOMEM);
+
+ dmfs_rule->rule = mlx5_tc_rule_insert(priv, spec, attr);
+ if (IS_ERR(dmfs_rule->rule)) {
+ err = PTR_ERR(dmfs_rule->rule);
+ ct_dbg("Failed to add ct entry fs rule");
+ goto err_insert;
+ }
+
+ dmfs_rule->attr = attr;
+
+ return &dmfs_rule->fs_rule;
+
+err_insert:
+ kfree(dmfs_rule);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_ct_fs_dmfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule)
+{
+ struct mlx5_ct_fs_dmfs_rule *dmfs_rule = container_of(fs_rule,
+ struct mlx5_ct_fs_dmfs_rule,
+ fs_rule);
+
+ mlx5_tc_rule_delete(netdev_priv(fs->netdev), dmfs_rule->rule, dmfs_rule->attr);
+ kfree(dmfs_rule);
+}
+
+static struct mlx5_ct_fs_ops dmfs_ops = {
+ .ct_rule_add = mlx5_ct_fs_dmfs_ct_rule_add,
+ .ct_rule_del = mlx5_ct_fs_dmfs_ct_rule_del,
+
+ .init = mlx5_ct_fs_dmfs_init,
+ .destroy = mlx5_ct_fs_dmfs_destroy,
+};
+
+struct mlx5_ct_fs_ops *mlx5_ct_fs_dmfs_ops_get(void)
+{
+ return &dmfs_ops;
+}
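dmfs is the baseline backend: ct_rule_add/del simply wrap mlx5_tc_rule_insert/delete. How the core chooses between dmfs and smfs is outside this file; the sketch below is an assumed selection policy using only the two *_ops_get() helpers declared in ct_fs.h.

/* Assumed selection policy, not taken from this patch. */
static struct mlx5_ct_fs_ops *ct_fs_pick_ops_example(void)
{
	struct mlx5_ct_fs_ops *ops = mlx5_ct_fs_smfs_ops_get();

	/* The smfs getter is a NULL stub when CONFIG_MLX5_SW_STEERING is off. */
	return ops ? ops : mlx5_ct_fs_dmfs_ops_get();
}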
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
new file mode 100644
index 000000000000..59988e24b704
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#include <linux/refcount.h>
+
+#include "en_tc.h"
+#include "en/tc_priv.h"
+#include "en/tc_ct.h"
+#include "en/tc/ct_fs.h"
+
+#include "lib/smfs.h"
+
+#define INIT_ERR_PREFIX "ct_fs_smfs init failed"
+#define ct_dbg(fmt, args...)\
+ netdev_dbg(fs->netdev, "ct_fs_smfs debug: " fmt "\n", ##args)
+#define MLX5_CT_TCP_FLAGS_MASK cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16)
+
+struct mlx5_ct_fs_smfs_matcher {
+ struct mlx5dr_matcher *dr_matcher;
+ struct list_head list;
+ int prio;
+ refcount_t ref;
+};
+
+struct mlx5_ct_fs_smfs_matchers {
+ struct mlx5_ct_fs_smfs_matcher smfs_matchers[4];
+ struct list_head used;
+};
+
+struct mlx5_ct_fs_smfs {
+ struct mlx5dr_table *ct_tbl, *ct_nat_tbl;
+ struct mlx5_ct_fs_smfs_matchers matchers;
+ struct mlx5_ct_fs_smfs_matchers matchers_nat;
+ struct mlx5dr_action *fwd_action;
+ struct mlx5_flow_table *ct_nat;
+ struct mutex lock; /* Guards matchers */
+};
+
+struct mlx5_ct_fs_smfs_rule {
+ struct mlx5_ct_fs_rule fs_rule;
+ struct mlx5dr_rule *rule;
+ struct mlx5dr_action *count_action;
+ struct mlx5_ct_fs_smfs_matcher *smfs_matcher;
+};
+
+static inline void
+mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bool ipv4, bool tcp)
+{
+ void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
+
+ if (likely(MLX5_CAP_FLOWTABLE_NIC_RX(fs->dev, ft_field_support.outer_ip_version)))
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
+ else
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
+
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
+ if (likely(ipv4)) {
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ } else {
+ memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ 0xFF,
+ MLX5_FLD_SZ_BYTES(fte_match_set_lyr_2_4,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6));
+ memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ 0xFF,
+ MLX5_FLD_SZ_BYTES(fte_match_set_lyr_2_4,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6));
+ }
+
+ if (likely(tcp)) {
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, tcp_sport);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, tcp_dport);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
+ ntohs(MLX5_CT_TCP_FLAGS_MASK));
+ } else {
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_sport);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_dport);
+ }
+
+ mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, 0, MLX5_CT_ZONE_MASK);
+}
+
+static struct mlx5dr_matcher *
+mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl, bool ipv4,
+ bool tcp, u32 priority)
+{
+ struct mlx5dr_matcher *dr_matcher;
+ struct mlx5_flow_spec *spec;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+
+ mlx5_ct_fs_smfs_fill_mask(fs, spec, ipv4, tcp);
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS;
+
+ dr_matcher = mlx5_smfs_matcher_create(tbl, priority, spec);
+ kfree(spec);
+ if (!dr_matcher)
+ return ERR_PTR(-EINVAL);
+
+ return dr_matcher;
+}
+
+static struct mlx5_ct_fs_smfs_matcher *
+mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp)
+{
+ struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
+ struct mlx5_ct_fs_smfs_matcher *m, *smfs_matcher;
+ struct mlx5_ct_fs_smfs_matchers *matchers;
+ struct mlx5dr_matcher *dr_matcher;
+ struct mlx5dr_table *tbl;
+ struct list_head *prev;
+ int prio;
+
+ matchers = nat ? &fs_smfs->matchers_nat : &fs_smfs->matchers;
+ smfs_matcher = &matchers->smfs_matchers[ipv4 * 2 + tcp];
+
+ if (refcount_inc_not_zero(&smfs_matcher->ref))
+ return smfs_matcher;
+
+ mutex_lock(&fs_smfs->lock);
+
+ /* Retry under the lock, as another thread might have already created the
+ * relevant matcher before we acquired it
+ */
+ if (refcount_inc_not_zero(&smfs_matcher->ref))
+ goto out_unlock;
+
+ /* Find the next available priority in the sorted used list */
+ prio = 0;
+ prev = &matchers->used;
+ list_for_each_entry(m, &matchers->used, list) {
+ prev = &m->list;
+
+ if (m->prio == prio)
+ prio = m->prio + 1;
+ else
+ break;
+ }
+
+ tbl = nat ? fs_smfs->ct_nat_tbl : fs_smfs->ct_tbl;
+ dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, prio);
+ if (IS_ERR(dr_matcher)) {
+ netdev_warn(fs->netdev,
+ "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d), err: %ld\n",
+ nat, ipv4, tcp, PTR_ERR(dr_matcher));
+
+ smfs_matcher = ERR_CAST(dr_matcher);
+ goto out_unlock;
+ }
+
+ smfs_matcher->dr_matcher = dr_matcher;
+ smfs_matcher->prio = prio;
+ list_add(&smfs_matcher->list, prev);
+ refcount_set(&smfs_matcher->ref, 1);
+
+out_unlock:
+ mutex_unlock(&fs_smfs->lock);
+ return smfs_matcher;
+}
+
+static void
+mlx5_ct_fs_smfs_matcher_put(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_smfs_matcher *smfs_matcher)
+{
+ struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
+
+ if (!refcount_dec_and_mutex_lock(&smfs_matcher->ref, &fs_smfs->lock))
+ return;
+
+ mlx5_smfs_matcher_destroy(smfs_matcher->dr_matcher);
+ list_del(&smfs_matcher->list);
+ mutex_unlock(&fs_smfs->lock);
+}
+
+static int
+mlx5_ct_fs_smfs_init(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct,
+ struct mlx5_flow_table *ct_nat, struct mlx5_flow_table *post_ct)
+{
+ struct mlx5dr_table *ct_tbl, *ct_nat_tbl, *post_ct_tbl;
+ struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
+
+ post_ct_tbl = mlx5_smfs_table_get_from_fs_ft(post_ct);
+ ct_nat_tbl = mlx5_smfs_table_get_from_fs_ft(ct_nat);
+ ct_tbl = mlx5_smfs_table_get_from_fs_ft(ct);
+ fs_smfs->ct_nat = ct_nat;
+
+ if (!ct_tbl || !ct_nat_tbl || !post_ct_tbl) {
+ netdev_warn(fs->netdev, "ct_fs_smfs: failed to init, missing backing dr tables");
+ return -EOPNOTSUPP;
+ }
+
+ ct_dbg("using smfs steering");
+
+ fs_smfs->fwd_action = mlx5_smfs_action_create_dest_table(post_ct_tbl);
+ if (!fs_smfs->fwd_action) {
+ return -EINVAL;
+ }
+
+ fs_smfs->ct_tbl = ct_tbl;
+ fs_smfs->ct_nat_tbl = ct_nat_tbl;
+ mutex_init(&fs_smfs->lock);
+ INIT_LIST_HEAD(&fs_smfs->matchers.used);
+ INIT_LIST_HEAD(&fs_smfs->matchers_nat.used);
+
+ return 0;
+}
+
+static void
+mlx5_ct_fs_smfs_destroy(struct mlx5_ct_fs *fs)
+{
+ struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
+
+ mlx5_smfs_action_destroy(fs_smfs->fwd_action);
+}
+
+static inline bool
+mlx5_tc_ct_valid_used_dissector_keys(const u32 used_keys)
+{
+#define DISSECTOR_BIT(name) BIT(FLOW_DISSECTOR_KEY_ ## name)
+ const u32 basic_keys = DISSECTOR_BIT(BASIC) | DISSECTOR_BIT(CONTROL) |
+ DISSECTOR_BIT(PORTS) | DISSECTOR_BIT(META);
+ const u32 ipv4_tcp = basic_keys | DISSECTOR_BIT(IPV4_ADDRS) | DISSECTOR_BIT(TCP);
+ const u32 ipv4_udp = basic_keys | DISSECTOR_BIT(IPV4_ADDRS);
+ const u32 ipv6_tcp = basic_keys | DISSECTOR_BIT(IPV6_ADDRS) | DISSECTOR_BIT(TCP);
+ const u32 ipv6_udp = basic_keys | DISSECTOR_BIT(IPV6_ADDRS);
+
+ return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
+ used_keys == ipv6_udp);
+}
+
+static bool
+mlx5_ct_fs_smfs_ct_validate_flow_rule(struct mlx5_ct_fs *fs, struct flow_rule *flow_rule)
+{
+ struct flow_match_ipv4_addrs ipv4_addrs;
+ struct flow_match_ipv6_addrs ipv6_addrs;
+ struct flow_match_control control;
+ struct flow_match_basic basic;
+ struct flow_match_ports ports;
+ struct flow_match_tcp tcp;
+
+ if (!mlx5_tc_ct_valid_used_dissector_keys(flow_rule->match.dissector->used_keys)) {
+ ct_dbg("rule uses unexpected dissectors (0x%08x)",
+ flow_rule->match.dissector->used_keys);
+ return false;
+ }
+
+ flow_rule_match_basic(flow_rule, &basic);
+ flow_rule_match_control(flow_rule, &control);
+ flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
+ flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
+ flow_rule_match_ports(flow_rule, &ports);
+ flow_rule_match_tcp(flow_rule, &tcp);
+
+ if (basic.mask->n_proto != htons(0xFFFF) ||
+ (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
+ basic.mask->ip_proto != 0xFF ||
+ (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP)) {
+ ct_dbg("rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
+ ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
+ basic.key->ip_proto, basic.mask->ip_proto);
+ return false;
+ }
+
+ if (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF)) {
+ ct_dbg("rule uses ports match (src 0x%04x, dst 0x%04x)",
+ ports.mask->src, ports.mask->dst);
+ return false;
+ }
+
+ if (basic.key->ip_proto == IPPROTO_TCP && tcp.mask->flags != MLX5_CT_TCP_FLAGS_MASK) {
+ ct_dbg("rule uses unexpected tcp match (flags 0x%02x)", tcp.mask->flags);
+ return false;
+ }
+
+ return true;
+}
+
+static struct mlx5_ct_fs_rule *
+mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)
+{
+ struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
+ struct mlx5_ct_fs_smfs_matcher *smfs_matcher;
+ struct mlx5_ct_fs_smfs_rule *smfs_rule;
+ struct mlx5dr_action *actions[5];
+ struct mlx5dr_rule *rule;
+ int num_actions = 0, err;
+ bool nat, tcp, ipv4;
+
+ if (!mlx5_ct_fs_smfs_ct_validate_flow_rule(fs, flow_rule))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ smfs_rule = kzalloc(sizeof(*smfs_rule), GFP_KERNEL);
+ if (!smfs_rule)
+ return ERR_PTR(-ENOMEM);
+
+ smfs_rule->count_action = mlx5_smfs_action_create_flow_counter(mlx5_fc_id(attr->counter));
+ if (!smfs_rule->count_action) {
+ err = -EINVAL;
+ goto err_count;
+ }
+
+ actions[num_actions++] = smfs_rule->count_action;
+ actions[num_actions++] = attr->modify_hdr->action.dr_action;
+ actions[num_actions++] = fs_smfs->fwd_action;
+
+ nat = (attr->ft == fs_smfs->ct_nat);
+ ipv4 = mlx5e_tc_get_ip_version(spec, true) == 4;
+ tcp = MLX5_GET(fte_match_param, spec->match_value,
+ outer_headers.ip_protocol) == IPPROTO_TCP;
+
+ smfs_matcher = mlx5_ct_fs_smfs_matcher_get(fs, nat, ipv4, tcp);
+ if (IS_ERR(smfs_matcher)) {
+ err = PTR_ERR(smfs_matcher);
+ goto err_matcher;
+ }
+
+ rule = mlx5_smfs_rule_create(smfs_matcher->dr_matcher, spec, num_actions, actions,
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT);
+ if (!rule) {
+ err = -EINVAL;
+ goto err_create;
+ }
+
+ smfs_rule->rule = rule;
+ smfs_rule->smfs_matcher = smfs_matcher;
+
+ return &smfs_rule->fs_rule;
+
+err_create:
+ mlx5_ct_fs_smfs_matcher_put(fs, smfs_matcher);
+err_matcher:
+ mlx5_smfs_action_destroy(smfs_rule->count_action);
+err_count:
+ kfree(smfs_rule);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_ct_fs_smfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule)
+{
+ struct mlx5_ct_fs_smfs_rule *smfs_rule = container_of(fs_rule,
+ struct mlx5_ct_fs_smfs_rule,
+ fs_rule);
+
+ mlx5_smfs_rule_destroy(smfs_rule->rule);
+ mlx5_ct_fs_smfs_matcher_put(fs, smfs_rule->smfs_matcher);
+ mlx5_smfs_action_destroy(smfs_rule->count_action);
+ kfree(smfs_rule);
+}
+
+static struct mlx5_ct_fs_ops fs_smfs_ops = {
+ .ct_rule_add = mlx5_ct_fs_smfs_ct_rule_add,
+ .ct_rule_del = mlx5_ct_fs_smfs_ct_rule_del,
+
+ .init = mlx5_ct_fs_smfs_init,
+ .destroy = mlx5_ct_fs_smfs_destroy,
+
+ .priv_size = sizeof(struct mlx5_ct_fs_smfs),
+};
+
+struct mlx5_ct_fs_ops *
+mlx5_ct_fs_smfs_ops_get(void)
+{
+ return &fs_smfs_ops;
+}
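The matcher cache above relies on a double-checked refcount: a lock-free reference grab on the fast path, a recheck under the mutex before creating the dr matcher, and refcount_dec_and_mutex_lock() on release. Reduced to its essentials (names illustrative, not from this patch):

/* Illustrative reduction of the get-or-create pattern used for smfs matchers. */
struct shared_obj {
	refcount_t ref;
	struct mutex *lock;	/* guards creation and destruction */
};

static int shared_obj_get(struct shared_obj *obj)
{
	/* Fast path: the object already exists, just take a reference. */
	if (refcount_inc_not_zero(&obj->ref))
		return 0;

	mutex_lock(obj->lock);
	/* Recheck: another thread may have created it while we waited. */
	if (refcount_inc_not_zero(&obj->ref)) {
		mutex_unlock(obj->lock);
		return 0;
	}

	/* ... create the underlying resource here ... */
	refcount_set(&obj->ref, 1);
	mutex_unlock(obj->lock);
	return 0;
}

static void shared_obj_put(struct shared_obj *obj)
{
	/* Drop to zero and destroy under the same lock. */
	if (!refcount_dec_and_mutex_lock(&obj->ref, obj->lock))
		return;
	/* ... destroy the underlying resource here ... */
	mutex_unlock(obj->lock);
}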
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
index 31b4e39be2d3..dea137dd744b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+#include "en/tc_priv.h"
#include "en_tc.h"
#include "post_act.h"
#include "mlx5_core.h"
@@ -75,21 +76,47 @@ mlx5e_tc_post_act_destroy(struct mlx5e_post_act *post_act)
kfree(post_act);
}
+int
+mlx5e_tc_post_act_offload(struct mlx5e_post_act *post_act,
+ struct mlx5e_post_act_handle *handle)
+{
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ /* The post action rule matches on fte_id and executes the original rule's actions */
+ mlx5e_tc_match_to_reg_match(spec, FTEID_TO_REG, handle->id, MLX5_POST_ACTION_MASK);
+
+ handle->rule = mlx5e_tc_rule_offload(post_act->priv, spec, handle->attr);
+ if (IS_ERR(handle->rule)) {
+ err = PTR_ERR(handle->rule);
+ netdev_warn(post_act->priv->netdev, "Failed to add post action rule");
+ goto err_rule;
+ }
+
+ kvfree(spec);
+ return 0;
+
+err_rule:
+ kvfree(spec);
+ return err;
+}
+
struct mlx5e_post_act_handle *
mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *attr)
{
u32 attr_sz = ns_to_attr_sz(post_act->ns_type);
- struct mlx5e_post_act_handle *handle = NULL;
- struct mlx5_flow_attr *post_attr = NULL;
- struct mlx5_flow_spec *spec = NULL;
+ struct mlx5e_post_act_handle *handle;
+ struct mlx5_flow_attr *post_attr;
int err;
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
post_attr = mlx5_alloc_flow_attr(post_act->ns_type);
- if (!handle || !spec || !post_attr) {
+ if (!handle || !post_attr) {
kfree(post_attr);
- kvfree(spec);
kfree(handle);
return ERR_PTR(-ENOMEM);
}
@@ -100,7 +127,7 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at
post_attr->ft = post_act->ft;
post_attr->inner_match_level = MLX5_MATCH_NONE;
post_attr->outer_match_level = MLX5_MATCH_NONE;
- post_attr->action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP);
+ post_attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_DECAP;
handle->ns_type = post_act->ns_type;
/* Splits were handled before post action */
@@ -112,36 +139,29 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at
if (err)
goto err_xarray;
- /* Post action rule matches on fte_id and executes original rule's
- * tc rule action
- */
- mlx5e_tc_match_to_reg_match(spec, FTEID_TO_REG,
- handle->id, MLX5_POST_ACTION_MASK);
-
- handle->rule = mlx5_tc_rule_insert(post_act->priv, spec, post_attr);
- if (IS_ERR(handle->rule)) {
- err = PTR_ERR(handle->rule);
- netdev_warn(post_act->priv->netdev, "Failed to add post action rule");
- goto err_rule;
- }
handle->attr = post_attr;
- kvfree(spec);
return handle;
-err_rule:
- xa_erase(&post_act->ids, handle->id);
err_xarray:
kfree(post_attr);
- kvfree(spec);
kfree(handle);
return ERR_PTR(err);
}
void
+mlx5e_tc_post_act_unoffload(struct mlx5e_post_act *post_act,
+ struct mlx5e_post_act_handle *handle)
+{
+ mlx5e_tc_rule_unoffload(post_act->priv, handle->rule, handle->attr);
+ handle->rule = NULL;
+}
+
+void
mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_handle *handle)
{
- mlx5_tc_rule_delete(post_act->priv, handle->rule, handle->attr);
+ if (!IS_ERR_OR_NULL(handle->rule))
+ mlx5e_tc_post_act_unoffload(post_act, handle);
xa_erase(&post_act->ids, handle->id);
kfree(handle->attr);
kfree(handle);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h
index b530ec1981a5..f476774c0b75 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h
@@ -24,6 +24,14 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at
void
mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_handle *handle);
+int
+mlx5e_tc_post_act_offload(struct mlx5e_post_act *post_act,
+ struct mlx5e_post_act_handle *handle);
+
+void
+mlx5e_tc_post_act_unoffload(struct mlx5e_post_act *post_act,
+ struct mlx5e_post_act_handle *handle);
+
struct mlx5_flow_table *
mlx5e_tc_post_act_get_ft(struct mlx5e_post_act *post_act);
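With offload/unoffload split out of add/del, callers allocate the handle first, point the fte_id register rewrite at it, and only then install the rule. A caller-side sketch under those assumptions (mlx5e_tc_post_act_set_handle is the pre-existing helper referenced elsewhere in this series; error unwinding trimmed):

/* Caller-side sketch of the split add/offload lifecycle. */
static int example_post_act_use(struct mlx5e_post_act *post_act,
				struct mlx5_core_dev *mdev,
				struct mlx5_flow_attr *attr,
				struct mlx5e_tc_mod_hdr_acts *mod_acts)
{
	struct mlx5e_post_act_handle *handle;
	int err;

	handle = mlx5e_tc_post_act_add(post_act, attr);	/* allocates id + post attr only */
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* Rewrite the fte_id register so packets reach this handle's post_act entry. */
	err = mlx5e_tc_post_act_set_handle(mdev, handle, mod_acts);
	if (err)
		goto err_del;

	/* Now actually install the fte_id-matching rule. */
	err = mlx5e_tc_post_act_offload(post_act, handle);
	if (err)
		goto err_del;

	return 0;

err_del:
	/* del unoffloads a still-installed rule before freeing the handle. */
	mlx5e_tc_post_act_del(post_act, handle);
	return err;
}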
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
index ff4b4f8a5a9d..fd4504518578 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
@@ -5,6 +5,7 @@
#include <net/psample.h>
#include "en/mapping.h"
#include "en/tc/post_act.h"
+#include "en/tc/act/sample.h"
#include "en/mod_hdr.h"
#include "sample.h"
#include "eswitch.h"
@@ -46,14 +47,12 @@ struct mlx5e_sample_flow {
struct mlx5_flow_handle *pre_rule;
struct mlx5_flow_attr *post_attr;
struct mlx5_flow_handle *post_rule;
- struct mlx5e_post_act_handle *post_act_handle;
};
struct mlx5e_sample_restore {
struct hlist_node hlist;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_flow_handle *rule;
- struct mlx5e_post_act_handle *post_act_handle;
u32 obj_id;
int count;
};
@@ -231,69 +230,46 @@ sampler_put(struct mlx5e_tc_psample *tc_psample, struct mlx5e_sampler *sampler)
*/
static struct mlx5_modify_hdr *
sample_modify_hdr_get(struct mlx5_core_dev *mdev, u32 obj_id,
- struct mlx5e_post_act_handle *handle)
+ struct mlx5e_tc_mod_hdr_acts *mod_acts)
{
- struct mlx5e_tc_mod_hdr_acts mod_acts = {};
struct mlx5_modify_hdr *modify_hdr;
int err;
- err = mlx5e_tc_match_to_reg_set(mdev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
+ err = mlx5e_tc_match_to_reg_set(mdev, mod_acts, MLX5_FLOW_NAMESPACE_FDB,
CHAIN_TO_REG, obj_id);
if (err)
goto err_set_regc0;
- if (handle) {
- err = mlx5e_tc_post_act_set_handle(mdev, handle, &mod_acts);
- if (err)
- goto err_post_act;
- }
-
modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_FDB,
- mod_acts.num_actions,
- mod_acts.actions);
+ mod_acts->num_actions,
+ mod_acts->actions);
if (IS_ERR(modify_hdr)) {
err = PTR_ERR(modify_hdr);
goto err_modify_hdr;
}
- mlx5e_mod_hdr_dealloc(&mod_acts);
+ mlx5e_mod_hdr_dealloc(mod_acts);
return modify_hdr;
err_modify_hdr:
-err_post_act:
- mlx5e_mod_hdr_dealloc(&mod_acts);
+ mlx5e_mod_hdr_dealloc(mod_acts);
err_set_regc0:
return ERR_PTR(err);
}
-static u32
-restore_hash(u32 obj_id, struct mlx5e_post_act_handle *post_act_handle)
-{
- return jhash_2words(obj_id, hash32_ptr(post_act_handle), 0);
-}
-
-static bool
-restore_equal(struct mlx5e_sample_restore *restore, u32 obj_id,
- struct mlx5e_post_act_handle *post_act_handle)
-{
- return restore->obj_id == obj_id && restore->post_act_handle == post_act_handle;
-}
-
static struct mlx5e_sample_restore *
sample_restore_get(struct mlx5e_tc_psample *tc_psample, u32 obj_id,
- struct mlx5e_post_act_handle *post_act_handle)
+ struct mlx5e_tc_mod_hdr_acts *mod_acts)
{
struct mlx5_eswitch *esw = tc_psample->esw;
struct mlx5_core_dev *mdev = esw->dev;
struct mlx5e_sample_restore *restore;
struct mlx5_modify_hdr *modify_hdr;
- u32 hash_key;
int err;
mutex_lock(&tc_psample->restore_lock);
- hash_key = restore_hash(obj_id, post_act_handle);
- hash_for_each_possible(tc_psample->restore_hashtbl, restore, hlist, hash_key)
- if (restore_equal(restore, obj_id, post_act_handle))
+ hash_for_each_possible(tc_psample->restore_hashtbl, restore, hlist, obj_id)
+ if (restore->obj_id == obj_id)
goto add_ref;
restore = kzalloc(sizeof(*restore), GFP_KERNEL);
@@ -302,9 +278,8 @@ sample_restore_get(struct mlx5e_tc_psample *tc_psample, u32 obj_id,
goto err_alloc;
}
restore->obj_id = obj_id;
- restore->post_act_handle = post_act_handle;
- modify_hdr = sample_modify_hdr_get(mdev, obj_id, post_act_handle);
+ modify_hdr = sample_modify_hdr_get(mdev, obj_id, mod_acts);
if (IS_ERR(modify_hdr)) {
err = PTR_ERR(modify_hdr);
goto err_modify_hdr;
@@ -317,7 +292,7 @@ sample_restore_get(struct mlx5e_tc_psample *tc_psample, u32 obj_id,
goto err_restore;
}
- hash_add(tc_psample->restore_hashtbl, &restore->hlist, hash_key);
+ hash_add(tc_psample->restore_hashtbl, &restore->hlist, obj_id);
add_ref:
restore->count++;
mutex_unlock(&tc_psample->restore_lock);
@@ -403,7 +378,7 @@ add_post_rule(struct mlx5_eswitch *esw, struct mlx5e_sample_flow *sample_flow,
post_attr->chain = 0;
post_attr->prio = 0;
post_attr->ft = default_tbl;
- post_attr->flags = MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
+ post_attr->flags = MLX5_ATTR_FLAG_NO_IN_PORT;
/* When offloading sample and encap action, if there is no valid
* neigh data struct, a slow path rule is offloaded first. Source
@@ -492,16 +467,16 @@ del_post_rule(struct mlx5_eswitch *esw, struct mlx5e_sample_flow *sample_flow,
struct mlx5_flow_handle *
mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
struct mlx5_flow_spec *spec,
- struct mlx5_flow_attr *attr,
- u32 tunnel_id)
+ struct mlx5_flow_attr *attr)
{
- struct mlx5e_post_act_handle *post_act_handle = NULL;
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_esw_flow_attr *pre_esw_attr;
struct mlx5_mapped_obj restore_obj = {};
+ struct mlx5e_tc_mod_hdr_acts *mod_acts;
struct mlx5e_sample_flow *sample_flow;
struct mlx5e_sample_attr *sample_attr;
struct mlx5_flow_attr *pre_attr;
+ u32 tunnel_id = attr->tunnel_id;
struct mlx5_eswitch *esw;
u32 default_tbl_id;
u32 obj_id;
@@ -513,7 +488,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
sample_flow = kzalloc(sizeof(*sample_flow), GFP_KERNEL);
if (!sample_flow)
return ERR_PTR(-ENOMEM);
- sample_attr = attr->sample_attr;
+ sample_attr = &attr->sample_attr;
sample_attr->sample_flow = sample_flow;
/* For NICs with reg_c_preserve support or decap action, use
@@ -522,18 +497,11 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
* original flow table.
*/
esw = tc_psample->esw;
- if (MLX5_CAP_GEN(esw->dev, reg_c_preserve) ||
- attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
+ if (mlx5e_tc_act_sample_is_multi_table(esw->dev, attr)) {
struct mlx5_flow_table *ft;
ft = mlx5e_tc_post_act_get_ft(tc_psample->post_act);
default_tbl_id = ft->id;
- post_act_handle = mlx5e_tc_post_act_add(tc_psample->post_act, attr);
- if (IS_ERR(post_act_handle)) {
- err = PTR_ERR(post_act_handle);
- goto err_post_act;
- }
- sample_flow->post_act_handle = post_act_handle;
} else {
err = add_post_rule(esw, sample_flow, spec, attr, &default_tbl_id);
if (err)
@@ -546,6 +514,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
err = PTR_ERR(sample_flow->sampler);
goto err_sampler;
}
+ sample_attr->sampler_id = sample_flow->sampler->sampler_id;
/* Create an id mapping reg_c0 value to sample object. */
restore_obj.type = MLX5_MAPPED_OBJ_SAMPLE;
@@ -559,7 +528,8 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
sample_attr->restore_obj_id = obj_id;
/* Create sample restore context. */
- sample_flow->restore = sample_restore_get(tc_psample, obj_id, post_act_handle);
+ mod_acts = &attr->parse_attr->mod_hdr_acts;
+ sample_flow->restore = sample_restore_get(tc_psample, obj_id, mod_acts);
if (IS_ERR(sample_flow->restore)) {
err = PTR_ERR(sample_flow->restore);
goto err_sample_restore;
@@ -580,13 +550,13 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
if (tunnel_id)
pre_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
pre_attr->modify_hdr = sample_flow->restore->modify_hdr;
- pre_attr->flags = MLX5_ESW_ATTR_FLAG_SAMPLE;
+ pre_attr->flags = MLX5_ATTR_FLAG_SAMPLE;
pre_attr->inner_match_level = attr->inner_match_level;
pre_attr->outer_match_level = attr->outer_match_level;
pre_attr->chain = attr->chain;
pre_attr->prio = attr->prio;
- pre_attr->sample_attr = attr->sample_attr;
- sample_attr->sampler_id = sample_flow->sampler->sampler_id;
+ pre_attr->ft = attr->ft;
+ pre_attr->sample_attr = *sample_attr;
pre_esw_attr = pre_attr->esw_attr;
pre_esw_attr->in_mdev = esw_attr->in_mdev;
pre_esw_attr->in_rep = esw_attr->in_rep;
@@ -611,9 +581,6 @@ err_sampler:
if (sample_flow->post_rule)
del_post_rule(esw, sample_flow, attr);
err_post_rule:
- if (post_act_handle)
- mlx5e_tc_post_act_del(tc_psample->post_act, post_act_handle);
-err_post_act:
kfree(sample_flow);
return ERR_PTR(err);
}
@@ -633,15 +600,13 @@ mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample,
* will hit fw syndromes.
*/
esw = tc_psample->esw;
- sample_flow = attr->sample_attr->sample_flow;
+ sample_flow = attr->sample_attr.sample_flow;
mlx5_eswitch_del_offloaded_rule(esw, sample_flow->pre_rule, sample_flow->pre_attr);
sample_restore_put(tc_psample, sample_flow->restore);
- mapping_remove(esw->offloads.reg_c0_obj_pool, attr->sample_attr->restore_obj_id);
+ mapping_remove(esw->offloads.reg_c0_obj_pool, attr->sample_attr.restore_obj_id);
sampler_put(tc_psample, sample_flow->sampler);
- if (sample_flow->post_act_handle)
- mlx5e_tc_post_act_del(tc_psample->post_act, sample_flow->post_act_handle);
- else
+ if (sample_flow->post_rule)
del_post_rule(esw, sample_flow, attr);
kfree(sample_flow->pre_attr);
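Since tunnel_id now rides in attr->tunnel_id and sample_attr is embedded in the flow attr, sample offload takes only the psample context, the spec and the attr. A hedged caller sketch (the surrounding flow bookkeeping is assumed):

/* Caller-side sketch; tc_psample, spec and attr come from the normal parse path. */
static int example_offload_sample_flow(struct mlx5e_tc_psample *tc_psample,
				       struct mlx5_flow_spec *spec,
				       struct mlx5_flow_attr *attr, u32 tunnel_id)
{
	struct mlx5_flow_handle *rule;

	attr->tunnel_id = tunnel_id;	/* previously passed as a separate argument */
	rule = mlx5e_tc_sample_offload(tc_psample, spec, attr);
	if (IS_ERR(rule))
		return PTR_ERR(rule);

	/* rule is the pre-sample rule; the post/default path is managed internally. */
	return 0;
}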
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
index 9ef8a49d7801..a569367eae4d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
@@ -26,8 +26,7 @@ void mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj
struct mlx5_flow_handle *
mlx5e_tc_sample_offload(struct mlx5e_tc_psample *sample_priv,
struct mlx5_flow_spec *spec,
- struct mlx5_flow_attr *attr,
- u32 tunnel_id);
+ struct mlx5_flow_attr *attr);
void
mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *sample_priv,
@@ -45,8 +44,7 @@ mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample);
static inline struct mlx5_flow_handle *
mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
struct mlx5_flow_spec *spec,
- struct mlx5_flow_attr *attr,
- u32 tunnel_id)
+ struct mlx5_flow_attr *attr)
{ return ERR_PTR(-EOPNOTSUPP); }
static inline void
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 4a0d38d219ed..e49f51124c74 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -18,15 +18,16 @@
#include "lib/fs_chains.h"
#include "en/tc_ct.h"
+#include "en/tc/ct_fs.h"
+#include "en/tc_priv.h"
#include "en/mod_hdr.h"
#include "en/mapping.h"
#include "en/tc/post_act.h"
#include "en.h"
#include "en_tc.h"
#include "en_rep.h"
+#include "fs_core.h"
-#define MLX5_CT_ZONE_BITS (mlx5e_tc_attr_to_reg_mappings[ZONE_TO_REG].mlen)
-#define MLX5_CT_ZONE_MASK GENMASK(MLX5_CT_ZONE_BITS - 1, 0)
#define MLX5_CT_STATE_ESTABLISHED_BIT BIT(1)
#define MLX5_CT_STATE_TRK_BIT BIT(2)
#define MLX5_CT_STATE_NAT_BIT BIT(3)
@@ -62,19 +63,20 @@ struct mlx5_tc_ct_priv {
struct mapping_ctx *labels_mapping;
enum mlx5_flow_namespace_type ns_type;
struct mlx5_fs_chains *chains;
+ struct mlx5_ct_fs *fs;
+ struct mlx5_ct_fs_ops *fs_ops;
spinlock_t ht_lock; /* protects ft entries */
};
struct mlx5_ct_flow {
struct mlx5_flow_attr *pre_ct_attr;
struct mlx5_flow_handle *pre_ct_rule;
- struct mlx5e_post_act_handle *post_act_handle;
struct mlx5_ct_ft *ft;
u32 chain_mapping;
};
struct mlx5_ct_zone_rule {
- struct mlx5_flow_handle *rule;
+ struct mlx5_ct_fs_rule *rule;
struct mlx5e_mod_hdr_handle *mh;
struct mlx5_flow_attr *attr;
bool nat;
@@ -258,7 +260,8 @@ mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
return -EOPNOTSUPP;
}
} else {
- return -EOPNOTSUPP;
+ if (tuple->ip_proto != IPPROTO_GRE)
+ return -EOPNOTSUPP;
}
return 0;
@@ -505,7 +508,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
ct_dbg("Deleting ct entry rule in zone %d", entry->tuple.zone);
- mlx5_tc_rule_delete(netdev_priv(ct_priv->netdev), zone_rule->rule, attr);
+ ct_priv->fs_ops->ct_rule_del(ct_priv->fs, zone_rule->rule);
mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, zone_rule->mh);
mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
kfree(attr);
@@ -807,16 +810,20 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
attr->dest_chain = 0;
attr->dest_ft = mlx5e_tc_post_act_get_ft(ct_priv->post_act);
attr->ft = nat ? ct_priv->ct_nat : ct_priv->ct;
- attr->outer_match_level = MLX5_MATCH_L4;
+ if (entry->tuple.ip_proto == IPPROTO_TCP ||
+ entry->tuple.ip_proto == IPPROTO_UDP)
+ attr->outer_match_level = MLX5_MATCH_L4;
+ else
+ attr->outer_match_level = MLX5_MATCH_L3;
attr->counter = entry->counter->counter;
- attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
+ attr->flags |= MLX5_ATTR_FLAG_NO_IN_PORT;
if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB)
attr->esw_attr->in_mdev = priv->mdev;
mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule);
mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
- zone_rule->rule = mlx5_tc_rule_insert(priv, spec, attr);
+ zone_rule->rule = ct_priv->fs_ops->ct_rule_add(ct_priv->fs, spec, attr, flow_rule);
if (IS_ERR(zone_rule->rule)) {
err = PTR_ERR(zone_rule->rule);
ct_dbg("Failed to add ct entry rule, nat: %d", nat);
@@ -1154,7 +1161,6 @@ mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft,
}
rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
- mlx5_tc_ct_entry_remove_from_tuples(entry);
spin_unlock_bh(&ct_priv->ht_lock);
mlx5_tc_ct_entry_put(entry);
@@ -1224,16 +1230,20 @@ mlx5_tc_ct_skb_to_tuple(struct sk_buff *skb, struct mlx5_ct_tuple *tuple,
struct flow_keys flow_keys;
skb_reset_network_header(skb);
- skb_flow_dissect_flow_keys(skb, &flow_keys, 0);
+ skb_flow_dissect_flow_keys(skb, &flow_keys, FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);
tuple->zone = zone;
if (flow_keys.basic.ip_proto != IPPROTO_TCP &&
- flow_keys.basic.ip_proto != IPPROTO_UDP)
+ flow_keys.basic.ip_proto != IPPROTO_UDP &&
+ flow_keys.basic.ip_proto != IPPROTO_GRE)
return false;
- tuple->port.src = flow_keys.ports.src;
- tuple->port.dst = flow_keys.ports.dst;
+ if (flow_keys.basic.ip_proto == IPPROTO_TCP ||
+ flow_keys.basic.ip_proto == IPPROTO_UDP) {
+ tuple->port.src = flow_keys.ports.src;
+ tuple->port.dst = flow_keys.ports.dst;
+ }
tuple->n_proto = flow_keys.basic.n_proto;
tuple->ip_proto = flow_keys.basic.ip_proto;
@@ -1756,7 +1766,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
/* We translate the tc filter with CT action to the following HW model:
*
* +---------------------+
- * + ft prio (tc chain) +
+ * + ft prio (tc chain) +
* + original match +
* +---------------------+
* | set chain miss mapping
@@ -1766,7 +1776,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
* v
* +---------------------+
* + pre_ct/pre_ct_nat + if matches +-------------------------+
- * + zone+nat match +---------------->+ post_act (see below) +
+ * + zone+nat match +---------------->+ post_act (see below) +
* +---------------------+ set zone +-------------------------+
* | set zone
* v
@@ -1781,21 +1791,19 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
* | do nat (if needed)
* v
* +--------------+
- * + post_act + original filter actions
+ * + post_act + original filter actions
* + fte_id match +------------------------>
* +--------------+
*/
static struct mlx5_flow_handle *
__mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
- struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *orig_spec,
struct mlx5_flow_attr *attr)
{
bool nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;
struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
- struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {};
+ struct mlx5e_tc_mod_hdr_acts *pre_mod_acts;
u32 attr_sz = ns_to_attr_sz(ct_priv->ns_type);
- struct mlx5e_post_act_handle *handle;
struct mlx5_flow_attr *pre_ct_attr;
struct mlx5_modify_hdr *mod_hdr;
struct mlx5_ct_flow *ct_flow;
@@ -1818,14 +1826,6 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
}
ct_flow->ft = ft;
- handle = mlx5e_tc_post_act_add(ct_priv->post_act, attr);
- if (IS_ERR(handle)) {
- err = PTR_ERR(handle);
- ct_dbg("Failed to allocate post action handle");
- goto err_post_act_handle;
- }
- ct_flow->post_act_handle = handle;
-
/* Base flow attributes of both rules on original rule attribute */
ct_flow->pre_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
if (!ct_flow->pre_ct_attr) {
@@ -1835,6 +1835,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
pre_ct_attr = ct_flow->pre_ct_attr;
memcpy(pre_ct_attr, attr, attr_sz);
+ pre_mod_acts = &pre_ct_attr->parse_attr->mod_hdr_acts;
/* Modify the original rule's action to fwd and modify, leave decap */
pre_ct_attr->action = attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP;
@@ -1853,30 +1854,22 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
}
ct_flow->chain_mapping = chain_mapping;
- err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts, ct_priv->ns_type,
+ err = mlx5e_tc_match_to_reg_set(priv->mdev, pre_mod_acts, ct_priv->ns_type,
CHAIN_TO_REG, chain_mapping);
if (err) {
ct_dbg("Failed to set chain register mapping");
goto err_mapping;
}
- err = mlx5e_tc_post_act_set_handle(priv->mdev, handle, &pre_mod_acts);
- if (err) {
- ct_dbg("Failed to set post action handle");
- goto err_mapping;
- }
-
/* If original flow is decap, we do it before going into ct table
* so add a rewrite for the tunnel match_id.
*/
if ((pre_ct_attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
attr->chain == 0) {
- u32 tun_id = mlx5e_tc_get_flow_tun_id(flow);
-
- err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts,
+ err = mlx5e_tc_match_to_reg_set(priv->mdev, pre_mod_acts,
ct_priv->ns_type,
TUNNEL_TO_REG,
- tun_id);
+ attr->tunnel_id);
if (err) {
ct_dbg("Failed to set tunnel register mapping");
goto err_mapping;
@@ -1884,8 +1877,8 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
}
mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type,
- pre_mod_acts.num_actions,
- pre_mod_acts.actions);
+ pre_mod_acts->num_actions,
+ pre_mod_acts->actions);
if (IS_ERR(mod_hdr)) {
err = PTR_ERR(mod_hdr);
ct_dbg("Failed to create pre ct mod hdr");
@@ -1905,20 +1898,18 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
}
attr->ct_attr.ct_flow = ct_flow;
- mlx5e_mod_hdr_dealloc(&pre_mod_acts);
+ mlx5e_mod_hdr_dealloc(pre_mod_acts);
return ct_flow->pre_ct_rule;
err_insert_orig:
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
err_mapping:
- mlx5e_mod_hdr_dealloc(&pre_mod_acts);
+ mlx5e_mod_hdr_dealloc(pre_mod_acts);
mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
err_get_chain:
kfree(ct_flow->pre_ct_attr);
err_alloc_pre:
- mlx5e_tc_post_act_del(ct_priv->post_act, handle);
-err_post_act_handle:
mlx5_tc_ct_del_ft_cb(ct_priv, ft);
err_ft:
kfree(ct_flow);
@@ -1926,87 +1917,19 @@ err_ft:
return ERR_PTR(err);
}
-static struct mlx5_flow_handle *
-__mlx5_tc_ct_flow_offload_clear(struct mlx5_tc_ct_priv *ct_priv,
- struct mlx5_flow_spec *orig_spec,
- struct mlx5_flow_attr *attr,
- struct mlx5e_tc_mod_hdr_acts *mod_acts)
-{
- struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
- u32 attr_sz = ns_to_attr_sz(ct_priv->ns_type);
- struct mlx5_flow_attr *pre_ct_attr;
- struct mlx5_modify_hdr *mod_hdr;
- struct mlx5_flow_handle *rule;
- struct mlx5_ct_flow *ct_flow;
- int err;
-
- ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL);
- if (!ct_flow)
- return ERR_PTR(-ENOMEM);
-
- /* Base esw attributes on original rule attribute */
- pre_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
- if (!pre_ct_attr) {
- err = -ENOMEM;
- goto err_attr;
- }
-
- memcpy(pre_ct_attr, attr, attr_sz);
-
- mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type,
- mod_acts->num_actions,
- mod_acts->actions);
- if (IS_ERR(mod_hdr)) {
- err = PTR_ERR(mod_hdr);
- ct_dbg("Failed to add create ct clear mod hdr");
- goto err_mod_hdr;
- }
-
- pre_ct_attr->modify_hdr = mod_hdr;
-
- rule = mlx5_tc_rule_insert(priv, orig_spec, pre_ct_attr);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- ct_dbg("Failed to add ct clear rule");
- goto err_insert;
- }
-
- attr->ct_attr.ct_flow = ct_flow;
- ct_flow->pre_ct_attr = pre_ct_attr;
- ct_flow->pre_ct_rule = rule;
- return rule;
-
-err_insert:
- mlx5_modify_header_dealloc(priv->mdev, mod_hdr);
-err_mod_hdr:
- netdev_warn(priv->netdev,
- "Failed to offload ct clear flow, err %d\n", err);
- kfree(pre_ct_attr);
-err_attr:
- kfree(ct_flow);
-
- return ERR_PTR(err);
-}
-
struct mlx5_flow_handle *
mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
- struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
{
- bool clear_action = attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
struct mlx5_flow_handle *rule;
if (!priv)
return ERR_PTR(-EOPNOTSUPP);
mutex_lock(&priv->control_lock);
-
- if (clear_action)
- rule = __mlx5_tc_ct_flow_offload_clear(priv, spec, attr, mod_hdr_acts);
- else
- rule = __mlx5_tc_ct_flow_offload(priv, flow, spec, attr);
+ rule = __mlx5_tc_ct_flow_offload(priv, spec, attr);
mutex_unlock(&priv->control_lock);
return rule;
@@ -2014,21 +1937,17 @@ mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
static void
__mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv,
- struct mlx5e_tc_flow *flow,
- struct mlx5_ct_flow *ct_flow)
+ struct mlx5_ct_flow *ct_flow,
+ struct mlx5_flow_attr *attr)
{
struct mlx5_flow_attr *pre_ct_attr = ct_flow->pre_ct_attr;
struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
- mlx5_tc_rule_delete(priv, ct_flow->pre_ct_rule,
- pre_ct_attr);
+ mlx5_tc_rule_delete(priv, ct_flow->pre_ct_rule, pre_ct_attr);
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
- if (ct_flow->post_act_handle) {
- mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
- mlx5e_tc_post_act_del(ct_priv->post_act, ct_flow->post_act_handle);
- mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft);
- }
+ mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
+ mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft);
kfree(ct_flow->pre_ct_attr);
kfree(ct_flow);
@@ -2036,7 +1955,6 @@ __mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv,
void
mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
- struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr)
{
struct mlx5_ct_flow *ct_flow = attr->ct_attr.ct_flow;
@@ -2048,11 +1966,43 @@ mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
return;
mutex_lock(&priv->control_lock);
- __mlx5_tc_ct_delete_flow(priv, flow, ct_flow);
+ __mlx5_tc_ct_delete_flow(priv, ct_flow, attr);
mutex_unlock(&priv->control_lock);
}
static int
+mlx5_tc_ct_fs_init(struct mlx5_tc_ct_priv *ct_priv)
+{
+ struct mlx5_flow_table *post_ct = mlx5e_tc_post_act_get_ft(ct_priv->post_act);
+ struct mlx5_ct_fs_ops *fs_ops = mlx5_ct_fs_dmfs_ops_get();
+ int err;
+
+ if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB &&
+ ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS) {
+ ct_dbg("Using SMFS ct flow steering provider");
+ fs_ops = mlx5_ct_fs_smfs_ops_get();
+ }
+
+ ct_priv->fs = kzalloc(sizeof(*ct_priv->fs) + fs_ops->priv_size, GFP_KERNEL);
+ if (!ct_priv->fs)
+ return -ENOMEM;
+
+ ct_priv->fs->netdev = ct_priv->netdev;
+ ct_priv->fs->dev = ct_priv->dev;
+ ct_priv->fs_ops = fs_ops;
+
+ err = ct_priv->fs_ops->init(ct_priv->fs, ct_priv->ct, ct_priv->ct_nat, post_ct);
+ if (err)
+ goto err_init;
+
+ return 0;
+
+err_init:
+ kfree(ct_priv->fs);
+ return err;
+}
+
+static int
mlx5_tc_ct_init_check_esw_support(struct mlx5_eswitch *esw,
const char **err_msg)
{
@@ -2190,8 +2140,14 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
if (rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params))
goto err_ct_tuples_nat_ht;
+ err = mlx5_tc_ct_fs_init(ct_priv);
+ if (err)
+ goto err_init_fs;
+
return ct_priv;
+err_init_fs:
+ rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
err_ct_tuples_nat_ht:
rhashtable_destroy(&ct_priv->ct_tuples_ht);
err_ct_tuples_ht:
@@ -2222,6 +2178,9 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
chains = ct_priv->chains;
+ ct_priv->fs_ops->destroy(ct_priv->fs);
+ kfree(ct_priv->fs);
+
mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
mlx5_chains_destroy_global_table(chains, ct_priv->ct);
mapping_destroy(ct_priv->zone_mapping);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index 99662af1e41a..36d3652bf829 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -86,6 +86,8 @@ struct mlx5_ct_attr {
#define REG_MAPPING_MLEN(reg) (mlx5e_tc_attr_to_reg_mappings[reg].mlen)
#define REG_MAPPING_MOFFSET(reg) (mlx5e_tc_attr_to_reg_mappings[reg].moffset)
+#define MLX5_CT_ZONE_BITS (mlx5e_tc_attr_to_reg_mappings[ZONE_TO_REG].mlen)
+#define MLX5_CT_ZONE_MASK GENMASK(MLX5_CT_ZONE_BITS - 1, 0)
#if IS_ENABLED(CONFIG_MLX5_TC_CT)
@@ -116,13 +118,11 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_handle *
mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
- struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
void
mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
- struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr);
bool
@@ -183,7 +183,6 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
static inline struct mlx5_flow_handle *
mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
- struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
@@ -193,7 +192,6 @@ mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
static inline void
mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
- struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr)
{
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index 70b40ae384e4..3b74a6fd5c43 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -38,9 +38,9 @@ struct mlx5e_tc_flow_parse_attr {
struct mlx5e_mpls_info mpls_info[MLX5_MAX_FLOW_FWD_VPORTS];
struct net_device *filter_dev;
struct mlx5_flow_spec spec;
+ struct pedit_headers_action hdrs[__PEDIT_CMD_MAX];
struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
- struct ethhdr eth;
struct mlx5e_tc_act_parse_state parse_state;
};
@@ -108,10 +108,20 @@ struct mlx5e_tc_flow {
struct rcu_head rcu_head;
struct completion init_done;
struct completion del_hw_done;
- int tunnel_id; /* the mapped tunnel id of this flow */
struct mlx5_flow_attr *attr;
+ struct list_head attrs;
};
+struct mlx5_flow_handle *
+mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr);
+
+void
+mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr);
+
u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer);
struct mlx5_flow_handle *
@@ -120,6 +130,12 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr);
+struct mlx5_flow_attr *
+mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow);
+
+void mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow);
+int mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow);
+
bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow);
@@ -174,6 +190,7 @@ struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec);
+
void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index d39d0dae22fc..5aff97914367 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -173,19 +173,29 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
list_for_each_entry(flow, flow_list, tmp_list) {
if (!mlx5e_is_offloaded_flow(flow) || !flow_flag_test(flow, SLOW))
continue;
- attr = flow->attr;
- esw_attr = attr->esw_attr;
- spec = &attr->parse_attr->spec;
+ spec = &flow->attr->parse_attr->spec;
+
+ attr = mlx5e_tc_get_encap_attr(flow);
+ esw_attr = attr->esw_attr;
esw_attr->dests[flow->tmp_entry_index].pkt_reformat = e->pkt_reformat;
esw_attr->dests[flow->tmp_entry_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
/* Do not offload flows with unresolved neighbors */
if (!mlx5e_tc_flow_all_encaps_valid(esw_attr))
continue;
+
+ err = mlx5e_tc_offload_flow_post_acts(flow);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "Failed to update flow post acts, %d\n",
+ err);
+ continue;
+ }
+
/* update from slow path rule to encap rule */
- rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, flow->attr);
if (IS_ERR(rule)) {
+ mlx5e_tc_unoffload_flow_post_acts(flow);
err = PTR_ERR(rule);
mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
err);
@@ -214,12 +224,13 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
list_for_each_entry(flow, flow_list, tmp_list) {
if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW))
continue;
- attr = flow->attr;
- esw_attr = attr->esw_attr;
- spec = &attr->parse_attr->spec;
+ spec = &flow->attr->parse_attr->spec;
/* update from encap rule to slow path rule */
rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
+
+ attr = mlx5e_tc_get_encap_attr(flow);
+ esw_attr = attr->esw_attr;
/* mark the flow's encap dest as non-valid */
esw_attr->dests[flow->tmp_entry_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
@@ -230,7 +241,8 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
continue;
}
- mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
+ mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr);
+ mlx5e_tc_unoffload_flow_post_acts(flow);
flow->rule[0] = rule;
/* was unset when fast path rule removed */
flow_flag_set(flow, OFFLOADED);
@@ -488,12 +500,17 @@ static void mlx5e_detach_encap_route(struct mlx5e_priv *priv,
int out_index);
void mlx5e_detach_encap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow, int out_index)
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ int out_index)
{
struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- if (flow->attr->esw_attr->dests[out_index].flags &
+ if (!mlx5e_is_eswitch_flow(flow))
+ return;
+
+ if (attr->esw_attr->dests[out_index].flags &
MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
mlx5e_detach_encap_route(priv, flow, out_index);
@@ -733,6 +750,7 @@ static unsigned int mlx5e_route_tbl_get_last_update(struct mlx5e_priv *priv)
static int mlx5e_attach_encap_route(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
struct mlx5e_encap_entry *e,
bool new_encap_entry,
unsigned long tbl_time_before,
@@ -740,6 +758,7 @@ static int mlx5e_attach_encap_route(struct mlx5e_priv *priv,
int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
struct net_device *mirred_dev,
int out_index,
struct netlink_ext_ack *extack,
@@ -748,7 +767,6 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct mlx5_flow_attr *attr = flow->attr;
const struct ip_tunnel_info *tun_info;
const struct mlx5e_mpls_info *mpls_info;
unsigned long tbl_time_before = 0;
@@ -837,8 +855,8 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
e->compl_result = 1;
attach_flow:
- err = mlx5e_attach_encap_route(priv, flow, e, entry_created, tbl_time_before,
- out_index);
+ err = mlx5e_attach_encap_route(priv, flow, attr, e, entry_created,
+ tbl_time_before, out_index);
if (err)
goto out_err;
@@ -888,20 +906,18 @@ int mlx5e_attach_decap(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
struct mlx5_pkt_reformat_params reformat_params;
- struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_decap_entry *d;
struct mlx5e_decap_key key;
uintptr_t hash_key;
int err = 0;
- parse_attr = flow->attr->parse_attr;
- if (sizeof(parse_attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) {
+ if (sizeof(attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) {
NL_SET_ERR_MSG_MOD(extack,
"encap header larger than max supported");
return -EOPNOTSUPP;
}
- key.key = parse_attr->eth;
+ key.key = attr->eth;
hash_key = hash_decap_info(&key);
mutex_lock(&esw->offloads.decap_tbl_lock);
d = mlx5e_decap_get(priv, &key, hash_key);
@@ -931,8 +947,8 @@ int mlx5e_attach_decap(struct mlx5e_priv *priv,
memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
- reformat_params.size = sizeof(parse_attr->eth);
- reformat_params.data = &parse_attr->eth;
+ reformat_params.size = sizeof(attr->eth);
+ reformat_params.data = &attr->eth;
d->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
&reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
@@ -1201,6 +1217,7 @@ out:
static int mlx5e_attach_encap_route(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
struct mlx5e_encap_entry *e,
bool new_encap_entry,
unsigned long tbl_time_before,
@@ -1209,7 +1226,6 @@ static int mlx5e_attach_encap_route(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
unsigned long tbl_time_after = tbl_time_before;
struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct mlx5_flow_attr *attr = flow->attr;
const struct ip_tunnel_info *tun_info;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5e_route_entry *r;
@@ -1360,17 +1376,19 @@ static void mlx5e_reoffload_encap(struct mlx5e_priv *priv,
list_for_each_entry(flow, encap_flows, tmp_list) {
struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_flow_handle *rule;
+ struct mlx5_flow_attr *attr;
struct mlx5_flow_spec *spec;
if (flow_flag_test(flow, FAILED))
continue;
+ spec = &flow->attr->parse_attr->spec;
+
+ attr = mlx5e_tc_get_encap_attr(flow);
esw_attr = attr->esw_attr;
parse_attr = attr->parse_attr;
- spec = &parse_attr->spec;
err = mlx5e_update_vf_tunnel(esw, esw_attr, &parse_attr->mod_hdr_acts,
e->out_dev, e->route_dev_ifindex,
@@ -1380,7 +1398,7 @@ static void mlx5e_reoffload_encap(struct mlx5e_priv *priv,
continue;
}
- err = mlx5e_tc_add_flow_mod_hdr(priv, parse_attr, flow);
+ err = mlx5e_tc_add_flow_mod_hdr(priv, flow, attr);
if (err) {
mlx5_core_warn(priv->mdev, "Failed to update flow mod_hdr err=%d",
err);
@@ -1392,9 +1410,18 @@ static void mlx5e_reoffload_encap(struct mlx5e_priv *priv,
esw_attr->dests[flow->tmp_entry_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
if (!mlx5e_tc_flow_all_encaps_valid(esw_attr))
goto offload_to_slow_path;
+
+ err = mlx5e_tc_offload_flow_post_acts(flow);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "Failed to update flow post acts, %d\n",
+ err);
+ goto offload_to_slow_path;
+ }
+
/* update from slow path rule to encap rule */
- rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, flow->attr);
if (IS_ERR(rule)) {
+ mlx5e_tc_unoffload_flow_post_acts(flow);
err = PTR_ERR(rule);
mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
index 3391504d9a08..d542b8476491 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
@@ -7,15 +7,19 @@
#include "tc_priv.h"
void mlx5e_detach_encap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow, int out_index);
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ int out_index);
int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
struct net_device *mirred_dev,
int out_index,
struct netlink_ext_ack *extack,
struct net_device **encap_dev,
bool *encap_valid);
+
int mlx5e_attach_decap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index b789af07829c..c208ea307bff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -9,19 +9,6 @@
#define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
-/* The mult of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS
- * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment.
- * We use a bound lower that MLX5_SEND_WQE_MAX_WQEBBS to let a
- * full-session WQE be cache-aligned.
- */
-#if L1_CACHE_BYTES < 128
-#define MLX5E_TX_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
-#else
-#define MLX5E_TX_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
-#endif
-
-#define MLX5E_TX_MPW_MAX_NUM_DS (MLX5E_TX_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS)
-
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
@@ -57,10 +44,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget);
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
/* RX */
-void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
-void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info,
- bool recycle);
+void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page);
+void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle);
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
@@ -68,8 +53,6 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
/* TX */
-u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
@@ -308,9 +291,9 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more);
void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);
-static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
+static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
{
- return session->ds_count == MLX5E_TX_MPW_MAX_NUM_DS;
+ return session->ds_count == max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;
}
static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
@@ -431,10 +414,10 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
}
}
-static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size)
-{
- BUILD_BUG_ON(PAGE_SIZE / MLX5_SEND_WQE_BB < MLX5_SEND_WQE_MAX_WQEBBS);
+#define MLX5E_STOP_ROOM(wqebbs) ((wqebbs) * 2 - 1)
+static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
+{
/* A WQE must not cross the page boundary, hence two conditions:
* 1. Its size must not exceed the page size.
* 2. If the WQE size is X, and the space remaining in a page is less
@@ -443,18 +426,28 @@ static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size)
* stop room of X-1 + X.
* WQE size is also limited by the hardware limit.
*/
+ WARN_ONCE(wqe_size > mlx5e_get_max_sq_wqebbs(mdev),
+ "wqe_size %u is greater than max SQ WQEBBs %u",
+ wqe_size, mlx5e_get_max_sq_wqebbs(mdev));
- if (__builtin_constant_p(wqe_size))
- BUILD_BUG_ON(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS);
- else
- WARN_ON_ONCE(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS);
- return wqe_size * 2 - 1;
+ return MLX5E_STOP_ROOM(wqe_size);
+}
+
+static inline u16 mlx5e_stop_room_for_max_wqe(struct mlx5_core_dev *mdev)
+{
+ return MLX5E_STOP_ROOM(mlx5e_get_max_sq_wqebbs(mdev));
}
static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
{
- u16 room = sq->reserved_room + mlx5e_stop_room_for_wqe(wqe_size);
+ u16 room = sq->reserved_room;
+
+ WARN_ONCE(wqe_size > sq->max_sq_wqebbs,
+ "wqe_size %u is greater than max SQ WQEBBs %u",
+ wqe_size, sq->max_sq_wqebbs);
+
+ room += MLX5E_STOP_ROOM(wqe_size);
return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room);
}
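
The stop-room rule described in the comment above (a WQE of X WQEBBs may be preceded by up to X-1 filler NOPs when it would otherwise cross a page boundary, so X-1 + X = 2*X - 1 WQEBBs must stay free) is exactly what the new MLX5E_STOP_ROOM() macro encodes. The following is a minimal stand-alone sketch of that arithmetic, not part of the patch; only the macro is copied from the hunk above, everything else is illustrative user-space code.

	/* Illustrative sketch of the stop-room arithmetic; the macro is
	 * copied from the hunk above, the rest is a stand-alone example.
	 */
	#include <stdio.h>

	#define MLX5E_STOP_ROOM(wqebbs) ((wqebbs) * 2 - 1)

	int main(void)
	{
		unsigned int wqe_size;

		/* A WQE of wqe_size WQEBBs may need up to wqe_size - 1 NOP
		 * WQEBBs of padding at the end of a page, plus wqe_size for
		 * the WQE itself.
		 */
		for (wqe_size = 1; wqe_size <= 4; wqe_size++)
			printf("wqe_size=%u WQEBBs -> stop room=%u WQEBBs\n",
			       wqe_size, MLX5E_STOP_ROOM(wqe_size));
		return 0;
	}
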
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 56e10c84a706..8f321a6c0809 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -57,12 +57,14 @@ int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
static inline bool
mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
- struct mlx5e_dma_info *di, struct xdp_buff *xdp)
+ struct page *page, struct xdp_buff *xdp)
{
+ struct skb_shared_info *sinfo = NULL;
struct mlx5e_xmit_data xdptxd;
struct mlx5e_xdp_info xdpi;
struct xdp_frame *xdpf;
dma_addr_t dma_addr;
+ int i;
xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
@@ -96,46 +98,77 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
xdptxd.dma_addr = dma_addr;
xdpi.frame.xdpf = xdpf;
xdpi.frame.dma_addr = dma_addr;
- } else {
- /* Driver assumes that xdp_convert_buff_to_frame returns
- * an xdp_frame that points to the same memory region as
- * the original xdp_buff. It allows to map the memory only
- * once and to use the DMA_BIDIRECTIONAL mode.
- */
- xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
+ if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
+ mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL, 0)))
+ return false;
+
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
+ return true;
+ }
+
+ /* Driver assumes that xdp_convert_buff_to_frame returns an xdp_frame
+ * that points to the same memory region as the original xdp_buff. It
+ * allows to map the memory only once and to use the DMA_BIDIRECTIONAL
+ * mode.
+ */
+
+ xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
+ xdpi.page.rq = rq;
+
+ dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf);
+ dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_TO_DEVICE);
+
+ if (unlikely(xdp_frame_has_frags(xdpf))) {
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
- dma_addr = di->addr + (xdpf->data - (void *)xdpf);
- dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len,
- DMA_TO_DEVICE);
+ for (i = 0; i < sinfo->nr_frags; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+ dma_addr_t addr;
+ u32 len;
- xdptxd.dma_addr = dma_addr;
- xdpi.page.rq = rq;
- xdpi.page.di = *di;
+ addr = page_pool_get_dma_addr(skb_frag_page(frag)) +
+ skb_frag_off(frag);
+ len = skb_frag_size(frag);
+ dma_sync_single_for_device(sq->pdev, addr, len,
+ DMA_TO_DEVICE);
+ }
}
- return INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
- mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, 0);
+ xdptxd.dma_addr = dma_addr;
+
+ if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
+ mlx5e_xmit_xdp_frame, sq, &xdptxd, sinfo, 0)))
+ return false;
+
+ xdpi.page.page = page;
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
+
+ if (unlikely(xdp_frame_has_frags(xdpf))) {
+ for (i = 0; i < sinfo->nr_frags; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+
+ xdpi.page.page = skb_frag_page(frag);
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
+ }
+ }
+
+ return true;
}
/* returns true if packet was consumed by xdp */
-bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
- u32 *len, struct xdp_buff *xdp)
+bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
+ struct bpf_prog *prog, struct xdp_buff *xdp)
{
- struct bpf_prog *prog = rcu_dereference(rq->xdp_prog);
u32 act;
int err;
- if (!prog)
- return false;
-
act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
case XDP_PASS:
- *len = xdp->data_end - xdp->data;
return false;
case XDP_TX:
- if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, di, xdp)))
+ if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, page, xdp)))
goto xdp_abort;
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
return true;
@@ -147,7 +180,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
__set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
if (xdp->rxq->mem.type != MEM_TYPE_XSK_BUFF_POOL)
- mlx5e_page_dma_unmap(rq, di);
+ mlx5e_page_dma_unmap(rq, page);
rq->stats->xdp_redirect++;
return true;
default:
@@ -199,7 +232,7 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
struct mlx5e_tx_wqe *wqe;
u16 pi;
- pi = mlx5e_xdpsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
+ pi = mlx5e_xdpsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs);
wqe = MLX5E_TX_FETCH_WQE(sq, pi);
net_prefetchw(wqe->data);
@@ -245,10 +278,8 @@ enum {
INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
{
if (unlikely(!sq->mpwqe.wqe)) {
- const u16 stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
-
if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
- stop_room))) {
+ sq->stop_room))) {
/* SQ is full, ring doorbell */
mlx5e_xmit_xdp_doorbell(sq);
sq->stats->full++;
@@ -262,12 +293,26 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq
}
INDIRECT_CALLABLE_SCOPE bool
+mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
+ struct skb_shared_info *sinfo, int check_result);
+
+INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
- struct mlx5e_xdp_info *xdpi, int check_result)
+ struct skb_shared_info *sinfo, int check_result)
{
struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
struct mlx5e_xdpsq_stats *stats = sq->stats;
+ if (unlikely(sinfo)) {
+ /* MPWQE is enabled, but a multi-buffer packet is queued for
+ * transmission. MPWQE can't send fragmented packets, so close
+ * the current session and fall back to a regular WQE.
+ */
+ if (unlikely(sq->mpwqe.wqe))
+ mlx5e_xdp_mpwqe_complete(sq);
+ return mlx5e_xmit_xdp_frame(sq, xdptxd, sinfo, 0);
+ }
+
if (unlikely(xdptxd->len > sq->hw_mtu)) {
stats->err++;
return false;
@@ -288,17 +333,16 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx
mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats);
- if (unlikely(mlx5e_xdp_mpqwe_is_full(session)))
+ if (unlikely(mlx5e_xdp_mpqwe_is_full(session, sq->max_sq_mpw_wqebbs)))
mlx5e_xdp_mpwqe_complete(sq);
- mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
stats->xmit++;
return true;
}
-INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
+static int mlx5e_xmit_xdp_frame_check_stop_room(struct mlx5e_xdpsq *sq, int stop_room)
{
- if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
+ if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, stop_room))) {
/* SQ is full, ring doorbell */
mlx5e_xmit_xdp_doorbell(sq);
sq->stats->full++;
@@ -308,45 +352,76 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
return MLX5E_XDP_CHECK_OK;
}
+INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
+{
+ return mlx5e_xmit_xdp_frame_check_stop_room(sq, 1);
+}
+
INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
- struct mlx5e_xdp_info *xdpi, int check_result)
+ struct skb_shared_info *sinfo, int check_result)
{
struct mlx5_wq_cyc *wq = &sq->wq;
- u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
- struct mlx5_wqe_data_seg *dseg = wqe->data;
+ struct mlx5_wqe_ctrl_seg *cseg;
+ struct mlx5_wqe_data_seg *dseg;
+ struct mlx5_wqe_eth_seg *eseg;
+ struct mlx5e_tx_wqe *wqe;
dma_addr_t dma_addr = xdptxd->dma_addr;
u32 dma_len = xdptxd->len;
+ u16 ds_cnt, inline_hdr_sz;
+ u8 num_wqebbs = 1;
+ int num_frags = 0;
+ u16 pi;
struct mlx5e_xdpsq_stats *stats = sq->stats;
- net_prefetchw(wqe);
-
if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) {
stats->err++;
return false;
}
- if (!check_result)
- check_result = mlx5e_xmit_xdp_frame_check(sq);
+ ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
+ if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE)
+ ds_cnt++;
+
+ /* check_result must be 0 if sinfo is passed. */
+ if (!check_result) {
+ int stop_room = 1;
+
+ if (unlikely(sinfo)) {
+ ds_cnt += sinfo->nr_frags;
+ num_frags = sinfo->nr_frags;
+ num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
+ /* Assuming MLX5_CAP_GEN(mdev, max_wqe_sz_sq) is big
+ * enough to hold all fragments.
+ */
+ stop_room = MLX5E_STOP_ROOM(num_wqebbs);
+ }
+
+ check_result = mlx5e_xmit_xdp_frame_check_stop_room(sq, stop_room);
+ }
if (unlikely(check_result < 0))
return false;
- cseg->fm_ce_se = 0;
+ pi = mlx5e_xdpsq_get_next_pi(sq, num_wqebbs);
+ wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ net_prefetchw(wqe);
+
+ cseg = &wqe->ctrl;
+ eseg = &wqe->eth;
+ dseg = wqe->data;
+
+ inline_hdr_sz = 0;
/* copy the inline part if required */
if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
memcpy(eseg->inline_hdr.start, xdptxd->data, sizeof(eseg->inline_hdr.start));
- eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
memcpy(dseg, xdptxd->data + sizeof(eseg->inline_hdr.start),
MLX5E_XDP_MIN_INLINE - sizeof(eseg->inline_hdr.start));
dma_len -= MLX5E_XDP_MIN_INLINE;
dma_addr += MLX5E_XDP_MIN_INLINE;
+ inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
dseg++;
}
@@ -356,11 +431,45 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
- sq->pc++;
+ if (unlikely(test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state))) {
+ u8 num_pkts = 1 + num_frags;
+ int i;
+
+ memset(&cseg->trailer, 0, sizeof(cseg->trailer));
+ memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
+
+ eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
+ dseg->lkey = sq->mkey_be;
+
+ for (i = 0; i < num_frags; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+ dma_addr_t addr;
+
+ addr = page_pool_get_dma_addr(skb_frag_page(frag)) +
+ skb_frag_off(frag);
+
+ dseg++;
+ dseg->addr = cpu_to_be64(addr);
+ dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
+ dseg->lkey = sq->mkey_be;
+ }
+
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+
+ sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
+ .num_wqebbs = num_wqebbs,
+ .num_pkts = num_pkts,
+ };
+
+ sq->pc += num_wqebbs;
+ } else {
+ cseg->fm_ce_se = 0;
+
+ sq->pc++;
+ }
sq->doorbell_cseg = cseg;
- mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
stats->xmit++;
return true;
}
@@ -386,7 +495,7 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
break;
case MLX5E_XDP_XMIT_MODE_PAGE:
/* XDP_TX from the regular RQ */
- mlx5e_page_release_dynamic(xdpi.page.rq, &xdpi.page.di, recycle);
+ mlx5e_page_release_dynamic(xdpi.page.rq, xdpi.page.page, recycle);
break;
case MLX5E_XDP_XMIT_MODE_XSK:
/* AF_XDP send */
@@ -539,12 +648,13 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
xdpi.frame.dma_addr = xdptxd.dma_addr;
ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
- mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, 0);
+ mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL, 0);
if (unlikely(!ret)) {
dma_unmap_single(sq->pdev, xdptxd.dma_addr,
xdptxd.len, DMA_TO_DEVICE);
break;
}
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
nxmit++;
}
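
In the multi-buffer path added to mlx5e_xmit_xdp_frame() above, the WQE size is derived from the data-segment count: the empty ctrl/eth segments, one DS for the head data, one more for the inlined packet start when inlining is enabled, and one per fragment, rounded up to whole WQEBBs and then fed into MLX5E_STOP_ROOM(). The sketch below reproduces that sizing outside the driver; the constant values (2 empty DS, 4 DS per WQEBB) are assumptions inferred from the surrounding hunks, not taken from the headers.

	/* Illustrative sketch of the multi-buffer XDP WQE sizing; constants
	 * are assumed values mirroring the driver, not authoritative.
	 */
	#include <stdio.h>

	#define MLX5E_TX_WQE_EMPTY_DS_COUNT	2	/* ctrl + eth segments (assumed) */
	#define MLX5_SEND_WQEBB_NUM_DS		4	/* 16-byte DS per 64-byte WQEBB (assumed) */
	#define DIV_ROUND_UP(n, d)		(((n) + (d) - 1) / (d))
	#define MLX5E_STOP_ROOM(wqebbs)		((wqebbs) * 2 - 1)

	static void xdp_wqe_size(unsigned int nr_frags, int inline_mode)
	{
		unsigned int ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1; /* head data segment */
		unsigned int num_wqebbs;

		if (inline_mode)
			ds_cnt++;		/* inlined packet start */
		ds_cnt += nr_frags;		/* one DS per fragment */

		num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
		printf("frags=%u inline=%d: ds_cnt=%u wqebbs=%u stop_room=%u\n",
		       nr_frags, inline_mode, ds_cnt, num_wqebbs,
		       MLX5E_STOP_ROOM(num_wqebbs));
	}

	int main(void)
	{
		xdp_wqe_size(0, 1);	/* single-buffer frame */
		xdp_wqe_size(3, 1);	/* xdp_frame with 3 fragments */
		return 0;
	}
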
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index 8d991c3b7a50..287e17911251 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -38,7 +38,6 @@
#include "en/txrx.h"
#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
-#define MLX5E_XDP_TX_DS_COUNT (MLX5E_TX_WQE_EMPTY_DS_COUNT + 1 /* SG DS */)
#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT 16
#define MLX5E_XDP_INLINE_WQE_SZ_THRSD \
@@ -47,8 +46,8 @@
struct mlx5e_xsk_param;
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
-bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
- u32 *len, struct xdp_buff *xdp);
+bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
+ struct bpf_prog *prog, struct xdp_buff *xdp);
void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
@@ -59,11 +58,11 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
struct mlx5e_xmit_data *xdptxd,
- struct mlx5e_xdp_info *xdpi,
+ struct skb_shared_info *sinfo,
int check_result));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
struct mlx5e_xmit_data *xdptxd,
- struct mlx5e_xdp_info *xdpi,
+ struct skb_shared_info *sinfo,
int check_result));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq));
@@ -123,12 +122,13 @@ static inline bool mlx5e_xdp_get_inline_state(struct mlx5e_xdpsq *sq, bool cur)
return cur;
}
-static inline bool mlx5e_xdp_mpqwe_is_full(struct mlx5e_tx_mpwqe *session)
+static inline bool mlx5e_xdp_mpqwe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
{
if (session->inline_on)
return session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT >
- MLX5E_TX_MPW_MAX_NUM_DS;
- return mlx5e_tx_mpwqe_is_full(session);
+ max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;
+
+ return mlx5e_tx_mpwqe_is_full(session, max_sq_mpw_wqebbs);
}
struct mlx5e_xdp_wqe_info {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 8e7b877d8a12..021da085e603 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -4,6 +4,7 @@
#include "rx.h"
#include "en/xdp.h"
#include <net/xdp_sock_drv.h>
+#include <linux/filter.h>
/* RX data path */
@@ -30,7 +31,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
u32 page_idx)
{
struct xdp_buff *xdp = wi->umr.dma_info[page_idx].xsk;
- u32 cqe_bcnt32 = cqe_bcnt;
+ struct bpf_prog *prog;
/* Check packet size. Note LRO doesn't use linear SKB */
if (unlikely(cqe_bcnt > rq->hw_mtu)) {
@@ -45,7 +46,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
*/
WARN_ON_ONCE(head_offset);
- xdp->data_end = xdp->data + cqe_bcnt32;
+ xdp->data_end = xdp->data + cqe_bcnt;
xdp_set_data_meta_invalid(xdp);
xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
net_prefetch(xdp->data);
@@ -65,7 +66,8 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
* allocated first from the Reuse Ring, so it has enough space.
*/
- if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp))) {
+ prog = rcu_dereference(rq->xdp_prog);
+ if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp))) {
if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
return NULL; /* page/packet was consumed by XDP */
@@ -74,7 +76,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
/* XDP_PASS: copy the data from the UMEM to a new SKB and reuse the
* frame. On SKB allocation failure, NULL is returned.
*/
- return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt32);
+ return mlx5e_xsk_construct_skb(rq, xdp->data, xdp->data_end - xdp->data);
}
struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
@@ -83,6 +85,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
u32 cqe_bcnt)
{
struct xdp_buff *xdp = wi->di->xsk;
+ struct bpf_prog *prog;
/* wi->offset is not used in this function, because xdp->data and the
* DMA address point directly to the necessary place. Furthermore, the
@@ -101,12 +104,13 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
return NULL;
}
- if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt, xdp)))
+ prog = rcu_dereference(rq->xdp_prog);
+ if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp)))
return NULL; /* page/packet was consumed by XDP */
/* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
* will be handled by mlx5e_put_rx_frag.
* On SKB allocation failure, NULL is returned.
*/
- return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt);
+ return mlx5e_xsk_construct_skb(rq, xdp->data, xdp->data_end - xdp->data);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 25eac9e20342..3ad7f1301fa8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -43,7 +43,7 @@ static void mlx5e_build_xsk_cparam(struct mlx5_core_dev *mdev,
struct mlx5e_channel_param *cparam)
{
mlx5e_build_rq_param(mdev, params, xsk, q_counter, &cparam->rq);
- mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
+ mlx5e_build_xdpsq_param(mdev, params, xsk, &cparam->xdp_sq);
}
static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index 8e96260fce1d..3ec0c17db010 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -103,12 +103,15 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
xsk_buff_raw_dma_sync_for_device(pool, xdptxd.dma_addr, xdptxd.len);
ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
- mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, check_result);
+ mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL,
+ check_result);
if (unlikely(!ret)) {
if (sq->mpwqe.wqe)
mlx5e_xdp_mpwqe_complete(sq);
mlx5e_xsk_tx_post_err(sq, &xdpi);
+ } else {
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
}
flush = true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index d964665eaa63..62cde3e87c2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -139,15 +139,6 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
return true;
}
-static inline bool mlx5e_accel_tx_is_ipsec_flow(struct mlx5e_accel_tx_state *state)
-{
-#ifdef CONFIG_MLX5_EN_IPSEC
- return mlx5e_ipsec_is_tx_flow(&state->ipsec);
-#else
- return false;
-#endif
-}
-
static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
struct mlx5e_accel_tx_state *state)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 7cab08a2f715..299e3f0fcb5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -35,7 +35,6 @@
#include <crypto/aead.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
-#include <linux/module.h>
#include "en.h"
#include "en_accel/ipsec.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 9ad3459fb63a..aaf11c66bf4c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -32,9 +32,9 @@ u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);
- stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
- stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
- stop_room += num_dumps * mlx5e_stop_room_for_wqe(MLX5E_KTLS_DUMP_WQEBBS);
+ stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
+ stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
+ stop_room += num_dumps * mlx5e_stop_room_for_wqe(mdev, MLX5E_KTLS_DUMP_WQEBBS);
return stop_room;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index 7a700f913582..a05580cea481 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -386,5 +386,5 @@ u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par
/* FPGA */
/* Resync SKB. */
- return mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+ return mlx5e_stop_room_for_max_wqe(mdev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index a4c8d8d00d5a..d659fe07d464 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -1142,7 +1142,7 @@ static int mlx5e_update_trust_state_hw(struct mlx5e_priv *priv, void *context)
err = mlx5_set_trust_state(priv->mdev, *trust_state);
if (err)
return err;
- priv->dcbx_dp.trust_state = *trust_state;
+ WRITE_ONCE(priv->dcbx_dp.trust_state, *trust_state);
return 0;
}
@@ -1187,16 +1187,18 @@ static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio)
static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
+ u8 trust_state;
int err;
- priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;
-
- if (!MLX5_DSCP_SUPPORTED(mdev))
+ if (!MLX5_DSCP_SUPPORTED(mdev)) {
+ WRITE_ONCE(priv->dcbx_dp.trust_state, MLX5_QPTS_TRUST_PCP);
return 0;
+ }
- err = mlx5_query_trust_state(priv->mdev, &priv->dcbx_dp.trust_state);
+ err = mlx5_query_trust_state(priv->mdev, &trust_state);
if (err)
return err;
+ WRITE_ONCE(priv->dcbx_dp.trust_state, trust_state);
mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params,
priv->dcbx_dp.trust_state);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 3667f5ef5990..2f1dedc721d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -72,12 +72,13 @@
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
- bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
- MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
- MLX5_CAP_ETH(mdev, reg_umr_sq);
- u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq);
- bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap;
+ bool striding_rq_umr, inline_umr;
+ u16 max_wqe_sz_cap;
+ striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) && MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
+ MLX5_CAP_ETH(mdev, reg_umr_sq);
+ max_wqe_sz_cap = mlx5e_get_max_sq_wqebbs(mdev) * MLX5_SEND_WQE_BB;
+ inline_umr = max_wqe_sz_cap >= MLX5E_UMR_WQE_INLINE_SZ;
if (!striding_rq_umr)
return false;
if (!inline_umr) {
@@ -594,6 +595,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
rq->mpwqe.num_strides =
BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
+ rq->mpwqe.min_wqe_bulk = mlx5e_mpwqe_get_min_wqe_bulk(wq_sz);
rq->buff.frame0_sz = (1 << rq->mpwqe.log_stride_sz);
@@ -778,7 +780,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
* entered, and it's safe to call mlx5e_page_release_dynamic
* directly.
*/
- mlx5e_page_release_dynamic(rq, dma_info, false);
+ mlx5e_page_release_dynamic(rq, dma_info->page, false);
}
xdp_rxq_info_unreg(&rq->xdp_rxq);
@@ -1164,6 +1166,9 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
is_redirect ?
&c->priv->channel_stats[c->ix]->xdpsq :
&c->priv->channel_stats[c->ix]->rq_xdpsq;
+ sq->max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
+ sq->stop_room = MLX5E_STOP_ROOM(sq->max_sq_wqebbs);
+ sq->max_sq_mpw_wqebbs = mlx5e_get_sw_max_sq_mpw_wqebbs(sq->max_sq_wqebbs);
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1238,6 +1243,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
sq->channel = c;
sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
sq->reserved_room = param->stop_room;
+ sq->max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1313,7 +1319,6 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
int err;
sq->pdev = c->pdev;
- sq->tstamp = c->tstamp;
sq->clock = &mdev->clock;
sq->mkey_be = c->mkey_be;
sq->netdev = c->netdev;
@@ -1324,6 +1329,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ sq->max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
+ sq->max_sq_mpw_wqebbs = mlx5e_get_sw_max_sq_mpw_wqebbs(sq->max_sq_wqebbs);
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
@@ -1659,14 +1666,22 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+
+ /* Don't enable multi buffer on XDP_REDIRECT SQ, as it's not yet
+ * supported by upstream, and there is no defined trigger to allow
+ * transmitting redirected multi-buffer frames.
+ */
+ if (param->is_xdp_mb && !is_redirect)
+ set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state);
+
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
if (err)
goto err_free_xdpsq;
mlx5e_set_xmit_fp(sq, param->is_mpw);
- if (!param->is_mpw) {
- unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
+ if (!param->is_mpw && !test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
+ unsigned int ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
unsigned int inline_hdr_sz = 0;
int i;
@@ -2677,39 +2692,41 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
struct mlx5e_txqsq *sq = &c->sq[tc];
priv->txq2sq[sq->txq_ix] = sq;
- priv->channel_tc2realtxq[i][tc] = i + tc * ch;
}
}
if (!priv->channels.ptp)
- return;
+ goto out;
if (!test_bit(MLX5E_PTP_STATE_TX, priv->channels.ptp->state))
- return;
+ goto out;
for (tc = 0; tc < num_tc; tc++) {
struct mlx5e_ptp *c = priv->channels.ptp;
struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq;
priv->txq2sq[sq->txq_ix] = sq;
- priv->port_ptp_tc2realtxq[tc] = priv->num_tc_x_num_ch + tc;
}
-}
-static void mlx5e_update_num_tc_x_num_ch(struct mlx5e_priv *priv)
-{
- /* Sync with mlx5e_select_queue. */
- WRITE_ONCE(priv->num_tc_x_num_ch,
- mlx5e_get_dcb_num_tc(&priv->channels.params) * priv->channels.num);
+out:
+ /* Make the change to txq2sq visible before the queue is started.
+ * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
+ * which pairs with this barrier.
+ */
+ smp_wmb();
}
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
- mlx5e_update_num_tc_x_num_ch(priv);
mlx5e_build_txq_maps(priv);
mlx5e_activate_channels(&priv->channels);
mlx5e_qos_activate_queues(priv);
mlx5e_xdp_tx_enable(priv);
+
+ /* dev_watchdog() wants all TX queues to be started when the carrier is
+ * OK, including the ones in range real_num_tx_queues..num_tx_queues-1.
+ * Make it happy to avoid TX timeout false alarms.
+ */
netif_tx_start_all_queues(priv->netdev);
if (mlx5e_is_vport_rep(priv))
@@ -2729,11 +2746,13 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
if (mlx5e_is_vport_rep(priv))
mlx5e_remove_sqs_fwd_rules(priv);
- /* FIXME: This is a W/A only for tx timeout watch dog false alarm when
- * polling for inactive tx queues.
+ /* The results of ndo_select_queue are unreliable, while netdev config
+ * is being changed (real_num_tx_queues, num_tc). Stop all queues to
+ * prevent ndo_start_xmit from being called, so that it can assume that
+ * the selected queue is always valid.
*/
- netif_tx_stop_all_queues(priv->netdev);
netif_tx_disable(priv->netdev);
+
mlx5e_xdp_tx_disable(priv);
mlx5e_deactivate_channels(&priv->channels);
}
@@ -2793,6 +2812,7 @@ static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
mlx5e_close_channels(&old_chs);
priv->profile->update_rx(priv);
+ mlx5e_selq_apply(&priv->selq);
out:
mlx5e_activate_priv_channels(priv);
@@ -2816,13 +2836,24 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
return mlx5e_switch_priv_params(priv, params, preactivate, context);
new_chs.params = *params;
+
+ mlx5e_selq_prepare(&priv->selq, &new_chs.params, !!priv->htb.maj_id);
+
err = mlx5e_open_channels(priv, &new_chs);
if (err)
- return err;
+ goto err_cancel_selq;
+
err = mlx5e_switch_priv_channels(priv, &new_chs, preactivate, context);
if (err)
- mlx5e_close_channels(&new_chs);
+ goto err_close;
+
+ return 0;
+
+err_close:
+ mlx5e_close_channels(&new_chs);
+err_cancel_selq:
+ mlx5e_selq_cancel(&priv->selq);
return err;
}
@@ -2862,6 +2893,8 @@ int mlx5e_open_locked(struct net_device *netdev)
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
+ mlx5e_selq_prepare(&priv->selq, &priv->channels.params, !!priv->htb.maj_id);
+
set_bit(MLX5E_STATE_OPENED, &priv->state);
err = mlx5e_open_channels(priv, &priv->channels);
@@ -2869,6 +2902,7 @@ int mlx5e_open_locked(struct net_device *netdev)
goto err_clear_state_opened_flag;
priv->profile->update_rx(priv);
+ mlx5e_selq_apply(&priv->selq);
mlx5e_activate_priv_channels(priv);
mlx5e_apply_traps(priv, true);
if (priv->profile->update_carrier)
@@ -2879,6 +2913,7 @@ int mlx5e_open_locked(struct net_device *netdev)
err_clear_state_opened_flag:
clear_bit(MLX5E_STATE_OPENED, &priv->state);
+ mlx5e_selq_cancel(&priv->selq);
return err;
}
@@ -3918,6 +3953,31 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
return true;
}
+static bool mlx5e_params_validate_xdp(struct net_device *netdev, struct mlx5e_params *params)
+{
+ bool is_linear;
+
+ /* No XSK params: AF_XDP can't be enabled yet at the point of setting
+ * the XDP program.
+ */
+ is_linear = mlx5e_rx_is_linear_skb(params, NULL);
+
+ if (!is_linear && params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
+ netdev_warn(netdev, "XDP is not allowed with striding RQ and MTU(%d) > %d\n",
+ params->sw_mtu,
+ mlx5e_xdp_max_mtu(params, NULL));
+ return false;
+ }
+ if (!is_linear && !params->xdp_prog->aux->xdp_has_frags) {
+ netdev_warn(netdev, "MTU(%d) > %d, too big for an XDP program not aware of multi buffer\n",
+ params->sw_mtu,
+ mlx5e_xdp_max_mtu(params, NULL));
+ return false;
+ }
+
+ return true;
+}
+
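
The policy the new helper enforces can be read as a small predicate; illustrative only, with booleans standing in for the real parameter checks:

	static bool xdp_mtu_policy_allows(bool rx_is_linear, bool striding_rq,
					  bool prog_has_frags)
	{
		if (rx_is_linear)
			return true;		/* packet fits in one page: always OK */
		if (striding_rq)
			return false;		/* non-linear RX on striding RQ: rejected */
		return prog_has_frags;		/* legacy RQ: needs a frags-aware program */
	}
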
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
mlx5e_fp_preactivate preactivate)
{
@@ -3937,10 +3997,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
if (err)
goto out;
- if (params->xdp_prog &&
- !mlx5e_rx_is_linear_skb(&new_params, NULL)) {
- netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
- new_mtu, mlx5e_xdp_max_mtu(params, NULL));
+ if (new_params.xdp_prog && !mlx5e_params_validate_xdp(netdev, &new_params)) {
err = -EINVAL;
goto out;
}
@@ -4423,15 +4480,8 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
new_params = priv->channels.params;
new_params.xdp_prog = prog;
- /* No XSK params: AF_XDP can't be enabled yet at the point of setting
- * the XDP program.
- */
- if (!mlx5e_rx_is_linear_skb(&new_params, NULL)) {
- netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
- new_params.sw_mtu,
- mlx5e_xdp_max_mtu(&new_params, NULL));
+ if (!mlx5e_params_validate_xdp(netdev, &new_params))
return -EINVAL;
- }
return 0;
}
@@ -4636,11 +4686,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
priv->max_nch);
mlx5e_params_mqprio_reset(params);
- /* Set an initial non-zero value, so that mlx5e_select_queue won't
- * divide by zero if called before first activating channels.
- */
- priv->num_tc_x_num_ch = params->num_channels * params->mqprio.num_tc;
-
/* SQ */
params->log_sq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
@@ -5193,7 +5238,8 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
struct net_device *netdev,
struct mlx5_core_dev *mdev)
{
- int nch, num_txqs, node, i;
+ int nch, num_txqs, node;
+ int err;
num_txqs = netdev->num_tx_queues;
nch = mlx5e_calc_max_nch(mdev, netdev, profile);
@@ -5210,6 +5256,11 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
return -ENOMEM;
mutex_init(&priv->state_lock);
+
+ err = mlx5e_selq_init(&priv->selq, &priv->state_lock);
+ if (err)
+ goto err_free_cpumask;
+
hash_init(priv->htb.qos_tc2node);
INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
@@ -5218,7 +5269,7 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
priv->wq = create_singlethread_workqueue("mlx5e");
if (!priv->wq)
- goto err_free_cpumask;
+ goto err_free_selq;
priv->txq2sq = kcalloc_node(num_txqs, sizeof(*priv->txq2sq), GFP_KERNEL, node);
if (!priv->txq2sq)
@@ -5228,36 +5279,21 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
if (!priv->tx_rates)
goto err_free_txq2sq;
- priv->channel_tc2realtxq =
- kcalloc_node(nch, sizeof(*priv->channel_tc2realtxq), GFP_KERNEL, node);
- if (!priv->channel_tc2realtxq)
- goto err_free_tx_rates;
-
- for (i = 0; i < nch; i++) {
- priv->channel_tc2realtxq[i] =
- kcalloc_node(profile->max_tc, sizeof(**priv->channel_tc2realtxq),
- GFP_KERNEL, node);
- if (!priv->channel_tc2realtxq[i])
- goto err_free_channel_tc2realtxq;
- }
-
priv->channel_stats =
kcalloc_node(nch, sizeof(*priv->channel_stats), GFP_KERNEL, node);
if (!priv->channel_stats)
- goto err_free_channel_tc2realtxq;
+ goto err_free_tx_rates;
return 0;
-err_free_channel_tc2realtxq:
- while (--i >= 0)
- kfree(priv->channel_tc2realtxq[i]);
- kfree(priv->channel_tc2realtxq);
err_free_tx_rates:
kfree(priv->tx_rates);
err_free_txq2sq:
kfree(priv->txq2sq);
err_destroy_workqueue:
destroy_workqueue(priv->wq);
+err_free_selq:
+ mlx5e_selq_cleanup(&priv->selq);
err_free_cpumask:
free_cpumask_var(priv->scratchpad.cpumask);
return -ENOMEM;
@@ -5274,12 +5310,12 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
for (i = 0; i < priv->stats_nch; i++)
kvfree(priv->channel_stats[i]);
kfree(priv->channel_stats);
- for (i = 0; i < priv->max_nch; i++)
- kfree(priv->channel_tc2realtxq[i]);
- kfree(priv->channel_tc2realtxq);
kfree(priv->tx_rates);
kfree(priv->txq2sq);
destroy_workqueue(priv->wq);
+ mutex_lock(&priv->state_lock);
+ mlx5e_selq_cleanup(&priv->selq);
+ mutex_unlock(&priv->state_lock);
free_cpumask_var(priv->scratchpad.cpumask);
for (i = 0; i < priv->htb.max_qos_sqs; i++)
@@ -5345,6 +5381,7 @@ mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *prof
}
netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
dev_net_set(netdev, mlx5_core_net(mdev));
return netdev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 06d1f46f1688..6b7e7ea6ded2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -55,6 +55,7 @@
#include "diag/en_rep_tracepoint.h"
#include "en_accel/ipsec.h"
#include "en/tc/int_port.h"
+#include "en/ptp.h"
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
@@ -401,13 +402,18 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
+ int n, tc, nch, num_sqs = 0;
struct mlx5e_channel *c;
- int n, tc, num_sqs = 0;
int err = -ENOMEM;
+ bool ptp_sq;
u32 *sqs;
- sqs = kcalloc(priv->channels.num * mlx5e_get_dcb_num_tc(&priv->channels.params),
- sizeof(*sqs), GFP_KERNEL);
+ ptp_sq = !!(priv->channels.ptp &&
+ MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS));
+ nch = priv->channels.num + ptp_sq;
+
+ sqs = kcalloc(nch * mlx5e_get_dcb_num_tc(&priv->channels.params), sizeof(*sqs),
+ GFP_KERNEL);
if (!sqs)
goto out;
@@ -416,6 +422,12 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
for (tc = 0; tc < c->num_tc; tc++)
sqs[num_sqs++] = c->sq[tc].sqn;
}
+ if (ptp_sq) {
+ struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
+
+ for (tc = 0; tc < ptp_ch->num_tc; tc++)
+ sqs[num_sqs++] = ptp_ch->ptpsq[tc].txqsq.sqn;
+ }
err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
kfree(sqs);
@@ -632,11 +644,6 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
params->mqprio.num_tc = 1;
params->tunneled_offload_en = false;
- /* Set an initial non-zero value, so that mlx5e_select_queue won't
- * divide by zero if called before first activating channels.
- */
- priv->num_tc_x_num_ch = params->num_channels * params->mqprio.num_tc;
-
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}
@@ -935,15 +942,21 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
return err;
}
+ err = mlx5e_tc_ht_init(&rpriv->tc_ht);
+ if (err)
+ goto err_ht_init;
+
if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
err = mlx5e_init_uplink_rep_tx(rpriv);
if (err)
- goto destroy_tises;
+ goto err_init_tx;
}
return 0;
-destroy_tises:
+err_init_tx:
+ mlx5e_tc_ht_cleanup(&rpriv->tc_ht);
+err_ht_init:
mlx5e_destroy_tises(priv);
return err;
}
@@ -963,6 +976,8 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
mlx5e_cleanup_uplink_rep_tx(rpriv);
+
+ mlx5e_tc_ht_cleanup(&rpriv->tc_ht);
}
static void mlx5e_rep_enable(struct mlx5e_priv *priv)
@@ -1099,6 +1114,7 @@ static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
&MLX5E_STATS_GRP(ipsec_sw),
&MLX5E_STATS_GRP(ipsec_hw),
#endif
+ &MLX5E_STATS_GRP(ptp),
};
static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index b3f7520dfd08..adf5cc6a7b8c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -64,11 +64,6 @@ struct mlx5e_tc_tun_encap;
struct mlx5e_post_act;
struct mlx5_rep_uplink_priv {
- /* Filters DB - instantiated by the uplink representor and shared by
- * the uplink's VFs
- */
- struct rhashtable tc_ht;
-
/* indirect block callbacks are invoked on bind/unbind events
* on registered higher level devices (e.g. tunnel devices)
*
@@ -113,6 +108,7 @@ struct mlx5e_rep_priv {
struct list_head vport_sqs_list;
struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */
struct rtnl_link_stats64 prev_vf_vport_stats;
+ struct rhashtable tc_ht;
};
static inline
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 6530d7bd5045..56bb58704bf9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -34,6 +34,7 @@
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/bitmap.h>
+#include <linux/filter.h>
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
#include <net/inet_ecn.h>
@@ -221,8 +222,7 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
}
-static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
+static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, struct page *page)
{
struct mlx5e_page_cache *cache = &rq->page_cache;
u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
@@ -233,12 +233,13 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
return false;
}
- if (!dev_page_is_reusable(dma_info->page)) {
+ if (!dev_page_is_reusable(page)) {
stats->cache_waive++;
return false;
}
- cache->page_cache[cache->tail] = *dma_info;
+ cache->page_cache[cache->tail].page = page;
+ cache->page_cache[cache->tail].addr = page_pool_get_dma_addr(page);
cache->tail = tail_next;
return true;
}
@@ -286,6 +287,7 @@ static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq,
dma_info->page = NULL;
return -ENOMEM;
}
+ page_pool_set_dma_addr(dma_info->page, dma_info->addr);
return 0;
}
@@ -299,26 +301,27 @@ static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
return mlx5e_page_alloc_pool(rq, dma_info);
}
-void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
+void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page)
{
- dma_unmap_page_attrs(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir,
+ dma_addr_t dma_addr = page_pool_get_dma_addr(page);
+
+ dma_unmap_page_attrs(rq->pdev, dma_addr, PAGE_SIZE, rq->buff.map_dir,
DMA_ATTR_SKIP_CPU_SYNC);
+ page_pool_set_dma_addr(page, 0);
}
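
Both hunks above lean on page_pool's per-page DMA-address slot so that only the page has to be passed around; a minimal sketch of the set/get symmetry, assuming a device and direction matching the RQ's mapping (dev and dir are placeholders):

	dma_addr_t addr;

	addr = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;
	page_pool_set_dma_addr(page, addr);	/* stash the mapping in the page */

	/* ... later, with only the page in hand ... */
	addr = page_pool_get_dma_addr(page);	/* recover the mapping */
	dma_unmap_page_attrs(dev, addr, PAGE_SIZE, dir, DMA_ATTR_SKIP_CPU_SYNC);
	page_pool_set_dma_addr(page, 0);	/* clear before releasing the page */
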
-void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info,
- bool recycle)
+void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle)
{
if (likely(recycle)) {
- if (mlx5e_rx_cache_put(rq, dma_info))
+ if (mlx5e_rx_cache_put(rq, page))
return;
- mlx5e_page_dma_unmap(rq, dma_info);
- page_pool_recycle_direct(rq->page_pool, dma_info->page);
+ mlx5e_page_dma_unmap(rq, page);
+ page_pool_recycle_direct(rq->page_pool, page);
} else {
- mlx5e_page_dma_unmap(rq, dma_info);
- page_pool_release_page(rq->page_pool, dma_info->page);
- put_page(dma_info->page);
+ mlx5e_page_dma_unmap(rq, page);
+ page_pool_release_page(rq->page_pool, page);
+ put_page(page);
}
}
@@ -333,7 +336,7 @@ static inline void mlx5e_page_release(struct mlx5e_rq *rq,
*/
xsk_buff_free(dma_info->xsk);
else
- mlx5e_page_release_dynamic(rq, dma_info, recycle);
+ mlx5e_page_release_dynamic(rq, dma_info->page, recycle);
}
static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
@@ -373,12 +376,15 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
int i;
for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
+ u16 headroom;
+
err = mlx5e_get_rx_frag(rq, frag);
if (unlikely(err))
goto free_frags;
+ headroom = i == 0 ? rq->buff.headroom : 0;
wqe->data[i].addr = cpu_to_be64(frag->di->addr +
- frag->offset + rq->buff.headroom);
+ frag->offset + headroom);
}
return 0;
@@ -620,7 +626,7 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
struct mlx5e_icosq *sq = rq->icosq;
int i, err, max_klm_entries, len;
- max_klm_entries = MLX5E_MAX_KLM_PER_WQE;
+ max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev);
klm_entries = bitmap_find_window(shampo->bitmap,
shampo->hd_per_wqe,
shampo->hd_per_wq, shampo->pi);
@@ -960,8 +966,7 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk))
rq->stats->congst_umr++;
-#define UMR_WQE_BULK (2)
- if (likely(missing < UMR_WQE_BULK))
+ if (likely(missing < rq->mpwqe.min_wqe_bulk))
return false;
if (rq->page_pool)
@@ -1490,7 +1495,7 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
static inline
struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
u32 frag_size, u16 headroom,
- u32 cqe_bcnt)
+ u32 cqe_bcnt, u32 metasize)
{
struct sk_buff *skb = build_skb(va, frag_size);
@@ -1502,6 +1507,9 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
skb_reserve(skb, headroom);
skb_put(skb, cqe_bcnt);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
return skb;
}
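
The metasize plumbing above only carries data that an XDP program put in front of the packet; for context, a hypothetical producer (standard libbpf-style program, not part of this patch):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("xdp")
	int xdp_mark_meta(struct xdp_md *ctx)
	{
		__u32 *meta;

		/* grow the metadata area by 4 bytes in front of the payload */
		if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
			return XDP_PASS;

		meta = (void *)(long)ctx->data_meta;
		if ((void *)(meta + 1) > (void *)(long)ctx->data)
			return XDP_PASS;

		*meta = 0x1234;	/* later visible to the stack via skb metadata */
		return XDP_PASS;
	}

	char _license[] SEC("license") = "GPL";

The driver then derives metasize as xdp.data - xdp.data_meta and hands it to mlx5e_build_linear_skb(), which is what skb_metadata_set() records.
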
@@ -1509,7 +1517,7 @@ static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
u32 len, struct xdp_buff *xdp)
{
xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
- xdp_prepare_buff(xdp, va, headroom, len, false);
+ xdp_prepare_buff(xdp, va, headroom, len, true);
}
static struct sk_buff *
@@ -1518,8 +1526,9 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
{
struct mlx5e_dma_info *di = wi->di;
u16 rx_headroom = rq->buff.headroom;
- struct xdp_buff xdp;
+ struct bpf_prog *prog;
struct sk_buff *skb;
+ u32 metasize = 0;
void *va, *data;
u32 frag_size;
@@ -1529,16 +1538,23 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
frag_size, DMA_FROM_DEVICE);
- net_prefetchw(va); /* xdp_frame data area */
net_prefetch(data);
- mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
- if (mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp))
- return NULL; /* page/packet was consumed by XDP */
+ prog = rcu_dereference(rq->xdp_prog);
+ if (prog) {
+ struct xdp_buff xdp;
- rx_headroom = xdp.data - xdp.data_hard_start;
+ net_prefetchw(va); /* xdp_frame data area */
+ mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
+ if (mlx5e_xdp_handle(rq, di->page, prog, &xdp))
+ return NULL; /* page/packet was consumed by XDP */
+
+ rx_headroom = xdp.data - xdp.data_hard_start;
+ metasize = xdp.data - xdp.data_meta;
+ cqe_bcnt = xdp.data_end - xdp.data;
+ }
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
- skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt);
+ skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
if (unlikely(!skb))
return NULL;
@@ -1554,41 +1570,103 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
{
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
struct mlx5e_wqe_frag_info *head_wi = wi;
- u16 headlen = min_t(u32, MLX5E_RX_MAX_HEAD, cqe_bcnt);
- u16 frag_headlen = headlen;
- u16 byte_cnt = cqe_bcnt - headlen;
+ u16 rx_headroom = rq->buff.headroom;
+ struct mlx5e_dma_info *di = wi->di;
+ struct skb_shared_info *sinfo;
+ u32 frag_consumed_bytes;
+ struct bpf_prog *prog;
+ struct xdp_buff xdp;
struct sk_buff *skb;
+ u32 truesize;
+ void *va;
- /* XDP is not supported in this configuration, as incoming packets
- * might spread among multiple pages.
- */
- skb = napi_alloc_skb(rq->cq.napi,
- ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
- if (unlikely(!skb)) {
- rq->stats->buff_alloc_err++;
- return NULL;
- }
+ va = page_address(di->page) + wi->offset;
+ frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
- net_prefetchw(skb->data);
+ dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
+ rq->buff.frame0_sz, DMA_FROM_DEVICE);
+ net_prefetchw(va); /* xdp_frame data area */
+ net_prefetch(va + rx_headroom);
+
+ mlx5e_fill_xdp_buff(rq, va, rx_headroom, frag_consumed_bytes, &xdp);
+ sinfo = xdp_get_shared_info_from_buff(&xdp);
+ truesize = 0;
+
+ cqe_bcnt -= frag_consumed_bytes;
+ frag_info++;
+ wi++;
+
+ while (cqe_bcnt) {
+ skb_frag_t *frag;
+
+ di = wi->di;
+
+ frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
+
+ dma_sync_single_for_cpu(rq->pdev, di->addr + wi->offset,
+ frag_consumed_bytes, DMA_FROM_DEVICE);
+
+ if (!xdp_buff_has_frags(&xdp)) {
+ /* Init on the first fragment to avoid cold cache access
+ * when possible.
+ */
+ sinfo->nr_frags = 0;
+ sinfo->xdp_frags_size = 0;
+ xdp_buff_set_frags_flag(&xdp);
+ }
- while (byte_cnt) {
- u16 frag_consumed_bytes =
- min_t(u16, frag_info->frag_size - frag_headlen, byte_cnt);
+ frag = &sinfo->frags[sinfo->nr_frags++];
+ __skb_frag_set_page(frag, di->page);
+ skb_frag_off_set(frag, wi->offset);
+ skb_frag_size_set(frag, frag_consumed_bytes);
- mlx5e_add_skb_frag(rq, skb, wi->di, wi->offset + frag_headlen,
- frag_consumed_bytes, frag_info->frag_stride);
- byte_cnt -= frag_consumed_bytes;
- frag_headlen = 0;
+ if (page_is_pfmemalloc(di->page))
+ xdp_buff_set_frag_pfmemalloc(&xdp);
+
+ sinfo->xdp_frags_size += frag_consumed_bytes;
+ truesize += frag_info->frag_stride;
+
+ cqe_bcnt -= frag_consumed_bytes;
frag_info++;
wi++;
}
- /* copy header */
- mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, head_wi->offset,
- headlen);
- /* skb linear part was allocated with headlen and aligned to long */
- skb->tail += headlen;
- skb->len += headlen;
+ di = head_wi->di;
+
+ prog = rcu_dereference(rq->xdp_prog);
+ if (prog && mlx5e_xdp_handle(rq, di->page, prog, &xdp)) {
+ if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
+ int i;
+
+ for (i = wi - head_wi; i < rq->wqe.info.num_frags; i++)
+ mlx5e_put_rx_frag(rq, &head_wi[i], true);
+ }
+ return NULL; /* page/packet was consumed by XDP */
+ }
+
+ skb = mlx5e_build_linear_skb(rq, xdp.data_hard_start, rq->buff.frame0_sz,
+ xdp.data - xdp.data_hard_start,
+ xdp.data_end - xdp.data,
+ xdp.data - xdp.data_meta);
+ if (unlikely(!skb))
+ return NULL;
+
+ page_ref_inc(di->page);
+
+ if (unlikely(xdp_buff_has_frags(&xdp))) {
+ int i;
+
+ /* sinfo->nr_frags is reset by build_skb, calculate again. */
+ xdp_update_skb_shared_info(skb, wi - head_wi - 1,
+ sinfo->xdp_frags_size, truesize,
+ xdp_buff_is_frag_pfmemalloc(&xdp));
+
+ for (i = 0; i < sinfo->nr_frags; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+
+ page_ref_inc(skb_frag_page(frag));
+ }
+ }
return skb;
}
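
The nr_frags comment above points at an ordering hazard worth spelling out: build_skb() reinitializes the skb_shared_info overlaying the xdp_buff's tailroom, so the fragment count must be taken (or recomputed) independently of it. A reduced sketch of the multi-buffer conversion, with frame_sz and truesize as placeholders and page refcounting and error handling omitted:

	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp);
	unsigned int nr_frags = sinfo->nr_frags;	/* build_skb() resets this */
	struct sk_buff *skb;

	skb = build_skb(xdp.data_hard_start, frame_sz);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp.data - xdp.data_hard_start);
	skb_put(skb, xdp.data_end - xdp.data);
	skb_metadata_set(skb, xdp.data - xdp.data_meta);

	if (xdp_buff_has_frags(&xdp))
		xdp_update_skb_shared_info(skb, nr_frags,
					   sinfo->xdp_frags_size, truesize,
					   xdp_buff_is_frag_pfmemalloc(&xdp));

The function above avoids the stale counter by deriving the count from the walked WQE fragments (wi - head_wi - 1) instead.
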
@@ -1832,9 +1910,9 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
{
struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
u16 rx_headroom = rq->buff.headroom;
- u32 cqe_bcnt32 = cqe_bcnt;
- struct xdp_buff xdp;
+ struct bpf_prog *prog;
struct sk_buff *skb;
+ u32 metasize = 0;
void *va, *data;
u32 frag_size;
@@ -1846,23 +1924,30 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
va = page_address(di->page) + head_offset;
data = va + rx_headroom;
- frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
+ frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
frag_size, DMA_FROM_DEVICE);
- net_prefetchw(va); /* xdp_frame data area */
net_prefetch(data);
- mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp);
- if (mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp)) {
- if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
- __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
- return NULL; /* page/packet was consumed by XDP */
- }
+ prog = rcu_dereference(rq->xdp_prog);
+ if (prog) {
+ struct xdp_buff xdp;
- rx_headroom = xdp.data - xdp.data_hard_start;
- frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
- skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32);
+ net_prefetchw(va); /* xdp_frame data area */
+ mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
+ if (mlx5e_xdp_handle(rq, di->page, prog, &xdp)) {
+ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
+ __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
+ return NULL; /* page/packet was consumed by XDP */
+ }
+
+ rx_headroom = xdp.data - xdp.data_hard_start;
+ metasize = xdp.data - xdp.data_meta;
+ cqe_bcnt = xdp.data_end - xdp.data;
+ }
+ frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
+ skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
if (unlikely(!skb))
return NULL;
@@ -1893,7 +1978,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, DMA_FROM_DEVICE);
prefetchw(hdr);
prefetch(data);
- skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size);
+ skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
if (unlikely(!skb))
return NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 00f1d16db456..bdc870f9c2f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -37,6 +37,10 @@
#include "en/ptp.h"
#include "en/port.h"
+#ifdef CONFIG_PAGE_POOL_STATS
+#include <net/page_pool.h>
+#endif
+
static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
return !priv->profile->stats_grps_num ? 0 :
@@ -183,6 +187,19 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
+#ifdef CONFIG_PAGE_POOL_STATS
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_empty) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_refill) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_waive) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cached) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cache_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
+#endif
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
@@ -349,6 +366,19 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_congst_umr += rq_stats->congst_umr;
s->rx_arfs_err += rq_stats->arfs_err;
s->rx_recover += rq_stats->recover;
+#ifdef CONFIG_PAGE_POOL_STATS
+ s->rx_pp_alloc_fast += rq_stats->pp_alloc_fast;
+ s->rx_pp_alloc_slow += rq_stats->pp_alloc_slow;
+ s->rx_pp_alloc_empty += rq_stats->pp_alloc_empty;
+ s->rx_pp_alloc_refill += rq_stats->pp_alloc_refill;
+ s->rx_pp_alloc_waive += rq_stats->pp_alloc_waive;
+ s->rx_pp_alloc_slow_high_order += rq_stats->pp_alloc_slow_high_order;
+ s->rx_pp_recycle_cached += rq_stats->pp_recycle_cached;
+ s->rx_pp_recycle_cache_full += rq_stats->pp_recycle_cache_full;
+ s->rx_pp_recycle_ring += rq_stats->pp_recycle_ring;
+ s->rx_pp_recycle_ring_full += rq_stats->pp_recycle_ring_full;
+ s->rx_pp_recycle_released_ref += rq_stats->pp_recycle_released_ref;
+#endif
#ifdef CONFIG_MLX5_EN_TLS
s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
@@ -455,6 +485,35 @@ static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
}
}
+#ifdef CONFIG_PAGE_POOL_STATS
+static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
+{
+ struct mlx5e_rq_stats *rq_stats = c->rq.stats;
+ struct page_pool *pool = c->rq.page_pool;
+ struct page_pool_stats stats = { 0 };
+
+ if (!page_pool_get_stats(pool, &stats))
+ return;
+
+ rq_stats->pp_alloc_fast = stats.alloc_stats.fast;
+ rq_stats->pp_alloc_slow = stats.alloc_stats.slow;
+ rq_stats->pp_alloc_slow_high_order = stats.alloc_stats.slow_high_order;
+ rq_stats->pp_alloc_empty = stats.alloc_stats.empty;
+ rq_stats->pp_alloc_waive = stats.alloc_stats.waive;
+ rq_stats->pp_alloc_refill = stats.alloc_stats.refill;
+
+ rq_stats->pp_recycle_cached = stats.recycle_stats.cached;
+ rq_stats->pp_recycle_cache_full = stats.recycle_stats.cache_full;
+ rq_stats->pp_recycle_ring = stats.recycle_stats.ring;
+ rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
+ rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
+}
+#else
+static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
+{
+}
+#endif
+
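
With CONFIG_PAGE_POOL_STATS enabled, these counters surface through the existing ethtool stats machinery: per the usual mlx5 counter naming (an assumption, not something this hunk spells out), the software-stats variants show up as rx_pp_* aggregates and the per-RQ variants as rx<n>_pp_* entries in the ethtool -S output, one set per channel.
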
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
@@ -462,9 +521,13 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
memset(s, 0, sizeof(*s));
+ for (i = 0; i < priv->channels.num; i++) /* for active channels only */
+ mlx5e_stats_update_stats_rq_page_pool(priv->channels.c[i]);
+
for (i = 0; i < priv->stats_nch; i++) {
struct mlx5e_channel_stats *channel_stats =
priv->channel_stats[i];
+
int j;
mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
@@ -1887,6 +1950,19 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
+#ifdef CONFIG_PAGE_POOL_STATS
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow_high_order) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_empty) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_refill) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_waive) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cached) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cache_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) },
+#endif
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
@@ -2348,7 +2424,7 @@ MLX5E_DEFINE_STATS_GRP(channels, 0);
MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
static MLX5E_DEFINE_STATS_GRP(tls, 0);
-static MLX5E_DEFINE_STATS_GRP(ptp, 0);
+MLX5E_DEFINE_STATS_GRP(ptp, 0);
static MLX5E_DEFINE_STATS_GRP(qos, 0);
/* The stats groups order is opposite to the update_stats() order calls */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 2c1ed5b81be6..a7a025d15c14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -205,7 +205,19 @@ struct mlx5e_sw_stats {
u64 ch_aff_change;
u64 ch_force_irq;
u64 ch_eq_rearm;
-
+#ifdef CONFIG_PAGE_POOL_STATS
+ u64 rx_pp_alloc_fast;
+ u64 rx_pp_alloc_slow;
+ u64 rx_pp_alloc_slow_high_order;
+ u64 rx_pp_alloc_empty;
+ u64 rx_pp_alloc_refill;
+ u64 rx_pp_alloc_waive;
+ u64 rx_pp_recycle_cached;
+ u64 rx_pp_recycle_cache_full;
+ u64 rx_pp_recycle_ring;
+ u64 rx_pp_recycle_ring_full;
+ u64 rx_pp_recycle_released_ref;
+#endif
#ifdef CONFIG_MLX5_EN_TLS
u64 tx_tls_encrypted_packets;
u64 tx_tls_encrypted_bytes;
@@ -352,6 +364,19 @@ struct mlx5e_rq_stats {
u64 congst_umr;
u64 arfs_err;
u64 recover;
+#ifdef CONFIG_PAGE_POOL_STATS
+ u64 pp_alloc_fast;
+ u64 pp_alloc_slow;
+ u64 pp_alloc_slow_high_order;
+ u64 pp_alloc_empty;
+ u64 pp_alloc_refill;
+ u64 pp_alloc_waive;
+ u64 pp_recycle_cached;
+ u64 pp_recycle_cache_full;
+ u64 pp_recycle_ring;
+ u64 pp_recycle_ring_full;
+ u64 pp_recycle_released_ref;
+#endif
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_decrypted_packets;
u64 tls_decrypted_bytes;
@@ -459,5 +484,6 @@ extern MLX5E_DECLARE_STATS_GRP(channels);
extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
extern MLX5E_DECLARE_STATS_GRP(ipsec_hw);
extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
+extern MLX5E_DECLARE_STATS_GRP(ptp);
#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index b27532a9301e..e3fc15ae7bb1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -115,6 +115,7 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
static struct lock_class_key tc_ht_lock_key;
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
+static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
@@ -273,6 +274,23 @@ get_sample_priv(struct mlx5e_priv *priv)
return NULL;
}
+static struct mlx5e_post_act *
+get_post_action(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+
+ if (is_mdev_switchdev_mode(priv->mdev)) {
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ return uplink_priv->post_act;
+ }
+
+ return priv->fs.tc.post_act;
+}
+
struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
@@ -295,13 +313,62 @@ mlx5_tc_rule_delete(struct mlx5e_priv *priv,
if (is_mdev_switchdev_mode(priv->mdev)) {
mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
-
return;
}
mlx5e_del_offloaded_nic_rule(priv, rule, attr);
}
+struct mlx5_flow_handle *
+mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (attr->flags & MLX5_ATTR_FLAG_CT) {
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts =
+ &attr->parse_attr->mod_hdr_acts;
+
+ return mlx5_tc_ct_flow_offload(get_ct_priv(priv),
+ spec, attr,
+ mod_hdr_acts);
+ }
+
+ if (!is_mdev_switchdev_mode(priv->mdev))
+ return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
+
+ if (attr->flags & MLX5_ATTR_FLAG_SAMPLE)
+ return mlx5e_tc_sample_offload(get_sample_priv(priv), spec, attr);
+
+ return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+}
+
+void
+mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (attr->flags & MLX5_ATTR_FLAG_CT) {
+ mlx5_tc_ct_delete_flow(get_ct_priv(priv), attr);
+ return;
+ }
+
+ if (!is_mdev_switchdev_mode(priv->mdev)) {
+ mlx5e_del_offloaded_nic_rule(priv, rule, attr);
+ return;
+ }
+
+ if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
+ mlx5e_tc_sample_unoffload(get_sample_priv(priv), rule, attr);
+ return;
+ }
+
+ mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
+}
+
int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
@@ -1039,6 +1106,21 @@ err_ft_get:
}
static int
+alloc_flow_attr_counter(struct mlx5_core_dev *counter_dev,
+ struct mlx5_flow_attr *attr)
+
+{
+ struct mlx5_fc *counter;
+
+ counter = mlx5_fc_create(counter_dev, true);
+ if (IS_ERR(counter))
+ return PTR_ERR(counter);
+
+ attr->counter = counter;
+ return 0;
+}
+
+static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
@@ -1046,7 +1128,6 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_core_dev *dev = priv->mdev;
- struct mlx5_fc *counter;
int err;
parse_attr = attr->parse_attr;
@@ -1058,11 +1139,9 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
- counter = mlx5_fc_create(dev, true);
- if (IS_ERR(counter))
- return PTR_ERR(counter);
-
- attr->counter = counter;
+ err = alloc_flow_attr_counter(dev, attr);
+ if (err)
+ return err;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
@@ -1072,8 +1151,8 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
return err;
}
- if (flow_flag_test(flow, CT))
- flow->rule[0] = mlx5_tc_ct_flow_offload(get_ct_priv(priv), flow, &parse_attr->spec,
+ if (attr->flags & MLX5_ATTR_FLAG_CT)
+ flow->rule[0] = mlx5_tc_ct_flow_offload(get_ct_priv(priv), &parse_attr->spec,
attr, &parse_attr->mod_hdr_acts);
else
flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec,
@@ -1107,8 +1186,8 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
flow_flag_clear(flow, OFFLOADED);
- if (flow_flag_test(flow, CT))
- mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
+ if (attr->flags & MLX5_ATTR_FLAG_CT)
+ mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr);
else if (!IS_ERR_OR_NULL(flow->rule[0]))
mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);
@@ -1132,6 +1211,8 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
if (flow_flag_test(flow, HAIRPIN))
mlx5e_hairpin_flow_del(priv, flow);
+ free_flow_post_acts(flow);
+
kvfree(attr->parse_attr);
kfree(flow->attr);
}
@@ -1142,40 +1223,27 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
- struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
struct mlx5_flow_handle *rule;
- if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
+ if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)
return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
- if (flow_flag_test(flow, CT)) {
- mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
-
- rule = mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
- flow, spec, attr,
- mod_hdr_acts);
- } else if (flow_flag_test(flow, SAMPLE)) {
- rule = mlx5e_tc_sample_offload(get_sample_priv(flow->priv), spec, attr,
- mlx5e_tc_get_flow_tun_id(flow));
- } else {
- rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
- }
+ rule = mlx5e_tc_rule_offload(flow->priv, spec, attr);
if (IS_ERR(rule))
return rule;
if (attr->esw_attr->split_count) {
flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
- if (IS_ERR(flow->rule[1])) {
- if (flow_flag_test(flow, CT))
- mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
- else
- mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
- return flow->rule[1];
- }
+ if (IS_ERR(flow->rule[1]))
+ goto err_rule1;
}
return rule;
+
+err_rule1:
+ mlx5e_tc_rule_unoffload(flow->priv, rule, attr);
+ return flow->rule[1];
}
void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
@@ -1184,19 +1252,13 @@ void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
{
flow_flag_clear(flow, OFFLOADED);
- if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
- goto offload_rule_0;
+ if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)
+ return mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
if (attr->esw_attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
- if (flow_flag_test(flow, CT))
- mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
- else if (flow_flag_test(flow, SAMPLE))
- mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
- else
-offload_rule_0:
- mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
+ mlx5e_tc_rule_unoffload(flow->priv, flow->rule[0], attr);
}
struct mlx5_flow_handle *
@@ -1214,7 +1276,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->esw_attr->split_count = 0;
- slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+ slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
if (!IS_ERR(rule))
@@ -1239,7 +1301,7 @@ void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->esw_attr->split_count = 0;
- slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+ slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
flow_flag_clear(flow, SLOW);
kfree(slow_attr);
@@ -1348,10 +1410,10 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
}
int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow)
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr)
{
- struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &parse_attr->mod_hdr_acts;
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
struct mlx5_modify_hdr *mod_hdr;
mod_hdr = mlx5_modify_header_alloc(priv->mdev,
@@ -1361,13 +1423,107 @@ int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
if (IS_ERR(mod_hdr))
return PTR_ERR(mod_hdr);
- WARN_ON(flow->attr->modify_hdr);
- flow->attr->modify_hdr = mod_hdr;
+ WARN_ON(attr->modify_hdr);
+ attr->modify_hdr = mod_hdr;
return 0;
}
static int
+set_encap_dests(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ struct netlink_ext_ack *extack,
+ bool *encap_valid,
+ bool *vf_tun)
+{
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_esw_flow_attr *esw_attr;
+ struct net_device *encap_dev = NULL;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_priv *out_priv;
+ int out_index;
+ int err = 0;
+
+ if (!mlx5e_is_eswitch_flow(flow))
+ return 0;
+
+ parse_attr = attr->parse_attr;
+ esw_attr = attr->esw_attr;
+ *vf_tun = false;
+ *encap_valid = true;
+
+ for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
+ struct net_device *out_dev;
+ int mirred_ifindex;
+
+ if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
+ continue;
+
+ mirred_ifindex = parse_attr->mirred_ifindex[out_index];
+ out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
+ if (!out_dev) {
+ NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
+ err = -ENODEV;
+ goto out;
+ }
+ err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index,
+ extack, &encap_dev, encap_valid);
+ dev_put(out_dev);
+ if (err)
+ goto out;
+
+ if (esw_attr->dests[out_index].flags &
+ MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
+ !esw_attr->dest_int_port)
+ *vf_tun = true;
+
+ out_priv = netdev_priv(encap_dev);
+ rpriv = out_priv->ppriv;
+ esw_attr->dests[out_index].rep = rpriv->rep;
+ esw_attr->dests[out_index].mdev = out_priv->mdev;
+ }
+
+ if (*vf_tun && esw_attr->out_count > 1) {
+ NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+static void
+clean_encap_dests(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ bool *vf_tun)
+{
+ struct mlx5_esw_flow_attr *esw_attr;
+ int out_index;
+
+ if (!mlx5e_is_eswitch_flow(flow))
+ return;
+
+ esw_attr = attr->esw_attr;
+ *vf_tun = false;
+
+ for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
+ if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
+ continue;
+
+ if (esw_attr->dests[out_index].flags &
+ MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
+ !esw_attr->dest_int_port)
+ *vf_tun = true;
+
+ mlx5e_detach_encap(priv, flow, attr, out_index);
+ kfree(attr->parse_attr->tun_info[out_index]);
+ }
+}
+
+static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
@@ -1375,15 +1531,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
- bool vf_tun = false, encap_valid = true;
- struct net_device *encap_dev = NULL;
struct mlx5_esw_flow_attr *esw_attr;
- struct mlx5e_rep_priv *rpriv;
- struct mlx5e_priv *out_priv;
- struct mlx5_fc *counter;
+ bool vf_tun, encap_valid;
u32 max_prio, max_chain;
int err = 0;
- int out_index;
parse_attr = attr->parse_attr;
esw_attr = attr->esw_attr;
@@ -1472,50 +1623,17 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
esw_attr->int_port = int_port;
}
- for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
- struct net_device *out_dev;
- int mirred_ifindex;
-
- if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
- continue;
-
- mirred_ifindex = parse_attr->mirred_ifindex[out_index];
- out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
- if (!out_dev) {
- NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
- err = -ENODEV;
- goto err_out;
- }
- err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
- extack, &encap_dev, &encap_valid);
- dev_put(out_dev);
- if (err)
- goto err_out;
-
- if (esw_attr->dests[out_index].flags &
- MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
- !esw_attr->dest_int_port)
- vf_tun = true;
- out_priv = netdev_priv(encap_dev);
- rpriv = out_priv->ppriv;
- esw_attr->dests[out_index].rep = rpriv->rep;
- esw_attr->dests[out_index].mdev = out_priv->mdev;
- }
-
- if (vf_tun && esw_attr->out_count > 1) {
- NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
- err = -EOPNOTSUPP;
+ err = set_encap_dests(priv, flow, attr, extack, &encap_valid, &vf_tun);
+ if (err)
goto err_out;
- }
err = mlx5_eswitch_add_vlan_action(esw, attr);
if (err)
goto err_out;
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
- !(attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR)) {
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
if (vf_tun) {
- err = mlx5e_tc_add_flow_mod_hdr(priv, parse_attr, flow);
+ err = mlx5e_tc_add_flow_mod_hdr(priv, flow, attr);
if (err)
goto err_out;
} else {
@@ -1526,20 +1644,16 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
- counter = mlx5_fc_create(esw_attr->counter_dev, true);
- if (IS_ERR(counter)) {
- err = PTR_ERR(counter);
+ err = alloc_flow_attr_counter(esw_attr->counter_dev, attr);
+ if (err)
goto err_out;
- }
-
- attr->counter = counter;
}
/* we get here if one of the following takes place:
* (1) there's no error
* (2) there's an encap action and we don't have valid neigh
*/
- if (!encap_valid)
+ if (!encap_valid || flow_flag_test(flow, SLOW))
flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
else
flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
@@ -1576,8 +1690,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_esw_flow_attr *esw_attr;
- bool vf_tun = false;
- int out_index;
+ bool vf_tun;
esw_attr = attr->esw_attr;
mlx5e_put_flow_tunnel_id(flow);
@@ -1601,16 +1714,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (flow->decap_route)
mlx5e_detach_decap_route(priv, flow);
- for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
- if (esw_attr->dests[out_index].flags &
- MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
- !esw_attr->dest_int_port)
- vf_tun = true;
- if (esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
- mlx5e_detach_encap(priv, flow, out_index);
- kfree(attr->parse_attr->tun_info[out_index]);
- }
- }
+ clean_encap_dests(priv, flow, attr, &vf_tun);
mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);
@@ -1634,7 +1738,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (flow_flag_test(flow, L3_TO_L2_DECAP))
mlx5e_detach_decap(priv, flow);
- kfree(attr->sample_attr);
+ free_flow_post_acts(flow);
+
kvfree(attr->esw_attr->rx_tun_attr);
kvfree(attr->parse_attr);
kfree(flow->attr);
@@ -1642,7 +1747,10 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
- return flow->attr->counter;
+ struct mlx5_flow_attr *attr;
+
+ attr = list_first_entry(&flow->attrs, struct mlx5_flow_attr, list);
+ return attr->counter;
}
/* Iterate over tmp_list of flows attached to flow_list head. */
@@ -1854,7 +1962,7 @@ static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
}
- flow->tunnel_id = value;
+ flow->attr->tunnel_id = value;
return 0;
err_set:
@@ -1868,8 +1976,8 @@ err_enc_opts:
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
{
- u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK;
- u32 tun_id = flow->tunnel_id >> ENC_OPTS_BITS;
+ u32 enc_opts_id = flow->attr->tunnel_id & ENC_OPTS_BITS_MASK;
+ u32 tun_id = flow->attr->tunnel_id >> ENC_OPTS_BITS;
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
struct mlx5_eswitch *esw;
@@ -1885,11 +1993,6 @@ static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
enc_opts_id);
}
-u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow)
-{
- return flow->tunnel_id;
-}
-
void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
struct flow_match_basic *match, bool outer,
void *headers_c, void *headers_v)
@@ -2811,14 +2914,15 @@ static unsigned long mask_to_le(unsigned long mask, int size)
return mask;
}
+
static int offload_pedit_fields(struct mlx5e_priv *priv,
int namespace,
- struct pedit_headers_action *hdrs,
struct mlx5e_tc_flow_parse_attr *parse_attr,
u32 *action_flags,
struct netlink_ext_ack *extack)
{
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
+ struct pedit_headers_action *hdrs = parse_attr->hdrs;
void *headers_c, *headers_v, *action, *vals_p;
u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
struct mlx5e_tc_mod_hdr_acts *mod_acts;
@@ -2944,35 +3048,43 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
static const struct pedit_headers zero_masks = {};
-static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
- u32 *action_flags,
- struct netlink_ext_ack *extack)
+static int verify_offload_pedit_fields(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct netlink_ext_ack *extack)
{
struct pedit_headers *cmd_masks;
- int err;
u8 cmd;
- err = offload_pedit_fields(priv, namespace, hdrs, parse_attr,
- action_flags, extack);
- if (err < 0)
- goto out_dealloc_parsed_actions;
-
for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
- cmd_masks = &hdrs[cmd].masks;
+ cmd_masks = &parse_attr->hdrs[cmd].masks;
if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
- NL_SET_ERR_MSG_MOD(extack,
- "attempt to offload an unsupported field");
+ NL_SET_ERR_MSG_MOD(extack, "attempt to offload an unsupported field");
netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
16, 1, cmd_masks, sizeof(zero_masks), true);
- err = -EOPNOTSUPP;
- goto out_dealloc_parsed_actions;
+ return -EOPNOTSUPP;
}
}
return 0;
+}
+
+static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ u32 *action_flags,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = offload_pedit_fields(priv, namespace, parse_attr, action_flags, extack);
+ if (err)
+ goto out_dealloc_parsed_actions;
+
+ err = verify_offload_pedit_fields(priv, parse_attr, extack);
+ if (err)
+ goto out_dealloc_parsed_actions;
+
+ return 0;
out_dealloc_parsed_actions:
mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
@@ -3176,11 +3288,11 @@ actions_match_supported_fdb(struct mlx5e_priv *priv,
static bool
actions_match_supported(struct mlx5e_priv *priv,
struct flow_action *flow_action,
+ u32 actions,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
- u32 actions = flow->attr->action;
bool ct_flow, ct_clear;
ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
@@ -3248,57 +3360,13 @@ bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
}
static int
-parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
- struct flow_action *flow_action)
-{
- struct netlink_ext_ack *extack = parse_state->extack;
- struct mlx5e_tc_flow *flow = parse_state->flow;
- struct mlx5_flow_attr *attr = flow->attr;
- enum mlx5_flow_namespace_type ns_type;
- struct mlx5e_priv *priv = flow->priv;
- const struct flow_action_entry *act;
- struct mlx5e_tc_act *tc_act;
- int err, i;
-
- ns_type = mlx5e_get_flow_namespace(flow);
-
- flow_action_for_each(i, act, flow_action) {
- tc_act = mlx5e_tc_act_get(act->id, ns_type);
- if (!tc_act) {
- NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
- return -EOPNOTSUPP;
- }
-
- if (!tc_act->can_offload(parse_state, act, i))
- return -EOPNOTSUPP;
-
- err = tc_act->parse_action(parse_state, act, priv, attr);
- if (err)
- return err;
- }
-
- flow_action_for_each(i, act, flow_action) {
- tc_act = mlx5e_tc_act_get(act->id, ns_type);
- if (!tc_act || !tc_act->post_parse ||
- !tc_act->can_offload(parse_state, act, i))
- continue;
-
- err = tc_act->post_parse(parse_state, priv, attr);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int
actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr,
- struct pedit_headers_action *hdrs,
struct netlink_ext_ack *extack)
{
struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
+ struct pedit_headers_action *hdrs = parse_attr->hdrs;
enum mlx5_flow_namespace_type ns_type;
int err;
@@ -3308,8 +3376,7 @@ actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
ns_type = mlx5e_get_flow_namespace(flow);
- err = alloc_tc_pedit_action(priv, ns_type, parse_attr, hdrs,
- &attr->action, extack);
+ err = alloc_tc_pedit_action(priv, ns_type, parse_attr, &attr->action, extack);
if (err)
return err;
@@ -3330,6 +3397,299 @@ actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
return 0;
}
+static struct mlx5_flow_attr*
+mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
+ enum mlx5_flow_namespace_type ns_type)
+{
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ u32 attr_sz = ns_to_attr_sz(ns_type);
+ struct mlx5_flow_attr *attr2;
+
+ attr2 = mlx5_alloc_flow_attr(ns_type);
+ parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
+ if (!attr2 || !parse_attr) {
+ kvfree(parse_attr);
+ kfree(attr2);
+ return NULL;
+ }
+
+ memcpy(attr2, attr, attr_sz);
+ INIT_LIST_HEAD(&attr2->list);
+ parse_attr->filter_dev = attr->parse_attr->filter_dev;
+ attr2->action = 0;
+ attr2->flags = 0;
+ attr2->parse_attr = parse_attr;
+ return attr2;
+}
+
+static struct mlx5_core_dev *
+get_flow_counter_dev(struct mlx5e_tc_flow *flow)
+{
+ return mlx5e_is_eswitch_flow(flow) ? flow->attr->esw_attr->counter_dev : flow->priv->mdev;
+}
+
+struct mlx5_flow_attr *
+mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_esw_flow_attr *esw_attr;
+ struct mlx5_flow_attr *attr;
+ int i;
+
+ list_for_each_entry(attr, &flow->attrs, list) {
+ esw_attr = attr->esw_attr;
+ for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
+ if (esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP)
+ return attr;
+ }
+ }
+
+ return NULL;
+}
+
+void
+mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow)
+{
+ struct mlx5e_post_act *post_act = get_post_action(flow->priv);
+ struct mlx5_flow_attr *attr;
+
+ list_for_each_entry(attr, &flow->attrs, list) {
+ if (list_is_last(&attr->list, &flow->attrs))
+ break;
+
+ mlx5e_tc_post_act_unoffload(post_act, attr->post_act_handle);
+ }
+}
+
+static void
+free_flow_post_acts(struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow);
+ struct mlx5e_post_act *post_act = get_post_action(flow->priv);
+ struct mlx5_flow_attr *attr, *tmp;
+ bool vf_tun;
+
+ list_for_each_entry_safe(attr, tmp, &flow->attrs, list) {
+ if (list_is_last(&attr->list, &flow->attrs))
+ break;
+
+ if (attr->post_act_handle)
+ mlx5e_tc_post_act_del(post_act, attr->post_act_handle);
+
+ clean_encap_dests(flow->priv, flow, attr, &vf_tun);
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
+ mlx5_fc_destroy(counter_dev, attr->counter);
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
+ if (attr->modify_hdr)
+ mlx5_modify_header_dealloc(flow->priv->mdev, attr->modify_hdr);
+ }
+
+ list_del(&attr->list);
+ kvfree(attr->parse_attr);
+ kfree(attr);
+ }
+}
+
+int
+mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow)
+{
+ struct mlx5e_post_act *post_act = get_post_action(flow->priv);
+ struct mlx5_flow_attr *attr;
+ int err = 0;
+
+ list_for_each_entry(attr, &flow->attrs, list) {
+ if (list_is_last(&attr->list, &flow->attrs))
+ break;
+
+ err = mlx5e_tc_post_act_offload(post_act, attr->post_act_handle);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/* TC filter rule HW translation:
+ *
+ * +---------------------+
+ * + ft prio (tc chain) +
+ * + original match +
+ * +---------------------+
+ * |
+ * | if multi table action
+ * |
+ * v
+ * +---------------------+
+ * + post act ft |<----.
+ * + match fte id | | split on multi table action
+ * + do actions |-----'
+ * +---------------------+
+ * |
+ * |
+ * v
+ * Do rest of the actions after last multi table action.
+ */
+static int
+alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
+{
+ struct mlx5e_post_act *post_act = get_post_action(flow->priv);
+ struct mlx5_flow_attr *attr, *next_attr = NULL;
+ struct mlx5e_post_act_handle *handle;
+ bool vf_tun, encap_valid = true;
+ int err;
+
+ /* This is going in reverse order as needed.
+ * The first entry is the last attribute.
+ */
+ list_for_each_entry(attr, &flow->attrs, list) {
+ if (!next_attr) {
+ /* Set counter action on last post act rule. */
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ } else {
+ err = mlx5e_tc_act_set_next_post_act(flow, attr, next_attr);
+ if (err)
+ goto out_free;
+ }
+
+ /* Don't add post_act rule for first attr (last in the list).
+ * It's being handled by the caller.
+ */
+ if (list_is_last(&attr->list, &flow->attrs))
+ break;
+
+ err = set_encap_dests(flow->priv, flow, attr, extack, &encap_valid, &vf_tun);
+ if (err)
+ goto out_free;
+
+ if (!encap_valid)
+ flow_flag_set(flow, SLOW);
+
+ err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack);
+ if (err)
+ goto out_free;
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr);
+ if (err)
+ goto out_free;
+ }
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ err = alloc_flow_attr_counter(get_flow_counter_dev(flow), attr);
+ if (err)
+ goto out_free;
+ }
+
+ handle = mlx5e_tc_post_act_add(post_act, attr);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ goto out_free;
+ }
+
+ attr->post_act_handle = handle;
+ next_attr = attr;
+ }
+
+ if (flow_flag_test(flow, SLOW))
+ goto out;
+
+ err = mlx5e_tc_offload_flow_post_acts(flow);
+ if (err)
+ goto out_free;
+
+out:
+ return 0;
+
+out_free:
+ free_flow_post_acts(flow);
+ return err;
+}
+
+static int
+parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
+ struct flow_action *flow_action)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_tc_flow_action flow_action_reorder;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+ struct mlx5_flow_attr *attr = flow->attr;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5e_priv *priv = flow->priv;
+ struct flow_action_entry *act, **_act;
+ struct mlx5e_tc_act *tc_act;
+ int err, i;
+
+ flow_action_reorder.num_entries = flow_action->num_entries;
+ flow_action_reorder.entries = kcalloc(flow_action->num_entries,
+ sizeof(flow_action), GFP_KERNEL);
+ if (!flow_action_reorder.entries)
+ return -ENOMEM;
+
+ mlx5e_tc_act_reorder_flow_actions(flow_action, &flow_action_reorder);
+
+ ns_type = mlx5e_get_flow_namespace(flow);
+ list_add(&attr->list, &flow->attrs);
+
+ flow_action_for_each(i, _act, &flow_action_reorder) {
+ act = *_act;
+ tc_act = mlx5e_tc_act_get(act->id, ns_type);
+ if (!tc_act) {
+ NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
+ err = -EOPNOTSUPP;
+ goto out_free;
+ }
+
+ if (!tc_act->can_offload(parse_state, act, i, attr)) {
+ err = -EOPNOTSUPP;
+ goto out_free;
+ }
+
+ err = tc_act->parse_action(parse_state, act, priv, attr);
+ if (err)
+ goto out_free;
+
+ parse_state->actions |= attr->action;
+
+ /* Split attr for multi table act if not the last act. */
+ if (tc_act->is_multi_table_act &&
+ tc_act->is_multi_table_act(priv, act, attr) &&
+ i < flow_action_reorder.num_entries - 1) {
+ err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
+ if (err)
+ goto out_free;
+
+ attr = mlx5e_clone_flow_attr_for_post_act(flow->attr, ns_type);
+ if (!attr) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ list_add(&attr->list, &flow->attrs);
+ }
+ }
+
+ kfree(flow_action_reorder.entries);
+
+ err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
+ if (err)
+ goto out_free_post_acts;
+
+ err = alloc_flow_post_acts(flow, extack);
+ if (err)
+ goto out_free_post_acts;
+
+ return 0;
+
+out_free:
+ kfree(flow_action_reorder.entries);
+out_free_post_acts:
+ free_flow_post_acts(flow);
+
+ return err;
+}
+
static int
flow_action_supported(struct flow_action *flow_action,
struct netlink_ext_ack *extack)
@@ -3357,7 +3717,6 @@ parse_tc_nic_actions(struct mlx5e_priv *priv,
struct mlx5e_tc_act_parse_state *parse_state;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
- struct pedit_headers_action *hdrs;
int err;
err = flow_action_supported(flow_action, extack);
@@ -3369,17 +3728,17 @@ parse_tc_nic_actions(struct mlx5e_priv *priv,
parse_state = &parse_attr->parse_state;
mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
parse_state->ct_priv = get_ct_priv(priv);
- hdrs = parse_state->hdrs;
err = parse_tc_actions(parse_state, flow_action);
if (err)
return err;
- err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
+ err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
if (err)
return err;
- if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
+ if (!actions_match_supported(priv, flow_action, parse_state->actions,
+ parse_attr, flow, extack))
return -EOPNOTSUPP;
return 0;
@@ -3480,7 +3839,6 @@ parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_esw_flow_attr *esw_attr;
- struct pedit_headers_action *hdrs;
int err;
err = flow_action_supported(flow_action, extack);
@@ -3492,7 +3850,6 @@ parse_tc_fdb_actions(struct mlx5e_priv *priv,
parse_state = &parse_attr->parse_state;
mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
parse_state->ct_priv = get_ct_priv(priv);
- hdrs = parse_state->hdrs;
err = parse_tc_actions(parse_state, flow_action);
if (err)
@@ -3506,11 +3863,12 @@ parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
+ err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
if (err)
return err;
- if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
+ if (!actions_match_supported(priv, flow_action, parse_state->actions,
+ parse_attr, flow, extack))
return -EOPNOTSUPP;
return 0;
@@ -3545,12 +3903,11 @@ static const struct rhashtable_params tc_ht_params = {
static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
unsigned long flags)
{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_rep_priv *uplink_rpriv;
+ struct mlx5e_rep_priv *rpriv;
if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- return &uplink_rpriv->uplink_priv.tc_ht;
+ rpriv = priv->ppriv;
+ return &rpriv->tc_ht;
} else /* NIC offload */
return &priv->fs.tc.ht;
}
@@ -3585,7 +3942,12 @@ mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
sizeof(struct mlx5_nic_flow_attr);
struct mlx5_flow_attr *attr;
- return kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
+ attr = kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
+ if (!attr)
+ return attr;
+
+ INIT_LIST_HEAD(&attr->list);
+ return attr;
}
static int
@@ -3619,6 +3981,7 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
INIT_LIST_HEAD(&flow->encaps[out_index].list);
INIT_LIST_HEAD(&flow->hairpin);
INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
+ INIT_LIST_HEAD(&flow->attrs);
refcount_set(&flow->refcnt, 1);
init_completion(&flow->init_done);
init_completion(&flow->del_hw_done);
@@ -4119,6 +4482,46 @@ static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
return err;
}
+static int mlx5e_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
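/* A hedged sketch (not part of this patch) of a police entry that passes the
 * checks above: byte-rate only, conform action "pipe", exceed action "drop".
 * Setting any of peakrate/avrate/overhead, or a packet-per-second rate, makes
 * mlx5e_policer_validate() return -EOPNOTSUPP. demo_validate_police() is an
 * illustrative name, not a driver function.
 */
static int demo_validate_police(struct flow_action *action,
				struct netlink_ext_ack *extack)
{
	struct flow_action_entry act = {
		.id = FLOW_ACTION_POLICE,
		.police = {
			.rate_bytes_ps = 125000000,	/* ~1 Gbit/s */
			.exceed.act_id = FLOW_ACTION_DROP,
			.notexceed.act_id = FLOW_ACTION_PIPE,
		},
	};

	return mlx5e_policer_validate(action, &act, extack);
}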
+
static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct netlink_ext_ack *extack)
@@ -4146,10 +4549,10 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_POLICE:
- if (act->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
- return -EOPNOTSUPP;
- }
+ err = mlx5e_policer_validate(flow_action, act, extack);
+ if (err)
+ return err;
+
err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
if (err)
return err;
@@ -4383,10 +4786,27 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
mlx5_chains_destroy(tc->chains);
}
-int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
+int mlx5e_tc_ht_init(struct rhashtable *tc_ht)
+{
+ int err;
+
+ err = rhashtable_init(tc_ht, &tc_ht_params);
+ if (err)
+ return err;
+
+ lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
+
+ return 0;
+}
+
+void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht)
+{
+ rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
+}
+
+int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
{
const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
- struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *rpriv;
struct mapping_ctx *mapping;
struct mlx5_eswitch *esw;
@@ -4394,7 +4814,6 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
u64 mapping_id;
int err = 0;
- uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
priv = netdev_priv(rpriv->netdev);
esw = priv->mdev->priv.eswitch;
@@ -4434,12 +4853,6 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
}
uplink_priv->tunnel_enc_opts_mapping = mapping;
- err = rhashtable_init(tc_ht, &tc_ht_params);
- if (err)
- goto err_ht_init;
-
- lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
-
uplink_priv->encap = mlx5e_tc_tun_init(priv);
if (IS_ERR(uplink_priv->encap)) {
err = PTR_ERR(uplink_priv->encap);
@@ -4449,8 +4862,6 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
return 0;
err_register_fib_notifier:
- rhashtable_destroy(tc_ht);
-err_ht_init:
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
mapping_destroy(uplink_priv->tunnel_mapping);
@@ -4464,13 +4875,8 @@ err_tun_mapping:
return err;
}
-void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
+void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
{
- struct mlx5_rep_uplink_priv *uplink_priv;
-
- uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
-
- rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
mlx5e_tc_tun_cleanup(uplink_priv->encap);
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 5ffae9b13066..a80b00946f1b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -53,7 +53,6 @@
ESW_FLOW_ATTR_SZ :\
NIC_FLOW_ATTR_SZ)
-
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
struct mlx5e_tc_update_priv {
@@ -71,7 +70,7 @@ struct mlx5_flow_attr {
struct mlx5_fc *counter;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_ct_attr ct_attr;
- struct mlx5e_sample_attr *sample_attr;
+ struct mlx5e_sample_attr sample_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
u32 chain;
u16 prio;
@@ -82,13 +81,33 @@ struct mlx5_flow_attr {
u8 outer_match_level;
u8 ip_version;
u8 tun_ip_version;
+ int tunnel_id; /* mapped tunnel id */
u32 flags;
+ struct list_head list;
+ struct mlx5e_post_act_handle *post_act_handle;
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
};
};
+enum {
+ MLX5_ATTR_FLAG_VLAN_HANDLED = BIT(0),
+ MLX5_ATTR_FLAG_SLOW_PATH = BIT(1),
+ MLX5_ATTR_FLAG_NO_IN_PORT = BIT(2),
+ MLX5_ATTR_FLAG_SRC_REWRITE = BIT(3),
+ MLX5_ATTR_FLAG_SAMPLE = BIT(4),
+ MLX5_ATTR_FLAG_ACCEPT = BIT(5),
+ MLX5_ATTR_FLAG_CT = BIT(6),
+};
+
+/* Returns true if any of the flags that require skipping further TC/NF processing are set. */
+static inline bool
+mlx5e_tc_attr_flags_skip(u32 attr_flags)
+{
+ return attr_flags & (MLX5_ATTR_FLAG_SLOW_PATH | MLX5_ATTR_FLAG_ACCEPT);
+}
+
struct mlx5_rx_tun_attr {
u16 decap_vport;
union {
@@ -149,8 +168,11 @@ enum {
#define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT)
-int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
-void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
+int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv);
+void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv);
+
+int mlx5e_tc_ht_init(struct rhashtable *tc_ht);
+void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht);
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags);
@@ -243,11 +265,8 @@ int mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
u32 data);
int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow);
-
-struct mlx5e_tc_flow;
-u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow);
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr);
void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
struct flow_match_basic *match, bool outer,
@@ -289,6 +308,8 @@ int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
#else /* CONFIG_MLX5_CLS_ACT */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
+static inline int mlx5e_tc_ht_init(struct rhashtable *tc_ht) { return 0; }
+static inline void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht) {}
static inline int
mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{ return -EOPNOTSUPP; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index ee7ecb88adc1..2dc48406cd08 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -53,117 +53,6 @@ static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
}
}
-#ifdef CONFIG_MLX5_CORE_EN_DCB
-static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
-{
- int dscp_cp = 0;
-
- if (skb->protocol == htons(ETH_P_IP))
- dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
- else if (skb->protocol == htons(ETH_P_IPV6))
- dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
-
- return priv->dcbx_dp.dscp2prio[dscp_cp];
-}
-#endif
-
-static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb)
-{
- struct mlx5e_priv *priv = netdev_priv(dev);
- int up = 0;
-
- if (!netdev_get_num_tc(dev))
- goto return_txq;
-
-#ifdef CONFIG_MLX5_CORE_EN_DCB
- if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
- up = mlx5e_get_dscp_up(priv, skb);
- else
-#endif
- if (skb_vlan_tag_present(skb))
- up = skb_vlan_tag_get_prio(skb);
-
-return_txq:
- return priv->port_ptp_tc2realtxq[up];
-}
-
-static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb,
- u16 htb_maj_id)
-{
- u16 classid;
-
- if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id)
- classid = TC_H_MIN(skb->priority);
- else
- classid = READ_ONCE(priv->htb.defcls);
-
- if (!classid)
- return 0;
-
- return mlx5e_get_txq_by_classid(priv, classid);
-}
-
-u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev)
-{
- struct mlx5e_priv *priv = netdev_priv(dev);
- int num_tc_x_num_ch;
- int txq_ix;
- int up = 0;
- int ch_ix;
-
- /* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */
- num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch);
- if (unlikely(dev->real_num_tx_queues > num_tc_x_num_ch)) {
- struct mlx5e_ptp *ptp_channel;
-
- /* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
- u16 htb_maj_id = smp_load_acquire(&priv->htb.maj_id);
-
- if (unlikely(htb_maj_id)) {
- txq_ix = mlx5e_select_htb_queue(priv, skb, htb_maj_id);
- if (txq_ix > 0)
- return txq_ix;
- }
-
- ptp_channel = READ_ONCE(priv->channels.ptp);
- if (unlikely(ptp_channel &&
- test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
- mlx5e_use_ptpsq(skb)))
- return mlx5e_select_ptpsq(dev, skb);
-
- txq_ix = netdev_pick_tx(dev, skb, NULL);
- /* Fix netdev_pick_tx() not to choose ptp_channel and HTB txqs.
- * If they are selected, switch to regular queues.
- * Driver to select these queues only at mlx5e_select_ptpsq()
- * and mlx5e_select_htb_queue().
- */
- if (unlikely(txq_ix >= num_tc_x_num_ch))
- txq_ix %= num_tc_x_num_ch;
- } else {
- txq_ix = netdev_pick_tx(dev, skb, NULL);
- }
-
- if (!netdev_get_num_tc(dev))
- return txq_ix;
-
-#ifdef CONFIG_MLX5_CORE_EN_DCB
- if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
- up = mlx5e_get_dscp_up(priv, skb);
- else
-#endif
- if (skb_vlan_tag_present(skb))
- up = skb_vlan_tag_get_prio(skb);
-
- /* Normalize any picked txq_ix to [0, num_channels),
- * So we can return a txq_ix that matches the channel and
- * packet UP.
- */
- ch_ix = priv->txq2sq[txq_ix]->ch_ix;
-
- return priv->channel_tc2realtxq[ch_ix][up];
-}
-
static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
@@ -544,7 +433,7 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe *wqe;
u16 pi;
- pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
+ pi = mlx5e_txqsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs);
wqe = MLX5E_TX_FETCH_WQE(sq, pi);
net_prefetchw(wqe->data);
@@ -645,7 +534,7 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_tx_skb_update_hwts_flags(skb);
- if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe))) {
+ if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe, sq->max_sq_mpw_wqebbs))) {
/* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */
cseg = mlx5e_tx_mpwqe_session_complete(sq);
@@ -691,8 +580,21 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
struct mlx5e_txqsq *sq;
u16 pi;
+ /* All changes to txq2sq are performed in sync with mlx5e_xmit, when the
+ * queue being changed is disabled, and smp_wmb guarantees that the
+ * changes are visible before mlx5e_xmit tries to read from txq2sq. It
+ * guarantees that the value of txq2sq[qid] doesn't change while
+ * mlx5e_xmit is running on queue number qid. smp_wmb is paired with
+ * HARD_TX_LOCK around ndo_start_xmit, which serves as an ACQUIRE.
+ */
sq = priv->txq2sq[skb_get_queue_mapping(skb)];
if (unlikely(!sq)) {
+ /* Two cases when sq can be NULL:
+ * 1. The HTB node is registered, and mlx5e_select_queue
+ * selected its queue ID, but the SQ itself is not yet created.
+ * 2. HTB SQ creation failed. Similar to the previous case, but
+ * the SQ won't be created.
+ */
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
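/* A rough sketch (not part of this patch) of the publish side that the
 * txq2sq comment above relies on: the mapping is only rewritten while the
 * corresponding txq is stopped, and the store is ordered with smp_wmb()
 * before the queue is started again, so mlx5e_xmit() never sees a stale or
 * half-published entry. demo_publish_sq() and its exact steps are
 * illustrative, not the driver's actual activation path.
 */
static void demo_publish_sq(struct mlx5e_priv *priv, int qid,
			    struct mlx5e_txqsq *sq)
{
	struct netdev_queue *txq = netdev_get_tx_queue(priv->netdev, qid);

	netif_tx_stop_queue(txq);	/* no mlx5e_xmit() on this qid now */

	priv->txq2sq[qid] = sq;		/* update the mapping */
	smp_wmb();			/* publish before the queue is usable */

	netif_tx_start_queue(txq);
}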
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 48a45aa54a3c..229728c80233 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -5,7 +5,6 @@
#include <linux/interrupt.h>
#include <linux/notifier.h>
-#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eq.h>
@@ -439,7 +438,8 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev)
struct mlx5_eq_table *eq_table;
int i;
- eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL);
+ eq_table = kvzalloc_node(sizeof(*eq_table), GFP_KERNEL,
+ dev->priv.numa_node);
if (!eq_table)
return -ENOMEM;
@@ -728,7 +728,8 @@ struct mlx5_eq *
mlx5_eq_create_generic(struct mlx5_core_dev *dev,
struct mlx5_eq_param *param)
{
- struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
+ struct mlx5_eq *eq = kvzalloc_node(sizeof(*eq), GFP_KERNEL,
+ dev->priv.numa_node);
int err;
if (!eq)
@@ -888,10 +889,11 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
return ncomp_eqs;
INIT_LIST_HEAD(&table->comp_eqs_list);
nent = comp_eq_depth_devlink_param_get(dev);
+
for (i = 0; i < ncomp_eqs; i++) {
struct mlx5_eq_param param = {};
- eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+ eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, dev->priv.numa_node);
if (!eq) {
err = -ENOMEM;
goto clean;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
index 39e948bc1204..a994e71e05c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
@@ -92,6 +92,7 @@ static int esw_acl_ingress_mod_metadata_create(struct mlx5_eswitch *esw,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
+ flow_act.fg = vport->ingress.offloads.metadata_allmatch_grp;
vport->ingress.offloads.modify_metadata_rule =
mlx5_add_flow_rules(vport->ingress.acl,
NULL, &flow_act, NULL, 0);
@@ -117,6 +118,36 @@ static void esw_acl_ingress_mod_metadata_destroy(struct mlx5_eswitch *esw,
vport->ingress.offloads.modify_metadata_rule = NULL;
}
+static int esw_acl_ingress_src_port_drop_create(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *flow_rule;
+ int err = 0;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ flow_act.fg = vport->ingress.offloads.drop_grp;
+ flow_rule = mlx5_add_flow_rules(vport->ingress.acl, NULL, &flow_act, NULL, 0);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ goto out;
+ }
+
+ vport->ingress.offloads.drop_rule = flow_rule;
+out:
+ return err;
+}
+
+static void esw_acl_ingress_src_port_drop_destroy(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (!vport->ingress.offloads.drop_rule)
+ return;
+
+ mlx5_del_flow_rules(vport->ingress.offloads.drop_rule);
+ vport->ingress.offloads.drop_rule = NULL;
+}
+
static int esw_acl_ingress_ofld_rules_create(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
@@ -154,6 +185,7 @@ static void esw_acl_ingress_ofld_rules_destroy(struct mlx5_eswitch *esw,
{
esw_acl_ingress_allow_rule_destroy(vport);
esw_acl_ingress_mod_metadata_destroy(esw, vport);
+ esw_acl_ingress_src_port_drop_destroy(esw, vport);
}
static int esw_acl_ingress_ofld_groups_create(struct mlx5_eswitch *esw,
@@ -170,10 +202,29 @@ static int esw_acl_ingress_ofld_groups_create(struct mlx5_eswitch *esw,
if (!flow_group_in)
return -ENOMEM;
+ if (vport->vport == MLX5_VPORT_UPLINK) {
+ /* This group can hold an FTE to drop all traffic.
+ * Needed when LAG is enabled.
+ */
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
+
+ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+ if (IS_ERR(g)) {
+ ret = PTR_ERR(g);
+ esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
+ vport->vport, ret);
+ goto drop_err;
+ }
+ vport->ingress.offloads.drop_grp = g;
+ flow_index++;
+ }
+
if (esw_acl_ingress_prio_tag_enabled(esw, vport)) {
/* This group is to hold FTE to match untagged packets when prio_tag
* is enabled.
*/
+ memset(flow_group_in, 0, inlen);
match_criteria = MLX5_ADDR_OF(create_flow_group_in,
flow_group_in, match_criteria);
MLX5_SET(create_flow_group_in, flow_group_in,
@@ -221,6 +272,11 @@ metadata_err:
vport->ingress.offloads.metadata_prio_tag_grp = NULL;
}
prio_tag_err:
+ if (!IS_ERR_OR_NULL(vport->ingress.offloads.drop_grp)) {
+ mlx5_destroy_flow_group(vport->ingress.offloads.drop_grp);
+ vport->ingress.offloads.drop_grp = NULL;
+ }
+drop_err:
kvfree(flow_group_in);
return ret;
}
@@ -236,6 +292,11 @@ static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport)
mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
vport->ingress.offloads.metadata_prio_tag_grp = NULL;
}
+
+ if (vport->ingress.offloads.drop_grp) {
+ mlx5_destroy_flow_group(vport->ingress.offloads.drop_grp);
+ vport->ingress.offloads.drop_grp = NULL;
+ }
}
int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
@@ -252,6 +313,8 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
if (mlx5_eswitch_vport_match_metadata_enabled(esw))
num_ftes++;
+ if (vport->vport == MLX5_VPORT_UPLINK)
+ num_ftes++;
if (esw_acl_ingress_prio_tag_enabled(esw, vport))
num_ftes++;
@@ -320,3 +383,27 @@ out:
vport->metadata = vport->default_metadata;
return err;
}
+
+int mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+
+ if (IS_ERR(vport)) {
+ esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
+ return PTR_ERR(vport);
+ }
+
+ return esw_acl_ingress_src_port_drop_create(esw, vport);
+}
+
+void mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+
+ if (WARN_ON_ONCE(IS_ERR(vport))) {
+ esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
+ return;
+ }
+
+ esw_acl_ingress_src_port_drop_destroy(esw, vport);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h
index c57869b93d60..11d3d3978848 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h
@@ -6,6 +6,7 @@
#include "eswitch.h"
+#ifdef CONFIG_MLX5_ESWITCH
/* Eswitch acl egress external APIs */
int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void esw_acl_egress_ofld_cleanup(struct mlx5_vport *vport);
@@ -25,5 +26,19 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vpor
void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
int mlx5_esw_acl_ingress_vport_bond_update(struct mlx5_eswitch *esw, u16 vport_num,
u32 metadata);
+void mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw, u16 vport_num);
+#else /* CONFIG_MLX5_ESWITCH */
+static inline void
+mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw,
+ u16 vport_num)
+{}
+
+static inline int mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw,
+ u16 vport_num)
+{
+ return 0;
+}
+#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_ACL_OFLD_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
index c275fe028b6d..0abef71cb839 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
@@ -86,7 +86,7 @@ mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,
mlx5_eswitch_is_vf_vport(esw, vport_num) &&
esw->dev == dest_mdev &&
attr->ip_version &&
- attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE;
+ attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE;
}
u16
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index ead5e8acc8be..bac5160837c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -113,8 +113,11 @@ struct vport_ingress {
* packet with metadata.
*/
struct mlx5_flow_group *metadata_allmatch_grp;
+ /* Optional group to add a drop all rule */
+ struct mlx5_flow_group *drop_grp;
struct mlx5_modify_hdr *modify_metadata;
struct mlx5_flow_handle *modify_metadata_rule;
+ struct mlx5_flow_handle *drop_rule;
} offloads;
};
@@ -448,22 +451,6 @@ enum {
MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE = BIT(2),
};
-enum {
- MLX5_ESW_ATTR_FLAG_VLAN_HANDLED = BIT(0),
- MLX5_ESW_ATTR_FLAG_SLOW_PATH = BIT(1),
- MLX5_ESW_ATTR_FLAG_NO_IN_PORT = BIT(2),
- MLX5_ESW_ATTR_FLAG_SRC_REWRITE = BIT(3),
- MLX5_ESW_ATTR_FLAG_SAMPLE = BIT(4),
- MLX5_ESW_ATTR_FLAG_ACCEPT = BIT(5),
-};
-
-/* Returns true if any of the flags that require skipping further TC/NF processing are set. */
-static inline bool
-mlx5_esw_attr_flags_skip(u32 attr_flags)
-{
- return attr_flags & (MLX5_ESW_ATTR_FLAG_SLOW_PATH | MLX5_ESW_ATTR_FLAG_ACCEPT);
-}
-
struct mlx5_esw_flow_attr {
struct mlx5_eswitch_rep *in_rep;
struct mlx5_core_dev *in_mdev;
@@ -487,6 +474,7 @@ struct mlx5_esw_flow_attr {
int src_port_rewrite_act_id;
} dests[MLX5_MAX_FLOW_FWD_VPORTS];
struct mlx5_rx_tun_attr *rx_tun_attr;
+ struct ethhdr eth;
struct mlx5_pkt_reformat *decap_pkt_reformat;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index cfcd72bad9af..3f63df127091 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -180,7 +180,7 @@ esw_setup_decap_indir(struct mlx5_eswitch *esw,
{
struct mlx5_flow_table *ft;
- if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
+ if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
return -EOPNOTSUPP;
ft = mlx5_esw_indir_table_get(esw, attr, spec,
@@ -201,12 +201,12 @@ esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
static int
esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
- struct mlx5_flow_attr *attr,
+ u32 sampler_id,
int i)
{
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
- dest[i].sampler_id = attr->sample_attr->sampler_id;
+ dest[i].sampler_id = sampler_id;
return 0;
}
@@ -297,7 +297,7 @@ esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
int err;
- if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
+ if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
return -EOPNOTSUPP;
/* flow steering cannot handle more than one dest with the same ft
@@ -364,7 +364,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
int j, err;
- if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
+ if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
return -EOPNOTSUPP;
for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
@@ -463,15 +463,16 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
esw_src_port_rewrite_supported(esw))
- attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;
+ attr->flags |= MLX5_ATTR_FLAG_SRC_REWRITE;
- if (attr->flags & MLX5_ESW_ATTR_FLAG_SAMPLE) {
- esw_setup_sampler_dest(dest, flow_act, attr, *i);
+ if (attr->flags & MLX5_ATTR_FLAG_SAMPLE &&
+ !(attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)) {
+ esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i);
(*i)++;
} else if (attr->dest_ft) {
esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
(*i)++;
- } else if (mlx5_esw_attr_flags_skip(attr->flags)) {
+ } else if (mlx5e_tc_attr_flags_skip(attr->flags)) {
esw_setup_slow_path_dest(dest, flow_act, chains, *i);
(*i)++;
} else if (attr->dest_chain) {
@@ -498,7 +499,7 @@ esw_cleanup_dests(struct mlx5_eswitch *esw,
if (attr->dest_ft) {
esw_cleanup_decap_indir(esw, attr);
- } else if (!mlx5_esw_attr_flags_skip(attr->flags)) {
+ } else if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
if (attr->dest_chain)
esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
else if (esw_is_indir_table(esw, attr))
@@ -589,7 +590,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
else
fdb = attr->ft;
- if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
+ if (!(attr->flags & MLX5_ATTR_FLAG_NO_IN_PORT))
mlx5_eswitch_set_rule_source_port(esw, spec, attr,
esw_attr->in_mdev->priv.eswitch,
esw_attr->in_rep->vport);
@@ -721,7 +722,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
mlx5_del_flow_rules(rule);
- if (!mlx5_esw_attr_flags_skip(attr->flags)) {
+ if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
/* unref the term table */
for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
if (esw_attr->dests[i].termtbl)
@@ -863,7 +864,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
if (err)
goto unlock;
- attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
+ attr->flags &= ~MLX5_ATTR_FLAG_VLAN_HANDLED;
vport = esw_vlan_action_get_vport(esw_attr, push, pop);
@@ -871,7 +872,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
/* tracks VF --> wire rules without vlan push action */
if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
vport->vlan_refcount++;
- attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
+ attr->flags |= MLX5_ATTR_FLAG_VLAN_HANDLED;
}
goto unlock;
@@ -902,7 +903,7 @@ skip_set_push:
}
out:
if (!err)
- attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
+ attr->flags |= MLX5_ATTR_FLAG_VLAN_HANDLED;
unlock:
mutex_unlock(&esw->state_lock);
return err;
@@ -921,7 +922,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
return 0;
- if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
+ if (!(attr->flags & MLX5_ATTR_FLAG_VLAN_HANDLED))
return 0;
push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
@@ -2378,60 +2379,6 @@ void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
}
-static int esw_set_uplink_slave_ingress_root(struct mlx5_core_dev *master,
- struct mlx5_core_dev *slave)
-{
- u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
- u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
- struct mlx5_eswitch *esw;
- struct mlx5_flow_root_namespace *root;
- struct mlx5_flow_namespace *ns;
- struct mlx5_vport *vport;
- int err;
-
- MLX5_SET(set_flow_table_root_in, in, opcode,
- MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
- MLX5_SET(set_flow_table_root_in, in, table_type, FS_FT_ESW_INGRESS_ACL);
- MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
- MLX5_SET(set_flow_table_root_in, in, vport_number, MLX5_VPORT_UPLINK);
-
- if (master) {
- esw = master->priv.eswitch;
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
- MLX5_SET(set_flow_table_root_in, in, table_of_other_vport, 1);
- MLX5_SET(set_flow_table_root_in, in, table_vport_number,
- MLX5_VPORT_UPLINK);
-
- ns = mlx5_get_flow_vport_acl_namespace(master,
- MLX5_FLOW_NAMESPACE_ESW_INGRESS,
- vport->index);
- root = find_root(&ns->node);
- mutex_lock(&root->chain_lock);
-
- MLX5_SET(set_flow_table_root_in, in,
- table_eswitch_owner_vhca_id_valid, 1);
- MLX5_SET(set_flow_table_root_in, in,
- table_eswitch_owner_vhca_id,
- MLX5_CAP_GEN(master, vhca_id));
- MLX5_SET(set_flow_table_root_in, in, table_id,
- root->root_ft->id);
- } else {
- esw = slave->priv.eswitch;
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
- ns = mlx5_get_flow_vport_acl_namespace(slave,
- MLX5_FLOW_NAMESPACE_ESW_INGRESS,
- vport->index);
- root = find_root(&ns->node);
- mutex_lock(&root->chain_lock);
- MLX5_SET(set_flow_table_root_in, in, table_id, root->root_ft->id);
- }
-
- err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
- mutex_unlock(&root->chain_lock);
-
- return err;
-}
-
static int esw_set_slave_root_fdb(struct mlx5_core_dev *master,
struct mlx5_core_dev *slave)
{
@@ -2613,15 +2560,10 @@ int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
{
int err;
- err = esw_set_uplink_slave_ingress_root(master_esw->dev,
- slave_esw->dev);
- if (err)
- return -EINVAL;
-
err = esw_set_slave_root_fdb(master_esw->dev,
slave_esw->dev);
if (err)
- goto err_fdb;
+ return err;
err = esw_set_master_egress_rule(master_esw->dev,
slave_esw->dev);
@@ -2633,9 +2575,6 @@ int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
err_acl:
esw_set_slave_root_fdb(NULL, slave_esw->dev);
-err_fdb:
- esw_set_uplink_slave_ingress_root(NULL, slave_esw->dev);
-
return err;
}
@@ -2644,7 +2583,6 @@ void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
{
esw_unset_master_egress_rule(master_esw->dev);
esw_set_slave_root_fdb(NULL, slave_esw->dev);
- esw_set_uplink_slave_ingress_root(NULL, slave_esw->dev);
}
#define ESW_OFFLOADS_DEVCOM_PAIR (0)
@@ -2841,6 +2779,19 @@ bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
return true;
}
+#define MLX5_ESW_METADATA_RSVD_UPLINK 1
+
+/* Share the same metadata for uplinks. This is fine because:
+ * (a) In shared FDB mode (LAG) both uplinks are treated the
+ * same and tagged with the same metadata.
+ * (b) In non-shared FDB mode, packets from physical port0
+ * cannot hit eswitch of PF1 and vice versa.
+ */
+static u32 mlx5_esw_match_metadata_reserved(struct mlx5_eswitch *esw)
+{
+ return MLX5_ESW_METADATA_RSVD_UPLINK;
+}
+
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
{
u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
@@ -2855,8 +2806,10 @@ u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
return 0;
/* Metadata is 4 bits of PFNUM and 12 bits of unique id */
- /* Use only non-zero vport_id (1-4095) for all PF's */
- id = ida_alloc_range(&esw->offloads.vport_metadata_ida, 1, vport_end_ida, GFP_KERNEL);
+ /* Use only non-zero vport_id (2-4095) for all PF's */
+ id = ida_alloc_range(&esw->offloads.vport_metadata_ida,
+ MLX5_ESW_METADATA_RSVD_UPLINK + 1,
+ vport_end_ida, GFP_KERNEL);
if (id < 0)
return 0;
id = (pf_num << ESW_VPORT_BITS) | id;
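/* A small worked example (not part of this patch), assuming ESW_VPORT_BITS is
 * 12 as the "4 bits of PFNUM and 12 bits of unique id" comment states: a VF
 * vport on PF 1 that draws unique id 2 gets metadata (1 << 12) | 2 = 0x1002,
 * while every uplink uses the reserved value 1 regardless of PF, so both LAG
 * uplinks match the same metadata. demo_vport_metadata() is illustrative only.
 */
static u32 demo_vport_metadata(u16 pf_num, u16 unique_id)
{
	/* e.g. pf_num = 1, unique_id = 2  ->  (1 << 12) | 2 = 0x1002 */
	return (pf_num << 12) | unique_id;
}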
@@ -2874,7 +2827,11 @@ void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
+ if (vport->vport == MLX5_VPORT_UPLINK)
+ vport->default_metadata = mlx5_esw_match_metadata_reserved(esw);
+ else
+ vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
+
vport->metadata = vport->default_metadata;
return vport->metadata ? 0 : -ENOSPC;
}
@@ -2885,6 +2842,9 @@ static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
if (!vport->default_metadata)
return;
+ if (vport->vport == MLX5_VPORT_UPLINK)
+ return;
+
WARN_ON(vport->metadata != vport->default_metadata);
mlx5_esw_match_metadata_free(esw, vport->default_metadata);
}
@@ -3377,6 +3337,27 @@ static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
!mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0;
}
+/* FIXME: devl_unlock() followed by devl_lock() inside driver callback
+ * is never correct and prone to races. It's a transitional workaround,
+ * never repeat this pattern.
+ *
+ * This code MUST be fixed before removing devlink_mutex as it is safe
+ * to do only because of that mutex.
+ */
+static void mlx5_eswtich_mode_callback_enter(struct devlink *devlink,
+ struct mlx5_eswitch *esw)
+{
+ devl_unlock(devlink);
+ down_write(&esw->mode_lock);
+}
+
+static void mlx5_eswtich_mode_callback_exit(struct devlink *devlink,
+ struct mlx5_eswitch *esw)
+{
+ up_write(&esw->mode_lock);
+ devl_lock(devlink);
+}
+
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
@@ -3391,6 +3372,15 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
+ /* FIXME: devl_unlock() followed by devl_lock() inside driver callback
+ * is never correct and prone to races. It's a transitional workaround,
+ * never repeat this pattern.
+ *
+ * This code MUST be fixed before removing devlink_mutex as it is safe
+ * to do only because of that mutex.
+ */
+ devl_unlock(devlink);
+
mlx5_lag_disable_change(esw->dev);
err = mlx5_esw_try_lock(esw);
if (err < 0) {
@@ -3421,6 +3411,7 @@ unlock:
mlx5_esw_unlock(esw);
enable_lag:
mlx5_lag_enable_change(esw->dev);
+ devl_lock(devlink);
return err;
}
@@ -3433,14 +3424,14 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
if (IS_ERR(esw))
return PTR_ERR(esw);
- down_write(&esw->mode_lock);
+ mlx5_eswtich_mode_callback_enter(devlink, esw);
err = eswitch_devlink_esw_mode_check(esw);
if (err)
goto unlock;
err = esw_mode_to_devlink(esw->mode, mode);
unlock:
- up_write(&esw->mode_lock);
+ mlx5_eswtich_mode_callback_exit(devlink, esw);
return err;
}
@@ -3487,7 +3478,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
if (IS_ERR(esw))
return PTR_ERR(esw);
- down_write(&esw->mode_lock);
+ mlx5_eswtich_mode_callback_enter(devlink, esw);
err = eswitch_devlink_esw_mode_check(esw);
if (err)
goto out;
@@ -3524,11 +3515,11 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
goto out;
esw->offloads.inline_mode = mlx5_mode;
- up_write(&esw->mode_lock);
+ mlx5_eswtich_mode_callback_exit(devlink, esw);
return 0;
out:
- up_write(&esw->mode_lock);
+ mlx5_eswtich_mode_callback_exit(devlink, esw);
return err;
}
@@ -3541,14 +3532,14 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
if (IS_ERR(esw))
return PTR_ERR(esw);
- down_write(&esw->mode_lock);
+ mlx5_eswtich_mode_callback_enter(devlink, esw);
err = eswitch_devlink_esw_mode_check(esw);
if (err)
goto unlock;
err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
unlock:
- up_write(&esw->mode_lock);
+ mlx5_eswtich_mode_callback_exit(devlink, esw);
return err;
}
@@ -3564,7 +3555,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
if (IS_ERR(esw))
return PTR_ERR(esw);
- down_write(&esw->mode_lock);
+ mlx5_eswtich_mode_callback_enter(devlink, esw);
err = eswitch_devlink_esw_mode_check(esw);
if (err)
goto unlock;
@@ -3610,7 +3601,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
}
unlock:
- up_write(&esw->mode_lock);
+ mlx5_eswtich_mode_callback_exit(devlink, esw);
return err;
}
@@ -3624,15 +3615,14 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
if (IS_ERR(esw))
return PTR_ERR(esw);
-
- down_write(&esw->mode_lock);
+ mlx5_eswtich_mode_callback_enter(devlink, esw);
err = eswitch_devlink_esw_mode_check(esw);
if (err)
goto unlock;
*encap = esw->offloads.encap;
unlock:
- up_write(&esw->mode_lock);
+ mlx5_eswtich_mode_callback_exit(devlink, esw);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index 182306bbefaa..ee568bf34ae2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -219,12 +219,14 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table) ||
!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level) ||
- mlx5_esw_attr_flags_skip(attr->flags) ||
+ mlx5e_tc_attr_flags_skip(attr->flags) ||
(!mlx5_eswitch_offload_is_uplink_port(esw, spec) && !esw_attr->int_port))
return false;
/* push vlan on RX */
- if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)
+ if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
+ !(mlx5_fs_get_capabilities(esw->dev, MLX5_FLOW_NAMESPACE_FDB) &
+ MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX))
return true;
/* hairpin */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
index 2ce4241459ce..39c03dcbd196 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index dafe341358c7..a0ac17c3f12f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -152,6 +152,12 @@ static int mlx5_cmd_stub_destroy_ns(struct mlx5_flow_root_namespace *ns)
return 0;
}
+static u32 mlx5_cmd_stub_get_capabilities(struct mlx5_flow_root_namespace *ns,
+ enum fs_flow_table_type ft_type)
+{
+ return 0;
+}
+
static int mlx5_cmd_set_slave_root_fdb(struct mlx5_core_dev *master,
struct mlx5_core_dev *slave,
bool ft_id_valid,
@@ -971,6 +977,12 @@ static int mlx5_cmd_create_match_definer(struct mlx5_flow_root_namespace *ns,
return err ? err : MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
}
+static u32 mlx5_cmd_get_capabilities(struct mlx5_flow_root_namespace *ns,
+ enum fs_flow_table_type ft_type)
+{
+ return 0;
+}
+
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
.create_flow_table = mlx5_cmd_create_flow_table,
.destroy_flow_table = mlx5_cmd_destroy_flow_table,
@@ -990,6 +1002,7 @@ static const struct mlx5_flow_cmds mlx5_flow_cmds = {
.set_peer = mlx5_cmd_stub_set_peer,
.create_ns = mlx5_cmd_stub_create_ns,
.destroy_ns = mlx5_cmd_stub_destroy_ns,
+ .get_capabilities = mlx5_cmd_get_capabilities,
};
static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
@@ -1011,6 +1024,7 @@ static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
.set_peer = mlx5_cmd_stub_set_peer,
.create_ns = mlx5_cmd_stub_create_ns,
.destroy_ns = mlx5_cmd_stub_destroy_ns,
+ .get_capabilities = mlx5_cmd_stub_get_capabilities,
};
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 220ec632d35a..274004e80f03 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -101,6 +101,9 @@ struct mlx5_flow_cmds {
u16 format_id, u32 *match_mask);
int (*destroy_match_definer)(struct mlx5_flow_root_namespace *ns,
int definer_id);
+
+ u32 (*get_capabilities)(struct mlx5_flow_root_namespace *ns,
+ enum fs_flow_table_type ft_type);
};
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 537c82b9aa53..816d991f7621 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1696,6 +1696,7 @@ static void free_match_list(struct match_list *head, bool ft_locked)
static int build_match_list(struct match_list *match_head,
struct mlx5_flow_table *ft,
const struct mlx5_flow_spec *spec,
+ struct mlx5_flow_group *fg,
bool ft_locked)
{
struct rhlist_head *tmp, *list;
@@ -1710,6 +1711,9 @@ static int build_match_list(struct match_list *match_head,
rhl_for_each_entry_rcu(g, tmp, list, hash) {
struct match_list *curr_match;
+ if (fg && fg != g)
+ continue;
+
if (unlikely(!tree_get_node(&g->node)))
continue;
@@ -1889,6 +1893,9 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
if (!check_valid_spec(spec))
return ERR_PTR(-EINVAL);
+ if (flow_act->fg && ft->autogroup.active)
+ return ERR_PTR(-EINVAL);
+
for (i = 0; i < dest_num; i++) {
if (!dest_is_valid(&dest[i], flow_act, ft))
return ERR_PTR(-EINVAL);
@@ -1898,7 +1905,7 @@ search_again_locked:
version = atomic_read(&ft->node.version);
/* Collect all fgs which has a matching match_criteria */
- err = build_match_list(&match_head, ft, spec, take_write);
+ err = build_match_list(&match_head, ft, spec, flow_act->fg, take_write);
if (err) {
if (take_write)
up_write_ref_node(&ft->node, false);
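/* A hedged usage sketch (not part of this patch): with flow_act->fg honoured
 * by build_match_list(), a caller that manages its own flow groups can pin a
 * rule to a specific group, as the ingress ACL code in this series does for
 * the metadata and drop groups. The table must not use autogroups, otherwise
 * _mlx5_add_flow_rules() now returns -EINVAL. demo_add_rule_in_group() is an
 * illustrative wrapper, not a driver function.
 */
static struct mlx5_flow_handle *
demo_add_rule_in_group(struct mlx5_flow_table *ft, struct mlx5_flow_group *fg)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_DROP,
		.fg = fg,
	};

	return mlx5_add_flow_rules(ft, NULL, &flow_act, NULL, 0);
}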
@@ -3042,6 +3049,22 @@ void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
steering->esw_ingress_root_ns = NULL;
}
+u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type)
+{
+ struct mlx5_flow_root_namespace *root;
+ struct mlx5_flow_namespace *ns;
+
+ ns = mlx5_get_flow_namespace(dev, type);
+ if (!ns)
+ return 0;
+
+ root = find_root(&ns->node);
+ if (!root)
+ return 0;
+
+ return root->cmds->get_capabilities(root, root->table_type);
+}
+
static int init_egress_root_ns(struct mlx5_flow_steering *steering)
{
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 5469b08d635f..c488a7c5b07e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -120,6 +120,11 @@ enum mlx5_flow_steering_mode {
MLX5_FLOW_STEERING_MODE_SMFS
};
+enum mlx5_flow_steering_capabilty {
+ MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX = 1UL << 0,
+ MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX = 1UL << 1,
+};
+
struct mlx5_flow_steering {
struct mlx5_core_dev *dev;
enum mlx5_flow_steering_mode mode;
@@ -301,6 +306,8 @@ void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);
int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports);
void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev);
+u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type);
+
struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
#define fs_get_obj(v, _node) {v = container_of((_node), typeof(*v), node); }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 2d8406fab844..614687e0e3d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -32,7 +32,6 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
-#include <linux/module.h>
#include "mlx5_core.h"
#include "../../mlxfw/mlxfw.h"
#include "lib/tout.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 84dbe46d5ede..4aa22dce9b77 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -57,7 +57,8 @@ static int mlx5_reg_mfrl_set(struct mlx5_core_dev *dev, u8 reset_level,
return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MFRL, 0, 1);
}
-static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type)
+static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level,
+ u8 *reset_type, u8 *reset_state)
{
u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
@@ -71,25 +72,67 @@ static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *r
*reset_level = MLX5_GET(mfrl_reg, out, reset_level);
if (reset_type)
*reset_type = MLX5_GET(mfrl_reg, out, reset_type);
+ if (reset_state)
+ *reset_state = MLX5_GET(mfrl_reg, out, reset_state);
return 0;
}
int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type)
{
- return mlx5_reg_mfrl_query(dev, reset_level, reset_type);
+ return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL);
}
-int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel)
+static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev,
+ struct netlink_ext_ack *extack)
+{
+ u8 reset_state;
+
+ if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state))
+ goto out;
+
+ switch (reset_state) {
+ case MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION:
+ case MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS:
+ NL_SET_ERR_MSG_MOD(extack, "Sync reset was already triggered");
+ return -EBUSY;
+ case MLX5_MFRL_REG_RESET_STATE_TIMEOUT:
+ NL_SET_ERR_MSG_MOD(extack, "Sync reset got timeout");
+ return -ETIMEDOUT;
+ case MLX5_MFRL_REG_RESET_STATE_NACK:
+ NL_SET_ERR_MSG_MOD(extack, "One of the hosts disabled reset");
+ return -EPERM;
+ }
+
+out:
+ NL_SET_ERR_MSG_MOD(extack, "Sync reset failed");
+ return -EIO;
+}
+
+int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel,
+ struct netlink_ext_ack *extack)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
int err;
set_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
- err = mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL3, reset_type_sel, 0, true);
- if (err)
- clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
- return err;
+
+ MLX5_SET(mfrl_reg, in, reset_level, MLX5_MFRL_REG_RESET_LEVEL3);
+ MLX5_SET(mfrl_reg, in, rst_type_sel, reset_type_sel);
+ MLX5_SET(mfrl_reg, in, pci_sync_for_fw_update_start, 1);
+ err = mlx5_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MFRL, 0, 1, false);
+ if (!err)
+ return 0;
+
+ clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
+ if (err == -EREMOTEIO && MLX5_CAP_MCAM_FEATURE(dev, reset_state))
+ return mlx5_fw_reset_get_reset_state_err(dev, extack);
+
+ NL_SET_ERR_MSG_MOD(extack, "Sync reset command failed");
+ return mlx5_cmd_check(dev, err, in, out);
}
int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
index 7761ee5fc7d0..694fc7cb2684 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
@@ -9,7 +9,8 @@
void mlx5_fw_reset_enable_remote_dev_reset_set(struct mlx5_core_dev *dev, bool enable);
bool mlx5_fw_reset_enable_remote_dev_reset_get(struct mlx5_core_dev *dev);
int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type);
-int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel);
+int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel,
+ struct netlink_ext_ack *extack);
int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev);
int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 737df402c927..659021c31cbd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 4ddf6b330a44..6cad3b72c133 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -31,15 +31,22 @@
*/
#include <linux/netdevice.h>
+#include <net/bonding.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include "lib/devcom.h"
#include "mlx5_core.h"
#include "eswitch.h"
+#include "esw/acl/ofld.h"
#include "lag.h"
#include "mp.h"
+enum {
+ MLX5_LAG_EGRESS_PORT_1 = 1,
+ MLX5_LAG_EGRESS_PORT_2,
+};
+
/* General purpose, use for short periods of time.
* Beware of lock dependencies (preferably, no locks should be acquired
* under it).
@@ -193,15 +200,71 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
p2en = tracker->netdev_state[MLX5_LAG_P2].tx_enabled &&
tracker->netdev_state[MLX5_LAG_P2].link_up;
- *port1 = 1;
- *port2 = 2;
+ *port1 = MLX5_LAG_EGRESS_PORT_1;
+ *port2 = MLX5_LAG_EGRESS_PORT_2;
if ((!p1en && !p2en) || (p1en && p2en))
return;
if (p1en)
- *port2 = 1;
+ *port2 = MLX5_LAG_EGRESS_PORT_1;
+ else
+ *port1 = MLX5_LAG_EGRESS_PORT_2;
+}
+
+static bool mlx5_lag_has_drop_rule(struct mlx5_lag *ldev)
+{
+ return ldev->pf[MLX5_LAG_P1].has_drop || ldev->pf[MLX5_LAG_P2].has_drop;
+}
+
+static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev)
+{
+ int i;
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++) {
+ if (!ldev->pf[i].has_drop)
+ continue;
+
+ mlx5_esw_acl_ingress_vport_drop_rule_destroy(ldev->pf[i].dev->priv.eswitch,
+ MLX5_VPORT_UPLINK);
+ ldev->pf[i].has_drop = false;
+ }
+}
+
+static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker)
+{
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
+ struct mlx5_core_dev *inactive;
+ u8 v2p_port1, v2p_port2;
+ int inactive_idx;
+ int err;
+
+ /* First delete the current drop rule so there won't be any dropped
+ * packets
+ */
+ mlx5_lag_drop_rule_cleanup(ldev);
+
+ if (!ldev->tracker.has_inactive)
+ return;
+
+ mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1, &v2p_port2);
+
+ if (v2p_port1 == MLX5_LAG_EGRESS_PORT_1) {
+ inactive = dev1;
+ inactive_idx = MLX5_LAG_P2;
+ } else {
+ inactive = dev0;
+ inactive_idx = MLX5_LAG_P1;
+ }
+
+ err = mlx5_esw_acl_ingress_vport_drop_rule_create(inactive->priv.eswitch,
+ MLX5_VPORT_UPLINK);
+ if (!err)
+ ldev->pf[inactive_idx].has_drop = true;
else
- *port1 = 2;
+ mlx5_core_err(inactive,
+ "Failed to create lag drop rule, error: %d", err);
}
static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 v2p_port1, u8 v2p_port2)
@@ -238,6 +301,10 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
ldev->v2p_map[MLX5_LAG_P1],
ldev->v2p_map[MLX5_LAG_P2]);
}
+
+ if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
+ !(ldev->flags & MLX5_LAG_FLAG_ROCE))
+ mlx5_lag_drop_rule_setup(ldev, tracker);
}
static void mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
@@ -339,6 +406,10 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
return err;
}
+ if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
+ !roce_lag)
+ mlx5_lag_drop_rule_setup(ldev, tracker);
+
ldev->flags |= flags;
ldev->shared_fdb = shared_fdb;
return 0;
@@ -347,6 +418,7 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
bool roce_lag = __mlx5_lag_is_roce(ldev);
u8 flags = ldev->flags;
@@ -356,8 +428,8 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
mlx5_lag_mp_reset(ldev);
if (ldev->shared_fdb) {
- mlx5_eswitch_offloads_destroy_single_fdb(ldev->pf[MLX5_LAG_P1].dev->priv.eswitch,
- ldev->pf[MLX5_LAG_P2].dev->priv.eswitch);
+ mlx5_eswitch_offloads_destroy_single_fdb(dev0->priv.eswitch,
+ dev1->priv.eswitch);
ldev->shared_fdb = false;
}
@@ -372,11 +444,15 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
"Failed to deactivate VF LAG; driver restart required\n"
"Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
}
- } else if (flags & MLX5_LAG_FLAG_HASH_BASED) {
- mlx5_lag_port_sel_destroy(ldev);
+ return err;
}
- return err;
+ if (flags & MLX5_LAG_FLAG_HASH_BASED)
+ mlx5_lag_port_sel_destroy(ldev);
+ if (mlx5_lag_has_drop_rule(ldev))
+ mlx5_lag_drop_rule_cleanup(ldev);
+
+ return 0;
}
static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
@@ -613,6 +689,8 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
struct net_device *upper = info->upper_dev, *ndev_tmp;
struct netdev_lag_upper_info *lag_upper_info = NULL;
bool is_bonded, is_in_lag, mode_supported;
+ bool has_inactive = false;
+ struct slave *slave;
int bond_status = 0;
int num_slaves = 0;
int changed = 0;
@@ -632,8 +710,12 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
rcu_read_lock();
for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
- if (idx >= 0)
+ if (idx >= 0) {
+ slave = bond_slave_get_rcu(ndev_tmp);
+ if (slave)
+ has_inactive |= bond_is_slave_inactive(slave);
bond_status |= (1 << idx);
+ }
num_slaves++;
}
@@ -648,6 +730,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
tracker->hash_type = lag_upper_info->hash_type;
}
+ tracker->has_inactive = has_inactive;
/* Determine bonding status:
* A device is considered bonded if both its physical ports are slaves
* of the same lag master, and only them.
@@ -704,6 +787,38 @@ static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
return 1;
}
+static int mlx5_handle_changeinfodata_event(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker,
+ struct net_device *ndev)
+{
+ struct net_device *ndev_tmp;
+ struct slave *slave;
+ bool has_inactive = false;
+ int idx;
+
+ if (!netif_is_lag_master(ndev))
+ return 0;
+
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(ndev, ndev_tmp) {
+ idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
+ if (idx < 0)
+ continue;
+
+ slave = bond_slave_get_rcu(ndev_tmp);
+ if (slave)
+ has_inactive |= bond_is_slave_inactive(slave);
+ }
+ rcu_read_unlock();
+
+ if (tracker->has_inactive == has_inactive)
+ return 0;
+
+ tracker->has_inactive = has_inactive;
+
+ return 1;
+}
+
static int mlx5_lag_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -712,7 +827,9 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
struct mlx5_lag *ldev;
int changed = 0;
- if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
+ if (event != NETDEV_CHANGEUPPER &&
+ event != NETDEV_CHANGELOWERSTATE &&
+ event != NETDEV_CHANGEINFODATA)
return NOTIFY_DONE;
ldev = container_of(this, struct mlx5_lag, nb);
@@ -728,6 +845,9 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
ndev, ptr);
break;
+ case NETDEV_CHANGEINFODATA:
+ changed = mlx5_handle_changeinfodata_event(ldev, &tracker, ndev);
+ break;
}
ldev->tracker = tracker;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index e5d231c31b54..cbf9a9003e55 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -28,6 +28,7 @@ enum {
struct lag_func {
struct mlx5_core_dev *dev;
struct net_device *netdev;
+ bool has_drop;
};
/* Used for collection of netdev event info. */
@@ -35,6 +36,7 @@ struct lag_tracker {
enum netdev_lag_tx_type tx_type;
struct netdev_lag_lower_state_info netdev_state[MLX5_MAX_PORTS];
unsigned int is_bonded:1;
+ unsigned int has_inactive:1;
enum netdev_lag_hash hash_type;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index 626aa60b6099..4a6ec15ef046 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -50,7 +50,7 @@ bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev,
enum mlx5_lag_port_affinity port)
{
- struct lag_tracker tracker;
+ struct lag_tracker tracker = {};
if (!__mlx5_lag_is_multipath(ldev))
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h
index 4bad6a5fde56..f240ffe5116c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h
@@ -92,13 +92,6 @@ mlx5_hv_vhca_agent_create(struct mlx5_hv_vhca *hv_vhca,
static inline void mlx5_hv_vhca_agent_destroy(struct mlx5_hv_vhca_agent *agent)
{
}
-
-static inline int
-mlx5_hv_vhca_write_agent(struct mlx5_hv_vhca_agent *agent,
- void *buf, int len)
-{
- return 0;
-}
#endif
#endif /* __LIB_HV_VHCA_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
index e042e0924079..4571c56ec3c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */
-#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/port.h>
#include "mlx5_core.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.c
new file mode 100644
index 000000000000..9b8c051ccf65
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#include <linux/kernel.h>
+#include <linux/mlx5/driver.h>
+
+#include "smfs.h"
+
+struct mlx5dr_matcher *
+mlx5_smfs_matcher_create(struct mlx5dr_table *table, u32 priority, struct mlx5_flow_spec *spec)
+{
+ struct mlx5dr_match_parameters matcher_mask = {};
+
+ matcher_mask.match_buf = (u64 *)&spec->match_criteria;
+ matcher_mask.match_sz = DR_SZ_MATCH_PARAM;
+
+ return mlx5dr_matcher_create(table, priority, spec->match_criteria_enable, &matcher_mask);
+}
+
+void
+mlx5_smfs_matcher_destroy(struct mlx5dr_matcher *matcher)
+{
+ mlx5dr_matcher_destroy(matcher);
+}
+
+struct mlx5dr_table *
+mlx5_smfs_table_get_from_fs_ft(struct mlx5_flow_table *ft)
+{
+ return mlx5dr_table_get_from_fs_ft(ft);
+}
+
+struct mlx5dr_action *
+mlx5_smfs_action_create_dest_table(struct mlx5dr_table *table)
+{
+ return mlx5dr_action_create_dest_table(table);
+}
+
+struct mlx5dr_action *
+mlx5_smfs_action_create_flow_counter(u32 counter_id)
+{
+ return mlx5dr_action_create_flow_counter(counter_id);
+}
+
+void
+mlx5_smfs_action_destroy(struct mlx5dr_action *action)
+{
+ mlx5dr_action_destroy(action);
+}
+
+struct mlx5dr_rule *
+mlx5_smfs_rule_create(struct mlx5dr_matcher *matcher, struct mlx5_flow_spec *spec,
+ size_t num_actions, struct mlx5dr_action *actions[],
+ u32 flow_source)
+{
+ struct mlx5dr_match_parameters value = {};
+
+ value.match_buf = (u64 *)spec->match_value;
+ value.match_sz = DR_SZ_MATCH_PARAM;
+
+ return mlx5dr_rule_create(matcher, &value, num_actions, actions, flow_source);
+}
+
+void
+mlx5_smfs_rule_destroy(struct mlx5dr_rule *rule)
+{
+ mlx5dr_rule_destroy(rule);
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h
new file mode 100644
index 000000000000..452d0df339ac
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __MLX5_LIB_SMFS_H__
+#define __MLX5_LIB_SMFS_H__
+
+#include "steering/mlx5dr.h"
+#include "steering/dr_types.h"
+
+struct mlx5dr_matcher *
+mlx5_smfs_matcher_create(struct mlx5dr_table *table, u32 priority, struct mlx5_flow_spec *spec);
+
+void
+mlx5_smfs_matcher_destroy(struct mlx5dr_matcher *matcher);
+
+struct mlx5dr_table *
+mlx5_smfs_table_get_from_fs_ft(struct mlx5_flow_table *ft);
+
+struct mlx5dr_action *
+mlx5_smfs_action_create_dest_table(struct mlx5dr_table *table);
+
+struct mlx5dr_action *
+mlx5_smfs_action_create_flow_counter(u32 counter_id);
+
+void
+mlx5_smfs_action_destroy(struct mlx5dr_action *action);
+
+struct mlx5dr_rule *
+mlx5_smfs_rule_create(struct mlx5dr_matcher *matcher, struct mlx5_flow_spec *spec,
+ size_t num_actions, struct mlx5dr_action *actions[],
+ u32 flow_source);
+
+void
+mlx5_smfs_rule_destroy(struct mlx5dr_rule *rule);
+
+#endif /* __MLX5_LIB_SMFS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
index e3b0a131c3e1..d55e15c1f380 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/mlx5/driver.h>
#include <net/vxlan.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 976578d1904c..2589e39eb9c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -736,10 +736,9 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
err = mlx5_cmd_exec_inout(dev, query_issi, query_in, query_out);
if (err) {
- u32 syndrome;
- u8 status;
+ u32 syndrome = MLX5_GET(query_issi_out, query_out, syndrome);
+ u8 status = MLX5_GET(query_issi_out, query_out, status);
- mlx5_cmd_mbox_status(query_out, &status, &syndrome);
if (!status || syndrome == MLX5_DRIVER_SYND) {
mlx5_core_err(dev, "Failed to query ISSI err(%d) status(%d) synd(%d)\n",
err, status, syndrome);
@@ -1488,8 +1487,8 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
INIT_LIST_HEAD(&priv->pgdir_list);
priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev));
- priv->dbg_root = debugfs_create_dir(dev_name(dev->device),
- mlx5_debugfs_root);
+ priv->dbg.dbg_root = debugfs_create_dir(dev_name(dev->device),
+ mlx5_debugfs_root);
INIT_LIST_HEAD(&priv->traps);
err = mlx5_tout_init(dev);
@@ -1525,7 +1524,7 @@ err_pagealloc_init:
err_health_init:
mlx5_tout_cleanup(dev);
err_timeout_init:
- debugfs_remove(dev->priv.dbg_root);
+ debugfs_remove(dev->priv.dbg.dbg_root);
mutex_destroy(&priv->pgdir_mutex);
mutex_destroy(&priv->alloc_mutex);
mutex_destroy(&priv->bfregs.wc_head.lock);
@@ -1543,7 +1542,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
mlx5_pagealloc_cleanup(dev);
mlx5_health_cleanup(dev);
mlx5_tout_cleanup(dev);
- debugfs_remove_recursive(dev->priv.dbg_root);
+ debugfs_remove_recursive(dev->priv.dbg.dbg_root);
mutex_destroy(&priv->pgdir_mutex);
mutex_destroy(&priv->alloc_mutex);
mutex_destroy(&priv->bfregs.wc_head.lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
index e019d68062d8..495cca58dccc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <rdma/ib_verbs.h>
#include "mlx5_core.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 37b2805b3bf3..a9b2d6ead542 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -177,7 +177,6 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
u32 element_id);
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages);
-void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev);
void mlx5_cmd_flush(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index f099a087400e..9d735c343a3b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index f6b5451328fc..ec76a8b1acc1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -32,7 +32,6 @@
#include <linux/highmem.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/xarray.h>
@@ -327,11 +326,12 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
}
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
- int notify_fail, bool ec_function)
+ int event, bool ec_function)
{
u32 function = get_function(func_id, ec_function);
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
+ int notify_fail = event;
u64 addr;
int err;
u32 *in;
@@ -351,8 +351,10 @@ retry:
if (err) {
if (err == -ENOMEM)
err = alloc_system_page(dev, function);
- if (err)
+ if (err) {
+ dev->priv.fw_pages_alloc_failed += (npages - i);
goto out_4k;
+ }
goto retry;
}
@@ -365,11 +367,20 @@ retry:
MLX5_SET(manage_pages_in, in, input_num_entries, npages);
MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
- err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_do(dev, in, inlen, out, sizeof(out));
+ if (err == -EREMOTEIO) {
+ notify_fail = 0;
+ /* if triggered by FW and failed by FW, ignore */
+ if (event) {
+ err = 0;
+ goto out_dropped;
+ }
+ }
if (err) {
+ err = mlx5_cmd_check(dev, err, in, out);
mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
func_id, npages, err);
- goto out_4k;
+ goto out_dropped;
}
dev->priv.fw_pages += npages;
@@ -384,6 +395,8 @@ retry:
kvfree(in);
return 0;
+out_dropped:
+ dev->priv.give_pages_dropped += npages;
out_4k:
for (i--; i >= 0; i--)
free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
@@ -455,7 +468,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
u32 i = 0;
if (!mlx5_cmd_is_down(dev))
- return mlx5_cmd_exec(dev, in, in_size, out, out_size);
+ return mlx5_cmd_do(dev, in, in_size, out, out_size);
/* No hard feelings, we want our pages back! */
npages = MLX5_GET(manage_pages_in, in, input_num_entries);
@@ -479,7 +492,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
}
static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
- int *nclaimed, bool ec_function)
+ int *nclaimed, bool event, bool ec_function)
{
u32 function = get_function(func_id, ec_function);
int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
@@ -507,6 +520,14 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
func_id, npages, outlen);
err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
if (err) {
+ npages = MLX5_GET(manage_pages_in, in, input_num_entries);
+ dev->priv.reclaim_pages_discard += npages;
+ }
+ /* if triggered by a FW event and failed by FW, then ignore */
+ if (event && err == -EREMOTEIO)
+ err = 0;
+ if (err) {
+ err = mlx5_cmd_check(dev, err, in, out);
mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
goto out_free;
}
@@ -546,7 +567,7 @@ static void pages_work_handler(struct work_struct *work)
release_all_pages(dev, req->func_id, req->ec_function);
else if (req->npages < 0)
err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
- req->ec_function);
+ true, req->ec_function);
else if (req->npages > 0)
err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);
@@ -645,7 +666,7 @@ static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
int err;
err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
- &nclaimed, mlx5_core_is_ecpf(dev));
+ &nclaimed, false, mlx5_core_is_ecpf(dev));
if (err) {
mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
err, func_id);
@@ -700,12 +721,14 @@ int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
return -ENOMEM;
xa_init(&dev->priv.page_root_xa);
+ mlx5_pages_debugfs_init(dev);
return 0;
}
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
+ mlx5_pages_debugfs_cleanup(dev);
xa_destroy(&dev->priv.page_root_xa);
destroy_workqueue(dev->priv.pg_wq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 41807ef55201..db77f1d2eeb4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -3,7 +3,6 @@
#include <linux/interrupt.h>
#include <linux/notifier.h>
-#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "mlx5_irq.h"
@@ -601,7 +600,8 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev)
if (mlx5_core_is_sf(dev))
return 0;
- irq_table = kvzalloc(sizeof(*irq_table), GFP_KERNEL);
+ irq_table = kvzalloc_node(sizeof(*irq_table), GFP_KERNEL,
+ dev->priv.numa_node);
if (!irq_table)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pd.c b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
index aabc53ad8bdd..ee5ffdeb9015 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 7b16a1188aab..e1bd54574ea5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -33,9 +33,10 @@
#include <linux/mlx5/port.h>
#include "mlx5_core.h"
-int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
- int size_in, void *data_out, int size_out,
- u16 reg_id, int arg, int write)
+/* calling with verbose set to false will not print an error to the log */
+int mlx5_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in,
+ void *data_out, int size_out, u16 reg_id, int arg,
+ int write, bool verbose)
{
int outlen = MLX5_ST_SZ_BYTES(access_register_out) + size_out;
int inlen = MLX5_ST_SZ_BYTES(access_register_in) + size_in;
@@ -57,7 +58,9 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
MLX5_SET(access_register_in, in, argument, arg);
MLX5_SET(access_register_in, in, register_id, reg_id);
- err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
+ err = mlx5_cmd_do(dev, in, inlen, out, outlen);
+ if (verbose)
+ err = mlx5_cmd_check(dev, err, in, out);
if (err)
goto out;
@@ -69,6 +72,15 @@ out:
kvfree(in);
return err;
}
+EXPORT_SYMBOL_GPL(mlx5_access_reg);
+
+int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
+ int size_in, void *data_out, int size_out,
+ u16 reg_id, int arg, int write)
+{
+ return mlx5_access_reg(dev, data_in, size_in, data_out, size_out,
+ reg_id, arg, write, true);
+}
EXPORT_SYMBOL_GPL(mlx5_core_access_reg);
int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
@@ -263,7 +275,6 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
{
u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pmlp_reg)];
- int module_mapping;
int err;
MLX5_SET(pmlp_reg, in, local_port, 1);
@@ -272,8 +283,9 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
if (err)
return err;
- module_mapping = MLX5_GET(pmlp_reg, out, lane0_module_mapping);
- *module_num = module_mapping & MLX5_EEPROM_IDENTIFIER_BYTE_MASK;
+ *module_num = MLX5_GET(lane_2_module_mapping,
+ MLX5_ADDR_OF(pmlp_reg, out, lane0_module_mapping),
+ module);
return 0;
}
@@ -353,6 +365,12 @@ static void mlx5_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset
*offset -= MLX5_EEPROM_PAGE_LENGTH;
}
+static int mlx5_mcia_max_bytes(struct mlx5_core_dev *dev)
+{
+ /* mcia supports either 12 dwords or 32 dwords */
+ return (MLX5_CAP_MCAM_FEATURE(dev, mcia_32dwords) ? 32 : 12) * sizeof(u32);
+}
+
static int mlx5_query_mcia(struct mlx5_core_dev *dev,
struct mlx5_module_eeprom_query_params *params, u8 *data)
{
@@ -362,7 +380,7 @@ static int mlx5_query_mcia(struct mlx5_core_dev *dev,
void *ptr;
u16 size;
- size = min_t(int, params->size, MLX5_EEPROM_MAX_BYTES);
+ size = min_t(int, params->size, mlx5_mcia_max_bytes(dev));
MLX5_SET(mcia_reg, in, l, 0);
MLX5_SET(mcia_reg, in, size, size);
@@ -433,35 +451,12 @@ int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
struct mlx5_module_eeprom_query_params *params,
u8 *data)
{
- u8 module_id;
int err;
err = mlx5_query_module_num(dev, &params->module_number);
if (err)
return err;
- err = mlx5_query_module_id(dev, params->module_number, &module_id);
- if (err)
- return err;
-
- switch (module_id) {
- case MLX5_MODULE_ID_SFP:
- if (params->page > 0)
- return -EINVAL;
- break;
- case MLX5_MODULE_ID_QSFP:
- case MLX5_MODULE_ID_QSFP28:
- case MLX5_MODULE_ID_QSFP_PLUS:
- if (params->page > 3)
- return -EINVAL;
- break;
- case MLX5_MODULE_ID_DSFP:
- break;
- default:
- mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
- return -EINVAL;
- }
-
if (params->i2c_address != MLX5_I2C_ADDR_HIGH &&
params->i2c_address != MLX5_I2C_ADDR_LOW) {
mlx5_core_err(dev, "I2C address not recognized: 0x%x\n", params->i2c_address);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index 7161220afe30..9f8b4005f4bd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index c61a5e83c78c..850937cd8bf9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -570,6 +570,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
for (i = 0; i < num_actions; i++) {
struct mlx5dr_action_dest_tbl *dest_tbl;
+ struct mlx5dr_icm_chunk *chunk;
struct mlx5dr_action *action;
int max_actions_type = 1;
u32 action_type;
@@ -598,9 +599,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
matcher->tbl->level,
dest_tbl->tbl->level);
}
- attr.final_icm_addr = rx_rule ?
- dest_tbl->tbl->rx.s_anchor->chunk->icm_addr :
- dest_tbl->tbl->tx.s_anchor->chunk->icm_addr;
+ chunk = rx_rule ? dest_tbl->tbl->rx.s_anchor->chunk :
+ dest_tbl->tbl->tx.s_anchor->chunk;
+ attr.final_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(chunk);
} else {
struct mlx5dr_cmd_query_flow_table_details output;
int ret;
@@ -669,15 +670,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
case DR_ACTION_TYP_VPORT:
attr.hit_gvmi = action->vport->caps->vhca_gvmi;
dest_action = action;
- if (rx_rule) {
- if (action->vport->caps->num == MLX5_VPORT_UPLINK) {
- mlx5dr_dbg(dmn, "Device doesn't support Loopback on WIRE vport\n");
- return -EOPNOTSUPP;
- }
- attr.final_icm_addr = action->vport->caps->icm_address_rx;
- } else {
- attr.final_icm_addr = action->vport->caps->icm_address_tx;
- }
+ attr.final_icm_addr = rx_rule ?
+ action->vport->caps->icm_address_rx :
+ action->vport->caps->icm_address_tx;
break;
case DR_ACTION_TYP_POP_VLAN:
if (!rx_rule && !(dmn->ste_ctx->actions_caps &
@@ -1129,7 +1124,8 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
}
action->rewrite->data = (void *)hw_actions;
- action->rewrite->index = (action->rewrite->chunk->icm_addr -
+ action->rewrite->index = (mlx5dr_icm_pool_get_chunk_icm_addr
+ (action->rewrite->chunk) -
dmn->info.caps.hdr_modify_icm_addr) /
ACTION_CACHE_LINE_SIZE;
@@ -1708,7 +1704,7 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn,
action->rewrite->modify_ttl = modify_ttl;
action->rewrite->data = (u8 *)hw_actions;
action->rewrite->num_of_actions = num_hw_actions;
- action->rewrite->index = (chunk->icm_addr -
+ action->rewrite->index = (mlx5dr_icm_pool_get_chunk_icm_addr(chunk) -
dmn->info.caps.hdr_modify_icm_addr) /
ACTION_CACHE_LINE_SIZE;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
index 2784cd59fefe..d5998ef59be4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
@@ -3,7 +3,6 @@
#include <linux/debugfs.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/seq_file.h>
#include "dr_types.h"
@@ -218,7 +217,8 @@ dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V1;
}
- dr_dump_hex_print(hw_ste_dump, (char *)ste->hw_ste, DR_STE_SIZE_REDUCED);
+ dr_dump_hex_print(hw_ste_dump, (char *)mlx5dr_ste_get_hw_ste(ste),
+ DR_STE_SIZE_REDUCED);
seq_printf(file, "%d,0x%llx,0x%llx,%s\n", mem_rec_type,
dr_dump_icm_to_idx(mlx5dr_ste_get_icm_addr(ste)), rule_id,
@@ -347,16 +347,19 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
const u64 matcher_id)
{
enum dr_dump_rec_type rec_type;
+ u64 s_icm_addr, e_icm_addr;
int i, ret;
rec_type = is_rx ? DR_DUMP_REC_TYPE_MATCHER_RX :
DR_DUMP_REC_TYPE_MATCHER_TX;
+ s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->s_htbl->chunk);
+ e_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->e_anchor->chunk);
seq_printf(file, "%d,0x%llx,0x%llx,%d,0x%llx,0x%llx\n",
rec_type, DR_DBG_PTR_TO_ID(matcher_rx_tx),
matcher_id, matcher_rx_tx->num_of_builders,
- dr_dump_icm_to_idx(matcher_rx_tx->s_htbl->chunk->icm_addr),
- dr_dump_icm_to_idx(matcher_rx_tx->e_anchor->chunk->icm_addr));
+ dr_dump_icm_to_idx(s_icm_addr),
+ dr_dump_icm_to_idx(e_icm_addr));
for (i = 0; i < matcher_rx_tx->num_of_builders; i++) {
ret = dr_dump_matcher_builder(file,
@@ -427,12 +430,14 @@ dr_dump_table_rx_tx(struct seq_file *file, bool is_rx,
const u64 table_id)
{
enum dr_dump_rec_type rec_type;
+ u64 s_icm_addr;
rec_type = is_rx ? DR_DUMP_REC_TYPE_TABLE_RX :
DR_DUMP_REC_TYPE_TABLE_TX;
+ s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(table_rx_tx->s_anchor->chunk);
seq_printf(file, "%d,0x%llx,0x%llx\n", rec_type, table_id,
- dr_dump_icm_to_idx(table_rx_tx->s_anchor->chunk->icm_addr));
+ dr_dump_icm_to_idx(s_icm_addr));
return 0;
}
@@ -630,7 +635,7 @@ void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn)
}
dmn->dump_info.steering_debugfs =
- debugfs_create_dir("steering", dev->priv.dbg_root);
+ debugfs_create_dir("steering", mlx5_debugfs_get_dev_root(dev));
dmn->dump_info.fdb_debugfs =
debugfs_create_dir("fdb", dmn->dump_info.steering_debugfs);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index 5fa7f9d6d8b9..fc6ae49b5ecc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -8,7 +8,7 @@
#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
((dmn)->info.caps.dmn_type##_sw_owner || \
((dmn)->info.caps.dmn_type##_sw_owner_v2 && \
- (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX))
+ (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_7))
static void dr_domain_init_csum_recalc_fts(struct mlx5dr_domain *dmn)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
index e289cfdbce07..4ca67fa24cc6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
@@ -57,6 +57,36 @@ static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev,
return mlx5_core_create_mkey(mdev, mkey, in, inlen);
}
+u64 mlx5dr_icm_pool_get_chunk_mr_addr(struct mlx5dr_icm_chunk *chunk)
+{
+ u32 offset = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);
+
+ return (u64)offset * chunk->seg;
+}
+
+u32 mlx5dr_icm_pool_get_chunk_rkey(struct mlx5dr_icm_chunk *chunk)
+{
+ return chunk->buddy_mem->icm_mr->mkey;
+}
+
+u64 mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk *chunk)
+{
+ u32 size = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);
+
+ return (u64)chunk->buddy_mem->icm_mr->icm_start_addr + size * chunk->seg;
+}
+
+u32 mlx5dr_icm_pool_get_chunk_byte_size(struct mlx5dr_icm_chunk *chunk)
+{
+ return mlx5dr_icm_pool_chunk_size_to_byte(chunk->size,
+ chunk->buddy_mem->pool->icm_type);
+}
+
+u32 mlx5dr_icm_pool_get_chunk_num_of_entries(struct mlx5dr_icm_chunk *chunk)
+{
+ return mlx5dr_icm_pool_chunk_size_to_entries(chunk->size);
+}
+
static struct mlx5dr_icm_mr *
dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
{
@@ -158,12 +188,13 @@ static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset)
static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
{
+ int num_of_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
memset(chunk->hw_ste_arr, 0,
- chunk->num_of_entries * dr_icm_buddy_get_ste_size(buddy));
+ num_of_entries * dr_icm_buddy_get_ste_size(buddy));
memset(chunk->ste_arr, 0,
- chunk->num_of_entries * sizeof(chunk->ste_arr[0]));
+ num_of_entries * sizeof(chunk->ste_arr[0]));
}
static enum mlx5dr_icm_type
@@ -177,7 +208,7 @@ static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk,
{
enum mlx5dr_icm_type icm_type = get_chunk_icm_type(chunk);
- buddy->used_memory -= chunk->byte_size;
+ buddy->used_memory -= mlx5dr_icm_pool_get_chunk_byte_size(chunk);
list_del(&chunk->chunk_list);
if (icm_type == DR_ICM_TYPE_STE)
@@ -298,21 +329,14 @@ dr_icm_chunk_create(struct mlx5dr_icm_pool *pool,
offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg;
- chunk->rkey = buddy_mem_pool->icm_mr->mkey;
- chunk->mr_addr = offset;
- chunk->icm_addr =
- (uintptr_t)buddy_mem_pool->icm_mr->icm_start_addr + offset;
- chunk->num_of_entries =
- mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);
- chunk->byte_size =
- mlx5dr_icm_pool_chunk_size_to_byte(chunk_size, pool->icm_type);
chunk->seg = seg;
+ chunk->size = chunk_size;
chunk->buddy_mem = buddy_mem_pool;
if (pool->icm_type == DR_ICM_TYPE_STE)
dr_icm_chunk_ste_init(chunk, offset);
- buddy_mem_pool->used_memory += chunk->byte_size;
+ buddy_mem_pool->used_memory += mlx5dr_icm_pool_get_chunk_byte_size(chunk);
INIT_LIST_HEAD(&chunk->chunk_list);
/* chunk now is part of the used_list */
@@ -336,6 +360,7 @@ static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)
static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
{
struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;
+ u32 num_entries;
int err;
err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
@@ -348,9 +373,9 @@ static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
struct mlx5dr_icm_chunk *chunk, *tmp_chunk;
list_for_each_entry_safe(chunk, tmp_chunk, &buddy->hot_list, chunk_list) {
- mlx5dr_buddy_free_mem(buddy, chunk->seg,
- ilog2(chunk->num_of_entries));
- pool->hot_memory_size -= chunk->byte_size;
+ num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
+ mlx5dr_buddy_free_mem(buddy, chunk->seg, ilog2(num_entries));
+ pool->hot_memory_size -= mlx5dr_icm_pool_get_chunk_byte_size(chunk);
dr_icm_chunk_destroy(chunk, buddy);
}
@@ -448,7 +473,7 @@ void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk)
/* move the memory to the waiting list AKA "hot" */
mutex_lock(&pool->mutex);
list_move_tail(&chunk->chunk_list, &buddy->hot_list);
- pool->hot_memory_size += chunk->byte_size;
+ pool->hot_memory_size += mlx5dr_icm_pool_get_chunk_byte_size(chunk);
/* Check if we have chunks that are waiting for sync-ste */
if (dr_icm_pool_is_sync_required(pool))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index 38971fe1dfe1..0726848eb3ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -47,6 +47,11 @@ static bool dr_mask_is_ttl_set(struct mlx5dr_match_spec *spec)
return spec->ttl_hoplimit;
}
+static bool dr_mask_is_ipv4_ihl_set(struct mlx5dr_match_spec *spec)
+{
+ return spec->ipv4_ihl;
+}
+
#define DR_MASK_IS_L2_DST(_spec, _misc, _inner_outer) (_spec.first_vid || \
(_spec).first_cfi || (_spec).first_prio || (_spec).cvlan_tag || \
(_spec).svlan_tag || (_spec).dmac_47_16 || (_spec).dmac_15_0 || \
@@ -103,7 +108,7 @@ dr_mask_is_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3)
static bool
dr_matcher_supp_vxlan_gpe(struct mlx5dr_cmd_caps *caps)
{
- return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+ return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
(caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED);
}
@@ -144,7 +149,7 @@ static bool dr_mask_is_tnl_geneve_tlv_opt_exist_set(struct mlx5dr_match_misc *mi
static bool
dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps)
{
- return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+ return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
(caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_ENABLED);
}
@@ -261,13 +266,13 @@ static bool dr_mask_is_tnl_gtpu_any(struct mlx5dr_match_param *mask,
static int dr_matcher_supp_icmp_v4(struct mlx5dr_cmd_caps *caps)
{
- return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+ return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED);
}
static int dr_matcher_supp_icmp_v6(struct mlx5dr_cmd_caps *caps)
{
- return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+ return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED);
}
@@ -507,7 +512,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
&mask, inner, rx);
- if (dr_mask_is_ttl_set(&mask.outer))
+ if (dr_mask_is_ttl_set(&mask.outer) ||
+ dr_mask_is_ipv4_ihl_set(&mask.outer))
mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
&mask, inner, rx);
}
@@ -614,7 +620,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
&mask, inner, rx);
- if (dr_mask_is_ttl_set(&mask.inner))
+ if (dr_mask_is_ttl_set(&mask.inner) ||
+ dr_mask_is_ipv4_ihl_set(&mask.inner))
mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
&mask, inner, rx);
}
@@ -698,7 +705,7 @@ static int dr_nic_matcher_connect(struct mlx5dr_domain *dmn,
/* Connect start hash table to end anchor */
info.type = CONNECT_MISS;
- info.miss_icm_addr = curr_nic_matcher->e_anchor->chunk->icm_addr;
+ info.miss_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(curr_nic_matcher->e_anchor->chunk);
ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
curr_nic_matcher->s_htbl,
&info, false);
@@ -719,12 +726,14 @@ static int dr_nic_matcher_connect(struct mlx5dr_domain *dmn,
return ret;
/* Update the pointing ste and next hash table */
- curr_nic_matcher->s_htbl->pointing_ste = prev_htbl->ste_arr;
- prev_htbl->ste_arr[0].next_htbl = curr_nic_matcher->s_htbl;
+ curr_nic_matcher->s_htbl->pointing_ste = prev_htbl->chunk->ste_arr;
+ prev_htbl->chunk->ste_arr[0].next_htbl = curr_nic_matcher->s_htbl;
if (next_nic_matcher) {
- next_nic_matcher->s_htbl->pointing_ste = curr_nic_matcher->e_anchor->ste_arr;
- curr_nic_matcher->e_anchor->ste_arr[0].next_htbl = next_nic_matcher->s_htbl;
+ next_nic_matcher->s_htbl->pointing_ste =
+ curr_nic_matcher->e_anchor->chunk->ste_arr;
+ curr_nic_matcher->e_anchor->chunk->ste_arr[0].next_htbl =
+ next_nic_matcher->s_htbl;
}
return 0;
@@ -1036,12 +1045,12 @@ static int dr_matcher_disconnect_nic(struct mlx5dr_domain *dmn,
if (next_nic_matcher) {
info.type = CONNECT_HIT;
info.hit_next_htbl = next_nic_matcher->s_htbl;
- next_nic_matcher->s_htbl->pointing_ste = prev_anchor->ste_arr;
- prev_anchor->ste_arr[0].next_htbl = next_nic_matcher->s_htbl;
+ next_nic_matcher->s_htbl->pointing_ste = prev_anchor->chunk->ste_arr;
+ prev_anchor->chunk->ste_arr[0].next_htbl = next_nic_matcher->s_htbl;
} else {
info.type = CONNECT_MISS;
info.miss_icm_addr = nic_tbl->default_icm_addr;
- prev_anchor->ste_arr[0].next_htbl = NULL;
+ prev_anchor->chunk->ste_arr[0].next_htbl = NULL;
}
return mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, prev_anchor,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index b4374578425b..ddfaf7891188 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -21,12 +21,12 @@ static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
if (!ste_info_last)
return -ENOMEM;
- mlx5dr_ste_set_miss_addr(ste_ctx, last_ste->hw_ste,
+ mlx5dr_ste_set_miss_addr(ste_ctx, mlx5dr_ste_get_hw_ste(last_ste),
mlx5dr_ste_get_icm_addr(new_last_ste));
list_add_tail(&new_last_ste->miss_list_node, miss_list);
mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_CTRL,
- 0, last_ste->hw_ste,
+ 0, mlx5dr_ste_get_hw_ste(last_ste),
ste_info_last, send_list, true);
return 0;
@@ -41,6 +41,7 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_htbl *new_htbl;
struct mlx5dr_ste *ste;
+ u64 icm_addr;
/* Create new table for miss entry */
new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
@@ -53,9 +54,9 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
}
/* One and only entry, never grows */
- ste = new_htbl->ste_arr;
- mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste,
- nic_matcher->e_anchor->chunk->icm_addr);
+ ste = new_htbl->chunk->ste_arr;
+ icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
+ mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste, icm_addr);
mlx5dr_htbl_get(new_htbl);
return ste;
@@ -79,7 +80,7 @@ dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher,
ste->htbl->pointing_ste = orig_ste->htbl->pointing_ste;
/* In collision entry, all members share the same miss_list_head */
- ste->htbl->miss_list = mlx5dr_ste_get_miss_list(orig_ste);
+ ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(orig_ste);
/* Next table */
if (mlx5dr_ste_create_next_htbl(matcher, nic_matcher, ste, hw_ste,
@@ -107,9 +108,11 @@ dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
* is already written to the hw.
*/
if (ste_info->size == DR_STE_SIZE_CTRL)
- memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_CTRL);
+ memcpy(mlx5dr_ste_get_hw_ste(ste_info->ste),
+ ste_info->data, DR_STE_SIZE_CTRL);
else
- memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_REDUCED);
+ memcpy(mlx5dr_ste_get_hw_ste(ste_info->ste),
+ ste_info->data, DR_STE_SIZE_REDUCED);
ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
ste_info->size, ste_info->offset);
@@ -159,7 +162,7 @@ dr_rule_find_ste_in_miss_list(struct list_head *miss_list, u8 *hw_ste)
/* Check if hw_ste is present in the list */
list_for_each_entry(ste, miss_list, miss_list_node) {
- if (mlx5dr_ste_equal_tag(ste->hw_ste, hw_ste))
+ if (mlx5dr_ste_equal_tag(mlx5dr_ste_get_hw_ste(ste), hw_ste))
return ste;
}
@@ -185,7 +188,7 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
new_ste->htbl->pointing_ste = col_ste->htbl->pointing_ste;
/* In collision entry, all members share the same miss_list_head */
- new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste);
+ new_ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(col_ste);
/* Update the previous from the list */
ret = dr_rule_append_to_miss_list(dmn->ste_ctx, new_ste,
@@ -235,6 +238,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
bool use_update_list = false;
u8 hw_ste[DR_STE_SIZE] = {};
struct mlx5dr_ste *new_ste;
+ u64 icm_addr;
int new_idx;
u8 sb_idx;
@@ -243,12 +247,12 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask);
/* Copy STE control and tag */
- memcpy(hw_ste, cur_ste->hw_ste, DR_STE_SIZE_REDUCED);
- mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
- nic_matcher->e_anchor->chunk->icm_addr);
+ icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
+ memcpy(hw_ste, mlx5dr_ste_get_hw_ste(cur_ste), DR_STE_SIZE_REDUCED);
+ mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste, icm_addr);
new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
- new_ste = &new_htbl->ste_arr[new_idx];
+ new_ste = &new_htbl->chunk->ste_arr[new_idx];
if (mlx5dr_ste_is_not_used(new_ste)) {
mlx5dr_htbl_get(new_htbl);
@@ -269,7 +273,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
use_update_list = true;
}
- memcpy(new_ste->hw_ste, hw_ste, DR_STE_SIZE_REDUCED);
+ memcpy(mlx5dr_ste_get_hw_ste(new_ste), hw_ste, DR_STE_SIZE_REDUCED);
new_htbl->ctrl.num_of_valid_entries++;
@@ -334,7 +338,7 @@ static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
int err = 0;
int i;
- cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk_size);
+ cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk->size);
if (cur_entries < 1) {
mlx5dr_dbg(matcher->tbl->dmn, "Invalid number of entries\n");
@@ -342,7 +346,7 @@ static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
}
for (i = 0; i < cur_entries; i++) {
- cur_ste = &cur_htbl->ste_arr[i];
+ cur_ste = &cur_htbl->chunk->ste_arr[i];
if (mlx5dr_ste_is_not_used(cur_ste)) /* Empty, nothing to copy */
continue;
@@ -398,7 +402,7 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
/* Write new table to HW */
info.type = CONNECT_MISS;
- info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
+ info.miss_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
dmn->info.caps.gvmi,
nic_dmn->type,
@@ -446,21 +450,21 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
* (48B len) which works only on first 32B
*/
mlx5dr_ste_set_hit_addr(dmn->ste_ctx,
- prev_htbl->ste_arr[0].hw_ste,
- new_htbl->chunk->icm_addr,
- new_htbl->chunk->num_of_entries);
+ prev_htbl->chunk->hw_ste_arr,
+ mlx5dr_icm_pool_get_chunk_icm_addr(new_htbl->chunk),
+ mlx5dr_icm_pool_get_chunk_num_of_entries(new_htbl->chunk));
- ste_to_update = &prev_htbl->ste_arr[0];
+ ste_to_update = &prev_htbl->chunk->ste_arr[0];
} else {
mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
- cur_htbl->pointing_ste->hw_ste,
+ mlx5dr_ste_get_hw_ste(cur_htbl->pointing_ste),
new_htbl);
ste_to_update = cur_htbl->pointing_ste;
}
mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_CTRL,
- 0, ste_to_update->hw_ste, ste_info,
- update_list, false);
+ 0, mlx5dr_ste_get_hw_ste(ste_to_update),
+ ste_info, update_list, false);
return new_htbl;
@@ -489,10 +493,10 @@ static struct mlx5dr_ste_htbl *dr_rule_rehash(struct mlx5dr_rule *rule,
struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
enum mlx5dr_icm_chunk_size new_size;
- new_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk_size);
+ new_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk->size);
new_size = min_t(u32, new_size, dmn->info.max_log_sw_icm_sz);
- if (new_size == cur_htbl->chunk_size)
+ if (new_size == cur_htbl->chunk->size)
return NULL; /* Skip rehash, we already at the max size */
return dr_rule_rehash_htbl(rule, nic_rule, cur_htbl, ste_location,
@@ -659,13 +663,13 @@ static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
int threshold;
- if (dmn->info.max_log_sw_icm_sz <= htbl->chunk_size)
+ if (dmn->info.max_log_sw_icm_sz <= htbl->chunk->size)
return false;
if (!mlx5dr_ste_htbl_may_grow(htbl))
return false;
- if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk_size)
+ if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk->size)
return false;
threshold = mlx5dr_ste_htbl_increase_threshold(htbl);
@@ -755,6 +759,7 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_send_info *ste_info;
+ u64 icm_addr;
/* Take ref on table, only on first time this ste is used */
mlx5dr_htbl_get(cur_htbl);
@@ -762,8 +767,8 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
/* new entry -> new branch */
list_add_tail(&ste->miss_list_node, miss_list);
- mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
- nic_matcher->e_anchor->chunk->icm_addr);
+ icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
+ mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste, icm_addr);
ste->ste_chain_location = ste_location;
@@ -822,7 +827,7 @@ dr_rule_handle_ste_branch(struct mlx5dr_rule *rule,
again:
index = mlx5dr_ste_calc_hash_index(hw_ste, cur_htbl);
miss_list = &cur_htbl->chunk->miss_list[index];
- ste = &cur_htbl->ste_arr[index];
+ ste = &cur_htbl->chunk->ste_arr[index];
if (mlx5dr_ste_is_not_used(ste)) {
if (dr_rule_handle_empty_entry(matcher, nic_matcher, cur_htbl,
@@ -858,7 +863,7 @@ again:
ste_location, send_ste_list);
if (!new_htbl) {
mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n",
- cur_htbl->chunk_size);
+ cur_htbl->chunk->size);
mlx5dr_htbl_put(cur_htbl);
} else {
cur_htbl = new_htbl;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 00aef47d7682..ef19a66f5233 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -407,17 +407,17 @@ static int dr_get_tbl_copy_details(struct mlx5dr_domain *dmn,
int *iterations,
int *num_stes)
{
+ u32 chunk_byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
int alloc_size;
- if (htbl->chunk->byte_size > dmn->send_ring->max_post_send_size) {
- *iterations = htbl->chunk->byte_size /
- dmn->send_ring->max_post_send_size;
+ if (chunk_byte_size > dmn->send_ring->max_post_send_size) {
+ *iterations = chunk_byte_size / dmn->send_ring->max_post_send_size;
*byte_size = dmn->send_ring->max_post_send_size;
alloc_size = *byte_size;
*num_stes = *byte_size / DR_STE_SIZE;
} else {
*iterations = 1;
- *num_stes = htbl->chunk->num_of_entries;
+ *num_stes = mlx5dr_icm_pool_get_chunk_num_of_entries(htbl->chunk);
alloc_size = *num_stes * DR_STE_SIZE;
}
@@ -453,7 +453,7 @@ int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn, struct mlx5dr_ste *ste,
send_info.write.length = size;
send_info.write.lkey = 0;
send_info.remote_addr = mlx5dr_ste_get_mr_addr(ste) + offset;
- send_info.rkey = ste->htbl->chunk->rkey;
+ send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(ste->htbl->chunk);
return dr_postsend_icm_data(dmn, &send_info);
}
@@ -462,7 +462,7 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
struct mlx5dr_ste_htbl *htbl,
u8 *formatted_ste, u8 *mask)
{
- u32 byte_size = htbl->chunk->byte_size;
+ u32 byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
int num_stes_per_iter;
int iterations;
u8 *data;
@@ -486,7 +486,7 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
* need to add the bit_mask
*/
for (j = 0; j < num_stes_per_iter; j++) {
- struct mlx5dr_ste *ste = &htbl->ste_arr[ste_index + j];
+ struct mlx5dr_ste *ste = &htbl->chunk->ste_arr[ste_index + j];
u32 ste_off = j * DR_STE_SIZE;
if (mlx5dr_ste_is_not_used(ste)) {
@@ -495,7 +495,8 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
} else {
/* Copy data */
memcpy(data + ste_off,
- htbl->ste_arr[ste_index + j].hw_ste,
+ htbl->chunk->hw_ste_arr +
+ DR_STE_SIZE_REDUCED * (ste_index + j),
DR_STE_SIZE_REDUCED);
/* Copy bit_mask */
memcpy(data + ste_off + DR_STE_SIZE_REDUCED,
@@ -511,8 +512,8 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
send_info.write.length = byte_size;
send_info.write.lkey = 0;
send_info.remote_addr =
- mlx5dr_ste_get_mr_addr(htbl->ste_arr + ste_index);
- send_info.rkey = htbl->chunk->rkey;
+ mlx5dr_ste_get_mr_addr(htbl->chunk->ste_arr + ste_index);
+ send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(htbl->chunk);
ret = dr_postsend_icm_data(dmn, &send_info);
if (ret)
@@ -530,7 +531,7 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
u8 *ste_init_data,
bool update_hw_ste)
{
- u32 byte_size = htbl->chunk->byte_size;
+ u32 byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
int iterations;
int num_stes;
u8 *copy_dst;
@@ -546,7 +547,7 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
if (update_hw_ste) {
/* Copy the reduced STE to hash table ste_arr */
for (i = 0; i < num_stes; i++) {
- copy_dst = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
+ copy_dst = htbl->chunk->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
memcpy(copy_dst, ste_init_data, DR_STE_SIZE_REDUCED);
}
}
@@ -568,8 +569,8 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
send_info.write.length = byte_size;
send_info.write.lkey = 0;
send_info.remote_addr =
- mlx5dr_ste_get_mr_addr(htbl->ste_arr + ste_index);
- send_info.rkey = htbl->chunk->rkey;
+ mlx5dr_ste_get_mr_addr(htbl->chunk->ste_arr + ste_index);
+ send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(htbl->chunk);
ret = dr_postsend_icm_data(dmn, &send_info);
if (ret)
@@ -591,8 +592,9 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
send_info.write.length = action->rewrite->num_of_actions *
DR_MODIFY_ACTION_SIZE;
send_info.write.lkey = 0;
- send_info.remote_addr = action->rewrite->chunk->mr_addr;
- send_info.rkey = action->rewrite->chunk->rkey;
+ send_info.remote_addr =
+ mlx5dr_icm_pool_get_chunk_mr_addr(action->rewrite->chunk);
+ send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(action->rewrite->chunk);
ret = dr_postsend_icm_data(dmn, &send_info);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index 187e29b409b6..09ebd3088857 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -25,6 +25,7 @@ bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps)
u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
{
+ u32 num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(htbl->chunk);
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
u8 masked[DR_STE_SIZE_TAG] = {};
u32 crc32, index;
@@ -32,7 +33,7 @@ u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
int i;
/* Don't calculate CRC if the result is predicted */
- if (htbl->chunk->num_of_entries == 1 || htbl->byte_mask == 0)
+ if (num_entries == 1 || htbl->byte_mask == 0)
return 0;
/* Mask tag using byte mask, bit per byte */
@@ -45,7 +46,7 @@ u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
}
crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG);
- index = crc32 & (htbl->chunk->num_of_entries - 1);
+ index = crc32 & (num_entries - 1);
return index;
}
@@ -96,13 +97,11 @@ void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
}
static void dr_ste_always_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
- struct mlx5dr_ste *ste, u64 miss_addr)
+ u8 *hw_ste, u64 miss_addr)
{
- u8 *hw_ste_p = ste->hw_ste;
-
- ste_ctx->set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
- ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
- dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
+ ste_ctx->set_next_lu_type(hw_ste, MLX5DR_STE_LU_TYPE_DONT_CARE);
+ ste_ctx->set_miss_addr(hw_ste, miss_addr);
+ dr_ste_set_always_miss((struct dr_hw_ste_format *)hw_ste);
}
void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
@@ -113,37 +112,45 @@ void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
{
- u32 index = ste - ste->htbl->ste_arr;
+ u64 base_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(ste->htbl->chunk);
+ u32 index = ste - ste->htbl->chunk->ste_arr;
- return ste->htbl->chunk->icm_addr + DR_STE_SIZE * index;
+ return base_icm_addr + DR_STE_SIZE * index;
}
u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
{
- u32 index = ste - ste->htbl->ste_arr;
+ u32 index = ste - ste->htbl->chunk->ste_arr;
- return ste->htbl->chunk->mr_addr + DR_STE_SIZE * index;
+ return mlx5dr_icm_pool_get_chunk_mr_addr(ste->htbl->chunk) + DR_STE_SIZE * index;
+}
+
+u8 *mlx5dr_ste_get_hw_ste(struct mlx5dr_ste *ste)
+{
+ u64 index = ste - ste->htbl->chunk->ste_arr;
+
+ return ste->htbl->chunk->hw_ste_arr + DR_STE_SIZE_REDUCED * index;
}
struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
{
- u32 index = ste - ste->htbl->ste_arr;
+ u32 index = ste - ste->htbl->chunk->ste_arr;
- return &ste->htbl->miss_list[index];
+ return &ste->htbl->chunk->miss_list[index];
}
static void dr_ste_always_hit_htbl(struct mlx5dr_ste_ctx *ste_ctx,
- struct mlx5dr_ste *ste,
+ u8 *hw_ste,
struct mlx5dr_ste_htbl *next_htbl)
{
struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
- u8 *hw_ste = ste->hw_ste;
ste_ctx->set_byte_mask(hw_ste, next_htbl->byte_mask);
ste_ctx->set_next_lu_type(hw_ste, next_htbl->lu_type);
- ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
+ ste_ctx->set_hit_addr(hw_ste, mlx5dr_icm_pool_get_chunk_icm_addr(chunk),
+ mlx5dr_icm_pool_get_chunk_num_of_entries(chunk));
- dr_ste_set_always_hit((struct dr_hw_ste_format *)ste->hw_ste);
+ dr_ste_set_always_hit((struct dr_hw_ste_format *)hw_ste);
}
bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
@@ -166,7 +173,8 @@ bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
*/
static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
{
- memcpy(dst->hw_ste, src->hw_ste, DR_STE_SIZE_REDUCED);
+ memcpy(mlx5dr_ste_get_hw_ste(dst), mlx5dr_ste_get_hw_ste(src),
+ DR_STE_SIZE_REDUCED);
dst->next_htbl = src->next_htbl;
if (dst->next_htbl)
dst->next_htbl->pointing_ste = dst;
@@ -184,18 +192,17 @@ dr_ste_remove_head_ste(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_htbl *stats_tbl)
{
u8 tmp_data_ste[DR_STE_SIZE] = {};
- struct mlx5dr_ste tmp_ste = {};
u64 miss_addr;
- tmp_ste.hw_ste = tmp_data_ste;
+ miss_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
/* Use temp ste because dr_ste_always_miss_addr
* touches bit_mask area which doesn't exist at ste->hw_ste.
+ * Need to use a full-sized (DR_STE_SIZE) hw_ste.
*/
- memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
- miss_addr = nic_matcher->e_anchor->chunk->icm_addr;
- dr_ste_always_miss_addr(ste_ctx, &tmp_ste, miss_addr);
- memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED);
+ memcpy(tmp_data_ste, mlx5dr_ste_get_hw_ste(ste), DR_STE_SIZE_REDUCED);
+ dr_ste_always_miss_addr(ste_ctx, tmp_data_ste, miss_addr);
+ memcpy(mlx5dr_ste_get_hw_ste(ste), tmp_data_ste, DR_STE_SIZE_REDUCED);
list_del_init(&ste->miss_list_node);
@@ -237,7 +244,7 @@ dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher,
mlx5dr_rule_set_last_member(next_ste->rule_rx_tx, ste, false);
/* Copy all 64 hw_ste bytes */
- memcpy(hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
+ memcpy(hw_ste, mlx5dr_ste_get_hw_ste(ste), DR_STE_SIZE_REDUCED);
sb_idx = ste->ste_chain_location - 1;
mlx5dr_ste_set_bit_mask(hw_ste,
nic_matcher->ste_builder[sb_idx].bit_mask);
@@ -273,12 +280,13 @@ static void dr_ste_remove_middle_ste(struct mlx5dr_ste_ctx *ste_ctx,
if (WARN_ON(!prev_ste))
return;
- miss_addr = ste_ctx->get_miss_addr(ste->hw_ste);
- ste_ctx->set_miss_addr(prev_ste->hw_ste, miss_addr);
+ miss_addr = ste_ctx->get_miss_addr(mlx5dr_ste_get_hw_ste(ste));
+ ste_ctx->set_miss_addr(mlx5dr_ste_get_hw_ste(prev_ste), miss_addr);
mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_CTRL, 0,
- prev_ste->hw_ste, ste_info,
- send_ste_list, true /* Copy data*/);
+ mlx5dr_ste_get_hw_ste(prev_ste),
+ ste_info, send_ste_list,
+ true /* Copy data*/);
list_del_init(&ste->miss_list_node);
@@ -364,9 +372,11 @@ void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
u8 *hw_ste,
struct mlx5dr_ste_htbl *next_htbl)
{
- struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
+ u64 icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(next_htbl->chunk);
+ u32 num_entries =
+ mlx5dr_icm_pool_get_chunk_num_of_entries(next_htbl->chunk);
- ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
+ ste_ctx->set_hit_addr(hw_ste, icm_addr, num_entries);
}
void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
@@ -385,15 +395,22 @@ void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_htbl_connect_info *connect_info)
{
bool is_rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;
- struct mlx5dr_ste ste = {};
+ u8 tmp_hw_ste[DR_STE_SIZE] = {0};
ste_ctx->ste_init(formatted_ste, htbl->lu_type, is_rx, gvmi);
- ste.hw_ste = formatted_ste;
+ /* Use temp ste because dr_ste_always_miss_addr/hit_htbl
+ * touches bit_mask area which doesn't exist at ste->hw_ste.
+ * Need to use a full-sized (DR_STE_SIZE) hw_ste.
+ */
+ memcpy(tmp_hw_ste, formatted_ste, DR_STE_SIZE_REDUCED);
if (connect_info->type == CONNECT_HIT)
- dr_ste_always_hit_htbl(ste_ctx, &ste, connect_info->hit_next_htbl);
+ dr_ste_always_hit_htbl(ste_ctx, tmp_hw_ste,
+ connect_info->hit_next_htbl);
else
- dr_ste_always_miss_addr(ste_ctx, &ste, connect_info->miss_icm_addr);
+ dr_ste_always_miss_addr(ste_ctx, tmp_hw_ste,
+ connect_info->miss_icm_addr);
+ memcpy(formatted_ste, tmp_hw_ste, DR_STE_SIZE_REDUCED);
}
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
@@ -444,7 +461,8 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
/* Write new table to HW */
info.type = CONNECT_MISS;
- info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
+ info.miss_icm_addr =
+ mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl,
&info, false)) {
mlx5dr_info(dmn, "Failed writing table to HW\n");
@@ -470,6 +488,7 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
{
struct mlx5dr_icm_chunk *chunk;
struct mlx5dr_ste_htbl *htbl;
+ u32 num_entries;
int i;
htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
@@ -483,22 +502,18 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
htbl->chunk = chunk;
htbl->lu_type = lu_type;
htbl->byte_mask = byte_mask;
- htbl->ste_arr = chunk->ste_arr;
- htbl->hw_ste_arr = chunk->hw_ste_arr;
- htbl->miss_list = chunk->miss_list;
htbl->refcount = 0;
+ num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
- for (i = 0; i < chunk->num_of_entries; i++) {
- struct mlx5dr_ste *ste = &htbl->ste_arr[i];
+ for (i = 0; i < num_entries; i++) {
+ struct mlx5dr_ste *ste = &chunk->ste_arr[i];
- ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
ste->htbl = htbl;
ste->refcount = 0;
INIT_LIST_HEAD(&ste->miss_list_node);
- INIT_LIST_HEAD(&htbl->miss_list[i]);
+ INIT_LIST_HEAD(&chunk->miss_list[i]);
}
- htbl->chunk_size = chunk_size;
return htbl;
out_free_htbl:
@@ -523,8 +538,8 @@ void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
{
- ste_ctx->set_actions_tx(dmn, action_type_set, hw_ste_arr,
- attr, added_stes);
+ ste_ctx->set_actions_tx(dmn, action_type_set, ste_ctx->actions_caps,
+ hw_ste_arr, attr, added_stes);
}
void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
@@ -534,8 +549,8 @@ void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
{
- ste_ctx->set_actions_rx(dmn, action_type_set, hw_ste_arr,
- attr, added_stes);
+ ste_ctx->set_actions_rx(dmn, action_type_set, ste_ctx->actions_caps,
+ hw_ste_arr, attr, added_stes);
}
const struct mlx5dr_ste_action_modify_field *
@@ -793,6 +808,7 @@ static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec, bo
spec->tcp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_sport, clr);
spec->tcp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_dport, clr);
+ spec->ipv4_ihl = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ipv4_ihl, clr);
spec->ttl_hoplimit = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ttl_hoplimit, clr);
spec->udp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_sport, clr);
@@ -1360,15 +1376,14 @@ void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx,
ste_ctx->build_tnl_header_0_1_init(sb, mask);
}
-static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
- [MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0,
- [MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1,
-};
-
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
{
- if (version > MLX5_STEERING_FORMAT_CONNECTX_6DX)
- return NULL;
+ if (version == MLX5_STEERING_FORMAT_CONNECTX_5)
+ return mlx5dr_ste_get_ctx_v0();
+ else if (version == MLX5_STEERING_FORMAT_CONNECTX_6DX)
+ return mlx5dr_ste_get_ctx_v1();
+ else if (version == MLX5_STEERING_FORMAT_CONNECTX_7)
+ return mlx5dr_ste_get_ctx_v2();
- return mlx5dr_ste_ctx_arr[version];
+ return NULL;
}
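The dr_ste.c hunk above drops the extern ste_ctx array in favour of per-version getter calls, which lets every context object stay static to its own translation unit. A minimal stand-alone sketch of the same dispatch pattern, using simplified stand-in names rather than the driver's types:

#include <stddef.h>

enum steering_format { FMT_CONNECTX_5, FMT_CONNECTX_6DX, FMT_CONNECTX_7 };

struct ste_ctx { const char *name; };

static struct ste_ctx ctx_v0 = { .name = "v0" };
static struct ste_ctx ctx_v1 = { .name = "v1" };
static struct ste_ctx ctx_v2 = { .name = "v2" };

static struct ste_ctx *get_ctx_v0(void) { return &ctx_v0; }
static struct ste_ctx *get_ctx_v1(void) { return &ctx_v1; }
static struct ste_ctx *get_ctx_v2(void) { return &ctx_v2; }

static struct ste_ctx *get_ctx(enum steering_format version)
{
        if (version == FMT_CONNECTX_5)
                return get_ctx_v0();
        else if (version == FMT_CONNECTX_6DX)
                return get_ctx_v1();
        else if (version == FMT_CONNECTX_7)
                return get_ctx_v2();

        return NULL; /* unknown or newer format: caller must check */
}

Unlike the removed array lookup, an unrecognized version simply falls through to NULL, so no explicit upper-bound check on the version value is needed.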
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
index ca8fa32b8680..17513baff9b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
@@ -161,11 +161,13 @@ struct mlx5dr_ste_ctx {
u32 actions_caps;
void (*set_actions_rx)(struct mlx5dr_domain *dmn,
u8 *action_type_set,
+ u32 actions_caps,
u8 *hw_ste_arr,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes);
void (*set_actions_tx)(struct mlx5dr_domain *dmn,
u8 *action_type_set,
+ u32 actions_caps,
u8 *hw_ste_arr,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes);
@@ -197,7 +199,8 @@ struct mlx5dr_ste_ctx {
void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size);
};
-extern struct mlx5dr_ste_ctx ste_ctx_v0;
-extern struct mlx5dr_ste_ctx ste_ctx_v1;
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v0(void);
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void);
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void);
#endif /* _DR_STE_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
index 2d62950f7a29..5a322335f204 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
@@ -408,6 +408,7 @@ static void dr_ste_v0_arr_init_next(u8 **last_ste,
static void
dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
u8 *action_type_set,
+ u32 actions_caps,
u8 *last_ste,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
@@ -477,6 +478,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
static void
dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
u8 *action_type_set,
+ u32 actions_caps,
u8 *last_ste,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
@@ -1152,6 +1154,7 @@ dr_ste_v0_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);
+ DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, ihl, spec, ipv4_ihl);
return 0;
}
@@ -1897,7 +1900,7 @@ static void dr_ste_v0_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v0_build_tnl_header_0_1_tag;
}
-struct mlx5dr_ste_ctx ste_ctx_v0 = {
+static struct mlx5dr_ste_ctx ste_ctx_v0 = {
/* Builders */
.build_eth_l2_src_dst_init = &dr_ste_v0_build_eth_l2_src_dst_init,
.build_eth_l3_ipv6_src_init = &dr_ste_v0_build_eth_l3_ipv6_src_init,
@@ -1950,3 +1953,8 @@ struct mlx5dr_ste_ctx ste_ctx_v0 = {
.set_action_copy = &dr_ste_v0_set_action_copy,
.set_action_decap_l3_list = &dr_ste_v0_set_action_decap_l3_list,
};
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v0(void)
+{
+ return &ste_ctx_v0;
+}
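Besides going static and gaining a getter, the v0 builder above now also writes the new ipv4_ihl field into the eth_l3_ipv4_misc tag. Stripped of the firmware bit layouts, supporting such a match field amounts to copying it from the mask into the software spec and from the spec into the STE tag; a toy sketch of that flow follows (the struct layouts and helper are illustrative, not the real DR_STE_SET_TAG machinery):

#include <stdint.h>

struct match_spec {
        uint8_t ttl_hoplimit;
        uint8_t ipv4_ihl;       /* new field: IPv4 header length, 4 bits used */
};

struct ipv4_misc_tag {
        uint8_t time_to_live;
        uint8_t ihl;
};

/* Copy each masked value into the tag, then clear it in the spec so that
 * any field left non-zero afterwards can flag unsupported match criteria
 * (the pattern the DR_STE_SET_TAG macro follows).
 */
static void set_tag_ipv4_misc(struct ipv4_misc_tag *tag, struct match_spec *spec)
{
        tag->time_to_live = spec->ttl_hoplimit;
        spec->ttl_hoplimit = 0;

        tag->ihl = spec->ipv4_ihl;
        spec->ipv4_ihl = 0;
}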
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index 6ca06800f1d9..fcb962c6db2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -3,7 +3,7 @@
#include <linux/types.h>
#include "mlx5_ifc_dr_ste_v1.h"
-#include "dr_ste.h"
+#include "dr_ste_v1.h"
#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
@@ -121,12 +121,12 @@ enum {
DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2 = 0x8c,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_3 = 0x8d,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_4 = 0x8e,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_5 = 0x8f,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_6 = 0x90,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_7 = 0x91,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0 = 0x8c,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1 = 0x8d,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0 = 0x8e,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1 = 0x8f,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0 = 0x90,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91,
};
static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
@@ -223,22 +223,22 @@ static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
- .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_6, .start = 0, .end = 31,
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
- .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_7, .start = 0, .end = 31,
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
- .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_4, .start = 0, .end = 31,
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
- .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_5, .start = 0, .end = 31,
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
- .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2, .start = 0, .end = 31,
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
- .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_3, .start = 0, .end = 31,
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
},
[MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
@@ -262,7 +262,7 @@ static void dr_ste_v1_set_entry_type(u8 *hw_ste_p, u8 entry_type)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, entry_type);
}
-static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
+void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
{
u64 index = miss_addr >> 6;
@@ -270,7 +270,7 @@ static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6, index);
}
-static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
+u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
{
u64 index =
((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) |
@@ -279,12 +279,12 @@ static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
return index << 6;
}
-static void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
+void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
{
MLX5_SET(ste_match_bwc_v1, hw_ste_p, byte_mask, byte_mask);
}
-static u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p)
+u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p)
{
return MLX5_GET(ste_match_bwc_v1, hw_ste_p, byte_mask);
}
@@ -295,13 +295,13 @@ static void dr_ste_v1_set_lu_type(u8 *hw_ste_p, u16 lu_type)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, match_definer_ctx_idx, lu_type & 0xFF);
}
-static void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
+void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
{
MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_entry_format, lu_type >> 8);
MLX5_SET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx, lu_type & 0xFF);
}
-static u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p)
+u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p)
{
u8 mode = MLX5_GET(ste_match_bwc_v1, hw_ste_p, next_entry_format);
u8 index = MLX5_GET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx);
@@ -314,7 +314,7 @@ static void dr_ste_v1_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
}
-static void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
+void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
{
u64 index = (icm_addr >> 5) | ht_size;
@@ -322,8 +322,7 @@ static void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_31_5_size, index);
}
-static void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type,
- bool is_rx, u16 gvmi)
+void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi)
{
dr_ste_v1_set_lu_type(hw_ste_p, lu_type);
dr_ste_v1_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
@@ -333,8 +332,7 @@ static void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type,
MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_63_48, gvmi);
}
-static void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p,
- u32 ste_size)
+void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size)
{
u8 *tag = hw_ste_p + DR_STE_SIZE_CTRL;
u8 *mask = tag + DR_STE_SIZE_TAG;
@@ -511,11 +509,12 @@ static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
memset(action, 0, MLX5_FLD_SZ_BYTES(ste_mask_and_match_v1, action));
}
-static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
- u8 *action_type_set,
- u8 *last_ste,
- struct mlx5dr_ste_actions_attr *attr,
- u32 *added_stes)
+void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u32 actions_caps,
+ u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes)
{
u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
@@ -533,7 +532,10 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
- allow_modify_hdr = false;
+
+ /* Check whether vlan_pop and modify_hdr on the same STE is supported */
+ if (!(actions_caps & DR_STE_CTX_ACTION_CAP_POP_MDFY))
+ allow_modify_hdr = false;
}
if (action_type_set[DR_ACTION_TYP_CTR])
@@ -631,11 +633,12 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
-static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
- u8 *action_type_set,
- u8 *last_ste,
- struct mlx5dr_ste_actions_attr *attr,
- u32 *added_stes)
+void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u32 actions_caps,
+ u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes)
{
u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
@@ -677,13 +680,16 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
- allow_modify_hdr = false;
- allow_ctr = false;
}
dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
+ allow_ctr = false;
+
+ /* Check whether vlan_pop and modify_hdr on the same STE is supported */
+ if (!(actions_caps & DR_STE_CTX_ACTION_CAP_POP_MDFY))
+ allow_modify_hdr = false;
}
if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
@@ -731,9 +737,9 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_modify_hdr = true;
- allow_ctr = false;
}
dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
+ allow_ctr = false;
}
if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
@@ -800,11 +806,11 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
-static void dr_ste_v1_set_action_set(u8 *d_action,
- u8 hw_field,
- u8 shifter,
- u8 length,
- u32 data)
+void dr_ste_v1_set_action_set(u8 *d_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data)
{
shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
MLX5_SET(ste_double_action_set_v1, d_action, action_id, DR_STE_V1_ACTION_ID_SET);
@@ -814,11 +820,11 @@ static void dr_ste_v1_set_action_set(u8 *d_action,
MLX5_SET(ste_double_action_set_v1, d_action, inline_data, data);
}
-static void dr_ste_v1_set_action_add(u8 *d_action,
- u8 hw_field,
- u8 shifter,
- u8 length,
- u32 data)
+void dr_ste_v1_set_action_add(u8 *d_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data)
{
shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
MLX5_SET(ste_double_action_add_v1, d_action, action_id, DR_STE_V1_ACTION_ID_ADD);
@@ -828,12 +834,12 @@ static void dr_ste_v1_set_action_add(u8 *d_action,
MLX5_SET(ste_double_action_add_v1, d_action, add_value, data);
}
-static void dr_ste_v1_set_action_copy(u8 *d_action,
- u8 dst_hw_field,
- u8 dst_shifter,
- u8 dst_len,
- u8 src_hw_field,
- u8 src_shifter)
+void dr_ste_v1_set_action_copy(u8 *d_action,
+ u8 dst_hw_field,
+ u8 dst_shifter,
+ u8 dst_len,
+ u8 src_hw_field,
+ u8 src_shifter)
{
dst_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
src_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
@@ -848,11 +854,11 @@ static void dr_ste_v1_set_action_copy(u8 *d_action,
#define DR_STE_DECAP_L3_ACTION_NUM 8
#define DR_STE_L2_HDR_MAX_SZ 20
-static int dr_ste_v1_set_action_decap_l3_list(void *data,
- u32 data_sz,
- u8 *hw_action,
- u32 hw_action_sz,
- u16 *used_hw_action_num)
+int dr_ste_v1_set_action_decap_l3_list(void *data,
+ u32 data_sz,
+ u8 *hw_action,
+ u32 hw_action_sz,
+ u16 *used_hw_action_num)
{
u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {};
void *data_ptr = padded_data;
@@ -977,8 +983,8 @@ static int dr_ste_v1_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);
@@ -1001,8 +1007,8 @@ static int dr_ste_v1_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);
@@ -1025,8 +1031,8 @@ static int dr_ste_v1_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);
@@ -1060,8 +1066,8 @@ static int dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *va
return 0;
}
-static void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);
@@ -1079,8 +1085,8 @@ static void dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param
DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_vlan_id, mask, first_vid);
DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_cfi, mask, first_cfi);
DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_priority, mask, first_prio);
- DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, ip_fragmented, mask, frag); // ?
- DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, l3_ethertype, mask, ethertype); // ?
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, ip_fragmented, mask, frag);
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, l3_ethertype, mask, ethertype);
DR_STE_SET_ONES(eth_l2_src_v1, bit_mask, l3_type, mask, ip_version);
if (mask->svlan_tag || mask->cvlan_tag) {
@@ -1201,8 +1207,8 @@ static int dr_ste_v1_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
-static void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);
@@ -1234,8 +1240,8 @@ static int dr_ste_v1_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
-static void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l2_dst_bit_mask(mask, sb->inner, sb->bit_mask);
@@ -1314,8 +1320,8 @@ static int dr_ste_v1_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);
@@ -1331,12 +1337,13 @@ static int dr_ste_v1_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, time_to_live, spec, ttl_hoplimit);
+ DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, ihl, spec, ipv4_ihl);
return 0;
}
-static void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);
@@ -1375,8 +1382,8 @@ static int dr_ste_v1_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);
@@ -1399,8 +1406,8 @@ static int dr_ste_v1_build_mpls_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_mpls_tag(mask, sb, sb->bit_mask);
@@ -1426,8 +1433,8 @@ static int dr_ste_v1_build_tnl_gre_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_tnl_gre_tag(mask, sb, sb->bit_mask);
@@ -1471,8 +1478,8 @@ static int dr_ste_v1_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_tnl_mpls_tag(mask, sb, sb->bit_mask);
@@ -1506,8 +1513,8 @@ static int dr_ste_v1_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *valu
return 0;
}
-static void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask);
@@ -1547,8 +1554,8 @@ static int dr_ste_v1_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *valu
return 0;
}
-static void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask);
@@ -1594,8 +1601,8 @@ static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_icmp_tag(mask, sb, sb->bit_mask);
@@ -1616,8 +1623,8 @@ static int dr_ste_v1_build_general_purpose_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_general_purpose_tag(mask, sb, sb->bit_mask);
@@ -1643,8 +1650,8 @@ static int dr_ste_v1_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);
@@ -1673,9 +1680,8 @@ dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void
-dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);
@@ -1703,9 +1709,8 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void
-dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);
@@ -1726,8 +1731,8 @@ static int dr_ste_v1_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
dr_ste_v1_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask);
@@ -1749,8 +1754,8 @@ static int dr_ste_v1_build_register_0_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_register_0_tag(mask, sb, sb->bit_mask);
@@ -1773,8 +1778,8 @@ static int dr_ste_v1_build_register_1_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_register_1_tag(mask, sb, sb->bit_mask);
@@ -1837,8 +1842,8 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
@@ -1892,8 +1897,8 @@ static int dr_ste_v1_build_felx_parser_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
@@ -1901,8 +1906,8 @@ static void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag;
}
-static void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
@@ -1926,7 +1931,7 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *va
return 0;
}
-static void
+void
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
@@ -1959,7 +1964,7 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_par
return 0;
}
-static void
+void
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
@@ -1982,8 +1987,8 @@ static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *v
return 0;
}
-static void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask)
+void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask);
@@ -2008,7 +2013,7 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void
+void
dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
@@ -2035,7 +2040,7 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
return 0;
}
-static void
+void
dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
@@ -2046,7 +2051,7 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag;
}
-struct mlx5dr_ste_ctx ste_ctx_v1 = {
+static struct mlx5dr_ste_ctx ste_ctx_v1 = {
/* Builders */
.build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init,
.build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init,
@@ -2091,7 +2096,8 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = {
/* Actions */
.actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP |
DR_STE_CTX_ACTION_CAP_RX_PUSH |
- DR_STE_CTX_ACTION_CAP_RX_ENCAP,
+ DR_STE_CTX_ACTION_CAP_RX_ENCAP |
+ DR_STE_CTX_ACTION_CAP_POP_MDFY,
.set_actions_rx = &dr_ste_v1_set_actions_rx,
.set_actions_tx = &dr_ste_v1_set_actions_tx,
.modify_field_arr_sz = ARRAY_SIZE(dr_ste_v1_action_modify_field_arr),
@@ -2103,3 +2109,8 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = {
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
};
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void)
+{
+ return &ste_ctx_v1;
+}
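The set_actions_rx/tx changes above stop unconditionally forbidding a modify-header after a vlan-pop and instead consult the context's actions_caps bitmask. A hedged sketch of that gating, with flag values mirroring the diff and an illustrative helper name:

#include <stdbool.h>
#include <stdint.h>

#define CAP_TX_POP   (1u << 0)
#define CAP_RX_PUSH  (1u << 1)
#define CAP_RX_ENCAP (1u << 2)
#define CAP_POP_MDFY (1u << 3)  /* vlan pop + modify header on one STE */

/* Returns true when a modify-header action may share the STE that already
 * carries a vlan-pop action; otherwise the builder has to start a new STE
 * before adding the modify-header action.
 */
static bool pop_then_modify_on_same_ste(uint32_t actions_caps)
{
        return (actions_caps & CAP_POP_MDFY) != 0;
}

In the diff, ste_ctx_v1 advertises the new DR_STE_CTX_ACTION_CAP_POP_MDFY bit while the v2 context introduced below leaves it clear, so the same shared set_actions_* code behaves correctly on both formats.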
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h
new file mode 100644
index 000000000000..8a1d49790c6e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef _DR_STE_V1_
+#define _DR_STE_V1_
+
+#include "dr_types.h"
+#include "dr_ste.h"
+
+void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr);
+u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p);
+void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask);
+u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p);
+void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type);
+u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p);
+void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
+void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi);
+void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size);
+void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, u8 *action_type_set,
+ u32 actions_caps, u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
+void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, u8 *action_type_set,
+ u32 actions_caps, u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
+void dr_ste_v1_set_action_set(u8 *d_action, u8 hw_field, u8 shifter,
+ u8 length, u32 data);
+void dr_ste_v1_set_action_add(u8 *d_action, u8 hw_field, u8 shifter,
+ u8 length, u32 data);
+void dr_ste_v1_set_action_copy(u8 *d_action, u8 dst_hw_field, u8 dst_shifter,
+ u8 dst_len, u8 src_hw_field, u8 src_shifter);
+int dr_ste_v1_set_action_decap_l3_list(void *data, u32 data_sz, u8 *hw_action,
+ u32 hw_action_sz, u16 *used_hw_action_num);
+void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask);
+
+#endif /* _DR_STE_V1_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c
new file mode 100644
index 000000000000..c60fddd125d2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "dr_ste_v1.h"
+
+enum {
+ DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
+ DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
+ DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
+ DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
+ DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
+ DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
+ DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
+ DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
+ DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
+ DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
+ DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
+ DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
+ DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
+ DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0 = 0x90,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1 = 0x91,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0 = 0x92,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1 = 0x93,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0 = 0x94,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1 = 0x95,
+};
+
+static const struct mlx5dr_ste_action_modify_field dr_ste_v2_action_modify_field_arr[] = {
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
+ },
+};
+
+static struct mlx5dr_ste_ctx ste_ctx_v2 = {
+ /* Builders */
+ .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init,
+ .build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init,
+ .build_eth_l3_ipv6_dst_init = &dr_ste_v1_build_eth_l3_ipv6_dst_init,
+ .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
+ .build_eth_l2_src_init = &dr_ste_v1_build_eth_l2_src_init,
+ .build_eth_l2_dst_init = &dr_ste_v1_build_eth_l2_dst_init,
+ .build_eth_l2_tnl_init = &dr_ste_v1_build_eth_l2_tnl_init,
+ .build_eth_l3_ipv4_misc_init = &dr_ste_v1_build_eth_l3_ipv4_misc_init,
+ .build_eth_ipv6_l3_l4_init = &dr_ste_v1_build_eth_ipv6_l3_l4_init,
+ .build_mpls_init = &dr_ste_v1_build_mpls_init,
+ .build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init,
+ .build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init,
+ .build_tnl_mpls_over_udp_init = &dr_ste_v1_build_tnl_mpls_over_udp_init,
+ .build_tnl_mpls_over_gre_init = &dr_ste_v1_build_tnl_mpls_over_gre_init,
+ .build_icmp_init = &dr_ste_v1_build_icmp_init,
+ .build_general_purpose_init = &dr_ste_v1_build_general_purpose_init,
+ .build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init,
+ .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
+ .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
+ .build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
+ .build_tnl_geneve_tlv_opt_exist_init =
+ &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
+ .build_register_0_init = &dr_ste_v1_build_register_0_init,
+ .build_register_1_init = &dr_ste_v1_build_register_1_init,
+ .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init,
+ .build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init,
+ .build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init,
+ .build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
+ .build_tnl_header_0_1_init = &dr_ste_v1_build_tnl_header_0_1_init,
+ .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
+ .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,
+
+ /* Getters and Setters */
+ .ste_init = &dr_ste_v1_init,
+ .set_next_lu_type = &dr_ste_v1_set_next_lu_type,
+ .get_next_lu_type = &dr_ste_v1_get_next_lu_type,
+ .set_miss_addr = &dr_ste_v1_set_miss_addr,
+ .get_miss_addr = &dr_ste_v1_get_miss_addr,
+ .set_hit_addr = &dr_ste_v1_set_hit_addr,
+ .set_byte_mask = &dr_ste_v1_set_byte_mask,
+ .get_byte_mask = &dr_ste_v1_get_byte_mask,
+
+ /* Actions */
+ .actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP |
+ DR_STE_CTX_ACTION_CAP_RX_PUSH |
+ DR_STE_CTX_ACTION_CAP_RX_ENCAP,
+ .set_actions_rx = &dr_ste_v1_set_actions_rx,
+ .set_actions_tx = &dr_ste_v1_set_actions_tx,
+ .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v2_action_modify_field_arr),
+ .modify_field_arr = dr_ste_v2_action_modify_field_arr,
+ .set_action_set = &dr_ste_v1_set_action_set,
+ .set_action_add = &dr_ste_v1_set_action_add,
+ .set_action_copy = &dr_ste_v1_set_action_copy,
+ .set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
+
+ /* Send */
+ .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
+};
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void)
+{
+ return &ste_ctx_v2;
+}
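dr_ste_v2.c adds no new builders of its own: the v2 context is wired from the dr_ste_v1_* callbacks exported through dr_ste_v1.h, and only the action modify-field mapping (plus the actions_caps value) differs. The reuse pattern, reduced to a small stand-alone sketch with made-up types:

#include <stddef.h>

struct field_map { int hw_field; int start; int end; };

struct ste_ops {
        void (*ste_init)(unsigned char *hw_ste);
        const struct field_map *modify_fields;
        size_t modify_fields_sz;
};

/* Shared implementation, originally private to the "v1" file */
static void v1_ste_init(unsigned char *hw_ste) { hw_ste[0] = 0x01; }

/* v2 keeps the v1 callbacks but supplies its own field mapping table */
static const struct field_map v2_modify_fields[] = {
        { .hw_field = 0x94, .start = 0, .end = 31 },
        { .hw_field = 0x95, .start = 0, .end = 31 },
};

static const struct ste_ops ste_ops_v2 = {
        .ste_init          = v1_ste_init,
        .modify_fields     = v2_modify_fields,
        .modify_fields_sz  = sizeof(v2_modify_fields) / sizeof(v2_modify_fields[0]),
};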
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
index 8ca110643cc0..e5f6412baea9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -10,6 +10,7 @@ static int dr_table_set_miss_action_nic(struct mlx5dr_domain *dmn,
struct mlx5dr_matcher_rx_tx *last_nic_matcher = NULL;
struct mlx5dr_htbl_connect_info info;
struct mlx5dr_ste_htbl *last_htbl;
+ struct mlx5dr_icm_chunk *chunk;
int ret;
if (!list_empty(&nic_tbl->nic_matcher_list))
@@ -22,13 +23,14 @@ static int dr_table_set_miss_action_nic(struct mlx5dr_domain *dmn,
else
last_htbl = nic_tbl->s_anchor;
- if (action)
- nic_tbl->default_icm_addr =
- nic_tbl->nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX ?
- action->dest_tbl->tbl->rx.s_anchor->chunk->icm_addr :
- action->dest_tbl->tbl->tx.s_anchor->chunk->icm_addr;
- else
+ if (action) {
+ chunk = nic_tbl->nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX ?
+ action->dest_tbl->tbl->rx.s_anchor->chunk :
+ action->dest_tbl->tbl->tx.s_anchor->chunk;
+ nic_tbl->default_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(chunk);
+ } else {
nic_tbl->default_icm_addr = nic_tbl->nic_dmn->default_icm_addr;
+ }
info.type = CONNECT_MISS;
info.miss_icm_addr = nic_tbl->default_icm_addr;
@@ -222,10 +224,10 @@ static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
int ret;
if (tbl->rx.s_anchor)
- icm_addr_rx = tbl->rx.s_anchor->chunk->icm_addr;
+ icm_addr_rx = mlx5dr_icm_pool_get_chunk_icm_addr(tbl->rx.s_anchor->chunk);
if (tbl->tx.s_anchor)
- icm_addr_tx = tbl->tx.s_anchor->chunk->icm_addr;
+ icm_addr_tx = mlx5dr_icm_pool_get_chunk_icm_addr(tbl->tx.s_anchor->chunk);
ft_attr.table_type = tbl->table_type;
ft_attr.icm_addr_rx = icm_addr_rx;
@@ -305,3 +307,8 @@ u32 mlx5dr_table_get_id(struct mlx5dr_table *tbl)
{
return tbl->table_id;
}
+
+struct mlx5dr_table *mlx5dr_table_get_from_fs_ft(struct mlx5_flow_table *ft)
+{
+ return ft->fs_dr_table.dr_table;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 55fcb751e24a..46866a5fc5ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -91,6 +91,7 @@ enum mlx5dr_ste_ctx_action_cap {
DR_STE_CTX_ACTION_CAP_TX_POP = 1 << 0,
DR_STE_CTX_ACTION_CAP_RX_PUSH = 1 << 1,
DR_STE_CTX_ACTION_CAP_RX_ENCAP = 1 << 2,
+ DR_STE_CTX_ACTION_CAP_POP_MDFY = 1 << 3,
};
enum {
@@ -146,10 +147,12 @@ struct mlx5dr_matcher_rx_tx;
struct mlx5dr_ste_ctx;
struct mlx5dr_ste {
- u8 *hw_ste;
/* refcount: indicates the num of rules that using this ste */
u32 refcount;
+ /* this ste is part of a rule, located in ste's chain */
+ u8 ste_chain_location;
+
/* attached to the miss_list head at each htbl entry */
struct list_head miss_list_node;
@@ -160,9 +163,6 @@ struct mlx5dr_ste {
/* The rule this STE belongs to */
struct mlx5dr_rule_rx_tx *rule_rx_tx;
-
- /* this ste is part of a rule, located in ste's chain */
- u8 ste_chain_location;
};
struct mlx5dr_ste_htbl_ctrl {
@@ -180,14 +180,7 @@ struct mlx5dr_ste_htbl {
u16 byte_mask;
u32 refcount;
struct mlx5dr_icm_chunk *chunk;
- struct mlx5dr_ste *ste_arr;
- u8 *hw_ste_arr;
-
- struct list_head *miss_list;
-
- enum mlx5dr_icm_chunk_size chunk_size;
struct mlx5dr_ste *pointing_ste;
-
struct mlx5dr_ste_htbl_ctrl ctrl;
};
@@ -555,7 +548,9 @@ struct mlx5dr_match_spec {
*/
u32 tcp_dport:16;
- u32 reserved_auto1:24;
+ u32 reserved_auto1:16;
+ u32 ipv4_ihl:4;
+ u32 reserved_auto2:4;
u32 ttl_hoplimit:8;
/* UDP source port.;tcp and udp sport/dport are mutually exclusive */
@@ -1094,16 +1089,12 @@ int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr,
struct mlx5dr_icm_chunk {
struct mlx5dr_icm_buddy_mem *buddy_mem;
struct list_head chunk_list;
- u32 rkey;
- u32 num_of_entries;
- u32 byte_size;
- u64 icm_addr;
- u64 mr_addr;
/* indicates the index of this chunk in the whole memory,
* used for deleting the chunk from the buddy
*/
unsigned int seg;
+ enum mlx5dr_icm_chunk_size size;
/* Memory optimisation */
struct mlx5dr_ste *ste_arr;
@@ -1143,6 +1134,13 @@ int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
enum mlx5dr_ipv outer_ipv,
enum mlx5dr_ipv inner_ipv);
+u64 mlx5dr_icm_pool_get_chunk_mr_addr(struct mlx5dr_icm_chunk *chunk);
+u32 mlx5dr_icm_pool_get_chunk_rkey(struct mlx5dr_icm_chunk *chunk);
+u64 mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk *chunk);
+u32 mlx5dr_icm_pool_get_chunk_num_of_entries(struct mlx5dr_icm_chunk *chunk);
+u32 mlx5dr_icm_pool_get_chunk_byte_size(struct mlx5dr_icm_chunk *chunk);
+u8 *mlx5dr_ste_get_hw_ste(struct mlx5dr_ste *ste);
+
static inline int
mlx5dr_icm_pool_dm_type_to_entry_size(enum mlx5dr_icm_type icm_type)
{
@@ -1175,7 +1173,7 @@ static inline int
mlx5dr_ste_htbl_increase_threshold(struct mlx5dr_ste_htbl *htbl)
{
int num_of_entries =
- mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size);
+ mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk->size);
/* Threshold is 50%, one is added to table of size 1 */
return (num_of_entries + 1) / 2;
@@ -1184,7 +1182,7 @@ mlx5dr_ste_htbl_increase_threshold(struct mlx5dr_ste_htbl *htbl)
static inline bool
mlx5dr_ste_htbl_may_grow(struct mlx5dr_ste_htbl *htbl)
{
- if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
+ if (htbl->chunk->size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
return false;
return true;
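The dr_types.h hunks drop the cached rkey/num_of_entries/byte_size/icm_addr/mr_addr fields from struct mlx5dr_icm_chunk and declare accessor helpers instead, so those values are derived from the chunk's size enum and its position in the buddy allocator only when needed. A hedged sketch of the idea; the arithmetic below is an illustrative assumption, not the driver's exact formulas:

#include <stdint.h>

struct icm_chunk {
        uint64_t buddy_icm_base;  /* base ICM address of the buddy block */
        unsigned int seg;         /* entry index of this chunk in the buddy */
        int size;                 /* log2 of the number of entries */
};

static uint32_t chunk_num_of_entries(const struct icm_chunk *chunk)
{
        return 1u << chunk->size;
}

static uint32_t chunk_byte_size(const struct icm_chunk *chunk, uint32_t entry_size)
{
        return chunk_num_of_entries(chunk) * entry_size;
}

static uint64_t chunk_icm_addr(const struct icm_chunk *chunk, uint32_t entry_size)
{
        return chunk->buddy_icm_base + (uint64_t)chunk->seg * entry_size;
}

Callers such as mlx5dr_ste_htbl_increase_threshold() then compute the value at the point of use instead of reading a cached field, which is the "Memory optimisation" the struct comment refers to.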
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 3f311462bedf..045b0cf90063 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -754,6 +754,16 @@ static int mlx5_cmd_dr_destroy_ns(struct mlx5_flow_root_namespace *ns)
return mlx5dr_domain_destroy(ns->fs_dr_domain.dr_domain);
}
+static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns,
+ enum fs_flow_table_type ft_type)
+{
+ if (ft_type != FS_FT_FDB ||
+ MLX5_CAP_GEN(ns->dev, steering_format_version) == MLX5_STEERING_FORMAT_CONNECTX_5)
+ return 0;
+
+ return MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX | MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX;
+}
+
bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
{
return mlx5dr_is_supported(dev);
@@ -778,6 +788,7 @@ static const struct mlx5_flow_cmds mlx5_flow_cmds_dr = {
.set_peer = mlx5_cmd_dr_set_peer,
.create_ns = mlx5_cmd_dr_create_ns,
.destroy_ns = mlx5_cmd_dr_destroy_ns,
+ .get_capabilities = mlx5_cmd_dr_get_capabilities,
};
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void)
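The new mlx5_cmd_dr_get_capabilities() callback reports VLAN push-on-RX / pop-on-TX support only for FDB tables on devices newer than the ConnectX-5 steering format. A minimal sketch of the same decision, with stand-in enum and flag names:

#include <stdint.h>

enum ft_type { FT_NIC_RX, FT_NIC_TX, FT_FDB };
enum fmt_version { FMT_CX5 = 0, FMT_CX6DX = 1, FMT_CX7 = 2 };

#define CAP_VLAN_PUSH_ON_RX (1u << 0)
#define CAP_VLAN_POP_ON_TX  (1u << 1)

static uint32_t dr_get_capabilities(enum ft_type type, enum fmt_version fmt)
{
        /* Only the FDB supports these, and only on post-ConnectX-5 formats */
        if (type != FT_FDB || fmt == FMT_CX5)
                return 0;

        return CAP_VLAN_PUSH_ON_RX | CAP_VLAN_POP_ON_TX;
}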
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index dfa223415fe2..ec5cbec0d455 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -53,6 +53,9 @@ void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
struct mlx5dr_table *
mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags);
+struct mlx5dr_table *
+mlx5dr_table_get_from_fs_ft(struct mlx5_flow_table *ft);
+
int mlx5dr_table_destroy(struct mlx5dr_table *table);
u32 mlx5dr_table_get_id(struct mlx5dr_table *table);
@@ -136,7 +139,7 @@ mlx5dr_is_supported(struct mlx5_core_dev *dev)
(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
(MLX5_CAP_GEN(dev, steering_format_version) <=
- MLX5_STEERING_FORMAT_CONNECTX_6DX)));
+ MLX5_STEERING_FORMAT_CONNECTX_7)));
}
/* buddy functions & structure */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 01e9c412977c..8455e79bc44a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
@@ -100,19 +99,21 @@ static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev,
int err = -ENOMEM;
phys_addr_t pfn;
int bfregs;
+ int node;
int i;
bfregs = uars_per_sys_page(mdev) * MLX5_BFREGS_PER_UAR;
- up = kzalloc(sizeof(*up), GFP_KERNEL);
+ node = mdev->priv.numa_node;
+ up = kzalloc_node(sizeof(*up), GFP_KERNEL, node);
if (!up)
return ERR_PTR(err);
up->mdev = mdev;
- up->reg_bitmap = bitmap_zalloc(bfregs, GFP_KERNEL);
+ up->reg_bitmap = bitmap_zalloc_node(bfregs, GFP_KERNEL, node);
if (!up->reg_bitmap)
goto error1;
- up->fp_bitmap = bitmap_zalloc(bfregs, GFP_KERNEL);
+ up->fp_bitmap = bitmap_zalloc_node(bfregs, GFP_KERNEL, node);
if (!up->fp_bitmap)
goto error1;
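The uar.c change allocates the UAR page bookkeeping on the device's NUMA node instead of wherever the calling CPU happens to run. The shape of the change, sketched with a user-space stand-in allocator (numa_zalloc is hypothetical; the kernel code uses kzalloc_node() and bitmap_zalloc_node()):

#include <stdlib.h>

/* Stand-in for a node-aware zeroing allocator such as kzalloc_node() */
static void *numa_zalloc(size_t size, int node)
{
        (void)node;  /* a real implementation would allocate on @node */
        return calloc(1, size);
}

struct uars_page {
        unsigned long *reg_bitmap;
        unsigned long *fp_bitmap;
};

static struct uars_page *alloc_uars_page(int device_numa_node, size_t bfregs)
{
        /* Pick the node once, then pass it to every allocation so all of
         * the page's bookkeeping lands in memory local to the device.
         */
        int node = device_numa_node;
        struct uars_page *up = numa_zalloc(sizeof(*up), node);

        if (!up)
                return NULL;

        up->reg_bitmap = numa_zalloc((bfregs + 7) / 8, node);
        up->fp_bitmap  = numa_zalloc((bfregs + 7) / 8, node);
        if (!up->reg_bitmap || !up->fp_bitmap) {
                free(up->reg_bitmap);
                free(up->fp_bitmap);
                free(up);
                return NULL;
        }
        return up;
}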
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 866b9357939b..b13e0f8d232a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -177,17 +177,6 @@ void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_driver_priv);
-bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core)
-{
- return mlxsw_core->driver->res_query_enabled;
-}
-EXPORT_SYMBOL(mlxsw_core_res_query_enabled);
-
-bool mlxsw_core_temp_warn_enabled(const struct mlxsw_core *mlxsw_core)
-{
- return mlxsw_core->driver->temp_warn_enabled;
-}
-
bool
mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
const struct mlxsw_fw_rev *req_rev)
@@ -212,6 +201,32 @@ struct mlxsw_event_listener_item {
void *priv;
};
+static const u8 mlxsw_core_trap_groups[] = {
+ MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+ MLXSW_REG_HTGT_TRAP_GROUP_CORE_EVENT,
+};
+
+static int mlxsw_core_trap_groups_set(struct mlxsw_core *mlxsw_core)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+ int err;
+ int i;
+
+ if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_core_trap_groups); i++) {
+ mlxsw_reg_htgt_pack(htgt_pl, mlxsw_core_trap_groups[i],
+ MLXSW_REG_HTGT_INVALID_POLICER,
+ MLXSW_REG_HTGT_DEFAULT_PRIORITY,
+ MLXSW_REG_HTGT_DEFAULT_TC);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
/******************
* EMAD processing
******************/
@@ -777,16 +792,10 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
if (err)
goto err_trap_register;
- err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
- if (err)
- goto err_emad_trap_set;
mlxsw_core->emad.use_emad = true;
return 0;
-err_emad_trap_set:
- mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
- mlxsw_core);
err_trap_register:
destroy_workqueue(mlxsw_core->emad_wq);
return err;
@@ -1208,36 +1217,37 @@ static void mlxsw_core_fw_params_unregister(struct mlxsw_core *mlxsw_core)
ARRAY_SIZE(mlxsw_core_fw_devlink_params));
}
+static void *__dl_port(struct devlink_port *devlink_port)
+{
+ return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
+}
+
static int mlxsw_devlink_port_split(struct devlink *devlink,
- unsigned int port_index,
+ struct devlink_port *port,
unsigned int count,
struct netlink_ext_ack *extack)
{
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(port);
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- if (port_index >= mlxsw_core->max_ports) {
- NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
- return -EINVAL;
- }
if (!mlxsw_core->driver->port_split)
return -EOPNOTSUPP;
- return mlxsw_core->driver->port_split(mlxsw_core, port_index, count,
- extack);
+ return mlxsw_core->driver->port_split(mlxsw_core,
+ mlxsw_core_port->local_port,
+ count, extack);
}
static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
- unsigned int port_index,
+ struct devlink_port *port,
struct netlink_ext_ack *extack)
{
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(port);
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- if (port_index >= mlxsw_core->max_ports) {
- NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
- return -EINVAL;
- }
if (!mlxsw_core->driver->port_unsplit)
return -EOPNOTSUPP;
- return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index,
+ return mlxsw_core->driver->port_unsplit(mlxsw_core,
+ mlxsw_core_port->local_port,
extack);
}
@@ -1271,11 +1281,6 @@ mlxsw_devlink_sb_pool_set(struct devlink *devlink,
extack);
}
-static void *__dl_port(struct devlink_port *devlink_port)
-{
- return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
-}
-
static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
enum devlink_port_type port_type)
{
@@ -1706,7 +1711,7 @@ static void mlxsw_core_health_listener_func(const struct mlxsw_reg_info *reg,
}
static const struct mlxsw_listener mlxsw_core_health_listener =
- MLXSW_EVENTL(mlxsw_core_health_listener_func, MFDE, MFDE);
+ MLXSW_CORE_EVENTL(mlxsw_core_health_listener_func, MFDE);
static int
mlxsw_core_health_fw_fatal_dump_fatal_cause(const char *mfde_pl,
@@ -2019,7 +2024,7 @@ static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core)
struct devlink_health_reporter *fw_fatal;
int err;
- if (!mlxsw_core->driver->fw_fatal_enabled)
+ if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
return 0;
fw_fatal = devlink_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops,
@@ -2049,7 +2054,7 @@ err_trap_register:
static void mlxsw_core_health_fini(struct mlxsw_core *mlxsw_core)
{
- if (!mlxsw_core->driver->fw_fatal_enabled)
+ if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
return;
mlxsw_core_health_fw_fatal_config(mlxsw_core, false);
@@ -2069,7 +2074,6 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const char *device_kind = mlxsw_bus_info->device_kind;
struct mlxsw_core *mlxsw_core;
struct mlxsw_driver *mlxsw_driver;
- struct mlxsw_res *res;
size_t alloc_size;
int err;
@@ -2095,8 +2099,8 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
mlxsw_core->bus_priv = bus_priv;
mlxsw_core->bus_info = mlxsw_bus_info;
- res = mlxsw_driver->res_query_enabled ? &mlxsw_core->res : NULL;
- err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, res);
+ err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
+ &mlxsw_core->res);
if (err)
goto err_bus_init;
@@ -2122,6 +2126,10 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
}
}
+ err = mlxsw_core_trap_groups_set(mlxsw_core);
+ if (err)
+ goto err_trap_groups_set;
+
err = mlxsw_emad_init(mlxsw_core);
if (err)
goto err_emad_init;
@@ -2181,6 +2189,7 @@ err_fw_rev_validate:
err_register_params:
mlxsw_emad_fini(mlxsw_core);
err_emad_init:
+err_trap_groups_set:
kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
mlxsw_ports_fini(mlxsw_core, reload);
@@ -2500,6 +2509,9 @@ int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
char hpkt_pl[MLXSW_REG_HPKT_LEN];
int err;
+ if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
+ return 0;
+
err = mlxsw_core_listener_register(mlxsw_core, listener, priv,
listener->enabled_on_register);
if (err)
@@ -2529,6 +2541,9 @@ void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
{
char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
+ return;
+
if (!listener->is_event) {
mlxsw_reg_hpkt_pack(hpkt_pl, listener->dis_action,
listener->trap_id, listener->dis_trap_group,
@@ -2540,6 +2555,45 @@ void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_trap_unregister);
+int mlxsw_core_traps_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listeners,
+ size_t listeners_count, void *priv)
+{
+ int i, err;
+
+ for (i = 0; i < listeners_count; i++) {
+ err = mlxsw_core_trap_register(mlxsw_core,
+ &listeners[i],
+ priv);
+ if (err)
+ goto err_listener_register;
+ }
+ return 0;
+
+err_listener_register:
+ for (i--; i >= 0; i--) {
+ mlxsw_core_trap_unregister(mlxsw_core,
+ &listeners[i],
+ priv);
+ }
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_core_traps_register);
+
+void mlxsw_core_traps_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listeners,
+ size_t listeners_count, void *priv)
+{
+ int i;
+
+ for (i = 0; i < listeners_count; i++) {
+ mlxsw_core_trap_unregister(mlxsw_core,
+ &listeners[i],
+ priv);
+ }
+}
+EXPORT_SYMBOL(mlxsw_core_traps_unregister);
+
int mlxsw_core_trap_state_set(struct mlxsw_core *mlxsw_core,
const struct mlxsw_listener *listener,
bool enabled)
@@ -2925,7 +2979,7 @@ static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
attrs.switch_id.id_len = switch_id_len;
mlxsw_core_port->local_port = local_port;
devlink_port_attrs_set(devlink_port, &attrs);
- err = devlink_port_register(devlink, devlink_port, local_port);
+ err = devl_port_register(devlink, devlink_port, local_port);
if (err)
memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
return err;
@@ -2937,7 +2991,7 @@ static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port
&mlxsw_core->ports[local_port];
struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
- devlink_port_unregister(devlink_port);
+ devl_port_unregister(devlink_port);
memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
}
@@ -3181,9 +3235,6 @@ int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox,
u16 id;
int err;
- if (!res)
- return 0;
-
mlxsw_cmd_mbox_zero(mbox);
for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index f30bb8614e69..16ee5e90973d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -35,10 +35,6 @@ unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core);
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
-bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core);
-
-bool mlxsw_core_temp_warn_enabled(const struct mlxsw_core *mlxsw_core);
-
bool
mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
const struct mlxsw_fw_rev *req_rev);
@@ -163,6 +159,9 @@ struct mlxsw_listener {
.enabled_on_register = true, \
}
+#define MLXSW_CORE_EVENTL(_func, _trap_id) \
+ MLXSW_EVENTL(_func, _trap_id, CORE_EVENT)
+
int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
const struct mlxsw_rx_listener *rxl,
void *priv, bool enabled);
@@ -181,6 +180,12 @@ int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
const struct mlxsw_listener *listener,
void *priv);
+int mlxsw_core_traps_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listeners,
+ size_t listeners_count, void *priv);
+void mlxsw_core_traps_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listeners,
+ size_t listeners_count, void *priv);
int mlxsw_core_trap_state_set(struct mlxsw_core *mlxsw_core,
const struct mlxsw_listener *listener,
bool enabled);
@@ -315,7 +320,6 @@ struct mlxsw_driver {
const struct mlxsw_bus_info *mlxsw_bus_info,
struct netlink_ext_ack *extack);
void (*fini)(struct mlxsw_core *mlxsw_core);
- int (*basic_trap_groups_set)(struct mlxsw_core *mlxsw_core);
int (*port_type_set)(struct mlxsw_core *mlxsw_core, u16 local_port,
enum devlink_port_type new_type);
int (*port_split)(struct mlxsw_core *mlxsw_core, u16 local_port,
@@ -398,9 +402,6 @@ struct mlxsw_driver {
u8 txhdr_len;
const struct mlxsw_config_profile *profile;
- bool res_query_enabled;
- bool fw_fatal_enabled;
- bool temp_warn_enabled;
};
int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
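As an illustration of the two interfaces added above (not taken from this patch), a driver-side event listener can be declared with the new MLXSW_CORE_EVENTL() helper and handed to the bulk registration API together with any other listeners; the function and array names below are hypothetical:

static void example_mfde_listener_func(const struct mlxsw_reg_info *reg,
				       char *mfde_pl, void *priv)
{
	/* Decode the MFDE event payload here. */
}

static const struct mlxsw_listener example_listeners[] = {
	MLXSW_CORE_EVENTL(example_mfde_listener_func, MFDE),
};

static int example_traps_init(struct mlxsw_core *core, void *priv)
{
	/* On a mid-array failure mlxsw_core_traps_register() unregisters the
	 * listeners it already set up, so callers need only one error path.
	 */
	return mlxsw_core_traps_register(core, example_listeners,
					 ARRAY_SIZE(example_listeners), priv);
}

static void example_traps_fini(struct mlxsw_core *core, void *priv)
{
	mlxsw_core_traps_unregister(core, example_listeners,
				    ARRAY_SIZE(example_listeners), priv);
}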
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 77e82e6cf6e8..fa33caecc91d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -1957,6 +1957,83 @@ int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
}
EXPORT_SYMBOL(mlxsw_afa_block_append_mcrouter);
+/* SIP DIP Action
+ * --------------
+ * The SIP_DIP_ACTION is used for modifying the SIP and DIP fields of the
+ * packet, e.g. for NAT. The L3 checksum is updated. Also, if the L4 is TCP or
+ * if the L4 is UDP and the checksum field is not zero, then the L4 checksum is
+ * updated.
+ */
+
+#define MLXSW_AFA_IP_CODE 0x11
+#define MLXSW_AFA_IP_SIZE 2
+
+enum mlxsw_afa_ip_s_d {
+ /* ip refers to dip */
+ MLXSW_AFA_IP_S_D_DIP,
+ /* ip refers to sip */
+ MLXSW_AFA_IP_S_D_SIP,
+};
+
+/* afa_ip_s_d
+ * Source or destination.
+ */
+MLXSW_ITEM32(afa, ip, s_d, 0x00, 31, 1);
+
+enum mlxsw_afa_ip_m_l {
+ /* LSB: ip[63:0] refers to ip[63:0] */
+ MLXSW_AFA_IP_M_L_LSB,
+ /* MSB: ip[63:0] refers to ip[127:64] */
+ MLXSW_AFA_IP_M_L_MSB,
+};
+
+/* afa_ip_m_l
+ * MSB or LSB.
+ */
+MLXSW_ITEM32(afa, ip, m_l, 0x00, 30, 1);
+
+/* afa_ip_ip_63_32
+ * Bits [63:32] in the IP address to change to.
+ */
+MLXSW_ITEM32(afa, ip, ip_63_32, 0x08, 0, 32);
+
+/* afa_ip_ip_31_0
+ * Bits [31:0] in the IP address to change to.
+ */
+MLXSW_ITEM32(afa, ip, ip_31_0, 0x0C, 0, 32);
+
+static void mlxsw_afa_ip_pack(char *payload, enum mlxsw_afa_ip_s_d s_d,
+ enum mlxsw_afa_ip_m_l m_l, u32 ip_31_0,
+ u32 ip_63_32)
+{
+ mlxsw_afa_ip_s_d_set(payload, s_d);
+ mlxsw_afa_ip_m_l_set(payload, m_l);
+ mlxsw_afa_ip_ip_31_0_set(payload, ip_31_0);
+ mlxsw_afa_ip_ip_63_32_set(payload, ip_63_32);
+}
+
+int mlxsw_afa_block_append_ip(struct mlxsw_afa_block *block, bool is_dip,
+ bool is_lsb, u32 val_31_0, u32 val_63_32,
+ struct netlink_ext_ack *extack)
+{
+ enum mlxsw_afa_ip_s_d s_d = is_dip ? MLXSW_AFA_IP_S_D_DIP :
+ MLXSW_AFA_IP_S_D_SIP;
+ enum mlxsw_afa_ip_m_l m_l = is_lsb ? MLXSW_AFA_IP_M_L_LSB :
+ MLXSW_AFA_IP_M_L_MSB;
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_IP_CODE,
+ MLXSW_AFA_IP_SIZE);
+
+ if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append IP action");
+ return PTR_ERR(act);
+ }
+
+ mlxsw_afa_ip_pack(act, s_d, m_l, val_31_0, val_63_32);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_ip);
+
/* L4 Port Action
* --------------
* The L4_PORT_ACTION is used for modifying the sport and dport fields of the
* packet, e.g. for NAT.
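As an illustration of the SIP_DIP action added earlier in this file (not taken from this patch), rewriting a full 128-bit IPv6 destination address takes two appended actions, one per 64-bit half; the helper name and the dip6[] word layout below are assumptions made for the sketch, and an IPv4 rewrite would use only the lower-half call:

static int example_append_dip6(struct mlxsw_afa_block *block,
			       const u32 dip6[4], /* dip6[0] = bits 31:0 ... dip6[3] = bits 127:96 */
			       struct netlink_ext_ack *extack)
{
	int err;

	/* Lower half of the address: ip[63:0] */
	err = mlxsw_afa_block_append_ip(block, true, true,
					dip6[0], dip6[1], extack);
	if (err)
		return err;

	/* Upper half of the address: ip[127:64] */
	return mlxsw_afa_block_append_ip(block, true, false,
					 dip6[2], dip6[3], extack);
}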
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index 16cbd6acbb01..db58037be46e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -92,6 +92,9 @@ int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid,
int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
u16 expected_irif, u16 min_mtu,
bool rmid_valid, u32 kvdl_index);
+int mlxsw_afa_block_append_ip(struct mlxsw_afa_block *block, bool is_dip,
+ bool is_lsb, u32 val_31_0, u32 val_63_32,
+ struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_l4port(struct mlxsw_afa_block *block, bool is_dport, u16 l4_port,
struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_police(struct mlxsw_afa_block *block,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 6dd4ae2f45f4..29a74b8bd5b5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -18,6 +18,7 @@ struct mlxsw_env_module_info {
int num_ports_mapped;
int num_ports_up;
enum ethtool_module_power_mode_policy power_mode_policy;
+ enum mlxsw_reg_pmtm_module_type type;
};
struct mlxsw_env {
@@ -27,14 +28,47 @@ struct mlxsw_env {
struct mlxsw_env_module_info module_info[];
};
-static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id,
- bool *qsfp, bool *cmis)
+static int __mlxsw_env_validate_module_type(struct mlxsw_core *core, u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(core);
+ int err;
+
+ switch (mlxsw_env->module_info[module].type) {
+ case MLXSW_REG_PMTM_MODULE_TYPE_TWISTED_PAIR:
+ err = -EINVAL;
+ break;
+ default:
+ err = 0;
+ }
+
+ return err;
+}
+
+static int mlxsw_env_validate_module_type(struct mlxsw_core *core, u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(core);
+ int err;
+
+ mutex_lock(&mlxsw_env->module_info_lock);
+ err = __mlxsw_env_validate_module_type(core, module);
+ mutex_unlock(&mlxsw_env->module_info_lock);
+
+ return err;
+}
+
+static int
+mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id, bool *qsfp,
+ bool *cmis)
{
char mcia_pl[MLXSW_REG_MCIA_LEN];
char *eeprom_tmp;
u8 ident;
int err;
+ err = mlxsw_env_validate_module_type(core, id);
+ if (err)
+ return err;
+
mlxsw_reg_mcia_pack(mcia_pl, id, 0, MLXSW_REG_MCIA_PAGE0_LO_OFF, 0, 1,
MLXSW_REG_MCIA_I2C_ADDR_LOW);
err = mlxsw_reg_query(core, MLXSW_REG(mcia), mcia_pl);
@@ -53,6 +87,7 @@ static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id,
*qsfp = true;
break;
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_DD:
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_OSFP:
*qsfp = true;
*cmis = true;
break;
@@ -206,7 +241,8 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
return 0;
}
-int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
+int mlxsw_env_get_module_info(struct net_device *netdev,
+ struct mlxsw_core *mlxsw_core, int module,
struct ethtool_modinfo *modinfo)
{
u8 module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE];
@@ -215,6 +251,13 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
unsigned int read_size;
int err;
+ err = mlxsw_env_validate_module_type(mlxsw_core, module);
+ if (err) {
+ netdev_err(netdev,
+ "EEPROM is not equipped on port module type\n");
+ return err;
+ }
+
err = mlxsw_env_query_module_eeprom(mlxsw_core, module, 0, offset,
module_info, false, &read_size);
if (err)
@@ -261,6 +304,7 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN / 2;
break;
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_DD:
+ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_OSFP:
/* Use SFF_8636 as base type. ethtool should recognize specific
* type through the identifier value.
*/
@@ -356,6 +400,13 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
{
u32 bytes_read = 0;
u16 device_addr;
+ int err;
+
+ err = mlxsw_env_validate_module_type(mlxsw_core, module);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "EEPROM is not equipped on port module type");
+ return err;
+ }
/* Offset cannot be larger than 2 * ETH_MODULE_EEPROM_PAGE_LEN */
device_addr = page->offset;
@@ -364,7 +415,6 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
char mcia_pl[MLXSW_REG_MCIA_LEN];
char *eeprom_tmp;
u8 size;
- int err;
size = min_t(u8, page->length - bytes_read,
MLXSW_REG_MCIA_EEPROM_SIZE);
@@ -414,11 +464,14 @@ int mlxsw_env_reset_module(struct net_device *netdev,
!(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT)))
return 0;
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
- return -EINVAL;
-
mutex_lock(&mlxsw_env->module_info_lock);
+ err = __mlxsw_env_validate_module_type(mlxsw_core, module);
+ if (err) {
+ netdev_err(netdev, "Reset module is not supported on port module type\n");
+ goto out;
+ }
+
if (mlxsw_env->module_info[module].num_ports_up) {
netdev_err(netdev, "Cannot reset module when ports using it are administratively up\n");
err = -EINVAL;
@@ -456,11 +509,14 @@ mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
u32 status_bits;
int err;
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
- return -EINVAL;
-
mutex_lock(&mlxsw_env->module_info_lock);
+ err = __mlxsw_env_validate_module_type(mlxsw_core, module);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Power mode is not supported on port module type");
+ goto out;
+ }
+
params->policy = mlxsw_env->module_info[module].power_mode_policy;
mlxsw_reg_mcion_pack(mcion_pl, module);
@@ -560,9 +616,6 @@ mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
bool low_power;
int err = 0;
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
- return -EINVAL;
-
if (policy != ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH &&
policy != ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported power mode policy");
@@ -571,6 +624,13 @@ mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
mutex_lock(&mlxsw_env->module_info_lock);
+ err = __mlxsw_env_validate_module_type(mlxsw_core, module);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Power mode set is not supported on port module type");
+ goto out;
+ }
+
if (mlxsw_env->module_info[module].power_mode_policy == policy)
goto out;
@@ -661,13 +721,12 @@ static int mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core,
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtmp), mtmp_pl);
}
-static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core,
- u8 module_count)
+static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core)
{
int i, err, sensor_index;
bool has_temp_sensor;
- for (i = 0; i < module_count; i++) {
+ for (i = 0; i < mlxsw_core_env(mlxsw_core)->module_count; i++) {
err = mlxsw_env_module_has_temp_sensor(mlxsw_core, i,
&has_temp_sensor);
if (err)
@@ -759,15 +818,12 @@ mlxsw_env_mtwe_listener_func(const struct mlxsw_reg_info *reg, char *mtwe_pl,
}
static const struct mlxsw_listener mlxsw_env_temp_warn_listener =
- MLXSW_EVENTL(mlxsw_env_mtwe_listener_func, MTWE, MTWE);
+ MLXSW_CORE_EVENTL(mlxsw_env_mtwe_listener_func, MTWE);
static int mlxsw_env_temp_warn_event_register(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
- if (!mlxsw_core_temp_warn_enabled(mlxsw_core))
- return 0;
-
return mlxsw_core_trap_register(mlxsw_core,
&mlxsw_env_temp_warn_listener,
mlxsw_env);
@@ -775,9 +831,6 @@ static int mlxsw_env_temp_warn_event_register(struct mlxsw_core *mlxsw_core)
static void mlxsw_env_temp_warn_event_unregister(struct mlxsw_env *mlxsw_env)
{
- if (!mlxsw_core_temp_warn_enabled(mlxsw_env->core))
- return;
-
mlxsw_core_trap_unregister(mlxsw_env->core,
&mlxsw_env_temp_warn_listener, mlxsw_env);
}
@@ -849,16 +902,13 @@ mlxsw_env_pmpe_listener_func(const struct mlxsw_reg_info *reg, char *pmpe_pl,
}
static const struct mlxsw_listener mlxsw_env_module_plug_listener =
- MLXSW_EVENTL(mlxsw_env_pmpe_listener_func, PMPE, PMPE);
+ MLXSW_CORE_EVENTL(mlxsw_env_pmpe_listener_func, PMPE);
static int
mlxsw_env_module_plug_event_register(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
- if (!mlxsw_core_temp_warn_enabled(mlxsw_core))
- return 0;
-
return mlxsw_core_trap_register(mlxsw_core,
&mlxsw_env_module_plug_listener,
mlxsw_env);
@@ -867,21 +917,17 @@ mlxsw_env_module_plug_event_register(struct mlxsw_core *mlxsw_core)
static void
mlxsw_env_module_plug_event_unregister(struct mlxsw_env *mlxsw_env)
{
- if (!mlxsw_core_temp_warn_enabled(mlxsw_env->core))
- return;
-
mlxsw_core_trap_unregister(mlxsw_env->core,
&mlxsw_env_module_plug_listener,
mlxsw_env);
}
static int
-mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core,
- u8 module_count)
+mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core)
{
int i, err;
- for (i = 0; i < module_count; i++) {
+ for (i = 0; i < mlxsw_core_env(mlxsw_core)->module_count; i++) {
char pmaos_pl[MLXSW_REG_PMAOS_LEN];
mlxsw_reg_pmaos_pack(pmaos_pl, i);
@@ -901,9 +947,6 @@ mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module,
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
- return -EINVAL;
-
mutex_lock(&mlxsw_env->module_info_lock);
*p_counter = mlxsw_env->module_info[module].module_overheat_counter;
mutex_unlock(&mlxsw_env->module_info_lock);
@@ -916,9 +959,6 @@ void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
- return;
-
mutex_lock(&mlxsw_env->module_info_lock);
mlxsw_env->module_info[module].num_ports_mapped++;
mutex_unlock(&mlxsw_env->module_info_lock);
@@ -929,9 +969,6 @@ void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
- return;
-
mutex_lock(&mlxsw_env->module_info_lock);
mlxsw_env->module_info[module].num_ports_mapped--;
mutex_unlock(&mlxsw_env->module_info_lock);
@@ -943,9 +980,6 @@ int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module)
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
int err = 0;
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
- return -EINVAL;
-
mutex_lock(&mlxsw_env->module_info_lock);
if (mlxsw_env->module_info[module].power_mode_policy !=
@@ -975,9 +1009,6 @@ void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module)
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
- if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
- return;
-
mutex_lock(&mlxsw_env->module_info_lock);
mlxsw_env->module_info[module].num_ports_up--;
@@ -999,6 +1030,28 @@ out_unlock:
}
EXPORT_SYMBOL(mlxsw_env_module_port_down);
+static int
+mlxsw_env_module_type_set(struct mlxsw_core *mlxsw_core)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ int i;
+
+ for (i = 0; i < mlxsw_env->module_count; i++) {
+ char pmtm_pl[MLXSW_REG_PMTM_LEN];
+ int err;
+
+ mlxsw_reg_pmtm_pack(pmtm_pl, 0, i);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtm), pmtm_pl);
+ if (err)
+ return err;
+
+ mlxsw_env->module_info[i].type =
+ mlxsw_reg_pmtm_module_type_get(pmtm_pl);
+ }
+
+ return 0;
+}
+
int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env)
{
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
@@ -1037,17 +1090,21 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env)
if (err)
goto err_module_plug_event_register;
- err = mlxsw_env_module_oper_state_event_enable(mlxsw_core,
- env->module_count);
+ err = mlxsw_env_module_oper_state_event_enable(mlxsw_core);
if (err)
goto err_oper_state_event_enable;
- err = mlxsw_env_module_temp_event_enable(mlxsw_core, env->module_count);
+ err = mlxsw_env_module_temp_event_enable(mlxsw_core);
if (err)
goto err_temp_event_enable;
+ err = mlxsw_env_module_type_set(mlxsw_core);
+ if (err)
+ goto err_type_set;
+
return 0;
+err_type_set:
err_temp_event_enable:
err_oper_state_event_enable:
mlxsw_env_module_plug_event_unregister(env);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
index da121b1a84b4..ec6564e5d2ee 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
@@ -12,7 +12,8 @@ struct ethtool_eeprom;
int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
int off, int *temp);
-int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
+int mlxsw_env_get_module_info(struct net_device *netdev,
+ struct mlxsw_core *mlxsw_core, int module,
struct ethtool_modinfo *modinfo);
int mlxsw_env_get_module_eeprom(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index d41afdfbd085..8b170ad92302 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -57,14 +57,14 @@ static ssize_t mlxsw_hwmon_temp_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
int temp, index;
int err;
- index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index,
+ index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index,
mlxsw_hwmon->module_sensor_max);
mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
@@ -80,14 +80,14 @@ static ssize_t mlxsw_hwmon_temp_max_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
int temp_max, index;
int err;
- index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index,
+ index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index,
mlxsw_hwmon->module_sensor_max);
mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
@@ -103,9 +103,9 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0};
unsigned long val;
int index;
@@ -117,7 +117,7 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev,
if (val != 1)
return -EINVAL;
- index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index,
+ index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index,
mlxsw_hwmon->module_sensor_max);
mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, index);
@@ -138,13 +138,13 @@ static ssize_t mlxsw_hwmon_fan_rpm_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
char mfsm_pl[MLXSW_REG_MFSM_LEN];
int err;
- mlxsw_reg_mfsm_pack(mfsm_pl, mlwsw_hwmon_attr->type_index);
+ mlxsw_reg_mfsm_pack(mfsm_pl, mlxsw_hwmon_attr->type_index);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mfsm), mfsm_pl);
if (err) {
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query fan\n");
@@ -157,9 +157,9 @@ static ssize_t mlxsw_hwmon_fan_fault_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
char fore_pl[MLXSW_REG_FORE_LEN];
bool fault;
int err;
@@ -169,7 +169,7 @@ static ssize_t mlxsw_hwmon_fan_fault_show(struct device *dev,
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query fan\n");
return err;
}
- mlxsw_reg_fore_unpack(fore_pl, mlwsw_hwmon_attr->type_index, &fault);
+ mlxsw_reg_fore_unpack(fore_pl, mlxsw_hwmon_attr->type_index, &fault);
return sprintf(buf, "%u\n", fault);
}
@@ -178,13 +178,13 @@ static ssize_t mlxsw_hwmon_pwm_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
char mfsc_pl[MLXSW_REG_MFSC_LEN];
int err;
- mlxsw_reg_mfsc_pack(mfsc_pl, mlwsw_hwmon_attr->type_index, 0);
+ mlxsw_reg_mfsc_pack(mfsc_pl, mlxsw_hwmon_attr->type_index, 0);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mfsc), mfsc_pl);
if (err) {
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query PWM\n");
@@ -198,9 +198,9 @@ static ssize_t mlxsw_hwmon_pwm_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
char mfsc_pl[MLXSW_REG_MFSC_LEN];
unsigned long val;
int err;
@@ -211,7 +211,7 @@ static ssize_t mlxsw_hwmon_pwm_store(struct device *dev,
if (val > 255)
return -EINVAL;
- mlxsw_reg_mfsc_pack(mfsc_pl, mlwsw_hwmon_attr->type_index, val);
+ mlxsw_reg_mfsc_pack(mfsc_pl, mlxsw_hwmon_attr->type_index, val);
err = mlxsw_reg_write(mlxsw_hwmon->core, MLXSW_REG(mfsc), mfsc_pl);
if (err) {
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to write PWM\n");
@@ -224,14 +224,14 @@ static int mlxsw_hwmon_module_temp_get(struct device *dev,
struct device_attribute *attr,
int *p_temp)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
u8 module;
int err;
- module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module,
false, false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
@@ -261,15 +261,15 @@ static ssize_t mlxsw_hwmon_module_temp_fault_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
char mtbr_pl[MLXSW_REG_MTBR_LEN] = {0};
u8 module, fault;
u16 temp;
int err;
- module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module,
1);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtbr), mtbr_pl);
@@ -303,13 +303,13 @@ static int mlxsw_hwmon_module_temp_critical_get(struct device *dev,
struct device_attribute *attr,
int *p_temp)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
u8 module;
int err;
- module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module,
SFP_TEMP_HIGH_WARN, p_temp);
if (err) {
@@ -337,13 +337,13 @@ static int mlxsw_hwmon_module_temp_emergency_get(struct device *dev,
struct device_attribute *attr,
int *p_temp)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
u8 module;
int err;
- module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
+ module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module,
SFP_TEMP_HIGH_ALARM, p_temp);
if (err) {
@@ -373,11 +373,11 @@ mlxsw_hwmon_module_temp_label_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
return sprintf(buf, "front panel %03u\n",
- mlwsw_hwmon_attr->type_index);
+ mlxsw_hwmon_attr->type_index);
}
static ssize_t
@@ -385,10 +385,10 @@ mlxsw_hwmon_gbox_temp_label_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
+ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
- struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
- int index = mlwsw_hwmon_attr->type_index -
+ struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon;
+ int index = mlxsw_hwmon_attr->type_index -
mlxsw_hwmon->module_sensor_max + 1;
return sprintf(buf, "gearbox %03u\n", index);
@@ -655,9 +655,6 @@ static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon)
u8 module_sensor_max;
int i, err;
- if (!mlxsw_core_res_query_enabled(mlxsw_hwmon->core))
- return 0;
-
mlxsw_reg_mgpir_pack(mgpir_pl);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mgpir), mgpir_pl);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index b29824448aa8..05f54bd982c0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -357,6 +357,10 @@ static int mlxsw_thermal_trend_get(struct thermal_zone_device *tzdev,
return 0;
}
+static struct thermal_zone_params mlxsw_thermal_params = {
+ .no_hwmon = true,
+};
+
static struct thermal_zone_device_ops mlxsw_thermal_ops = {
.bind = mlxsw_thermal_bind,
.unbind = mlxsw_thermal_unbind,
@@ -388,11 +392,11 @@ static int mlxsw_thermal_module_bind(struct thermal_zone_device *tzdev,
trip->min_state,
THERMAL_WEIGHT_DEFAULT);
if (err < 0)
- goto err_bind_cooling_device;
+ goto err_thermal_zone_bind_cooling_device;
}
return 0;
-err_bind_cooling_device:
+err_thermal_zone_bind_cooling_device:
for (j = i - 1; j >= 0; j--)
thermal_zone_unbind_cooling_device(tzdev, j, cdev);
return err;
@@ -678,7 +682,8 @@ mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
MLXSW_THERMAL_TRIP_MASK,
module_tz,
&mlxsw_thermal_module_ops,
- NULL, 0,
+ &mlxsw_thermal_params,
+ 0,
module_tz->parent->polling_delay);
if (IS_ERR(module_tz->tzdev)) {
err = PTR_ERR(module_tz->tzdev);
@@ -741,9 +746,6 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
int i, err;
- if (!mlxsw_core_res_query_enabled(core))
- return 0;
-
mlxsw_reg_mgpir_pack(mgpir_pl);
err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
if (err)
@@ -761,7 +763,7 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
for (i = 0; i < thermal->tz_module_num; i++) {
err = mlxsw_thermal_module_init(dev, core, thermal, i);
if (err)
- goto err_unreg_tz_module_arr;
+ goto err_thermal_module_init;
}
for (i = 0; i < thermal->tz_module_num; i++) {
@@ -770,12 +772,13 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
continue;
err = mlxsw_thermal_module_tz_init(module_tz);
if (err)
- goto err_unreg_tz_module_arr;
+ goto err_thermal_module_tz_init;
}
return 0;
-err_unreg_tz_module_arr:
+err_thermal_module_tz_init:
+err_thermal_module_init:
for (i = thermal->tz_module_num - 1; i >= 0; i--)
mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]);
kfree(thermal->tz_module_arr);
@@ -787,9 +790,6 @@ mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal)
{
int i;
- if (!mlxsw_core_res_query_enabled(thermal->core))
- return;
-
for (i = thermal->tz_module_num - 1; i >= 0; i--)
mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]);
kfree(thermal->tz_module_arr);
@@ -808,7 +808,7 @@ mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
MLXSW_THERMAL_TRIP_MASK,
gearbox_tz,
&mlxsw_thermal_gearbox_ops,
- NULL, 0,
+ &mlxsw_thermal_params, 0,
gearbox_tz->parent->polling_delay);
if (IS_ERR(gearbox_tz->tzdev))
return PTR_ERR(gearbox_tz->tzdev);
@@ -837,9 +837,6 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
int i;
int err;
- if (!mlxsw_core_res_query_enabled(core))
- return 0;
-
mlxsw_reg_mgpir_pack(mgpir_pl);
err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
if (err)
@@ -866,12 +863,12 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
gearbox_tz->parent = thermal;
err = mlxsw_thermal_gearbox_tz_init(gearbox_tz);
if (err)
- goto err_unreg_tz_gearbox;
+ goto err_thermal_gearbox_tz_init;
}
return 0;
-err_unreg_tz_gearbox:
+err_thermal_gearbox_tz_init:
for (i--; i >= 0; i--)
mlxsw_thermal_gearbox_tz_fini(&thermal->tz_gearbox_arr[i]);
kfree(thermal->tz_gearbox_arr);
@@ -883,9 +880,6 @@ mlxsw_thermal_gearboxes_fini(struct mlxsw_thermal *thermal)
{
int i;
- if (!mlxsw_core_res_query_enabled(thermal->core))
- return;
-
for (i = thermal->tz_gearbox_num - 1; i >= 0; i--)
mlxsw_thermal_gearbox_tz_fini(&thermal->tz_gearbox_arr[i]);
kfree(thermal->tz_gearbox_arr);
@@ -915,7 +909,7 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfcr), mfcr_pl);
if (err) {
dev_err(dev, "Failed to probe PWMs\n");
- goto err_free_thermal;
+ goto err_reg_query;
}
mlxsw_reg_mfcr_unpack(mfcr_pl, &freq, &tacho_active, &pwm_active);
@@ -929,14 +923,14 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsl),
mfsl_pl);
if (err)
- goto err_free_thermal;
+ goto err_reg_query;
/* set the minimal RPMs to 0 */
mlxsw_reg_mfsl_tach_min_set(mfsl_pl, 0);
err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsl),
mfsl_pl);
if (err)
- goto err_free_thermal;
+ goto err_reg_write;
}
}
for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) {
@@ -949,7 +943,7 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
if (IS_ERR(cdev)) {
err = PTR_ERR(cdev);
dev_err(dev, "Failed to register cooling device\n");
- goto err_unreg_cdevs;
+ goto err_thermal_cooling_device_register;
}
thermal->cdevs[i] = cdev;
}
@@ -968,43 +962,45 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
MLXSW_THERMAL_TRIP_MASK,
thermal,
&mlxsw_thermal_ops,
- NULL, 0,
+ &mlxsw_thermal_params, 0,
thermal->polling_delay);
if (IS_ERR(thermal->tzdev)) {
err = PTR_ERR(thermal->tzdev);
dev_err(dev, "Failed to register thermal zone\n");
- goto err_unreg_cdevs;
+ goto err_thermal_zone_device_register;
}
err = mlxsw_thermal_modules_init(dev, core, thermal);
if (err)
- goto err_unreg_tzdev;
+ goto err_thermal_modules_init;
err = mlxsw_thermal_gearboxes_init(dev, core, thermal);
if (err)
- goto err_unreg_modules_tzdev;
+ goto err_thermal_gearboxes_init;
err = thermal_zone_device_enable(thermal->tzdev);
if (err)
- goto err_unreg_gearboxes;
+ goto err_thermal_zone_device_enable;
*p_thermal = thermal;
return 0;
-err_unreg_gearboxes:
+err_thermal_zone_device_enable:
mlxsw_thermal_gearboxes_fini(thermal);
-err_unreg_modules_tzdev:
+err_thermal_gearboxes_init:
mlxsw_thermal_modules_fini(thermal);
-err_unreg_tzdev:
+err_thermal_modules_init:
if (thermal->tzdev) {
thermal_zone_device_unregister(thermal->tzdev);
thermal->tzdev = NULL;
}
-err_unreg_cdevs:
+err_thermal_zone_device_register:
+err_thermal_cooling_device_register:
for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++)
if (thermal->cdevs[i])
thermal_cooling_device_unregister(thermal->cdevs[i]);
-err_free_thermal:
+err_reg_write:
+err_reg_query:
devm_kfree(dev, thermal);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 10d13f5f9c7d..3bc012dafd08 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -110,7 +110,8 @@ static int mlxsw_m_get_module_info(struct net_device *netdev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_get_module_info(core, mlxsw_m_port->module, modinfo);
+ return mlxsw_env_get_module_info(netdev, core, mlxsw_m_port->module,
+ modinfo);
}
static int
@@ -421,6 +422,7 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
struct netlink_ext_ack *extack)
{
struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
int err;
mlxsw_m->core = mlxsw_core;
@@ -436,7 +438,9 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
return err;
}
+ devl_lock(devlink);
err = mlxsw_m_ports_create(mlxsw_m);
+ devl_unlock(devlink);
if (err) {
dev_err(mlxsw_m->bus_info->dev, "Failed to create ports\n");
return err;
@@ -448,8 +452,11 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
static void mlxsw_m_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ devl_lock(devlink);
mlxsw_m_ports_remove(mlxsw_m);
+ devl_unlock(devlink);
}
static const struct mlxsw_config_profile mlxsw_m_config_profile;
@@ -460,7 +467,6 @@ static struct mlxsw_driver mlxsw_m_driver = {
.init = mlxsw_m_init,
.fini = mlxsw_m_fini,
.profile = &mlxsw_m_config_profile,
- .res_query_enabled = true,
};
static const struct i2c_device_id mlxsw_m_i2c_id[] = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 24cc65018b41..67b1a2f8397f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4482,6 +4482,8 @@ MLXSW_ITEM32(reg, ptys, ext_eth_proto_cap, 0x08, 0, 32);
#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 BIT(21)
#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 BIT(22)
#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4 BIT(23)
+#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_T BIT(24)
+#define MLXSW_REG_PTYS_ETH_SPEED_1000BASE_T BIT(25)
#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR BIT(27)
#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR BIT(28)
#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR BIT(29)
@@ -6062,6 +6064,58 @@ static inline void mlxsw_reg_pllp_unpack(char *payload, u8 *label_port,
*slot_index = mlxsw_reg_pllp_slot_index_get(payload);
}
+/* PMTM - Port Module Type Mapping Register
+ * ----------------------------------------
+ * The PMTM register allows query or configuration of module types.
+ * The register can only be set when the module is disabled by the PMAOS register.
+ */
+#define MLXSW_REG_PMTM_ID 0x5067
+#define MLXSW_REG_PMTM_LEN 0x10
+
+MLXSW_REG_DEFINE(pmtm, MLXSW_REG_PMTM_ID, MLXSW_REG_PMTM_LEN);
+
+/* reg_pmtm_slot_index
+ * Slot index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtm, slot_index, 0x00, 24, 4);
+
+/* reg_pmtm_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtm, module, 0x00, 16, 8);
+
+enum mlxsw_reg_pmtm_module_type {
+ MLXSW_REG_PMTM_MODULE_TYPE_BACKPLANE_4_LANES = 0,
+ MLXSW_REG_PMTM_MODULE_TYPE_QSFP = 1,
+ MLXSW_REG_PMTM_MODULE_TYPE_SFP = 2,
+ MLXSW_REG_PMTM_MODULE_TYPE_BACKPLANE_SINGLE_LANE = 4,
+ MLXSW_REG_PMTM_MODULE_TYPE_BACKPLANE_2_LANES = 8,
+ MLXSW_REG_PMTM_MODULE_TYPE_CHIP2CHIP4X = 10,
+ MLXSW_REG_PMTM_MODULE_TYPE_CHIP2CHIP2X = 11,
+ MLXSW_REG_PMTM_MODULE_TYPE_CHIP2CHIP1X = 12,
+ MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD = 14,
+ MLXSW_REG_PMTM_MODULE_TYPE_OSFP = 15,
+ MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD = 16,
+ MLXSW_REG_PMTM_MODULE_TYPE_DSFP = 17,
+ MLXSW_REG_PMTM_MODULE_TYPE_CHIP2CHIP8X = 18,
+ MLXSW_REG_PMTM_MODULE_TYPE_TWISTED_PAIR = 19,
+};
+
+/* reg_pmtm_module_type
+ * Module type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmtm, module_type, 0x04, 0, 5);
+
+static inline void mlxsw_reg_pmtm_pack(char *payload, u8 slot_index, u8 module)
+{
+ MLXSW_REG_ZERO(pmtm, payload);
+ mlxsw_reg_pmtm_slot_index_set(payload, slot_index);
+ mlxsw_reg_pmtm_module_set(payload, module);
+}
+
/* HTGT - Host Trap Group Table
* ----------------------------
* Configures the properties for forwarding to CPU.
@@ -6087,9 +6141,7 @@ MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
- MLXSW_REG_HTGT_TRAP_GROUP_MFDE,
- MLXSW_REG_HTGT_TRAP_GROUP_MTWE,
- MLXSW_REG_HTGT_TRAP_GROUP_PMPE,
+ MLXSW_REG_HTGT_TRAP_GROUP_CORE_EVENT,
MLXSW_REG_HTGT_TRAP_GROUP_SP_STP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP,
@@ -6732,12 +6784,14 @@ static inline void mlxsw_reg_ritr_counter_pack(char *payload, u32 index,
set_type = MLXSW_REG_RITR_COUNTER_SET_TYPE_BASIC;
else
set_type = MLXSW_REG_RITR_COUNTER_SET_TYPE_NO_COUNT;
- mlxsw_reg_ritr_egress_counter_set_type_set(payload, set_type);
- if (egress)
+ if (egress) {
+ mlxsw_reg_ritr_egress_counter_set_type_set(payload, set_type);
mlxsw_reg_ritr_egress_counter_index_set(payload, index);
- else
+ } else {
+ mlxsw_reg_ritr_ingress_counter_set_type_set(payload, set_type);
mlxsw_reg_ritr_ingress_counter_index_set(payload, index);
+ }
}
static inline void mlxsw_reg_ritr_rif_pack(char *payload, u16 rif)
@@ -9985,6 +10039,7 @@ enum mlxsw_reg_mcia_eeprom_module_info_id {
MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D,
MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11,
MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_DD = 0x18,
+ MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_OSFP = 0x19,
};
enum mlxsw_reg_mcia_eeprom_module_info {
@@ -11271,24 +11326,24 @@ enum mlxsw_reg_mgpir_device_type {
MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE,
};
-/* device_type
+/* mgpir_device_type
* Access: RO
*/
MLXSW_ITEM32(reg, mgpir, device_type, 0x00, 24, 4);
-/* devices_per_flash
+/* mgpir_devices_per_flash
* Number of devices of device_type per flash (can be shared by few devices).
* Access: RO
*/
MLXSW_ITEM32(reg, mgpir, devices_per_flash, 0x00, 16, 8);
-/* num_of_devices
+/* mgpir_num_of_devices
* Number of devices of device_type.
* Access: RO
*/
MLXSW_ITEM32(reg, mgpir, num_of_devices, 0x00, 0, 8);
-/* num_of_modules
+/* mgpir_num_of_modules
* Number of modules.
* Access: RO
*/
@@ -12568,6 +12623,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(pddr),
MLXSW_REG(pmmp),
MLXSW_REG(pllp),
+ MLXSW_REG(pmtm),
MLXSW_REG(htgt),
MLXSW_REG(hpkt),
MLXSW_REG(rgcr),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index c7fc650608eb..daacf6291253 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -33,6 +33,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_ACL_MAX_REGIONS,
MLXSW_RES_ID_ACL_MAX_GROUPS,
MLXSW_RES_ID_ACL_MAX_GROUP_SIZE,
+ MLXSW_RES_ID_ACL_MAX_DEFAULT_ACTIONS,
MLXSW_RES_ID_ACL_FLEX_KEYS,
MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE,
MLXSW_RES_ID_ACL_ACTIONS_PER_SET,
@@ -90,6 +91,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_ACL_MAX_REGIONS] = 0x2903,
[MLXSW_RES_ID_ACL_MAX_GROUPS] = 0x2904,
[MLXSW_RES_ID_ACL_MAX_GROUP_SIZE] = 0x2905,
+ [MLXSW_RES_ID_ACL_MAX_DEFAULT_ACTIONS] = 0x2908,
[MLXSW_RES_ID_ACL_FLEX_KEYS] = 0x2910,
[MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE] = 0x2911,
[MLXSW_RES_ID_ACL_ACTIONS_PER_SET] = 0x2912,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index aa411dec62f0..8eb05090ffec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -45,52 +45,49 @@
#include "spectrum_ptp.h"
#include "spectrum_trap.h"
+#define MLXSW_SP_FWREV_MINOR 2010
+#define MLXSW_SP_FWREV_SUBMINOR 1006
+
#define MLXSW_SP1_FWREV_MAJOR 13
-#define MLXSW_SP1_FWREV_MINOR 2010
-#define MLXSW_SP1_FWREV_SUBMINOR 1006
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
.major = MLXSW_SP1_FWREV_MAJOR,
- .minor = MLXSW_SP1_FWREV_MINOR,
- .subminor = MLXSW_SP1_FWREV_SUBMINOR,
+ .minor = MLXSW_SP_FWREV_MINOR,
+ .subminor = MLXSW_SP_FWREV_SUBMINOR,
.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};
#define MLXSW_SP1_FW_FILENAME \
"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
- "." __stringify(MLXSW_SP1_FWREV_MINOR) \
- "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
+ "." __stringify(MLXSW_SP_FWREV_MINOR) \
+ "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"
#define MLXSW_SP2_FWREV_MAJOR 29
-#define MLXSW_SP2_FWREV_MINOR 2010
-#define MLXSW_SP2_FWREV_SUBMINOR 1006
static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
.major = MLXSW_SP2_FWREV_MAJOR,
- .minor = MLXSW_SP2_FWREV_MINOR,
- .subminor = MLXSW_SP2_FWREV_SUBMINOR,
+ .minor = MLXSW_SP_FWREV_MINOR,
+ .subminor = MLXSW_SP_FWREV_SUBMINOR,
};
#define MLXSW_SP2_FW_FILENAME \
"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
- "." __stringify(MLXSW_SP2_FWREV_MINOR) \
- "." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"
+ "." __stringify(MLXSW_SP_FWREV_MINOR) \
+ "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"
#define MLXSW_SP3_FWREV_MAJOR 30
-#define MLXSW_SP3_FWREV_MINOR 2010
-#define MLXSW_SP3_FWREV_SUBMINOR 1006
static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
.major = MLXSW_SP3_FWREV_MAJOR,
- .minor = MLXSW_SP3_FWREV_MINOR,
- .subminor = MLXSW_SP3_FWREV_SUBMINOR,
+ .minor = MLXSW_SP_FWREV_MINOR,
+ .subminor = MLXSW_SP_FWREV_SUBMINOR,
};
#define MLXSW_SP3_FW_FILENAME \
"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
- "." __stringify(MLXSW_SP3_FWREV_MINOR) \
- "." __stringify(MLXSW_SP3_FWREV_SUBMINOR) ".mfa2"
+ "." __stringify(MLXSW_SP_FWREV_MINOR) \
+ "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"
static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
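With the minor and subminor numbers now shared across the Spectrum generations, the per-ASIC filename macros above expand to the following strings (shown for reference only):

/* MLXSW_SP1_FW_FILENAME -> "mellanox/mlxsw_spectrum-13.2010.1006.mfa2"
 * MLXSW_SP2_FW_FILENAME -> "mellanox/mlxsw_spectrum2-29.2010.1006.mfa2"
 * MLXSW_SP3_FW_FILENAME -> "mellanox/mlxsw_spectrum3-30.2010.1006.mfa2"
 */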
@@ -2148,13 +2145,11 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_port *mlxsw_sp_port;
enum mlxsw_reg_pude_oper_status status;
- unsigned int max_ports;
u16 local_port;
- max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
local_port = mlxsw_reg_pude_local_port_get(pude_pl);
- if (WARN_ON_ONCE(!local_port || local_port >= max_ports))
+ if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
return;
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port)
@@ -2393,45 +2388,6 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
return 0;
}
-static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_listener listeners[],
- size_t listeners_count)
-{
- int i;
- int err;
-
- for (i = 0; i < listeners_count; i++) {
- err = mlxsw_core_trap_register(mlxsw_sp->core,
- &listeners[i],
- mlxsw_sp);
- if (err)
- goto err_listener_register;
-
- }
- return 0;
-
-err_listener_register:
- for (i--; i >= 0; i--) {
- mlxsw_core_trap_unregister(mlxsw_sp->core,
- &listeners[i],
- mlxsw_sp);
- }
- return err;
-}
-
-static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_listener listeners[],
- size_t listeners_count)
-{
- int i;
-
- for (i = 0; i < listeners_count; i++) {
- mlxsw_core_trap_unregister(mlxsw_sp->core,
- &listeners[i],
- mlxsw_sp);
- }
-}
-
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_trap *trap;
@@ -2456,21 +2412,23 @@ static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_trap_groups_set;
- err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
- ARRAY_SIZE(mlxsw_sp_listener));
+ err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener,
+ ARRAY_SIZE(mlxsw_sp_listener),
+ mlxsw_sp);
if (err)
goto err_traps_register;
- err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
- mlxsw_sp->listeners_count);
+ err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners,
+ mlxsw_sp->listeners_count, mlxsw_sp);
if (err)
goto err_extra_traps_init;
return 0;
err_extra_traps_init:
- mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
- ARRAY_SIZE(mlxsw_sp_listener));
+ mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
+ ARRAY_SIZE(mlxsw_sp_listener),
+ mlxsw_sp);
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
@@ -2480,10 +2438,11 @@ err_cpu_policers_set:
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
- mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
- mlxsw_sp->listeners_count);
- mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
- ARRAY_SIZE(mlxsw_sp_listener));
+ mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners,
+ mlxsw_sp->listeners_count,
+ mlxsw_sp);
+ mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
+ ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp);
kfree(mlxsw_sp->trap);
}
@@ -2528,42 +2487,6 @@ static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
kfree(mlxsw_sp->lags);
}
-static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
-{
- char htgt_pl[MLXSW_REG_HTGT_LEN];
- int err;
-
- mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
- MLXSW_REG_HTGT_INVALID_POLICER,
- MLXSW_REG_HTGT_DEFAULT_PRIORITY,
- MLXSW_REG_HTGT_DEFAULT_TC);
- err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
- if (err)
- return err;
-
- mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MFDE,
- MLXSW_REG_HTGT_INVALID_POLICER,
- MLXSW_REG_HTGT_DEFAULT_PRIORITY,
- MLXSW_REG_HTGT_DEFAULT_TC);
- err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
- if (err)
- return err;
-
- mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MTWE,
- MLXSW_REG_HTGT_INVALID_POLICER,
- MLXSW_REG_HTGT_DEFAULT_PRIORITY,
- MLXSW_REG_HTGT_DEFAULT_TC);
- err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
- if (err)
- return err;
-
- mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_PMPE,
- MLXSW_REG_HTGT_INVALID_POLICER,
- MLXSW_REG_HTGT_DEFAULT_PRIORITY,
- MLXSW_REG_HTGT_DEFAULT_TC);
- return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
-}
-
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
.clock_init = mlxsw_sp1_ptp_clock_init,
.clock_fini = mlxsw_sp1_ptp_clock_fini,
@@ -2895,6 +2818,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
int err;
mlxsw_sp->core = mlxsw_core;
@@ -3055,7 +2979,9 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_sample_trigger_init;
}
+ devl_lock(devlink);
err = mlxsw_sp_ports_create(mlxsw_sp);
+ devl_unlock(devlink);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
goto err_ports_create;
@@ -3236,8 +3162,12 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ devl_lock(devlink);
mlxsw_sp_ports_remove(mlxsw_sp);
+ devl_unlock(devlink);
+
rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
mlxsw_sp_port_module_info_fini(mlxsw_sp);
mlxsw_sp_dpipe_fini(mlxsw_sp);
@@ -3677,7 +3607,6 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.fw_filename = MLXSW_SP1_FW_FILENAME,
.init = mlxsw_sp1_init,
.fini = mlxsw_sp_fini,
- .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
.port_split = mlxsw_sp_port_split,
.port_unsplit = mlxsw_sp_port_unsplit,
.sb_pool_get = mlxsw_sp_sb_pool_get,
@@ -3705,9 +3634,6 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp1_config_profile,
- .res_query_enabled = true,
- .fw_fatal_enabled = true,
- .temp_warn_enabled = true,
};
static struct mlxsw_driver mlxsw_sp2_driver = {
@@ -3717,7 +3643,6 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.fw_filename = MLXSW_SP2_FW_FILENAME,
.init = mlxsw_sp2_init,
.fini = mlxsw_sp_fini,
- .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
.port_split = mlxsw_sp_port_split,
.port_unsplit = mlxsw_sp_port_unsplit,
.sb_pool_get = mlxsw_sp_sb_pool_get,
@@ -3746,9 +3671,6 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
- .res_query_enabled = true,
- .fw_fatal_enabled = true,
- .temp_warn_enabled = true,
};
static struct mlxsw_driver mlxsw_sp3_driver = {
@@ -3758,7 +3680,6 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.fw_filename = MLXSW_SP3_FW_FILENAME,
.init = mlxsw_sp3_init,
.fini = mlxsw_sp_fini,
- .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
.port_split = mlxsw_sp_port_split,
.port_unsplit = mlxsw_sp_port_unsplit,
.sb_pool_get = mlxsw_sp_sb_pool_get,
@@ -3787,9 +3708,6 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
- .res_query_enabled = true,
- .fw_fatal_enabled = true,
- .temp_warn_enabled = true,
};
static struct mlxsw_driver mlxsw_sp4_driver = {
@@ -3797,7 +3715,6 @@ static struct mlxsw_driver mlxsw_sp4_driver = {
.priv_size = sizeof(struct mlxsw_sp),
.init = mlxsw_sp4_init,
.fini = mlxsw_sp_fini,
- .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
.port_split = mlxsw_sp_port_split,
.port_unsplit = mlxsw_sp_port_unsplit,
.sb_pool_get = mlxsw_sp_sb_pool_get,
@@ -3826,9 +3743,6 @@ static struct mlxsw_driver mlxsw_sp4_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
- .res_query_enabled = true,
- .fw_fatal_enabled = true,
- .temp_warn_enabled = true,
};
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
@@ -4916,6 +4830,22 @@ static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+static bool mlxsw_sp_netdevice_event_is_router(unsigned long event)
+{
+ switch (event) {
+ case NETDEV_PRE_CHANGEADDR:
+ case NETDEV_CHANGEADDR:
+ case NETDEV_CHANGEMTU:
+ case NETDEV_OFFLOAD_XSTATS_ENABLE:
+ case NETDEV_OFFLOAD_XSTATS_DISABLE:
+ case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
+ case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -4940,9 +4870,7 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
event, ptr);
- else if (event == NETDEV_PRE_CHANGEADDR ||
- event == NETDEV_CHANGEADDR ||
- event == NETDEV_CHANGEMTU)
+ else if (mlxsw_sp_netdevice_event_is_router(event))
err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
else if (mlxsw_sp_is_vrf_event(event, ptr))
err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index bb2442e1f705..20588e699588 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -481,6 +481,13 @@ int
mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool is_8021ad_tagged,
bool is_8021q_tagged);
+static inline bool
+mlxsw_sp_local_port_is_valid(struct mlxsw_sp *mlxsw_sp, u16 local_port)
+{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+
+ return local_port < max_ports && local_port;
+}
/* spectrum_buffers.c */
struct mlxsw_sp_hdroom_prio {
@@ -813,6 +820,24 @@ int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core);
/* spectrum2_kvdl.c */
extern const struct mlxsw_sp_kvdl_ops mlxsw_sp2_kvdl_ops;
+enum mlxsw_sp_acl_mangle_field {
+ MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP_SPORT,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP_DPORT,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP4_SIP,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP4_DIP,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_1,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_2,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_3,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_4,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_1,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_2,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_3,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_4,
+};
+
struct mlxsw_sp_acl_rule_info {
unsigned int priority;
struct mlxsw_afk_element_values values;
@@ -821,9 +846,14 @@ struct mlxsw_sp_acl_rule_info {
ingress_bind_blocker:1,
egress_bind_blocker:1,
counter_valid:1,
- policer_index_valid:1;
+ policer_index_valid:1,
+ ipv6_valid:1;
unsigned int counter_index;
u16 policer_index;
+ struct {
+ u32 prev_val;
+ enum mlxsw_sp_acl_mangle_field prev_field;
+ } ipv6;
};
/* spectrum_flow.c */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
index a9fff8adc75e..d20e794e01ca 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
@@ -213,7 +213,6 @@ mlxsw_sp1_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp1_kvdl_part *part;
bool need_update = true;
unsigned int nr_entries;
- size_t usage_size;
u64 resource_size;
int err;
@@ -225,8 +224,8 @@ mlxsw_sp1_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
}
nr_entries = div_u64(resource_size, info->alloc_size);
- usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long);
- part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
+ part = kzalloc(struct_size(part, usage, BITS_TO_LONGS(nr_entries)),
+ GFP_KERNEL);
if (!part)
return ERR_PTR(-ENOMEM);
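
The kzalloc() change above switches the flexible-array sizing to struct_size() from <linux/overflow.h>, which computes sizeof(*part) plus the trailing usage[] bitmap and saturates to SIZE_MAX instead of wrapping on arithmetic overflow. A stand-alone sketch of the same pattern (simplified names, not driver code):

    #include <linux/bitops.h>
    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct demo_part {
            unsigned int nr_entries;
            unsigned long usage[];  /* trailing bitmap, as in the hunk above */
    };

    static struct demo_part *demo_part_alloc(unsigned int nr_entries)
    {
            struct demo_part *part;

            /* struct_size() == sizeof(*part) +
             * BITS_TO_LONGS(nr_entries) * sizeof(part->usage[0]),
             * saturated on overflow so a huge nr_entries makes the
             * allocation fail instead of under-allocating. */
            part = kzalloc(struct_size(part, usage, BITS_TO_LONGS(nr_entries)),
                           GFP_KERNEL);
            if (part)
                    part->nr_entries = nr_entries;
            return part;
    }
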
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
index ad69913f19c1..5b0210862655 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
@@ -77,7 +77,14 @@ static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
int i;
int err;
+ /* Some TCAM regions are not exposed to the host and are used
+ * internally by the device. Allocate KVDL entries for the default
+ * actions of these regions to prevent the host from overwriting them.
+ */
tcam->kvdl_count = _tcam->max_regions;
+ if (MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_DEFAULT_ACTIONS))
+ tcam->kvdl_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_MAX_DEFAULT_ACTIONS);
err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
tcam->kvdl_count, &tcam->kvdl_index);
if (err)
@@ -97,7 +104,10 @@ static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
goto err_afa_block_continue;
enc_actions = mlxsw_afa_block_cur_set(afa_block);
- for (i = 0; i < tcam->kvdl_count; i++) {
+ /* Only write to KVDL entries used by TCAM regions exposed to the
+ * host.
+ */
+ for (i = 0; i < _tcam->max_regions; i++) {
mlxsw_reg_pefa_pack(pefa_pl, tcam->kvdl_index + i,
true, enc_actions);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 70c11bfac08f..6c5af018546f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -505,14 +505,6 @@ int mlxsw_sp_acl_rulei_act_priority(struct mlxsw_sp *mlxsw_sp,
extack);
}
-enum mlxsw_sp_acl_mangle_field {
- MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD,
- MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP,
- MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN,
- MLXSW_SP_ACL_MANGLE_FIELD_IP_SPORT,
- MLXSW_SP_ACL_MANGLE_FIELD_IP_DPORT,
-};
-
struct mlxsw_sp_acl_mangle_action {
enum flow_action_mangle_base htype;
/* Offset is u32-aligned. */
@@ -561,6 +553,18 @@ static struct mlxsw_sp_acl_mangle_action mlxsw_sp_acl_mangle_actions[] = {
MLXSW_SP_ACL_MANGLE_ACTION_UDP(0, 0x0000ffff, 16, IP_SPORT),
MLXSW_SP_ACL_MANGLE_ACTION_UDP(0, 0xffff0000, 0, IP_DPORT),
+
+ MLXSW_SP_ACL_MANGLE_ACTION_IP4(12, 0x00000000, 0, IP4_SIP),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP4(16, 0x00000000, 0, IP4_DIP),
+
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(8, 0x00000000, 0, IP6_SIP_1),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(12, 0x00000000, 0, IP6_SIP_2),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(16, 0x00000000, 0, IP6_SIP_3),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(20, 0x00000000, 0, IP6_SIP_4),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(24, 0x00000000, 0, IP6_DIP_1),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(28, 0x00000000, 0, IP6_DIP_2),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(32, 0x00000000, 0, IP6_DIP_3),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(36, 0x00000000, 0, IP6_DIP_4),
};
static int
@@ -599,6 +603,22 @@ static int mlxsw_sp1_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
return err;
}
+static int
+mlxsw_sp2_acl_rulei_act_mangle_field_ip_odd(struct mlxsw_sp_acl_rule_info *rulei,
+ enum mlxsw_sp_acl_mangle_field field,
+ u32 val, struct netlink_ext_ack *extack)
+{
+ if (!rulei->ipv6_valid) {
+ rulei->ipv6.prev_val = val;
+ rulei->ipv6_valid = true;
+ rulei->ipv6.prev_field = field;
+ return 0;
+ }
+
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field order");
+ return -EOPNOTSUPP;
+}
+
static int mlxsw_sp2_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct mlxsw_sp_acl_mangle_action *mact,
@@ -615,6 +635,61 @@ static int mlxsw_sp2_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
return mlxsw_afa_block_append_l4port(rulei->act_block, false, val, extack);
case MLXSW_SP_ACL_MANGLE_FIELD_IP_DPORT:
return mlxsw_afa_block_append_l4port(rulei->act_block, true, val, extack);
+ /* IPv4 fields */
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP4_SIP:
+ return mlxsw_afa_block_append_ip(rulei->act_block, false,
+ true, val, 0, extack);
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP4_DIP:
+ return mlxsw_afa_block_append_ip(rulei->act_block, true,
+ true, val, 0, extack);
+ /* IPv6 fields */
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_1:
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_3:
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_1:
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_3:
+ return mlxsw_sp2_acl_rulei_act_mangle_field_ip_odd(rulei,
+ mact->field,
+ val, extack);
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_2:
+ if (rulei->ipv6_valid &&
+ rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_1) {
+ rulei->ipv6_valid = false;
+ return mlxsw_afa_block_append_ip(rulei->act_block,
+ false, false, val,
+ rulei->ipv6.prev_val,
+ extack);
+ }
+ break;
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_4:
+ if (rulei->ipv6_valid &&
+ rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_3) {
+ rulei->ipv6_valid = false;
+ return mlxsw_afa_block_append_ip(rulei->act_block,
+ false, true, val,
+ rulei->ipv6.prev_val,
+ extack);
+ }
+ break;
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_2:
+ if (rulei->ipv6_valid &&
+ rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_1) {
+ rulei->ipv6_valid = false;
+ return mlxsw_afa_block_append_ip(rulei->act_block,
+ true, false, val,
+ rulei->ipv6.prev_val,
+ extack);
+ }
+ break;
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_4:
+ if (rulei->ipv6_valid &&
+ rulei->ipv6.prev_field == MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_3) {
+ rulei->ipv6_valid = false;
+ return mlxsw_afa_block_append_ip(rulei->act_block,
+ true, true, val,
+ rulei->ipv6.prev_val,
+ extack);
+ }
+ break;
default:
break;
}
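
The Spectrum-2 action engine rewrites an IPv6 address two 32-bit words (64 bits) per mlxsw_afa_block_append_ip() call, while pedit delivers it as four separate words (offsets 8-20 for the source address, 24-36 for the destination, per the action table above). The helper mlxsw_sp2_acl_rulei_act_mangle_field_ip_odd() therefore parks each odd word in rulei->ipv6 and the even-word cases emit one hardware action for the pair. A compressed sketch of the ordering rule, using the enum added to spectrum.h (illustration only, not driver code):

    /* True when "field" is the even 32-bit word that completes the odd
     * word currently parked in rulei->ipv6. */
    static bool demo_ip6_words_pair(enum mlxsw_sp_acl_mangle_field prev,
                                    enum mlxsw_sp_acl_mangle_field field)
    {
            switch (field) {
            case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_2:
            case MLXSW_SP_ACL_MANGLE_FIELD_IP6_SIP_4:
            case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_2:
            case MLXSW_SP_ACL_MANGLE_FIELD_IP6_DIP_4:
                    return field == prev + 1;
            default:
                    return false;
            }
    }

From user space this path is normally reached through a flower rule with an extended pedit action, e.g. something along the lines of "action pedit ex munge ip6 dst set 2001:db8::1" (shown only as an illustration; exact syntax depends on the iproute2 version).
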
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 1a2fef2a5379..5d494fabf93d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -266,10 +266,10 @@ static int mlxsw_sp_dpipe_table_erif_counters_update(void *priv, bool enable)
if (!rif)
continue;
if (enable)
- mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
+ mlxsw_sp_rif_counter_alloc(rif,
MLXSW_SP_RIF_COUNTER_EGRESS);
else
- mlxsw_sp_rif_counter_free(mlxsw_sp, rif,
+ mlxsw_sp_rif_counter_free(rif,
MLXSW_SP_RIF_COUNTER_EGRESS);
}
mutex_unlock(&mlxsw_sp->router->lock);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 20530712eadb..8b5d7f83b9b0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -1034,13 +1034,10 @@ static int mlxsw_sp_get_module_info(struct net_device *netdev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- int err;
-
- err = mlxsw_env_get_module_info(mlxsw_sp->core,
- mlxsw_sp_port->mapping.module,
- modinfo);
- return err;
+ return mlxsw_env_get_module_info(netdev, mlxsw_sp->core,
+ mlxsw_sp_port->mapping.module,
+ modinfo);
}
static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
@@ -1048,13 +1045,10 @@ static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- int err;
-
- err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core,
- mlxsw_sp_port->mapping.module, ee,
- data);
- return err;
+ return mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core,
+ mlxsw_sp_port->mapping.module, ee,
+ data);
}
static int
@@ -1273,12 +1267,22 @@ struct mlxsw_sp1_port_link_mode {
static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
{
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ .speed = SPEED_100,
+ },
+ {
.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
.mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
.speed = SPEED_1000,
},
{
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_1000BASE_T,
+ .mask_ethtool = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ .speed = SPEED_1000,
+ },
+ {
.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index bb417db773b9..e91fb205e0b4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -15,6 +15,46 @@
#include "spectrum.h"
#include "core_acl_flex_keys.h"
+static int mlxsw_sp_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_flow_block *block,
struct mlxsw_sp_acl_rule_info *rulei,
@@ -191,10 +231,9 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
}
- if (act->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
- return -EOPNOTSUPP;
- }
+ err = mlxsw_sp_policer_validate(flow_action, act, extack);
+ if (err)
+ return err;
/* The kernel might adjust the requested burst size so
* that it is not exactly a power of two. Re-adjust it
@@ -233,6 +272,12 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
}
}
+
+ if (rulei->ipv6_valid) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
+ return -EOPNOTSUPP;
+ }
+
return 0;
}
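
mlxsw_sp_policer_validate() gathers the policer offload checks in one place: the hardware policer models a single rate/burst pair whose conform action is pipe (or ok when it is the last action) and whose exceed action is drop, with no peak rate, average rate, overhead or packet-per-second mode. In tc terms, a flower rule with something like "action police rate 100mbit burst 16k conform-exceed drop/pipe" is expected to satisfy these checks, whereas adding peakrate or a pps rate is rejected with the extack messages above (command quoted only as an illustration of the accepted shape).
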
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 0ff163fbc775..35422e64d89f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -568,12 +568,11 @@ void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
u8 domain_number, u16 sequence_id,
u64 timestamp)
{
- unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
struct mlxsw_sp_port *mlxsw_sp_port;
struct mlxsw_sp1_ptp_key key;
u8 types;
- if (WARN_ON_ONCE(local_port >= max_ports))
+ if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
return;
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index d40762cfc453..79deb19e3a19 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -225,6 +225,64 @@ int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+struct mlxsw_sp_rif_counter_set_basic {
+ u64 good_unicast_packets;
+ u64 good_multicast_packets;
+ u64 good_broadcast_packets;
+ u64 good_unicast_bytes;
+ u64 good_multicast_bytes;
+ u64 good_broadcast_bytes;
+ u64 error_packets;
+ u64 discard_packets;
+ u64 error_bytes;
+ u64 discard_bytes;
+};
+
+static int
+mlxsw_sp_rif_counter_fetch_clear(struct mlxsw_sp_rif *rif,
+ enum mlxsw_sp_rif_counter_dir dir,
+ struct mlxsw_sp_rif_counter_set_basic *set)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ char ricnt_pl[MLXSW_REG_RICNT_LEN];
+ unsigned int *p_counter_index;
+ int err;
+
+ if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
+ return -EINVAL;
+
+ p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
+ if (!p_counter_index)
+ return -EINVAL;
+
+ mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
+ MLXSW_REG_RICNT_OPCODE_CLEAR);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
+ if (err)
+ return err;
+
+ if (!set)
+ return 0;
+
+#define MLXSW_SP_RIF_COUNTER_EXTRACT(NAME) \
+ (set->NAME = mlxsw_reg_ricnt_ ## NAME ## _get(ricnt_pl))
+
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_packets);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_packets);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_packets);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_bytes);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_bytes);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_bytes);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(error_packets);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(discard_packets);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(error_bytes);
+ MLXSW_SP_RIF_COUNTER_EXTRACT(discard_bytes);
+
+#undef MLXSW_SP_RIF_COUNTER_EXTRACT
+
+ return 0;
+}
+
static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index)
{
@@ -235,16 +293,20 @@ static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}
-int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *rif,
+int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir)
{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
unsigned int *p_counter_index;
int err;
+ if (mlxsw_sp_rif_counter_valid_get(rif, dir))
+ return 0;
+
p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
if (!p_counter_index)
return -EINVAL;
+
err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
p_counter_index);
if (err)
@@ -268,10 +330,10 @@ err_counter_clear:
return err;
}
-void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *rif,
+void mlxsw_sp_rif_counter_free(struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir)
{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
unsigned int *p_counter_index;
if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
@@ -296,14 +358,12 @@ static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
if (!devlink_dpipe_table_counter_enabled(devlink,
MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
return;
- mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+ mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}
static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
- struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
-
- mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+ mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
@@ -8148,6 +8208,166 @@ u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
return lb_rif->ul_rif_id;
}
+static bool
+mlxsw_sp_router_port_l3_stats_enabled(struct mlxsw_sp_rif *rif)
+{
+ return mlxsw_sp_rif_counter_valid_get(rif,
+ MLXSW_SP_RIF_COUNTER_EGRESS) &&
+ mlxsw_sp_rif_counter_valid_get(rif,
+ MLXSW_SP_RIF_COUNTER_INGRESS);
+}
+
+static int
+mlxsw_sp_router_port_l3_stats_enable(struct mlxsw_sp_rif *rif)
+{
+ int err;
+
+ err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
+ if (err)
+ return err;
+
+ /* Clear stale data. */
+ err = mlxsw_sp_rif_counter_fetch_clear(rif,
+ MLXSW_SP_RIF_COUNTER_INGRESS,
+ NULL);
+ if (err)
+ goto err_clear_ingress;
+
+ err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+ if (err)
+ goto err_alloc_egress;
+
+ /* Clear stale data. */
+ err = mlxsw_sp_rif_counter_fetch_clear(rif,
+ MLXSW_SP_RIF_COUNTER_EGRESS,
+ NULL);
+ if (err)
+ goto err_clear_egress;
+
+ return 0;
+
+err_clear_egress:
+ mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+err_alloc_egress:
+err_clear_ingress:
+ mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
+ return err;
+}
+
+static void
+mlxsw_sp_router_port_l3_stats_disable(struct mlxsw_sp_rif *rif)
+{
+ mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+ mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
+}
+
+static void
+mlxsw_sp_router_port_l3_stats_report_used(struct mlxsw_sp_rif *rif,
+ struct netdev_notifier_offload_xstats_info *info)
+{
+ if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
+ return;
+ netdev_offload_xstats_report_used(info->report_used);
+}
+
+static int
+mlxsw_sp_router_port_l3_stats_fetch(struct mlxsw_sp_rif *rif,
+ struct rtnl_hw_stats64 *p_stats)
+{
+ struct mlxsw_sp_rif_counter_set_basic ingress;
+ struct mlxsw_sp_rif_counter_set_basic egress;
+ int err;
+
+ err = mlxsw_sp_rif_counter_fetch_clear(rif,
+ MLXSW_SP_RIF_COUNTER_INGRESS,
+ &ingress);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_rif_counter_fetch_clear(rif,
+ MLXSW_SP_RIF_COUNTER_EGRESS,
+ &egress);
+ if (err)
+ return err;
+
+#define MLXSW_SP_ROUTER_ALL_GOOD(SET, SFX) \
+ ((SET.good_unicast_ ## SFX) + \
+ (SET.good_multicast_ ## SFX) + \
+ (SET.good_broadcast_ ## SFX))
+
+ p_stats->rx_packets = MLXSW_SP_ROUTER_ALL_GOOD(ingress, packets);
+ p_stats->tx_packets = MLXSW_SP_ROUTER_ALL_GOOD(egress, packets);
+ p_stats->rx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(ingress, bytes);
+ p_stats->tx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(egress, bytes);
+ p_stats->rx_errors = ingress.error_packets;
+ p_stats->tx_errors = egress.error_packets;
+ p_stats->rx_dropped = ingress.discard_packets;
+ p_stats->tx_dropped = egress.discard_packets;
+ p_stats->multicast = ingress.good_multicast_packets +
+ ingress.good_broadcast_packets;
+
+#undef MLXSW_SP_ROUTER_ALL_GOOD
+
+ return 0;
+}
+
+static int
+mlxsw_sp_router_port_l3_stats_report_delta(struct mlxsw_sp_rif *rif,
+ struct netdev_notifier_offload_xstats_info *info)
+{
+ struct rtnl_hw_stats64 stats = {};
+ int err;
+
+ if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
+ return 0;
+
+ err = mlxsw_sp_router_port_l3_stats_fetch(rif, &stats);
+ if (err)
+ return err;
+
+ netdev_offload_xstats_report_delta(info->report_delta, &stats);
+ return 0;
+}
+
+struct mlxsw_sp_router_hwstats_notify_work {
+ struct work_struct work;
+ struct net_device *dev;
+};
+
+static void mlxsw_sp_router_hwstats_notify_work(struct work_struct *work)
+{
+ struct mlxsw_sp_router_hwstats_notify_work *hws_work =
+ container_of(work, struct mlxsw_sp_router_hwstats_notify_work,
+ work);
+
+ rtnl_lock();
+ rtnl_offload_xstats_notify(hws_work->dev);
+ rtnl_unlock();
+ dev_put(hws_work->dev);
+ kfree(hws_work);
+}
+
+static void
+mlxsw_sp_router_hwstats_notify_schedule(struct net_device *dev)
+{
+ struct mlxsw_sp_router_hwstats_notify_work *hws_work;
+
+ /* To collect the notification payload, the core ends up sending
+ * another notification through the notifier chain, which would
+ * deadlock on the attempt to acquire the router lock again. Just
+ * postpone the notification until later.
+ */
+
+ hws_work = kzalloc(sizeof(*hws_work), GFP_KERNEL);
+ if (!hws_work)
+ return;
+
+ INIT_WORK(&hws_work->work, mlxsw_sp_router_hwstats_notify_work);
+ dev_hold(dev);
+ hws_work->dev = dev;
+ mlxsw_core_schedule_work(&hws_work->work);
+}
+
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
{
return rif->dev->ifindex;
@@ -8158,6 +8378,16 @@ const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
return rif->dev;
}
+static void mlxsw_sp_rif_push_l3_stats(struct mlxsw_sp_rif *rif)
+{
+ struct rtnl_hw_stats64 stats = {};
+
+ if (!mlxsw_sp_router_port_l3_stats_fetch(rif, &stats))
+ netdev_offload_xstats_push_delta(rif->dev,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3,
+ &stats);
+}
+
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_rif_params *params,
@@ -8218,10 +8448,19 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
goto err_mr_rif_add;
}
- mlxsw_sp_rif_counters_alloc(rif);
+ if (netdev_offload_xstats_enabled(rif->dev,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
+ err = mlxsw_sp_router_port_l3_stats_enable(rif);
+ if (err)
+ goto err_stats_enable;
+ mlxsw_sp_router_hwstats_notify_schedule(rif->dev);
+ } else {
+ mlxsw_sp_rif_counters_alloc(rif);
+ }
return rif;
+err_stats_enable:
err_mr_rif_add:
for (i--; i >= 0; i--)
mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
@@ -8251,7 +8490,15 @@ static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
vr = &mlxsw_sp->router->vrs[rif->vr_id];
- mlxsw_sp_rif_counters_free(rif);
+ if (netdev_offload_xstats_enabled(rif->dev,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
+ mlxsw_sp_rif_push_l3_stats(rif);
+ mlxsw_sp_router_port_l3_stats_disable(rif);
+ mlxsw_sp_router_hwstats_notify_schedule(rif->dev);
+ } else {
+ mlxsw_sp_rif_counters_free(rif);
+ }
+
for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
ops->deconfigure(rif);
@@ -9128,6 +9375,35 @@ static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
return -ENOBUFS;
}
+static int
+mlxsw_sp_router_port_offload_xstats_cmd(struct mlxsw_sp_rif *rif,
+ unsigned long event,
+ struct netdev_notifier_offload_xstats_info *info)
+{
+ switch (info->type) {
+ case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
+ break;
+ default:
+ return 0;
+ }
+
+ switch (event) {
+ case NETDEV_OFFLOAD_XSTATS_ENABLE:
+ return mlxsw_sp_router_port_l3_stats_enable(rif);
+ case NETDEV_OFFLOAD_XSTATS_DISABLE:
+ mlxsw_sp_router_port_l3_stats_disable(rif);
+ return 0;
+ case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
+ mlxsw_sp_router_port_l3_stats_report_used(rif, info);
+ return 0;
+ case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
+ return mlxsw_sp_router_port_l3_stats_report_delta(rif, info);
+ }
+
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
unsigned long event, void *ptr)
{
@@ -9153,6 +9429,15 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
case NETDEV_PRE_CHANGEADDR:
err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
break;
+ case NETDEV_OFFLOAD_XSTATS_ENABLE:
+ case NETDEV_OFFLOAD_XSTATS_DISABLE:
+ case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
+ case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
+ err = mlxsw_sp_router_port_offload_xstats_cmd(rif, event, ptr);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
}
out:
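
Taken together, the spectrum_router.c changes let a Spectrum RIF back the kernel's hardware L3 statistics: when the underlying netdevice has L3 xstats enabled, dedicated ingress and egress RIF counters are allocated (instead of the dpipe eRIF egress counter), cleared of stale data, folded into rtnl_hw_stats64 via the RICNT fetch-and-clear helper, and the remaining delta is pushed out just before the RIF is destroyed. With a recent enough iproute2 this is typically exercised with commands along the lines of "ip stats set dev swp1 l3_stats on" and "ip stats show dev swp1 group offload subgroup l3_stats" (swp1 is a placeholder interface name; exact syntax depends on the iproute2 version).
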
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 99e8371a82a5..fa829658a11b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -159,11 +159,9 @@ int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir,
u64 *cnt);
-void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *rif,
+void mlxsw_sp_rif_counter_free(struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir);
-int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *rif,
+int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir);
struct mlxsw_sp_neigh_entry *
mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index f9671cc53002..b73466470f75 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -269,8 +269,7 @@ mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
return NULL;
- if (!vid ||
- br_vlan_get_info(br_dev, vid, &vinfo) ||
+ if (!vid || br_vlan_get_info(br_dev, vid, &vinfo) ||
!(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
return NULL;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 65c1724c63b0..3bf12092a8a2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1234,8 +1234,7 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
if (netif_is_bridge_master(orig_dev)) {
int err = 0;
- if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
- br_vlan_enabled(orig_dev))
+ if (br_vlan_enabled(orig_dev))
err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
orig_dev, vlan);
if (!err)
@@ -2616,7 +2615,6 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
char *sfn_pl, int rec_index,
bool adding)
{
- unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
@@ -2630,7 +2628,7 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
- if (WARN_ON_ONCE(local_port >= max_ports))
+ if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
return;
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port) {
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index d167d93e4c12..82d55fc27edc 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -293,7 +293,7 @@ static void ks8851_wrfifo_spi(struct ks8851_net *ks, struct sk_buff *txp,
*/
static void ks8851_rx_skb_spi(struct ks8851_net *ks, struct sk_buff *skb)
{
- netif_rx_ni(skb);
+ netif_rx(skb);
}
/**
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index d024983815da..2b3eb5ed8233 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -5225,7 +5225,6 @@ static irqreturn_t netdev_intr(int irq, void *dev_id)
* Linux network device functions
*/
-static unsigned long next_jiffies;
#ifdef CONFIG_NET_POLL_CONTROLLER
static void netdev_netpoll(struct net_device *dev)
@@ -5411,10 +5410,12 @@ static int netdev_open(struct net_device *dev)
struct dev_info *hw_priv = priv->adapter;
struct ksz_hw *hw = &hw_priv->hw;
struct ksz_port *port = &priv->port;
+ unsigned long next_jiffies;
int i;
int p;
int rc = 0;
+ next_jiffies = jiffies + HZ * 2;
priv->multicast = 0;
priv->promiscuous = 0;
@@ -5428,10 +5429,7 @@ static int netdev_open(struct net_device *dev)
if (rc)
return rc;
for (i = 0; i < hw->mib_port_cnt; i++) {
- if (next_jiffies < jiffies)
- next_jiffies = jiffies + HZ * 2;
- else
- next_jiffies += HZ * 1;
+ next_jiffies += HZ * 1;
hw_priv->counter[i].time = next_jiffies;
hw->port_mib[i].state = media_disconnected;
port_init_cnt(hw, i);
@@ -6563,6 +6561,7 @@ static void mib_read_work(struct work_struct *work)
struct dev_info *hw_priv =
container_of(work, struct dev_info, mib_read);
struct ksz_hw *hw = &hw_priv->hw;
+ unsigned long next_jiffies;
struct ksz_port_mib *mib;
int i;
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index db5a3edb4c3c..559ad94a44d0 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -975,7 +975,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
/* update statistics */
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += len;
- netif_rx_ni(skb);
+ netif_rx(skb);
}
}
/*
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 91a755efe2e6..c8fe8b31f07b 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -7,6 +7,8 @@
#include <linux/phy.h>
#include "lan743x_main.h"
#include "lan743x_ethtool.h"
+#include <linux/sched.h>
+#include <linux/iopoll.h>
/* eeprom */
#define LAN743X_EEPROM_MAGIC (0x74A5)
@@ -19,6 +21,10 @@
#define OTP_INDICATOR_1 (0xF3)
#define OTP_INDICATOR_2 (0xF7)
+#define LOCK_TIMEOUT_MAX_CNT (100) // 1 sec (10 msec * 100)
+
+#define LAN743X_CSR_READ_OP(offset) lan743x_csr_read(adapter, offset)
+
static int lan743x_otp_power_up(struct lan743x_adapter *adapter)
{
u32 reg_value;
@@ -149,6 +155,217 @@ static int lan743x_otp_write(struct lan743x_adapter *adapter, u32 offset,
return 0;
}
+static int lan743x_hs_syslock_acquire(struct lan743x_adapter *adapter,
+ u16 timeout)
+{
+ u16 timeout_cnt = 0;
+ u32 val;
+
+ do {
+ spin_lock(&adapter->eth_syslock_spinlock);
+ if (adapter->eth_syslock_acquire_cnt == 0) {
+ lan743x_csr_write(adapter, ETH_SYSTEM_SYS_LOCK_REG,
+ SYS_LOCK_REG_ENET_SS_LOCK_);
+ val = lan743x_csr_read(adapter,
+ ETH_SYSTEM_SYS_LOCK_REG);
+ if (val & SYS_LOCK_REG_ENET_SS_LOCK_) {
+ adapter->eth_syslock_acquire_cnt++;
+ WARN_ON(adapter->eth_syslock_acquire_cnt == 0);
+ spin_unlock(&adapter->eth_syslock_spinlock);
+ break;
+ }
+ } else {
+ adapter->eth_syslock_acquire_cnt++;
+ WARN_ON(adapter->eth_syslock_acquire_cnt == 0);
+ spin_unlock(&adapter->eth_syslock_spinlock);
+ break;
+ }
+
+ spin_unlock(&adapter->eth_syslock_spinlock);
+
+ if (timeout_cnt++ < timeout)
+ usleep_range(10000, 11000);
+ else
+ return -ETIMEDOUT;
+ } while (true);
+
+ return 0;
+}
+
+static void lan743x_hs_syslock_release(struct lan743x_adapter *adapter)
+{
+ u32 val;
+
+ spin_lock(&adapter->eth_syslock_spinlock);
+ WARN_ON(adapter->eth_syslock_acquire_cnt == 0);
+
+ if (adapter->eth_syslock_acquire_cnt) {
+ adapter->eth_syslock_acquire_cnt--;
+ if (adapter->eth_syslock_acquire_cnt == 0) {
+ lan743x_csr_write(adapter, ETH_SYSTEM_SYS_LOCK_REG, 0);
+ val = lan743x_csr_read(adapter,
+ ETH_SYSTEM_SYS_LOCK_REG);
+ WARN_ON((val & SYS_LOCK_REG_ENET_SS_LOCK_) != 0);
+ }
+ }
+
+ spin_unlock(&adapter->eth_syslock_spinlock);
+}
+
+static void lan743x_hs_otp_power_up(struct lan743x_adapter *adapter)
+{
+ u32 reg_value;
+
+ reg_value = lan743x_csr_read(adapter, HS_OTP_PWR_DN);
+ if (reg_value & OTP_PWR_DN_PWRDN_N_) {
+ reg_value &= ~OTP_PWR_DN_PWRDN_N_;
+ lan743x_csr_write(adapter, HS_OTP_PWR_DN, reg_value);
+ /* Flush the posted write so that the subsequent delay is
+ * guaranteed to start only after the write has reached the hardware.
+ */
+ lan743x_csr_read(adapter, HS_OTP_PWR_DN);
+ udelay(1);
+ }
+}
+
+static void lan743x_hs_otp_power_down(struct lan743x_adapter *adapter)
+{
+ u32 reg_value;
+
+ reg_value = lan743x_csr_read(adapter, HS_OTP_PWR_DN);
+ if (!(reg_value & OTP_PWR_DN_PWRDN_N_)) {
+ reg_value |= OTP_PWR_DN_PWRDN_N_;
+ lan743x_csr_write(adapter, HS_OTP_PWR_DN, reg_value);
+ /* Flush the posted write so that the subsequent delay is
+ * guaranteed to start only after the write has reached the hardware.
+ */
+ lan743x_csr_read(adapter, HS_OTP_PWR_DN);
+ udelay(1);
+ }
+}
+
+static void lan743x_hs_otp_set_address(struct lan743x_adapter *adapter,
+ u32 address)
+{
+ lan743x_csr_write(adapter, HS_OTP_ADDR_HIGH, (address >> 8) & 0x03);
+ lan743x_csr_write(adapter, HS_OTP_ADDR_LOW, address & 0xFF);
+}
+
+static void lan743x_hs_otp_read_go(struct lan743x_adapter *adapter)
+{
+ lan743x_csr_write(adapter, HS_OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
+ lan743x_csr_write(adapter, HS_OTP_CMD_GO, OTP_CMD_GO_GO_);
+}
+
+static int lan743x_hs_otp_cmd_cmplt_chk(struct lan743x_adapter *adapter)
+{
+ u32 val;
+
+ return readx_poll_timeout(LAN743X_CSR_READ_OP, HS_OTP_STATUS, val,
+ !(val & OTP_STATUS_BUSY_),
+ 80, 10000);
+}
+
+static int lan743x_hs_otp_read(struct lan743x_adapter *adapter, u32 offset,
+ u32 length, u8 *data)
+{
+ int ret;
+ int i;
+
+ ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+ if (ret < 0)
+ return ret;
+
+ lan743x_hs_otp_power_up(adapter);
+
+ ret = lan743x_hs_otp_cmd_cmplt_chk(adapter);
+ if (ret < 0)
+ goto power_down;
+
+ lan743x_hs_syslock_release(adapter);
+
+ for (i = 0; i < length; i++) {
+ ret = lan743x_hs_syslock_acquire(adapter,
+ LOCK_TIMEOUT_MAX_CNT);
+ if (ret < 0)
+ return ret;
+
+ lan743x_hs_otp_set_address(adapter, offset + i);
+
+ lan743x_hs_otp_read_go(adapter);
+ ret = lan743x_hs_otp_cmd_cmplt_chk(adapter);
+ if (ret < 0)
+ goto power_down;
+
+ data[i] = lan743x_csr_read(adapter, HS_OTP_READ_DATA);
+
+ lan743x_hs_syslock_release(adapter);
+ }
+
+ ret = lan743x_hs_syslock_acquire(adapter,
+ LOCK_TIMEOUT_MAX_CNT);
+ if (ret < 0)
+ return ret;
+
+power_down:
+ lan743x_hs_otp_power_down(adapter);
+ lan743x_hs_syslock_release(adapter);
+
+ return ret;
+}
+
+static int lan743x_hs_otp_write(struct lan743x_adapter *adapter, u32 offset,
+ u32 length, u8 *data)
+{
+ int ret;
+ int i;
+
+ ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+ if (ret < 0)
+ return ret;
+
+ lan743x_hs_otp_power_up(adapter);
+
+ ret = lan743x_hs_otp_cmd_cmplt_chk(adapter);
+ if (ret < 0)
+ goto power_down;
+
+ /* set to BYTE program mode */
+ lan743x_csr_write(adapter, HS_OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
+
+ lan743x_hs_syslock_release(adapter);
+
+ for (i = 0; i < length; i++) {
+ ret = lan743x_hs_syslock_acquire(adapter,
+ LOCK_TIMEOUT_MAX_CNT);
+ if (ret < 0)
+ return ret;
+
+ lan743x_hs_otp_set_address(adapter, offset + i);
+
+ lan743x_csr_write(adapter, HS_OTP_PRGM_DATA, data[i]);
+ lan743x_csr_write(adapter, HS_OTP_TST_CMD,
+ OTP_TST_CMD_PRGVRFY_);
+ lan743x_csr_write(adapter, HS_OTP_CMD_GO, OTP_CMD_GO_GO_);
+
+ ret = lan743x_hs_otp_cmd_cmplt_chk(adapter);
+ if (ret < 0)
+ goto power_down;
+
+ lan743x_hs_syslock_release(adapter);
+ }
+
+ ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+ if (ret < 0)
+ return ret;
+
+power_down:
+ lan743x_hs_otp_power_down(adapter);
+ lan743x_hs_syslock_release(adapter);
+
+ return ret;
+}
+
static int lan743x_eeprom_wait(struct lan743x_adapter *adapter)
{
unsigned long start_time = jiffies;
@@ -263,6 +480,100 @@ static int lan743x_eeprom_write(struct lan743x_adapter *adapter,
return 0;
}
+static int lan743x_hs_eeprom_cmd_cmplt_chk(struct lan743x_adapter *adapter)
+{
+ u32 val;
+
+ return readx_poll_timeout(LAN743X_CSR_READ_OP, HS_E2P_CMD, val,
+ (!(val & HS_E2P_CMD_EPC_BUSY_) ||
+ (val & HS_E2P_CMD_EPC_TIMEOUT_)),
+ 50, 10000);
+}
+
+static int lan743x_hs_eeprom_read(struct lan743x_adapter *adapter,
+ u32 offset, u32 length, u8 *data)
+{
+ int retval;
+ u32 val;
+ int i;
+
+ retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+ if (retval < 0)
+ return retval;
+
+ retval = lan743x_hs_eeprom_cmd_cmplt_chk(adapter);
+ lan743x_hs_syslock_release(adapter);
+ if (retval < 0)
+ return retval;
+
+ for (i = 0; i < length; i++) {
+ retval = lan743x_hs_syslock_acquire(adapter,
+ LOCK_TIMEOUT_MAX_CNT);
+ if (retval < 0)
+ return retval;
+
+ val = HS_E2P_CMD_EPC_BUSY_ | HS_E2P_CMD_EPC_CMD_READ_;
+ val |= (offset & HS_E2P_CMD_EPC_ADDR_MASK_);
+ lan743x_csr_write(adapter, HS_E2P_CMD, val);
+ retval = lan743x_hs_eeprom_cmd_cmplt_chk(adapter);
+ if (retval < 0) {
+ lan743x_hs_syslock_release(adapter);
+ return retval;
+ }
+
+ val = lan743x_csr_read(adapter, HS_E2P_DATA);
+
+ lan743x_hs_syslock_release(adapter);
+
+ data[i] = val & 0xFF;
+ offset++;
+ }
+
+ return 0;
+}
+
+static int lan743x_hs_eeprom_write(struct lan743x_adapter *adapter,
+ u32 offset, u32 length, u8 *data)
+{
+ int retval;
+ u32 val;
+ int i;
+
+ retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
+ if (retval < 0)
+ return retval;
+
+ retval = lan743x_hs_eeprom_cmd_cmplt_chk(adapter);
+ lan743x_hs_syslock_release(adapter);
+ if (retval < 0)
+ return retval;
+
+ for (i = 0; i < length; i++) {
+ retval = lan743x_hs_syslock_acquire(adapter,
+ LOCK_TIMEOUT_MAX_CNT);
+ if (retval < 0)
+ return retval;
+
+ /* Fill data register */
+ val = data[i];
+ lan743x_csr_write(adapter, HS_E2P_DATA, val);
+
+ /* Send "write" command */
+ val = HS_E2P_CMD_EPC_BUSY_ | HS_E2P_CMD_EPC_CMD_WRITE_;
+ val |= (offset & HS_E2P_CMD_EPC_ADDR_MASK_);
+ lan743x_csr_write(adapter, HS_E2P_CMD, val);
+
+ retval = lan743x_hs_eeprom_cmd_cmplt_chk(adapter);
+ lan743x_hs_syslock_release(adapter);
+ if (retval < 0)
+ return retval;
+
+ offset++;
+ }
+
+ return 0;
+}
+
static void lan743x_ethtool_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
@@ -304,10 +615,21 @@ static int lan743x_ethtool_get_eeprom(struct net_device *netdev,
struct lan743x_adapter *adapter = netdev_priv(netdev);
int ret = 0;
- if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP)
- ret = lan743x_otp_read(adapter, ee->offset, ee->len, data);
- else
- ret = lan743x_eeprom_read(adapter, ee->offset, ee->len, data);
+ if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP) {
+ if (adapter->is_pci11x1x)
+ ret = lan743x_hs_otp_read(adapter, ee->offset,
+ ee->len, data);
+ else
+ ret = lan743x_otp_read(adapter, ee->offset,
+ ee->len, data);
+ } else {
+ if (adapter->is_pci11x1x)
+ ret = lan743x_hs_eeprom_read(adapter, ee->offset,
+ ee->len, data);
+ else
+ ret = lan743x_eeprom_read(adapter, ee->offset,
+ ee->len, data);
+ }
return ret;
}
@@ -321,13 +643,22 @@ static int lan743x_ethtool_set_eeprom(struct net_device *netdev,
if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP) {
/* Beware! OTP is One Time Programming ONLY! */
if (ee->magic == LAN743X_OTP_MAGIC) {
- ret = lan743x_otp_write(adapter, ee->offset,
- ee->len, data);
+ if (adapter->is_pci11x1x)
+ ret = lan743x_hs_otp_write(adapter, ee->offset,
+ ee->len, data);
+ else
+ ret = lan743x_otp_write(adapter, ee->offset,
+ ee->len, data);
}
} else {
if (ee->magic == LAN743X_EEPROM_MAGIC) {
- ret = lan743x_eeprom_write(adapter, ee->offset,
- ee->len, data);
+ if (adapter->is_pci11x1x)
+ ret = lan743x_hs_eeprom_write(adapter,
+ ee->offset,
+ ee->len, data);
+ else
+ ret = lan743x_eeprom_write(adapter, ee->offset,
+ ee->len, data);
}
}
@@ -365,6 +696,14 @@ static const char lan743x_set1_sw_cnt_strings[][ETH_GSTRING_LEN] = {
"RX Queue 3 Frames",
};
+static const char lan743x_tx_queue_cnt_strings[][ETH_GSTRING_LEN] = {
+ "TX Queue 0 Frames",
+ "TX Queue 1 Frames",
+ "TX Queue 2 Frames",
+ "TX Queue 3 Frames",
+ "TX Total Queue Frames",
+};
+
static const char lan743x_set2_hw_cnt_strings[][ETH_GSTRING_LEN] = {
"RX Total Frames",
"EEE RX LPI Transitions",
@@ -462,6 +801,8 @@ static const char lan743x_priv_flags_strings[][ETH_GSTRING_LEN] = {
static void lan743x_ethtool_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
switch (stringset) {
case ETH_SS_STATS:
memcpy(data, lan743x_set0_hw_cnt_strings,
@@ -473,6 +814,13 @@ static void lan743x_ethtool_get_strings(struct net_device *netdev,
sizeof(lan743x_set1_sw_cnt_strings)],
lan743x_set2_hw_cnt_strings,
sizeof(lan743x_set2_hw_cnt_strings));
+ if (adapter->is_pci11x1x) {
+ memcpy(&data[sizeof(lan743x_set0_hw_cnt_strings) +
+ sizeof(lan743x_set1_sw_cnt_strings) +
+ sizeof(lan743x_set2_hw_cnt_strings)],
+ lan743x_tx_queue_cnt_strings,
+ sizeof(lan743x_tx_queue_cnt_strings));
+ }
break;
case ETH_SS_PRIV_FLAGS:
memcpy(data, lan743x_priv_flags_strings,
@@ -486,7 +834,9 @@ static void lan743x_ethtool_get_ethtool_stats(struct net_device *netdev,
u64 *data)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
+ u64 total_queue_count = 0;
int data_index = 0;
+ u64 pkt_cnt;
u32 buf;
int i;
@@ -500,6 +850,14 @@ static void lan743x_ethtool_get_ethtool_stats(struct net_device *netdev,
buf = lan743x_csr_read(adapter, lan743x_set2_hw_cnt_addr[i]);
data[data_index++] = (u64)buf;
}
+ if (adapter->is_pci11x1x) {
+ for (i = 0; i < ARRAY_SIZE(adapter->tx); i++) {
+ pkt_cnt = (u64)(adapter->tx[i].frame_count);
+ data[data_index++] = pkt_cnt;
+ total_queue_count += pkt_cnt;
+ }
+ data[data_index++] = total_queue_count;
+ }
}
static u32 lan743x_ethtool_get_priv_flags(struct net_device *netdev)
@@ -520,6 +878,8 @@ static int lan743x_ethtool_set_priv_flags(struct net_device *netdev, u32 flags)
static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset)
{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
switch (sset) {
case ETH_SS_STATS:
{
@@ -528,6 +888,8 @@ static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset)
ret = ARRAY_SIZE(lan743x_set0_hw_cnt_strings);
ret += ARRAY_SIZE(lan743x_set1_sw_cnt_strings);
ret += ARRAY_SIZE(lan743x_set2_hw_cnt_strings);
+ if (adapter->is_pci11x1x)
+ ret += ARRAY_SIZE(lan743x_tx_queue_cnt_strings);
return ret;
}
case ETH_SS_PRIV_FLAGS:
@@ -750,7 +1112,7 @@ static int lan743x_ethtool_set_eee(struct net_device *netdev,
}
if (eee->eee_enabled) {
- ret = phy_init_eee(phydev, 0);
+ ret = phy_init_eee(phydev, false);
if (ret) {
netif_err(adapter, drv, adapter->netdev,
"EEE initialization failed\n");
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 8c6390d95158..9ac0c2b96a15 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -18,6 +18,51 @@
#include "lan743x_main.h"
#include "lan743x_ethtool.h"
+#define MMD_ACCESS_ADDRESS 0
+#define MMD_ACCESS_WRITE 1
+#define MMD_ACCESS_READ 2
+#define MMD_ACCESS_READ_INC 3
+
+static void pci11x1x_strap_get_status(struct lan743x_adapter *adapter)
+{
+ u32 chip_rev;
+ u32 strap;
+
+ strap = lan743x_csr_read(adapter, STRAP_READ);
+ if (strap & STRAP_READ_USE_SGMII_EN_) {
+ if (strap & STRAP_READ_SGMII_EN_)
+ adapter->is_sgmii_en = true;
+ else
+ adapter->is_sgmii_en = false;
+ netif_dbg(adapter, drv, adapter->netdev,
+ "STRAP_READ: 0x%08X\n", strap);
+ } else {
+ chip_rev = lan743x_csr_read(adapter, FPGA_REV);
+ if (chip_rev) {
+ if (chip_rev & FPGA_SGMII_OP)
+ adapter->is_sgmii_en = true;
+ else
+ adapter->is_sgmii_en = false;
+ netif_dbg(adapter, drv, adapter->netdev,
+ "FPGA_REV: 0x%08X\n", chip_rev);
+ } else {
+ adapter->is_sgmii_en = false;
+ }
+ }
+}
+
+static bool is_pci11x1x_chip(struct lan743x_adapter *adapter)
+{
+ struct lan743x_csr *csr = &adapter->csr;
+ u32 id_rev = csr->id_rev;
+
+ if (((id_rev & 0xFFFF0000) == ID_REV_ID_A011_) ||
+ ((id_rev & 0xFFFF0000) == ID_REV_ID_A041_)) {
+ return true;
+ }
+ return false;
+}
+
static void lan743x_pci_cleanup(struct lan743x_adapter *adapter)
{
pci_release_selected_regions(adapter->pdev,
@@ -250,7 +295,7 @@ static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags)
}
}
if (int_sts & INT_BIT_ALL_TX_) {
- for (channel = 0; channel < LAN743X_USED_TX_CHANNELS;
+ for (channel = 0; channel < adapter->used_tx_channels;
channel++) {
u32 int_bit = INT_BIT_DMA_TX_(channel);
@@ -410,7 +455,7 @@ static u32 lan743x_intr_get_vector_flags(struct lan743x_adapter *adapter,
{
int index;
- for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) {
+ for (index = 0; index < adapter->max_vector_count; index++) {
if (adapter->intr.vector_list[index].int_mask & int_mask)
return adapter->intr.vector_list[index].flags;
}
@@ -423,9 +468,12 @@ static void lan743x_intr_close(struct lan743x_adapter *adapter)
int index = 0;
lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
- lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF);
+ if (adapter->is_pci11x1x)
+ lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x0000FFFF);
+ else
+ lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF);
- for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) {
+ for (index = 0; index < intr->number_of_vectors; index++) {
if (intr->flags & INTR_FLAG_IRQ_REQUESTED(index)) {
lan743x_intr_unregister_isr(adapter, index);
intr->flags &= ~INTR_FLAG_IRQ_REQUESTED(index);
@@ -445,9 +493,11 @@ static void lan743x_intr_close(struct lan743x_adapter *adapter)
static int lan743x_intr_open(struct lan743x_adapter *adapter)
{
- struct msix_entry msix_entries[LAN743X_MAX_VECTOR_COUNT];
+ struct msix_entry msix_entries[PCI11X1X_MAX_VECTOR_COUNT];
struct lan743x_intr *intr = &adapter->intr;
+ unsigned int used_tx_channels;
u32 int_vec_en_auto_clr = 0;
+ u8 max_vector_count;
u32 int_vec_map0 = 0;
u32 int_vec_map1 = 0;
int ret = -ENODEV;
@@ -457,13 +507,15 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
intr->number_of_vectors = 0;
/* Try to set up MSIX interrupts */
+ max_vector_count = adapter->max_vector_count;
memset(&msix_entries[0], 0,
- sizeof(struct msix_entry) * LAN743X_MAX_VECTOR_COUNT);
- for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++)
+ sizeof(struct msix_entry) * max_vector_count);
+ for (index = 0; index < max_vector_count; index++)
msix_entries[index].entry = index;
+ used_tx_channels = adapter->used_tx_channels;
ret = pci_enable_msix_range(adapter->pdev,
msix_entries, 1,
- 1 + LAN743X_USED_TX_CHANNELS +
+ 1 + used_tx_channels +
LAN743X_USED_RX_CHANNELS);
if (ret > 0) {
@@ -556,8 +608,15 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
lan743x_csr_write(adapter, INT_MOD_CFG5, LAN743X_INT_MOD);
lan743x_csr_write(adapter, INT_MOD_CFG6, LAN743X_INT_MOD);
lan743x_csr_write(adapter, INT_MOD_CFG7, LAN743X_INT_MOD);
- lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432);
- lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001);
+ if (adapter->is_pci11x1x) {
+ lan743x_csr_write(adapter, INT_MOD_CFG8, LAN743X_INT_MOD);
+ lan743x_csr_write(adapter, INT_MOD_CFG9, LAN743X_INT_MOD);
+ lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00007654);
+ lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00003210);
+ } else {
+ lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432);
+ lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001);
+ }
lan743x_csr_write(adapter, INT_MOD_MAP2, 0x00FFFFFF);
}
@@ -570,8 +629,8 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
if (intr->number_of_vectors > 1) {
int number_of_tx_vectors = intr->number_of_vectors - 1;
- if (number_of_tx_vectors > LAN743X_USED_TX_CHANNELS)
- number_of_tx_vectors = LAN743X_USED_TX_CHANNELS;
+ if (number_of_tx_vectors > used_tx_channels)
+ number_of_tx_vectors = used_tx_channels;
flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
@@ -609,9 +668,9 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
INT_VEC_EN_(vector));
}
}
- if ((intr->number_of_vectors - LAN743X_USED_TX_CHANNELS) > 1) {
+ if ((intr->number_of_vectors - used_tx_channels) > 1) {
int number_of_rx_vectors = intr->number_of_vectors -
- LAN743X_USED_TX_CHANNELS - 1;
+ used_tx_channels - 1;
if (number_of_rx_vectors > LAN743X_USED_RX_CHANNELS)
number_of_rx_vectors = LAN743X_USED_RX_CHANNELS;
@@ -632,7 +691,7 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
}
for (index = 0; index < number_of_rx_vectors; index++) {
- int vector = index + 1 + LAN743X_USED_TX_CHANNELS;
+ int vector = index + 1 + used_tx_channels;
u32 int_bit = INT_BIT_DMA_RX_(index);
/* map RX interrupt to vector */
@@ -760,6 +819,96 @@ static int lan743x_mdiobus_write(struct mii_bus *bus,
return ret;
}
+static u32 lan743x_mac_mmd_access(int id, int index, int op)
+{
+ u16 dev_addr;
+ u32 ret;
+
+ dev_addr = (index >> 16) & 0x1f;
+ ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
+ MAC_MII_ACC_PHY_ADDR_MASK_;
+ ret |= (dev_addr << MAC_MII_ACC_MIIMMD_SHIFT_) &
+ MAC_MII_ACC_MIIMMD_MASK_;
+ if (op == MMD_ACCESS_WRITE)
+ ret |= MAC_MII_ACC_MIICMD_WRITE_;
+ else if (op == MMD_ACCESS_READ)
+ ret |= MAC_MII_ACC_MIICMD_READ_;
+ else if (op == MMD_ACCESS_READ_INC)
+ ret |= MAC_MII_ACC_MIICMD_READ_INC_;
+ else
+ ret |= MAC_MII_ACC_MIICMD_ADDR_;
+ ret |= (MAC_MII_ACC_MII_BUSY_ | MAC_MII_ACC_MIICL45_);
+
+ return ret;
+}
+
+static int lan743x_mdiobus_c45_read(struct mii_bus *bus, int phy_id, int index)
+{
+ struct lan743x_adapter *adapter = bus->priv;
+ u32 mmd_access;
+ int ret;
+
+ /* confirm MII not busy */
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+ if (index & MII_ADDR_C45) {
+ /* Load Register Address */
+ lan743x_csr_write(adapter, MAC_MII_DATA, (u32)(index & 0xffff));
+ mmd_access = lan743x_mac_mmd_access(phy_id, index,
+ MMD_ACCESS_ADDRESS);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+ /* Read Data */
+ mmd_access = lan743x_mac_mmd_access(phy_id, index,
+ MMD_ACCESS_READ);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+ ret = lan743x_csr_read(adapter, MAC_MII_DATA);
+ return (int)(ret & 0xFFFF);
+ }
+
+ ret = lan743x_mdiobus_read(bus, phy_id, index);
+ return ret;
+}
+
+static int lan743x_mdiobus_c45_write(struct mii_bus *bus,
+ int phy_id, int index, u16 regval)
+{
+ struct lan743x_adapter *adapter = bus->priv;
+ u32 mmd_access;
+ int ret;
+
+ /* confirm MII not busy */
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+ if (index & MII_ADDR_C45) {
+ /* Load Register Address */
+ lan743x_csr_write(adapter, MAC_MII_DATA, (u32)(index & 0xffff));
+ mmd_access = lan743x_mac_mmd_access(phy_id, index,
+ MMD_ACCESS_ADDRESS);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+ /* Write Data */
+ lan743x_csr_write(adapter, MAC_MII_DATA, (u32)regval);
+ mmd_access = lan743x_mac_mmd_access(phy_id, index,
+ MMD_ACCESS_WRITE);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ } else {
+ ret = lan743x_mdiobus_write(bus, phy_id, index, regval);
+ }
+
+ return ret;
+}
+
static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
u8 *addr)
{
@@ -1627,6 +1776,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
dev_kfree_skb_irq(skb);
goto unlock;
}
+ tx->frame_count++;
if (gso)
lan743x_tx_frame_add_lso(tx, frame_length, nr_frags);
@@ -2491,7 +2641,8 @@ static int lan743x_netdev_close(struct net_device *netdev)
struct lan743x_adapter *adapter = netdev_priv(netdev);
int index;
- lan743x_tx_close(&adapter->tx[0]);
+ for (index = 0; index < adapter->used_tx_channels; index++)
+ lan743x_tx_close(&adapter->tx[index]);
for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++)
lan743x_rx_close(&adapter->rx[index]);
@@ -2537,12 +2688,19 @@ static int lan743x_netdev_open(struct net_device *netdev)
goto close_rx;
}
- ret = lan743x_tx_open(&adapter->tx[0]);
- if (ret)
- goto close_rx;
-
+ for (index = 0; index < adapter->used_tx_channels; index++) {
+ ret = lan743x_tx_open(&adapter->tx[index]);
+ if (ret)
+ goto close_tx;
+ }
return 0;
+close_tx:
+ for (index = 0; index < adapter->used_tx_channels; index++) {
+ if (adapter->tx[index].ring_cpu_ptr)
+ lan743x_tx_close(&adapter->tx[index]);
+ }
+
close_rx:
for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
if (adapter->rx[index].ring_cpu_ptr)
@@ -2569,8 +2727,12 @@ static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
+ u8 ch = 0;
- return lan743x_tx_xmit_frame(&adapter->tx[0], skb);
+ if (adapter->is_pci11x1x)
+ ch = skb->queue_mapping % PCI11X1X_USED_TX_CHANNELS;
+
+ return lan743x_tx_xmit_frame(&adapter->tx[ch], skb);
}
static int lan743x_netdev_ioctl(struct net_device *netdev,
@@ -2701,6 +2863,19 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
int index;
int ret;
+ adapter->is_pci11x1x = is_pci11x1x_chip(adapter);
+ if (adapter->is_pci11x1x) {
+ adapter->max_tx_channels = PCI11X1X_MAX_TX_CHANNELS;
+ adapter->used_tx_channels = PCI11X1X_USED_TX_CHANNELS;
+ adapter->max_vector_count = PCI11X1X_MAX_VECTOR_COUNT;
+ pci11x1x_strap_get_status(adapter);
+ spin_lock_init(&adapter->eth_syslock_spinlock);
+ } else {
+ adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
+ adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
+ adapter->max_vector_count = LAN743X_MAX_VECTOR_COUNT;
+ }
+
adapter->intr.irq = adapter->pdev->irq;
lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
@@ -2731,15 +2906,19 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
adapter->rx[index].channel_number = index;
}
- tx = &adapter->tx[0];
- tx->adapter = adapter;
- tx->channel_number = 0;
- spin_lock_init(&tx->ring_lock);
+ for (index = 0; index < adapter->used_tx_channels; index++) {
+ tx = &adapter->tx[index];
+ tx->adapter = adapter;
+ tx->channel_number = index;
+ spin_lock_init(&tx->ring_lock);
+ }
+
return 0;
}
static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
{
+ u32 sgmii_ctl;
int ret;
adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
@@ -2749,9 +2928,35 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
}
adapter->mdiobus->priv = (void *)adapter;
- adapter->mdiobus->read = lan743x_mdiobus_read;
- adapter->mdiobus->write = lan743x_mdiobus_write;
- adapter->mdiobus->name = "lan743x-mdiobus";
+ if (adapter->is_pci11x1x) {
+ if (adapter->is_sgmii_en) {
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
+ sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+ netif_dbg(adapter, drv, adapter->netdev,
+ "SGMII operation\n");
+ } else {
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
+ sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+ netif_dbg(adapter, drv, adapter->netdev,
+ "(R)GMII operation\n");
+ }
+
+ adapter->mdiobus->probe_capabilities = MDIOBUS_C22_C45;
+ adapter->mdiobus->read = lan743x_mdiobus_c45_read;
+ adapter->mdiobus->write = lan743x_mdiobus_c45_write;
+ adapter->mdiobus->name = "lan743x-mdiobus-c45";
+ netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus-c45\n");
+ } else {
+ adapter->mdiobus->read = lan743x_mdiobus_read;
+ adapter->mdiobus->write = lan743x_mdiobus_write;
+ adapter->mdiobus->name = "lan743x-mdiobus";
+ netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus\n");
+ }
+
snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
"pci-%s", pci_name(adapter->pdev));
@@ -2786,8 +2991,17 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
struct net_device *netdev = NULL;
int ret = -ENODEV;
- netdev = devm_alloc_etherdev(&pdev->dev,
- sizeof(struct lan743x_adapter));
+ if (id->device == PCI_DEVICE_ID_SMSC_A011 ||
+ id->device == PCI_DEVICE_ID_SMSC_A041) {
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+ sizeof(struct lan743x_adapter),
+ PCI11X1X_USED_TX_CHANNELS,
+ LAN743X_USED_RX_CHANNELS);
+ } else {
+ netdev = devm_alloc_etherdev(&pdev->dev,
+ sizeof(struct lan743x_adapter));
+ }
+
if (!netdev)
goto return_error;
@@ -3056,6 +3270,8 @@ static const struct dev_pm_ops lan743x_pm_ops = {
static const struct pci_device_id lan743x_pcidev_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7431) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A011) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A041) },
{ 0, }
};
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index aaf7aaeaba0c..1ca5f3216403 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -16,8 +16,13 @@
#define ID_REV_ID_MASK_ (0xFFFF0000)
#define ID_REV_ID_LAN7430_ (0x74300000)
#define ID_REV_ID_LAN7431_ (0x74310000)
-#define ID_REV_IS_VALID_CHIP_ID_(id_rev) \
- (((id_rev) & 0xFFF00000) == 0x74300000)
+#define ID_REV_ID_LAN743X_ (0x74300000)
+#define ID_REV_ID_A011_ (0xA0110000) // PCI11010
+#define ID_REV_ID_A041_ (0xA0410000) // PCI11414
+#define ID_REV_ID_A0X1_ (0xA0010000)
+#define ID_REV_IS_VALID_CHIP_ID_(id_rev) \
+ ((((id_rev) & 0xFFF00000) == ID_REV_ID_LAN743X_) || \
+ (((id_rev) & 0xFF0F0000) == ID_REV_ID_A0X1_))
#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF)
#define ID_REV_CHIP_REV_A0_ (0x00000000)
#define ID_REV_CHIP_REV_B0_ (0x00000010)
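A quick standalone check of the widened chip-ID test above (compiled outside the kernel, constants copied from the header): the 0xFFF00000 mask still matches the LAN7430/7431 family, and the new 0xFF0F0000 mask folds both PCI11010 (0xA011) and PCI11414 (0xA041) onto ID_REV_ID_A0X1_.

#include <stdio.h>

static int id_rev_is_valid(unsigned int id_rev)
{
	return ((id_rev & 0xFFF00000) == 0x74300000) ||	/* LAN7430/LAN7431 */
	       ((id_rev & 0xFF0F0000) == 0xA0010000);	/* PCI11010/PCI11414 */
}

int main(void)
{
	unsigned int ids[] = { 0x74300000, 0x74310000, 0xA0110000, 0xA0410000, 0x12340000 };

	for (int i = 0; i < 5; i++)
		printf("0x%08X -> %s\n", ids[i], id_rev_is_valid(ids[i]) ? "valid" : "invalid");
	return 0;
}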
@@ -25,6 +30,17 @@
#define FPGA_REV (0x04)
#define FPGA_REV_GET_MINOR_(fpga_rev) (((fpga_rev) >> 8) & 0x000000FF)
#define FPGA_REV_GET_MAJOR_(fpga_rev) ((fpga_rev) & 0x000000FF)
+#define FPGA_SGMII_OP BIT(24)
+
+#define STRAP_READ (0x0C)
+#define STRAP_READ_USE_SGMII_EN_ BIT(22)
+#define STRAP_READ_SGMII_EN_ BIT(6)
+#define STRAP_READ_SGMII_REFCLK_ BIT(5)
+#define STRAP_READ_SGMII_2_5G_ BIT(4)
+#define STRAP_READ_BASE_X_ BIT(3)
+#define STRAP_READ_RGMII_TXC_DELAY_EN_ BIT(2)
+#define STRAP_READ_RGMII_RXC_DELAY_EN_ BIT(1)
+#define STRAP_READ_ADV_PM_DISABLE_ BIT(0)
#define HW_CFG (0x010)
#define HW_CFG_RELOAD_TYPE_ALL_ (0x00000FC0)
@@ -70,6 +86,40 @@
#define E2P_DATA (0x044)
+/* Hearthstone top level & System Reg Addresses */
+#define ETH_CTRL_REG_ADDR_BASE (0x0000)
+#define ETH_SYS_REG_ADDR_BASE (0x4000)
+#define CONFIG_REG_ADDR_BASE (0x0000)
+#define ETH_EEPROM_REG_ADDR_BASE (0x0E00)
+#define ETH_OTP_REG_ADDR_BASE (0x1000)
+#define SYS_LOCK_REG (0x00A0)
+#define SYS_LOCK_REG_MAIN_LOCK_ BIT(7)
+#define SYS_LOCK_REG_GEN_PERI_LOCK_ BIT(5)
+#define SYS_LOCK_REG_SPI_PERI_LOCK_ BIT(4)
+#define SYS_LOCK_REG_SMBUS_PERI_LOCK_ BIT(3)
+#define SYS_LOCK_REG_UART_SS_LOCK_ BIT(2)
+#define SYS_LOCK_REG_ENET_SS_LOCK_ BIT(1)
+#define SYS_LOCK_REG_USB_SS_LOCK_ BIT(0)
+#define ETH_SYSTEM_SYS_LOCK_REG (ETH_SYS_REG_ADDR_BASE + \
+ CONFIG_REG_ADDR_BASE + \
+ SYS_LOCK_REG)
+#define HS_EEPROM_REG_ADDR_BASE (ETH_SYS_REG_ADDR_BASE + \
+ ETH_EEPROM_REG_ADDR_BASE)
+#define HS_E2P_CMD (HS_EEPROM_REG_ADDR_BASE + 0x0000)
+#define HS_E2P_CMD_EPC_BUSY_ BIT(31)
+#define HS_E2P_CMD_EPC_CMD_WRITE_ GENMASK(29, 28)
+#define HS_E2P_CMD_EPC_CMD_READ_ (0x0)
+#define HS_E2P_CMD_EPC_TIMEOUT_ BIT(17)
+#define HS_E2P_CMD_EPC_ADDR_MASK_ GENMASK(15, 0)
+#define HS_E2P_DATA (HS_EEPROM_REG_ADDR_BASE + 0x0004)
+#define HS_E2P_DATA_MASK_ GENMASK(7, 0)
+#define HS_E2P_CFG (HS_EEPROM_REG_ADDR_BASE + 0x0008)
+#define HS_E2P_CFG_I2C_PULSE_MASK_ GENMASK(19, 16)
+#define HS_E2P_CFG_EEPROM_SIZE_SEL_ BIT(12)
+#define HS_E2P_CFG_I2C_BAUD_RATE_MASK_ GENMASK(9, 8)
+#define HS_E2P_CFG_TEST_EEPR_TO_BYP_ BIT(0)
+#define HS_E2P_PAD_CTL (HS_EEPROM_REG_ADDR_BASE + 0x000C)
+
#define GPIO_CFG0 (0x050)
#define GPIO_CFG0_GPIO_DIR_BIT_(bit) BIT(16 + (bit))
#define GPIO_CFG0_GPIO_DATA_BIT_(bit) BIT(0 + (bit))
@@ -135,6 +185,13 @@
#define MAC_RX_ADDRL (0x11C)
#define MAC_MII_ACC (0x120)
+#define MAC_MII_ACC_MDC_CYCLE_SHIFT_ (16)
+#define MAC_MII_ACC_MDC_CYCLE_MASK_ (0x00070000)
+#define MAC_MII_ACC_MDC_CYCLE_2_5MHZ_ (0)
+#define MAC_MII_ACC_MDC_CYCLE_5MHZ_ (1)
+#define MAC_MII_ACC_MDC_CYCLE_12_5MHZ_ (2)
+#define MAC_MII_ACC_MDC_CYCLE_25MHZ_ (3)
+#define MAC_MII_ACC_MDC_CYCLE_1_25MHZ_ (4)
#define MAC_MII_ACC_PHY_ADDR_SHIFT_ (11)
#define MAC_MII_ACC_PHY_ADDR_MASK_ (0x0000F800)
#define MAC_MII_ACC_MIIRINDA_SHIFT_ (6)
@@ -143,6 +200,15 @@
#define MAC_MII_ACC_MII_WRITE_ (0x00000002)
#define MAC_MII_ACC_MII_BUSY_ BIT(0)
+#define MAC_MII_ACC_MIIMMD_SHIFT_ (6)
+#define MAC_MII_ACC_MIIMMD_MASK_ (0x000007C0)
+#define MAC_MII_ACC_MIICL45_ BIT(3)
+#define MAC_MII_ACC_MIICMD_MASK_ (0x00000006)
+#define MAC_MII_ACC_MIICMD_ADDR_ (0x00000000)
+#define MAC_MII_ACC_MIICMD_WRITE_ (0x00000002)
+#define MAC_MII_ACC_MIICMD_READ_ (0x00000004)
+#define MAC_MII_ACC_MIICMD_READ_INC_ (0x00000006)
+
#define MAC_MII_DATA (0x124)
#define MAC_EEE_TX_LPI_REQ_DLY_CNT (0x130)
@@ -214,6 +280,11 @@
#define MAC_WUCSR2 (0x600)
+#define SGMII_CTL (0x728)
+#define SGMII_CTL_SGMII_ENABLE_ BIT(31)
+#define SGMII_CTL_LINK_STATUS_SOURCE_ BIT(8)
+#define SGMII_CTL_SGMII_POWER_DN_ BIT(1)
+
#define INT_STS (0x780)
#define INT_BIT_DMA_RX_(channel) BIT(24 + (channel))
#define INT_BIT_ALL_RX_ (0x0F000000)
@@ -261,8 +332,11 @@
#define INT_MOD_CFG5 (0x7D4)
#define INT_MOD_CFG6 (0x7D8)
#define INT_MOD_CFG7 (0x7DC)
+#define INT_MOD_CFG8 (0x7E0)
+#define INT_MOD_CFG9 (0x7E4)
#define PTP_CMD_CTL (0x0A00)
+#define PTP_CMD_CTL_PTP_LTC_TARGET_READ_ BIT(13)
#define PTP_CMD_CTL_PTP_CLK_STP_NSEC_ BIT(6)
#define PTP_CMD_CTL_PTP_CLOCK_STEP_SEC_ BIT(5)
#define PTP_CMD_CTL_PTP_CLOCK_LOAD_ BIT(4)
@@ -284,9 +358,51 @@
(((value) & 0x7) << (1 + ((channel) << 2)))
#define PTP_GENERAL_CONFIG_RELOAD_ADD_X_(channel) (BIT((channel) << 2))
+#define HS_PTP_GENERAL_CONFIG (0x0A04)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_(channel) \
+ (0xf << (4 + ((channel) << 2)))
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_ (0)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_500NS_ (1)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_1US_ (2)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_5US_ (3)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_ (4)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_50US_ (5)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_ (6)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_500US_ (7)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_ (8)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_5MS_ (9)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_ (10)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_50MS_ (11)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100MS_ (12)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_ (13)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_TOGG_ (14)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_INT_ (15)
+#define HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_(channel, value) \
+ (((value) & 0xf) << (4 + ((channel) << 2)))
+#define HS_PTP_GENERAL_CONFIG_EVENT_POL_X_(channel) (BIT(1 + ((channel) * 2)))
+#define HS_PTP_GENERAL_CONFIG_RELOAD_ADD_X_(channel) (BIT((channel) * 2))
+
#define PTP_INT_STS (0x0A08)
+#define PTP_INT_IO_FE_MASK_ GENMASK(31, 24)
+#define PTP_INT_IO_FE_SHIFT_ (24)
+#define PTP_INT_IO_FE_SET_(channel) BIT(24 + (channel))
+#define PTP_INT_IO_RE_MASK_ GENMASK(23, 16)
+#define PTP_INT_IO_RE_SHIFT_ (16)
+#define PTP_INT_IO_RE_SET_(channel) BIT(16 + (channel))
+#define PTP_INT_TX_TS_OVRFL_INT_ BIT(14)
+#define PTP_INT_TX_SWTS_ERR_INT_ BIT(13)
+#define PTP_INT_TX_TS_INT_ BIT(12)
+#define PTP_INT_RX_TS_OVRFL_INT_ BIT(9)
+#define PTP_INT_RX_TS_INT_ BIT(8)
+#define PTP_INT_TIMER_INT_B_ BIT(1)
+#define PTP_INT_TIMER_INT_A_ BIT(0)
#define PTP_INT_EN_SET (0x0A0C)
+#define PTP_INT_EN_FE_EN_SET_(channel) BIT(24 + (channel))
+#define PTP_INT_EN_RE_EN_SET_(channel) BIT(16 + (channel))
+#define PTP_INT_EN_TIMER_SET_(channel) BIT(channel)
#define PTP_INT_EN_CLR (0x0A10)
+#define PTP_INT_EN_FE_EN_CLR_(channel) BIT(24 + (channel))
+#define PTP_INT_EN_RE_EN_CLR_(channel) BIT(16 + (channel))
#define PTP_INT_BIT_TX_SWTS_ERR_ BIT(13)
#define PTP_INT_BIT_TX_TS_ BIT(12)
#define PTP_INT_BIT_TIMER_B_ BIT(1)
@@ -304,6 +420,16 @@
#define PTP_CLOCK_TARGET_NS_X(channel) (0x0A34 + ((channel) << 4))
#define PTP_CLOCK_TARGET_RELOAD_SEC_X(channel) (0x0A38 + ((channel) << 4))
#define PTP_CLOCK_TARGET_RELOAD_NS_X(channel) (0x0A3C + ((channel) << 4))
+#define PTP_LTC_SET_SEC_HI (0x0A50)
+#define PTP_LTC_SET_SEC_HI_SEC_47_32_MASK_ GENMASK(15, 0)
+#define PTP_VERSION (0x0A54)
+#define PTP_VERSION_TX_UP_MASK_ GENMASK(31, 24)
+#define PTP_VERSION_TX_LO_MASK_ GENMASK(23, 16)
+#define PTP_VERSION_RX_UP_MASK_ GENMASK(15, 8)
+#define PTP_VERSION_RX_LO_MASK_ GENMASK(7, 0)
+#define PTP_IO_SEL (0x0A58)
+#define PTP_IO_SEL_MASK_ GENMASK(10, 8)
+#define PTP_IO_SEL_SHIFT_ (8)
#define PTP_LATENCY (0x0A5C)
#define PTP_LATENCY_TX_SET_(tx_latency) (((u32)(tx_latency)) << 16)
#define PTP_LATENCY_RX_SET_(rx_latency) \
@@ -328,6 +454,59 @@
#define PTP_TX_MSG_HEADER_MSG_TYPE_ (0x000F0000)
#define PTP_TX_MSG_HEADER_MSG_TYPE_SYNC_ (0x00000000)
+#define PTP_TX_CAP_INFO (0x0AB8)
+#define PTP_TX_CAP_INFO_TX_CH_MASK_ GENMASK(1, 0)
+#define PTP_TX_DOMAIN (0x0ABC)
+#define PTP_TX_DOMAIN_MASK_ GENMASK(23, 16)
+#define PTP_TX_DOMAIN_RANGE_EN_ BIT(15)
+#define PTP_TX_DOMAIN_RANGE_MASK_ GENMASK(7, 0)
+#define PTP_TX_SDOID (0x0AC0)
+#define PTP_TX_SDOID_MASK_ GENMASK(23, 16)
+#define PTP_TX_SDOID_RANGE_EN_ BIT(15)
+#define PTP_TX_SDOID_11_0_MASK_ GENMASK(7, 0)
+#define PTP_IO_CAP_CONFIG (0x0AC4)
+#define PTP_IO_CAP_CONFIG_LOCK_FE_(channel) BIT(24 + (channel))
+#define PTP_IO_CAP_CONFIG_LOCK_RE_(channel) BIT(16 + (channel))
+#define PTP_IO_CAP_CONFIG_FE_CAP_EN_(channel) BIT(8 + (channel))
+#define PTP_IO_CAP_CONFIG_RE_CAP_EN_(channel) BIT(0 + (channel))
+#define PTP_IO_RE_LTC_SEC_CAP_X (0x0AC8)
+#define PTP_IO_RE_LTC_NS_CAP_X (0x0ACC)
+#define PTP_IO_FE_LTC_SEC_CAP_X (0x0AD0)
+#define PTP_IO_FE_LTC_NS_CAP_X (0x0AD4)
+#define PTP_IO_EVENT_OUTPUT_CFG (0x0AD8)
+#define PTP_IO_EVENT_OUTPUT_CFG_SEL_(channel) BIT(16 + (channel))
+#define PTP_IO_EVENT_OUTPUT_CFG_EN_(channel) BIT(0 + (channel))
+#define PTP_IO_PIN_CFG (0x0ADC)
+#define PTP_IO_PIN_CFG_OBUF_TYPE_(channel) BIT(0 + (channel))
+#define PTP_LTC_RD_SEC_HI (0x0AF0)
+#define PTP_LTC_RD_SEC_HI_SEC_47_32_MASK_ GENMASK(15, 0)
+#define PTP_LTC_RD_SEC_LO (0x0AF4)
+#define PTP_LTC_RD_NS (0x0AF8)
+#define PTP_LTC_RD_NS_29_0_MASK_ GENMASK(29, 0)
+#define PTP_LTC_RD_SUBNS (0x0AFC)
+#define PTP_RX_USER_MAC_HI (0x0B00)
+#define PTP_RX_USER_MAC_HI_47_32_MASK_ GENMASK(15, 0)
+#define PTP_RX_USER_MAC_LO (0x0B04)
+#define PTP_RX_USER_IP_ADDR_0 (0x0B20)
+#define PTP_RX_USER_IP_ADDR_1 (0x0B24)
+#define PTP_RX_USER_IP_ADDR_2 (0x0B28)
+#define PTP_RX_USER_IP_ADDR_3 (0x0B2C)
+#define PTP_RX_USER_IP_MASK_0 (0x0B30)
+#define PTP_RX_USER_IP_MASK_1 (0x0B34)
+#define PTP_RX_USER_IP_MASK_2 (0x0B38)
+#define PTP_RX_USER_IP_MASK_3 (0x0B3C)
+#define PTP_TX_USER_MAC_HI (0x0B40)
+#define PTP_TX_USER_MAC_HI_47_32_MASK_ GENMASK(15, 0)
+#define PTP_TX_USER_MAC_LO (0x0B44)
+#define PTP_TX_USER_IP_ADDR_0 (0x0B60)
+#define PTP_TX_USER_IP_ADDR_1 (0x0B64)
+#define PTP_TX_USER_IP_ADDR_2 (0x0B68)
+#define PTP_TX_USER_IP_ADDR_3 (0x0B6C)
+#define PTP_TX_USER_IP_MASK_0 (0x0B70)
+#define PTP_TX_USER_IP_MASK_1 (0x0B74)
+#define PTP_TX_USER_IP_MASK_2 (0x0B78)
+#define PTP_TX_USER_IP_MASK_3 (0x0B7C)
+
#define DMAC_CFG (0xC00)
#define DMAC_CFG_COAL_EN_ BIT(16)
#define DMAC_CFG_CH_ARB_SEL_RX_HIGH_ (0x00000000)
@@ -483,6 +662,20 @@
#define OTP_STATUS (0x1030)
#define OTP_STATUS_BUSY_ BIT(0)
+/* Hearthstone OTP block registers */
+#define HS_OTP_BLOCK_BASE (ETH_SYS_REG_ADDR_BASE + \
+ ETH_OTP_REG_ADDR_BASE)
+#define HS_OTP_PWR_DN (HS_OTP_BLOCK_BASE + 0x0)
+#define HS_OTP_ADDR_HIGH (HS_OTP_BLOCK_BASE + 0x4)
+#define HS_OTP_ADDR_LOW (HS_OTP_BLOCK_BASE + 0x8)
+#define HS_OTP_PRGM_DATA (HS_OTP_BLOCK_BASE + 0x10)
+#define HS_OTP_PRGM_MODE (HS_OTP_BLOCK_BASE + 0x14)
+#define HS_OTP_READ_DATA (HS_OTP_BLOCK_BASE + 0x18)
+#define HS_OTP_FUNC_CMD (HS_OTP_BLOCK_BASE + 0x20)
+#define HS_OTP_TST_CMD (HS_OTP_BLOCK_BASE + 0x24)
+#define HS_OTP_CMD_GO (HS_OTP_BLOCK_BASE + 0x28)
+#define HS_OTP_STATUS (HS_OTP_BLOCK_BASE + 0x30)
+
/* MAC statistics registers */
#define STAT_RX_FCS_ERRORS (0x1200)
#define STAT_RX_ALIGNMENT_ERRORS (0x1204)
@@ -541,10 +734,12 @@
#define LAN743X_MAX_RX_CHANNELS (4)
#define LAN743X_MAX_TX_CHANNELS (1)
+#define PCI11X1X_MAX_TX_CHANNELS (4)
struct lan743x_adapter;
#define LAN743X_USED_RX_CHANNELS (4)
#define LAN743X_USED_TX_CHANNELS (1)
+#define PCI11X1X_USED_TX_CHANNELS (4)
#define LAN743X_INT_MOD (400)
#if (LAN743X_USED_RX_CHANNELS > LAN743X_MAX_RX_CHANNELS)
@@ -553,12 +748,17 @@ struct lan743x_adapter;
#if (LAN743X_USED_TX_CHANNELS > LAN743X_MAX_TX_CHANNELS)
#error Invalid LAN743X_USED_TX_CHANNELS
#endif
+#if (PCI11X1X_USED_TX_CHANNELS > PCI11X1X_MAX_TX_CHANNELS)
+#error Invalid PCI11X1X_USED_TX_CHANNELS
+#endif
/* PCI */
/* SMSC acquired EFAR late 1990's, MCHP acquired SMSC 2012 */
#define PCI_VENDOR_ID_SMSC PCI_VENDOR_ID_EFAR
#define PCI_DEVICE_ID_SMSC_LAN7430 (0x7430)
#define PCI_DEVICE_ID_SMSC_LAN7431 (0x7431)
+#define PCI_DEVICE_ID_SMSC_A011 (0xA011)
+#define PCI_DEVICE_ID_SMSC_A041 (0xA041)
#define PCI_CONFIG_LENGTH (0x1000)
@@ -607,13 +807,14 @@ struct lan743x_vector {
};
#define LAN743X_MAX_VECTOR_COUNT (8)
+#define PCI11X1X_MAX_VECTOR_COUNT (16)
struct lan743x_intr {
int flags;
unsigned int irq;
- struct lan743x_vector vector_list[LAN743X_MAX_VECTOR_COUNT];
+ struct lan743x_vector vector_list[PCI11X1X_MAX_VECTOR_COUNT];
int number_of_vectors;
bool using_vectors;
@@ -668,6 +869,7 @@ struct lan743x_tx {
int last_tail;
struct napi_struct napi;
+ u32 frame_count;
struct sk_buff *overflow_skb;
};
@@ -721,8 +923,17 @@ struct lan743x_adapter {
u8 mac_address[ETH_ALEN];
struct lan743x_phy phy;
- struct lan743x_tx tx[LAN743X_MAX_TX_CHANNELS];
- struct lan743x_rx rx[LAN743X_MAX_RX_CHANNELS];
+ struct lan743x_tx tx[PCI11X1X_USED_TX_CHANNELS];
+ struct lan743x_rx rx[LAN743X_USED_RX_CHANNELS];
+ bool is_pci11x1x;
+ bool is_sgmii_en;
+ /* protect ethernet syslock */
+ spinlock_t eth_syslock_spinlock;
+ bool eth_syslock_en;
+ u32 eth_syslock_acquire_cnt;
+ u8 max_tx_channels;
+ u8 used_tx_channels;
+ u8 max_vector_count;
#define LAN743X_ADAPTER_FLAG_OTP BIT(0)
u32 flags;
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 8b7a8d879083..6a11e2ceb013 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -25,6 +25,18 @@ static void lan743x_ptp_clock_set(struct lan743x_adapter *adapter,
u32 seconds, u32 nano_seconds,
u32 sub_nano_seconds);
+static int lan743x_get_channel(u32 ch_map)
+{
+ int idx;
+
+ for (idx = 0; idx < 32; idx++) {
+ if (ch_map & (0x1 << idx))
+ return idx;
+ }
+
+ return -EINVAL;
+}
+
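Note that lan743x_get_channel() simply returns the index of the lowest set bit in the interrupt map (or -EINVAL when the map is empty); a user-space equivalent, shown only for illustration, is ffs(map) - 1:

#include <stdio.h>
#include <strings.h>

int main(void)
{
	unsigned int map = 0x00140000;	/* bits 18 and 20 set */

	printf("lowest set channel: %d\n", ffs(map) - 1);	/* prints 18 */
	return 0;
}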
int lan743x_gpio_init(struct lan743x_adapter *adapter)
{
struct lan743x_gpio *gpio = &adapter->gpio;
@@ -179,6 +191,8 @@ static void lan743x_ptp_release_event_ch(struct lan743x_adapter *adapter,
static void lan743x_ptp_clock_get(struct lan743x_adapter *adapter,
u32 *seconds, u32 *nano_seconds,
u32 *sub_nano_seconds);
+static void lan743x_ptp_io_clock_get(struct lan743x_adapter *adapter,
+ u32 *sec, u32 *nsec, u32 *sub_nsec);
static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
s64 time_step_ns);
@@ -407,7 +421,11 @@ static int lan743x_ptpci_gettime64(struct ptp_clock_info *ptpci,
u32 nano_seconds = 0;
u32 seconds = 0;
- lan743x_ptp_clock_get(adapter, &seconds, &nano_seconds, NULL);
+ if (adapter->is_pci11x1x)
+ lan743x_ptp_io_clock_get(adapter, &seconds, &nano_seconds,
+ NULL);
+ else
+ lan743x_ptp_clock_get(adapter, &seconds, &nano_seconds, NULL);
ts->tv_sec = seconds;
ts->tv_nsec = nano_seconds;
@@ -671,6 +689,322 @@ failed:
return ret;
}
+static void lan743x_ptp_io_perout_off(struct lan743x_adapter *adapter,
+ u32 index)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ int perout_pin;
+ int event_ch;
+ u32 gen_cfg;
+ int val;
+
+ event_ch = ptp->ptp_io_perout[index];
+ if (event_ch >= 0) {
+ /* set target to far in the future, effectively disabling it */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_SEC_X(event_ch),
+ 0xFFFF0000);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_NS_X(event_ch),
+ 0);
+
+ gen_cfg = lan743x_csr_read(adapter, HS_PTP_GENERAL_CONFIG);
+ gen_cfg &= ~(HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_
+ (event_ch));
+ gen_cfg &= ~(HS_PTP_GENERAL_CONFIG_EVENT_POL_X_(event_ch));
+ gen_cfg |= HS_PTP_GENERAL_CONFIG_RELOAD_ADD_X_(event_ch);
+ lan743x_csr_write(adapter, HS_PTP_GENERAL_CONFIG, gen_cfg);
+ if (event_ch)
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_TIMER_INT_B_);
+ else
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_TIMER_INT_A_);
+ lan743x_ptp_release_event_ch(adapter, event_ch);
+ ptp->ptp_io_perout[index] = -1;
+ }
+
+ perout_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT, index);
+
+ /* Deselect Event output */
+ val = lan743x_csr_read(adapter, PTP_IO_EVENT_OUTPUT_CFG);
+
+ /* Disables the output of Local Time Target compare events */
+ val &= ~PTP_IO_EVENT_OUTPUT_CFG_EN_(perout_pin);
+ lan743x_csr_write(adapter, PTP_IO_EVENT_OUTPUT_CFG, val);
+
+	/* Configured as an open-drain driver */
+ val = lan743x_csr_read(adapter, PTP_IO_PIN_CFG);
+ val &= ~PTP_IO_PIN_CFG_OBUF_TYPE_(perout_pin);
+ lan743x_csr_write(adapter, PTP_IO_PIN_CFG, val);
+	/* Dummy read to make sure the write operation succeeded */
+ val = lan743x_csr_read(adapter, PTP_IO_PIN_CFG);
+}
+
+static int lan743x_ptp_io_perout(struct lan743x_adapter *adapter, int on,
+ struct ptp_perout_request *perout_request)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ u32 period_sec, period_nsec;
+ u32 start_sec, start_nsec;
+ u32 pulse_sec, pulse_nsec;
+ int pulse_width;
+ int perout_pin;
+ int event_ch;
+ u32 gen_cfg;
+ u32 index;
+ int val;
+
+ index = perout_request->index;
+ event_ch = ptp->ptp_io_perout[index];
+
+ if (on) {
+ perout_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT, index);
+ if (perout_pin < 0)
+ return -EBUSY;
+ } else {
+ lan743x_ptp_io_perout_off(adapter, index);
+ return 0;
+ }
+
+ if (event_ch >= LAN743X_PTP_N_EVENT_CHAN) {
+ /* already on, turn off first */
+ lan743x_ptp_io_perout_off(adapter, index);
+ }
+
+ event_ch = lan743x_ptp_reserve_event_ch(adapter, index);
+ if (event_ch < 0) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "Failed to reserve event channel %d for PEROUT\n",
+ index);
+ goto failed;
+ }
+ ptp->ptp_io_perout[index] = event_ch;
+
+ if (perout_request->flags & PTP_PEROUT_DUTY_CYCLE) {
+ pulse_sec = perout_request->on.sec;
+ pulse_sec += perout_request->on.nsec / 1000000000;
+ pulse_nsec = perout_request->on.nsec % 1000000000;
+ } else {
+ pulse_sec = perout_request->period.sec;
+ pulse_sec += perout_request->period.nsec / 1000000000;
+ pulse_nsec = perout_request->period.nsec % 1000000000;
+ }
+
+ if (pulse_sec == 0) {
+ if (pulse_nsec >= 400000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ } else if (pulse_nsec >= 200000000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100MS_;
+ } else if (pulse_nsec >= 100000000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_50MS_;
+ } else if (pulse_nsec >= 20000000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_;
+ } else if (pulse_nsec >= 10000000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_5MS_;
+ } else if (pulse_nsec >= 2000000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_;
+ } else if (pulse_nsec >= 1000000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_500US_;
+ } else if (pulse_nsec >= 200000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_;
+ } else if (pulse_nsec >= 100000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_50US_;
+ } else if (pulse_nsec >= 20000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_;
+ } else if (pulse_nsec >= 10000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_5US_;
+ } else if (pulse_nsec >= 2000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_1US_;
+ } else if (pulse_nsec >= 1000) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_500NS_;
+ } else if (pulse_nsec >= 200) {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_;
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "perout period too small, min is 200nS\n");
+ goto failed;
+ }
+ } else {
+ pulse_width = HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ }
+
+ /* turn off by setting target far in future */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_SEC_X(event_ch),
+ 0xFFFF0000);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_NS_X(event_ch), 0);
+
+ /* Configure to pulse every period */
+ gen_cfg = lan743x_csr_read(adapter, HS_PTP_GENERAL_CONFIG);
+ gen_cfg &= ~(HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_(event_ch));
+ gen_cfg |= HS_PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_
+ (event_ch, pulse_width);
+ gen_cfg |= HS_PTP_GENERAL_CONFIG_EVENT_POL_X_(event_ch);
+ gen_cfg &= ~(HS_PTP_GENERAL_CONFIG_RELOAD_ADD_X_(event_ch));
+ lan743x_csr_write(adapter, HS_PTP_GENERAL_CONFIG, gen_cfg);
+
+ /* set the reload to one toggle cycle */
+ period_sec = perout_request->period.sec;
+ period_sec += perout_request->period.nsec / 1000000000;
+ period_nsec = perout_request->period.nsec % 1000000000;
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_RELOAD_SEC_X(event_ch),
+ period_sec);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_RELOAD_NS_X(event_ch),
+ period_nsec);
+
+ start_sec = perout_request->start.sec;
+ start_sec += perout_request->start.nsec / 1000000000;
+ start_nsec = perout_request->start.nsec % 1000000000;
+
+ /* set the start time */
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_SEC_X(event_ch),
+ start_sec);
+ lan743x_csr_write(adapter,
+ PTP_CLOCK_TARGET_NS_X(event_ch),
+ start_nsec);
+
+ /* Enable LTC Target Read */
+ val = lan743x_csr_read(adapter, PTP_CMD_CTL);
+ val |= PTP_CMD_CTL_PTP_LTC_TARGET_READ_;
+ lan743x_csr_write(adapter, PTP_CMD_CTL, val);
+
+	/* Configure as a push/pull driver */
+ val = lan743x_csr_read(adapter, PTP_IO_PIN_CFG);
+ val |= PTP_IO_PIN_CFG_OBUF_TYPE_(perout_pin);
+ lan743x_csr_write(adapter, PTP_IO_PIN_CFG, val);
+
+ /* Select Event output */
+ val = lan743x_csr_read(adapter, PTP_IO_EVENT_OUTPUT_CFG);
+ if (event_ch)
+ /* Channel B as the output */
+ val |= PTP_IO_EVENT_OUTPUT_CFG_SEL_(perout_pin);
+ else
+ /* Channel A as the output */
+ val &= ~PTP_IO_EVENT_OUTPUT_CFG_SEL_(perout_pin);
+
+ /* Enables the output of Local Time Target compare events */
+ val |= PTP_IO_EVENT_OUTPUT_CFG_EN_(perout_pin);
+ lan743x_csr_write(adapter, PTP_IO_EVENT_OUTPUT_CFG, val);
+
+ return 0;
+
+failed:
+ lan743x_ptp_io_perout_off(adapter, index);
+ return -ENODEV;
+}
+
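A hedged user-space sketch of how this path would typically be exercised (assumes /dev/ptp0 is the lan743x clock and that pin 0 supports PTP_PF_PEROUT; error handling trimmed): requesting a 1 ms period with a 100 us on-time via PTP_PEROUT_DUTY_CYCLE lands in the 50 us pulse-width bucket of the ladder above.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_pin_desc pin = { .index = 0, .func = PTP_PF_PEROUT, .chan = 0 };
	struct ptp_perout_request req;
	int fd = open("/dev/ptp0", O_RDWR);	/* device node is an assumption */

	if (fd < 0 || ioctl(fd, PTP_PIN_SETFUNC2, &pin))
		return 1;

	memset(&req, 0, sizeof(req));
	req.index = 0;
	req.flags = PTP_PEROUT_DUTY_CYCLE;
	req.period.nsec = 1000000;	/* 1 ms period */
	req.on.nsec = 100000;		/* 100 us high time */
	if (ioctl(fd, PTP_PEROUT_REQUEST2, &req))
		return 1;

	close(fd);
	return 0;
}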
+static void lan743x_ptp_io_extts_off(struct lan743x_adapter *adapter,
+ u32 index)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ struct lan743x_extts *extts;
+ int val;
+
+ extts = &ptp->extts[index];
+ /* PTP Interrupt Enable Clear Register */
+ if (extts->flags & PTP_FALLING_EDGE)
+ val = PTP_INT_EN_FE_EN_CLR_(index);
+ else
+ val = PTP_INT_EN_RE_EN_CLR_(index);
+ lan743x_csr_write(adapter, PTP_INT_EN_CLR, val);
+
+ /* Disables PTP-IO edge lock */
+ val = lan743x_csr_read(adapter, PTP_IO_CAP_CONFIG);
+ if (extts->flags & PTP_FALLING_EDGE) {
+ val &= ~PTP_IO_CAP_CONFIG_LOCK_FE_(index);
+ val &= ~PTP_IO_CAP_CONFIG_FE_CAP_EN_(index);
+ } else {
+ val &= ~PTP_IO_CAP_CONFIG_LOCK_RE_(index);
+ val &= ~PTP_IO_CAP_CONFIG_RE_CAP_EN_(index);
+ }
+ lan743x_csr_write(adapter, PTP_IO_CAP_CONFIG, val);
+
+ /* PTP-IO De-select register */
+ val = lan743x_csr_read(adapter, PTP_IO_SEL);
+ val &= ~PTP_IO_SEL_MASK_;
+ lan743x_csr_write(adapter, PTP_IO_SEL, val);
+
+ /* Clear timestamp */
+ memset(&extts->ts, 0, sizeof(struct timespec64));
+ extts->flags = 0;
+}
+
+static int lan743x_ptp_io_event_cap_en(struct lan743x_adapter *adapter,
+ u32 flags, u32 channel)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ int val;
+
+ if ((flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ptp->command_lock);
+ /* PTP-IO Event Capture Enable */
+ val = lan743x_csr_read(adapter, PTP_IO_CAP_CONFIG);
+ if (flags & PTP_FALLING_EDGE) {
+ val &= ~PTP_IO_CAP_CONFIG_LOCK_RE_(channel);
+ val &= ~PTP_IO_CAP_CONFIG_RE_CAP_EN_(channel);
+ val |= PTP_IO_CAP_CONFIG_LOCK_FE_(channel);
+ val |= PTP_IO_CAP_CONFIG_FE_CAP_EN_(channel);
+ } else {
+		/* Rising edge capture as default */
+ val &= ~PTP_IO_CAP_CONFIG_LOCK_FE_(channel);
+ val &= ~PTP_IO_CAP_CONFIG_FE_CAP_EN_(channel);
+ val |= PTP_IO_CAP_CONFIG_LOCK_RE_(channel);
+ val |= PTP_IO_CAP_CONFIG_RE_CAP_EN_(channel);
+ }
+ lan743x_csr_write(adapter, PTP_IO_CAP_CONFIG, val);
+
+ /* PTP-IO Select */
+ val = lan743x_csr_read(adapter, PTP_IO_SEL);
+ val &= ~PTP_IO_SEL_MASK_;
+ val |= channel << PTP_IO_SEL_SHIFT_;
+ lan743x_csr_write(adapter, PTP_IO_SEL, val);
+
+ /* PTP Interrupt Enable Register */
+ if (flags & PTP_FALLING_EDGE)
+ val = PTP_INT_EN_FE_EN_SET_(channel);
+ else
+ val = PTP_INT_EN_RE_EN_SET_(channel);
+ lan743x_csr_write(adapter, PTP_INT_EN_SET, val);
+
+ mutex_unlock(&ptp->command_lock);
+
+ return 0;
+}
+
+static int lan743x_ptp_io_extts(struct lan743x_adapter *adapter, int on,
+ struct ptp_extts_request *extts_request)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ u32 flags = extts_request->flags;
+ u32 index = extts_request->index;
+ struct lan743x_extts *extts;
+ int extts_pin;
+ int ret = 0;
+
+ extts = &ptp->extts[index];
+
+ if (on) {
+ extts_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS, index);
+ if (extts_pin < 0)
+ return -EBUSY;
+
+ ret = lan743x_ptp_io_event_cap_en(adapter, flags, index);
+ if (!ret)
+ extts->flags = flags;
+ } else {
+ lan743x_ptp_io_extts_off(adapter, index);
+ }
+
+ return ret;
+}
+
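Similarly, a hedged user-space sketch for the external-timestamp path above (assumes /dev/ptp0 and that pin 0 supports PTP_PF_EXTTS): arm a rising-edge capture and read back one event; the driver posts the PTP_CLOCK_EXTTS events from its aux-work handler further below.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_pin_desc pin = { .index = 0, .func = PTP_PF_EXTTS, .chan = 0 };
	struct ptp_extts_request req = { .index = 0,
					 .flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE };
	struct ptp_extts_event ev;
	int fd = open("/dev/ptp0", O_RDWR);	/* device node is an assumption */

	if (fd < 0 || ioctl(fd, PTP_PIN_SETFUNC2, &pin) ||
	    ioctl(fd, PTP_EXTTS_REQUEST2, &req))
		return 1;

	if (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("extts[%u] %lld.%09u\n", ev.index,
		       (long long)ev.t.sec, ev.t.nsec);
	return 0;
}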
static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci,
struct ptp_clock_request *request, int on)
{
@@ -682,11 +1016,19 @@ static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci,
if (request) {
switch (request->type) {
case PTP_CLK_REQ_EXTTS:
+ if (request->extts.index < ptpci->n_ext_ts)
+ return lan743x_ptp_io_extts(adapter, on,
+ &request->extts);
return -EINVAL;
case PTP_CLK_REQ_PEROUT:
- if (request->perout.index < ptpci->n_per_out)
- return lan743x_ptp_perout(adapter, on,
+ if (request->perout.index < ptpci->n_per_out) {
+ if (adapter->is_pci11x1x)
+ return lan743x_ptp_io_perout(adapter, on,
+ &request->perout);
+ else
+ return lan743x_ptp_perout(adapter, on,
&request->perout);
+ }
return -EINVAL;
case PTP_CLK_REQ_PPS:
return -EINVAL;
@@ -715,8 +1057,8 @@ static int lan743x_ptpci_verify_pin_config(struct ptp_clock_info *ptp,
switch (func) {
case PTP_PF_NONE:
case PTP_PF_PEROUT:
- break;
case PTP_PF_EXTTS:
+ break;
case PTP_PF_PHYSYNC:
default:
result = -1;
@@ -725,6 +1067,33 @@ static int lan743x_ptpci_verify_pin_config(struct ptp_clock_info *ptp,
return result;
}
+static void lan743x_ptp_io_event_clock_get(struct lan743x_adapter *adapter,
+ bool fe, u8 channel,
+ struct timespec64 *ts)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+ struct lan743x_extts *extts;
+ u32 sec, nsec;
+
+ mutex_lock(&ptp->command_lock);
+ if (fe) {
+ sec = lan743x_csr_read(adapter, PTP_IO_FE_LTC_SEC_CAP_X);
+ nsec = lan743x_csr_read(adapter, PTP_IO_FE_LTC_NS_CAP_X);
+ } else {
+ sec = lan743x_csr_read(adapter, PTP_IO_RE_LTC_SEC_CAP_X);
+ nsec = lan743x_csr_read(adapter, PTP_IO_RE_LTC_NS_CAP_X);
+ }
+
+ mutex_unlock(&ptp->command_lock);
+
+ /* Update Local timestamp */
+ extts = &ptp->extts[channel];
+ extts->ts.tv_sec = sec;
+ extts->ts.tv_nsec = nsec;
+ ts->tv_sec = sec;
+ ts->tv_nsec = nsec;
+}
+
static long lan743x_ptpci_do_aux_work(struct ptp_clock_info *ptpci)
{
struct lan743x_ptp *ptp =
@@ -733,41 +1102,121 @@ static long lan743x_ptpci_do_aux_work(struct ptp_clock_info *ptpci)
container_of(ptp, struct lan743x_adapter, ptp);
u32 cap_info, cause, header, nsec, seconds;
bool new_timestamp_available = false;
+ struct ptp_clock_event ptp_event;
+ struct timespec64 ts;
+ int ptp_int_sts;
int count = 0;
+ int channel;
+ s64 ns;
- while ((count < 100) &&
- (lan743x_csr_read(adapter, PTP_INT_STS) & PTP_INT_BIT_TX_TS_)) {
+ ptp_int_sts = lan743x_csr_read(adapter, PTP_INT_STS);
+ while ((count < 100) && ptp_int_sts) {
count++;
- cap_info = lan743x_csr_read(adapter, PTP_CAP_INFO);
-
- if (PTP_CAP_INFO_TX_TS_CNT_GET_(cap_info) > 0) {
- seconds = lan743x_csr_read(adapter,
- PTP_TX_EGRESS_SEC);
- nsec = lan743x_csr_read(adapter, PTP_TX_EGRESS_NS);
- cause = (nsec &
- PTP_TX_EGRESS_NS_CAPTURE_CAUSE_MASK_);
- header = lan743x_csr_read(adapter,
- PTP_TX_MSG_HEADER);
-
- if (cause == PTP_TX_EGRESS_NS_CAPTURE_CAUSE_SW_) {
- nsec &= PTP_TX_EGRESS_NS_TS_NS_MASK_;
- lan743x_ptp_tx_ts_enqueue_ts(adapter,
- seconds, nsec,
- header);
- new_timestamp_available = true;
- } else if (cause ==
- PTP_TX_EGRESS_NS_CAPTURE_CAUSE_AUTO_) {
- netif_err(adapter, drv, adapter->netdev,
- "Auto capture cause not supported\n");
+
+ if (ptp_int_sts & PTP_INT_BIT_TX_TS_) {
+ cap_info = lan743x_csr_read(adapter, PTP_CAP_INFO);
+
+ if (PTP_CAP_INFO_TX_TS_CNT_GET_(cap_info) > 0) {
+ seconds = lan743x_csr_read(adapter,
+ PTP_TX_EGRESS_SEC);
+ nsec = lan743x_csr_read(adapter,
+ PTP_TX_EGRESS_NS);
+ cause = (nsec &
+ PTP_TX_EGRESS_NS_CAPTURE_CAUSE_MASK_);
+ header = lan743x_csr_read(adapter,
+ PTP_TX_MSG_HEADER);
+
+ if (cause ==
+ PTP_TX_EGRESS_NS_CAPTURE_CAUSE_SW_) {
+ nsec &= PTP_TX_EGRESS_NS_TS_NS_MASK_;
+ lan743x_ptp_tx_ts_enqueue_ts(adapter,
+ seconds,
+ nsec,
+ header);
+ new_timestamp_available = true;
+ } else if (cause ==
+ PTP_TX_EGRESS_NS_CAPTURE_CAUSE_AUTO_) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Auto capture cause not supported\n");
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "unknown tx timestamp capture cause\n");
+ }
} else {
netif_warn(adapter, drv, adapter->netdev,
- "unknown tx timestamp capture cause\n");
+ "TX TS INT but no TX TS CNT\n");
}
- } else {
- netif_warn(adapter, drv, adapter->netdev,
- "TX TS INT but no TX TS CNT\n");
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_BIT_TX_TS_);
+ }
+
+ if (ptp_int_sts & PTP_INT_IO_FE_MASK_) {
+ do {
+ channel = lan743x_get_channel((ptp_int_sts &
+ PTP_INT_IO_FE_MASK_) >>
+ PTP_INT_IO_FE_SHIFT_);
+ if (channel >= 0 &&
+ channel < PCI11X1X_PTP_IO_MAX_CHANNELS) {
+ lan743x_ptp_io_event_clock_get(adapter,
+ true,
+ channel,
+ &ts);
+ /* PTP Falling Event post */
+ ns = timespec64_to_ns(&ts);
+ ptp_event.timestamp = ns;
+ ptp_event.index = channel;
+ ptp_event.type = PTP_CLOCK_EXTTS;
+ ptp_clock_event(ptp->ptp_clock,
+ &ptp_event);
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_IO_FE_SET_
+ (channel));
+ ptp_int_sts &= ~(1 <<
+ (PTP_INT_IO_FE_SHIFT_ +
+ channel));
+ } else {
+ /* Clear falling event interrupts */
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_IO_FE_MASK_);
+ ptp_int_sts &= ~PTP_INT_IO_FE_MASK_;
+ }
+ } while (ptp_int_sts & PTP_INT_IO_FE_MASK_);
+ }
+
+ if (ptp_int_sts & PTP_INT_IO_RE_MASK_) {
+ do {
+ channel = lan743x_get_channel((ptp_int_sts &
+ PTP_INT_IO_RE_MASK_) >>
+ PTP_INT_IO_RE_SHIFT_);
+ if (channel >= 0 &&
+ channel < PCI11X1X_PTP_IO_MAX_CHANNELS) {
+ lan743x_ptp_io_event_clock_get(adapter,
+ false,
+ channel,
+ &ts);
+ /* PTP Rising Event post */
+ ns = timespec64_to_ns(&ts);
+ ptp_event.timestamp = ns;
+ ptp_event.index = channel;
+ ptp_event.type = PTP_CLOCK_EXTTS;
+ ptp_clock_event(ptp->ptp_clock,
+ &ptp_event);
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_IO_RE_SET_
+ (channel));
+ ptp_int_sts &= ~(1 <<
+ (PTP_INT_IO_RE_SHIFT_ +
+ channel));
+ } else {
+ /* Clear Rising event interrupt */
+ lan743x_csr_write(adapter, PTP_INT_STS,
+ PTP_INT_IO_RE_MASK_);
+ ptp_int_sts &= ~PTP_INT_IO_RE_MASK_;
+ }
+ } while (ptp_int_sts & PTP_INT_IO_RE_MASK_);
}
- lan743x_csr_write(adapter, PTP_INT_STS, PTP_INT_BIT_TX_TS_);
+
+ ptp_int_sts = lan743x_csr_read(adapter, PTP_INT_STS);
}
if (new_timestamp_available)
@@ -802,6 +1251,28 @@ static void lan743x_ptp_clock_get(struct lan743x_adapter *adapter,
mutex_unlock(&ptp->command_lock);
}
+static void lan743x_ptp_io_clock_get(struct lan743x_adapter *adapter,
+ u32 *sec, u32 *nsec, u32 *sub_nsec)
+{
+ struct lan743x_ptp *ptp = &adapter->ptp;
+
+ mutex_lock(&ptp->command_lock);
+ lan743x_csr_write(adapter, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_READ_);
+ lan743x_ptp_wait_till_cmd_done(adapter, PTP_CMD_CTL_PTP_CLOCK_READ_);
+
+ if (sec)
+ (*sec) = lan743x_csr_read(adapter, PTP_LTC_RD_SEC_LO);
+
+ if (nsec)
+ (*nsec) = lan743x_csr_read(adapter, PTP_LTC_RD_NS);
+
+ if (sub_nsec)
+ (*sub_nsec) =
+ lan743x_csr_read(adapter, PTP_LTC_RD_SUBNS);
+
+ mutex_unlock(&ptp->command_lock);
+}
+
static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
s64 time_step_ns)
{
@@ -815,8 +1286,12 @@ static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
if (time_step_ns > 15000000000LL) {
/* convert to clock set */
- lan743x_ptp_clock_get(adapter, &unsigned_seconds,
- &nano_seconds, NULL);
+ if (adapter->is_pci11x1x)
+ lan743x_ptp_io_clock_get(adapter, &unsigned_seconds,
+ &nano_seconds, NULL);
+ else
+ lan743x_ptp_clock_get(adapter, &unsigned_seconds,
+ &nano_seconds, NULL);
unsigned_seconds += div_u64_rem(time_step_ns, 1000000000LL,
&remainder);
nano_seconds += remainder;
@@ -831,8 +1306,13 @@ static void lan743x_ptp_clock_step(struct lan743x_adapter *adapter,
/* convert to clock set */
time_step_ns = -time_step_ns;
- lan743x_ptp_clock_get(adapter, &unsigned_seconds,
- &nano_seconds, NULL);
+ if (adapter->is_pci11x1x) {
+ lan743x_ptp_io_clock_get(adapter, &unsigned_seconds,
+ &nano_seconds, NULL);
+ } else {
+ lan743x_ptp_clock_get(adapter, &unsigned_seconds,
+ &nano_seconds, NULL);
+ }
unsigned_seconds -= div_u64_rem(time_step_ns, 1000000000LL,
&remainder);
nano_seconds_step = remainder;
@@ -1061,6 +1541,8 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
n_pins = LAN7430_N_GPIO;
break;
case ID_REV_ID_LAN7431_:
+ case ID_REV_ID_A011_:
+ case ID_REV_ID_A041_:
n_pins = LAN7431_N_GPIO;
break;
default:
@@ -1088,10 +1570,10 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
adapter->netdev->dev_addr);
ptp->ptp_clock_info.max_adj = LAN743X_PTP_MAX_FREQ_ADJ_IN_PPB;
ptp->ptp_clock_info.n_alarm = 0;
- ptp->ptp_clock_info.n_ext_ts = 0;
+ ptp->ptp_clock_info.n_ext_ts = LAN743X_PTP_N_EXTTS;
ptp->ptp_clock_info.n_per_out = LAN743X_PTP_N_EVENT_CHAN;
ptp->ptp_clock_info.n_pins = n_pins;
- ptp->ptp_clock_info.pps = 0;
+ ptp->ptp_clock_info.pps = LAN743X_PTP_N_PPS;
ptp->ptp_clock_info.pin_config = ptp->pin_config;
ptp->ptp_clock_info.adjfine = lan743x_ptpci_adjfine;
ptp->ptp_clock_info.adjfreq = lan743x_ptpci_adjfreq;
@@ -1307,21 +1789,21 @@ int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
- for (index = 0; index < LAN743X_MAX_TX_CHANNELS;
- index++)
+ for (index = 0; index < adapter->used_tx_channels;
+ index++)
lan743x_tx_set_timestamping_mode(&adapter->tx[index],
false, false);
lan743x_ptp_set_sync_ts_insert(adapter, false);
break;
case HWTSTAMP_TX_ON:
- for (index = 0; index < LAN743X_MAX_TX_CHANNELS;
+ for (index = 0; index < adapter->used_tx_channels;
index++)
lan743x_tx_set_timestamping_mode(&adapter->tx[index],
true, false);
lan743x_ptp_set_sync_ts_insert(adapter, false);
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
- for (index = 0; index < LAN743X_MAX_TX_CHANNELS;
+ for (index = 0; index < adapter->used_tx_channels;
index++)
lan743x_tx_set_timestamping_mode(&adapter->tx[index],
true, true);
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h
index 7663bf5d2e33..e26d4eff7133 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.h
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.h
@@ -18,6 +18,9 @@
*/
#define LAN743X_PTP_N_EVENT_CHAN 2
#define LAN743X_PTP_N_PEROUT LAN743X_PTP_N_EVENT_CHAN
+#define LAN743X_PTP_N_EXTTS 4
+#define LAN743X_PTP_N_PPS 0
+#define PCI11X1X_PTP_IO_MAX_CHANNELS 8
struct lan743x_adapter;
@@ -60,6 +63,11 @@ struct lan743x_ptp_perout {
int gpio_pin; /* GPIO pin where output appears */
};
+struct lan743x_extts {
+ int flags;
+ struct timespec64 ts;
+};
+
struct lan743x_ptp {
int flags;
@@ -72,6 +80,8 @@ struct lan743x_ptp {
unsigned long used_event_ch;
struct lan743x_ptp_perout perout[LAN743X_PTP_N_PEROUT];
+ int ptp_io_perout[LAN743X_PTP_N_PEROUT]; /* PTP event channel (0=channel A, 1=channel B) */
+ struct lan743x_extts extts[LAN743X_PTP_N_EXTTS];
bool leds_multiplexed;
bool led_enabled[LAN7430_N_LED];
diff --git a/drivers/net/ethernet/microchip/lan966x/Kconfig b/drivers/net/ethernet/microchip/lan966x/Kconfig
index ac273f84b69e..4241ff0e5098 100644
--- a/drivers/net/ethernet/microchip/lan966x/Kconfig
+++ b/drivers/net/ethernet/microchip/lan966x/Kconfig
@@ -1,5 +1,6 @@
config LAN966X_SWITCH
tristate "Lan966x switch driver"
+ depends on PTP_1588_CLOCK_OPTIONAL
depends on HAS_IOMEM
depends on OF
depends on NET_SWITCHDEV
diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile
index 040cfff9f577..a9ffc719aa0e 100644
--- a/drivers/net/ethernet/microchip/lan966x/Makefile
+++ b/drivers/net/ethernet/microchip/lan966x/Makefile
@@ -7,4 +7,5 @@ obj-$(CONFIG_LAN966X_SWITCH) += lan966x-switch.o
lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \
lan966x_mac.o lan966x_ethtool.o lan966x_switchdev.o \
- lan966x_vlan.o lan966x_fdb.o lan966x_mdb.o
+ lan966x_vlan.o lan966x_fdb.o lan966x_mdb.o \
+ lan966x_ptp.o
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
index 614f12c2fe6a..e58a27fd8b50 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
@@ -545,6 +545,39 @@ static int lan966x_set_pauseparam(struct net_device *dev,
return phylink_ethtool_set_pauseparam(port->phylink, pause);
}
+static int lan966x_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_phc *phc;
+
+ if (!lan966x->ptp)
+ return ethtool_op_get_ts_info(dev, info);
+
+ phc = &lan966x->phc[LAN966X_PHC_PORT];
+
+ info->phc_index = phc->clock ? ptp_clock_index(phc->clock) : -1;
+ if (info->phc_index == -1) {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ return 0;
+ }
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
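A hedged user-space sketch of how these capabilities are queried (equivalent to what "ethtool -T" does; the interface name is an assumption):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>

int main(void)
{
	struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed port name */
	ifr.ifr_data = (void *)&info;

	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr))
		return 1;

	printf("phc_index=%d tx_types=0x%x rx_filters=0x%x\n",
	       info.phc_index, info.tx_types, info.rx_filters);
	close(fd);
	return 0;
}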
const struct ethtool_ops lan966x_ethtool_ops = {
.get_link_ksettings = lan966x_get_link_ksettings,
.set_link_ksettings = lan966x_set_link_ksettings,
@@ -556,6 +589,7 @@ const struct ethtool_ops lan966x_ethtool_ops = {
.get_eth_mac_stats = lan966x_get_eth_mac_stats,
.get_rmon_stats = lan966x_get_eth_rmon_stats,
.get_link = ethtool_op_get_link,
+ .get_ts_info = lan966x_get_ts_info,
};
static void lan966x_check_stats_work(struct work_struct *work)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 1f60fd125a1d..e1bcb28039dc 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -4,11 +4,13 @@
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/iopoll.h>
+#include <linux/ip.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
#include <linux/packing.h>
#include <linux/phy/phy.h>
#include <linux/reset.h>
+#include <net/addrconf.h>
#include "lan966x_main.h"
@@ -44,6 +46,7 @@ static const struct lan966x_main_io_resource lan966x_main_iomap[] = {
{ TARGET_ORG, 0, 1 }, /* 0xe2000000 */
{ TARGET_GCB, 0x4000, 1 }, /* 0xe2004000 */
{ TARGET_QS, 0x8000, 1 }, /* 0xe2008000 */
+ { TARGET_PTP, 0xc000, 1 }, /* 0xe200c000 */
{ TARGET_CHIP_TOP, 0x10000, 1 }, /* 0xe2010000 */
{ TARGET_REW, 0x14000, 1 }, /* 0xe2014000 */
{ TARGET_SYS, 0x28000, 1 }, /* 0xe2028000 */
@@ -182,6 +185,9 @@ static int lan966x_port_inj_ready(struct lan966x *lan966x, u8 grp)
{
u32 val;
+ if (lan_rd(lan966x, QS_INJ_STATUS) & QS_INJ_STATUS_FIFO_RDY_SET(BIT(grp)))
+ return 0;
+
return readx_poll_timeout_atomic(lan966x_port_inj_status, lan966x, val,
QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp),
READL_SLEEP_US, READL_TIMEOUT_US);
@@ -201,7 +207,7 @@ static int lan966x_port_ifh_xmit(struct sk_buff *skb,
val = lan_rd(lan966x, QS_INJ_STATUS);
if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp)) ||
(QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)))
- return NETDEV_TX_BUSY;
+ goto err;
/* Write start of frame */
lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
@@ -213,7 +219,7 @@ static int lan966x_port_ifh_xmit(struct sk_buff *skb,
/* Wait until the fifo is ready */
err = lan966x_port_inj_ready(lan966x, grp);
if (err)
- return NETDEV_TX_BUSY;
+ goto err;
lan_wr((__force u32)ifh[i], lan966x, QS_INJ_WR(grp));
}
@@ -225,7 +231,7 @@ static int lan966x_port_ifh_xmit(struct sk_buff *skb,
/* Wait until the fifo is ready */
err = lan966x_port_inj_ready(lan966x, grp);
if (err)
- return NETDEV_TX_BUSY;
+ goto err;
lan_wr(((u32 *)skb->data)[i], lan966x, QS_INJ_WR(grp));
}
@@ -235,7 +241,7 @@ static int lan966x_port_ifh_xmit(struct sk_buff *skb,
/* Wait until the fifo is ready */
err = lan966x_port_inj_ready(lan966x, grp);
if (err)
- return NETDEV_TX_BUSY;
+ goto err;
lan_wr(0, lan966x, QS_INJ_WR(grp));
++i;
@@ -255,8 +261,19 @@ static int lan966x_port_ifh_xmit(struct sk_buff *skb,
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ return NETDEV_TX_OK;
+
dev_consume_skb_any(skb);
return NETDEV_TX_OK;
+
+err:
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ lan966x_ptp_txtstamp_release(port, skb);
+
+ return NETDEV_TX_BUSY;
}
static void lan966x_ifh_set_bypass(void *ifh, u64 bypass)
@@ -289,10 +306,24 @@ static void lan966x_ifh_set_vid(void *ifh, u64 vid)
IFH_POS_TCI, IFH_LEN * 4, PACK, 0);
}
+static void lan966x_ifh_set_rew_op(void *ifh, u64 rew_op)
+{
+ packing(ifh, &rew_op, IFH_POS_REW_CMD + IFH_WID_REW_CMD - 1,
+ IFH_POS_REW_CMD, IFH_LEN * 4, PACK, 0);
+}
+
+static void lan966x_ifh_set_timestamp(void *ifh, u64 timestamp)
+{
+ packing(ifh, &timestamp, IFH_POS_TIMESTAMP + IFH_WID_TIMESTAMP - 1,
+ IFH_POS_TIMESTAMP, IFH_LEN * 4, PACK, 0);
+}
+
static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
__be32 ifh[IFH_LEN];
+ int err;
memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
@@ -302,7 +333,20 @@ static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev)
lan966x_ifh_set_ipv(ifh, skb->priority >= 7 ? 0x7 : skb->priority);
lan966x_ifh_set_vid(ifh, skb_vlan_tag_get(skb));
- return lan966x_port_ifh_xmit(skb, ifh, dev);
+ if (port->lan966x->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ err = lan966x_ptp_txtstamp_request(port, skb);
+ if (err)
+ return err;
+
+ lan966x_ifh_set_rew_op(ifh, LAN966X_SKB_CB(skb)->rew_op);
+ lan966x_ifh_set_timestamp(ifh, LAN966X_SKB_CB(skb)->ts_id);
+ }
+
+ spin_lock(&lan966x->tx_lock);
+ err = lan966x_port_ifh_xmit(skb, ifh, dev);
+ spin_unlock(&lan966x->tx_lock);
+
+ return err;
}
static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu)
@@ -350,6 +394,23 @@ static int lan966x_port_get_parent_id(struct net_device *dev,
return 0;
}
+static int lan966x_port_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ if (!phy_has_hwtstamp(dev->phydev) && port->lan966x->ptp) {
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return lan966x_ptp_hwtstamp_set(port, ifr);
+ case SIOCGHWTSTAMP:
+ return lan966x_ptp_hwtstamp_get(port, ifr);
+ }
+ }
+
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
+}
+
static const struct net_device_ops lan966x_port_netdev_ops = {
.ndo_open = lan966x_port_open,
.ndo_stop = lan966x_port_stop,
@@ -360,6 +421,7 @@ static const struct net_device_ops lan966x_port_netdev_ops = {
.ndo_get_stats64 = lan966x_stats_get,
.ndo_set_mac_address = lan966x_port_set_mac_address,
.ndo_get_port_parent_id = lan966x_port_get_parent_id,
+ .ndo_eth_ioctl = lan966x_port_ioctl,
};
bool lan966x_netdevice_check(const struct net_device *dev)
@@ -367,6 +429,33 @@ bool lan966x_netdevice_check(const struct net_device *dev)
return dev->netdev_ops == &lan966x_port_netdev_ops;
}
+static bool lan966x_hw_offload(struct lan966x *lan966x, u32 port,
+ struct sk_buff *skb)
+{
+ u32 val;
+
+	/* The IGMP and MLD frames are not forwarded by the HW if
+	 * multicast snooping is enabled, therefore don't mark them as
+	 * offloaded, to allow the SW to forward the frames accordingly.
+ */
+ val = lan_rd(lan966x, ANA_CPU_FWD_CFG(port));
+ if (!(val & (ANA_CPU_FWD_CFG_IGMP_REDIR_ENA |
+ ANA_CPU_FWD_CFG_MLD_REDIR_ENA)))
+ return true;
+
+ if (skb->protocol == htons(ETH_P_IP) &&
+ ip_hdr(skb)->protocol == IPPROTO_IGMP)
+ return false;
+
+ if (IS_ENABLED(CONFIG_IPV6) &&
+ skb->protocol == htons(ETH_P_IPV6) &&
+ ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) &&
+ !ipv6_mc_check_mld(skb))
+ return false;
+
+ return true;
+}
+
static int lan966x_port_xtr_status(struct lan966x *lan966x, u8 grp)
{
return lan_rd(lan966x, QS_XTR_RD(grp));
@@ -434,6 +523,12 @@ static void lan966x_ifh_get_len(void *ifh, u64 *len)
IFH_POS_LEN, IFH_LEN * 4, UNPACK, 0);
}
+static void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp)
+{
+ packing(ifh, timestamp, IFH_POS_TIMESTAMP + IFH_WID_TIMESTAMP - 1,
+ IFH_POS_TIMESTAMP, IFH_LEN * 4, UNPACK, 0);
+}
+
static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
{
struct lan966x *lan966x = args;
@@ -443,10 +538,10 @@ static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
return IRQ_NONE;
do {
+ u64 src_port, len, timestamp;
struct net_device *dev;
struct sk_buff *skb;
int sz = 0, buf_len;
- u64 src_port, len;
u32 ifh[IFH_LEN];
u32 *buf;
u32 val;
@@ -461,6 +556,7 @@ static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
lan966x_ifh_get_src_port(ifh, &src_port);
lan966x_ifh_get_len(ifh, &len);
+ lan966x_ifh_get_timestamp(ifh, &timestamp);
WARN_ON(src_port >= lan966x->num_phys_ports);
@@ -501,12 +597,20 @@ static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
*buf = val;
}
+ lan966x_ptp_rxtstamp(lan966x, skb, timestamp);
skb->protocol = eth_type_trans(skb, dev);
- if (lan966x->bridge_mask & BIT(src_port))
+ if (lan966x->bridge_mask & BIT(src_port)) {
skb->offload_fwd_mark = 1;
- netif_rx_ni(skb);
+ skb_reset_network_header(skb);
+ if (!lan966x_hw_offload(lan966x, src_port, skb))
+ skb->offload_fwd_mark = 0;
+ }
+
+ if (!skb_defer_rx_timestamp(skb))
+ netif_rx(skb);
+
dev->stats.rx_bytes += len;
dev->stats.rx_packets++;
@@ -628,7 +732,6 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
}
port->phylink = phylink;
- phylink_set_pcs(phylink, &port->phylink_pcs);
err = register_netdev(dev);
if (err) {
@@ -708,7 +811,7 @@ static void lan966x_init(struct lan966x *lan966x)
/* Setup flooding PGIDs */
lan_wr(ANA_FLOODING_IPMC_FLD_MC4_DATA_SET(PGID_MCIPV4) |
ANA_FLOODING_IPMC_FLD_MC4_CTRL_SET(PGID_MC) |
- ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(PGID_MC) |
+ ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(PGID_MCIPV6) |
ANA_FLOODING_IPMC_FLD_MC6_CTRL_SET(PGID_MC),
lan966x, ANA_FLOODING_IPMC);
@@ -770,6 +873,10 @@ static void lan966x_init(struct lan966x *lan966x)
ANA_PGID_PGID,
lan966x, ANA_PGID(PGID_MCIPV4));
+ lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_MCIPV6));
+
/* Unicast to all other ports */
lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
ANA_PGID_PGID,
@@ -786,6 +893,8 @@ static void lan966x_init(struct lan966x *lan966x)
lan_rmw(ANA_ANAINTR_INTR_ENA_SET(1),
ANA_ANAINTR_INTR_ENA,
lan966x, ANA_ANAINTR);
+
+ spin_lock_init(&lan966x->tx_lock);
}
static int lan966x_ram_init(struct lan966x *lan966x)
@@ -897,6 +1006,17 @@ static int lan966x_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, err, "Unable to use ana irq");
}
+ lan966x->ptp_irq = platform_get_irq_byname(pdev, "ptp");
+ if (lan966x->ptp_irq > 0) {
+ err = devm_request_threaded_irq(&pdev->dev, lan966x->ptp_irq, NULL,
+ lan966x_ptp_irq_handler, IRQF_ONESHOT,
+ "ptp irq", lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "Unable to use ptp irq");
+
+ lan966x->ptp = 1;
+ }
+
/* init switch */
lan966x_init(lan966x);
lan966x_stats_init(lan966x);
@@ -931,8 +1051,15 @@ static int lan966x_probe(struct platform_device *pdev)
if (err)
goto cleanup_ports;
+ err = lan966x_ptp_init(lan966x);
+ if (err)
+ goto cleanup_fdb;
+
return 0;
+cleanup_fdb:
+ lan966x_fdb_deinit(lan966x);
+
cleanup_ports:
fwnode_handle_put(portnp);
@@ -958,6 +1085,7 @@ static int lan966x_remove(struct platform_device *pdev)
lan966x_mac_purge_entries(lan966x);
lan966x_mdb_deinit(lan966x);
lan966x_fdb_deinit(lan966x);
+ lan966x_ptp_deinit(lan966x);
return 0;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index 99c6d0a9f946..ae282da1da74 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -8,6 +8,7 @@
#include <linux/jiffies.h>
#include <linux/phy.h>
#include <linux/phylink.h>
+#include <linux/ptp_clock_kernel.h>
#include <net/switchdev.h>
#include "lan966x_regs.h"
@@ -50,6 +51,13 @@
#define LAN966X_SPEED_100 2
#define LAN966X_SPEED_10 3
+#define LAN966X_PHC_COUNT 3
+#define LAN966X_PHC_PORT 0
+
+#define IFH_REW_OP_NOOP 0x0
+#define IFH_REW_OP_ONE_STEP_PTP 0x3
+#define IFH_REW_OP_TWO_STEP_PTP 0x4
+
/* MAC table entry types.
* ENTRYTYPE_NORMAL is subject to aging.
* ENTRYTYPE_LOCKED is not subject to aging.
@@ -70,6 +78,24 @@ struct lan966x_stat_layout {
char name[ETH_GSTRING_LEN];
};
+struct lan966x_phc {
+ struct ptp_clock *clock;
+ struct ptp_clock_info info;
+ struct hwtstamp_config hwtstamp_config;
+ struct lan966x *lan966x;
+ u8 index;
+};
+
+struct lan966x_skb_cb {
+ u8 rew_op;
+ u16 ts_id;
+ unsigned long jiffies;
+};
+
+#define LAN966X_PTP_TIMEOUT msecs_to_jiffies(10)
+#define LAN966X_SKB_CB(skb) \
+ ((struct lan966x_skb_cb *)((skb)->cb))
+
struct lan966x {
struct device *dev;
@@ -82,6 +108,8 @@ struct lan966x {
u8 base_mac[ETH_ALEN];
+	spinlock_t tx_lock; /* lock for frame transmission */
+
struct net_device *bridge;
u16 bridge_mask;
u16 bridge_fwd_mask;
@@ -105,6 +133,7 @@ struct lan966x {
/* interrupts */
int xtr_irq;
int ana_irq;
+ int ptp_irq;
/* worqueue for fdb */
struct workqueue_struct *fdb_work;
@@ -113,6 +142,14 @@ struct lan966x {
/* mdb */
struct list_head mdb_entries;
struct list_head pgid_entries;
+
+ /* ptp */
+ bool ptp;
+ struct lan966x_phc phc[LAN966X_PHC_COUNT];
+ spinlock_t ptp_clock_lock; /* lock for phc */
+ spinlock_t ptp_ts_id_lock; /* lock for ts_id */
+ struct mutex ptp_lock; /* lock for ptp interface state */
+ u16 ptp_skbs;
};
struct lan966x_port_config {
@@ -135,6 +172,7 @@ struct lan966x_port {
bool vlan_aware;
bool learn_ena;
+ bool mcast_ena;
struct phylink_config phylink_config;
struct phylink_pcs phylink_pcs;
@@ -142,6 +180,10 @@ struct lan966x_port {
struct phylink *phylink;
struct phy *serdes;
struct fwnode_handle *fwnode;
+
+ u8 ptp_cmd;
+ u16 ts_id;
+ struct sk_buff_head tx_skbs;
};
extern const struct phylink_mac_ops lan966x_phylink_mac_ops;
@@ -227,6 +269,20 @@ int lan966x_handle_port_mdb_del(struct lan966x_port *port,
const struct switchdev_obj *obj);
void lan966x_mdb_erase_entries(struct lan966x *lan966x, u16 vid);
void lan966x_mdb_write_entries(struct lan966x *lan966x, u16 vid);
+void lan966x_mdb_clear_entries(struct lan966x *lan966x);
+void lan966x_mdb_restore_entries(struct lan966x *lan966x);
+
+int lan966x_ptp_init(struct lan966x *lan966x);
+void lan966x_ptp_deinit(struct lan966x *lan966x);
+int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr);
+int lan966x_ptp_hwtstamp_get(struct lan966x_port *port, struct ifreq *ifr);
+void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb,
+ u64 timestamp);
+int lan966x_ptp_txtstamp_request(struct lan966x_port *port,
+ struct sk_buff *skb);
+void lan966x_ptp_txtstamp_release(struct lan966x_port *port,
+ struct sk_buff *skb);
+irqreturn_t lan966x_ptp_irq_handler(int irq, void *args);
static inline void __iomem *lan_addr(void __iomem *base[],
int id, int tinst, int tcnt,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c
index c68d0a99d292..2af55268bf4d 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c
@@ -504,3 +504,48 @@ void lan966x_mdb_erase_entries(struct lan966x *lan966x, u16 vid)
lan966x_mdb_l2_cpu_remove(lan966x, mdb_entry, type);
}
}
+
+void lan966x_mdb_clear_entries(struct lan966x *lan966x)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+ enum macaccess_entry_type type;
+ unsigned char mac[ETH_ALEN];
+
+ list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+ type = lan966x_mdb_classify(mdb_entry->mac);
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+		/* Remove just the MAC entry, but keep the PGID in case of L2
+		 * entries because it can be restored at a later point
+ */
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ }
+}
+
+void lan966x_mdb_restore_entries(struct lan966x *lan966x)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+ enum macaccess_entry_type type;
+ unsigned char mac[ETH_ALEN];
+ bool cpu_copy = false;
+
+ list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+ type = lan966x_mdb_classify(mdb_entry->mac);
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6) {
+ /* Copy the frame to CPU only if the CPU is in the VLAN */
+ if (lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
+ mdb_entry->vid) &&
+ mdb_entry->cpu_copy)
+ cpu_copy = true;
+
+ lan966x_mac_ip_learn(lan966x, cpu_copy, mac,
+ mdb_entry->vid, type);
+ } else {
+ lan966x_mac_learn(lan966x, mdb_entry->pgid->index,
+ mdb_entry->mac,
+ mdb_entry->vid, type);
+ }
+ }
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
index b66a9aa00ea4..38a7e95d69b4 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
@@ -9,6 +9,14 @@
#include "lan966x_main.h"
+static struct phylink_pcs *lan966x_phylink_mac_select(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct lan966x_port *port = netdev_priv(to_net_dev(config->dev));
+
+ return &port->phylink_pcs;
+}
+
static void lan966x_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
@@ -114,6 +122,7 @@ static void lan966x_pcs_aneg_restart(struct phylink_pcs *pcs)
const struct phylink_mac_ops lan966x_phylink_mac_ops = {
.validate = phylink_generic_validate,
+ .mac_select_pcs = lan966x_phylink_mac_select,
.mac_config = lan966x_phylink_mac_config,
.mac_prepare = lan966x_phylink_mac_prepare,
.mac_link_down = lan966x_phylink_mac_link_down,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
new file mode 100644
index 000000000000..ae782778d6dd
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
@@ -0,0 +1,618 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/ptp_classify.h>
+
+#include "lan966x_main.h"
+
+#define LAN966X_MAX_PTP_ID 512
+
+/* Represents 1ppm adjustment in 2^59 format with 6.037735849ns as reference
+ * The value is calculated as follows: (1/1000000)/((2^-59)/6.037735849)
+ */
+#define LAN966X_1PPM_FORMAT 3480517749723LL
+
+/* Represents 1ppb adjustment in 2^59 format with 6.037735849ns as reference
+ * The value is calculated as follows: (1/1000000000)/((2^-59)/6.037735849)
+ */
+#define LAN966X_1PPB_FORMAT 3480517749LL
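+
+/* Illustrative arithmetic for the two constants above (not from the
+ * datasheet): with a 6.037735849ns reference and 59 fractional bits,
+ * 1e-6 * 6.037735849 * 2^59 ~= 3480517749723 (1ppm) and
+ * 1e-9 * 6.037735849 * 2^59 ~= 3480517749 (1ppb).
+ */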
+
+#define TOD_ACC_PIN 0x5
+
+enum {
+ PTP_PIN_ACTION_IDLE = 0,
+ PTP_PIN_ACTION_LOAD,
+ PTP_PIN_ACTION_SAVE,
+ PTP_PIN_ACTION_CLOCK,
+ PTP_PIN_ACTION_DELTA,
+ PTP_PIN_ACTION_TOD
+};
+
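+/* The nominal TOD increment below (0x304d2df1 << 32) is in the same 2^59
+ * fixed-point format and corresponds to ~6.037735849ns per clock cycle,
+ * i.e. roughly a 165.625MHz reference clock (derived here for illustration).
+ */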
+static u64 lan966x_ptp_get_nominal_value(void)
+{
+ u64 res = 0x304d2df1;
+
+ res <<= 32;
+ return res;
+}
+
+int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct hwtstamp_config cfg;
+ struct lan966x_phc *phc;
+
+ /* For now, don't allow PTP to run on ports that are part of a bridge,
+ * because in the case of a transparent clock the HW would still forward
+ * the frames, so there would be duplicate frames
+ */
+ if (lan966x->bridge_mask & BIT(port->chip_port))
+ return -EINVAL;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_ON:
+ port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ port->ptp_cmd = IFH_REW_OP_ONE_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_OFF:
+ port->ptp_cmd = IFH_REW_OP_NOOP;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
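+ /* RX timestamping is done on all frames (see lan966x_ptp_rxtstamp),
+ * so any PTP filter request is reported back as HWTSTAMP_FILTER_ALL
+ */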
+ cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* Commit back the result & save it */
+ mutex_lock(&lan966x->ptp_lock);
+ phc = &lan966x->phc[LAN966X_PHC_PORT];
+ memcpy(&phc->hwtstamp_config, &cfg, sizeof(cfg));
+ mutex_unlock(&lan966x->ptp_lock);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+int lan966x_ptp_hwtstamp_get(struct lan966x_port *port, struct ifreq *ifr)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_phc *phc;
+
+ phc = &lan966x->phc[LAN966X_PHC_PORT];
+ return copy_to_user(ifr->ifr_data, &phc->hwtstamp_config,
+ sizeof(phc->hwtstamp_config)) ? -EFAULT : 0;
+}
+
+static int lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb)
+{
+ struct ptp_header *header;
+ u8 msgtype;
+ int type;
+
+ if (port->ptp_cmd == IFH_REW_OP_NOOP)
+ return IFH_REW_OP_NOOP;
+
+ type = ptp_classify_raw(skb);
+ if (type == PTP_CLASS_NONE)
+ return IFH_REW_OP_NOOP;
+
+ header = ptp_parse_header(skb, type);
+ if (!header)
+ return IFH_REW_OP_NOOP;
+
+ if (port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP)
+ return IFH_REW_OP_TWO_STEP_PTP;
+
+ /* If it is a Sync message and one-step is configured, use the one-step
+ * operation, otherwise fall back to two-step
+ */
+ msgtype = ptp_get_msgtype(header, type);
+ if ((msgtype & 0xf) == 0)
+ return IFH_REW_OP_ONE_STEP_PTP;
+
+ return IFH_REW_OP_TWO_STEP_PTP;
+}
+
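+/* Drop two-step skbs that have waited longer than LAN966X_PTP_TIMEOUT for a
+ * timestamp from the HW, so the tx_skbs queue cannot grow without bound.
+ */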
+static void lan966x_ptp_txtstamp_old_release(struct lan966x_port *port)
+{
+ struct sk_buff *skb, *skb_tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+ if (time_after(LAN966X_SKB_CB(skb)->jiffies + LAN966X_PTP_TIMEOUT,
+ jiffies))
+ break;
+
+ __skb_unlink(skb, &port->tx_skbs);
+ dev_kfree_skb_any(skb);
+ }
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+}
+
+int lan966x_ptp_txtstamp_request(struct lan966x_port *port,
+ struct sk_buff *skb)
+{
+ struct lan966x *lan966x = port->lan966x;
+ unsigned long flags;
+ u8 rew_op;
+
+ rew_op = lan966x_ptp_classify(port, skb);
+ LAN966X_SKB_CB(skb)->rew_op = rew_op;
+
+ if (rew_op != IFH_REW_OP_TWO_STEP_PTP)
+ return 0;
+
+ lan966x_ptp_txtstamp_old_release(port);
+
+ spin_lock_irqsave(&lan966x->ptp_ts_id_lock, flags);
+ if (lan966x->ptp_skbs == LAN966X_MAX_PTP_ID) {
+ spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags);
+ return -EBUSY;
+ }
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ skb_queue_tail(&port->tx_skbs, skb);
+ LAN966X_SKB_CB(skb)->ts_id = port->ts_id;
+ LAN966X_SKB_CB(skb)->jiffies = jiffies;
+
+ lan966x->ptp_skbs++;
+ port->ts_id++;
+ if (port->ts_id == LAN966X_MAX_PTP_ID)
+ port->ts_id = 0;
+
+ spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags);
+
+ return 0;
+}
+
+void lan966x_ptp_txtstamp_release(struct lan966x_port *port,
+ struct sk_buff *skb)
+{
+ struct lan966x *lan966x = port->lan966x;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lan966x->ptp_ts_id_lock, flags);
+ port->ts_id--;
+ lan966x->ptp_skbs--;
+ skb_unlink(skb, &port->tx_skbs);
+ spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags);
+}
+
+static void lan966x_get_hwtimestamp(struct lan966x *lan966x,
+ struct timespec64 *ts,
+ u32 nsec)
+{
+ /* Read current PTP time to get seconds */
+ unsigned long flags;
+ u32 curr_nsec;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
+ PTP_PIN_CFG_PIN_DOM_SET(LAN966X_PHC_PORT) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ ts->tv_sec = lan_rd(lan966x, PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ curr_nsec = lan_rd(lan966x, PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ ts->tv_nsec = nsec;
+
+ /* Sec has incremented since the ts was registered */
+ if (curr_nsec < nsec)
+ ts->tv_sec--;
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+}
+
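+/* Two-step TX timestamps are read out of a FIFO: the first entry of a pair
+ * carries the TX port and the nanosecond timestamp, the next entry carries
+ * the timestamp ID used to match a queued skb, and PTP_TWOSTEP_CTRL_NXT
+ * advances to the next entry (see the handling below).
+ */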
+irqreturn_t lan966x_ptp_irq_handler(int irq, void *args)
+{
+ int budget = LAN966X_MAX_PTP_ID;
+ struct lan966x *lan966x = args;
+
+ while (budget--) {
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct lan966x_port *port;
+ struct timespec64 ts;
+ unsigned long flags;
+ u32 val, id, txport;
+ u32 delay;
+
+ val = lan_rd(lan966x, PTP_TWOSTEP_CTRL);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & PTP_TWOSTEP_CTRL_VLD))
+ break;
+
+ WARN_ON(val & PTP_TWOSTEP_CTRL_OVFL);
+
+ if (!(val & PTP_TWOSTEP_CTRL_STAMP_TX))
+ continue;
+
+ /* Retrieve the ts Tx port */
+ txport = PTP_TWOSTEP_CTRL_STAMP_PORT_GET(val);
+
+ /* Retrieve its associated skb */
+ port = lan966x->ports[txport];
+
+ /* Retrieve the delay */
+ delay = lan_rd(lan966x, PTP_TWOSTEP_STAMP);
+ delay = PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(delay);
+
+ /* Get the next timestamp from the FIFO; it must be the RX
+ * timestamp, which carries the ID of the frame
+ */
+ lan_rmw(PTP_TWOSTEP_CTRL_NXT_SET(1),
+ PTP_TWOSTEP_CTRL_NXT,
+ lan966x, PTP_TWOSTEP_CTRL);
+
+ val = lan_rd(lan966x, PTP_TWOSTEP_CTRL);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & PTP_TWOSTEP_CTRL_VLD))
+ break;
+
+ /* Read RX timestamping to get the ID */
+ id = lan_rd(lan966x, PTP_TWOSTEP_STAMP);
+
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+ if (LAN966X_SKB_CB(skb)->ts_id != id)
+ continue;
+
+ __skb_unlink(skb, &port->tx_skbs);
+ skb_match = skb;
+ break;
+ }
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+
+ /* Next ts */
+ lan_rmw(PTP_TWOSTEP_CTRL_NXT_SET(1),
+ PTP_TWOSTEP_CTRL_NXT,
+ lan966x, PTP_TWOSTEP_CTRL);
+
+ if (WARN_ON(!skb_match))
+ continue;
+
+ spin_lock(&lan966x->ptp_ts_id_lock);
+ lan966x->ptp_skbs--;
+ spin_unlock(&lan966x->ptp_ts_id_lock);
+
+ /* Get the h/w timestamp */
+ lan966x_get_hwtimestamp(lan966x, &ts, delay);
+
+ /* Set the timestamp into the skb */
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_tstamp_tx(skb_match, &shhwtstamps);
+
+ dev_kfree_skb_any(skb_match);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int lan966x_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ unsigned long flags;
+ bool neg_adj = false;
+ u64 tod_inc;
+ u64 ref;
+
+ if (!scaled_ppm)
+ return 0;
+
+ if (scaled_ppm < 0) {
+ neg_adj = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ tod_inc = lan966x_ptp_get_nominal_value();
+
+ /* The multiplication is split into two separate additions because of
+ * overflow issues: with the 16-bit fractional part included, a
+ * scaled_ppm larger than about 20ppm would otherwise overflow.
+ */
+ ref = LAN966X_1PPM_FORMAT * (scaled_ppm >> 16);
+ ref += (LAN966X_1PPM_FORMAT * (0xffff & scaled_ppm)) >> 16;
+ tod_inc = neg_adj ? tod_inc - ref : tod_inc + ref;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(BIT(phc->index)),
+ PTP_DOM_CFG_CLKCFG_DIS,
+ lan966x, PTP_DOM_CFG);
+
+ lan_wr((u32)tod_inc & 0xFFFFFFFF, lan966x,
+ PTP_CLK_PER_CFG(phc->index, 0));
+ lan_wr((u32)(tod_inc >> 32), lan966x,
+ PTP_CLK_PER_CFG(phc->index, 1));
+
+ lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(0),
+ PTP_DOM_CFG_CLKCFG_DIS,
+ lan966x, PTP_DOM_CFG);
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ return 0;
+}
+
+static int lan966x_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ /* Must be in IDLE mode before the time can be loaded */
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ /* Set new value */
+ lan_wr(PTP_TOD_SEC_MSB_TOD_SEC_MSB_SET(upper_32_bits(ts->tv_sec)),
+ lan966x, PTP_TOD_SEC_MSB(TOD_ACC_PIN));
+ lan_wr(lower_32_bits(ts->tv_sec),
+ lan966x, PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ lan_wr(ts->tv_nsec, lan966x, PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ /* Apply new values */
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_LOAD) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ return 0;
+}
+
+static int lan966x_ptp_gettime64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+ unsigned long flags;
+ time64_t s;
+ s64 ns;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ s = lan_rd(lan966x, PTP_TOD_SEC_MSB(TOD_ACC_PIN));
+ s <<= 32;
+ s |= lan_rd(lan966x, PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ ns = lan_rd(lan966x, PTP_TOD_NSEC(TOD_ACC_PIN));
+ ns &= PTP_TOD_NSEC_TOD_NSEC;
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+
+ /* Deal with negative values */
+ if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) {
+ s--;
+ ns &= 0xf;
+ ns += 999999984;
+ }
+
+ set_normalized_timespec64(ts, s, ns);
+ return 0;
+}
+
+static int lan966x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
+ struct lan966x *lan966x = phc->lan966x;
+
+ if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&lan966x->ptp_clock_lock, flags);
+
+ /* Must be in IDLE mode before the time can be loaded */
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ lan_wr(PTP_TOD_NSEC_TOD_NSEC_SET(delta),
+ lan966x, PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ /* Adjust time with the value of PTP_TOD_NSEC */
+ lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_DELTA) |
+ PTP_PIN_CFG_PIN_DOM_SET(phc->index) |
+ PTP_PIN_CFG_PIN_SYNC_SET(0),
+ PTP_PIN_CFG_PIN_ACTION |
+ PTP_PIN_CFG_PIN_DOM |
+ PTP_PIN_CFG_PIN_SYNC,
+ lan966x, PTP_PIN_CFG(TOD_ACC_PIN));
+
+ spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags);
+ } else {
+ /* Fall back to using lan966x_ptp_settime64, which is not exact */
+ struct timespec64 ts;
+ u64 now;
+
+ lan966x_ptp_gettime64(ptp, &ts);
+
+ now = ktime_to_ns(timespec64_to_ktime(ts));
+ ts = ns_to_timespec64(now + delta);
+
+ lan966x_ptp_settime64(ptp, &ts);
+ }
+
+ return 0;
+}
+
+static struct ptp_clock_info lan966x_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "lan966x ptp",
+ .max_adj = 200000,
+ .gettime64 = lan966x_ptp_gettime64,
+ .settime64 = lan966x_ptp_settime64,
+ .adjtime = lan966x_ptp_adjtime,
+ .adjfine = lan966x_ptp_adjfine,
+};
+
+static int lan966x_ptp_phc_init(struct lan966x *lan966x,
+ int index,
+ struct ptp_clock_info *clock_info)
+{
+ struct lan966x_phc *phc = &lan966x->phc[index];
+
+ phc->info = *clock_info;
+ phc->clock = ptp_clock_register(&phc->info, lan966x->dev);
+ if (IS_ERR(phc->clock))
+ return PTR_ERR(phc->clock);
+
+ phc->index = index;
+ phc->lan966x = lan966x;
+
+ /* PTP Rx stamping is always enabled. */
+ phc->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+
+ return 0;
+}
+
+int lan966x_ptp_init(struct lan966x *lan966x)
+{
+ u64 tod_adj = lan966x_ptp_get_nominal_value();
+ struct lan966x_port *port;
+ int err, i;
+
+ if (!lan966x->ptp)
+ return 0;
+
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i) {
+ err = lan966x_ptp_phc_init(lan966x, i, &lan966x_ptp_clock_info);
+ if (err)
+ return err;
+ }
+
+ spin_lock_init(&lan966x->ptp_clock_lock);
+ spin_lock_init(&lan966x->ptp_ts_id_lock);
+ mutex_init(&lan966x->ptp_lock);
+
+ /* Disable master counters */
+ lan_wr(PTP_DOM_CFG_ENA_SET(0), lan966x, PTP_DOM_CFG);
+
+ /* Configure the nominal TOD increment per clock cycle */
+ lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(0x7),
+ PTP_DOM_CFG_CLKCFG_DIS,
+ lan966x, PTP_DOM_CFG);
+
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i) {
+ lan_wr((u32)tod_adj & 0xFFFFFFFF, lan966x,
+ PTP_CLK_PER_CFG(i, 0));
+ lan_wr((u32)(tod_adj >> 32), lan966x,
+ PTP_CLK_PER_CFG(i, 1));
+ }
+
+ lan_rmw(PTP_DOM_CFG_CLKCFG_DIS_SET(0),
+ PTP_DOM_CFG_CLKCFG_DIS,
+ lan966x, PTP_DOM_CFG);
+
+ /* Enable master counters */
+ lan_wr(PTP_DOM_CFG_ENA_SET(0x7), lan966x, PTP_DOM_CFG);
+
+ for (i = 0; i < lan966x->num_phys_ports; i++) {
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ skb_queue_head_init(&port->tx_skbs);
+ }
+
+ return 0;
+}
+
+void lan966x_ptp_deinit(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ int i;
+
+ for (i = 0; i < lan966x->num_phys_ports; i++) {
+ port = lan966x->ports[i];
+ if (!port)
+ continue;
+
+ skb_queue_purge(&port->tx_skbs);
+ }
+
+ for (i = 0; i < LAN966X_PHC_COUNT; ++i)
+ ptp_clock_unregister(lan966x->phc[i].clock);
+}
+
+void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb,
+ u64 timestamp)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct lan966x_phc *phc;
+ struct timespec64 ts;
+ u64 full_ts_in_ns;
+
+ if (!lan966x->ptp)
+ return;
+
+ phc = &lan966x->phc[LAN966X_PHC_PORT];
+ lan966x_ptp_gettime64(&phc->info, &ts);
+
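+ /* The HW delivers only the nanosecond part of the RX timestamp, so
+ * rebuild the full time from the current PHC seconds; if the
+ * nanoseconds already wrapped, the frame was stamped in the previous
+ * second.
+ */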
+ /* Drop the sub-ns precision */
+ timestamp = timestamp >> 2;
+ if (ts.tv_nsec < timestamp)
+ ts.tv_sec--;
+ ts.tv_nsec = timestamp;
+ full_ts_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
+
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = full_ts_in_ns;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
index 797560172aca..0c0b3e173d53 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
@@ -19,6 +19,7 @@ enum lan966x_target {
TARGET_DEV = 13,
TARGET_GCB = 27,
TARGET_ORG = 36,
+ TARGET_PTP = 41,
TARGET_QS = 42,
TARGET_QSYS = 46,
TARGET_REW = 47,
@@ -298,6 +299,24 @@ enum lan966x_target {
/* ANA:PORT:CPU_FWD_CFG */
#define ANA_CPU_FWD_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 96, 0, 1, 4)
+#define ANA_CPU_FWD_CFG_MLD_REDIR_ENA BIT(6)
+#define ANA_CPU_FWD_CFG_MLD_REDIR_ENA_SET(x)\
+ FIELD_PREP(ANA_CPU_FWD_CFG_MLD_REDIR_ENA, x)
+#define ANA_CPU_FWD_CFG_MLD_REDIR_ENA_GET(x)\
+ FIELD_GET(ANA_CPU_FWD_CFG_MLD_REDIR_ENA, x)
+
+#define ANA_CPU_FWD_CFG_IGMP_REDIR_ENA BIT(5)
+#define ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_SET(x)\
+ FIELD_PREP(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA, x)
+#define ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_GET(x)\
+ FIELD_GET(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA, x)
+
+#define ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA BIT(4)
+#define ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_SET(x)\
+ FIELD_PREP(ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA, x)
+#define ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_GET(x)\
+ FIELD_GET(ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA, x)
+
#define ANA_CPU_FWD_CFG_SRC_COPY_ENA BIT(3)
#define ANA_CPU_FWD_CFG_SRC_COPY_ENA_SET(x)\
FIELD_PREP(ANA_CPU_FWD_CFG_SRC_COPY_ENA, x)
@@ -559,6 +578,108 @@ enum lan966x_target {
#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY_GET(x)\
FIELD_GET(DEV_PCS1G_STICKY_LINK_DOWN_STICKY, x)
+/* PTP:PTP_CFG:PTP_DOM_CFG */
+#define PTP_DOM_CFG __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 12, 0, 1, 4)
+
+#define PTP_DOM_CFG_ENA GENMASK(11, 9)
+#define PTP_DOM_CFG_ENA_SET(x)\
+ FIELD_PREP(PTP_DOM_CFG_ENA, x)
+#define PTP_DOM_CFG_ENA_GET(x)\
+ FIELD_GET(PTP_DOM_CFG_ENA, x)
+
+#define PTP_DOM_CFG_CLKCFG_DIS GENMASK(2, 0)
+#define PTP_DOM_CFG_CLKCFG_DIS_SET(x)\
+ FIELD_PREP(PTP_DOM_CFG_CLKCFG_DIS, x)
+#define PTP_DOM_CFG_CLKCFG_DIS_GET(x)\
+ FIELD_GET(PTP_DOM_CFG_CLKCFG_DIS, x)
+
+/* PTP:PTP_TOD_DOMAINS:CLK_PER_CFG */
+#define PTP_CLK_PER_CFG(g, r) __REG(TARGET_PTP, 0, 1, 528, g, 3, 28, 0, r, 2, 4)
+
+/* PTP:PTP_PINS:PTP_PIN_CFG */
+#define PTP_PIN_CFG(g) __REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 0, 0, 1, 4)
+
+#define PTP_PIN_CFG_PIN_ACTION GENMASK(29, 27)
+#define PTP_PIN_CFG_PIN_ACTION_SET(x)\
+ FIELD_PREP(PTP_PIN_CFG_PIN_ACTION, x)
+#define PTP_PIN_CFG_PIN_ACTION_GET(x)\
+ FIELD_GET(PTP_PIN_CFG_PIN_ACTION, x)
+
+#define PTP_PIN_CFG_PIN_SYNC GENMASK(26, 25)
+#define PTP_PIN_CFG_PIN_SYNC_SET(x)\
+ FIELD_PREP(PTP_PIN_CFG_PIN_SYNC, x)
+#define PTP_PIN_CFG_PIN_SYNC_GET(x)\
+ FIELD_GET(PTP_PIN_CFG_PIN_SYNC, x)
+
+#define PTP_PIN_CFG_PIN_DOM GENMASK(17, 16)
+#define PTP_PIN_CFG_PIN_DOM_SET(x)\
+ FIELD_PREP(PTP_PIN_CFG_PIN_DOM, x)
+#define PTP_PIN_CFG_PIN_DOM_GET(x)\
+ FIELD_GET(PTP_PIN_CFG_PIN_DOM, x)
+
+/* PTP:PTP_PINS:PTP_TOD_SEC_MSB */
+#define PTP_TOD_SEC_MSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 4, 0, 1, 4)
+
+#define PTP_TOD_SEC_MSB_TOD_SEC_MSB GENMASK(15, 0)
+#define PTP_TOD_SEC_MSB_TOD_SEC_MSB_SET(x)\
+ FIELD_PREP(PTP_TOD_SEC_MSB_TOD_SEC_MSB, x)
+#define PTP_TOD_SEC_MSB_TOD_SEC_MSB_GET(x)\
+ FIELD_GET(PTP_TOD_SEC_MSB_TOD_SEC_MSB, x)
+
+/* PTP:PTP_PINS:PTP_TOD_SEC_LSB */
+#define PTP_TOD_SEC_LSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 8, 0, 1, 4)
+
+/* PTP:PTP_PINS:PTP_TOD_NSEC */
+#define PTP_TOD_NSEC(g) __REG(TARGET_PTP, 0, 1, 0, g, 8, 64, 12, 0, 1, 4)
+
+#define PTP_TOD_NSEC_TOD_NSEC GENMASK(29, 0)
+#define PTP_TOD_NSEC_TOD_NSEC_SET(x)\
+ FIELD_PREP(PTP_TOD_NSEC_TOD_NSEC, x)
+#define PTP_TOD_NSEC_TOD_NSEC_GET(x)\
+ FIELD_GET(PTP_TOD_NSEC_TOD_NSEC, x)
+
+/* PTP:PTP_TS_FIFO:PTP_TWOSTEP_CTRL */
+#define PTP_TWOSTEP_CTRL __REG(TARGET_PTP, 0, 1, 612, 0, 1, 12, 0, 0, 1, 4)
+
+#define PTP_TWOSTEP_CTRL_NXT BIT(11)
+#define PTP_TWOSTEP_CTRL_NXT_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_NXT, x)
+#define PTP_TWOSTEP_CTRL_NXT_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_NXT, x)
+
+#define PTP_TWOSTEP_CTRL_VLD BIT(10)
+#define PTP_TWOSTEP_CTRL_VLD_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_VLD, x)
+#define PTP_TWOSTEP_CTRL_VLD_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_VLD, x)
+
+#define PTP_TWOSTEP_CTRL_STAMP_TX BIT(9)
+#define PTP_TWOSTEP_CTRL_STAMP_TX_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_STAMP_TX, x)
+#define PTP_TWOSTEP_CTRL_STAMP_TX_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_STAMP_TX, x)
+
+#define PTP_TWOSTEP_CTRL_STAMP_PORT GENMASK(8, 1)
+#define PTP_TWOSTEP_CTRL_STAMP_PORT_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_STAMP_PORT, x)
+#define PTP_TWOSTEP_CTRL_STAMP_PORT_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_STAMP_PORT, x)
+
+#define PTP_TWOSTEP_CTRL_OVFL BIT(0)
+#define PTP_TWOSTEP_CTRL_OVFL_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_OVFL, x)
+#define PTP_TWOSTEP_CTRL_OVFL_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_OVFL, x)
+
+/* PTP:PTP_TS_FIFO:PTP_TWOSTEP_STAMP */
+#define PTP_TWOSTEP_STAMP __REG(TARGET_PTP, 0, 1, 612, 0, 1, 12, 4, 0, 1, 4)
+
+#define PTP_TWOSTEP_STAMP_STAMP_NSEC GENMASK(31, 2)
+#define PTP_TWOSTEP_STAMP_STAMP_NSEC_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_STAMP_STAMP_NSEC, x)
+#define PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_STAMP_STAMP_NSEC, x)
+
/* DEVCPU_QS:XTR:XTR_GRP_CFG */
#define QS_XTR_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 0, r, 2, 4)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
index 7de55f6a4da8..e3555c94294d 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
@@ -9,6 +9,37 @@ static struct notifier_block lan966x_netdevice_nb __read_mostly;
static struct notifier_block lan966x_switchdev_nb __read_mostly;
static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly;
+static void lan966x_port_set_mcast_ip_flood(struct lan966x_port *port,
+ u32 pgid_ip)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 flood_mask_ip;
+
+ flood_mask_ip = lan_rd(lan966x, ANA_PGID(pgid_ip));
+ flood_mask_ip = ANA_PGID_PGID_GET(flood_mask_ip);
+
+ /* If mcast snooping is not enabled, then use the mcast flood mask
+ * to decide whether to enable multicast flooding or not.
+ */
+ if (!port->mcast_ena) {
+ u32 flood_mask;
+
+ flood_mask = lan_rd(lan966x, ANA_PGID(PGID_MC));
+ flood_mask = ANA_PGID_PGID_GET(flood_mask);
+
+ if (flood_mask & BIT(port->chip_port))
+ flood_mask_ip |= BIT(port->chip_port);
+ else
+ flood_mask_ip &= ~BIT(port->chip_port);
+ } else {
+ flood_mask_ip &= ~BIT(port->chip_port);
+ }
+
+ lan_rmw(ANA_PGID_PGID_SET(flood_mask_ip),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(pgid_ip));
+}
+
static void lan966x_port_set_mcast_flood(struct lan966x_port *port,
bool enabled)
{
@@ -23,6 +54,11 @@ static void lan966x_port_set_mcast_flood(struct lan966x_port *port,
lan_rmw(ANA_PGID_PGID_SET(val),
ANA_PGID_PGID,
port->lan966x, ANA_PGID(PGID_MC));
+
+ if (!port->mcast_ena) {
+ lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4);
+ lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6);
+ }
}
static void lan966x_port_set_ucast_flood(struct lan966x_port *port,
@@ -144,6 +180,28 @@ static void lan966x_port_ageing_set(struct lan966x_port *port,
lan966x_mac_set_ageing(port->lan966x, ageing_time);
}
+static void lan966x_port_mc_set(struct lan966x_port *port, bool mcast_ena)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ port->mcast_ena = mcast_ena;
+ if (mcast_ena)
+ lan966x_mdb_restore_entries(lan966x);
+ else
+ lan966x_mdb_clear_entries(lan966x);
+
+ lan_rmw(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_SET(mcast_ena) |
+ ANA_CPU_FWD_CFG_MLD_REDIR_ENA_SET(mcast_ena) |
+ ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_SET(mcast_ena),
+ ANA_CPU_FWD_CFG_IGMP_REDIR_ENA |
+ ANA_CPU_FWD_CFG_MLD_REDIR_ENA |
+ ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA,
+ lan966x, ANA_CPU_FWD_CFG(port->chip_port));
+
+ lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV4);
+ lan966x_port_set_mcast_ip_flood(port, PGID_MCIPV6);
+}
+
static int lan966x_port_attr_set(struct net_device *dev, const void *ctx,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
@@ -171,6 +229,9 @@ static int lan966x_port_attr_set(struct net_device *dev, const void *ctx,
lan966x_vlan_port_set_vlan_aware(port, attr->u.vlan_filtering);
lan966x_vlan_port_apply(port);
break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
+ lan966x_port_mc_set(port, !attr->u.mc_disabled);
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -358,6 +419,9 @@ static int lan966x_netdevice_event(struct notifier_block *nb,
return notifier_from_errno(ret);
}
+/* We don't offload uppers such as LAG as bridge ports, so every device except
+ * the bridge itself is foreign.
+ */
static bool lan966x_foreign_dev_check(const struct net_device *dev,
const struct net_device *foreign_dev)
{
@@ -365,10 +429,10 @@ static bool lan966x_foreign_dev_check(const struct net_device *dev,
struct lan966x *lan966x = port->lan966x;
if (netif_is_bridge_master(foreign_dev))
- if (lan966x->bridge != foreign_dev)
- return true;
+ if (lan966x->bridge == foreign_dev)
+ return false;
- return false;
+ return true;
}
static int lan966x_switchdev_event(struct notifier_block *nb,
@@ -388,8 +452,7 @@ static int lan966x_switchdev_event(struct notifier_block *nb,
err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
lan966x_netdevice_check,
lan966x_foreign_dev_check,
- lan966x_handle_fdb,
- NULL);
+ lan966x_handle_fdb);
return notifier_from_errno(err);
}
@@ -402,18 +465,6 @@ static int lan966x_handle_port_vlan_add(struct lan966x_port *port,
const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
struct lan966x *lan966x = port->lan966x;
- /* When adding a port to a vlan, we get a callback for the port but
- * also for the bridge. When get the callback for the bridge just bail
- * out. Then when the bridge is added to the vlan, then we get a
- * callback here but in this case the flags has set:
- * BRIDGE_VLAN_INFO_BRENTRY. In this case it means that the CPU
- * port is added to the vlan, so the broadcast frames and unicast frames
- * with dmac of the bridge should be foward to CPU.
- */
- if (netif_is_bridge_master(obj->orig_dev) &&
- !(v->flags & BRIDGE_VLAN_INFO_BRENTRY))
- return 0;
-
if (!netif_is_bridge_master(obj->orig_dev))
lan966x_vlan_port_add_vlan(port, v->vid,
v->flags & BRIDGE_VLAN_INFO_PVID,
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile
index c271e86ee292..4402c3ed1dc5 100644
--- a/drivers/net/ethernet/microchip/sparx5/Makefile
+++ b/drivers/net/ethernet/microchip/sparx5/Makefile
@@ -7,4 +7,5 @@ obj-$(CONFIG_SPARX5_SWITCH) += sparx5-switch.o
sparx5-switch-objs := sparx5_main.o sparx5_packet.o \
sparx5_netdev.o sparx5_phylink.o sparx5_port.o sparx5_mactable.o sparx5_vlan.o \
- sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o
+ sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o \
+ sparx5_ptp.o sparx5_pgid.o
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
index 10b866e9f726..6b0febcb7fa9 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
@@ -1183,6 +1183,39 @@ static void sparx5_config_port_stats(struct sparx5 *sparx5, int portno)
sparx5, ANA_AC_PORT_STAT_CFG(portno, SPX5_PORT_POLICER_DROPS));
}
+static int sparx5_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5 *sparx5 = port->sparx5;
+ struct sparx5_phc *phc;
+
+ if (!sparx5->ptp)
+ return ethtool_op_get_ts_info(dev, info);
+
+ phc = &sparx5->phc[SPARX5_PHC_PORT];
+
+ info->phc_index = phc->clock ? ptp_clock_index(phc->clock) : -1;
+ if (info->phc_index == -1) {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ return 0;
+ }
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
const struct ethtool_ops sparx5_ethtool_ops = {
.get_sset_count = sparx5_get_sset_count,
.get_strings = sparx5_get_sset_strings,
@@ -1194,6 +1227,7 @@ const struct ethtool_ops sparx5_ethtool_ops = {
.get_eth_mac_stats = sparx5_get_eth_mac_stats,
.get_eth_ctrl_stats = sparx5_get_eth_mac_ctrl_stats,
.get_rmon_stats = sparx5_get_eth_rmon_stats,
+ .get_ts_info = sparx5_get_ts_info,
};
int sparx_stats_init(struct sparx5 *sparx5)
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
index 7436f62fa152..2dc87584023a 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
@@ -240,6 +240,8 @@ static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx
skb_pull(skb, IFH_LEN * sizeof(u32));
if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
skb_trim(skb, skb->len - ETH_FCS_LEN);
+
+ sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
skb->protocol = eth_type_trans(skb, skb->dev);
/* Everything we see on an interface that is in the HW bridge
* has already been forwarded
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
index 9a8e4f201eb1..35abb3d0ce19 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
@@ -186,11 +186,11 @@ bool sparx5_mact_getnext(struct sparx5 *sparx5,
return ret == 0;
}
-static int sparx5_mact_lookup(struct sparx5 *sparx5,
- const unsigned char mac[ETH_ALEN],
- u16 vid)
+bool sparx5_mact_find(struct sparx5 *sparx5,
+ const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2)
{
int ret;
+ u32 cfg2;
mutex_lock(&sparx5->lock);
@@ -202,16 +202,29 @@ static int sparx5_mact_lookup(struct sparx5 *sparx5,
sparx5, LRN_COMMON_ACCESS_CTRL);
ret = sparx5_mact_wait_for_completion(sparx5);
- if (ret)
- goto out;
-
- ret = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET
- (spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2));
+ if (ret == 0) {
+ cfg2 = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2);
+ if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(cfg2))
+ *pcfg2 = cfg2;
+ else
+ ret = -ENOENT;
+ }
-out:
mutex_unlock(&sparx5->lock);
- return ret;
+ return ret == 0;
+}
+
+static int sparx5_mact_lookup(struct sparx5 *sparx5,
+ const unsigned char mac[ETH_ALEN],
+ u16 vid)
+{
+ u32 pcfg2;
+
+ if (sparx5_mact_find(sparx5, mac, vid, &pcfg2))
+ return 1;
+
+ return 0;
}
int sparx5_mact_forget(struct sparx5 *sparx5,
@@ -286,7 +299,8 @@ static void sparx5_fdb_call_notifiers(enum switchdev_notifier_type type,
}
int sparx5_add_mact_entry(struct sparx5 *sparx5,
- struct sparx5_port *port,
+ struct net_device *dev,
+ u16 portno,
const unsigned char *addr, u16 vid)
{
struct sparx5_mact_entry *mact_entry;
@@ -302,14 +316,14 @@ int sparx5_add_mact_entry(struct sparx5 *sparx5,
* mact thread to start the frame will reach CPU and the CPU will
* add the entry but without the extern_learn flag.
*/
- mact_entry = find_mact_entry(sparx5, addr, vid, port->portno);
+ mact_entry = find_mact_entry(sparx5, addr, vid, portno);
if (mact_entry)
goto update_hw;
/* Add the entry in SW MAC table not to get the notification when
* SW is pulling again
*/
- mact_entry = alloc_mact_entry(sparx5, addr, vid, port->portno);
+ mact_entry = alloc_mact_entry(sparx5, addr, vid, portno);
if (!mact_entry)
return -ENOMEM;
@@ -318,13 +332,13 @@ int sparx5_add_mact_entry(struct sparx5 *sparx5,
mutex_unlock(&sparx5->mact_lock);
update_hw:
- ret = sparx5_mact_learn(sparx5, port->portno, addr, vid);
+ ret = sparx5_mact_learn(sparx5, portno, addr, vid);
/* New entry? */
if (mact_entry->flags == 0) {
mact_entry->flags |= MAC_ENT_LOCK; /* Don't age this */
sparx5_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, addr, vid,
- port->ndev, true);
+ dev, true);
}
return ret;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index 16266275dd36..01be7bd84181 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -190,6 +190,7 @@ static const struct sparx5_main_io_resource sparx5_main_iomap[] = {
{ TARGET_ASM, 0x10600000, 1 }, /* 0x610600000 */
{ TARGET_GCB, 0x11010000, 2 }, /* 0x611010000 */
{ TARGET_QS, 0x11030000, 2 }, /* 0x611030000 */
+ { TARGET_PTP, 0x11040000, 2 }, /* 0x611040000 */
{ TARGET_ANA_ACL, 0x11050000, 2 }, /* 0x611050000 */
{ TARGET_LRN, 0x11060000, 2 }, /* 0x611060000 */
{ TARGET_VCAP_SUPER, 0x11080000, 2 }, /* 0x611080000 */
@@ -291,7 +292,6 @@ static int sparx5_create_port(struct sparx5 *sparx5,
/* Create a phylink for PHY management. Also handles SFPs */
spx5_port->phylink_config.dev = &spx5_port->ndev->dev;
spx5_port->phylink_config.type = PHYLINK_NETDEV;
- spx5_port->phylink_config.pcs_poll = true;
spx5_port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD |
MAC_2500FD | MAC_5000FD | MAC_10000FD | MAC_25000FD;
@@ -328,7 +328,6 @@ static int sparx5_create_port(struct sparx5 *sparx5,
return PTR_ERR(phylink);
spx5_port->phylink = phylink;
- phylink_set_pcs(phylink, &spx5_port->phylink_pcs);
return 0;
}
@@ -627,6 +626,9 @@ static int sparx5_start(struct sparx5 *sparx5)
/* Init MAC table, ageing */
sparx5_mact_init(sparx5);
+ /* Init PGID table arbitrator */
+ sparx5_pgid_init(sparx5);
+
/* Setup VLANs */
sparx5_vlan_init(sparx5);
@@ -694,6 +696,18 @@ static int sparx5_start(struct sparx5 *sparx5)
} else {
sparx5->xtr_irq = -ENXIO;
}
+
+ if (sparx5->ptp_irq >= 0) {
+ err = devm_request_threaded_irq(sparx5->dev, sparx5->ptp_irq,
+ NULL, sparx5_ptp_irq_handler,
+ IRQF_ONESHOT, "sparx5-ptp",
+ sparx5);
+ if (err)
+ sparx5->ptp_irq = -ENXIO;
+
+ sparx5->ptp = 1;
+ }
+
return err;
}
@@ -810,6 +824,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
sparx5->fdma_irq = platform_get_irq_byname(sparx5->pdev, "fdma");
sparx5->xtr_irq = platform_get_irq_byname(sparx5->pdev, "xtr");
+ sparx5->ptp_irq = platform_get_irq_byname(sparx5->pdev, "ptp");
/* Read chip ID to check CPU interface */
sparx5->chip_id = spx5_rd(sparx5, GCB_CHIP_ID);
@@ -848,6 +863,12 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
dev_err(sparx5->dev, "Start failed\n");
goto cleanup_ports;
}
+
+ err = sparx5_ptp_init(sparx5);
+ if (err) {
+ dev_err(sparx5->dev, "PTP failed\n");
+ goto cleanup_ports;
+ }
goto cleanup_config;
cleanup_ports:
@@ -871,6 +892,7 @@ static int mchp_sparx5_remove(struct platform_device *pdev)
disable_irq(sparx5->fdma_irq);
sparx5->fdma_irq = -ENXIO;
}
+ sparx5_ptp_deinit(sparx5);
sparx5_fdma_stop(sparx5);
sparx5_cleanup_ports(sparx5);
/* Unregister netdevs */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index d40e18ce3293..7a04b8f2a546 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -14,6 +14,8 @@
#include <linux/if_vlan.h>
#include <linux/bitmap.h>
#include <linux/phylink.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/hrtimer.h>
#include "sparx5_main_regs.h"
@@ -64,6 +66,12 @@ enum sparx5_vlan_port_type {
#define PGID_BCAST (PGID_BASE + 6)
#define PGID_CPU (PGID_BASE + 7)
+#define PGID_TABLE_SIZE 3290
+
+#define PGID_MCAST_START 65
+#define PGID_GLAG_START 833
+#define PGID_GLAG_END 1088
+
#define IFH_LEN 9 /* 36 bytes */
#define NULL_VID 0
#define SPX5_MACT_PULL_DELAY (2 * HZ)
@@ -79,6 +87,18 @@ enum sparx5_vlan_port_type {
#define FDMA_RX_DCB_MAX_DBS 15
#define FDMA_TX_DCB_MAX_DBS 1
+#define SPARX5_PHC_COUNT 3
+#define SPARX5_PHC_PORT 0
+
+#define IFH_REW_OP_NOOP 0x0
+#define IFH_REW_OP_ONE_STEP_PTP 0x3
+#define IFH_REW_OP_TWO_STEP_PTP 0x4
+
+#define IFH_PDU_TYPE_NONE 0x0
+#define IFH_PDU_TYPE_PTP 0x5
+#define IFH_PDU_TYPE_IPV4_UDP_PTP 0x6
+#define IFH_PDU_TYPE_IPV6_UDP_PTP 0x7
+
struct sparx5;
struct sparx5_db_hw {
@@ -167,9 +187,12 @@ struct sparx5_port {
enum sparx5_port_max_tags max_vlan_tags;
enum sparx5_vlan_port_type vlan_type;
u32 custom_etype;
- u32 ifh[IFH_LEN];
bool vlan_aware;
struct hrtimer inj_timer;
+ /* ptp */
+ u8 ptp_cmd;
+ u16 ts_id;
+ struct sk_buff_head tx_skbs;
};
enum sparx5_core_clockfreq {
@@ -179,6 +202,26 @@ enum sparx5_core_clockfreq {
SPX5_CORE_CLOCK_625MHZ, /* 625MHZ core clock frequency */
};
+struct sparx5_phc {
+ struct ptp_clock *clock;
+ struct ptp_clock_info info;
+ struct hwtstamp_config hwtstamp_config;
+ struct sparx5 *sparx5;
+ u8 index;
+};
+
+struct sparx5_skb_cb {
+ u8 rew_op;
+ u8 pdu_type;
+ u8 pdu_w16_offset;
+ u16 ts_id;
+ unsigned long jiffies;
+};
+
+#define SPARX5_PTP_TIMEOUT msecs_to_jiffies(10)
+#define SPARX5_SKB_CB(skb) \
+ ((struct sparx5_skb_cb *)((skb)->cb))
+
struct sparx5 {
struct platform_device *pdev;
struct device *dev;
@@ -226,6 +269,16 @@ struct sparx5 {
int fdma_irq;
struct sparx5_rx rx;
struct sparx5_tx tx;
+ /* PTP */
+ bool ptp;
+ struct sparx5_phc phc[SPARX5_PHC_COUNT];
+ spinlock_t ptp_clock_lock; /* lock for phc */
+ spinlock_t ptp_ts_id_lock; /* lock for ts_id */
+ struct mutex ptp_lock; /* lock for ptp interface state */
+ u16 ptp_skbs;
+ int ptp_irq;
+ /* PGID allocation map */
+ u8 pgid_map[PGID_TABLE_SIZE];
};
/* sparx5_switchdev.c */
@@ -235,6 +288,7 @@ void sparx5_unregister_notifier_blocks(struct sparx5 *sparx5);
/* sparx5_packet.c */
struct frame_info {
int src_port;
+ u32 timestamp;
};
void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp);
@@ -256,10 +310,13 @@ int sparx5_mact_learn(struct sparx5 *sparx5, int port,
const unsigned char mac[ETH_ALEN], u16 vid);
bool sparx5_mact_getnext(struct sparx5 *sparx5,
unsigned char mac[ETH_ALEN], u16 *vid, u32 *pcfg2);
+bool sparx5_mact_find(struct sparx5 *sparx5,
+ const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2);
int sparx5_mact_forget(struct sparx5 *sparx5,
const unsigned char mac[ETH_ALEN], u16 vid);
int sparx5_add_mact_entry(struct sparx5 *sparx5,
- struct sparx5_port *port,
+ struct net_device *dev,
+ u16 portno,
const unsigned char *addr, u16 vid);
int sparx5_del_mact_entry(struct sparx5 *sparx5,
const unsigned char *addr,
@@ -288,12 +345,43 @@ void sparx5_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats
int sparx_stats_init(struct sparx5 *sparx5);
/* sparx5_netdev.c */
+void sparx5_set_port_ifh_timestamp(void *ifh_hdr, u64 timestamp);
+void sparx5_set_port_ifh_rew_op(void *ifh_hdr, u32 rew_op);
+void sparx5_set_port_ifh_pdu_type(void *ifh_hdr, u32 pdu_type);
+void sparx5_set_port_ifh_pdu_w16_offset(void *ifh_hdr, u32 pdu_w16_offset);
+void sparx5_set_port_ifh(void *ifh_hdr, u16 portno);
bool sparx5_netdevice_check(const struct net_device *dev);
struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno);
int sparx5_register_netdevs(struct sparx5 *sparx5);
void sparx5_destroy_netdevs(struct sparx5 *sparx5);
void sparx5_unregister_netdevs(struct sparx5 *sparx5);
+/* sparx5_ptp.c */
+int sparx5_ptp_init(struct sparx5 *sparx5);
+void sparx5_ptp_deinit(struct sparx5 *sparx5);
+int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, struct ifreq *ifr);
+int sparx5_ptp_hwtstamp_get(struct sparx5_port *port, struct ifreq *ifr);
+void sparx5_ptp_rxtstamp(struct sparx5 *sparx5, struct sk_buff *skb,
+ u64 timestamp);
+int sparx5_ptp_txtstamp_request(struct sparx5_port *port,
+ struct sk_buff *skb);
+void sparx5_ptp_txtstamp_release(struct sparx5_port *port,
+ struct sk_buff *skb);
+irqreturn_t sparx5_ptp_irq_handler(int irq, void *args);
+
+/* sparx5_pgid.c */
+enum sparx5_pgid_type {
+ SPX5_PGID_FREE,
+ SPX5_PGID_RESERVED,
+ SPX5_PGID_MULTICAST,
+ SPX5_PGID_GLAG
+};
+
+void sparx5_pgid_init(struct sparx5 *spx5);
+int sparx5_pgid_alloc_glag(struct sparx5 *spx5, u16 *idx);
+int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx);
+int sparx5_pgid_free(struct sparx5 *spx5, u16 idx);
+
/* Clock period in picoseconds */
static inline u32 sparx5_clk_period(enum sparx5_core_clockfreq cclock)
{
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
index 5ab2373a7178..c94de436b281 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
@@ -4,8 +4,8 @@
* Copyright (c) 2021 Microchip Technology Inc.
*/
-/* This file is autogenerated by cml-utils 2021-05-06 13:06:37 +0200.
- * Commit ID: 9ae4ec441e25e4b9003f4e514df5cb12a36b84d3
+/* This file is autogenerated by cml-utils 2022-02-26 14:15:01 +0100.
+ * Commit ID: 98bdd3d171cc2a1afd30d241d41a4281d471a48c (dirty)
*/
#ifndef _SPARX5_MAIN_REGS_H_
@@ -40,6 +40,7 @@ enum sparx5_target {
TARGET_PCS25G_BR = 144,
TARGET_PCS5G_BR = 160,
TARGET_PORT_CONF = 173,
+ TARGET_PTP = 174,
TARGET_QFWD = 175,
TARGET_QRES = 176,
TARGET_QS = 177,
@@ -4156,6 +4157,249 @@ enum sparx5_target {
#define PORT_CONF_USGMII_CFG_QUAD_MODE_GET(x)\
FIELD_GET(PORT_CONF_USGMII_CFG_QUAD_MODE, x)
+/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR */
+#define PTP_PTP_PIN_INTR __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 0, 0, 1, 4)
+
+#define PTP_PTP_PIN_INTR_INTR_PTP GENMASK(4, 0)
+#define PTP_PTP_PIN_INTR_INTR_PTP_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_INTR_INTR_PTP, x)
+#define PTP_PTP_PIN_INTR_INTR_PTP_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_INTR_INTR_PTP, x)
+
+/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR_ENA */
+#define PTP_PTP_PIN_INTR_ENA __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 4, 0, 1, 4)
+
+#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA GENMASK(4, 0)
+#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x)
+#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x)
+
+/* DEVCPU_PTP:PTP_CFG:PTP_INTR_IDENT */
+#define PTP_PTP_INTR_IDENT __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 8, 0, 1, 4)
+
+#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT GENMASK(4, 0)
+#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT_SET(x)\
+ FIELD_PREP(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x)
+#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT_GET(x)\
+ FIELD_GET(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x)
+
+/* DEVCPU_PTP:PTP_CFG:PTP_DOM_CFG */
+#define PTP_PTP_DOM_CFG __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 12, 0, 1, 4)
+
+#define PTP_PTP_DOM_CFG_PTP_ENA GENMASK(11, 9)
+#define PTP_PTP_DOM_CFG_PTP_ENA_SET(x)\
+ FIELD_PREP(PTP_PTP_DOM_CFG_PTP_ENA, x)
+#define PTP_PTP_DOM_CFG_PTP_ENA_GET(x)\
+ FIELD_GET(PTP_PTP_DOM_CFG_PTP_ENA, x)
+
+#define PTP_PTP_DOM_CFG_PTP_HOLD GENMASK(8, 6)
+#define PTP_PTP_DOM_CFG_PTP_HOLD_SET(x)\
+ FIELD_PREP(PTP_PTP_DOM_CFG_PTP_HOLD, x)
+#define PTP_PTP_DOM_CFG_PTP_HOLD_GET(x)\
+ FIELD_GET(PTP_PTP_DOM_CFG_PTP_HOLD, x)
+
+#define PTP_PTP_DOM_CFG_PTP_TOD_FREEZE GENMASK(5, 3)
+#define PTP_PTP_DOM_CFG_PTP_TOD_FREEZE_SET(x)\
+ FIELD_PREP(PTP_PTP_DOM_CFG_PTP_TOD_FREEZE, x)
+#define PTP_PTP_DOM_CFG_PTP_TOD_FREEZE_GET(x)\
+ FIELD_GET(PTP_PTP_DOM_CFG_PTP_TOD_FREEZE, x)
+
+#define PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS GENMASK(2, 0)
+#define PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(x)\
+ FIELD_PREP(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, x)
+#define PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_GET(x)\
+ FIELD_GET(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, x)
+
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:CLK_PER_CFG */
+#define PTP_CLK_PER_CFG(g, r) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 0, r, 2, 4)
+
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC */
+#define PTP_PTP_CUR_NSEC(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 8, 0, 1, 4)
+
+#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC GENMASK(29, 0)
+#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC_SET(x)\
+ FIELD_PREP(PTP_PTP_CUR_NSEC_PTP_CUR_NSEC, x)
+#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC_GET(x)\
+ FIELD_GET(PTP_PTP_CUR_NSEC_PTP_CUR_NSEC, x)
+
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC_FRAC */
+#define PTP_PTP_CUR_NSEC_FRAC(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 12, 0, 1, 4)
+
+#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC GENMASK(7, 0)
+#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC_SET(x)\
+ FIELD_PREP(PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC, x)
+#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC_GET(x)\
+ FIELD_GET(PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC, x)
+
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_LSB */
+#define PTP_PTP_CUR_SEC_LSB(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 16, 0, 1, 4)
+
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_MSB */
+#define PTP_PTP_CUR_SEC_MSB(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 20, 0, 1, 4)
+
+#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB GENMASK(15, 0)
+#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB_SET(x)\
+ FIELD_PREP(PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB, x)
+#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB_GET(x)\
+ FIELD_GET(PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB, x)
+
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:NTP_CUR_NSEC */
+#define PTP_NTP_CUR_NSEC(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 24, 0, 1, 4)
+
+/* DEVCPU_PTP:PTP_PINS:PTP_PIN_CFG */
+#define PTP_PTP_PIN_CFG(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 0, 0, 1, 4)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION GENMASK(28, 26)
+#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_ACTION, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_ACTION, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC GENMASK(25, 24)
+#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_SYNC, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_SYNC, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL BIT(23)
+#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT GENMASK(22, 21)
+#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_SELECT, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_SELECT, x)
+
+#define PTP_PTP_PIN_CFG_PTP_CLK_SELECT GENMASK(20, 18)
+#define PTP_PTP_PIN_CFG_PTP_CLK_SELECT_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_CLK_SELECT, x)
+#define PTP_PTP_PIN_CFG_PTP_CLK_SELECT_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_CLK_SELECT, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_DOM GENMASK(17, 16)
+#define PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_DOM, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_DOM_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_DOM, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_OPT GENMASK(15, 14)
+#define PTP_PTP_PIN_CFG_PTP_PIN_OPT_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_OPT, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_OPT_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_OPT, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK BIT(13)
+#define PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_EMBEDDED_CLK, x)
+
+#define PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS GENMASK(12, 0)
+#define PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS_SET(x)\
+ FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS, x)
+#define PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS_GET(x)\
+ FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS, x)
+
+/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_MSB */
+#define PTP_PTP_TOD_SEC_MSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 4, 0, 1, 4)
+
+#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB GENMASK(15, 0)
+#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_SET(x)\
+ FIELD_PREP(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB, x)
+#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_GET(x)\
+ FIELD_GET(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB, x)
+
+/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_LSB */
+#define PTP_PTP_TOD_SEC_LSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 8, 0, 1, 4)
+
+/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC */
+#define PTP_PTP_TOD_NSEC(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 12, 0, 1, 4)
+
+#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC GENMASK(29, 0)
+#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_SET(x)\
+ FIELD_PREP(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC, x)
+#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_GET(x)\
+ FIELD_GET(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC, x)
+
+/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC_FRAC */
+#define PTP_PTP_TOD_NSEC_FRAC(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 16, 0, 1, 4)
+
+#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC GENMASK(7, 0)
+#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC_SET(x)\
+ FIELD_PREP(PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC, x)
+#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC_GET(x)\
+ FIELD_GET(PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC, x)
+
+/* DEVCPU_PTP:PTP_PINS:NTP_NSEC */
+#define PTP_NTP_NSEC(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 20, 0, 1, 4)
+
+/* DEVCPU_PTP:PTP_PINS:PIN_WF_HIGH_PERIOD */
+#define PTP_PIN_WF_HIGH_PERIOD(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 24, 0, 1, 4)
+
+#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH GENMASK(29, 0)
+#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH_SET(x)\
+ FIELD_PREP(PTP_PIN_WF_HIGH_PERIOD_PIN_WFH, x)
+#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH_GET(x)\
+ FIELD_GET(PTP_PIN_WF_HIGH_PERIOD_PIN_WFH, x)
+
+/* DEVCPU_PTP:PTP_PINS:PIN_WF_LOW_PERIOD */
+#define PTP_PIN_WF_LOW_PERIOD(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 28, 0, 1, 4)
+
+#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL GENMASK(29, 0)
+#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL_SET(x)\
+ FIELD_PREP(PTP_PIN_WF_LOW_PERIOD_PIN_WFL, x)
+#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL_GET(x)\
+ FIELD_GET(PTP_PIN_WF_LOW_PERIOD_PIN_WFL, x)
+
+/* DEVCPU_PTP:PTP_PINS:PIN_IOBOUNCH_DELAY */
+#define PTP_PIN_IOBOUNCH_DELAY(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 32, 0, 1, 4)
+
+#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL GENMASK(18, 3)
+#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL_SET(x)\
+ FIELD_PREP(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL, x)
+#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL_GET(x)\
+ FIELD_GET(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL, x)
+
+#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG GENMASK(2, 0)
+#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG_SET(x)\
+ FIELD_PREP(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG, x)
+#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG_GET(x)\
+ FIELD_GET(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG, x)
+
+/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CTRL */
+#define PTP_PHAD_CTRL(g) __REG(TARGET_PTP, 0, 1, 420, g, 5, 8, 0, 0, 1, 4)
+
+#define PTP_PHAD_CTRL_PHAD_ENA BIT(7)
+#define PTP_PHAD_CTRL_PHAD_ENA_SET(x)\
+ FIELD_PREP(PTP_PHAD_CTRL_PHAD_ENA, x)
+#define PTP_PHAD_CTRL_PHAD_ENA_GET(x)\
+ FIELD_GET(PTP_PHAD_CTRL_PHAD_ENA, x)
+
+#define PTP_PHAD_CTRL_PHAD_FAILED BIT(6)
+#define PTP_PHAD_CTRL_PHAD_FAILED_SET(x)\
+ FIELD_PREP(PTP_PHAD_CTRL_PHAD_FAILED, x)
+#define PTP_PHAD_CTRL_PHAD_FAILED_GET(x)\
+ FIELD_GET(PTP_PHAD_CTRL_PHAD_FAILED, x)
+
+#define PTP_PHAD_CTRL_REDUCED_RES GENMASK(5, 3)
+#define PTP_PHAD_CTRL_REDUCED_RES_SET(x)\
+ FIELD_PREP(PTP_PHAD_CTRL_REDUCED_RES, x)
+#define PTP_PHAD_CTRL_REDUCED_RES_GET(x)\
+ FIELD_GET(PTP_PHAD_CTRL_REDUCED_RES, x)
+
+#define PTP_PHAD_CTRL_LOCK_ACC GENMASK(2, 0)
+#define PTP_PHAD_CTRL_LOCK_ACC_SET(x)\
+ FIELD_PREP(PTP_PHAD_CTRL_LOCK_ACC, x)
+#define PTP_PHAD_CTRL_LOCK_ACC_GET(x)\
+ FIELD_GET(PTP_PHAD_CTRL_LOCK_ACC, x)
+
+/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CYC_STAT */
+#define PTP_PHAD_CYC_STAT(g) __REG(TARGET_PTP, 0, 1, 420, g, 5, 8, 4, 0, 1, 4)
+
/* QFWD:SYSTEM:SWITCH_PORT_MODE */
#define QFWD_SWITCH_PORT_MODE(r) __REG(TARGET_QFWD, 0, 1, 0, 0, 1, 340, 0, r, 70, 4)
@@ -4528,6 +4772,93 @@ enum sparx5_target {
#define REW_TAG_CTRL_TAG_DEI_CFG_GET(x)\
FIELD_GET(REW_TAG_CTRL_TAG_DEI_CFG, x)
+/* REW:PTP_CTRL:PTP_TWOSTEP_CTRL */
+#define REW_PTP_TWOSTEP_CTRL __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 0, 0, 1, 4)
+
+#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA BIT(12)
+#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA, x)
+#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA, x)
+
+#define REW_PTP_TWOSTEP_CTRL_PTP_NXT BIT(11)
+#define REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_NXT, x)
+#define REW_PTP_TWOSTEP_CTRL_PTP_NXT_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_NXT, x)
+
+#define REW_PTP_TWOSTEP_CTRL_PTP_VLD BIT(10)
+#define REW_PTP_TWOSTEP_CTRL_PTP_VLD_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_VLD, x)
+#define REW_PTP_TWOSTEP_CTRL_PTP_VLD_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_VLD, x)
+
+#define REW_PTP_TWOSTEP_CTRL_STAMP_TX BIT(9)
+#define REW_PTP_TWOSTEP_CTRL_STAMP_TX_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_CTRL_STAMP_TX, x)
+#define REW_PTP_TWOSTEP_CTRL_STAMP_TX_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_CTRL_STAMP_TX, x)
+
+#define REW_PTP_TWOSTEP_CTRL_STAMP_PORT GENMASK(8, 1)
+#define REW_PTP_TWOSTEP_CTRL_STAMP_PORT_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_CTRL_STAMP_PORT, x)
+#define REW_PTP_TWOSTEP_CTRL_STAMP_PORT_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_CTRL_STAMP_PORT, x)
+
+#define REW_PTP_TWOSTEP_CTRL_PTP_OVFL BIT(0)
+#define REW_PTP_TWOSTEP_CTRL_PTP_OVFL_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_CTRL_PTP_OVFL, x)
+#define REW_PTP_TWOSTEP_CTRL_PTP_OVFL_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_OVFL, x)
+
+/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP */
+#define REW_PTP_TWOSTEP_STAMP __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 4, 0, 1, 4)
+
+#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC GENMASK(29, 0)
+#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_STAMP_STAMP_NSEC, x)
+#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_STAMP_STAMP_NSEC, x)
+
+/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP_SUBNS */
+#define REW_PTP_TWOSTEP_STAMP_SUBNS __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 8, 0, 1, 4)
+
+#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC GENMASK(7, 0)
+#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC_SET(x)\
+ FIELD_PREP(REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC, x)
+#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC_GET(x)\
+ FIELD_GET(REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC, x)
+
+/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO */
+#define REW_PTP_RSRV_NOT_ZERO __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 12, 0, 1, 4)
+
+/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO1 */
+#define REW_PTP_RSRV_NOT_ZERO1 __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 16, 0, 1, 4)
+
+/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO2 */
+#define REW_PTP_RSRV_NOT_ZERO2 __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 20, 0, 1, 4)
+
+#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2 GENMASK(5, 0)
+#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2_SET(x)\
+ FIELD_PREP(REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2, x)
+#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2_GET(x)\
+ FIELD_GET(REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2, x)
+
+/* REW:PTP_CTRL:PTP_GEN_STAMP_FMT */
+#define REW_PTP_GEN_STAMP_FMT(r) __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 24, r, 4, 4)
+
+#define REW_PTP_GEN_STAMP_FMT_RT_OFS GENMASK(6, 2)
+#define REW_PTP_GEN_STAMP_FMT_RT_OFS_SET(x)\
+ FIELD_PREP(REW_PTP_GEN_STAMP_FMT_RT_OFS, x)
+#define REW_PTP_GEN_STAMP_FMT_RT_OFS_GET(x)\
+ FIELD_GET(REW_PTP_GEN_STAMP_FMT_RT_OFS, x)
+
+#define REW_PTP_GEN_STAMP_FMT_RT_FMT GENMASK(1, 0)
+#define REW_PTP_GEN_STAMP_FMT_RT_FMT_SET(x)\
+ FIELD_PREP(REW_PTP_GEN_STAMP_FMT_RT_FMT, x)
+#define REW_PTP_GEN_STAMP_FMT_RT_FMT_GET(x)\
+ FIELD_GET(REW_PTP_GEN_STAMP_FMT_RT_FMT, x)
+
/* REW:RAM_CTRL:RAM_INIT */
#define REW_RAM_INIT __REG(TARGET_REW, 0, 1, 378696, 0, 1, 4, 0, 0, 1, 4)
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
index e042f117dc7a..af4d3e1f1a6d 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
@@ -54,7 +54,7 @@ static void __ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width)
ifh_hdr[byte - 5] |= (u8)((encode & 0xFF0000000000) >> 40);
}
-static void sparx5_set_port_ifh(void *ifh_hdr, u16 portno)
+void sparx5_set_port_ifh(void *ifh_hdr, u16 portno)
{
/* VSTAX.RSV = 1. MSBit must be 1 */
ifh_encode_bitfield(ifh_hdr, 1, VSTAX + 79, 1);
@@ -74,6 +74,26 @@ static void sparx5_set_port_ifh(void *ifh_hdr, u16 portno)
ifh_encode_bitfield(ifh_hdr, 1, 67, 1);
}
+void sparx5_set_port_ifh_rew_op(void *ifh_hdr, u32 rew_op)
+{
+ ifh_encode_bitfield(ifh_hdr, rew_op, VSTAX + 32, 10);
+}
+
+void sparx5_set_port_ifh_pdu_type(void *ifh_hdr, u32 pdu_type)
+{
+ ifh_encode_bitfield(ifh_hdr, pdu_type, 191, 4);
+}
+
+void sparx5_set_port_ifh_pdu_w16_offset(void *ifh_hdr, u32 pdu_w16_offset)
+{
+ ifh_encode_bitfield(ifh_hdr, pdu_w16_offset, 195, 6);
+}
+
+void sparx5_set_port_ifh_timestamp(void *ifh_hdr, u64 timestamp)
+{
+ ifh_encode_bitfield(ifh_hdr, timestamp, 232, 40);
+}
+
static int sparx5_port_open(struct net_device *ndev)
{
struct sparx5_port *port = netdev_priv(ndev);
@@ -179,6 +199,24 @@ static int sparx5_get_port_parent_id(struct net_device *dev,
return 0;
}
+static int sparx5_port_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd)
+{
+ struct sparx5_port *sparx5_port = netdev_priv(dev);
+ struct sparx5 *sparx5 = sparx5_port->sparx5;
+
+ if (!phy_has_hwtstamp(dev->phydev) && sparx5->ptp) {
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return sparx5_ptp_hwtstamp_set(sparx5_port, ifr);
+ case SIOCGHWTSTAMP:
+ return sparx5_ptp_hwtstamp_get(sparx5_port, ifr);
+ }
+ }
+
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
+}
+
static const struct net_device_ops sparx5_port_netdev_ops = {
.ndo_open = sparx5_port_open,
.ndo_stop = sparx5_port_stop,
@@ -189,6 +227,7 @@ static const struct net_device_ops sparx5_port_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_get_stats64 = sparx5_get_stats64,
.ndo_get_port_parent_id = sparx5_get_port_parent_id,
+ .ndo_eth_ioctl = sparx5_port_ioctl,
};
bool sparx5_netdevice_check(const struct net_device *dev)
@@ -210,7 +249,6 @@ struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno)
spx5_port->ndev = ndev;
spx5_port->sparx5 = sparx5;
spx5_port->portno = portno;
- sparx5_set_port_ifh(spx5_port->ifh, portno);
ndev->netdev_ops = &sparx5_port_netdev_ops;
ndev->ethtool_ops = &sparx5_ethtool_ops;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index 148d431fcde4..304f84aadc36 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -44,6 +44,12 @@ void sparx5_ifh_parse(u32 *ifh, struct frame_info *info)
((u32)xtr_hdr[30] << 0);
fwd = (fwd >> 5);
info->src_port = FIELD_GET(GENMASK(7, 1), fwd);
+
+ info->timestamp =
+ ((u64)xtr_hdr[2] << 24) |
+ ((u64)xtr_hdr[3] << 16) |
+ ((u64)xtr_hdr[4] << 8) |
+ ((u64)xtr_hdr[5] << 0);
}
static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
@@ -144,6 +150,7 @@ static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
/* Finish up skb */
skb_put(skb, byte_cnt - ETH_FCS_LEN);
eth_skb_pad(skb);
+ sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
skb->protocol = eth_type_trans(skb, netdev);
netdev->stats.rx_bytes += skb->len;
netdev->stats.rx_packets++;
@@ -218,20 +225,44 @@ int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
struct net_device_stats *stats = &dev->stats;
struct sparx5_port *port = netdev_priv(dev);
struct sparx5 *sparx5 = port->sparx5;
+ u32 ifh[IFH_LEN];
int ret;
+ memset(ifh, 0, IFH_LEN * 4);
+ sparx5_set_port_ifh(ifh, port->portno);
+
+ if (sparx5->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ ret = sparx5_ptp_txtstamp_request(port, skb);
+ if (ret)
+ return ret;
+
+ sparx5_set_port_ifh_rew_op(ifh, SPARX5_SKB_CB(skb)->rew_op);
+ sparx5_set_port_ifh_pdu_type(ifh, SPARX5_SKB_CB(skb)->pdu_type);
+ sparx5_set_port_ifh_pdu_w16_offset(ifh, SPARX5_SKB_CB(skb)->pdu_w16_offset);
+ sparx5_set_port_ifh_timestamp(ifh, SPARX5_SKB_CB(skb)->ts_id);
+ }
+
+ skb_tx_timestamp(skb);
if (sparx5->fdma_irq > 0)
- ret = sparx5_fdma_xmit(sparx5, port->ifh, skb);
+ ret = sparx5_fdma_xmit(sparx5, ifh, skb);
else
- ret = sparx5_inject(sparx5, port->ifh, skb, dev);
+ ret = sparx5_inject(sparx5, ifh, skb, dev);
if (ret == NETDEV_TX_OK) {
stats->tx_bytes += skb->len;
stats->tx_packets++;
- skb_tx_timestamp(skb);
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ return ret;
+
dev_kfree_skb_any(skb);
} else {
stats->tx_dropped++;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ sparx5_ptp_txtstamp_release(port, skb);
}
return ret;
}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c b/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c
new file mode 100644
index 000000000000..90366fcb9958
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include "sparx5_main.h"
+
+void sparx5_pgid_init(struct sparx5 *spx5)
+{
+ int i;
+
+ for (i = 0; i < PGID_TABLE_SIZE; i++)
+ spx5->pgid_map[i] = SPX5_PGID_FREE;
+
+ /* Reserved for unicast, flood control, broadcast, and CPU.
+ * These cannot be freed.
+ */
+ for (i = 0; i <= PGID_CPU; i++)
+ spx5->pgid_map[i] = SPX5_PGID_RESERVED;
+}
+
+int sparx5_pgid_alloc_glag(struct sparx5 *spx5, u16 *idx)
+{
+ int i;
+
+ for (i = PGID_GLAG_START; i <= PGID_GLAG_END; i++)
+ if (spx5->pgid_map[i] == SPX5_PGID_FREE) {
+ spx5->pgid_map[i] = SPX5_PGID_GLAG;
+ *idx = i;
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx)
+{
+ int i;
+
+ for (i = PGID_MCAST_START; i < PGID_TABLE_SIZE; i++) {
+ if (i == PGID_GLAG_START)
+ i = PGID_GLAG_END + 1;
+
+ if (spx5->pgid_map[i] == SPX5_PGID_FREE) {
+ spx5->pgid_map[i] = SPX5_PGID_MULTICAST;
+ *idx = i;
+ return 0;
+ }
+ }
+
+ return -EBUSY;
+}
+
+int sparx5_pgid_free(struct sparx5 *spx5, u16 idx)
+{
+ if (idx <= PGID_CPU || idx >= PGID_TABLE_SIZE)
+ return -EINVAL;
+
+ if (spx5->pgid_map[idx] == SPX5_PGID_FREE)
+ return -EINVAL;
+
+ spx5->pgid_map[idx] = SPX5_PGID_FREE;
+ return 0;
+}
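A minimal usage sketch of the allocator above (hypothetical caller, not part of the patch): a multicast PGID is reserved, used while the corresponding MDB entry exists, then released again.

	u16 pgid_idx;

	if (!sparx5_pgid_alloc_mcast(spx5, &pgid_idx)) {
		/* pgid_idx now refers to a free entry at or above
		 * PGID_MCAST_START, outside the GLAG window; program the
		 * port mask / learn the MAC against it here.
		 */
		sparx5_pgid_free(spx5, pgid_idx);	/* entry becomes SPX5_PGID_FREE again */
	}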
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
index 8ba33bc1a001..830da0e5ff27 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
@@ -26,6 +26,15 @@ static bool port_conf_has_changed(struct sparx5_port_config *a, struct sparx5_po
return false;
}
+static struct phylink_pcs *
+sparx5_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct sparx5_port *port = netdev_priv(to_net_dev(config->dev));
+
+ return &port->phylink_pcs;
+}
+
static void sparx5_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
@@ -130,6 +139,7 @@ const struct phylink_pcs_ops sparx5_phylink_pcs_ops = {
const struct phylink_mac_ops sparx5_phylink_mac_ops = {
.validate = phylink_generic_validate,
+ .mac_select_pcs = sparx5_phylink_mac_select_pcs,
.mac_config = sparx5_phylink_mac_config,
.mac_link_down = sparx5_phylink_mac_link_down,
.mac_link_up = sparx5_phylink_mac_link_up,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
new file mode 100644
index 000000000000..0ed1ea7727c5
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
@@ -0,0 +1,685 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
+ *
+ * The Sparx5 Chip Register Model can be browsed at this location:
+ * https://github.com/microchip-ung/sparx-5_reginfo
+ */
+#include <linux/ptp_classify.h>
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+#define SPARX5_MAX_PTP_ID 512
+
+#define TOD_ACC_PIN 0x4
+
+enum {
+ PTP_PIN_ACTION_IDLE = 0,
+ PTP_PIN_ACTION_LOAD,
+ PTP_PIN_ACTION_SAVE,
+ PTP_PIN_ACTION_CLOCK,
+ PTP_PIN_ACTION_DELTA,
+ PTP_PIN_ACTION_TOD
+};
+
+static u64 sparx5_ptp_get_1ppm(struct sparx5 *sparx5)
+{
+ /* Represents the 1 ppm adjustment step in 2^59 fixed-point format, using
+ * the nominal TOD increments 1.59687500000 (625 MHz), 1.99609375000
+ * (500 MHz) and 3.99218750000 (250 MHz) ns as reference.
+ * The value is calculated as (1/1000000)/((2^-59)/X).
+ */
+
+ u64 res = 0;
+
+ switch (sparx5->coreclock) {
+ case SPX5_CORE_CLOCK_250MHZ:
+ res = 2301339409586;
+ break;
+ case SPX5_CORE_CLOCK_500MHZ:
+ res = 1150669704793;
+ break;
+ case SPX5_CORE_CLOCK_625MHZ:
+ res = 920535763834;
+ break;
+ default:
+ WARN(1, "Invalid core clock");
+ break;
+ }
+
+ return res;
+}
+
+static u64 sparx5_ptp_get_nominal_value(struct sparx5 *sparx5)
+{
+ u64 res = 0;
+
+ switch (sparx5->coreclock) {
+ case SPX5_CORE_CLOCK_250MHZ:
+ res = 0x1FF0000000000000;
+ break;
+ case SPX5_CORE_CLOCK_500MHZ:
+ res = 0x0FF8000000000000;
+ break;
+ case SPX5_CORE_CLOCK_625MHZ:
+ res = 0x0CC6666666666666;
+ break;
+ default:
+ WARN(1, "Invalid core clock");
+ break;
+ }
+
+ return res;
+}
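A quick sanity check of the two constant tables above (illustrative arithmetic only, assuming <linux/math64.h> for div_u64()): the TOD increment is a fixed-point nanosecond value with 59 fractional bits, and the 1 ppm step is simply the nominal increment divided by 10^6.

	u64 nominal = 0x1FF0000000000000ULL;	/* 250 MHz: 0x1FF0.../2^59 = 3.9921875 ns per cycle */
	u64 one_ppm = div_u64(nominal, 1000000);	/* = 2301339409586, matching sparx5_ptp_get_1ppm() */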
+
+int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, struct ifreq *ifr)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ struct hwtstamp_config cfg;
+ struct sparx5_phc *phc;
+
+ /* For now, don't allow PTP to run on ports that are part of a bridge:
+ * in the transparent clock case the HW would still forward the frames,
+ * resulting in duplicates.
+ */
+
+ if (test_bit(port->portno, sparx5->bridge_mask))
+ return -EINVAL;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_ON:
+ port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ port->ptp_cmd = IFH_REW_OP_ONE_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_OFF:
+ port->ptp_cmd = IFH_REW_OP_NOOP;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* Commit back the result & save it */
+ mutex_lock(&sparx5->ptp_lock);
+ phc = &sparx5->phc[SPARX5_PHC_PORT];
+ memcpy(&phc->hwtstamp_config, &cfg, sizeof(cfg));
+ mutex_unlock(&sparx5->ptp_lock);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+int sparx5_ptp_hwtstamp_get(struct sparx5_port *port, struct ifreq *ifr)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ struct sparx5_phc *phc;
+
+ phc = &sparx5->phc[SPARX5_PHC_PORT];
+ return copy_to_user(ifr->ifr_data, &phc->hwtstamp_config,
+ sizeof(phc->hwtstamp_config)) ? -EFAULT : 0;
+}
+
+static void sparx5_ptp_classify(struct sparx5_port *port, struct sk_buff *skb,
+ u8 *rew_op, u8 *pdu_type, u8 *pdu_w16_offset)
+{
+ struct ptp_header *header;
+ u8 msgtype;
+ int type;
+
+ if (port->ptp_cmd == IFH_REW_OP_NOOP) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ *pdu_w16_offset = 0;
+ return;
+ }
+
+ type = ptp_classify_raw(skb);
+ if (type == PTP_CLASS_NONE) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ *pdu_w16_offset = 0;
+ return;
+ }
+
+ header = ptp_parse_header(skb, type);
+ if (!header) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ *pdu_w16_offset = 0;
+ return;
+ }
+
+ *pdu_w16_offset = 7;
+ if (type & PTP_CLASS_L2)
+ *pdu_type = IFH_PDU_TYPE_PTP;
+ if (type & PTP_CLASS_IPV4)
+ *pdu_type = IFH_PDU_TYPE_IPV4_UDP_PTP;
+ if (type & PTP_CLASS_IPV6)
+ *pdu_type = IFH_PDU_TYPE_IPV6_UDP_PTP;
+
+ if (port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ *rew_op = IFH_REW_OP_TWO_STEP_PTP;
+ return;
+ }
+
+ /* If this is a Sync message and one-step is requested, use the one-step
+ * operation; otherwise fall back to two-step.
+ */
+ msgtype = ptp_get_msgtype(header, type);
+ if ((msgtype & 0xf) == 0) {
+ *rew_op = IFH_REW_OP_ONE_STEP_PTP;
+ return;
+ }
+
+ *rew_op = IFH_REW_OP_TWO_STEP_PTP;
+}
+
+static void sparx5_ptp_txtstamp_old_release(struct sparx5_port *port)
+{
+ struct sk_buff *skb, *skb_tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+ if (time_after(SPARX5_SKB_CB(skb)->jiffies + SPARX5_PTP_TIMEOUT,
+ jiffies))
+ break;
+
+ __skb_unlink(skb, &port->tx_skbs);
+ dev_kfree_skb_any(skb);
+ }
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+}
+
+int sparx5_ptp_txtstamp_request(struct sparx5_port *port,
+ struct sk_buff *skb)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ u8 rew_op, pdu_type, pdu_w16_offset;
+ unsigned long flags;
+
+ sparx5_ptp_classify(port, skb, &rew_op, &pdu_type, &pdu_w16_offset);
+ SPARX5_SKB_CB(skb)->rew_op = rew_op;
+ SPARX5_SKB_CB(skb)->pdu_type = pdu_type;
+ SPARX5_SKB_CB(skb)->pdu_w16_offset = pdu_w16_offset;
+
+ if (rew_op != IFH_REW_OP_TWO_STEP_PTP)
+ return 0;
+
+ sparx5_ptp_txtstamp_old_release(port);
+
+ spin_lock_irqsave(&sparx5->ptp_ts_id_lock, flags);
+ if (sparx5->ptp_skbs == SPARX5_MAX_PTP_ID) {
+ spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);
+ return -EBUSY;
+ }
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ skb_queue_tail(&port->tx_skbs, skb);
+ SPARX5_SKB_CB(skb)->ts_id = port->ts_id;
+ SPARX5_SKB_CB(skb)->jiffies = jiffies;
+
+ sparx5->ptp_skbs++;
+ port->ts_id++;
+ if (port->ts_id == SPARX5_MAX_PTP_ID)
+ port->ts_id = 0;
+
+ spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);
+
+ return 0;
+}
+
+void sparx5_ptp_txtstamp_release(struct sparx5_port *port,
+ struct sk_buff *skb)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sparx5->ptp_ts_id_lock, flags);
+ port->ts_id--;
+ sparx5->ptp_skbs--;
+ skb_unlink(skb, &port->tx_skbs);
+ spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);
+}
+
+static void sparx5_get_hwtimestamp(struct sparx5 *sparx5,
+ struct timespec64 *ts,
+ u32 nsec)
+{
+ /* Read current PTP time to get seconds */
+ unsigned long flags;
+ u32 curr_nsec;
+
+ spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);
+
+ spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(SPARX5_PHC_PORT) |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
+ PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
+ sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+
+ ts->tv_sec = spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ curr_nsec = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ ts->tv_nsec = nsec;
+
+ /* Sec has incremented since the ts was registered */
+ if (curr_nsec < nsec)
+ ts->tv_sec--;
+
+ spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
+}
+
+irqreturn_t sparx5_ptp_irq_handler(int irq, void *args)
+{
+ int budget = SPARX5_MAX_PTP_ID;
+ struct sparx5 *sparx5 = args;
+
+ while (budget--) {
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sparx5_port *port;
+ struct timespec64 ts;
+ unsigned long flags;
+ u32 val, id, txport;
+ u32 delay;
+
+ val = spx5_rd(sparx5, REW_PTP_TWOSTEP_CTRL);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & REW_PTP_TWOSTEP_CTRL_PTP_VLD))
+ break;
+
+ WARN_ON(val & REW_PTP_TWOSTEP_CTRL_PTP_OVFL);
+
+ if (!(val & REW_PTP_TWOSTEP_CTRL_STAMP_TX))
+ continue;
+
+ /* Retrieve the ts Tx port */
+ txport = REW_PTP_TWOSTEP_CTRL_STAMP_PORT_GET(val);
+
+ /* Retrieve its associated skb */
+ port = sparx5->ports[txport];
+
+ /* Retrieve the delay */
+ delay = spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP);
+ delay = REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(delay);
+
+ /* Get the next timestamp from the FIFO; it must be the RX timestamp,
+ * which carries the ID of the frame.
+ */
+ spx5_rmw(REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(1),
+ REW_PTP_TWOSTEP_CTRL_PTP_NXT,
+ sparx5, REW_PTP_TWOSTEP_CTRL);
+
+ val = spx5_rd(sparx5, REW_PTP_TWOSTEP_CTRL);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & REW_PTP_TWOSTEP_CTRL_PTP_VLD))
+ break;
+
+ /* Read RX timestamping to get the ID */
+ id = spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP);
+ id <<= 8;
+ id |= spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP_SUBNS);
+
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+ if (SPARX5_SKB_CB(skb)->ts_id != id)
+ continue;
+
+ __skb_unlink(skb, &port->tx_skbs);
+ skb_match = skb;
+ break;
+ }
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+
+ /* Next ts */
+ spx5_rmw(REW_PTP_TWOSTEP_CTRL_PTP_NXT_SET(1),
+ REW_PTP_TWOSTEP_CTRL_PTP_NXT,
+ sparx5, REW_PTP_TWOSTEP_CTRL);
+
+ if (WARN_ON(!skb_match))
+ continue;
+
+ spin_lock(&sparx5->ptp_ts_id_lock);
+ sparx5->ptp_skbs--;
+ spin_unlock(&sparx5->ptp_ts_id_lock);
+
+ /* Get the h/w timestamp */
+ sparx5_get_hwtimestamp(sparx5, &ts, delay);
+
+ /* Set the timestamp into the skb */
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_tstamp_tx(skb_match, &shhwtstamps);
+
+ dev_kfree_skb_any(skb_match);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int sparx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
+ struct sparx5 *sparx5 = phc->sparx5;
+ unsigned long flags;
+ bool neg_adj = 0;
+ u64 tod_inc;
+ u64 ref;
+
+ if (!scaled_ppm)
+ return 0;
+
+ if (scaled_ppm < 0) {
+ neg_adj = 1;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ tod_inc = sparx5_ptp_get_nominal_value(sparx5);
+
+ /* The multiplication is split into two separate additions because of
+ * overflow issues: with its 16-bit fractional part, a scaled_ppm larger
+ * than 20 ppm would overflow the direct multiplication.
+ */
+ ref = sparx5_ptp_get_1ppm(sparx5) * (scaled_ppm >> 16);
+ ref += (sparx5_ptp_get_1ppm(sparx5) * (0xffff & scaled_ppm)) >> 16;
+ tod_inc = neg_adj ? tod_inc - ref : tod_inc + ref;
+
+ spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);
+
+ spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(1 << BIT(phc->index)),
+ PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS,
+ sparx5, PTP_PTP_DOM_CFG);
+
+ spx5_wr((u32)tod_inc & 0xFFFFFFFF, sparx5,
+ PTP_CLK_PER_CFG(phc->index, 0));
+ spx5_wr((u32)(tod_inc >> 32), sparx5,
+ PTP_CLK_PER_CFG(phc->index, 1));
+
+ spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0),
+ PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, sparx5,
+ PTP_PTP_DOM_CFG);
+
+ spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
+
+ return 0;
+}
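A worked example of the split above (constants taken from the 250 MHz row; the local variables are only for illustration): scaled_ppm is ppm in 16.16 fixed point, so +1 ppm arrives as 65536 and nudges the increment by exactly one 1 ppm step.

	long scaled_ppm = 65536;			/* +1.0 ppm in 16.16 format */
	u64 one_ppm = 2301339409586ULL;			/* sparx5_ptp_get_1ppm() at 250 MHz */
	u64 ref;

	ref = one_ppm * (scaled_ppm >> 16);		/* integer ppm part: 2301339409586 */
	ref += (one_ppm * (scaled_ppm & 0xffff)) >> 16;	/* fractional ppm part: 0 here */
	/* tod_inc then becomes nominal +/- ref, i.e. exactly 1 ppm away */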
+
+static int sparx5_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
+ struct sparx5 *sparx5 = phc->sparx5;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);
+
+ /* Must be in IDLE mode before the time can be loaded */
+ spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
+ PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
+ sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+
+ /* Set new value */
+ spx5_wr(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_SET(upper_32_bits(ts->tv_sec)),
+ sparx5, PTP_PTP_TOD_SEC_MSB(TOD_ACC_PIN));
+ spx5_wr(lower_32_bits(ts->tv_sec),
+ sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ spx5_wr(ts->tv_nsec, sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ /* Apply new values */
+ spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_LOAD) |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
+ PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
+ sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+
+ spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
+
+ return 0;
+}
+
+static int sparx5_ptp_gettime64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
+ struct sparx5 *sparx5 = phc->sparx5;
+ unsigned long flags;
+ time64_t s;
+ s64 ns;
+
+ spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);
+
+ spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
+ PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
+ sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+
+ s = spx5_rd(sparx5, PTP_PTP_TOD_SEC_MSB(TOD_ACC_PIN));
+ s <<= 32;
+ s |= spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
+ ns = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
+ ns &= PTP_PTP_TOD_NSEC_PTP_TOD_NSEC;
+
+ spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
+
+ /* Deal with negative values */
+ if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) {
+ s--;
+ ns &= 0xf;
+ ns += 999999984;
+ }
+
+ set_normalized_timespec64(ts, s, ns);
+ return 0;
+}
+
+static int sparx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
+ struct sparx5 *sparx5 = phc->sparx5;
+
+ if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);
+
+ /* Must be in IDLE mode before the time can be loaded */
+ spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
+ PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
+ sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+
+ spx5_wr(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_SET(delta),
+ sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
+
+ /* Adjust time with the value of PTP_TOD_NSEC */
+ spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_DELTA) |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM_SET(phc->index) |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(0),
+ PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
+ PTP_PTP_PIN_CFG_PTP_PIN_DOM |
+ PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
+ sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+
+ spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
+ } else {
+ /* Fall back to sparx5_ptp_settime64(), which is not exact */
+ struct timespec64 ts;
+ u64 now;
+
+ sparx5_ptp_gettime64(ptp, &ts);
+
+ now = ktime_to_ns(timespec64_to_ktime(ts));
+ ts = ns_to_timespec64(now + delta);
+
+ sparx5_ptp_settime64(ptp, &ts);
+ }
+
+ return 0;
+}
+
+static struct ptp_clock_info sparx5_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "sparx5 ptp",
+ .max_adj = 200000,
+ .gettime64 = sparx5_ptp_gettime64,
+ .settime64 = sparx5_ptp_settime64,
+ .adjtime = sparx5_ptp_adjtime,
+ .adjfine = sparx5_ptp_adjfine,
+};
+
+static int sparx5_ptp_phc_init(struct sparx5 *sparx5,
+ int index,
+ struct ptp_clock_info *clock_info)
+{
+ struct sparx5_phc *phc = &sparx5->phc[index];
+
+ phc->info = *clock_info;
+ phc->clock = ptp_clock_register(&phc->info, sparx5->dev);
+ if (IS_ERR(phc->clock))
+ return PTR_ERR(phc->clock);
+
+ phc->index = index;
+ phc->sparx5 = sparx5;
+
+ /* PTP Rx stamping is always enabled. */
+ phc->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+
+ return 0;
+}
+
+int sparx5_ptp_init(struct sparx5 *sparx5)
+{
+ u64 tod_adj = sparx5_ptp_get_nominal_value(sparx5);
+ struct sparx5_port *port;
+ int err, i;
+
+ if (!sparx5->ptp)
+ return 0;
+
+ for (i = 0; i < SPARX5_PHC_COUNT; ++i) {
+ err = sparx5_ptp_phc_init(sparx5, i, &sparx5_ptp_clock_info);
+ if (err)
+ return err;
+ }
+
+ spin_lock_init(&sparx5->ptp_clock_lock);
+ spin_lock_init(&sparx5->ptp_ts_id_lock);
+ mutex_init(&sparx5->ptp_lock);
+
+ /* Disable master counters */
+ spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0), sparx5, PTP_PTP_DOM_CFG);
+
+ /* Configure the nominal TOD increment per clock cycle */
+ spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0x7),
+ PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS,
+ sparx5, PTP_PTP_DOM_CFG);
+
+ for (i = 0; i < SPARX5_PHC_COUNT; ++i) {
+ spx5_wr((u32)tod_adj & 0xFFFFFFFF, sparx5,
+ PTP_CLK_PER_CFG(i, 0));
+ spx5_wr((u32)(tod_adj >> 32), sparx5,
+ PTP_CLK_PER_CFG(i, 1));
+ }
+
+ spx5_rmw(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_SET(0),
+ PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS,
+ sparx5, PTP_PTP_DOM_CFG);
+
+ /* Enable master counters */
+ spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0x7), sparx5, PTP_PTP_DOM_CFG);
+
+ for (i = 0; i < sparx5->port_count; i++) {
+ port = sparx5->ports[i];
+ if (!port)
+ continue;
+
+ skb_queue_head_init(&port->tx_skbs);
+ }
+
+ return 0;
+}
+
+void sparx5_ptp_deinit(struct sparx5 *sparx5)
+{
+ struct sparx5_port *port;
+ int i;
+
+ for (i = 0; i < sparx5->port_count; i++) {
+ port = sparx5->ports[i];
+ if (!port)
+ continue;
+
+ skb_queue_purge(&port->tx_skbs);
+ }
+
+ for (i = 0; i < SPARX5_PHC_COUNT; ++i)
+ ptp_clock_unregister(sparx5->phc[i].clock);
+}
+
+void sparx5_ptp_rxtstamp(struct sparx5 *sparx5, struct sk_buff *skb,
+ u64 timestamp)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct sparx5_phc *phc;
+ struct timespec64 ts;
+ u64 full_ts_in_ns;
+
+ if (!sparx5->ptp)
+ return;
+
+ phc = &sparx5->phc[SPARX5_PHC_PORT];
+ sparx5_ptp_gettime64(&phc->info, &ts);
+
+ if (ts.tv_nsec < timestamp)
+ ts.tv_sec--;
+ ts.tv_nsec = timestamp;
+ full_ts_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
+
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = full_ts_in_ns;
+}
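Illustration of the rollover handling in sparx5_ptp_rxtstamp() (hypothetical numbers): only the nanoseconds part travels in the IFH, so the seconds are taken from the PHC and corrected if the nanoseconds counter has wrapped since the frame was stamped.

	/* PHC reads 100 s + 40 ns while the frame carries timestamp = 999999990 ns:
	 * ts.tv_nsec (40) < timestamp, so the stamp predates the last seconds
	 * rollover and the full time is 99 s + 999999990 ns, not 100 s.
	 */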
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
index 649ca609884a..2d8e0b81c839 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
@@ -16,14 +16,31 @@ struct sparx5_switchdev_event_work {
struct work_struct work;
struct switchdev_notifier_fdb_info fdb_info;
struct net_device *dev;
+ struct sparx5 *sparx5;
unsigned long event;
};
+static int sparx5_port_attr_pre_bridge_flags(struct sparx5_port *port,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+
static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
struct switchdev_brport_flags flags)
{
+ int pgid;
+
if (flags.mask & BR_MCAST_FLOOD)
- sparx5_pgid_update_mask(port, PGID_MC_FLOOD, true);
+ for (pgid = PGID_MC_FLOOD; pgid <= PGID_IPV6_MC_CTRL; pgid++)
+ sparx5_pgid_update_mask(port, pgid, !!(flags.val & BR_MCAST_FLOOD));
+ if (flags.mask & BR_FLOOD)
+ sparx5_pgid_update_mask(port, PGID_UC_FLOOD, !!(flags.val & BR_FLOOD));
+ if (flags.mask & BR_BCAST_FLOOD)
+ sparx5_pgid_update_mask(port, PGID_BCAST, !!(flags.val & BR_BCAST_FLOOD));
}
static void sparx5_attr_stp_state_set(struct sparx5_port *port,
@@ -72,6 +89,9 @@ static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
struct sparx5_port *port = netdev_priv(dev);
switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ return sparx5_port_attr_pre_bridge_flags(port,
+ attr->u.brport_flags);
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
sparx5_port_attr_bridge_flags(port, attr->u.brport_flags);
break;
@@ -82,6 +102,11 @@ static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ /* Use PVID 1 when default_pvid is 0, to avoid
+ * collisions with non-bridged ports.
+ */
+ if (port->pvid == 0)
+ port->pvid = 1;
port->vlan_aware = attr->u.vlan_filtering;
sparx5_vlan_port_apply(port->sparx5, port);
break;
@@ -117,6 +142,9 @@ static int sparx5_port_bridge_join(struct sparx5_port *port,
if (err)
goto err_switchdev_offload;
+ /* Remove standalone port entry */
+ sparx5_mact_forget(sparx5, ndev->dev_addr, 0);
+
 /* The port enters bridge mode, so multicast frames no longer need to be
 * copied to the CPU unless the bridge requests them
 */
@@ -145,6 +173,9 @@ static void sparx5_port_bridge_leave(struct sparx5_port *port,
port->pvid = NULL_VID;
port->vid = NULL_VID;
+ /* Forward frames to CPU */
+ sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, 0);
+
 /* Port enters host mode again, therefore restore the mc list */
__dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
}
@@ -228,31 +259,43 @@ static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
struct switchdev_notifier_fdb_info *fdb_info;
struct sparx5_port *port;
struct sparx5 *sparx5;
+ bool host_addr;
+ u16 vid;
rtnl_lock();
- if (!sparx5_netdevice_check(dev))
- goto out;
-
- port = netdev_priv(dev);
- sparx5 = port->sparx5;
+ if (!sparx5_netdevice_check(dev)) {
+ host_addr = true;
+ sparx5 = switchdev_work->sparx5;
+ } else {
+ host_addr = false;
+ sparx5 = switchdev_work->sparx5;
+ port = netdev_priv(dev);
+ }
fdb_info = &switchdev_work->fdb_info;
+ /* Use PVID 1 when default_pvid is 0, to avoid
+ * collisions with non-bridged ports.
+ */
+ if (fdb_info->vid == 0)
+ vid = 1;
+ else
+ vid = fdb_info->vid;
+
switch (switchdev_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
- if (!fdb_info->added_by_user)
- break;
- sparx5_add_mact_entry(sparx5, port, fdb_info->addr,
- fdb_info->vid);
+ if (host_addr)
+ sparx5_add_mact_entry(sparx5, dev, PGID_CPU,
+ fdb_info->addr, vid);
+ else
+ sparx5_add_mact_entry(sparx5, port->ndev, port->portno,
+ fdb_info->addr, vid);
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
- if (!fdb_info->added_by_user)
- break;
- sparx5_del_mact_entry(sparx5, fdb_info->addr, fdb_info->vid);
+ sparx5_del_mact_entry(sparx5, fdb_info->addr, vid);
break;
}
-out:
rtnl_unlock();
kfree(switchdev_work->fdb_info.addr);
kfree(switchdev_work);
@@ -264,15 +307,18 @@ static void sparx5_schedule_work(struct work_struct *work)
queue_work(sparx5_owq, work);
}
-static int sparx5_switchdev_event(struct notifier_block *unused,
+static int sparx5_switchdev_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
struct sparx5_switchdev_event_work *switchdev_work;
struct switchdev_notifier_fdb_info *fdb_info;
struct switchdev_notifier_info *info = ptr;
+ struct sparx5 *spx5;
int err;
+ spx5 = container_of(nb, struct sparx5, switchdev_nb);
+
switch (event) {
case SWITCHDEV_PORT_ATTR_SET:
err = switchdev_handle_port_attr_set(dev, ptr,
@@ -288,6 +334,7 @@ static int sparx5_switchdev_event(struct notifier_block *unused,
switchdev_work->dev = dev;
switchdev_work->event = event;
+ switchdev_work->sparx5 = spx5;
fdb_info = container_of(info,
struct switchdev_notifier_fdb_info,
@@ -314,77 +361,132 @@ err_addr_alloc:
return NOTIFY_BAD;
}
-static void sparx5_sync_port_dev_addr(struct sparx5 *sparx5,
- struct sparx5_port *port,
- u16 vid, bool add)
+static int sparx5_handle_port_vlan_add(struct net_device *dev,
+ struct notifier_block *nb,
+ const struct switchdev_obj_port_vlan *v)
{
- if (!port ||
- !test_bit(port->portno, sparx5->bridge_mask))
- return; /* Skip null/host interfaces */
-
- /* Bridge connects to vid? */
- if (add) {
- /* Add port MAC address from the VLAN */
- sparx5_mact_learn(sparx5, PGID_CPU,
- port->ndev->dev_addr, vid);
- } else {
- /* Control port addr visibility depending on
- * port VLAN connectivity.
- */
- if (test_bit(port->portno, sparx5->vlan_mask[vid]))
- sparx5_mact_learn(sparx5, PGID_CPU,
- port->ndev->dev_addr, vid);
- else
- sparx5_mact_forget(sparx5,
- port->ndev->dev_addr, vid);
+ struct sparx5_port *port = netdev_priv(dev);
+
+ if (netif_is_bridge_master(dev)) {
+ struct sparx5 *sparx5 =
+ container_of(nb, struct sparx5,
+ switchdev_blocking_nb);
+
+ /* Flood broadcast to CPU */
+ sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
+ v->vid);
+ return 0;
}
+
+ if (!sparx5_netdevice_check(dev))
+ return -EOPNOTSUPP;
+
+ return sparx5_vlan_vid_add(port, v->vid,
+ v->flags & BRIDGE_VLAN_INFO_PVID,
+ v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
}
-static void sparx5_sync_bridge_dev_addr(struct net_device *dev,
- struct sparx5 *sparx5,
- u16 vid, bool add)
+static int sparx5_handle_port_mdb_add(struct net_device *dev,
+ struct notifier_block *nb,
+ const struct switchdev_obj_port_mdb *v)
{
- int i;
+ struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5 *spx5 = port->sparx5;
+ u16 pgid_idx, vid;
+ u32 mact_entry;
+ int res, err;
- /* First, handle bridge address'es */
- if (add) {
- sparx5_mact_learn(sparx5, PGID_CPU, dev->dev_addr,
- vid);
- sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
- vid);
+ /* When the bridge is VLAN-unaware, the VLAN value is not parsed and we
+ * receive vid 0. Fall back to bridge vid 1.
+ */
+ if (!br_vlan_enabled(spx5->hw_bridge_dev))
+ vid = 1;
+ else
+ vid = v->vid;
+
+ res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry);
+
+ if (res) {
+ pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry);
+
+ /* MC_IDX has an offset of 65 in the PGID table. */
+ pgid_idx += PGID_MCAST_START;
+ sparx5_pgid_update_mask(port, pgid_idx, true);
} else {
- sparx5_mact_forget(sparx5, dev->dev_addr, vid);
- sparx5_mact_forget(sparx5, dev->broadcast, vid);
+ err = sparx5_pgid_alloc_mcast(spx5, &pgid_idx);
+ if (err) {
+ netdev_warn(dev, "multicast pgid table full\n");
+ return err;
+ }
+ sparx5_pgid_update_mask(port, pgid_idx, true);
+ err = sparx5_mact_learn(spx5, pgid_idx, v->addr, vid);
+ if (err) {
+ netdev_warn(dev, "could not learn mac address %pM\n", v->addr);
+ sparx5_pgid_update_mask(port, pgid_idx, false);
+ return err;
+ }
}
- /* Now look at bridged ports */
- for (i = 0; i < SPX5_PORTS; i++)
- sparx5_sync_port_dev_addr(sparx5, sparx5->ports[i], vid, add);
+ return 0;
}
-static int sparx5_handle_port_vlan_add(struct net_device *dev,
- struct notifier_block *nb,
- const struct switchdev_obj_port_vlan *v)
+static int sparx5_mdb_del_entry(struct net_device *dev,
+ struct sparx5 *spx5,
+ const unsigned char mac[ETH_ALEN],
+ const u16 vid,
+ u16 pgid_idx)
+{
+ int err;
+
+ err = sparx5_mact_forget(spx5, mac, vid);
+ if (err) {
+ netdev_warn(dev, "could not forget mac address %pM", mac);
+ return err;
+ }
+ err = sparx5_pgid_free(spx5, pgid_idx);
+ if (err) {
+ netdev_err(dev, "attempted to free already freed pgid\n");
+ return err;
+ }
+ return 0;
+}
+
+static int sparx5_handle_port_mdb_del(struct net_device *dev,
+ struct notifier_block *nb,
+ const struct switchdev_obj_port_mdb *v)
{
struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5 *spx5 = port->sparx5;
+ u16 pgid_idx, vid;
+ u32 mact_entry, res, pgid_entry[3];
+ int err;
- if (netif_is_bridge_master(dev)) {
- if (v->flags & BRIDGE_VLAN_INFO_BRENTRY) {
- struct sparx5 *sparx5 =
- container_of(nb, struct sparx5,
- switchdev_blocking_nb);
+ if (!br_vlan_enabled(spx5->hw_bridge_dev))
+ vid = 1;
+ else
+ vid = v->vid;
- sparx5_sync_bridge_dev_addr(dev, sparx5, v->vid, true);
+ res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry);
+
+ if (res) {
+ pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry);
+
+ /* MC_IDX has an offset of 65 in the PGID table. */
+ pgid_idx += PGID_MCAST_START;
+ sparx5_pgid_update_mask(port, pgid_idx, false);
+
+ pgid_entry[0] = spx5_rd(spx5, ANA_AC_PGID_CFG(pgid_idx));
+ pgid_entry[1] = spx5_rd(spx5, ANA_AC_PGID_CFG1(pgid_idx));
+ pgid_entry[2] = spx5_rd(spx5, ANA_AC_PGID_CFG2(pgid_idx));
+ if (pgid_entry[0] == 0 && pgid_entry[1] == 0 && pgid_entry[2] == 0) {
+ /* No ports are in MC group. Remove entry */
+ err = sparx5_mdb_del_entry(dev, spx5, v->addr, vid, pgid_idx);
+ if (err)
+ return err;
}
- return 0;
}
- if (!sparx5_netdevice_check(dev))
- return -EOPNOTSUPP;
-
- return sparx5_vlan_vid_add(port, v->vid,
- v->flags & BRIDGE_VLAN_INFO_PVID,
- v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ return 0;
}
static int sparx5_handle_port_obj_add(struct net_device *dev,
@@ -399,6 +501,10 @@ static int sparx5_handle_port_obj_add(struct net_device *dev,
err = sparx5_handle_port_vlan_add(dev, nb,
SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ err = sparx5_handle_port_mdb_add(dev, nb,
+ SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -421,7 +527,7 @@ static int sparx5_handle_port_vlan_del(struct net_device *dev,
container_of(nb, struct sparx5,
switchdev_blocking_nb);
- sparx5_sync_bridge_dev_addr(dev, sparx5, vid, false);
+ sparx5_mact_forget(sparx5, dev->broadcast, vid);
return 0;
}
@@ -432,9 +538,6 @@ static int sparx5_handle_port_vlan_del(struct net_device *dev,
if (ret)
return ret;
- /* Delete the port MAC address with the matching VLAN information */
- sparx5_mact_forget(port->sparx5, port->ndev->dev_addr, vid);
-
return 0;
}
@@ -450,6 +553,10 @@ static int sparx5_handle_port_obj_del(struct net_device *dev,
err = sparx5_handle_port_vlan_del(dev, nb,
SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ err = sparx5_handle_port_mdb_del(dev, nb,
+ SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
default:
err = -EOPNOTSUPP;
break;
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 636dfef24a6c..49b85ca578b0 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -663,7 +663,7 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
struct gdma_context *gc = gd->gdma_context;
struct hw_channel_context *hwc;
u32 length = gmi->length;
- u32 req_msg_size;
+ size_t req_msg_size;
int err;
int i;
@@ -674,7 +674,7 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
return -EINVAL;
hwc = gc->hwc.driver_data;
- req_msg_size = sizeof(*req) + num_page * sizeof(u64);
+ req_msg_size = struct_size(req, page_addr_list, num_page);
if (req_msg_size > hwc->max_req_msg_size)
return -EINVAL;
diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h
index 9a12607fb511..d36405af9432 100644
--- a/drivers/net/ethernet/microsoft/mana/mana.h
+++ b/drivers/net/ethernet/microsoft/mana/mana.h
@@ -48,7 +48,15 @@ enum TRI_STATE {
#define MAX_PORTS_IN_MANA_DEV 256
-struct mana_stats {
+struct mana_stats_rx {
+ u64 packets;
+ u64 bytes;
+ u64 xdp_drop;
+ u64 xdp_tx;
+ struct u64_stats_sync syncp;
+};
+
+struct mana_stats_tx {
u64 packets;
u64 bytes;
struct u64_stats_sync syncp;
@@ -76,7 +84,7 @@ struct mana_txq {
atomic_t pending_sends;
- struct mana_stats stats;
+ struct mana_stats_tx stats;
};
/* skb data and frags dma mappings */
@@ -298,10 +306,11 @@ struct mana_rxq {
u32 buf_index;
- struct mana_stats stats;
+ struct mana_stats_rx stats;
struct bpf_prog __rcu *bpf_prog;
struct xdp_rxq_info xdp_rxq;
+ struct page *xdp_save_page;
/* MUST BE THE LAST MEMBER:
* Each receive buffer has an associated mana_recv_buf_oob.
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 498d0f999275..b7d3ba1b4d17 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -136,7 +136,7 @@ int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
bool ipv4 = false, ipv6 = false;
struct mana_tx_package pkg = {};
struct netdev_queue *net_txq;
- struct mana_stats *tx_stats;
+ struct mana_stats_tx *tx_stats;
struct gdma_queue *gdma_sq;
unsigned int csum_type;
struct mana_txq *txq;
@@ -299,7 +299,8 @@ static void mana_get_stats64(struct net_device *ndev,
{
struct mana_port_context *apc = netdev_priv(ndev);
unsigned int num_queues = apc->num_queues;
- struct mana_stats *stats;
+ struct mana_stats_rx *rx_stats;
+ struct mana_stats_tx *tx_stats;
unsigned int start;
u64 packets, bytes;
int q;
@@ -310,26 +311,26 @@ static void mana_get_stats64(struct net_device *ndev,
netdev_stats_to_stats64(st, &ndev->stats);
for (q = 0; q < num_queues; q++) {
- stats = &apc->rxqs[q]->stats;
+ rx_stats = &apc->rxqs[q]->stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ packets = rx_stats->packets;
+ bytes = rx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
st->rx_packets += packets;
st->rx_bytes += bytes;
}
for (q = 0; q < num_queues; q++) {
- stats = &apc->tx_qp[q].txq.stats;
+ tx_stats = &apc->tx_qp[q].txq.stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
st->tx_packets += packets;
st->tx_bytes += bytes;
@@ -986,7 +987,7 @@ static struct sk_buff *mana_build_skb(void *buf_va, uint pkt_len,
static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
struct mana_rxq *rxq)
{
- struct mana_stats *rx_stats = &rxq->stats;
+ struct mana_stats_rx *rx_stats = &rxq->stats;
struct net_device *ndev = rxq->ndev;
uint pkt_len = cqe->ppi[0].pkt_len;
u16 rxq_idx = rxq->rxq_idx;
@@ -1007,7 +1008,7 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
if (act != XDP_PASS && act != XDP_TX)
- goto drop;
+ goto drop_xdp;
skb = mana_build_skb(buf_va, pkt_len, &xdp);
@@ -1034,6 +1035,14 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
}
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->packets++;
+ rx_stats->bytes += pkt_len;
+
+ if (act == XDP_TX)
+ rx_stats->xdp_tx++;
+ u64_stats_update_end(&rx_stats->syncp);
+
if (act == XDP_TX) {
skb_set_queue_mapping(skb, rxq_idx);
mana_xdp_tx(skb, ndev);
@@ -1042,15 +1051,19 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
napi_gro_receive(napi, skb);
+ return;
+
+drop_xdp:
u64_stats_update_begin(&rx_stats->syncp);
- rx_stats->packets++;
- rx_stats->bytes += pkt_len;
+ rx_stats->xdp_drop++;
u64_stats_update_end(&rx_stats->syncp);
- return;
drop:
- free_page((unsigned long)buf_va);
+ WARN_ON_ONCE(rxq->xdp_save_page);
+ rxq->xdp_save_page = virt_to_page(buf_va);
+
++ndev->stats.rx_dropped;
+
return;
}
@@ -1072,8 +1085,10 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
break;
case CQE_RX_TRUNCATED:
- netdev_err(ndev, "Dropped a truncated packet\n");
- return;
+ ++ndev->stats.rx_dropped;
+ rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
+ netdev_warn_once(ndev, "Dropped a truncated packet\n");
+ goto drop;
case CQE_RX_COALESCED_4:
netdev_err(ndev, "RX coalescing is unsupported\n");
@@ -1089,9 +1104,6 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
return;
}
- if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY)
- return;
-
pktlen = oob->ppi[0].pkt_len;
if (pktlen == 0) {
@@ -1105,7 +1117,13 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
rxbuf_oob = &rxq->rx_oobs[curr];
WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
- new_page = alloc_page(GFP_ATOMIC);
+ /* Reuse XDP dropped page if available */
+ if (rxq->xdp_save_page) {
+ new_page = rxq->xdp_save_page;
+ rxq->xdp_save_page = NULL;
+ } else {
+ new_page = alloc_page(GFP_ATOMIC);
+ }
if (new_page) {
da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize,
@@ -1135,6 +1153,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
mana_rx_skb(old_buf, oob, rxq);
+drop:
mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
mana_post_pkt_rxq(rxq);
@@ -1392,6 +1411,9 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
mana_deinit_cq(apc, &rxq->rx_cq);
+ if (rxq->xdp_save_page)
+ __free_page(rxq->xdp_save_page);
+
for (i = 0; i < rxq->num_rx_buf; i++) {
rx_oob = &rxq->rx_oobs[i];
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index c3c81ae3fafd..e13f2453eabb 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -23,7 +23,7 @@ static int mana_get_sset_count(struct net_device *ndev, int stringset)
if (stringset != ETH_SS_STATS)
return -EINVAL;
- return ARRAY_SIZE(mana_eth_stats) + num_queues * 4;
+ return ARRAY_SIZE(mana_eth_stats) + num_queues * 6;
}
static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
@@ -46,6 +46,10 @@ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
sprintf(p, "rx_%d_bytes", i);
p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_%d_xdp_drop", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_%d_xdp_tx", i);
+ p += ETH_GSTRING_LEN;
}
for (i = 0; i < num_queues; i++) {
@@ -62,9 +66,12 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
struct mana_port_context *apc = netdev_priv(ndev);
unsigned int num_queues = apc->num_queues;
void *eth_stats = &apc->eth_stats;
- struct mana_stats *stats;
+ struct mana_stats_rx *rx_stats;
+ struct mana_stats_tx *tx_stats;
unsigned int start;
u64 packets, bytes;
+ u64 xdp_drop;
+ u64 xdp_tx;
int q, i = 0;
if (!apc->port_is_up)
@@ -74,26 +81,30 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);
for (q = 0; q < num_queues; q++) {
- stats = &apc->rxqs[q]->stats;
+ rx_stats = &apc->rxqs[q]->stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ packets = rx_stats->packets;
+ bytes = rx_stats->bytes;
+ xdp_drop = rx_stats->xdp_drop;
+ xdp_tx = rx_stats->xdp_tx;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
+ data[i++] = xdp_drop;
+ data[i++] = xdp_tx;
}
for (q = 0; q < num_queues; q++) {
- stats = &apc->tx_qp[q].txq.stats;
+ tx_stats = &apc->tx_qp[q].txq.stats;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->packets;
- bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ packets = tx_stats->packets;
+ bytes = tx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 15179b9529e1..afb7dcadb8d2 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -510,14 +510,14 @@ static int moxart_mac_probe(struct platform_device *pdev)
}
priv->tx_buf_base = kmalloc_array(priv->tx_buf_size, TX_DESC_NUM,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!priv->tx_buf_base) {
ret = -ENOMEM;
goto init_fail;
}
priv->rx_buf_base = kmalloc_array(priv->rx_buf_size, RX_DESC_NUM,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!priv->rx_buf_base) {
ret = -ENOMEM;
goto init_fail;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index fd3ceb74620d..e443bd8b2d09 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -13,6 +13,7 @@
#define TABLE_UPDATE_SLEEP_US 10
#define TABLE_UPDATE_TIMEOUT_US 100000
+#define OCELOT_RSV_VLAN_RANGE_START 4000
struct ocelot_mact_entry {
u8 mac[ETH_ALEN];
@@ -221,6 +222,35 @@ static void ocelot_vcap_enable(struct ocelot *ocelot, int port)
REW_PORT_CFG, port);
}
+static int ocelot_single_vlan_aware_bridge(struct ocelot *ocelot,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *bridge = NULL;
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (!ocelot_port || !ocelot_port->bridge ||
+ !br_vlan_enabled(ocelot_port->bridge))
+ continue;
+
+ if (!bridge) {
+ bridge = ocelot_port->bridge;
+ continue;
+ }
+
+ if (bridge == ocelot_port->bridge)
+ continue;
+
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one VLAN-aware bridge is supported");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
static inline u32 ocelot_vlant_read_vlanaccess(struct ocelot *ocelot)
{
return ocelot_read(ocelot, ANA_TABLES_VLANACCESS);
@@ -347,12 +377,45 @@ static void ocelot_port_manage_port_tag(struct ocelot *ocelot, int port)
}
}
+int ocelot_bridge_num_find(struct ocelot *ocelot,
+ const struct net_device *bridge)
+{
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (ocelot_port && ocelot_port->bridge == bridge)
+ return ocelot_port->bridge_num;
+ }
+
+ return -1;
+}
+EXPORT_SYMBOL_GPL(ocelot_bridge_num_find);
+
+static u16 ocelot_vlan_unaware_pvid(struct ocelot *ocelot,
+ const struct net_device *bridge)
+{
+ int bridge_num;
+
+ /* Standalone ports use VID 0 */
+ if (!bridge)
+ return 0;
+
+ bridge_num = ocelot_bridge_num_find(ocelot, bridge);
+ if (WARN_ON(bridge_num < 0))
+ return 0;
+
+ /* VLAN-unaware bridges use a reserved VID going from 4095 downwards */
+ return VLAN_N_VID - bridge_num - 1;
+}
+
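Example of the mapping above (assuming VLAN_N_VID is 4096, as in the kernel headers): the first VLAN-unaware bridge (bridge_num 0) gets VID 4095, the second gets 4094, and so on, which is why user VIDs above OCELOT_RSV_VLAN_RANGE_START (4000) are rejected in ocelot_vlan_prepare() further down.

	/* bridge_num 0 -> pvid 4095, bridge_num 1 -> 4094, ...;
	 * standalone ports keep pvid 0 (OCELOT_STANDALONE_PVID).
	 */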
 /* Default vlan to classify for untagged frames (may be zero) */
static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
const struct ocelot_bridge_vlan *pvid_vlan)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- u16 pvid = OCELOT_VLAN_UNAWARE_PVID;
+ u16 pvid = ocelot_vlan_unaware_pvid(ocelot, ocelot_port->bridge);
u32 val = 0;
ocelot_port->pvid_vlan = pvid_vlan;
@@ -466,12 +529,29 @@ static int ocelot_vlan_member_del(struct ocelot *ocelot, int port, u16 vid)
return 0;
}
+static int ocelot_add_vlan_unaware_pvid(struct ocelot *ocelot, int port,
+ const struct net_device *bridge)
+{
+ u16 vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
+
+ return ocelot_vlan_member_add(ocelot, port, vid, true);
+}
+
+static int ocelot_del_vlan_unaware_pvid(struct ocelot *ocelot, int port,
+ const struct net_device *bridge)
+{
+ u16 vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
+
+ return ocelot_vlan_member_del(ocelot, port, vid);
+}
+
int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
bool vlan_aware, struct netlink_ext_ack *extack)
{
struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1];
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct ocelot_vcap_filter *filter;
+ int err;
u32 val;
list_for_each_entry(filter, &block->rules, list) {
@@ -483,6 +563,19 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
}
}
+ err = ocelot_single_vlan_aware_bridge(ocelot, extack);
+ if (err)
+ return err;
+
+ if (vlan_aware)
+ err = ocelot_del_vlan_unaware_pvid(ocelot, port,
+ ocelot_port->bridge);
+ else
+ err = ocelot_add_vlan_unaware_pvid(ocelot, port,
+ ocelot_port->bridge);
+ if (err)
+ return err;
+
ocelot_port->vlan_aware = vlan_aware;
if (vlan_aware)
@@ -521,6 +614,12 @@ int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid,
}
}
+ if (vid > OCELOT_RSV_VLAN_RANGE_START) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VLAN range 4000-4095 reserved for VLAN-unaware bridging");
+ return -EBUSY;
+ }
+
return 0;
}
EXPORT_SYMBOL(ocelot_vlan_prepare);
@@ -584,11 +683,11 @@ static void ocelot_vlan_init(struct ocelot *ocelot)
for (vid = 1; vid < VLAN_N_VID; vid++)
ocelot_vlant_set_mask(ocelot, vid, 0);
- /* Because VLAN filtering is enabled, we need VID 0 to get untagged
- * traffic. It is added automatically if 8021q module is loaded, but
- * we can't rely on it since module may be not loaded.
+ /* We need VID 0 to get traffic on standalone ports.
+ * It is added automatically if the 8021q module is loaded, but we
+ * can't rely on that since it might not be.
*/
- ocelot_vlant_set_mask(ocelot, OCELOT_VLAN_UNAWARE_PVID, all_ports);
+ ocelot_vlant_set_mask(ocelot, OCELOT_STANDALONE_PVID, all_ports);
/* Set vlan ingress filter mask to all ports but the CPU port by
* default.
@@ -1237,21 +1336,27 @@ void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp)
}
EXPORT_SYMBOL(ocelot_drain_cpu_queue);
-int ocelot_fdb_add(struct ocelot *ocelot, int port,
- const unsigned char *addr, u16 vid)
+int ocelot_fdb_add(struct ocelot *ocelot, int port, const unsigned char *addr,
+ u16 vid, const struct net_device *bridge)
{
int pgid = port;
if (port == ocelot->npi)
pgid = PGID_CPU;
+ if (!vid)
+ vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
+
return ocelot_mact_learn(ocelot, pgid, addr, vid, ENTRYTYPE_LOCKED);
}
EXPORT_SYMBOL(ocelot_fdb_add);
-int ocelot_fdb_del(struct ocelot *ocelot, int port,
- const unsigned char *addr, u16 vid)
+int ocelot_fdb_del(struct ocelot *ocelot, int port, const unsigned char *addr,
+ u16 vid, const struct net_device *bridge)
{
+ if (!vid)
+ vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
+
return ocelot_mact_forget(ocelot, addr, vid);
}
EXPORT_SYMBOL(ocelot_fdb_del);
@@ -1413,6 +1518,12 @@ int ocelot_fdb_dump(struct ocelot *ocelot, int port,
is_static = (entry.type == ENTRYTYPE_LOCKED);
+ /* Hide the reserved VLANs used for
+ * VLAN-unaware bridging.
+ */
+ if (entry.vid > OCELOT_RSV_VLAN_RANGE_START)
+ entry.vid = 0;
+
err = cb(entry.mac, entry.vid, is_static, data);
if (err)
break;
@@ -1472,9 +1583,9 @@ ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
trap->key.ipv6.dport.mask = 0xffff;
}
-static int ocelot_trap_add(struct ocelot *ocelot, int port,
- unsigned long cookie,
- void (*populate)(struct ocelot_vcap_filter *f))
+int ocelot_trap_add(struct ocelot *ocelot, int port,
+ unsigned long cookie, bool take_ts,
+ void (*populate)(struct ocelot_vcap_filter *f))
{
struct ocelot_vcap_block *block_vcap_is2;
struct ocelot_vcap_filter *trap;
@@ -1500,6 +1611,8 @@ static int ocelot_trap_add(struct ocelot *ocelot, int port,
trap->action.cpu_copy_ena = true;
trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
trap->action.port_mask = 0;
+ trap->take_ts = take_ts;
+ list_add_tail(&trap->trap_list, &ocelot->traps);
new = true;
}
@@ -1511,16 +1624,17 @@ static int ocelot_trap_add(struct ocelot *ocelot, int port,
err = ocelot_vcap_filter_replace(ocelot, trap);
if (err) {
trap->ingress_port_mask &= ~BIT(port);
- if (!trap->ingress_port_mask)
+ if (!trap->ingress_port_mask) {
+ list_del(&trap->trap_list);
kfree(trap);
+ }
return err;
}
return 0;
}
-static int ocelot_trap_del(struct ocelot *ocelot, int port,
- unsigned long cookie)
+int ocelot_trap_del(struct ocelot *ocelot, int port, unsigned long cookie)
{
struct ocelot_vcap_block *block_vcap_is2;
struct ocelot_vcap_filter *trap;
@@ -1533,39 +1647,42 @@ static int ocelot_trap_del(struct ocelot *ocelot, int port,
return 0;
trap->ingress_port_mask &= ~BIT(port);
- if (!trap->ingress_port_mask)
+ if (!trap->ingress_port_mask) {
+ list_del(&trap->trap_list);
+
return ocelot_vcap_filter_del(ocelot, trap);
+ }
return ocelot_vcap_filter_replace(ocelot, trap);
}
static int ocelot_l2_ptp_trap_add(struct ocelot *ocelot, int port)
{
- unsigned long l2_cookie = ocelot->num_phys_ports + 1;
+ unsigned long l2_cookie = OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot);
- return ocelot_trap_add(ocelot, port, l2_cookie,
+ return ocelot_trap_add(ocelot, port, l2_cookie, true,
ocelot_populate_l2_ptp_trap_key);
}
static int ocelot_l2_ptp_trap_del(struct ocelot *ocelot, int port)
{
- unsigned long l2_cookie = ocelot->num_phys_ports + 1;
+ unsigned long l2_cookie = OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot);
return ocelot_trap_del(ocelot, port, l2_cookie);
}
static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port)
{
- unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2;
- unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3;
+ unsigned long ipv4_gen_cookie = OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot);
+ unsigned long ipv4_ev_cookie = OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot);
int err;
- err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie,
+ err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie, true,
ocelot_populate_ipv4_ptp_event_trap_key);
if (err)
return err;
- err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie,
+ err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie, false,
ocelot_populate_ipv4_ptp_general_trap_key);
if (err)
ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
@@ -1575,8 +1692,8 @@ static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port)
static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port)
{
- unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2;
- unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3;
+ unsigned long ipv4_gen_cookie = OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot);
+ unsigned long ipv4_ev_cookie = OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot);
int err;
err = ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
@@ -1586,16 +1703,16 @@ static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port)
static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port)
{
- unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4;
- unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5;
+ unsigned long ipv6_gen_cookie = OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot);
+ unsigned long ipv6_ev_cookie = OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot);
int err;
- err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie,
+ err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie, true,
ocelot_populate_ipv6_ptp_event_trap_key);
if (err)
return err;
- err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie,
+ err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie, false,
ocelot_populate_ipv6_ptp_general_trap_key);
if (err)
ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
@@ -1605,8 +1722,8 @@ static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port)
static int ocelot_ipv6_ptp_trap_del(struct ocelot *ocelot, int port)
{
- unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4;
- unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5;
+ unsigned long ipv6_gen_cookie = OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot);
+ unsigned long ipv6_ev_cookie = OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot);
int err;
err = ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
@@ -1750,28 +1867,36 @@ void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
EXPORT_SYMBOL(ocelot_get_strings);
/* Caller must hold &ocelot->stats_lock */
-static void ocelot_update_stats(struct ocelot *ocelot)
+static int ocelot_port_update_stats(struct ocelot *ocelot, int port)
{
- int i, j;
+ unsigned int idx = port * ocelot->num_stats;
+ struct ocelot_stats_region *region;
+ int err, j;
- for (i = 0; i < ocelot->num_phys_ports; i++) {
- /* Configure the port to read the stats from */
- ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(i), SYS_STAT_CFG);
+ /* Configure the port to read the stats from */
+ ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port), SYS_STAT_CFG);
- for (j = 0; j < ocelot->num_stats; j++) {
- u32 val;
- unsigned int idx = i * ocelot->num_stats + j;
+ list_for_each_entry(region, &ocelot->stats_regions, node) {
+ err = ocelot_bulk_read_rix(ocelot, SYS_COUNT_RX_OCTETS,
+ region->offset, region->buf,
+ region->count);
+ if (err)
+ return err;
- val = ocelot_read_rix(ocelot, SYS_COUNT_RX_OCTETS,
- ocelot->stats_layout[j].offset);
+ for (j = 0; j < region->count; j++) {
+ u64 *stat = &ocelot->stats[idx + j];
+ u64 val = region->buf[j];
- if (val < (ocelot->stats[idx] & U32_MAX))
- ocelot->stats[idx] += (u64)1 << 32;
+ if (val < (*stat & U32_MAX))
+ *stat += (u64)1 << 32;
- ocelot->stats[idx] = (ocelot->stats[idx] &
- ~(u64)U32_MAX) + val;
+ *stat = (*stat & ~(u64)U32_MAX) + val;
}
+
+ idx += region->count;
}
+
+ return err;
}
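/* Editor's sketch, not part of the patch: ocelot_port_update_stats() widens
 * the 32-bit hardware counters into 64-bit software counters. A standalone
 * model of that carry logic, using plain stdint types:
 */
#include <stdint.h>

static void widen_counter(uint64_t *stat, uint32_t val)
{
	if (val < (uint32_t)*stat)		/* low word went backwards: it wrapped */
		*stat += (uint64_t)1 << 32;	/* carry into the high word */

	*stat = (*stat & ~(uint64_t)UINT32_MAX) + val;	/* splice in new low word */
}
/* Example: *stat == 0xfffffffe, val == 5  ->  *stat == 0x100000005. */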
static void ocelot_check_stats_work(struct work_struct *work)
@@ -1779,29 +1904,40 @@ static void ocelot_check_stats_work(struct work_struct *work)
struct delayed_work *del_work = to_delayed_work(work);
struct ocelot *ocelot = container_of(del_work, struct ocelot,
stats_work);
+ int i, err;
mutex_lock(&ocelot->stats_lock);
- ocelot_update_stats(ocelot);
+ for (i = 0; i < ocelot->num_phys_ports; i++) {
+ err = ocelot_port_update_stats(ocelot, i);
+ if (err)
+ break;
+ }
mutex_unlock(&ocelot->stats_lock);
+ if (err)
+ dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
+
queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
OCELOT_STATS_CHECK_DELAY);
}
void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
{
- int i;
+ int i, err;
mutex_lock(&ocelot->stats_lock);
/* check and update now */
- ocelot_update_stats(ocelot);
+ err = ocelot_port_update_stats(ocelot, port);
/* Copy all counters */
for (i = 0; i < ocelot->num_stats; i++)
*data++ = ocelot->stats[port * ocelot->num_stats + i];
mutex_unlock(&ocelot->stats_lock);
+
+ if (err)
+ dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
}
EXPORT_SYMBOL(ocelot_get_ethtool_stats);
@@ -1814,6 +1950,41 @@ int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
}
EXPORT_SYMBOL(ocelot_get_sset_count);
+static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
+{
+ struct ocelot_stats_region *region = NULL;
+ unsigned int last;
+ int i;
+
+ INIT_LIST_HEAD(&ocelot->stats_regions);
+
+ for (i = 0; i < ocelot->num_stats; i++) {
+ if (region && ocelot->stats_layout[i].offset == last + 1) {
+ region->count++;
+ } else {
+ region = devm_kzalloc(ocelot->dev, sizeof(*region),
+ GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ region->offset = ocelot->stats_layout[i].offset;
+ region->count = 1;
+ list_add_tail(&region->node, &ocelot->stats_regions);
+ }
+
+ last = ocelot->stats_layout[i].offset;
+ }
+
+ list_for_each_entry(region, &ocelot->stats_regions, node) {
+ region->buf = devm_kcalloc(ocelot->dev, region->count,
+ sizeof(*region->buf), GFP_KERNEL);
+ if (!region->buf)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
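/* Editor's note, illustrative only: ocelot_prepare_stats_regions() collapses
 * counters whose register offsets are contiguous into one (offset, count)
 * region. For a made-up layout with offsets { 0x00, 0x01, 0x02, 0x10, 0x11 }
 * it builds two regions, { .offset = 0x00, .count = 3 } and
 * { .offset = 0x10, .count = 2 }, so ocelot_port_update_stats() issues two
 * bulk reads per port instead of five single-register reads.
 */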
int ocelot_get_ts_info(struct ocelot *ocelot, int port,
struct ethtool_ts_info *info)
{
@@ -1847,6 +2018,8 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond)
u32 mask = 0;
int port;
+ lockdep_assert_held(&ocelot->fwd_domain_lock);
+
for (port = 0; port < ocelot->num_phys_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
@@ -1860,6 +2033,19 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond)
return mask;
}
+/* The logical port number of a LAG is equal to the lowest numbered physical
+ * port ID present in that LAG. It may change if that port ever leaves the LAG.
+ */
+static int ocelot_bond_get_id(struct ocelot *ocelot, struct net_device *bond)
+{
+ int bond_mask = ocelot_get_bond_mask(ocelot, bond);
+
+ if (!bond_mask)
+ return -ENOENT;
+
+ return __ffs(bond_mask);
+}
+
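/* Editor's note, illustrative: with (hypothetical) ports 2 and 5 in the same
 * bond, ocelot_get_bond_mask() returns 0b100100 and __ffs() yields 2, so the
 * LAG's logical port ID is 2. If port 2 later leaves, the mask becomes
 * 0b100000 and the ID changes to 5, the case that ocelot_port_lag_leave()
 * further below handles by migrating the LAG FDB entries to the new ID.
 */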
u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port)
{
struct ocelot_port *ocelot_port = ocelot->ports[src_port];
@@ -1979,6 +2165,28 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining)
}
EXPORT_SYMBOL(ocelot_apply_bridge_fwd_mask);
+void ocelot_port_set_dsa_8021q_cpu(struct ocelot *ocelot, int port)
+{
+ u16 vid;
+
+ ocelot->ports[port]->is_dsa_8021q_cpu = true;
+
+ for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
+ ocelot_vlan_member_add(ocelot, port, vid, true);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_set_dsa_8021q_cpu);
+
+void ocelot_port_unset_dsa_8021q_cpu(struct ocelot *ocelot, int port)
+{
+ u16 vid;
+
+ ocelot->ports[port]->is_dsa_8021q_cpu = false;
+
+ for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
+ ocelot_vlan_member_del(ocelot, port, vid);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_unset_dsa_8021q_cpu);
+
void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
@@ -2123,7 +2331,8 @@ static void ocelot_encode_ports_to_mdb(unsigned char *addr,
}
int ocelot_port_mdb_add(struct ocelot *ocelot, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ const struct net_device *bridge)
{
unsigned char addr[ETH_ALEN];
struct ocelot_multicast *mc;
@@ -2133,6 +2342,9 @@ int ocelot_port_mdb_add(struct ocelot *ocelot, int port,
if (port == ocelot->npi)
port = ocelot->num_phys_ports;
+ if (!vid)
+ vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
+
mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
if (!mc) {
/* New entry */
@@ -2179,7 +2391,8 @@ int ocelot_port_mdb_add(struct ocelot *ocelot, int port,
EXPORT_SYMBOL(ocelot_port_mdb_add);
int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ const struct net_device *bridge)
{
unsigned char addr[ETH_ALEN];
struct ocelot_multicast *mc;
@@ -2189,6 +2402,9 @@ int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
if (port == ocelot->npi)
port = ocelot->num_phys_ports;
+ if (!vid)
+ vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
+
mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
if (!mc)
return -ENOENT;
@@ -2222,18 +2438,30 @@ int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL(ocelot_port_mdb_del);
-void ocelot_port_bridge_join(struct ocelot *ocelot, int port,
- struct net_device *bridge)
+int ocelot_port_bridge_join(struct ocelot *ocelot, int port,
+ struct net_device *bridge, int bridge_num,
+ struct netlink_ext_ack *extack)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int err;
+
+ err = ocelot_single_vlan_aware_bridge(ocelot, extack);
+ if (err)
+ return err;
mutex_lock(&ocelot->fwd_domain_lock);
ocelot_port->bridge = bridge;
+ ocelot_port->bridge_num = bridge_num;
ocelot_apply_bridge_fwd_mask(ocelot, true);
mutex_unlock(&ocelot->fwd_domain_lock);
+
+ if (br_vlan_enabled(bridge))
+ return 0;
+
+ return ocelot_add_vlan_unaware_pvid(ocelot, port, bridge);
}
EXPORT_SYMBOL(ocelot_port_bridge_join);
@@ -2244,7 +2472,11 @@ void ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
mutex_lock(&ocelot->fwd_domain_lock);
+ if (!br_vlan_enabled(bridge))
+ ocelot_del_vlan_unaware_pvid(ocelot, port, bridge);
+
ocelot_port->bridge = NULL;
+ ocelot_port->bridge_num = -1;
ocelot_port_set_pvid(ocelot, port, NULL);
ocelot_port_manage_port_tag(ocelot, port);
@@ -2353,7 +2585,7 @@ static void ocelot_setup_logical_port_ids(struct ocelot *ocelot)
bond = ocelot_port->bond;
if (bond) {
- int lag = __ffs(ocelot_get_bond_mask(ocelot, bond));
+ int lag = ocelot_bond_get_id(ocelot, bond);
ocelot_rmw_gix(ocelot,
ANA_PORT_PORT_CFG_PORTID_VAL(lag),
@@ -2368,6 +2600,46 @@ static void ocelot_setup_logical_port_ids(struct ocelot *ocelot)
}
}
+/* Documentation for PORTID_VAL says:
+ * Logical port number for front port. If port is not a member of a LLAG,
+ * then PORTID must be set to the physical port number.
+ * If port is a member of a LLAG, then PORTID must be set to the common
+ * PORTID_VAL used for all member ports of the LLAG.
+ * The value must not exceed the number of physical ports on the device.
+ *
+ * This means we have little choice but to migrate FDB entries pointing towards
+ * a logical port when that changes.
+ */
+static void ocelot_migrate_lag_fdbs(struct ocelot *ocelot,
+ struct net_device *bond,
+ int lag)
+{
+ struct ocelot_lag_fdb *fdb;
+ int err;
+
+ lockdep_assert_held(&ocelot->fwd_domain_lock);
+
+ list_for_each_entry(fdb, &ocelot->lag_fdbs, list) {
+ if (fdb->bond != bond)
+ continue;
+
+ err = ocelot_mact_forget(ocelot, fdb->addr, fdb->vid);
+ if (err) {
+ dev_err(ocelot->dev,
+ "failed to delete LAG %s FDB %pM vid %d: %pe\n",
+ bond->name, fdb->addr, fdb->vid, ERR_PTR(err));
+ }
+
+ err = ocelot_mact_learn(ocelot, lag, fdb->addr, fdb->vid,
+ ENTRYTYPE_LOCKED);
+ if (err) {
+ dev_err(ocelot->dev,
+ "failed to migrate LAG %s FDB %pM vid %d: %pe\n",
+ bond->name, fdb->addr, fdb->vid, ERR_PTR(err));
+ }
+ }
+}
+
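/* Editor's note, illustrative sequence: for each struct ocelot_lag_fdb owned
 * by the bond, the entry is first removed from the MAC table and then
 * re-learned against the new logical port, e.g. (values made up):
 *
 *	ocelot_mact_forget(ocelot, fdb->addr, fdb->vid);
 *	ocelot_mact_learn(ocelot, 3, fdb->addr, fdb->vid, ENTRYTYPE_LOCKED);
 *
 * Failures are only logged; the loop continues with the remaining entries.
 */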
int ocelot_port_lag_join(struct ocelot *ocelot, int port,
struct net_device *bond,
struct netdev_lag_upper_info *info)
@@ -2392,14 +2664,23 @@ EXPORT_SYMBOL(ocelot_port_lag_join);
void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
struct net_device *bond)
{
+ int old_lag_id, new_lag_id;
+
mutex_lock(&ocelot->fwd_domain_lock);
+ old_lag_id = ocelot_bond_get_id(ocelot, bond);
+
ocelot->ports[port]->bond = NULL;
ocelot_setup_logical_port_ids(ocelot);
ocelot_apply_bridge_fwd_mask(ocelot, false);
ocelot_set_aggr_pgids(ocelot);
+ new_lag_id = ocelot_bond_get_id(ocelot, bond);
+
+ if (new_lag_id >= 0 && old_lag_id != new_lag_id)
+ ocelot_migrate_lag_fdbs(ocelot, bond, new_lag_id);
+
mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_port_lag_leave);
@@ -2408,13 +2689,83 @@ void ocelot_port_lag_change(struct ocelot *ocelot, int port, bool lag_tx_active)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ mutex_lock(&ocelot->fwd_domain_lock);
+
ocelot_port->lag_tx_active = lag_tx_active;
/* Rebalance the LAGs */
ocelot_set_aggr_pgids(ocelot);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_port_lag_change);
+int ocelot_lag_fdb_add(struct ocelot *ocelot, struct net_device *bond,
+ const unsigned char *addr, u16 vid,
+ const struct net_device *bridge)
+{
+ struct ocelot_lag_fdb *fdb;
+ int lag, err;
+
+ fdb = kzalloc(sizeof(*fdb), GFP_KERNEL);
+ if (!fdb)
+ return -ENOMEM;
+
+ mutex_lock(&ocelot->fwd_domain_lock);
+
+ if (!vid)
+ vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
+
+ ether_addr_copy(fdb->addr, addr);
+ fdb->vid = vid;
+ fdb->bond = bond;
+
+ lag = ocelot_bond_get_id(ocelot, bond);
+
+ err = ocelot_mact_learn(ocelot, lag, addr, vid, ENTRYTYPE_LOCKED);
+ if (err) {
+ mutex_unlock(&ocelot->fwd_domain_lock);
+ kfree(fdb);
+ return err;
+ }
+
+ list_add_tail(&fdb->list, &ocelot->lag_fdbs);
+ mutex_unlock(&ocelot->fwd_domain_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_lag_fdb_add);
+
+int ocelot_lag_fdb_del(struct ocelot *ocelot, struct net_device *bond,
+ const unsigned char *addr, u16 vid,
+ const struct net_device *bridge)
+{
+ struct ocelot_lag_fdb *fdb, *tmp;
+
+ mutex_lock(&ocelot->fwd_domain_lock);
+
+ if (!vid)
+ vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
+
+ list_for_each_entry_safe(fdb, tmp, &ocelot->lag_fdbs, list) {
+ if (!ether_addr_equal(fdb->addr, addr) || fdb->vid != vid ||
+ fdb->bond != bond)
+ continue;
+
+ ocelot_mact_forget(ocelot, addr, vid);
+ list_del(&fdb->list);
+ mutex_unlock(&ocelot->fwd_domain_lock);
+ kfree(fdb);
+
+ return 0;
+ }
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(ocelot_lag_fdb_del);
+
/* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu.
* The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG.
* In the special case that it's the NPI port that we're configuring, the
@@ -2535,6 +2886,9 @@ EXPORT_SYMBOL(ocelot_port_pre_bridge_flags);
void ocelot_port_bridge_flags(struct ocelot *ocelot, int port,
struct switchdev_brport_flags flags)
{
+ if (port == ocelot->npi)
+ port = ocelot->num_phys_ports;
+
if (flags.mask & BR_LEARNING)
ocelot_port_set_learning(ocelot, port,
!!(flags.val & BR_LEARNING));
@@ -2553,6 +2907,198 @@ void ocelot_port_bridge_flags(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL(ocelot_port_bridge_flags);
+int ocelot_port_get_default_prio(struct ocelot *ocelot, int port)
+{
+ int val = ocelot_read_gix(ocelot, ANA_PORT_QOS_CFG, port);
+
+ return ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_X(val);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_default_prio);
+
+int ocelot_port_set_default_prio(struct ocelot *ocelot, int port, u8 prio)
+{
+ if (prio >= OCELOT_NUM_TC)
+ return -ERANGE;
+
+ ocelot_rmw_gix(ocelot,
+ ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL(prio),
+ ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_M,
+ ANA_PORT_QOS_CFG,
+ port);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_port_set_default_prio);
+
+int ocelot_port_get_dscp_prio(struct ocelot *ocelot, int port, u8 dscp)
+{
+ int qos_cfg = ocelot_read_gix(ocelot, ANA_PORT_QOS_CFG, port);
+ int dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp);
+
+ /* Return error if DSCP prioritization isn't enabled */
+ if (!(qos_cfg & ANA_PORT_QOS_CFG_QOS_DSCP_ENA))
+ return -EOPNOTSUPP;
+
+ if (qos_cfg & ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA) {
+ dscp = ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_X(dscp_cfg);
+ /* Re-read ANA_DSCP_CFG for the translated DSCP */
+ dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp);
+ }
+
+ /* If the DSCP value is not trusted, the QoS classification falls back
+ * to VLAN PCP or port-based default.
+ */
+ if (!(dscp_cfg & ANA_DSCP_CFG_DSCP_TRUST_ENA))
+ return -EOPNOTSUPP;
+
+ return ANA_DSCP_CFG_QOS_DSCP_VAL_X(dscp_cfg);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_dscp_prio);
+
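/* Editor's note, hypothetical register contents: with DSCP_TRANSLATE_ENA set
 * on the port and ANA_DSCP_CFG[46] translating DSCP 46 to 40, the function
 * re-reads ANA_DSCP_CFG[40]; if that entry has DSCP_TRUST_ENA set and a
 * QOS_DSCP_VAL of 5, ocelot_port_get_dscp_prio(ocelot, port, 46) returns 5.
 * Without the trust bit it returns -EOPNOTSUPP and classification falls back
 * to PCP or the port default priority, as the comments above describe.
 */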
+int ocelot_port_add_dscp_prio(struct ocelot *ocelot, int port, u8 dscp, u8 prio)
+{
+ int mask, val;
+
+ if (prio >= OCELOT_NUM_TC)
+ return -ERANGE;
+
+ /* There is at least one app table priority (this one), so we need to
+ * make sure DSCP prioritization is enabled on the port.
+ * Also make sure DSCP translation is disabled
+ * (dcbnl doesn't support it).
+ */
+ mask = ANA_PORT_QOS_CFG_QOS_DSCP_ENA |
+ ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA;
+
+ ocelot_rmw_gix(ocelot, ANA_PORT_QOS_CFG_QOS_DSCP_ENA, mask,
+ ANA_PORT_QOS_CFG, port);
+
+ /* Trust this DSCP value and map it to the given QoS class */
+ val = ANA_DSCP_CFG_DSCP_TRUST_ENA | ANA_DSCP_CFG_QOS_DSCP_VAL(prio);
+
+ ocelot_write_rix(ocelot, val, ANA_DSCP_CFG, dscp);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_port_add_dscp_prio);
+
+int ocelot_port_del_dscp_prio(struct ocelot *ocelot, int port, u8 dscp, u8 prio)
+{
+ int dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp);
+ int mask, i;
+
+ /* During a "dcb app replace" command, the new app table entry will be
+ * added first, then the old one will be deleted. But the hardware only
+ * supports one QoS class per DSCP value (duh), so if we blindly delete
+ * the app table entry for this DSCP value, we end up deleting the
+ * entry with the new priority. Avoid that by checking whether user
+ * space wants to delete the priority which is currently configured, or
+ * something else which is no longer current.
+ */
+ if (ANA_DSCP_CFG_QOS_DSCP_VAL_X(dscp_cfg) != prio)
+ return 0;
+
+ /* Untrust this DSCP value */
+ ocelot_write_rix(ocelot, 0, ANA_DSCP_CFG, dscp);
+
+ for (i = 0; i < 64; i++) {
+ int dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, i);
+
+ /* There are still app table entries on the port, so we need to
+ * keep DSCP enabled, nothing to do.
+ */
+ if (dscp_cfg & ANA_DSCP_CFG_DSCP_TRUST_ENA)
+ return 0;
+ }
+
+ /* Disable DSCP QoS classification if there isn't any trusted
+ * DSCP value left.
+ */
+ mask = ANA_PORT_QOS_CFG_QOS_DSCP_ENA |
+ ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA;
+
+ ocelot_rmw_gix(ocelot, 0, mask, ANA_PORT_QOS_CFG, port);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_port_del_dscp_prio);
+
+struct ocelot_mirror *ocelot_mirror_get(struct ocelot *ocelot, int to,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot_mirror *m = ocelot->mirror;
+
+ if (m) {
+ if (m->to != to) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Mirroring already configured towards different egress port");
+ return ERR_PTR(-EBUSY);
+ }
+
+ refcount_inc(&m->refcount);
+ return m;
+ }
+
+ m = kzalloc(sizeof(*m), GFP_KERNEL);
+ if (!m)
+ return ERR_PTR(-ENOMEM);
+
+ m->to = to;
+ refcount_set(&m->refcount, 1);
+ ocelot->mirror = m;
+
+ /* Program the mirror port to hardware */
+ ocelot_write(ocelot, BIT(to), ANA_MIRRORPORTS);
+
+ return m;
+}
+
+void ocelot_mirror_put(struct ocelot *ocelot)
+{
+ struct ocelot_mirror *m = ocelot->mirror;
+
+ if (!refcount_dec_and_test(&m->refcount))
+ return;
+
+ ocelot_write(ocelot, 0, ANA_MIRRORPORTS);
+ ocelot->mirror = NULL;
+ kfree(m);
+}
+
+int ocelot_port_mirror_add(struct ocelot *ocelot, int from, int to,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ struct ocelot_mirror *m = ocelot_mirror_get(ocelot, to, extack);
+
+ if (IS_ERR(m))
+ return PTR_ERR(m);
+
+ if (ingress) {
+ ocelot_rmw_gix(ocelot, ANA_PORT_PORT_CFG_SRC_MIRROR_ENA,
+ ANA_PORT_PORT_CFG_SRC_MIRROR_ENA,
+ ANA_PORT_PORT_CFG, from);
+ } else {
+ ocelot_rmw(ocelot, BIT(from), BIT(from),
+ ANA_EMIRRORPORTS);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_port_mirror_add);
+
+void ocelot_port_mirror_del(struct ocelot *ocelot, int from, bool ingress)
+{
+ if (ingress) {
+ ocelot_rmw_gix(ocelot, 0, ANA_PORT_PORT_CFG_SRC_MIRROR_ENA,
+ ANA_PORT_PORT_CFG, from);
+ } else {
+ ocelot_rmw(ocelot, 0, BIT(from), ANA_EMIRRORPORTS);
+ }
+
+ ocelot_mirror_put(ocelot);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_mirror_del);
+
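/* Editor's sketch of the intended pairing, not part of the patch: the single
 * mirror destination is refcounted so the matchall path and the VCAP IS2
 * mirror action can share it.
 *
 *	m = ocelot_mirror_get(ocelot, to, extack);	// first user programs
 *	if (IS_ERR(m))					// ANA_MIRRORPORTS
 *		return PTR_ERR(m);
 *	...
 *	ocelot_mirror_put(ocelot);			// last user clears it
 *
 * A second caller asking for a different "to" port gets -EBUSY with an
 * extack message instead of silently reprogramming the mirror port.
 */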
void ocelot_init_port(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
@@ -2647,7 +3193,7 @@ static void ocelot_cpu_port_init(struct ocelot *ocelot)
/* Configure the CPU port to be VLAN aware */
ocelot_write_gix(ocelot,
- ANA_PORT_VLAN_CFG_VLAN_VID(OCELOT_VLAN_UNAWARE_PVID) |
+ ANA_PORT_VLAN_CFG_VLAN_VID(OCELOT_STANDALONE_PVID) |
ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
ANA_PORT_VLAN_CFG, cpu);
@@ -2709,6 +3255,7 @@ int ocelot_init(struct ocelot *ocelot)
INIT_LIST_HEAD(&ocelot->multicast);
INIT_LIST_HEAD(&ocelot->pgids);
INIT_LIST_HEAD(&ocelot->vlans);
+ INIT_LIST_HEAD(&ocelot->lag_fdbs);
ocelot_detect_features(ocelot);
ocelot_mact_init(ocelot);
ocelot_vlan_init(ocelot);
@@ -2814,6 +3361,13 @@ int ocelot_init(struct ocelot *ocelot)
ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6),
ANA_CPUQ_8021_CFG, i);
+ ret = ocelot_prepare_stats_regions(ocelot);
+ if (ret) {
+ destroy_workqueue(ocelot->stats_queue);
+ destroy_workqueue(ocelot->owq);
+ return ret;
+ }
+
INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
OCELOT_STATS_CHECK_DELAY);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index bf4eff6d7086..d0fa8ab6cc81 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -21,11 +21,12 @@
#include <soc/mscc/ocelot_dev.h>
#include <soc/mscc/ocelot_ana.h>
#include <soc/mscc/ocelot_ptp.h>
+#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot.h>
#include "ocelot_rew.h"
#include "ocelot_qs.h"
-#define OCELOT_VLAN_UNAWARE_PVID 0
+#define OCELOT_STANDALONE_PVID 0
#define OCELOT_BUFFER_CELL_SZ 60
#define OCELOT_STATS_CHECK_DELAY (2 * HZ)
@@ -37,7 +38,8 @@
struct ocelot_port_tc {
bool block_shared;
unsigned long offload_cnt;
-
+ unsigned long ingress_mirred_id;
+ unsigned long egress_mirred_id;
unsigned long police_id;
};
@@ -80,6 +82,9 @@ struct ocelot_multicast {
struct ocelot_pgid *pgid;
};
+int ocelot_bridge_num_find(struct ocelot *ocelot,
+ const struct net_device *bridge);
+
int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
bool is_static, void *data);
int ocelot_mact_learn(struct ocelot *ocelot, int port,
@@ -102,6 +107,15 @@ int ocelot_port_devlink_init(struct ocelot *ocelot, int port,
enum devlink_port_flavour flavour);
void ocelot_port_devlink_teardown(struct ocelot *ocelot, int port);
+int ocelot_trap_add(struct ocelot *ocelot, int port,
+ unsigned long cookie, bool take_ts,
+ void (*populate)(struct ocelot_vcap_filter *f));
+int ocelot_trap_del(struct ocelot *ocelot, int port, unsigned long cookie);
+
+struct ocelot_mirror *ocelot_mirror_get(struct ocelot *ocelot, int to,
+ struct netlink_ext_ack *extack);
+void ocelot_mirror_put(struct ocelot *ocelot);
+
extern struct notifier_block ocelot_netdevice_nb;
extern struct notifier_block ocelot_switchdev_nb;
extern struct notifier_block ocelot_switchdev_blocking_nb;
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index fdb4d7e7296c..03b5e59d033e 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -6,6 +6,7 @@
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <soc/mscc/ocelot_vcap.h>
+#include "ocelot_police.h"
#include "ocelot_vcap.h"
/* Arbitrarily chosen constants for encoding the VCAP block and lookup number
@@ -231,6 +232,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
bool ingress, struct flow_cls_offload *f,
struct ocelot_vcap_filter *filter)
{
+ const struct flow_action *action = &f->rule->action;
struct netlink_ext_ack *extack = f->common.extack;
bool allow_missing_goto_target = false;
const struct flow_action_entry *a;
@@ -258,7 +260,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->goto_target = -1;
filter->type = OCELOT_VCAP_FILTER_DUMMY;
- flow_action_for_each(i, a, &f->rule->action) {
+ flow_action_for_each(i, a, action) {
switch (a->id) {
case FLOW_ACTION_DROP:
if (filter->block_id != VCAP_IS2) {
@@ -293,6 +295,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->action.cpu_copy_ena = true;
filter->action.cpu_qu_num = 0;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ list_add_tail(&filter->trap_list, &ocelot->traps);
break;
case FLOW_ACTION_POLICE:
if (filter->block_id == PSFP_BLOCK_ID) {
@@ -310,11 +313,11 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
"Last action must be GOTO");
return -EOPNOTSUPP;
}
- if (a->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack,
- "QoS offload not support packets per second");
- return -EOPNOTSUPP;
- }
+
+ err = ocelot_policer_validate(action, a, extack);
+ if (err)
+ return err;
+
filter->action.police_ena = true;
pol_ix = a->hw_index + ocelot->vcap_pol.base;
@@ -356,6 +359,27 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->action.port_mask = BIT(egress_port);
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
+ case FLOW_ACTION_MIRRED:
+ if (filter->block_id != VCAP_IS2) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Mirror action can only be offloaded to VCAP IS2");
+ return -EOPNOTSUPP;
+ }
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+ egress_port = ocelot->ops->netdev_to_port(a->dev);
+ if (egress_port < 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not an ocelot port");
+ return -EOPNOTSUPP;
+ }
+ filter->egress_port.value = egress_port;
+ filter->action.mirror_ena = true;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ break;
case FLOW_ACTION_VLAN_POP:
if (filter->block_id != VCAP_IS1) {
NL_SET_ERR_MSG_MOD(extack,
@@ -854,6 +878,8 @@ int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
ret = ocelot_flower_parse(ocelot, port, ingress, f, filter);
if (ret) {
+ if (!list_empty(&filter->trap_list))
+ list_del(&filter->trap_list);
kfree(filter);
return ret;
}
diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c
index 7390fa3980ec..2067382d0ee1 100644
--- a/drivers/net/ethernet/mscc/ocelot_io.c
+++ b/drivers/net/ethernet/mscc/ocelot_io.c
@@ -10,6 +10,19 @@
#include "ocelot.h"
+int __ocelot_bulk_read_ix(struct ocelot *ocelot, u32 reg, u32 offset, void *buf,
+ int count)
+{
+ u16 target = reg >> TARGET_OFFSET;
+
+ WARN_ON(!target);
+
+ return regmap_bulk_read(ocelot->targets[target],
+ ocelot->map[target][reg & REG_MASK] + offset,
+ buf, count);
+}
+EXPORT_SYMBOL_GPL(__ocelot_bulk_read_ix);
+
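/* Editor's note, illustrative call with made-up buffer size: the helper reads
 * "count" consecutive 32-bit words starting at reg + offset through
 * regmap_bulk_read(), e.g.
 *
 *	u32 buf[16];
 *	err = __ocelot_bulk_read_ix(ocelot, SYS_COUNT_RX_OCTETS, 0, buf, 16);
 *
 * which is presumably what the ocelot_bulk_read_rix() wrapper used by the
 * stats code expands to.
 */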
u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset)
{
u16 target = reg >> TARGET_OFFSET;
diff --git a/drivers/net/ethernet/mscc/ocelot_mrp.c b/drivers/net/ethernet/mscc/ocelot_mrp.c
index 1fa58546abdc..3ccec488a304 100644
--- a/drivers/net/ethernet/mscc/ocelot_mrp.c
+++ b/drivers/net/ethernet/mscc/ocelot_mrp.c
@@ -60,7 +60,7 @@ static int ocelot_mrp_redirect_add_vcap(struct ocelot *ocelot, int src_port,
filter->key_type = OCELOT_VCAP_KEY_ETYPE;
filter->prio = 1;
- filter->id.cookie = src_port;
+ filter->id.cookie = OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, src_port);
filter->id.tc_offload = false;
filter->block_id = VCAP_IS2;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -77,55 +77,46 @@ static int ocelot_mrp_redirect_add_vcap(struct ocelot *ocelot, int src_port,
return err;
}
-static int ocelot_mrp_copy_add_vcap(struct ocelot *ocelot, int port,
- int prio, unsigned long cookie)
+static void ocelot_populate_mrp_trap_key(struct ocelot_vcap_filter *filter)
{
const u8 mrp_mask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
- struct ocelot_vcap_filter *filter;
- int err;
-
- filter = kzalloc(sizeof(*filter), GFP_KERNEL);
- if (!filter)
- return -ENOMEM;
- filter->key_type = OCELOT_VCAP_KEY_ETYPE;
- filter->prio = prio;
- filter->id.cookie = cookie;
- filter->id.tc_offload = false;
- filter->block_id = VCAP_IS2;
- filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
- filter->ingress_port_mask = BIT(port);
/* Here it is possible to use control or test dmac because the mask
* doesn't cover the LSB
*/
ether_addr_copy(filter->key.etype.dmac.value, mrp_test_dmac);
ether_addr_copy(filter->key.etype.dmac.mask, mrp_mask);
- filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
- filter->action.port_mask = 0x0;
- filter->action.cpu_copy_ena = true;
- filter->action.cpu_qu_num = OCELOT_MRP_CPUQ;
+}
- err = ocelot_vcap_filter_add(ocelot, filter, NULL);
- if (err)
- kfree(filter);
+static int ocelot_mrp_trap_add(struct ocelot *ocelot, int port)
+{
+ unsigned long cookie = OCELOT_VCAP_IS2_MRP_TRAP(ocelot);
- return err;
+ return ocelot_trap_add(ocelot, port, cookie, false,
+ ocelot_populate_mrp_trap_key);
+}
+
+static int ocelot_mrp_trap_del(struct ocelot *ocelot, int port)
+{
+ unsigned long cookie = OCELOT_VCAP_IS2_MRP_TRAP(ocelot);
+
+ return ocelot_trap_del(ocelot, port, cookie);
}
static void ocelot_mrp_save_mac(struct ocelot *ocelot,
struct ocelot_port *port)
{
ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_test_dmac,
- OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
+ OCELOT_STANDALONE_PVID, ENTRYTYPE_LOCKED);
ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_control_dmac,
- OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
+ OCELOT_STANDALONE_PVID, ENTRYTYPE_LOCKED);
}
static void ocelot_mrp_del_mac(struct ocelot *ocelot,
struct ocelot_port *port)
{
- ocelot_mact_forget(ocelot, mrp_test_dmac, OCELOT_VLAN_UNAWARE_PVID);
- ocelot_mact_forget(ocelot, mrp_control_dmac, OCELOT_VLAN_UNAWARE_PVID);
+ ocelot_mact_forget(ocelot, mrp_test_dmac, OCELOT_STANDALONE_PVID);
+ ocelot_mact_forget(ocelot, mrp_control_dmac, OCELOT_STANDALONE_PVID);
}
int ocelot_mrp_add(struct ocelot *ocelot, int port,
@@ -186,7 +177,7 @@ int ocelot_mrp_add_ring_role(struct ocelot *ocelot, int port,
ocelot_mrp_save_mac(ocelot, ocelot_port);
if (mrp->ring_role != BR_MRP_RING_ROLE_MRC)
- return ocelot_mrp_copy_add_vcap(ocelot, port, 1, port);
+ return ocelot_mrp_trap_add(ocelot, port);
dst_port = ocelot_mrp_find_partner_port(ocelot, ocelot_port);
if (dst_port == -1)
@@ -196,10 +187,10 @@ int ocelot_mrp_add_ring_role(struct ocelot *ocelot, int port,
if (err)
return err;
- err = ocelot_mrp_copy_add_vcap(ocelot, port, 2,
- port + ocelot->num_phys_ports);
+ err = ocelot_mrp_trap_add(ocelot, port);
if (err) {
- ocelot_mrp_del_vcap(ocelot, port);
+ ocelot_mrp_del_vcap(ocelot,
+ OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port));
return err;
}
@@ -211,7 +202,7 @@ int ocelot_mrp_del_ring_role(struct ocelot *ocelot, int port,
const struct switchdev_obj_ring_role_mrp *mrp)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- int i;
+ int err, i;
if (!ocelot_port)
return -EOPNOTSUPP;
@@ -222,8 +213,11 @@ int ocelot_mrp_del_ring_role(struct ocelot *ocelot, int port,
if (ocelot_port->mrp_ring_id != mrp->ring_id)
return 0;
- ocelot_mrp_del_vcap(ocelot, port);
- ocelot_mrp_del_vcap(ocelot, port + ocelot->num_phys_ports);
+ err = ocelot_mrp_trap_del(ocelot, port);
+ if (err)
+ return err;
+
+ ocelot_mrp_del_vcap(ocelot, OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port));
for (i = 0; i < ocelot->num_phys_ports; ++i) {
ocelot_port = ocelot->ports[i];
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index e271b6225b72..247bc105bdd2 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -14,11 +14,14 @@
#include <linux/phy/phy.h>
#include <net/pkt_cls.h>
#include "ocelot.h"
+#include "ocelot_police.h"
#include "ocelot_vcap.h"
#include "ocelot_fdma.h"
#define OCELOT_MAC_QUIRKS OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP
+static bool ocelot_netdevice_dev_check(const struct net_device *dev);
+
static struct ocelot *devlink_port_to_ocelot(struct devlink_port *dlp)
{
return devlink_priv(dlp->devlink);
@@ -215,14 +218,14 @@ int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv,
}
}
-static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
- struct tc_cls_matchall_offload *f,
- bool ingress)
+static int ocelot_setup_tc_cls_matchall_police(struct ocelot_port_private *priv,
+ struct tc_cls_matchall_offload *f,
+ bool ingress,
+ struct netlink_ext_ack *extack)
{
- struct netlink_ext_ack *extack = f->common.extack;
+ struct flow_action_entry *action = &f->rule->action.entries[0];
struct ocelot *ocelot = priv->port.ocelot;
struct ocelot_policer pol = { 0 };
- struct flow_action_entry *action;
int port = priv->chip_port;
int err;
@@ -231,6 +234,119 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
return -EOPNOTSUPP;
}
+ if (priv->tc.police_id && priv->tc.police_id != f->cookie) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one policer per port is supported");
+ return -EEXIST;
+ }
+
+ err = ocelot_policer_validate(&f->rule->action, action, extack);
+ if (err)
+ return err;
+
+ pol.rate = (u32)div_u64(action->police.rate_bytes_ps, 1000) * 8;
+ pol.burst = action->police.burst;
+
+ err = ocelot_port_policer_add(ocelot, port, &pol);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Could not add policer");
+ return err;
+ }
+
+ priv->tc.police_id = f->cookie;
+ priv->tc.offload_cnt++;
+
+ return 0;
+}
+
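/* Editor's note, worked example: tc hands the policer rate over in bytes per
 * second, and the driver converts it with div_u64(rate_bytes_ps, 1000) * 8.
 * For rate_bytes_ps = 12,500,000 B/s that gives 12,500 * 8 = 100,000, i.e.
 * the rate in kbit/s (a 100 Mbit/s policer); the burst is passed on in bytes.
 */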
+static int ocelot_setup_tc_cls_matchall_mirred(struct ocelot_port_private *priv,
+ struct tc_cls_matchall_offload *f,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct flow_action *action = &f->rule->action;
+ struct ocelot *ocelot = priv->port.ocelot;
+ struct ocelot_port_private *other_priv;
+ const struct flow_action_entry *a;
+ int err;
+
+ if (f->common.protocol != htons(ETH_P_ALL))
+ return -EOPNOTSUPP;
+
+ if (!flow_action_basic_hw_stats_check(action, extack))
+ return -EOPNOTSUPP;
+
+ a = &action->entries[0];
+ if (!a->dev)
+ return -EINVAL;
+
+ if (!ocelot_netdevice_dev_check(a->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not an ocelot port");
+ return -EOPNOTSUPP;
+ }
+
+ other_priv = netdev_priv(a->dev);
+
+ err = ocelot_port_mirror_add(ocelot, priv->chip_port,
+ other_priv->chip_port, ingress, extack);
+ if (err)
+ return err;
+
+ if (ingress)
+ priv->tc.ingress_mirred_id = f->cookie;
+ else
+ priv->tc.egress_mirred_id = f->cookie;
+ priv->tc.offload_cnt++;
+
+ return 0;
+}
+
+static int ocelot_del_tc_cls_matchall_police(struct ocelot_port_private *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+ int err;
+
+ err = ocelot_port_policer_del(ocelot, port);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Could not delete policer");
+ return err;
+ }
+
+ priv->tc.police_id = 0;
+ priv->tc.offload_cnt--;
+
+ return 0;
+}
+
+static int ocelot_del_tc_cls_matchall_mirred(struct ocelot_port_private *priv,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ ocelot_port_mirror_del(ocelot, port, ingress);
+
+ if (ingress)
+ priv->tc.ingress_mirred_id = 0;
+ else
+ priv->tc.egress_mirred_id = 0;
+ priv->tc.offload_cnt--;
+
+ return 0;
+}
+
+static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct flow_action_entry *action;
+
switch (f->command) {
case TC_CLSMATCHALL_REPLACE:
if (!flow_offload_has_one_action(&f->rule->action)) {
@@ -241,54 +357,41 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
if (priv->tc.block_shared) {
NL_SET_ERR_MSG_MOD(extack,
- "Rate limit is not supported on shared blocks");
+ "Matchall offloads not supported on shared blocks");
return -EOPNOTSUPP;
}
action = &f->rule->action.entries[0];
- if (action->id != FLOW_ACTION_POLICE) {
+ switch (action->id) {
+ case FLOW_ACTION_POLICE:
+ return ocelot_setup_tc_cls_matchall_police(priv, f,
+ ingress,
+ extack);
+ break;
+ case FLOW_ACTION_MIRRED:
+ return ocelot_setup_tc_cls_matchall_mirred(priv, f,
+ ingress,
+ extack);
+ default:
NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
return -EOPNOTSUPP;
}
- if (priv->tc.police_id && priv->tc.police_id != f->cookie) {
- NL_SET_ERR_MSG_MOD(extack,
- "Only one policer per port is supported");
- return -EEXIST;
- }
-
- if (action->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack,
- "QoS offload not support packets per second");
- return -EOPNOTSUPP;
- }
-
- pol.rate = (u32)div_u64(action->police.rate_bytes_ps, 1000) * 8;
- pol.burst = action->police.burst;
-
- err = ocelot_port_policer_add(ocelot, port, &pol);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Could not add policer");
- return err;
- }
-
- priv->tc.police_id = f->cookie;
- priv->tc.offload_cnt++;
- return 0;
+ break;
case TC_CLSMATCHALL_DESTROY:
- if (priv->tc.police_id != f->cookie)
+ action = &f->rule->action.entries[0];
+
+ if (f->cookie == priv->tc.police_id)
+ return ocelot_del_tc_cls_matchall_police(priv, extack);
+ else if (f->cookie == priv->tc.ingress_mirred_id ||
+ f->cookie == priv->tc.egress_mirred_id)
+ return ocelot_del_tc_cls_matchall_mirred(priv, ingress,
+ extack);
+ else
return -ENOENT;
- err = ocelot_port_policer_del(ocelot, port);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack,
- "Could not delete policer");
- return err;
- }
- priv->tc.police_id = 0;
- priv->tc.offload_cnt--;
- return 0;
+ break;
case TC_CLSMATCHALL_STATS:
default:
return -EOPNOTSUPP;
@@ -419,7 +522,7 @@ static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid)
* with VLAN filtering feature. We need to keep it to receive
* untagged traffic.
*/
- if (vid == OCELOT_VLAN_UNAWARE_PVID)
+ if (vid == OCELOT_STANDALONE_PVID)
return 0;
ret = ocelot_vlan_del(ocelot, port, vid);
@@ -559,7 +662,7 @@ static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr)
struct ocelot_mact_work_ctx w;
ether_addr_copy(w.forget.addr, addr);
- w.forget.vid = OCELOT_VLAN_UNAWARE_PVID;
+ w.forget.vid = OCELOT_STANDALONE_PVID;
w.type = OCELOT_MACT_FORGET;
return ocelot_enqueue_mact_action(ocelot, &w);
@@ -573,7 +676,7 @@ static int ocelot_mc_sync(struct net_device *dev, const unsigned char *addr)
struct ocelot_mact_work_ctx w;
ether_addr_copy(w.learn.addr, addr);
- w.learn.vid = OCELOT_VLAN_UNAWARE_PVID;
+ w.learn.vid = OCELOT_STANDALONE_PVID;
w.learn.pgid = PGID_CPU;
w.learn.entry_type = ENTRYTYPE_LOCKED;
w.type = OCELOT_MACT_LEARN;
@@ -608,9 +711,9 @@ static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
/* Learn the new net device MAC address in the mac table. */
ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data,
- OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
+ OCELOT_STANDALONE_PVID, ENTRYTYPE_LOCKED);
/* Then forget the previous one. */
- ocelot_mact_forget(ocelot, dev->dev_addr, OCELOT_VLAN_UNAWARE_PVID);
+ ocelot_mact_forget(ocelot, dev->dev_addr, OCELOT_STANDALONE_PVID);
eth_hw_addr_set(dev, addr->sa_data);
return 0;
@@ -662,10 +765,11 @@ static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct ocelot_port_private *priv = netdev_priv(dev);
- struct ocelot *ocelot = priv->port.ocelot;
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->chip_port;
- return ocelot_fdb_add(ocelot, port, addr, vid);
+ return ocelot_fdb_add(ocelot, port, addr, vid, ocelot_port->bridge);
}
static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
@@ -673,10 +777,11 @@ static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
const unsigned char *addr, u16 vid)
{
struct ocelot_port_private *priv = netdev_priv(dev);
- struct ocelot *ocelot = priv->port.ocelot;
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->chip_port;
- return ocelot_fdb_del(ocelot, port, addr, vid);
+ return ocelot_fdb_del(ocelot, port, addr, vid, ocelot_port->bridge);
}
static int ocelot_port_fdb_dump(struct sk_buff *skb,
@@ -988,7 +1093,7 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev,
struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->chip_port;
- return ocelot_port_mdb_add(ocelot, port, mdb);
+ return ocelot_port_mdb_add(ocelot, port, mdb, ocelot_port->bridge);
}
static int ocelot_port_obj_del_mdb(struct net_device *dev,
@@ -999,7 +1104,7 @@ static int ocelot_port_obj_del_mdb(struct net_device *dev,
struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->chip_port;
- return ocelot_port_mdb_del(ocelot, port, mdb);
+ return ocelot_port_mdb_del(ocelot, port, mdb, ocelot_port->bridge);
}
static int ocelot_port_obj_mrp_add(struct net_device *dev,
@@ -1173,6 +1278,33 @@ static int ocelot_switchdev_unsync(struct ocelot *ocelot, int port)
return 0;
}
+static int ocelot_bridge_num_get(struct ocelot *ocelot,
+ const struct net_device *bridge_dev)
+{
+ int bridge_num = ocelot_bridge_num_find(ocelot, bridge_dev);
+
+ if (bridge_num < 0) {
+ /* First port that offloads this bridge */
+ bridge_num = find_first_zero_bit(&ocelot->bridges,
+ ocelot->num_phys_ports);
+
+ set_bit(bridge_num, &ocelot->bridges);
+ }
+
+ return bridge_num;
+}
+
+static void ocelot_bridge_num_put(struct ocelot *ocelot,
+ const struct net_device *bridge_dev,
+ int bridge_num)
+{
+ /* Check if the bridge is still in use, otherwise it is time
+ * to clean it up so we can reuse this bridge_num later.
+ */
+ if (!ocelot_bridge_num_find(ocelot, bridge_dev))
+ clear_bit(bridge_num, &ocelot->bridges);
+}
+
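/* Editor's note, illustrative: ocelot->bridges is a bitmap of bridge numbers
 * in use. The first port to offload bridge A gets the first free bit (say 0)
 * from find_first_zero_bit(); later ports joining A reuse that number via
 * ocelot_bridge_num_find(), a second bridge B gets bit 1, and a bit is only
 * cleared again once no offloaded port is left on that bridge, so the number
 * can be recycled.
 */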
static int ocelot_netdevice_bridge_join(struct net_device *dev,
struct net_device *brport_dev,
struct net_device *bridge,
@@ -1182,9 +1314,14 @@ static int ocelot_netdevice_bridge_join(struct net_device *dev,
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->chip_port;
- int err;
+ int bridge_num, err;
+
+ bridge_num = ocelot_bridge_num_get(ocelot, bridge);
- ocelot_port_bridge_join(ocelot, port, bridge);
+ err = ocelot_port_bridge_join(ocelot, port, bridge, bridge_num,
+ extack);
+ if (err)
+ goto err_join;
err = switchdev_bridge_port_offload(brport_dev, dev, priv,
&ocelot_switchdev_nb,
@@ -1205,6 +1342,8 @@ err_switchdev_sync:
&ocelot_switchdev_blocking_nb);
err_switchdev_offload:
ocelot_port_bridge_leave(ocelot, port, bridge);
+err_join:
+ ocelot_bridge_num_put(ocelot, bridge, bridge_num);
return err;
}
@@ -1225,6 +1364,7 @@ static int ocelot_netdevice_bridge_leave(struct net_device *dev,
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
+ int bridge_num = ocelot_port->bridge_num;
int port = priv->chip_port;
int err;
@@ -1233,6 +1373,7 @@ static int ocelot_netdevice_bridge_leave(struct net_device *dev,
return err;
ocelot_port_bridge_leave(ocelot, port, bridge);
+ ocelot_bridge_num_put(ocelot, bridge, bridge_num);
return 0;
}
@@ -1700,7 +1841,7 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
eth_hw_addr_gen(dev, ocelot->base_mac, port);
ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr,
- OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
+ OCELOT_STANDALONE_PVID, ENTRYTYPE_LOCKED);
ocelot_init_port(ocelot, port);
diff --git a/drivers/net/ethernet/mscc/ocelot_police.c b/drivers/net/ethernet/mscc/ocelot_police.c
index 6f5068c1041a..a65606bb84a0 100644
--- a/drivers/net/ethernet/mscc/ocelot_police.c
+++ b/drivers/net/ethernet/mscc/ocelot_police.c
@@ -154,6 +154,47 @@ int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix,
return 0;
}
+int ocelot_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *a,
+ struct netlink_ext_ack *extack)
+{
+ if (a->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (a->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ a->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (a->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, a)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but police action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (a->police.peakrate_bytes_ps ||
+ a->police.avrate || a->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (a->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload does not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_policer_validate);
+
int ocelot_port_policer_add(struct ocelot *ocelot, int port,
struct ocelot_policer *pol)
{
diff --git a/drivers/net/ethernet/mscc/ocelot_police.h b/drivers/net/ethernet/mscc/ocelot_police.h
index 7adb05f71999..7552995f8b17 100644
--- a/drivers/net/ethernet/mscc/ocelot_police.h
+++ b/drivers/net/ethernet/mscc/ocelot_police.h
@@ -8,6 +8,7 @@
#define _MSCC_OCELOT_POLICE_H_
#include "ocelot.h"
+#include <net/flow_offload.h>
enum mscc_qos_rate_mode {
MSCC_QOS_RATE_MODE_DISABLED, /* Policer/shaper disabled */
@@ -33,4 +34,8 @@ struct qos_policer_conf {
int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix,
struct qos_policer_conf *conf);
+int ocelot_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *a,
+ struct netlink_ext_ack *extack);
+
#endif /* _MSCC_OCELOT_POLICE_H_ */
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
index d3544413a8a4..c8701ac955a8 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
@@ -335,6 +335,7 @@ static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data,
vcap_action_set(vcap, data, VCAP_IS2_ACT_MASK_MODE, a->mask_mode);
vcap_action_set(vcap, data, VCAP_IS2_ACT_PORT_MASK, a->port_mask);
+ vcap_action_set(vcap, data, VCAP_IS2_ACT_MIRROR_ENA, a->mirror_ena);
vcap_action_set(vcap, data, VCAP_IS2_ACT_POLICE_ENA, a->police_ena);
vcap_action_set(vcap, data, VCAP_IS2_ACT_POLICE_IDX, a->pol_ix);
vcap_action_set(vcap, data, VCAP_IS2_ACT_CPU_QU_NUM, a->cpu_qu_num);
@@ -564,9 +565,9 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
val = proto.value[0];
msk = proto.mask[0];
type = IS2_TYPE_IP_UDP_TCP;
- if (msk == 0xff && (val == 6 || val == 17)) {
+ if (msk == 0xff && (val == IPPROTO_TCP || val == IPPROTO_UDP)) {
/* UDP/TCP protocol match */
- tcp = (val == 6 ?
+ tcp = (val == IPPROTO_TCP ?
OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_TCP, tcp);
vcap_key_l4_port_set(vcap, &data,
@@ -955,14 +956,21 @@ int ocelot_vcap_policer_del(struct ocelot *ocelot, u32 pol_ix)
}
EXPORT_SYMBOL(ocelot_vcap_policer_del);
-static int ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
- struct ocelot_vcap_block *block,
- struct ocelot_vcap_filter *filter)
+static int
+ocelot_vcap_filter_add_aux_resources(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *filter,
+ struct netlink_ext_ack *extack)
{
- struct ocelot_vcap_filter *tmp;
- struct list_head *pos, *n;
+ struct ocelot_mirror *m;
int ret;
+ if (filter->block_id == VCAP_IS2 && filter->action.mirror_ena) {
+ m = ocelot_mirror_get(ocelot, filter->egress_port.value,
+ extack);
+ if (IS_ERR(m))
+ return PTR_ERR(m);
+ }
+
if (filter->block_id == VCAP_IS2 && filter->action.police_ena) {
ret = ocelot_vcap_policer_add(ocelot, filter->action.pol_ix,
&filter->action.pol);
@@ -970,6 +978,33 @@ static int ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
return ret;
}
+ return 0;
+}
+
+static void
+ocelot_vcap_filter_del_aux_resources(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *filter)
+{
+ if (filter->block_id == VCAP_IS2 && filter->action.police_ena)
+ ocelot_vcap_policer_del(ocelot, filter->action.pol_ix);
+
+ if (filter->block_id == VCAP_IS2 && filter->action.mirror_ena)
+ ocelot_mirror_put(ocelot);
+}
+
+static int ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
+ struct ocelot_vcap_block *block,
+ struct ocelot_vcap_filter *filter,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot_vcap_filter *tmp;
+ struct list_head *pos, *n;
+ int ret;
+
+ ret = ocelot_vcap_filter_add_aux_resources(ocelot, filter, extack);
+ if (ret)
+ return ret;
+
block->count++;
if (list_empty(&block->rules)) {
@@ -1168,7 +1203,7 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
}
/* Add filter to the linked list */
- ret = ocelot_vcap_filter_add_to_block(ocelot, block, filter);
+ ret = ocelot_vcap_filter_add_to_block(ocelot, block, filter, extack);
if (ret)
return ret;
@@ -1195,18 +1230,12 @@ static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot,
struct ocelot_vcap_block *block,
struct ocelot_vcap_filter *filter)
{
- struct ocelot_vcap_filter *tmp;
- struct list_head *pos, *q;
+ struct ocelot_vcap_filter *tmp, *n;
- list_for_each_safe(pos, q, &block->rules) {
- tmp = list_entry(pos, struct ocelot_vcap_filter, list);
+ list_for_each_entry_safe(tmp, n, &block->rules, list) {
if (ocelot_vcap_filter_equal(filter, tmp)) {
- if (tmp->block_id == VCAP_IS2 &&
- tmp->action.police_ena)
- ocelot_vcap_policer_del(ocelot,
- tmp->action.pol_ix);
-
- list_del(pos);
+ ocelot_vcap_filter_del_aux_resources(ocelot, tmp);
+ list_del(&tmp->list);
kfree(tmp);
}
}
@@ -1401,6 +1430,7 @@ int ocelot_vcap_init(struct ocelot *ocelot)
}
INIT_LIST_HEAD(&ocelot->dummy_rules);
+ INIT_LIST_HEAD(&ocelot->traps);
INIT_LIST_HEAD(&ocelot->vcap_pol.pol_list);
return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 9cff3d48acbc..9c0861d03634 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -5,6 +5,7 @@ nfp-objs := \
nfpcore/nfp6000_pcie.o \
nfpcore/nfp_cppcore.o \
nfpcore/nfp_cpplib.o \
+ nfpcore/nfp_dev.o \
nfpcore/nfp_hwinfo.o \
nfpcore/nfp_mip.o \
nfpcore/nfp_mutex.o \
@@ -19,18 +20,25 @@ nfp-objs := \
ccm_mbox.o \
devlink_param.o \
nfp_asm.o \
+ nfd3/dp.o \
+ nfd3/rings.o \
+ nfd3/xsk.o \
+ nfdk/dp.o \
+ nfdk/rings.o \
nfp_app.o \
nfp_app_nic.o \
nfp_devlink.o \
nfp_hwmon.o \
nfp_main.o \
nfp_net_common.o \
+ nfp_net_dp.o \
nfp_net_ctrl.o \
nfp_net_debugdump.o \
nfp_net_ethtool.o \
nfp_net_main.o \
nfp_net_repr.o \
nfp_net_sriov.o \
+ nfp_net_xsk.o \
nfp_netvf_main.o \
nfp_port.o \
nfp_shared_buf.o \
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index a3242b36e216..1b9421e844a9 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -922,6 +922,51 @@ nfp_fl_pedit(const struct flow_action_entry *act,
}
}
+static struct nfp_fl_meter *nfp_fl_meter(char *act_data)
+{
+ size_t act_size = sizeof(struct nfp_fl_meter);
+ struct nfp_fl_meter *meter_act;
+
+ meter_act = (struct nfp_fl_meter *)act_data;
+
+ memset(meter_act, 0, act_size);
+
+ meter_act->head.jump_id = NFP_FL_ACTION_OPCODE_METER;
+ meter_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+
+ return meter_act;
+}
+
+static int
+nfp_flower_meter_action(struct nfp_app *app,
+ const struct flow_action_entry *action,
+ struct nfp_fl_payload *nfp_fl, int *a_len,
+ struct net_device *netdev,
+ struct netlink_ext_ack *extack)
+{
+ struct nfp_fl_meter *fl_meter;
+ u32 meter_id;
+
+ if (*a_len + sizeof(struct nfp_fl_meter) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload:meter action size beyond the allowed maximum");
+ return -EOPNOTSUPP;
+ }
+
+ meter_id = action->hw_index;
+ if (!nfp_flower_search_meter_entry(app, meter_id)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can not offload flow table with unsupported police action.");
+ return -EOPNOTSUPP;
+ }
+
+ fl_meter = nfp_fl_meter(&nfp_fl->action_data[*a_len]);
+ *a_len += sizeof(struct nfp_fl_meter);
+ fl_meter->meter_id = cpu_to_be32(meter_id);
+
+ return 0;
+}
+
static int
nfp_flower_output_action(struct nfp_app *app,
const struct flow_action_entry *act,
@@ -985,6 +1030,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
struct nfp_flower_pedit_acts *set_act, bool *pkt_host,
struct netlink_ext_ack *extack, int act_idx)
{
+ struct nfp_flower_priv *fl_priv = app->priv;
struct nfp_fl_pre_tunnel *pre_tun;
struct nfp_fl_set_tun *set_tun;
struct nfp_fl_push_vlan *psh_v;
@@ -1149,6 +1195,18 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
*pkt_host = true;
break;
+ case FLOW_ACTION_POLICE:
+ if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_METER)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: unsupported police action in action list");
+ return -EOPNOTSUPP;
+ }
+
+ err = nfp_flower_meter_action(app, act, nfp_fl, a_len, netdev,
+ extack);
+ if (err)
+ return err;
+ break;
default:
/* Currently we do not handle any other actions. */
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list");
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 1543e47456d5..68e8a2fb1a29 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -85,6 +85,7 @@
#define NFP_FL_ACTION_OPCODE_SET_TCP 15
#define NFP_FL_ACTION_OPCODE_PRE_LAG 16
#define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17
+#define NFP_FL_ACTION_OPCODE_METER 24
#define NFP_FL_ACTION_OPCODE_PUSH_GENEVE 26
#define NFP_FL_ACTION_OPCODE_NUM 32
@@ -260,6 +261,12 @@ struct nfp_fl_set_mpls {
__be32 lse;
};
+struct nfp_fl_meter {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ __be32 meter_id;
+};
+
/* Metadata with L2 (1W/4B)
* ----------------------------------------------------------------
* 3 2 1
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index ac1dcfa1d179..4d960a9641b3 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -266,7 +266,7 @@ nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
int i, err, count = 0;
reprs = rcu_dereference_protected(app->reprs[type],
- lockdep_is_held(&app->pf->lock));
+ nfp_app_is_locked(app));
if (!reprs)
return 0;
@@ -295,7 +295,7 @@ nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
if (!tot_repl)
return 0;
- lockdep_assert_held(&app->pf->lock);
+ assert_nfp_app_locked(app);
if (!wait_event_timeout(priv->reify_wait_queue,
atomic_read(replies) >= tot_repl,
NFP_FL_REPLY_TIMEOUT)) {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 917c450a7aad..fa902ce2dd82 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -12,7 +12,9 @@
#include <linux/rhashtable.h>
#include <linux/time64.h>
#include <linux/types.h>
+#include <net/flow_offload.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/tcp.h>
#include <linux/workqueue.h>
#include <linux/idr.h>
@@ -48,6 +50,7 @@ struct nfp_app;
#define NFP_FL_FEATS_IPV6_TUN BIT(7)
#define NFP_FL_FEATS_VLAN_QINQ BIT(8)
#define NFP_FL_FEATS_QOS_PPS BIT(9)
+#define NFP_FL_FEATS_QOS_METER BIT(10)
#define NFP_FL_FEATS_HOST_ACK BIT(31)
#define NFP_FL_ENABLE_FLOW_MERGE BIT(0)
@@ -63,7 +66,8 @@ struct nfp_app;
NFP_FL_FEATS_PRE_TUN_RULES | \
NFP_FL_FEATS_IPV6_TUN | \
NFP_FL_FEATS_VLAN_QINQ | \
- NFP_FL_FEATS_QOS_PPS)
+ NFP_FL_FEATS_QOS_PPS | \
+ NFP_FL_FEATS_QOS_METER)
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
@@ -191,6 +195,8 @@ struct nfp_fl_internal_ports {
* @qos_stats_work: Workqueue for qos stats processing
* @qos_rate_limiters: Current active qos rate limiters
* @qos_stats_lock: Lock on qos stats updates
+ * @meter_stats_lock: Lock on meter stats updates
+ * @meter_table: Hash table used to store the meter table
* @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded
* @merge_table: Hash table to store merged flows
* @ct_zone_table: Hash table used to store the different zones
@@ -228,6 +234,8 @@ struct nfp_flower_priv {
struct delayed_work qos_stats_work;
unsigned int qos_rate_limiters;
spinlock_t qos_stats_lock; /* Protect the qos stats */
+ struct mutex meter_stats_lock; /* Protect the meter stats */
+ struct rhashtable meter_table;
int pre_tun_rule_cnt;
struct rhashtable merge_table;
struct rhashtable ct_zone_table;
@@ -374,6 +382,31 @@ struct nfp_fl_stats_frame {
__be64 stats_cookie;
};
+struct nfp_meter_stats_entry {
+ u64 pkts;
+ u64 bytes;
+ u64 drops;
+};
+
+struct nfp_meter_entry {
+ struct rhash_head ht_node;
+ u32 meter_id;
+ bool bps;
+ u32 rate;
+ u32 burst;
+ u64 used;
+ struct nfp_meter_stats {
+ u64 update;
+ struct nfp_meter_stats_entry curr;
+ struct nfp_meter_stats_entry prev;
+ } stats;
+};
+
+enum nfp_meter_op {
+ NFP_METER_ADD,
+ NFP_METER_DEL,
+};
+
static inline bool
nfp_flower_internal_port_can_offload(struct nfp_app *app,
struct net_device *netdev)
@@ -569,4 +602,18 @@ nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
void
nfp_flower_update_merge_stats(struct nfp_app *app,
struct nfp_fl_payload *sub_flow);
+
+int nfp_setup_tc_act_offload(struct nfp_app *app,
+ struct flow_offload_action *fl_act);
+int nfp_init_meter_table(struct nfp_app *app);
+void nfp_flower_stats_meter_request_all(struct nfp_flower_priv *fl_priv);
+void nfp_act_stats_reply(struct nfp_app *app, void *pmsg);
+int nfp_flower_offload_one_police(struct nfp_app *app, bool ingress,
+ bool pps, u32 id, u32 rate, u32 burst);
+int nfp_flower_setup_meter_entry(struct nfp_app *app,
+ const struct flow_action_entry *action,
+ enum nfp_meter_op op,
+ u32 meter_id);
+struct nfp_meter_entry *
+nfp_flower_search_meter_entry(struct nfp_app *app, u32 meter_id);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index f97eff5afd12..92e8ade4854e 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1861,6 +1861,20 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, str
return 0;
}
+static int
+nfp_setup_tc_no_dev(struct nfp_app *app, enum tc_setup_type type, void *data)
+{
+ if (!data)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_ACT:
+ return nfp_setup_tc_act_offload(app, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
enum tc_setup_type type, void *type_data,
@@ -1868,7 +1882,7 @@ nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *
void (*cleanup)(struct flow_block_cb *block_cb))
{
if (!netdev)
- return -EOPNOTSUPP;
+ return nfp_setup_tc_no_dev(cb_priv, type, data);
if (!nfp_fl_is_netdev_to_offload(netdev))
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
index 784c6dbf8bc4..3206ba83b1aa 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
@@ -1,7 +1,11 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */
+#include <linux/hash.h>
+#include <linux/hashtable.h>
+#include <linux/jhash.h>
#include <linux/math64.h>
+#include <linux/vmalloc.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
@@ -11,10 +15,14 @@
#define NFP_FL_QOS_UPDATE msecs_to_jiffies(1000)
#define NFP_FL_QOS_PPS BIT(15)
+#define NFP_FL_QOS_METER BIT(10)
struct nfp_police_cfg_head {
__be32 flags_opts;
- __be32 port;
+ union {
+ __be32 meter_id;
+ __be32 port;
+ };
};
enum NFP_FL_QOS_TYPES {
@@ -46,7 +54,15 @@ enum NFP_FL_QOS_TYPES {
* | Committed Information Rate |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* Word[0](FLag options):
- * [15] p(pps) 1 for pps ,0 for bps
+ * [15] p(pps) 1 for pps, 0 for bps
+ *
+ * Meter control message
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-------------------------------+-+---+-----+-+---------+-+---+-+
+ * | Reserved |p| Y |TYPE |E|TSHFV |P| PC|R|
+ * +-------------------------------+-+---+-----+-+---------+-+---+-+
+ * | meter ID |
+ * +-------------------------------+-------------------------------+
*
*/
struct nfp_police_config {
@@ -67,6 +83,74 @@ struct nfp_police_stats_reply {
__be64 drop_pkts;
};
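
Of the flag bits documented in the meter control message layout above, only two are driven by this patch: bit 15 selects packets-per-second metering and bit 10 marks the message as a meter (egress) request rather than a per-port policer. A standalone toy encoder, with macro names invented for illustration, would look like this:

#include <stdint.h>
#include <arpa/inet.h>		/* htonl() stands in for cpu_to_be32() here */

#define TOY_QOS_PPS	(1u << 15)	/* rate expressed in packets/s, not bytes/s */
#define TOY_QOS_METER	(1u << 10)	/* meter message, keyed by meter ID */

/* build the big-endian flags_opts word for a meter control message */
static uint32_t toy_meter_flags(int pps)
{
	uint32_t flags = TOY_QOS_METER;

	if (pps)
		flags |= TOY_QOS_PPS;

	return htonl(flags);
}
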
+int nfp_flower_offload_one_police(struct nfp_app *app, bool ingress,
+ bool pps, u32 id, u32 rate, u32 burst)
+{
+ struct nfp_police_config *config;
+ struct sk_buff *skb;
+
+ skb = nfp_flower_cmsg_alloc(app, sizeof(struct nfp_police_config),
+ NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ config = nfp_flower_cmsg_get_data(skb);
+ memset(config, 0, sizeof(struct nfp_police_config));
+ if (pps)
+ config->head.flags_opts |= cpu_to_be32(NFP_FL_QOS_PPS);
+ if (!ingress)
+ config->head.flags_opts |= cpu_to_be32(NFP_FL_QOS_METER);
+
+ if (ingress)
+ config->head.port = cpu_to_be32(id);
+ else
+ config->head.meter_id = cpu_to_be32(id);
+
+ config->bkt_tkn_p = cpu_to_be32(burst);
+ config->bkt_tkn_c = cpu_to_be32(burst);
+ config->pbs = cpu_to_be32(burst);
+ config->cbs = cpu_to_be32(burst);
+ config->pir = cpu_to_be32(rate);
+ config->cir = cpu_to_be32(rate);
+ nfp_ctrl_tx(app->ctrl, skb);
+
+ return 0;
+}
+
+static int nfp_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int
nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_matchall_offload *flow,
@@ -77,15 +161,15 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
struct nfp_flower_priv *fl_priv = app->priv;
struct flow_action_entry *action = NULL;
struct nfp_flower_repr_priv *repr_priv;
- struct nfp_police_config *config;
u32 netdev_port_id, i;
struct nfp_repr *repr;
- struct sk_buff *skb;
bool pps_support;
u32 bps_num = 0;
u32 pps_num = 0;
u32 burst;
+ bool pps;
u64 rate;
+ int err;
if (!nfp_netdev_is_nfp_repr(netdev)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
@@ -132,6 +216,11 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
"unsupported offload: qos rate limit offload requires police action");
return -EOPNOTSUPP;
}
+
+ err = nfp_policer_validate(&flow->rule->action, action, extack);
+ if (err)
+ return err;
+
if (action->police.rate_bytes_ps > 0) {
if (bps_num++) {
NL_SET_ERR_MSG_MOD(extack,
@@ -169,23 +258,12 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
}
if (rate != 0) {
- skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
- NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- config = nfp_flower_cmsg_get_data(skb);
- memset(config, 0, sizeof(struct nfp_police_config));
+ pps = false;
if (action->police.rate_pkt_ps > 0)
- config->head.flags_opts = cpu_to_be32(NFP_FL_QOS_PPS);
- config->head.port = cpu_to_be32(netdev_port_id);
- config->bkt_tkn_p = cpu_to_be32(burst);
- config->bkt_tkn_c = cpu_to_be32(burst);
- config->pbs = cpu_to_be32(burst);
- config->cbs = cpu_to_be32(burst);
- config->pir = cpu_to_be32(rate);
- config->cir = cpu_to_be32(rate);
- nfp_ctrl_tx(repr->app->ctrl, skb);
+ pps = true;
+ nfp_flower_offload_one_police(repr->app, true,
+ pps, netdev_port_id,
+ rate, burst);
}
}
repr_priv->qos_table.netdev_port_id = netdev_port_id;
@@ -266,6 +344,9 @@ void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb)
u32 netdev_port_id;
msg = nfp_flower_cmsg_get_data(skb);
+ if (be32_to_cpu(msg->head.flags_opts) & NFP_FL_QOS_METER)
+ return nfp_act_stats_reply(app, msg);
+
netdev_port_id = be32_to_cpu(msg->head.port);
rcu_read_lock();
netdev = nfp_app_dev_get(app, netdev_port_id, NULL);
@@ -297,7 +378,7 @@ exit_unlock_rcu:
static void
nfp_flower_stats_rlim_request(struct nfp_flower_priv *fl_priv,
- u32 netdev_port_id)
+ u32 id, bool ingress)
{
struct nfp_police_cfg_head *head;
struct sk_buff *skb;
@@ -308,10 +389,15 @@ nfp_flower_stats_rlim_request(struct nfp_flower_priv *fl_priv,
GFP_ATOMIC);
if (!skb)
return;
-
head = nfp_flower_cmsg_get_data(skb);
+
memset(head, 0, sizeof(struct nfp_police_cfg_head));
- head->port = cpu_to_be32(netdev_port_id);
+ if (ingress) {
+ head->port = cpu_to_be32(id);
+ } else {
+ head->flags_opts = cpu_to_be32(NFP_FL_QOS_METER);
+ head->meter_id = cpu_to_be32(id);
+ }
nfp_ctrl_tx(fl_priv->app->ctrl, skb);
}
@@ -341,7 +427,8 @@ nfp_flower_stats_rlim_request_all(struct nfp_flower_priv *fl_priv)
if (!netdev_port_id)
continue;
- nfp_flower_stats_rlim_request(fl_priv, netdev_port_id);
+ nfp_flower_stats_rlim_request(fl_priv,
+ netdev_port_id, true);
}
}
@@ -359,6 +446,8 @@ static void update_stats_cache(struct work_struct *work)
qos_stats_work);
nfp_flower_stats_rlim_request_all(fl_priv);
+ nfp_flower_stats_meter_request_all(fl_priv);
+
schedule_delayed_work(&fl_priv->qos_stats_work, NFP_FL_QOS_UPDATE);
}
@@ -406,6 +495,9 @@ void nfp_flower_qos_init(struct nfp_app *app)
struct nfp_flower_priv *fl_priv = app->priv;
spin_lock_init(&fl_priv->qos_stats_lock);
+ mutex_init(&fl_priv->meter_stats_lock);
+ nfp_init_meter_table(app);
+
INIT_DELAYED_WORK(&fl_priv->qos_stats_work, &update_stats_cache);
}
@@ -441,3 +533,333 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
return -EOPNOTSUPP;
}
}
+
+/* offload tc action, currently only for tc police */
+
+static const struct rhashtable_params stats_meter_table_params = {
+ .key_offset = offsetof(struct nfp_meter_entry, meter_id),
+ .head_offset = offsetof(struct nfp_meter_entry, ht_node),
+ .key_len = sizeof(u32),
+};
+
+struct nfp_meter_entry *
+nfp_flower_search_meter_entry(struct nfp_app *app, u32 meter_id)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ return rhashtable_lookup_fast(&priv->meter_table, &meter_id,
+ stats_meter_table_params);
+}
+
+static struct nfp_meter_entry *
+nfp_flower_add_meter_entry(struct nfp_app *app, u32 meter_id)
+{
+ struct nfp_meter_entry *meter_entry = NULL;
+ struct nfp_flower_priv *priv = app->priv;
+
+ meter_entry = rhashtable_lookup_fast(&priv->meter_table,
+ &meter_id,
+ stats_meter_table_params);
+ if (meter_entry)
+ return meter_entry;
+
+ meter_entry = kzalloc(sizeof(*meter_entry), GFP_KERNEL);
+ if (!meter_entry)
+ return NULL;
+
+ meter_entry->meter_id = meter_id;
+ meter_entry->used = jiffies;
+ if (rhashtable_insert_fast(&priv->meter_table, &meter_entry->ht_node,
+ stats_meter_table_params)) {
+ kfree(meter_entry);
+ return NULL;
+ }
+
+ priv->qos_rate_limiters++;
+ if (priv->qos_rate_limiters == 1)
+ schedule_delayed_work(&priv->qos_stats_work,
+ NFP_FL_QOS_UPDATE);
+
+ return meter_entry;
+}
+
+static void nfp_flower_del_meter_entry(struct nfp_app *app, u32 meter_id)
+{
+ struct nfp_meter_entry *meter_entry = NULL;
+ struct nfp_flower_priv *priv = app->priv;
+
+ meter_entry = rhashtable_lookup_fast(&priv->meter_table, &meter_id,
+ stats_meter_table_params);
+ if (!meter_entry)
+ return;
+
+ rhashtable_remove_fast(&priv->meter_table,
+ &meter_entry->ht_node,
+ stats_meter_table_params);
+ kfree(meter_entry);
+ priv->qos_rate_limiters--;
+ if (!priv->qos_rate_limiters)
+ cancel_delayed_work_sync(&priv->qos_stats_work);
+}
+
+int nfp_flower_setup_meter_entry(struct nfp_app *app,
+ const struct flow_action_entry *action,
+ enum nfp_meter_op op,
+ u32 meter_id)
+{
+ struct nfp_flower_priv *fl_priv = app->priv;
+ struct nfp_meter_entry *meter_entry = NULL;
+ int err = 0;
+
+ mutex_lock(&fl_priv->meter_stats_lock);
+
+ switch (op) {
+ case NFP_METER_DEL:
+ nfp_flower_del_meter_entry(app, meter_id);
+ goto exit_unlock;
+ case NFP_METER_ADD:
+ meter_entry = nfp_flower_add_meter_entry(app, meter_id);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ goto exit_unlock;
+ }
+
+ if (!meter_entry) {
+ err = -ENOMEM;
+ goto exit_unlock;
+ }
+
+ if (action->police.rate_bytes_ps > 0) {
+ meter_entry->bps = true;
+ meter_entry->rate = action->police.rate_bytes_ps;
+ meter_entry->burst = action->police.burst;
+ } else {
+ meter_entry->bps = false;
+ meter_entry->rate = action->police.rate_pkt_ps;
+ meter_entry->burst = action->police.burst_pkt;
+ }
+
+exit_unlock:
+ mutex_unlock(&fl_priv->meter_stats_lock);
+ return err;
+}
+
+int nfp_init_meter_table(struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+
+ return rhashtable_init(&priv->meter_table, &stats_meter_table_params);
+}
+
+void
+nfp_flower_stats_meter_request_all(struct nfp_flower_priv *fl_priv)
+{
+ struct nfp_meter_entry *meter_entry = NULL;
+ struct rhashtable_iter iter;
+
+ mutex_lock(&fl_priv->meter_stats_lock);
+ rhashtable_walk_enter(&fl_priv->meter_table, &iter);
+ rhashtable_walk_start(&iter);
+
+ while ((meter_entry = rhashtable_walk_next(&iter)) != NULL) {
+ if (IS_ERR(meter_entry))
+ continue;
+ nfp_flower_stats_rlim_request(fl_priv,
+ meter_entry->meter_id, false);
+ }
+
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+ mutex_unlock(&fl_priv->meter_stats_lock);
+}
+
+static int
+nfp_act_install_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
+ struct netlink_ext_ack *extack)
+{
+ struct flow_action_entry *paction = &fl_act->action.entries[0];
+ u32 action_num = fl_act->action.num_entries;
+ struct nfp_flower_priv *fl_priv = app->priv;
+ struct flow_action_entry *action = NULL;
+ u32 burst, i, meter_id;
+ bool pps_support, pps;
+ bool add = false;
+ u64 rate;
+
+ pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS);
+
+ for (i = 0 ; i < action_num; i++) {
+		/* set qos associated data for this interface */
+ action = paction + i;
+ if (action->id != FLOW_ACTION_POLICE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: qos rate limit offload requires police action");
+ continue;
+ }
+ if (action->police.rate_bytes_ps > 0) {
+ rate = action->police.rate_bytes_ps;
+ burst = action->police.burst;
+ } else if (action->police.rate_pkt_ps > 0 && pps_support) {
+ rate = action->police.rate_pkt_ps;
+ burst = action->police.burst_pkt;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: unsupported qos rate limit");
+ continue;
+ }
+
+ if (rate != 0) {
+ meter_id = action->hw_index;
+ if (nfp_flower_setup_meter_entry(app, action, NFP_METER_ADD, meter_id))
+ continue;
+
+ pps = false;
+ if (action->police.rate_pkt_ps > 0)
+ pps = true;
+ nfp_flower_offload_one_police(app, false, pps, meter_id,
+ rate, burst);
+ add = true;
+ }
+ }
+
+ return add ? 0 : -EOPNOTSUPP;
+}
+
+static int
+nfp_act_remove_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
+ struct netlink_ext_ack *extack)
+{
+ struct nfp_meter_entry *meter_entry = NULL;
+ struct nfp_police_config *config;
+ struct sk_buff *skb;
+ u32 meter_id;
+ bool pps;
+
+	/* delete qos associated data for this interface */
+ if (fl_act->id != FLOW_ACTION_POLICE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: qos rate limit offload requires police action");
+ return -EOPNOTSUPP;
+ }
+
+ meter_id = fl_act->index;
+ meter_entry = nfp_flower_search_meter_entry(app, meter_id);
+ if (!meter_entry) {
+ NL_SET_ERR_MSG_MOD(extack,
+				   "no meter entry found when deleting the action index");
+ return -ENOENT;
+ }
+ pps = !meter_entry->bps;
+
+ skb = nfp_flower_cmsg_alloc(app, sizeof(struct nfp_police_config),
+ NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ config = nfp_flower_cmsg_get_data(skb);
+ memset(config, 0, sizeof(struct nfp_police_config));
+ config->head.flags_opts = cpu_to_be32(NFP_FL_QOS_METER);
+ config->head.meter_id = cpu_to_be32(meter_id);
+ if (pps)
+ config->head.flags_opts |= cpu_to_be32(NFP_FL_QOS_PPS);
+
+ nfp_ctrl_tx(app->ctrl, skb);
+ nfp_flower_setup_meter_entry(app, NULL, NFP_METER_DEL, meter_id);
+
+ return 0;
+}
+
+void
+nfp_act_stats_reply(struct nfp_app *app, void *pmsg)
+{
+ struct nfp_flower_priv *fl_priv = app->priv;
+ struct nfp_meter_entry *meter_entry = NULL;
+ struct nfp_police_stats_reply *msg = pmsg;
+ u32 meter_id;
+
+ meter_id = be32_to_cpu(msg->head.meter_id);
+ mutex_lock(&fl_priv->meter_stats_lock);
+
+ meter_entry = nfp_flower_search_meter_entry(app, meter_id);
+ if (!meter_entry)
+ goto exit_unlock;
+
+ meter_entry->stats.curr.pkts = be64_to_cpu(msg->pass_pkts) +
+ be64_to_cpu(msg->drop_pkts);
+ meter_entry->stats.curr.bytes = be64_to_cpu(msg->pass_bytes) +
+ be64_to_cpu(msg->drop_bytes);
+ meter_entry->stats.curr.drops = be64_to_cpu(msg->drop_pkts);
+ if (!meter_entry->stats.update) {
+ meter_entry->stats.prev.pkts = meter_entry->stats.curr.pkts;
+ meter_entry->stats.prev.bytes = meter_entry->stats.curr.bytes;
+ meter_entry->stats.prev.drops = meter_entry->stats.curr.drops;
+ }
+
+ meter_entry->stats.update = jiffies;
+
+exit_unlock:
+ mutex_unlock(&fl_priv->meter_stats_lock);
+}
+
+static int
+nfp_act_stats_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
+ struct netlink_ext_ack *extack)
+{
+ struct nfp_flower_priv *fl_priv = app->priv;
+ struct nfp_meter_entry *meter_entry = NULL;
+ u64 diff_bytes, diff_pkts, diff_drops;
+ int err = 0;
+
+ if (fl_act->id != FLOW_ACTION_POLICE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: qos rate limit offload requires police action");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&fl_priv->meter_stats_lock);
+ meter_entry = nfp_flower_search_meter_entry(app, fl_act->index);
+ if (!meter_entry) {
+ err = -ENOENT;
+ goto exit_unlock;
+ }
+ diff_pkts = meter_entry->stats.curr.pkts > meter_entry->stats.prev.pkts ?
+ meter_entry->stats.curr.pkts - meter_entry->stats.prev.pkts : 0;
+ diff_bytes = meter_entry->stats.curr.bytes > meter_entry->stats.prev.bytes ?
+ meter_entry->stats.curr.bytes - meter_entry->stats.prev.bytes : 0;
+ diff_drops = meter_entry->stats.curr.drops > meter_entry->stats.prev.drops ?
+ meter_entry->stats.curr.drops - meter_entry->stats.prev.drops : 0;
+
+ flow_stats_update(&fl_act->stats, diff_bytes, diff_pkts, diff_drops,
+ meter_entry->stats.update,
+ FLOW_ACTION_HW_STATS_DELAYED);
+
+ meter_entry->stats.prev.pkts = meter_entry->stats.curr.pkts;
+ meter_entry->stats.prev.bytes = meter_entry->stats.curr.bytes;
+ meter_entry->stats.prev.drops = meter_entry->stats.curr.drops;
+
+exit_unlock:
+ mutex_unlock(&fl_priv->meter_stats_lock);
+ return err;
+}
+
+int nfp_setup_tc_act_offload(struct nfp_app *app,
+ struct flow_offload_action *fl_act)
+{
+ struct netlink_ext_ack *extack = fl_act->extack;
+ struct nfp_flower_priv *fl_priv = app->priv;
+
+ if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_METER))
+ return -EOPNOTSUPP;
+
+ switch (fl_act->command) {
+ case FLOW_ACT_REPLACE:
+ return nfp_act_install_actions(app, fl_act, extack);
+ case FLOW_ACT_DESTROY:
+ return nfp_act_remove_actions(app, fl_act, extack);
+ case FLOW_ACT_STATS:
+ return nfp_act_stats_actions(app, fl_act, extack);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index cb43651ea9ba..c71bd555f482 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -356,7 +356,7 @@ __nfp_tun_add_route_to_cache(struct list_head *route_list,
return 0;
}
- entry = kmalloc(sizeof(*entry) + add_len, GFP_ATOMIC);
+ entry = kmalloc(struct_size(entry, ip_add, add_len), GFP_ATOMIC);
if (!entry) {
spin_unlock_bh(list_lock);
return -ENOMEM;
@@ -942,8 +942,8 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
if (!nfp_mac_idx) {
/* Assign a global index if non-repr or MAC is now shared. */
if (entry || !port) {
- ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
- NFP_MAX_MAC_INDEX, GFP_KERNEL);
+ ida_idx = ida_alloc_max(&priv->tun.mac_off_ids,
+ NFP_MAX_MAC_INDEX, GFP_KERNEL);
if (ida_idx < 0)
return ida_idx;
@@ -998,7 +998,7 @@ err_free_entry:
kfree(entry);
err_free_ida:
if (ida_idx != -1)
- ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
+ ida_free(&priv->tun.mac_off_ids, ida_idx);
return err;
}
@@ -1061,7 +1061,7 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
}
ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
- ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
+ ida_free(&priv->tun.mac_off_ids, ida_idx);
entry->index = nfp_mac_idx;
return 0;
}
@@ -1081,7 +1081,7 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
/* If MAC has global ID then extract and free the ida entry. */
if (nfp_tunnel_is_mac_idx_global(nfp_mac_idx)) {
ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
- ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
+ ida_free(&priv->tun.mac_off_ids, ida_idx);
}
kfree(entry);
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
new file mode 100644
index 000000000000..7db56abaa582
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
@@ -0,0 +1,1350 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
+
+#include <linux/bpf_trace.h>
+#include <linux/netdevice.h>
+
+#include "../nfp_app.h"
+#include "../nfp_net.h"
+#include "../nfp_net_dp.h"
+#include "../nfp_net_xsk.h"
+#include "../crypto/crypto.h"
+#include "../crypto/fw.h"
+#include "nfd3.h"
+
+/* Transmit processing
+ *
+ * One queue controller peripheral queue is used for transmit. The
+ * driver enqueues packets for transmit by advancing the write
+ * pointer. The device indicates that packets have been transmitted by
+ * advancing the read pointer. The driver maintains a local copy of
+ * the read and write pointer in @struct nfp_net_tx_ring. The driver
+ * keeps @wr_p in sync with the queue controller write pointer and can
+ * determine how many packets have been transmitted by comparing its
+ * copy of the read pointer @rd_p with the read pointer maintained by
+ * the queue controller peripheral.
+ */
+
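
As a minimal illustration of the pointer scheme described above (standalone toy code, not part of the patch; the driver itself uses nfp_net_tx_full() on struct nfp_net_tx_ring), free-running 32-bit read/write pointers together with a power-of-two ring size let the fullness check be a single unsigned subtraction that stays correct across wrap-around:

#include <stdbool.h>
#include <stdint.h>

struct toy_tx_ring {
	uint32_t wr_p;	/* driver's copy of the write pointer */
	uint32_t rd_p;	/* driver's copy of the read pointer */
	uint32_t cnt;	/* number of descriptors, power of two */
};

/* returns true when @needed free descriptors are no longer guaranteed;
 * unsigned arithmetic keeps the distance valid across pointer wrap-around
 */
static bool toy_tx_full(const struct toy_tx_ring *ring, uint32_t needed)
{
	return ring->wr_p - ring->rd_p >= ring->cnt - needed;
}
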
+/* Wrappers for deciding when to stop and restart TX queues */
+static int nfp_nfd3_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
+{
+ return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
+}
+
+static int nfp_nfd3_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
+{
+ return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
+}
+
+/**
+ * nfp_nfd3_tx_ring_stop() - stop tx ring
+ * @nd_q: netdev queue
+ * @tx_ring: driver tx queue structure
+ *
+ * Safely stop TX ring. Remember that while we are running .start_xmit()
+ * someone else may be cleaning the TX ring completions so we need to be
+ * extra careful here.
+ */
+static void
+nfp_nfd3_tx_ring_stop(struct netdev_queue *nd_q,
+ struct nfp_net_tx_ring *tx_ring)
+{
+ netif_tx_stop_queue(nd_q);
+
+ /* We can race with the TX completion out of NAPI so recheck */
+ smp_mb();
+ if (unlikely(nfp_nfd3_tx_ring_should_wake(tx_ring)))
+ netif_tx_start_queue(nd_q);
+}
+
+/**
+ * nfp_nfd3_tx_tso() - Set up Tx descriptor for LSO
+ * @r_vec: per-ring structure
+ * @txbuf: Pointer to driver soft TX descriptor
+ * @txd: Pointer to HW TX descriptor
+ * @skb: Pointer to SKB
+ * @md_bytes: Prepend length
+ *
+ * Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
+ */
+static void
+nfp_nfd3_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfd3_tx_buf *txbuf,
+ struct nfp_nfd3_tx_desc *txd, struct sk_buff *skb, u32 md_bytes)
+{
+ u32 l3_offset, l4_offset, hdrlen;
+ u16 mss;
+
+ if (!skb_is_gso(skb))
+ return;
+
+ if (!skb->encapsulation) {
+ l3_offset = skb_network_offset(skb);
+ l4_offset = skb_transport_offset(skb);
+ hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ } else {
+ l3_offset = skb_inner_network_offset(skb);
+ l4_offset = skb_inner_transport_offset(skb);
+ hdrlen = skb_inner_transport_header(skb) - skb->data +
+ inner_tcp_hdrlen(skb);
+ }
+
+ txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
+ txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
+
+ mss = skb_shinfo(skb)->gso_size & NFD3_DESC_TX_MSS_MASK;
+ txd->l3_offset = l3_offset - md_bytes;
+ txd->l4_offset = l4_offset - md_bytes;
+ txd->lso_hdrlen = hdrlen - md_bytes;
+ txd->mss = cpu_to_le16(mss);
+ txd->flags |= NFD3_DESC_TX_LSO;
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_lso++;
+ u64_stats_update_end(&r_vec->tx_sync);
+}
+
+/**
+ * nfp_nfd3_tx_csum() - Set TX CSUM offload flags in TX descriptor
+ * @dp: NFP Net data path struct
+ * @r_vec: per-ring structure
+ * @txbuf: Pointer to driver soft TX descriptor
+ * @txd: Pointer to TX descriptor
+ * @skb: Pointer to SKB
+ *
+ * This function sets the TX checksum flags in the TX descriptor based
+ * on the configuration and the protocol of the packet to be transmitted.
+ */
+static void
+nfp_nfd3_tx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ struct nfp_nfd3_tx_buf *txbuf, struct nfp_nfd3_tx_desc *txd,
+ struct sk_buff *skb)
+{
+ struct ipv6hdr *ipv6h;
+ struct iphdr *iph;
+ u8 l4_hdr;
+
+ if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
+ return;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return;
+
+ txd->flags |= NFD3_DESC_TX_CSUM;
+ if (skb->encapsulation)
+ txd->flags |= NFD3_DESC_TX_ENCAP;
+
+ iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+ if (iph->version == 4) {
+ txd->flags |= NFD3_DESC_TX_IP4_CSUM;
+ l4_hdr = iph->protocol;
+ } else if (ipv6h->version == 6) {
+ l4_hdr = ipv6h->nexthdr;
+ } else {
+ nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
+ return;
+ }
+
+ switch (l4_hdr) {
+ case IPPROTO_TCP:
+ txd->flags |= NFD3_DESC_TX_TCP_CSUM;
+ break;
+ case IPPROTO_UDP:
+ txd->flags |= NFD3_DESC_TX_UDP_CSUM;
+ break;
+ default:
+ nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr);
+ return;
+ }
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ if (skb->encapsulation)
+ r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
+ else
+ r_vec->hw_csum_tx += txbuf->pkt_cnt;
+ u64_stats_update_end(&r_vec->tx_sync);
+}
+
+static int nfp_nfd3_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ unsigned char *data;
+ u32 meta_id = 0;
+ int md_bytes;
+
+ if (likely(!md_dst && !tls_handle))
+ return 0;
+ if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX)) {
+ if (!tls_handle)
+ return 0;
+ md_dst = NULL;
+ }
+
+ md_bytes = 4 + !!md_dst * 4 + !!tls_handle * 8;
+
+ if (unlikely(skb_cow_head(skb, md_bytes)))
+ return -ENOMEM;
+
+ meta_id = 0;
+ data = skb_push(skb, md_bytes) + md_bytes;
+ if (md_dst) {
+ data -= 4;
+ put_unaligned_be32(md_dst->u.port_info.port_id, data);
+ meta_id = NFP_NET_META_PORTID;
+ }
+ if (tls_handle) {
+ /* conn handle is opaque, we just use u64 to be able to quickly
+ * compare it to zero
+ */
+ data -= 8;
+ memcpy(data, &tls_handle, sizeof(tls_handle));
+ meta_id <<= NFP_NET_META_FIELD_SIZE;
+ meta_id |= NFP_NET_META_CONN_HANDLE;
+ }
+
+ data -= 4;
+ put_unaligned_be32(meta_id, data);
+
+ return md_bytes;
+}
+
+/**
+ * nfp_nfd3_tx() - Main transmit entry point
+ * @skb: SKB to transmit
+ * @netdev: netdev structure
+ *
+ * Return: NETDEV_TX_OK on success.
+ */
+netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ int f, nr_frags, wr_idx, md_bytes;
+ struct nfp_net_tx_ring *tx_ring;
+ struct nfp_net_r_vector *r_vec;
+ struct nfp_nfd3_tx_buf *txbuf;
+ struct nfp_nfd3_tx_desc *txd;
+ struct netdev_queue *nd_q;
+ const skb_frag_t *frag;
+ struct nfp_net_dp *dp;
+ dma_addr_t dma_addr;
+ unsigned int fsize;
+ u64 tls_handle = 0;
+ u16 qidx;
+
+ dp = &nn->dp;
+ qidx = skb_get_queue_mapping(skb);
+ tx_ring = &dp->tx_rings[qidx];
+ r_vec = tx_ring->r_vec;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
+ nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
+ qidx, tx_ring->wr_p, tx_ring->rd_p);
+ nd_q = netdev_get_tx_queue(dp->netdev, qidx);
+ netif_tx_stop_queue(nd_q);
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_busy++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ return NETDEV_TX_BUSY;
+ }
+
+ skb = nfp_net_tls_tx(dp, r_vec, skb, &tls_handle, &nr_frags);
+ if (unlikely(!skb)) {
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ return NETDEV_TX_OK;
+ }
+
+ md_bytes = nfp_nfd3_prep_tx_meta(skb, tls_handle);
+ if (unlikely(md_bytes < 0))
+ goto err_flush;
+
+ /* Start with the head skbuf */
+ dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dp->dev, dma_addr))
+ goto err_dma_err;
+
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+
+ /* Stash the soft descriptor of the head then initialize it */
+ txbuf = &tx_ring->txbufs[wr_idx];
+ txbuf->skb = skb;
+ txbuf->dma_addr = dma_addr;
+ txbuf->fidx = -1;
+ txbuf->pkt_cnt = 1;
+ txbuf->real_len = skb->len;
+
+ /* Build TX descriptor */
+ txd = &tx_ring->txds[wr_idx];
+ txd->offset_eop = (nr_frags ? 0 : NFD3_DESC_TX_EOP) | md_bytes;
+ txd->dma_len = cpu_to_le16(skb_headlen(skb));
+ nfp_desc_set_dma_addr(txd, dma_addr);
+ txd->data_len = cpu_to_le16(skb->len);
+
+ txd->flags = 0;
+ txd->mss = 0;
+ txd->lso_hdrlen = 0;
+
+ /* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
+ nfp_nfd3_tx_tso(r_vec, txbuf, txd, skb, md_bytes);
+ nfp_nfd3_tx_csum(dp, r_vec, txbuf, txd, skb);
+ if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
+ txd->flags |= NFD3_DESC_TX_VLAN;
+ txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
+ }
+
+ /* Gather DMA */
+ if (nr_frags > 0) {
+ __le64 second_half;
+
+ /* all descs must match except for in addr, length and eop */
+ second_half = txd->vals8[1];
+
+ for (f = 0; f < nr_frags; f++) {
+ frag = &skb_shinfo(skb)->frags[f];
+ fsize = skb_frag_size(frag);
+
+ dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
+ fsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(dp->dev, dma_addr))
+ goto err_unmap;
+
+ wr_idx = D_IDX(tx_ring, wr_idx + 1);
+ tx_ring->txbufs[wr_idx].skb = skb;
+ tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
+ tx_ring->txbufs[wr_idx].fidx = f;
+
+ txd = &tx_ring->txds[wr_idx];
+ txd->dma_len = cpu_to_le16(fsize);
+ nfp_desc_set_dma_addr(txd, dma_addr);
+ txd->offset_eop = md_bytes |
+ ((f == nr_frags - 1) ? NFD3_DESC_TX_EOP : 0);
+ txd->vals8[1] = second_half;
+ }
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_gather++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ }
+
+ skb_tx_timestamp(skb);
+
+ nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
+
+ tx_ring->wr_p += nr_frags + 1;
+ if (nfp_nfd3_tx_ring_should_stop(tx_ring))
+ nfp_nfd3_tx_ring_stop(nd_q, tx_ring);
+
+ tx_ring->wr_ptr_add += nr_frags + 1;
+ if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, netdev_xmit_more()))
+ nfp_net_tx_xmit_more_flush(tx_ring);
+
+ return NETDEV_TX_OK;
+
+err_unmap:
+ while (--f >= 0) {
+ frag = &skb_shinfo(skb)->frags[f];
+ dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ tx_ring->txbufs[wr_idx].skb = NULL;
+ tx_ring->txbufs[wr_idx].dma_addr = 0;
+ tx_ring->txbufs[wr_idx].fidx = -2;
+ wr_idx = wr_idx - 1;
+ if (wr_idx < 0)
+ wr_idx += tx_ring->cnt;
+ }
+ dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ tx_ring->txbufs[wr_idx].skb = NULL;
+ tx_ring->txbufs[wr_idx].dma_addr = 0;
+ tx_ring->txbufs[wr_idx].fidx = -2;
+err_dma_err:
+ nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
+err_flush:
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_errors++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ nfp_net_tls_tx_undo(skb, tls_handle);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * nfp_nfd3_tx_complete() - Handle completed TX packets
+ * @tx_ring: TX ring structure
+ * @budget: NAPI budget (only used as bool to determine if in NAPI context)
+ */
+void nfp_nfd3_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ u32 done_pkts = 0, done_bytes = 0;
+ struct netdev_queue *nd_q;
+ u32 qcp_rd_p;
+ int todo;
+
+ if (tx_ring->wr_p == tx_ring->rd_p)
+ return;
+
+ /* Work out how many descriptors have been transmitted */
+ qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);
+
+ if (qcp_rd_p == tx_ring->qcp_rd_p)
+ return;
+
+ todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
+
+ while (todo--) {
+ const skb_frag_t *frag;
+ struct nfp_nfd3_tx_buf *tx_buf;
+ struct sk_buff *skb;
+ int fidx, nr_frags;
+ int idx;
+
+ idx = D_IDX(tx_ring, tx_ring->rd_p++);
+ tx_buf = &tx_ring->txbufs[idx];
+
+ skb = tx_buf->skb;
+ if (!skb)
+ continue;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ fidx = tx_buf->fidx;
+
+ if (fidx == -1) {
+ /* unmap head */
+ dma_unmap_single(dp->dev, tx_buf->dma_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
+
+ done_pkts += tx_buf->pkt_cnt;
+ done_bytes += tx_buf->real_len;
+ } else {
+ /* unmap fragment */
+ frag = &skb_shinfo(skb)->frags[fidx];
+ dma_unmap_page(dp->dev, tx_buf->dma_addr,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ }
+
+ /* check for last gather fragment */
+ if (fidx == nr_frags - 1)
+ napi_consume_skb(skb, budget);
+
+ tx_buf->dma_addr = 0;
+ tx_buf->skb = NULL;
+ tx_buf->fidx = -2;
+ }
+
+ tx_ring->qcp_rd_p = qcp_rd_p;
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_bytes += done_bytes;
+ r_vec->tx_pkts += done_pkts;
+ u64_stats_update_end(&r_vec->tx_sync);
+
+ if (!dp->netdev)
+ return;
+
+ nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
+ netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
+ if (nfp_nfd3_tx_ring_should_wake(tx_ring)) {
+ /* Make sure TX thread will see updated tx_ring->rd_p */
+ smp_mb();
+
+ if (unlikely(netif_tx_queue_stopped(nd_q)))
+ netif_tx_wake_queue(nd_q);
+ }
+
+ WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
+ "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
+ tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
+}
+
+static bool nfp_nfd3_xdp_complete(struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ u32 done_pkts = 0, done_bytes = 0;
+ bool done_all;
+ int idx, todo;
+ u32 qcp_rd_p;
+
+ /* Work out how many descriptors have been transmitted */
+ qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);
+
+ if (qcp_rd_p == tx_ring->qcp_rd_p)
+ return true;
+
+ todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
+
+ done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
+ todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
+
+ tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);
+
+ done_pkts = todo;
+ while (todo--) {
+ idx = D_IDX(tx_ring, tx_ring->rd_p);
+ tx_ring->rd_p++;
+
+ done_bytes += tx_ring->txbufs[idx].real_len;
+ }
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_bytes += done_bytes;
+ r_vec->tx_pkts += done_pkts;
+ u64_stats_update_end(&r_vec->tx_sync);
+
+ WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
+ "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
+ tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
+
+ return done_all;
+}
+
+/* Receive processing
+ */
+
+static void *
+nfp_nfd3_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
+{
+ void *frag;
+
+ if (!dp->xdp_prog) {
+ frag = napi_alloc_frag(dp->fl_bufsz);
+ if (unlikely(!frag))
+ return NULL;
+ } else {
+ struct page *page;
+
+ page = dev_alloc_page();
+ if (unlikely(!page))
+ return NULL;
+ frag = page_address(page);
+ }
+
+ *dma_addr = nfp_net_dma_map_rx(dp, frag);
+ if (dma_mapping_error(dp->dev, *dma_addr)) {
+ nfp_net_free_frag(frag, dp->xdp_prog);
+ nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
+ return NULL;
+ }
+
+ return frag;
+}
+
+/**
+ * nfp_nfd3_rx_give_one() - Put mapped skb on the software and hardware rings
+ * @dp: NFP Net data path struct
+ * @rx_ring: RX ring structure
+ * @frag: page fragment buffer
+ * @dma_addr: DMA address of skb mapping
+ */
+static void
+nfp_nfd3_rx_give_one(const struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring,
+ void *frag, dma_addr_t dma_addr)
+{
+ unsigned int wr_idx;
+
+ wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
+
+ nfp_net_dma_sync_dev_rx(dp, dma_addr);
+
+ /* Stash SKB and DMA address away */
+ rx_ring->rxbufs[wr_idx].frag = frag;
+ rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
+
+ /* Fill freelist descriptor */
+ rx_ring->rxds[wr_idx].fld.reserved = 0;
+ rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
+ nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
+ dma_addr + dp->rx_dma_off);
+
+ rx_ring->wr_p++;
+ if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
+ /* Update write pointer of the freelist queue. Make
+ * sure all writes are flushed before telling the hardware.
+ */
+ wmb();
+ nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
+ }
+}
+
+/**
+ * nfp_nfd3_rx_ring_fill_freelist() - Give buffers from the ring to FW
+ * @dp: NFP Net data path struct
+ * @rx_ring: RX ring to fill
+ */
+void nfp_nfd3_rx_ring_fill_freelist(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring)
+{
+ unsigned int i;
+
+ if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
+ return nfp_net_xsk_rx_ring_fill_freelist(rx_ring);
+
+ for (i = 0; i < rx_ring->cnt - 1; i++)
+ nfp_nfd3_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
+ rx_ring->rxbufs[i].dma_addr);
+}
+
+/**
+ * nfp_nfd3_rx_csum_has_errors() - group check if rxd has any csum errors
+ * @flags: RX descriptor flags field in CPU byte order
+ */
+static int nfp_nfd3_rx_csum_has_errors(u16 flags)
+{
+ u16 csum_all_checked, csum_all_ok;
+
+ csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
+ csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;
+
+ return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
+}
+
+/**
+ * nfp_nfd3_rx_csum() - set SKB checksum field based on RX descriptor flags
+ * @dp: NFP Net data path struct
+ * @r_vec: per-ring structure
+ * @rxd: Pointer to RX descriptor
+ * @meta: Parsed metadata prepend
+ * @skb: Pointer to SKB
+ */
+void
+nfp_nfd3_rx_csum(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ const struct nfp_net_rx_desc *rxd,
+ const struct nfp_meta_parsed *meta, struct sk_buff *skb)
+{
+ skb_checksum_none_assert(skb);
+
+ if (!(dp->netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ if (meta->csum_type) {
+ skb->ip_summed = meta->csum_type;
+ skb->csum = meta->csum;
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_complete++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ return;
+ }
+
+ if (nfp_nfd3_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_error++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ return;
+ }
+
+ /* Assume that the firmware will never report inner CSUM_OK unless outer
+ * L4 headers were successfully parsed. FW will always report zero UDP
+ * checksum as CSUM_OK.
+ */
+ if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
+ rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
+ __skb_incr_checksum_unnecessary(skb);
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_ok++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ }
+
+ if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
+ rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
+ __skb_incr_checksum_unnecessary(skb);
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_inner_ok++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ }
+}
+
+static void
+nfp_nfd3_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
+ unsigned int type, __be32 *hash)
+{
+ if (!(netdev->features & NETIF_F_RXHASH))
+ return;
+
+ switch (type) {
+ case NFP_NET_RSS_IPV4:
+ case NFP_NET_RSS_IPV6:
+ case NFP_NET_RSS_IPV6_EX:
+ meta->hash_type = PKT_HASH_TYPE_L3;
+ break;
+ default:
+ meta->hash_type = PKT_HASH_TYPE_L4;
+ break;
+ }
+
+ meta->hash = get_unaligned_be32(hash);
+}
+
+static void
+nfp_nfd3_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
+ void *data, struct nfp_net_rx_desc *rxd)
+{
+ struct nfp_net_rx_hash *rx_hash = data;
+
+ if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
+ return;
+
+ nfp_nfd3_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
+ &rx_hash->hash);
+}
+
+bool
+nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
+ void *data, void *pkt, unsigned int pkt_len, int meta_len)
+{
+ u32 meta_info;
+
+ meta_info = get_unaligned_be32(data);
+ data += 4;
+
+ while (meta_info) {
+ switch (meta_info & NFP_NET_META_FIELD_MASK) {
+ case NFP_NET_META_HASH:
+ meta_info >>= NFP_NET_META_FIELD_SIZE;
+ nfp_nfd3_set_hash(netdev, meta,
+ meta_info & NFP_NET_META_FIELD_MASK,
+ (__be32 *)data);
+ data += 4;
+ break;
+ case NFP_NET_META_MARK:
+ meta->mark = get_unaligned_be32(data);
+ data += 4;
+ break;
+ case NFP_NET_META_PORTID:
+ meta->portid = get_unaligned_be32(data);
+ data += 4;
+ break;
+ case NFP_NET_META_CSUM:
+ meta->csum_type = CHECKSUM_COMPLETE;
+ meta->csum =
+ (__force __wsum)__get_unaligned_cpu32(data);
+ data += 4;
+ break;
+ case NFP_NET_META_RESYNC_INFO:
+ if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
+ pkt_len))
+ return false;
+ data += sizeof(struct nfp_net_tls_resync_req);
+ break;
+ default:
+ return true;
+ }
+
+ meta_info >>= NFP_NET_META_FIELD_SIZE;
+ }
+
+ return data != pkt;
+}
+
+static void
+nfp_nfd3_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
+ struct sk_buff *skb)
+{
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_drops++;
+ /* If we have both skb and rxbuf the replacement buffer allocation
+ * must have failed, count this as an alloc failure.
+ */
+ if (skb && rxbuf)
+ r_vec->rx_replace_buf_alloc_fail++;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+	/* skb is built based on the frag, free_skb() would free the frag
+ * so to be able to reuse it we need an extra ref.
+ */
+ if (skb && rxbuf && skb->head == rxbuf->frag)
+ page_ref_inc(virt_to_head_page(rxbuf->frag));
+ if (rxbuf)
+ nfp_nfd3_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
+ if (skb)
+ dev_kfree_skb_any(skb);
+}
+
+static bool
+nfp_nfd3_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
+ struct nfp_net_tx_ring *tx_ring,
+ struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
+ unsigned int pkt_len, bool *completed)
+{
+ unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA;
+ struct nfp_nfd3_tx_buf *txbuf;
+ struct nfp_nfd3_tx_desc *txd;
+ int wr_idx;
+
+	/* Reject if xdp_adjust_tail grew the packet beyond the DMA area */
+ if (pkt_len + dma_off > dma_map_sz)
+ return false;
+
+ if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
+ if (!*completed) {
+ nfp_nfd3_xdp_complete(tx_ring);
+ *completed = true;
+ }
+
+ if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
+ nfp_nfd3_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
+ NULL);
+ return false;
+ }
+ }
+
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+
+ /* Stash the soft descriptor of the head then initialize it */
+ txbuf = &tx_ring->txbufs[wr_idx];
+
+ nfp_nfd3_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr);
+
+ txbuf->frag = rxbuf->frag;
+ txbuf->dma_addr = rxbuf->dma_addr;
+ txbuf->fidx = -1;
+ txbuf->pkt_cnt = 1;
+ txbuf->real_len = pkt_len;
+
+ dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
+ pkt_len, DMA_BIDIRECTIONAL);
+
+ /* Build TX descriptor */
+ txd = &tx_ring->txds[wr_idx];
+ txd->offset_eop = NFD3_DESC_TX_EOP;
+ txd->dma_len = cpu_to_le16(pkt_len);
+ nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off);
+ txd->data_len = cpu_to_le16(pkt_len);
+
+ txd->flags = 0;
+ txd->mss = 0;
+ txd->lso_hdrlen = 0;
+
+ tx_ring->wr_p++;
+ tx_ring->wr_ptr_add++;
+ return true;
+}
+
+/**
+ * nfp_nfd3_rx() - receive up to @budget packets on @rx_ring
+ * @rx_ring: RX ring to receive from
+ * @budget: NAPI budget
+ *
+ * Note, this function is separated out from the napi poll function to
+ * more cleanly separate packet receive code from other bookkeeping
+ * functions performed in the napi poll function.
+ *
+ * Return: Number of packets received.
+ */
+static int nfp_nfd3_rx(struct nfp_net_rx_ring *rx_ring, int budget)
+{
+ struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ struct nfp_net_tx_ring *tx_ring;
+ struct bpf_prog *xdp_prog;
+ bool xdp_tx_cmpl = false;
+ unsigned int true_bufsz;
+ struct sk_buff *skb;
+ int pkts_polled = 0;
+ struct xdp_buff xdp;
+ int idx;
+
+ xdp_prog = READ_ONCE(dp->xdp_prog);
+ true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
+ xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
+ &rx_ring->xdp_rxq);
+ tx_ring = r_vec->xdp_ring;
+
+ while (pkts_polled < budget) {
+ unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
+ struct nfp_net_rx_buf *rxbuf;
+ struct nfp_net_rx_desc *rxd;
+ struct nfp_meta_parsed meta;
+ bool redir_egress = false;
+ struct net_device *netdev;
+ dma_addr_t new_dma_addr;
+ u32 meta_len_xdp = 0;
+ void *new_frag;
+
+ idx = D_IDX(rx_ring, rx_ring->rd_p);
+
+ rxd = &rx_ring->rxds[idx];
+ if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
+ break;
+
+ /* Memory barrier to ensure that we won't do other reads
+ * before the DD bit.
+ */
+ dma_rmb();
+
+ memset(&meta, 0, sizeof(meta));
+
+ rx_ring->rd_p++;
+ pkts_polled++;
+
+ rxbuf = &rx_ring->rxbufs[idx];
+ /* < meta_len >
+ * <-- [rx_offset] -->
+ * ---------------------------------------------------------
+ * | [XX] | metadata | packet | XXXX |
+ * ---------------------------------------------------------
+ * <---------------- data_len --------------->
+ *
+ * The rx_offset is fixed for all packets, the meta_len can vary
+ * on a packet by packet basis. If rx_offset is set to zero
+ * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
+ * buffer and is immediately followed by the packet (no [XX]).
+ */
+ meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
+ data_len = le16_to_cpu(rxd->rxd.data_len);
+ pkt_len = data_len - meta_len;
+
+ pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
+ if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+ pkt_off += meta_len;
+ else
+ pkt_off += dp->rx_offset;
+ meta_off = pkt_off - meta_len;
+
+ /* Stats update */
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_pkts++;
+ r_vec->rx_bytes += pkt_len;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
+ (dp->rx_offset && meta_len > dp->rx_offset))) {
+ nn_dp_warn(dp, "oversized RX packet metadata %u\n",
+ meta_len);
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ continue;
+ }
+
+ nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
+ data_len);
+
+ if (!dp->chained_metadata_format) {
+ nfp_nfd3_set_hash_desc(dp->netdev, &meta,
+ rxbuf->frag + meta_off, rxd);
+ } else if (meta_len) {
+ if (unlikely(nfp_nfd3_parse_meta(dp->netdev, &meta,
+ rxbuf->frag + meta_off,
+ rxbuf->frag + pkt_off,
+ pkt_len, meta_len))) {
+ nn_dp_warn(dp, "invalid RX packet metadata\n");
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf,
+ NULL);
+ continue;
+ }
+ }
+
+ if (xdp_prog && !meta.portid) {
+ void *orig_data = rxbuf->frag + pkt_off;
+ unsigned int dma_off;
+ int act;
+
+ xdp_prepare_buff(&xdp,
+ rxbuf->frag + NFP_NET_RX_BUF_HEADROOM,
+ pkt_off - NFP_NET_RX_BUF_HEADROOM,
+ pkt_len, true);
+
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+ pkt_len = xdp.data_end - xdp.data;
+ pkt_off += xdp.data - orig_data;
+
+ switch (act) {
+ case XDP_PASS:
+ meta_len_xdp = xdp.data - xdp.data_meta;
+ break;
+ case XDP_TX:
+ dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
+ if (unlikely(!nfp_nfd3_tx_xdp_buf(dp, rx_ring,
+ tx_ring,
+ rxbuf,
+ dma_off,
+ pkt_len,
+ &xdp_tx_cmpl)))
+ trace_xdp_exception(dp->netdev,
+ xdp_prog, act);
+ continue;
+ default:
+ bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(dp->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ nfp_nfd3_rx_give_one(dp, rx_ring, rxbuf->frag,
+ rxbuf->dma_addr);
+ continue;
+ }
+ }
+
+ if (likely(!meta.portid)) {
+ netdev = dp->netdev;
+ } else if (meta.portid == NFP_META_PORT_ID_CTRL) {
+ struct nfp_net *nn = netdev_priv(dp->netdev);
+
+ nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
+ pkt_len);
+ nfp_nfd3_rx_give_one(dp, rx_ring, rxbuf->frag,
+ rxbuf->dma_addr);
+ continue;
+ } else {
+ struct nfp_net *nn;
+
+ nn = netdev_priv(dp->netdev);
+ netdev = nfp_app_dev_get(nn->app, meta.portid,
+ &redir_egress);
+ if (unlikely(!netdev)) {
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf,
+ NULL);
+ continue;
+ }
+
+ if (nfp_netdev_is_nfp_repr(netdev))
+ nfp_repr_inc_rx_stats(netdev, pkt_len);
+ }
+
+ skb = build_skb(rxbuf->frag, true_bufsz);
+ if (unlikely(!skb)) {
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ continue;
+ }
+ new_frag = nfp_nfd3_napi_alloc_one(dp, &new_dma_addr);
+ if (unlikely(!new_frag)) {
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+ continue;
+ }
+
+ nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
+
+ nfp_nfd3_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
+
+ skb_reserve(skb, pkt_off);
+ skb_put(skb, pkt_len);
+
+ skb->mark = meta.mark;
+ skb_set_hash(skb, meta.hash, meta.hash_type);
+
+ skb_record_rx_queue(skb, rx_ring->idx);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ nfp_nfd3_rx_csum(dp, r_vec, rxd, &meta, skb);
+
+#ifdef CONFIG_TLS_DEVICE
+ if (rxd->rxd.flags & PCIE_DESC_RX_DECRYPTED) {
+ skb->decrypted = true;
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_tls_rx++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ }
+#endif
+
+ if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ le16_to_cpu(rxd->rxd.vlan));
+ if (meta_len_xdp)
+ skb_metadata_set(skb, meta_len_xdp);
+
+ if (likely(!redir_egress)) {
+ napi_gro_receive(&rx_ring->r_vec->napi, skb);
+ } else {
+ skb->dev = netdev;
+ skb_reset_network_header(skb);
+ __skb_push(skb, ETH_HLEN);
+ dev_queue_xmit(skb);
+ }
+ }
+
+ if (xdp_prog) {
+ if (tx_ring->wr_ptr_add)
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
+ !xdp_tx_cmpl)
+ if (!nfp_nfd3_xdp_complete(tx_ring))
+ pkts_polled = budget;
+ }
+
+ return pkts_polled;
+}
+
+/**
+ * nfp_nfd3_poll() - napi poll function
+ * @napi: NAPI structure
+ * @budget: NAPI budget
+ *
+ * Return: number of packets polled.
+ */
+int nfp_nfd3_poll(struct napi_struct *napi, int budget)
+{
+ struct nfp_net_r_vector *r_vec =
+ container_of(napi, struct nfp_net_r_vector, napi);
+ unsigned int pkts_polled = 0;
+
+ if (r_vec->tx_ring)
+ nfp_nfd3_tx_complete(r_vec->tx_ring, budget);
+ if (r_vec->rx_ring)
+ pkts_polled = nfp_nfd3_rx(r_vec->rx_ring, budget);
+
+ if (pkts_polled < budget)
+ if (napi_complete_done(napi, pkts_polled))
+ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+
+ if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
+ struct dim_sample dim_sample = {};
+ unsigned int start;
+ u64 pkts, bytes;
+
+ do {
+ start = u64_stats_fetch_begin(&r_vec->rx_sync);
+ pkts = r_vec->rx_pkts;
+ bytes = r_vec->rx_bytes;
+ } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+
+ dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
+ net_dim(&r_vec->rx_dim, dim_sample);
+ }
+
+ if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
+ struct dim_sample dim_sample = {};
+ unsigned int start;
+ u64 pkts, bytes;
+
+ do {
+ start = u64_stats_fetch_begin(&r_vec->tx_sync);
+ pkts = r_vec->tx_pkts;
+ bytes = r_vec->tx_bytes;
+ } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+
+ dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
+ net_dim(&r_vec->tx_dim, dim_sample);
+ }
+
+ return pkts_polled;
+}
+
+/* Control device data path
+ */
+
+bool
+nfp_nfd3_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+ struct sk_buff *skb, bool old)
+{
+ unsigned int real_len = skb->len, meta_len = 0;
+ struct nfp_net_tx_ring *tx_ring;
+ struct nfp_nfd3_tx_buf *txbuf;
+ struct nfp_nfd3_tx_desc *txd;
+ struct nfp_net_dp *dp;
+ dma_addr_t dma_addr;
+ int wr_idx;
+
+ dp = &r_vec->nfp_net->dp;
+ tx_ring = r_vec->tx_ring;
+
+ if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
+ nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
+ goto err_free;
+ }
+
+ if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_busy++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ if (!old)
+ __skb_queue_tail(&r_vec->queue, skb);
+ else
+ __skb_queue_head(&r_vec->queue, skb);
+ return true;
+ }
+
+ if (nfp_app_ctrl_has_meta(nn->app)) {
+ if (unlikely(skb_headroom(skb) < 8)) {
+ nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
+ goto err_free;
+ }
+ meta_len = 8;
+ put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
+ put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));
+ }
+
+ /* Start with the head skbuf */
+ dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dp->dev, dma_addr))
+ goto err_dma_warn;
+
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+
+ /* Stash the soft descriptor of the head then initialize it */
+ txbuf = &tx_ring->txbufs[wr_idx];
+ txbuf->skb = skb;
+ txbuf->dma_addr = dma_addr;
+ txbuf->fidx = -1;
+ txbuf->pkt_cnt = 1;
+ txbuf->real_len = real_len;
+
+ /* Build TX descriptor */
+ txd = &tx_ring->txds[wr_idx];
+ txd->offset_eop = meta_len | NFD3_DESC_TX_EOP;
+ txd->dma_len = cpu_to_le16(skb_headlen(skb));
+ nfp_desc_set_dma_addr(txd, dma_addr);
+ txd->data_len = cpu_to_le16(skb->len);
+
+ txd->flags = 0;
+ txd->mss = 0;
+ txd->lso_hdrlen = 0;
+
+ tx_ring->wr_p++;
+ tx_ring->wr_ptr_add++;
+ nfp_net_tx_xmit_more_flush(tx_ring);
+
+ return false;
+
+err_dma_warn:
+ nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n");
+err_free:
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_errors++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
+static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(&r_vec->queue)))
+ if (nfp_nfd3_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
+ return;
+}
+
+static bool
+nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
+{
+ u32 meta_type, meta_tag;
+
+ if (!nfp_app_ctrl_has_meta(nn->app))
+ return !meta_len;
+
+ if (meta_len != 8)
+ return false;
+
+ meta_type = get_unaligned_be32(data);
+ meta_tag = get_unaligned_be32(data + 4);
+
+ return (meta_type == NFP_NET_META_PORTID &&
+ meta_tag == NFP_META_PORT_ID_CTRL);
+}
+
+static bool
+nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
+ struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
+{
+ unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
+ struct nfp_net_rx_buf *rxbuf;
+ struct nfp_net_rx_desc *rxd;
+ dma_addr_t new_dma_addr;
+ struct sk_buff *skb;
+ void *new_frag;
+ int idx;
+
+ idx = D_IDX(rx_ring, rx_ring->rd_p);
+
+ rxd = &rx_ring->rxds[idx];
+ if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
+ return false;
+
+ /* Memory barrier to ensure that we won't do other reads
+ * before the DD bit.
+ */
+ dma_rmb();
+
+ rx_ring->rd_p++;
+
+ rxbuf = &rx_ring->rxbufs[idx];
+ meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
+ data_len = le16_to_cpu(rxd->rxd.data_len);
+ pkt_len = data_len - meta_len;
+
+ pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
+ if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+ pkt_off += meta_len;
+ else
+ pkt_off += dp->rx_offset;
+ meta_off = pkt_off - meta_len;
+
+ /* Stats update */
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_pkts++;
+ r_vec->rx_bytes += pkt_len;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);
+
+ if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
+ nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
+ meta_len);
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ return true;
+ }
+
+ skb = build_skb(rxbuf->frag, dp->fl_bufsz);
+ if (unlikely(!skb)) {
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ return true;
+ }
+ new_frag = nfp_nfd3_napi_alloc_one(dp, &new_dma_addr);
+ if (unlikely(!new_frag)) {
+ nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+ return true;
+ }
+
+ nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
+
+ nfp_nfd3_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
+
+ skb_reserve(skb, pkt_off);
+ skb_put(skb, pkt_len);
+
+ nfp_app_ctrl_rx(nn->app, skb);
+
+ return true;
+}
+
+static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
+{
+ struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
+ struct nfp_net *nn = r_vec->nfp_net;
+ struct nfp_net_dp *dp = &nn->dp;
+ unsigned int budget = 512;
+
+ while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
+ continue;
+
+ return budget;
+}
+
+void nfp_nfd3_ctrl_poll(struct tasklet_struct *t)
+{
+ struct nfp_net_r_vector *r_vec = from_tasklet(r_vec, t, tasklet);
+
+ spin_lock(&r_vec->lock);
+ nfp_nfd3_tx_complete(r_vec->tx_ring, 0);
+ __nfp_ctrl_tx_queued(r_vec);
+ spin_unlock(&r_vec->lock);
+
+ if (nfp_ctrl_rx(r_vec)) {
+ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+ } else {
+ tasklet_schedule(&r_vec->tasklet);
+ nn_dp_warn(&r_vec->nfp_net->dp,
+ "control message budget exceeded!\n");
+ }
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h b/drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h
new file mode 100644
index 000000000000..7a0df9e6c3c4
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
+
+#ifndef _NFP_DP_NFD3_H_
+#define _NFP_DP_NFD3_H_
+
+struct sk_buff;
+struct net_device;
+
+/* TX descriptor format */
+
+#define NFD3_DESC_TX_EOP BIT(7)
+#define NFD3_DESC_TX_OFFSET_MASK GENMASK(6, 0)
+#define NFD3_DESC_TX_MSS_MASK GENMASK(13, 0)
+
+/* Flags in the host TX descriptor */
+#define NFD3_DESC_TX_CSUM BIT(7)
+#define NFD3_DESC_TX_IP4_CSUM BIT(6)
+#define NFD3_DESC_TX_TCP_CSUM BIT(5)
+#define NFD3_DESC_TX_UDP_CSUM BIT(4)
+#define NFD3_DESC_TX_VLAN BIT(3)
+#define NFD3_DESC_TX_LSO BIT(2)
+#define NFD3_DESC_TX_ENCAP BIT(1)
+#define NFD3_DESC_TX_O_IP4_CSUM BIT(0)
+
+struct nfp_nfd3_tx_desc {
+ union {
+ struct {
+ u8 dma_addr_hi; /* High bits of host buf address */
+ __le16 dma_len; /* Length to DMA for this desc */
+ u8 offset_eop; /* Offset in buf where pkt starts +
+ * highest bit is eop flag.
+ */
+ __le32 dma_addr_lo; /* Low 32bit of host buf addr */
+
+ __le16 mss; /* MSS to be used for LSO */
+ u8 lso_hdrlen; /* LSO, TCP payload offset */
+ u8 flags; /* TX Flags, see @NFD3_DESC_TX_* */
+ union {
+ struct {
+ u8 l3_offset; /* L3 header offset */
+ u8 l4_offset; /* L4 header offset */
+ };
+ __le16 vlan; /* VLAN tag to add if indicated */
+ };
+ __le16 data_len; /* Length of frame + meta data */
+ } __packed;
+ __le32 vals[4];
+ __le64 vals8[2];
+ };
+};
+
+/**
+ * struct nfp_nfd3_tx_buf - software TX buffer descriptor
+ * @skb: normal ring, sk_buff associated with this buffer
+ * @frag: XDP ring, page frag associated with this buffer
+ * @xdp: XSK buffer pool handle (for AF_XDP)
+ * @dma_addr: DMA mapping address of the buffer
+ * @fidx: Fragment index (-1 for the head and [0..nr_frags-1] for frags)
+ * @pkt_cnt: Number of packets to be produced out of the skb associated
+ * with this buffer (valid only on the head's buffer).
+ * Will be 1 for all non-TSO packets.
+ * @is_xsk_tx: Flag if buffer is an RX buffer after an XDP_TX action and not a
+ * buffer from the TX queue (for AF_XDP).
+ * @real_len: Number of bytes to be produced out of the skb (valid only
+ * on the head's buffer). Equal to skb->len for non-TSO packets.
+ */
+struct nfp_nfd3_tx_buf {
+ union {
+ struct sk_buff *skb;
+ void *frag;
+ struct xdp_buff *xdp;
+ };
+ dma_addr_t dma_addr;
+ union {
+ struct {
+ short int fidx;
+ u16 pkt_cnt;
+ };
+ struct {
+ bool is_xsk_tx;
+ };
+ };
+ u32 real_len;
+};
+
+void
+nfp_nfd3_rx_csum(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ const struct nfp_net_rx_desc *rxd,
+ const struct nfp_meta_parsed *meta, struct sk_buff *skb);
+bool
+nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
+ void *data, void *pkt, unsigned int pkt_len, int meta_len);
+void nfp_nfd3_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget);
+int nfp_nfd3_poll(struct napi_struct *napi, int budget);
+netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev);
+bool
+nfp_nfd3_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+ struct sk_buff *skb, bool old);
+void nfp_nfd3_ctrl_poll(struct tasklet_struct *t);
+void nfp_nfd3_rx_ring_fill_freelist(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring);
+void nfp_nfd3_xsk_tx_free(struct nfp_nfd3_tx_buf *txbuf);
+int nfp_nfd3_xsk_poll(struct napi_struct *napi, int budget);
+
+#endif
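/* Editor's sketch (not part of this patch): struct nfp_nfd3_tx_desc above is a
 * 16-byte descriptor that the union also exposes as four 32-bit or two 64-bit
 * words.  A userspace model of the layout, substituting plain fixed-width
 * integers for the kernel's little-endian types and collapsing the
 * l3_offset/l4_offset vs. vlan union into one 16-bit field (both are
 * simplifications for illustration):
 */
#include <stdint.h>
#include <stdio.h>

struct nfd3_txd_model {
	union {
		struct {
			uint8_t  dma_addr_hi;	/* high bits of host buf address */
			uint16_t dma_len;	/* length to DMA for this desc */
			uint8_t  offset_eop;	/* pkt offset, top bit is EOP */
			uint32_t dma_addr_lo;	/* low 32 bits of host buf addr */
			uint16_t mss;		/* MSS to be used for LSO */
			uint8_t  lso_hdrlen;	/* LSO, TCP payload offset */
			uint8_t  flags;		/* TX flags */
			uint16_t vlan;		/* or l3_offset/l4_offset pair */
			uint16_t data_len;	/* length of frame + metadata */
		} __attribute__((packed));
		uint32_t vals[4];
		uint64_t vals8[2];
	};
};

_Static_assert(sizeof(struct nfd3_txd_model) == 16,
	       "NFD3 TX descriptors are 16 bytes");

int main(void)
{
	printf("descriptor size: %zu bytes\n", sizeof(struct nfd3_txd_model));
	return 0;
}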
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/rings.c b/drivers/net/ethernet/netronome/nfp/nfd3/rings.c
new file mode 100644
index 000000000000..47604d5e25eb
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/rings.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
+
+#include <linux/seq_file.h>
+
+#include "../nfp_net.h"
+#include "../nfp_net_dp.h"
+#include "../nfp_net_xsk.h"
+#include "nfd3.h"
+
+static void nfp_nfd3_xsk_tx_bufs_free(struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_nfd3_tx_buf *txbuf;
+ unsigned int idx;
+
+ while (tx_ring->rd_p != tx_ring->wr_p) {
+ idx = D_IDX(tx_ring, tx_ring->rd_p);
+ txbuf = &tx_ring->txbufs[idx];
+
+ txbuf->real_len = 0;
+
+ tx_ring->qcp_rd_p++;
+ tx_ring->rd_p++;
+
+ if (tx_ring->r_vec->xsk_pool) {
+ if (txbuf->is_xsk_tx)
+ nfp_nfd3_xsk_tx_free(txbuf);
+
+ xsk_tx_completed(tx_ring->r_vec->xsk_pool, 1);
+ }
+ }
+}
+
+/**
+ * nfp_nfd3_tx_ring_reset() - Free any untransmitted buffers and reset pointers
+ * @dp: NFP Net data path struct
+ * @tx_ring: TX ring structure
+ *
+ * Assumes that the device is stopped, must be idempotent.
+ */
+static void
+nfp_nfd3_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+ struct netdev_queue *nd_q;
+ const skb_frag_t *frag;
+
+ while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
+ struct nfp_nfd3_tx_buf *tx_buf;
+ struct sk_buff *skb;
+ int idx, nr_frags;
+
+ idx = D_IDX(tx_ring, tx_ring->rd_p);
+ tx_buf = &tx_ring->txbufs[idx];
+
+ skb = tx_ring->txbufs[idx].skb;
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ if (tx_buf->fidx == -1) {
+ /* unmap head */
+ dma_unmap_single(dp->dev, tx_buf->dma_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ } else {
+ /* unmap fragment */
+ frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
+ dma_unmap_page(dp->dev, tx_buf->dma_addr,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ }
+
+ /* check for last gather fragment */
+ if (tx_buf->fidx == nr_frags - 1)
+ dev_kfree_skb_any(skb);
+
+ tx_buf->dma_addr = 0;
+ tx_buf->skb = NULL;
+ tx_buf->fidx = -2;
+
+ tx_ring->qcp_rd_p++;
+ tx_ring->rd_p++;
+ }
+
+ if (tx_ring->is_xdp)
+ nfp_nfd3_xsk_tx_bufs_free(tx_ring);
+
+ memset(tx_ring->txds, 0, tx_ring->size);
+ tx_ring->wr_p = 0;
+ tx_ring->rd_p = 0;
+ tx_ring->qcp_rd_p = 0;
+ tx_ring->wr_ptr_add = 0;
+
+ if (tx_ring->is_xdp || !dp->netdev)
+ return;
+
+ nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
+ netdev_tx_reset_queue(nd_q);
+}
+
+/**
+ * nfp_nfd3_tx_ring_free() - Free resources allocated to a TX ring
+ * @tx_ring: TX ring to free
+ */
+static void nfp_nfd3_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+
+ kvfree(tx_ring->txbufs);
+
+ if (tx_ring->txds)
+ dma_free_coherent(dp->dev, tx_ring->size,
+ tx_ring->txds, tx_ring->dma);
+
+ tx_ring->cnt = 0;
+ tx_ring->txbufs = NULL;
+ tx_ring->txds = NULL;
+ tx_ring->dma = 0;
+ tx_ring->size = 0;
+}
+
+/**
+ * nfp_nfd3_tx_ring_alloc() - Allocate resources for a TX ring
+ * @dp: NFP Net data path struct
+ * @tx_ring: TX Ring structure to allocate
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int
+nfp_nfd3_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+
+ tx_ring->cnt = dp->txd_cnt;
+
+ tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
+ tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size,
+ &tx_ring->dma,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!tx_ring->txds) {
+ netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
+ tx_ring->cnt);
+ goto err_alloc;
+ }
+
+ tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs),
+ GFP_KERNEL);
+ if (!tx_ring->txbufs)
+ goto err_alloc;
+
+ if (!tx_ring->is_xdp && dp->netdev)
+ netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
+ tx_ring->idx);
+
+ return 0;
+
+err_alloc:
+ nfp_nfd3_tx_ring_free(tx_ring);
+ return -ENOMEM;
+}
+
+static void
+nfp_nfd3_tx_ring_bufs_free(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring)
+{
+ unsigned int i;
+
+ if (!tx_ring->is_xdp)
+ return;
+
+ for (i = 0; i < tx_ring->cnt; i++) {
+ if (!tx_ring->txbufs[i].frag)
+ return;
+
+ nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr);
+ __free_page(virt_to_page(tx_ring->txbufs[i].frag));
+ }
+}
+
+static int
+nfp_nfd3_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_nfd3_tx_buf *txbufs = tx_ring->txbufs;
+ unsigned int i;
+
+ if (!tx_ring->is_xdp)
+ return 0;
+
+ for (i = 0; i < tx_ring->cnt; i++) {
+ txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr);
+ if (!txbufs[i].frag) {
+ nfp_nfd3_tx_ring_bufs_free(dp, tx_ring);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void
+nfp_nfd3_print_tx_descs(struct seq_file *file,
+ struct nfp_net_r_vector *r_vec,
+ struct nfp_net_tx_ring *tx_ring,
+ u32 d_rd_p, u32 d_wr_p)
+{
+ struct nfp_nfd3_tx_desc *txd;
+ u32 txd_cnt = tx_ring->cnt;
+ int i;
+
+ for (i = 0; i < txd_cnt; i++) {
+ struct xdp_buff *xdp;
+ struct sk_buff *skb;
+
+ txd = &tx_ring->txds[i];
+ seq_printf(file, "%04d: 0x%08x 0x%08x 0x%08x 0x%08x", i,
+ txd->vals[0], txd->vals[1],
+ txd->vals[2], txd->vals[3]);
+
+ if (!tx_ring->is_xdp) {
+ skb = READ_ONCE(tx_ring->txbufs[i].skb);
+ if (skb)
+ seq_printf(file, " skb->head=%p skb->data=%p",
+ skb->head, skb->data);
+ } else {
+ xdp = READ_ONCE(tx_ring->txbufs[i].xdp);
+ if (xdp)
+ seq_printf(file, " xdp->data=%p", xdp->data);
+ }
+
+ if (tx_ring->txbufs[i].dma_addr)
+ seq_printf(file, " dma_addr=%pad",
+ &tx_ring->txbufs[i].dma_addr);
+
+ if (i == tx_ring->rd_p % txd_cnt)
+ seq_puts(file, " H_RD");
+ if (i == tx_ring->wr_p % txd_cnt)
+ seq_puts(file, " H_WR");
+ if (i == d_rd_p % txd_cnt)
+ seq_puts(file, " D_RD");
+ if (i == d_wr_p % txd_cnt)
+ seq_puts(file, " D_WR");
+
+ seq_putc(file, '\n');
+ }
+}
+
+#define NFP_NFD3_CFG_CTRL_SUPPORTED \
+ (NFP_NET_CFG_CTRL_ENABLE | NFP_NET_CFG_CTRL_PROMISC | \
+ NFP_NET_CFG_CTRL_L2BC | NFP_NET_CFG_CTRL_L2MC | \
+ NFP_NET_CFG_CTRL_RXCSUM | NFP_NET_CFG_CTRL_TXCSUM | \
+ NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_TXVLAN | \
+ NFP_NET_CFG_CTRL_GATHER | NFP_NET_CFG_CTRL_LSO | \
+ NFP_NET_CFG_CTRL_CTAG_FILTER | NFP_NET_CFG_CTRL_CMSG_DATA | \
+ NFP_NET_CFG_CTRL_RINGCFG | NFP_NET_CFG_CTRL_RSS | \
+ NFP_NET_CFG_CTRL_IRQMOD | NFP_NET_CFG_CTRL_TXRWB | \
+ NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE | \
+ NFP_NET_CFG_CTRL_BPF | NFP_NET_CFG_CTRL_LSO2 | \
+ NFP_NET_CFG_CTRL_RSS2 | NFP_NET_CFG_CTRL_CSUM_COMPLETE | \
+ NFP_NET_CFG_CTRL_LIVE_ADDR)
+
+const struct nfp_dp_ops nfp_nfd3_ops = {
+ .version = NFP_NFD_VER_NFD3,
+ .tx_min_desc_per_pkt = 1,
+ .cap_mask = NFP_NFD3_CFG_CTRL_SUPPORTED,
+ .poll = nfp_nfd3_poll,
+ .xsk_poll = nfp_nfd3_xsk_poll,
+ .ctrl_poll = nfp_nfd3_ctrl_poll,
+ .xmit = nfp_nfd3_tx,
+ .ctrl_tx_one = nfp_nfd3_ctrl_tx_one,
+ .rx_ring_fill_freelist = nfp_nfd3_rx_ring_fill_freelist,
+ .tx_ring_alloc = nfp_nfd3_tx_ring_alloc,
+ .tx_ring_reset = nfp_nfd3_tx_ring_reset,
+ .tx_ring_free = nfp_nfd3_tx_ring_free,
+ .tx_ring_bufs_alloc = nfp_nfd3_tx_ring_bufs_alloc,
+ .tx_ring_bufs_free = nfp_nfd3_tx_ring_bufs_free,
+ .print_tx_descs = nfp_nfd3_print_tx_descs
+};
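/* Editor's sketch (not part of this patch): nfp_nfd3_ops above is a datapath
 * vtable; the core is expected to AND device capabilities with cap_mask and
 * reach TX/RX handling through the function pointers so NFD3 and NFDK can sit
 * behind one interface.  A minimal standalone model of that idea, with all
 * names hypothetical:
 */
#include <stdio.h>

struct dp_ops_model {
	unsigned int cap_mask;			/* caps this datapath supports */
	int (*poll)(int budget);		/* napi-style poll entry point */
};

static int nfd3_poll_model(int budget)
{
	return budget / 2;			/* pretend half the budget was used */
}

static const struct dp_ops_model nfd3_ops_model = {
	.cap_mask = 0x0000ffff,
	.poll	  = nfd3_poll_model,
};

int main(void)
{
	unsigned int dev_caps = 0x000f00ff;
	unsigned int usable   = dev_caps & nfd3_ops_model.cap_mask; /* drop caps the dp can't do */

	printf("usable caps 0x%08x, polled %d pkts\n",
	       usable, nfd3_ops_model.poll(64));
	return 0;
}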
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
new file mode 100644
index 000000000000..c16c4b42ecfd
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc */
+/* Copyright (C) 2021 Corigine, Inc */
+
+#include <linux/bpf_trace.h>
+#include <linux/netdevice.h>
+
+#include "../nfp_app.h"
+#include "../nfp_net.h"
+#include "../nfp_net_dp.h"
+#include "../nfp_net_xsk.h"
+#include "nfd3.h"
+
+static bool
+nfp_nfd3_xsk_tx_xdp(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ struct nfp_net_rx_ring *rx_ring,
+ struct nfp_net_tx_ring *tx_ring,
+ struct nfp_net_xsk_rx_buf *xrxbuf, unsigned int pkt_len,
+ int pkt_off)
+{
+ struct xsk_buff_pool *pool = r_vec->xsk_pool;
+ struct nfp_nfd3_tx_buf *txbuf;
+ struct nfp_nfd3_tx_desc *txd;
+ unsigned int wr_idx;
+
+ if (nfp_net_tx_space(tx_ring) < 1)
+ return false;
+
+ xsk_buff_raw_dma_sync_for_device(pool, xrxbuf->dma_addr + pkt_off,
+ pkt_len);
+
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+
+ txbuf = &tx_ring->txbufs[wr_idx];
+ txbuf->xdp = xrxbuf->xdp;
+ txbuf->real_len = pkt_len;
+ txbuf->is_xsk_tx = true;
+
+ /* Build TX descriptor */
+ txd = &tx_ring->txds[wr_idx];
+ txd->offset_eop = NFD3_DESC_TX_EOP;
+ txd->dma_len = cpu_to_le16(pkt_len);
+ nfp_desc_set_dma_addr(txd, xrxbuf->dma_addr + pkt_off);
+ txd->data_len = cpu_to_le16(pkt_len);
+
+ txd->flags = 0;
+ txd->mss = 0;
+ txd->lso_hdrlen = 0;
+
+ tx_ring->wr_ptr_add++;
+ tx_ring->wr_p++;
+
+ return true;
+}
+
+static void nfp_nfd3_xsk_rx_skb(struct nfp_net_rx_ring *rx_ring,
+ const struct nfp_net_rx_desc *rxd,
+ struct nfp_net_xsk_rx_buf *xrxbuf,
+ const struct nfp_meta_parsed *meta,
+ unsigned int pkt_len,
+ bool meta_xdp,
+ unsigned int *skbs_polled)
+{
+ struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ struct net_device *netdev;
+ struct sk_buff *skb;
+
+ if (likely(!meta->portid)) {
+ netdev = dp->netdev;
+ } else {
+ struct nfp_net *nn = netdev_priv(dp->netdev);
+
+ netdev = nfp_app_dev_get(nn->app, meta->portid, NULL);
+ if (unlikely(!netdev)) {
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ return;
+ }
+ nfp_repr_inc_rx_stats(netdev, pkt_len);
+ }
+
+ skb = napi_alloc_skb(&r_vec->napi, pkt_len);
+ if (!skb) {
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ return;
+ }
+ memcpy(skb_put(skb, pkt_len), xrxbuf->xdp->data, pkt_len);
+
+ skb->mark = meta->mark;
+ skb_set_hash(skb, meta->hash, meta->hash_type);
+
+ skb_record_rx_queue(skb, rx_ring->idx);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ nfp_nfd3_rx_csum(dp, r_vec, rxd, meta, skb);
+
+ if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ le16_to_cpu(rxd->rxd.vlan));
+ if (meta_xdp)
+ skb_metadata_set(skb,
+ xrxbuf->xdp->data - xrxbuf->xdp->data_meta);
+
+ napi_gro_receive(&rx_ring->r_vec->napi, skb);
+
+ nfp_net_xsk_rx_free(xrxbuf);
+
+ (*skbs_polled)++;
+}
+
+static unsigned int
+nfp_nfd3_xsk_rx(struct nfp_net_rx_ring *rx_ring, int budget,
+ unsigned int *skbs_polled)
+{
+ struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ struct nfp_net_tx_ring *tx_ring;
+ struct bpf_prog *xdp_prog;
+ bool xdp_redir = false;
+ int pkts_polled = 0;
+
+ xdp_prog = READ_ONCE(dp->xdp_prog);
+ tx_ring = r_vec->xdp_ring;
+
+ while (pkts_polled < budget) {
+ unsigned int meta_len, data_len, pkt_len, pkt_off;
+ struct nfp_net_xsk_rx_buf *xrxbuf;
+ struct nfp_net_rx_desc *rxd;
+ struct nfp_meta_parsed meta;
+ int idx, act;
+
+ idx = D_IDX(rx_ring, rx_ring->rd_p);
+
+ rxd = &rx_ring->rxds[idx];
+ if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
+ break;
+
+ rx_ring->rd_p++;
+ pkts_polled++;
+
+ xrxbuf = &rx_ring->xsk_rxbufs[idx];
+
+ /* If starved of buffers "drop" it and scream. */
+ if (rx_ring->rd_p >= rx_ring->wr_p) {
+ nn_dp_warn(dp, "Starved of RX buffers\n");
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ break;
+ }
+
+ /* Memory barrier to ensure that we won't do other reads
+ * before the DD bit.
+ */
+ dma_rmb();
+
+ memset(&meta, 0, sizeof(meta));
+
+ /* Only supporting AF_XDP with dynamic metadata so buffer layout
+ * is always:
+ *
+ * ---------------------------------------------------------
+ * | off | metadata | packet | XXXX |
+ * ---------------------------------------------------------
+ */
+ meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
+ data_len = le16_to_cpu(rxd->rxd.data_len);
+ pkt_len = data_len - meta_len;
+
+ if (unlikely(meta_len > NFP_NET_MAX_PREPEND)) {
+ nn_dp_warn(dp, "Oversized RX packet metadata %u\n",
+ meta_len);
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ continue;
+ }
+
+ /* Stats update. */
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_pkts++;
+ r_vec->rx_bytes += pkt_len;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ xrxbuf->xdp->data += meta_len;
+ xrxbuf->xdp->data_end = xrxbuf->xdp->data + pkt_len;
+ xdp_set_data_meta_invalid(xrxbuf->xdp);
+ xsk_buff_dma_sync_for_cpu(xrxbuf->xdp, r_vec->xsk_pool);
+ net_prefetch(xrxbuf->xdp->data);
+
+ if (meta_len) {
+ if (unlikely(nfp_nfd3_parse_meta(dp->netdev, &meta,
+ xrxbuf->xdp->data -
+ meta_len,
+ xrxbuf->xdp->data,
+ pkt_len, meta_len))) {
+ nn_dp_warn(dp, "Invalid RX packet metadata\n");
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ continue;
+ }
+
+ if (unlikely(meta.portid)) {
+ struct nfp_net *nn = netdev_priv(dp->netdev);
+
+ if (meta.portid != NFP_META_PORT_ID_CTRL) {
+ nfp_nfd3_xsk_rx_skb(rx_ring, rxd,
+ xrxbuf, &meta,
+ pkt_len, false,
+ skbs_polled);
+ continue;
+ }
+
+ nfp_app_ctrl_rx_raw(nn->app, xrxbuf->xdp->data,
+ pkt_len);
+ nfp_net_xsk_rx_free(xrxbuf);
+ continue;
+ }
+ }
+
+ act = bpf_prog_run_xdp(xdp_prog, xrxbuf->xdp);
+
+ pkt_len = xrxbuf->xdp->data_end - xrxbuf->xdp->data;
+ pkt_off = xrxbuf->xdp->data - xrxbuf->xdp->data_hard_start;
+
+ switch (act) {
+ case XDP_PASS:
+ nfp_nfd3_xsk_rx_skb(rx_ring, rxd, xrxbuf, &meta, pkt_len,
+ true, skbs_polled);
+ break;
+ case XDP_TX:
+ if (!nfp_nfd3_xsk_tx_xdp(dp, r_vec, rx_ring, tx_ring,
+ xrxbuf, pkt_len, pkt_off))
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ else
+ nfp_net_xsk_rx_unstash(xrxbuf);
+ break;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(dp->netdev, xrxbuf->xdp, xdp_prog)) {
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ } else {
+ nfp_net_xsk_rx_unstash(xrxbuf);
+ xdp_redir = true;
+ }
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(dp->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ nfp_net_xsk_rx_drop(r_vec, xrxbuf);
+ break;
+ }
+ }
+
+ nfp_net_xsk_rx_ring_fill_freelist(r_vec->rx_ring);
+
+ if (xdp_redir)
+ xdp_do_flush_map();
+
+ if (tx_ring->wr_ptr_add)
+ nfp_net_tx_xmit_more_flush(tx_ring);
+
+ return pkts_polled;
+}
+
+void nfp_nfd3_xsk_tx_free(struct nfp_nfd3_tx_buf *txbuf)
+{
+ xsk_buff_free(txbuf->xdp);
+
+ txbuf->dma_addr = 0;
+ txbuf->xdp = NULL;
+}
+
+static bool nfp_nfd3_xsk_complete(struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+ u32 done_pkts = 0, done_bytes = 0, reused = 0;
+ bool done_all;
+ int idx, todo;
+ u32 qcp_rd_p;
+
+ if (tx_ring->wr_p == tx_ring->rd_p)
+ return true;
+
+ /* Work out how many descriptors have been transmitted. */
+ qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
+
+ if (qcp_rd_p == tx_ring->qcp_rd_p)
+ return true;
+
+ todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
+
+ done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
+ todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
+
+ tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);
+
+ done_pkts = todo;
+ while (todo--) {
+ struct nfp_nfd3_tx_buf *txbuf;
+
+ idx = D_IDX(tx_ring, tx_ring->rd_p);
+ tx_ring->rd_p++;
+
+ txbuf = &tx_ring->txbufs[idx];
+ if (unlikely(!txbuf->real_len))
+ continue;
+
+ done_bytes += txbuf->real_len;
+ txbuf->real_len = 0;
+
+ if (txbuf->is_xsk_tx) {
+ nfp_nfd3_xsk_tx_free(txbuf);
+ reused++;
+ }
+ }
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_bytes += done_bytes;
+ r_vec->tx_pkts += done_pkts;
+ u64_stats_update_end(&r_vec->tx_sync);
+
+ xsk_tx_completed(r_vec->xsk_pool, done_pkts - reused);
+
+ WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
+ "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
+ tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
+
+ return done_all;
+}
+
+static void nfp_nfd3_xsk_tx(struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+ struct xdp_desc desc[NFP_NET_XSK_TX_BATCH];
+ struct xsk_buff_pool *xsk_pool;
+ struct nfp_nfd3_tx_desc *txd;
+ u32 pkts = 0, wr_idx;
+ u32 i, got;
+
+ xsk_pool = r_vec->xsk_pool;
+
+ while (nfp_net_tx_space(tx_ring) >= NFP_NET_XSK_TX_BATCH) {
+ for (i = 0; i < NFP_NET_XSK_TX_BATCH; i++)
+ if (!xsk_tx_peek_desc(xsk_pool, &desc[i]))
+ break;
+ got = i;
+ if (!got)
+ break;
+
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p + i);
+ prefetchw(&tx_ring->txds[wr_idx]);
+
+ for (i = 0; i < got; i++)
+ xsk_buff_raw_dma_sync_for_device(xsk_pool, desc[i].addr,
+ desc[i].len);
+
+ for (i = 0; i < got; i++) {
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p + i);
+
+ tx_ring->txbufs[wr_idx].real_len = desc[i].len;
+ tx_ring->txbufs[wr_idx].is_xsk_tx = false;
+
+ /* Build TX descriptor. */
+ txd = &tx_ring->txds[wr_idx];
+ nfp_desc_set_dma_addr(txd,
+ xsk_buff_raw_get_dma(xsk_pool,
+ desc[i].addr
+ ));
+ txd->offset_eop = NFD3_DESC_TX_EOP;
+ txd->dma_len = cpu_to_le16(desc[i].len);
+ txd->data_len = cpu_to_le16(desc[i].len);
+ }
+
+ tx_ring->wr_p += got;
+ pkts += got;
+ }
+
+ if (!pkts)
+ return;
+
+ xsk_tx_release(xsk_pool);
+ /* Ensure all records are visible before incrementing write counter. */
+ wmb();
+ nfp_qcp_wr_ptr_add(tx_ring->qcp_q, pkts);
+}
+
+int nfp_nfd3_xsk_poll(struct napi_struct *napi, int budget)
+{
+ struct nfp_net_r_vector *r_vec =
+ container_of(napi, struct nfp_net_r_vector, napi);
+ unsigned int pkts_polled, skbs = 0;
+
+ pkts_polled = nfp_nfd3_xsk_rx(r_vec->rx_ring, budget, &skbs);
+
+ if (pkts_polled < budget) {
+ if (r_vec->tx_ring)
+ nfp_nfd3_tx_complete(r_vec->tx_ring, budget);
+
+ if (!nfp_nfd3_xsk_complete(r_vec->xdp_ring))
+ pkts_polled = budget;
+
+ nfp_nfd3_xsk_tx(r_vec->xdp_ring);
+
+ if (pkts_polled < budget && napi_complete_done(napi, skbs))
+ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+ }
+
+ return pkts_polled;
+}
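/* Editor's sketch (not part of this patch): throughout these rings rd_p/wr_p
 * are free-running counters and D_IDX() masks them down to a slot index,
 * which relies on the ring size being a power of two; the fill level is
 * simply wr_p - rd_p on the raw counters.  A tiny standalone model of that
 * indexing:
 */
#include <assert.h>
#include <stdio.h>

#define RING_CNT 8u				/* assumed power-of-two ring size */
#define IDX(p)	 ((p) & (RING_CNT - 1))		/* model of D_IDX() */

int main(void)
{
	unsigned int wr_p = 0, rd_p = 0;
	int i;

	for (i = 0; i < 11; i++)
		wr_p++;				/* produce 11 entries, counter never wraps */
	rd_p += 9;				/* consume 9 of them */

	assert(IDX(wr_p) == 3);			/* 11 % 8 */
	assert(wr_p - rd_p == 2);		/* two entries still in flight */
	printf("slot %u, fill %u\n", IDX(wr_p), wr_p - rd_p);
	return 0;
}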
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
new file mode 100644
index 000000000000..e3da9ac20e57
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -0,0 +1,1524 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
+
+#include <linux/bpf_trace.h>
+#include <linux/netdevice.h>
+#include <linux/overflow.h>
+#include <linux/sizes.h>
+#include <linux/bitfield.h>
+
+#include "../nfp_app.h"
+#include "../nfp_net.h"
+#include "../nfp_net_dp.h"
+#include "../crypto/crypto.h"
+#include "../crypto/fw.h"
+#include "nfdk.h"
+
+static int nfp_nfdk_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
+{
+ return !nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT * 2);
+}
+
+static int nfp_nfdk_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
+{
+ return nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT);
+}
+
+static void nfp_nfdk_tx_ring_stop(struct netdev_queue *nd_q,
+ struct nfp_net_tx_ring *tx_ring)
+{
+ netif_tx_stop_queue(nd_q);
+
+ /* We can race with the TX completion out of NAPI so recheck */
+ smp_mb();
+ if (unlikely(nfp_nfdk_tx_ring_should_wake(tx_ring)))
+ netif_tx_start_queue(nd_q);
+}
+
+static __le64
+nfp_nfdk_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfdk_tx_buf *txbuf,
+ struct sk_buff *skb)
+{
+ u32 segs, hdrlen, l3_offset, l4_offset;
+ struct nfp_nfdk_tx_desc txd;
+ u16 mss;
+
+ if (!skb->encapsulation) {
+ l3_offset = skb_network_offset(skb);
+ l4_offset = skb_transport_offset(skb);
+ hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ } else {
+ l3_offset = skb_inner_network_offset(skb);
+ l4_offset = skb_inner_transport_offset(skb);
+ hdrlen = skb_inner_transport_header(skb) - skb->data +
+ inner_tcp_hdrlen(skb);
+ }
+
+ segs = skb_shinfo(skb)->gso_segs;
+ mss = skb_shinfo(skb)->gso_size & NFDK_DESC_TX_MSS_MASK;
+
+ /* Note: TSO of the packet with metadata prepended to skb is not
+ * supported yet, in which case l3/l4_offset and lso_hdrlen need to
+ * be correctly handled here.
+ * Concern:
+ * The driver doesn't have md_bytes easily available at this point.
+ * The PCI.IN PD ME won't have md_bytes bytes to add to lso_hdrlen,
+ * so it needs the full length there. The app MEs might prefer
+ * l3_offset and l4_offset relative to the start of packet data,
+ * but could probably cope with it being relative to the CTM buf
+ * data offset.
+ */
+ txd.l3_offset = l3_offset;
+ txd.l4_offset = l4_offset;
+ txd.lso_meta_res = 0;
+ txd.mss = cpu_to_le16(mss);
+ txd.lso_hdrlen = hdrlen;
+ txd.lso_totsegs = segs;
+
+ txbuf->pkt_cnt = segs;
+ txbuf->real_len = skb->len + hdrlen * (txbuf->pkt_cnt - 1);
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_lso++;
+ u64_stats_update_end(&r_vec->tx_sync);
+
+ return txd.raw;
+}
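/* Editor's sketch (not part of this patch): the accounting above charges the
 * headers once per resulting segment, i.e. real_len = skb->len +
 * hdrlen * (segs - 1), so the byte counters reflect what actually goes on
 * the wire once the hardware replicates the headers.  A small worked example
 * with made-up but typical numbers:
 */
#include <stdio.h>

int main(void)
{
	unsigned int payload = 64 * 1024;			/* TSO payload bytes */
	unsigned int hdrlen  = 54;				/* Ethernet + IPv4 + TCP, no options */
	unsigned int mss     = 1460;
	unsigned int segs    = (payload + mss - 1) / mss;	/* 45 segments */
	unsigned int skb_len = payload + hdrlen;		/* headers counted once in the skb */
	unsigned int real_len = skb_len + hdrlen * (segs - 1);

	printf("segs=%u, on-wire bytes=%u\n", segs, real_len);	/* 45, 67966 */
	return 0;
}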
+
+static u8
+nfp_nfdk_tx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ unsigned int pkt_cnt, struct sk_buff *skb, u64 flags)
+{
+ struct ipv6hdr *ipv6h;
+ struct iphdr *iph;
+
+ if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
+ return flags;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return flags;
+
+ flags |= NFDK_DESC_TX_L4_CSUM;
+
+ iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+ /* L3 checksum offloading flag is not required for ipv6 */
+ if (iph->version == 4) {
+ flags |= NFDK_DESC_TX_L3_CSUM;
+ } else if (ipv6h->version != 6) {
+ nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
+ return flags;
+ }
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ if (!skb->encapsulation) {
+ r_vec->hw_csum_tx += pkt_cnt;
+ } else {
+ flags |= NFDK_DESC_TX_ENCAP;
+ r_vec->hw_csum_tx_inner += pkt_cnt;
+ }
+ u64_stats_update_end(&r_vec->tx_sync);
+
+ return flags;
+}
+
+static int
+nfp_nfdk_tx_maybe_close_block(struct nfp_net_tx_ring *tx_ring,
+ unsigned int nr_frags, struct sk_buff *skb)
+{
+ unsigned int n_descs, wr_p, nop_slots;
+ const skb_frag_t *frag, *fend;
+ struct nfp_nfdk_tx_desc *txd;
+ unsigned int wr_idx;
+ int err;
+
+recount_descs:
+ n_descs = nfp_nfdk_headlen_to_segs(skb_headlen(skb));
+
+ frag = skb_shinfo(skb)->frags;
+ fend = frag + nr_frags;
+ for (; frag < fend; frag++)
+ n_descs += DIV_ROUND_UP(skb_frag_size(frag),
+ NFDK_TX_MAX_DATA_PER_DESC);
+
+ if (unlikely(n_descs > NFDK_TX_DESC_GATHER_MAX)) {
+ if (skb_is_nonlinear(skb)) {
+ err = skb_linearize(skb);
+ if (err)
+ return err;
+ goto recount_descs;
+ }
+ return -EINVAL;
+ }
+
+ /* Undercount by 1 (the metadata descriptor is not counted) so the round down works out */
+ n_descs += !!skb_is_gso(skb);
+
+ if (round_down(tx_ring->wr_p, NFDK_TX_DESC_BLOCK_CNT) !=
+ round_down(tx_ring->wr_p + n_descs, NFDK_TX_DESC_BLOCK_CNT))
+ goto close_block;
+
+ if ((u32)tx_ring->data_pending + skb->len > NFDK_TX_MAX_DATA_PER_BLOCK)
+ goto close_block;
+
+ return 0;
+
+close_block:
+ wr_p = tx_ring->wr_p;
+ nop_slots = D_BLOCK_CPL(wr_p);
+
+ wr_idx = D_IDX(tx_ring, wr_p);
+ tx_ring->ktxbufs[wr_idx].skb = NULL;
+ txd = &tx_ring->ktxds[wr_idx];
+
+ memset(txd, 0, array_size(nop_slots, sizeof(struct nfp_nfdk_tx_desc)));
+
+ tx_ring->data_pending = 0;
+ tx_ring->wr_p += nop_slots;
+ tx_ring->wr_ptr_add += nop_slots;
+
+ return 0;
+}
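/* Editor's sketch (not part of this patch): the helper above keeps all
 * descriptors of one packet within a single descriptor block, padding the
 * rest of the current block with no-op slots when the packet would straddle
 * a boundary.  A standalone model of the boundary check and the padding
 * amount, assuming 32 descriptors per block (an assumption standing in for
 * NFDK_TX_DESC_BLOCK_CNT):
 */
#include <stdio.h>

#define BLOCK_CNT 32u

static unsigned int nop_slots_needed(unsigned int wr_p, unsigned int n_descs)
{
	/* same check as the driver: compare the blocks of wr_p and wr_p + n_descs */
	if (wr_p / BLOCK_CNT == (wr_p + n_descs) / BLOCK_CNT)
		return 0;				/* packet fits in the current block */

	return BLOCK_CNT - (wr_p % BLOCK_CNT);		/* pad up to the next block start */
}

int main(void)
{
	printf("%u\n", nop_slots_needed(30, 5));	/* 2 nop slots, then a fresh block */
	printf("%u\n", nop_slots_needed(4, 5));		/* 0, same block */
	return 0;
}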
+
+static int nfp_nfdk_prep_port_id(struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ unsigned char *data;
+
+ if (likely(!md_dst))
+ return 0;
+ if (unlikely(md_dst->type != METADATA_HW_PORT_MUX))
+ return 0;
+
+ /* Note: TSO of a skb with metadata prepended is not supported.
+ * See the comments in `nfp_nfdk_tx_tso` for details.
+ */
+ if (unlikely(md_dst && skb_is_gso(skb)))
+ return -EOPNOTSUPP;
+
+ if (unlikely(skb_cow_head(skb, sizeof(md_dst->u.port_info.port_id))))
+ return -ENOMEM;
+
+ data = skb_push(skb, sizeof(md_dst->u.port_info.port_id));
+ put_unaligned_be32(md_dst->u.port_info.port_id, data);
+
+ return sizeof(md_dst->u.port_info.port_id);
+}
+
+static int
+nfp_nfdk_prep_tx_meta(struct nfp_app *app, struct sk_buff *skb,
+ struct nfp_net_r_vector *r_vec)
+{
+ unsigned char *data;
+ int res, md_bytes;
+ u32 meta_id = 0;
+
+ res = nfp_nfdk_prep_port_id(skb);
+ if (unlikely(res <= 0))
+ return res;
+
+ md_bytes = res;
+ meta_id = NFP_NET_META_PORTID;
+
+ if (unlikely(skb_cow_head(skb, sizeof(meta_id))))
+ return -ENOMEM;
+
+ md_bytes += sizeof(meta_id);
+
+ meta_id = FIELD_PREP(NFDK_META_LEN, md_bytes) |
+ FIELD_PREP(NFDK_META_FIELDS, meta_id);
+
+ data = skb_push(skb, sizeof(meta_id));
+ put_unaligned_be32(meta_id, data);
+
+ return NFDK_DESC_TX_CHAIN_META;
+}
+
+/**
+ * nfp_nfdk_tx() - Main transmit entry point
+ * @skb: SKB to transmit
+ * @netdev: netdev structure
+ *
+ * Return: NETDEV_TX_OK on success.
+ */
+netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_nfdk_tx_buf *txbuf, *etxbuf;
+ u32 cnt, tmp_dlen, dlen_type = 0;
+ struct nfp_net_tx_ring *tx_ring;
+ struct nfp_net_r_vector *r_vec;
+ const skb_frag_t *frag, *fend;
+ struct nfp_nfdk_tx_desc *txd;
+ unsigned int real_len, qidx;
+ unsigned int dma_len, type;
+ struct netdev_queue *nd_q;
+ struct nfp_net_dp *dp;
+ int nr_frags, wr_idx;
+ dma_addr_t dma_addr;
+ u64 metadata;
+
+ dp = &nn->dp;
+ qidx = skb_get_queue_mapping(skb);
+ tx_ring = &dp->tx_rings[qidx];
+ r_vec = tx_ring->r_vec;
+ nd_q = netdev_get_tx_queue(dp->netdev, qidx);
+
+ /* Don't bother counting frags, assume the worst */
+ if (unlikely(nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT))) {
+ nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
+ qidx, tx_ring->wr_p, tx_ring->rd_p);
+ netif_tx_stop_queue(nd_q);
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_busy++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ return NETDEV_TX_BUSY;
+ }
+
+ metadata = nfp_nfdk_prep_tx_meta(nn->app, skb, r_vec);
+ if (unlikely((int)metadata < 0))
+ goto err_flush;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ if (nfp_nfdk_tx_maybe_close_block(tx_ring, nr_frags, skb))
+ goto err_flush;
+
+ /* DMA map all */
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+ txd = &tx_ring->ktxds[wr_idx];
+ txbuf = &tx_ring->ktxbufs[wr_idx];
+
+ dma_len = skb_headlen(skb);
+ if (skb_is_gso(skb))
+ type = NFDK_DESC_TX_TYPE_TSO;
+ else if (!nr_frags && dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
+ type = NFDK_DESC_TX_TYPE_SIMPLE;
+ else
+ type = NFDK_DESC_TX_TYPE_GATHER;
+
+ dma_addr = dma_map_single(dp->dev, skb->data, dma_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dp->dev, dma_addr))
+ goto err_warn_dma;
+
+ txbuf->skb = skb;
+ txbuf++;
+
+ txbuf->dma_addr = dma_addr;
+ txbuf++;
+
+ /* FIELD_PREP() implicitly truncates to chunk */
+ dma_len -= 1;
+ dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) |
+ FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
+
+ txd->dma_len_type = cpu_to_le16(dlen_type);
+ nfp_desc_set_dma_addr(txd, dma_addr);
+
+ /* starts at bit 0 */
+ BUILD_BUG_ON(!(NFDK_DESC_TX_DMA_LEN_HEAD & 1));
+
+ /* Preserve the original dlen_type so the EOP logic below
+ * can reuse it.
+ */
+ tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
+ dma_len -= tmp_dlen;
+ dma_addr += tmp_dlen + 1;
+ txd++;
+
+ /* The rest of the data (if any) will be in larger dma descriptors
+ * and is handled with the fragment loop.
+ */
+ frag = skb_shinfo(skb)->frags;
+ fend = frag + nr_frags;
+
+ while (true) {
+ while (dma_len > 0) {
+ dma_len -= 1;
+ dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
+
+ txd->dma_len_type = cpu_to_le16(dlen_type);
+ nfp_desc_set_dma_addr(txd, dma_addr);
+
+ dma_len -= dlen_type;
+ dma_addr += dlen_type + 1;
+ txd++;
+ }
+
+ if (frag >= fend)
+ break;
+
+ dma_len = skb_frag_size(frag);
+ dma_addr = skb_frag_dma_map(dp->dev, frag, 0, dma_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dp->dev, dma_addr))
+ goto err_unmap;
+
+ txbuf->dma_addr = dma_addr;
+ txbuf++;
+
+ frag++;
+ }
+
+ (txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP);
+
+ if (!skb_is_gso(skb)) {
+ real_len = skb->len;
+ /* Metadata desc */
+ metadata = nfp_nfdk_tx_csum(dp, r_vec, 1, skb, metadata);
+ txd->raw = cpu_to_le64(metadata);
+ txd++;
+ } else {
+ /* lso desc should be placed after metadata desc */
+ (txd + 1)->raw = nfp_nfdk_tx_tso(r_vec, txbuf, skb);
+ real_len = txbuf->real_len;
+ /* Metadata desc */
+ metadata = nfp_nfdk_tx_csum(dp, r_vec, txbuf->pkt_cnt, skb, metadata);
+ txd->raw = cpu_to_le64(metadata);
+ txd += 2;
+ txbuf++;
+ }
+
+ cnt = txd - tx_ring->ktxds - wr_idx;
+ if (unlikely(round_down(wr_idx, NFDK_TX_DESC_BLOCK_CNT) !=
+ round_down(wr_idx + cnt - 1, NFDK_TX_DESC_BLOCK_CNT)))
+ goto err_warn_overflow;
+
+ skb_tx_timestamp(skb);
+
+ tx_ring->wr_p += cnt;
+ if (tx_ring->wr_p % NFDK_TX_DESC_BLOCK_CNT)
+ tx_ring->data_pending += skb->len;
+ else
+ tx_ring->data_pending = 0;
+
+ if (nfp_nfdk_tx_ring_should_stop(tx_ring))
+ nfp_nfdk_tx_ring_stop(nd_q, tx_ring);
+
+ tx_ring->wr_ptr_add += cnt;
+ if (__netdev_tx_sent_queue(nd_q, real_len, netdev_xmit_more()))
+ nfp_net_tx_xmit_more_flush(tx_ring);
+
+ return NETDEV_TX_OK;
+
+err_warn_overflow:
+ WARN_ONCE(1, "unable to fit packet into a descriptor wr_idx:%d head:%d frags:%d cnt:%d",
+ wr_idx, skb_headlen(skb), nr_frags, cnt);
+ if (skb_is_gso(skb))
+ txbuf--;
+err_unmap:
+ /* txbuf pointed to the next-to-use */
+ etxbuf = txbuf;
+ /* first txbuf holds the skb */
+ txbuf = &tx_ring->ktxbufs[wr_idx + 1];
+ if (txbuf < etxbuf) {
+ dma_unmap_single(dp->dev, txbuf->dma_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ txbuf->raw = 0;
+ txbuf++;
+ }
+ frag = skb_shinfo(skb)->frags;
+ while (etxbuf < txbuf) {
+ dma_unmap_page(dp->dev, txbuf->dma_addr,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ txbuf->raw = 0;
+ frag++;
+ txbuf++;
+ }
+err_warn_dma:
+ nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
+err_flush:
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_errors++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * nfp_nfdk_tx_complete() - Handle completed TX packets
+ * @tx_ring: TX ring structure
+ * @budget: NAPI budget (only used as bool to determine if in NAPI context)
+ */
+static void nfp_nfdk_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ u32 done_pkts = 0, done_bytes = 0;
+ struct nfp_nfdk_tx_buf *ktxbufs;
+ struct device *dev = dp->dev;
+ struct netdev_queue *nd_q;
+ u32 rd_p, qcp_rd_p;
+ int todo;
+
+ rd_p = tx_ring->rd_p;
+ if (tx_ring->wr_p == rd_p)
+ return;
+
+ /* Work out how many descriptors have been transmitted */
+ qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);
+
+ if (qcp_rd_p == tx_ring->qcp_rd_p)
+ return;
+
+ todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
+ ktxbufs = tx_ring->ktxbufs;
+
+ while (todo > 0) {
+ const skb_frag_t *frag, *fend;
+ unsigned int size, n_descs = 1;
+ struct nfp_nfdk_tx_buf *txbuf;
+ struct sk_buff *skb;
+
+ txbuf = &ktxbufs[D_IDX(tx_ring, rd_p)];
+ skb = txbuf->skb;
+ txbuf++;
+
+ /* Closed block */
+ if (!skb) {
+ n_descs = D_BLOCK_CPL(rd_p);
+ goto next;
+ }
+
+ /* Unmap head */
+ size = skb_headlen(skb);
+ n_descs += nfp_nfdk_headlen_to_segs(size);
+ dma_unmap_single(dev, txbuf->dma_addr, size, DMA_TO_DEVICE);
+ txbuf++;
+
+ /* Unmap frags */
+ frag = skb_shinfo(skb)->frags;
+ fend = frag + skb_shinfo(skb)->nr_frags;
+ for (; frag < fend; frag++) {
+ size = skb_frag_size(frag);
+ n_descs += DIV_ROUND_UP(size,
+ NFDK_TX_MAX_DATA_PER_DESC);
+ dma_unmap_page(dev, txbuf->dma_addr,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ txbuf++;
+ }
+
+ if (!skb_is_gso(skb)) {
+ done_bytes += skb->len;
+ done_pkts++;
+ } else {
+ done_bytes += txbuf->real_len;
+ done_pkts += txbuf->pkt_cnt;
+ n_descs++;
+ }
+
+ napi_consume_skb(skb, budget);
+next:
+ rd_p += n_descs;
+ todo -= n_descs;
+ }
+
+ tx_ring->rd_p = rd_p;
+ tx_ring->qcp_rd_p = qcp_rd_p;
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_bytes += done_bytes;
+ r_vec->tx_pkts += done_pkts;
+ u64_stats_update_end(&r_vec->tx_sync);
+
+ if (!dp->netdev)
+ return;
+
+ nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
+ netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
+ if (nfp_nfdk_tx_ring_should_wake(tx_ring)) {
+ /* Make sure TX thread will see updated tx_ring->rd_p */
+ smp_mb();
+
+ if (unlikely(netif_tx_queue_stopped(nd_q)))
+ netif_tx_wake_queue(nd_q);
+ }
+
+ WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
+ "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
+ tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
+}
+
+/* Receive processing */
+static void *
+nfp_nfdk_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
+{
+ void *frag;
+
+ if (!dp->xdp_prog) {
+ frag = napi_alloc_frag(dp->fl_bufsz);
+ if (unlikely(!frag))
+ return NULL;
+ } else {
+ struct page *page;
+
+ page = dev_alloc_page();
+ if (unlikely(!page))
+ return NULL;
+ frag = page_address(page);
+ }
+
+ *dma_addr = nfp_net_dma_map_rx(dp, frag);
+ if (dma_mapping_error(dp->dev, *dma_addr)) {
+ nfp_net_free_frag(frag, dp->xdp_prog);
+ nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
+ return NULL;
+ }
+
+ return frag;
+}
+
+/**
+ * nfp_nfdk_rx_give_one() - Put mapped skb on the software and hardware rings
+ * @dp: NFP Net data path struct
+ * @rx_ring: RX ring structure
+ * @frag: page fragment buffer
+ * @dma_addr: DMA address of skb mapping
+ */
+static void
+nfp_nfdk_rx_give_one(const struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring,
+ void *frag, dma_addr_t dma_addr)
+{
+ unsigned int wr_idx;
+
+ wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
+
+ nfp_net_dma_sync_dev_rx(dp, dma_addr);
+
+ /* Stash SKB and DMA address away */
+ rx_ring->rxbufs[wr_idx].frag = frag;
+ rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
+
+ /* Fill freelist descriptor */
+ rx_ring->rxds[wr_idx].fld.reserved = 0;
+ rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
+ nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
+ dma_addr + dp->rx_dma_off);
+
+ rx_ring->wr_p++;
+ if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
+ /* Update write pointer of the freelist queue. Make
+ * sure all writes are flushed before telling the hardware.
+ */
+ wmb();
+ nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
+ }
+}
+
+/**
+ * nfp_nfdk_rx_ring_fill_freelist() - Give buffers from the ring to FW
+ * @dp: NFP Net data path struct
+ * @rx_ring: RX ring to fill
+ */
+void nfp_nfdk_rx_ring_fill_freelist(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring)
+{
+ unsigned int i;
+
+ for (i = 0; i < rx_ring->cnt - 1; i++)
+ nfp_nfdk_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
+ rx_ring->rxbufs[i].dma_addr);
+}
+
+/**
+ * nfp_nfdk_rx_csum_has_errors() - group check if rxd has any csum errors
+ * @flags: RX descriptor flags field in CPU byte order
+ */
+static int nfp_nfdk_rx_csum_has_errors(u16 flags)
+{
+ u16 csum_all_checked, csum_all_ok;
+
+ csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
+ csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;
+
+ return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
+}
+
+/**
+ * nfp_nfdk_rx_csum() - set SKB checksum field based on RX descriptor flags
+ * @dp: NFP Net data path struct
+ * @r_vec: per-ring structure
+ * @rxd: Pointer to RX descriptor
+ * @meta: Parsed metadata prepend
+ * @skb: Pointer to SKB
+ */
+static void
+nfp_nfdk_rx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ struct nfp_net_rx_desc *rxd, struct nfp_meta_parsed *meta,
+ struct sk_buff *skb)
+{
+ skb_checksum_none_assert(skb);
+
+ if (!(dp->netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ if (meta->csum_type) {
+ skb->ip_summed = meta->csum_type;
+ skb->csum = meta->csum;
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_complete++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ return;
+ }
+
+ if (nfp_nfdk_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_error++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ return;
+ }
+
+ /* Assume that the firmware will never report inner CSUM_OK unless outer
+ * L4 headers were successfully parsed. FW will always report zero UDP
+ * checksum as CSUM_OK.
+ */
+ if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
+ rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
+ __skb_incr_checksum_unnecessary(skb);
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_ok++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ }
+
+ if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
+ rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
+ __skb_incr_checksum_unnecessary(skb);
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_inner_ok++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ }
+}
+
+static void
+nfp_nfdk_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
+ unsigned int type, __be32 *hash)
+{
+ if (!(netdev->features & NETIF_F_RXHASH))
+ return;
+
+ switch (type) {
+ case NFP_NET_RSS_IPV4:
+ case NFP_NET_RSS_IPV6:
+ case NFP_NET_RSS_IPV6_EX:
+ meta->hash_type = PKT_HASH_TYPE_L3;
+ break;
+ default:
+ meta->hash_type = PKT_HASH_TYPE_L4;
+ break;
+ }
+
+ meta->hash = get_unaligned_be32(hash);
+}
+
+static bool
+nfp_nfdk_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
+ void *data, void *pkt, unsigned int pkt_len, int meta_len)
+{
+ u32 meta_info;
+
+ meta_info = get_unaligned_be32(data);
+ data += 4;
+
+ while (meta_info) {
+ switch (meta_info & NFP_NET_META_FIELD_MASK) {
+ case NFP_NET_META_HASH:
+ meta_info >>= NFP_NET_META_FIELD_SIZE;
+ nfp_nfdk_set_hash(netdev, meta,
+ meta_info & NFP_NET_META_FIELD_MASK,
+ (__be32 *)data);
+ data += 4;
+ break;
+ case NFP_NET_META_MARK:
+ meta->mark = get_unaligned_be32(data);
+ data += 4;
+ break;
+ case NFP_NET_META_PORTID:
+ meta->portid = get_unaligned_be32(data);
+ data += 4;
+ break;
+ case NFP_NET_META_CSUM:
+ meta->csum_type = CHECKSUM_COMPLETE;
+ meta->csum =
+ (__force __wsum)__get_unaligned_cpu32(data);
+ data += 4;
+ break;
+ case NFP_NET_META_RESYNC_INFO:
+ if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
+ pkt_len))
+ return false;
+ data += sizeof(struct nfp_net_tls_resync_req);
+ break;
+ default:
+ return true;
+ }
+
+ meta_info >>= NFP_NET_META_FIELD_SIZE;
+ }
+
+ return data != pkt;
+}
+
+static void
+nfp_nfdk_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
+ struct sk_buff *skb)
+{
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_drops++;
+ /* If we have both skb and rxbuf the replacement buffer allocation
+ * must have failed, count this as an alloc failure.
+ */
+ if (skb && rxbuf)
+ r_vec->rx_replace_buf_alloc_fail++;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ /* skb is built from the frag; free_skb() would free the frag,
+ * so to be able to reuse it we need an extra ref.
+ */
+ if (skb && rxbuf && skb->head == rxbuf->frag)
+ page_ref_inc(virt_to_head_page(rxbuf->frag));
+ if (rxbuf)
+ nfp_nfdk_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
+ if (skb)
+ dev_kfree_skb_any(skb);
+}
+
+static bool nfp_nfdk_xdp_complete(struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ struct nfp_net_rx_ring *rx_ring;
+ u32 qcp_rd_p, done = 0;
+ bool done_all;
+ int todo;
+
+ /* Work out how many descriptors have been transmitted */
+ qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);
+ if (qcp_rd_p == tx_ring->qcp_rd_p)
+ return true;
+
+ todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
+
+ done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
+ todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
+
+ rx_ring = r_vec->rx_ring;
+ while (todo > 0) {
+ int idx = D_IDX(tx_ring, tx_ring->rd_p + done);
+ struct nfp_nfdk_tx_buf *txbuf;
+ unsigned int step = 1;
+
+ txbuf = &tx_ring->ktxbufs[idx];
+ if (!txbuf->raw)
+ goto next;
+
+ if (NFDK_TX_BUF_INFO(txbuf->val) != NFDK_TX_BUF_INFO_SOP) {
+ WARN_ONCE(1, "Unexpected TX buffer in XDP TX ring\n");
+ goto next;
+ }
+
+ /* Two successive txbufs are used to stash the virtual and DMA
+ * addresses respectively; recycle and clean them here.
+ */
+ nfp_nfdk_rx_give_one(dp, rx_ring,
+ (void *)NFDK_TX_BUF_PTR(txbuf[0].val),
+ txbuf[1].dma_addr);
+ txbuf[0].raw = 0;
+ txbuf[1].raw = 0;
+ step = 2;
+
+ u64_stats_update_begin(&r_vec->tx_sync);
+ /* Note: tx_bytes not accumulated. */
+ r_vec->tx_pkts++;
+ u64_stats_update_end(&r_vec->tx_sync);
+next:
+ todo -= step;
+ done += step;
+ }
+
+ tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + done);
+ tx_ring->rd_p += done;
+
+ WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
+ "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
+ tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
+
+ return done_all;
+}
+
+static bool
+nfp_nfdk_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
+ struct nfp_net_tx_ring *tx_ring,
+ struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
+ unsigned int pkt_len, bool *completed)
+{
+ unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA;
+ unsigned int dma_len, type, cnt, dlen_type, tmp_dlen;
+ struct nfp_nfdk_tx_buf *txbuf;
+ struct nfp_nfdk_tx_desc *txd;
+ unsigned int n_descs;
+ dma_addr_t dma_addr;
+ int wr_idx;
+
+ /* Reject if xdp_adjust_tail grew the packet beyond the DMA area */
+ if (pkt_len + dma_off > dma_map_sz)
+ return false;
+
+ /* Make sure there's still at least one block available after
+ * aligning to block boundary, so that the txds used below
+ * won't wrap around the tx_ring.
+ */
+ if (unlikely(nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT))) {
+ if (!*completed) {
+ nfp_nfdk_xdp_complete(tx_ring);
+ *completed = true;
+ }
+
+ if (unlikely(nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT))) {
+ nfp_nfdk_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
+ NULL);
+ return false;
+ }
+ }
+
+ /* Check whether the descriptors would cross a block boundary */
+ n_descs = nfp_nfdk_headlen_to_segs(pkt_len);
+ if ((round_down(tx_ring->wr_p, NFDK_TX_DESC_BLOCK_CNT) !=
+ round_down(tx_ring->wr_p + n_descs, NFDK_TX_DESC_BLOCK_CNT)) ||
+ ((u32)tx_ring->data_pending + pkt_len >
+ NFDK_TX_MAX_DATA_PER_BLOCK)) {
+ unsigned int nop_slots = D_BLOCK_CPL(tx_ring->wr_p);
+
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+ txd = &tx_ring->ktxds[wr_idx];
+ memset(txd, 0,
+ array_size(nop_slots, sizeof(struct nfp_nfdk_tx_desc)));
+
+ tx_ring->data_pending = 0;
+ tx_ring->wr_p += nop_slots;
+ tx_ring->wr_ptr_add += nop_slots;
+ }
+
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+
+ txbuf = &tx_ring->ktxbufs[wr_idx];
+
+ txbuf[0].val = (unsigned long)rxbuf->frag | NFDK_TX_BUF_INFO_SOP;
+ txbuf[1].dma_addr = rxbuf->dma_addr;
+ /* Note: pkt len not stored */
+
+ dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
+ pkt_len, DMA_BIDIRECTIONAL);
+
+ /* Build TX descriptor */
+ txd = &tx_ring->ktxds[wr_idx];
+ dma_len = pkt_len;
+ dma_addr = rxbuf->dma_addr + dma_off;
+
+ if (dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
+ type = NFDK_DESC_TX_TYPE_SIMPLE;
+ else
+ type = NFDK_DESC_TX_TYPE_GATHER;
+
+ /* FIELD_PREP() implicitly truncates to chunk */
+ dma_len -= 1;
+ dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) |
+ FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
+
+ txd->dma_len_type = cpu_to_le16(dlen_type);
+ nfp_desc_set_dma_addr(txd, dma_addr);
+
+ tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
+ dma_len -= tmp_dlen;
+ dma_addr += tmp_dlen + 1;
+ txd++;
+
+ while (dma_len > 0) {
+ dma_len -= 1;
+ dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
+ txd->dma_len_type = cpu_to_le16(dlen_type);
+ nfp_desc_set_dma_addr(txd, dma_addr);
+
+ dlen_type &= NFDK_DESC_TX_DMA_LEN;
+ dma_len -= dlen_type;
+ dma_addr += dlen_type + 1;
+ txd++;
+ }
+
+ (txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP);
+
+ /* Metadata desc */
+ txd->raw = 0;
+ txd++;
+
+ cnt = txd - tx_ring->ktxds - wr_idx;
+ tx_ring->wr_p += cnt;
+ if (tx_ring->wr_p % NFDK_TX_DESC_BLOCK_CNT)
+ tx_ring->data_pending += pkt_len;
+ else
+ tx_ring->data_pending = 0;
+
+ tx_ring->wr_ptr_add += cnt;
+ return true;
+}
+
+/**
+ * nfp_nfdk_rx() - receive up to @budget packets on @rx_ring
+ * @rx_ring: RX ring to receive from
+ * @budget: NAPI budget
+ *
+ * Note, this function is separated out from the napi poll function to
+ * more cleanly separate packet receive code from other bookkeeping
+ * functions performed in the napi poll function.
+ *
+ * Return: Number of packets received.
+ */
+static int nfp_nfdk_rx(struct nfp_net_rx_ring *rx_ring, int budget)
+{
+ struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+ struct nfp_net_tx_ring *tx_ring;
+ struct bpf_prog *xdp_prog;
+ bool xdp_tx_cmpl = false;
+ unsigned int true_bufsz;
+ struct sk_buff *skb;
+ int pkts_polled = 0;
+ struct xdp_buff xdp;
+ int idx;
+
+ xdp_prog = READ_ONCE(dp->xdp_prog);
+ true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
+ xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
+ &rx_ring->xdp_rxq);
+ tx_ring = r_vec->xdp_ring;
+
+ while (pkts_polled < budget) {
+ unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
+ struct nfp_net_rx_buf *rxbuf;
+ struct nfp_net_rx_desc *rxd;
+ struct nfp_meta_parsed meta;
+ bool redir_egress = false;
+ struct net_device *netdev;
+ dma_addr_t new_dma_addr;
+ u32 meta_len_xdp = 0;
+ void *new_frag;
+
+ idx = D_IDX(rx_ring, rx_ring->rd_p);
+
+ rxd = &rx_ring->rxds[idx];
+ if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
+ break;
+
+ /* Memory barrier to ensure that we won't do other reads
+ * before the DD bit.
+ */
+ dma_rmb();
+
+ memset(&meta, 0, sizeof(meta));
+
+ rx_ring->rd_p++;
+ pkts_polled++;
+
+ rxbuf = &rx_ring->rxbufs[idx];
+ /* < meta_len >
+ * <-- [rx_offset] -->
+ * ---------------------------------------------------------
+ * | [XX] | metadata | packet | XXXX |
+ * ---------------------------------------------------------
+ * <---------------- data_len --------------->
+ *
+ * The rx_offset is fixed for all packets, the meta_len can vary
+ * on a packet by packet basis. If rx_offset is set to zero
+ * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
+ * buffer and is immediately followed by the packet (no [XX]).
+ */
+ meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
+ data_len = le16_to_cpu(rxd->rxd.data_len);
+ pkt_len = data_len - meta_len;
+
+ pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
+ if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+ pkt_off += meta_len;
+ else
+ pkt_off += dp->rx_offset;
+ meta_off = pkt_off - meta_len;
+
+ /* Stats update */
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_pkts++;
+ r_vec->rx_bytes += pkt_len;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
+ (dp->rx_offset && meta_len > dp->rx_offset))) {
+ nn_dp_warn(dp, "oversized RX packet metadata %u\n",
+ meta_len);
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ continue;
+ }
+
+ nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
+ data_len);
+
+ if (meta_len) {
+ if (unlikely(nfp_nfdk_parse_meta(dp->netdev, &meta,
+ rxbuf->frag + meta_off,
+ rxbuf->frag + pkt_off,
+ pkt_len, meta_len))) {
+ nn_dp_warn(dp, "invalid RX packet metadata\n");
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf,
+ NULL);
+ continue;
+ }
+ }
+
+ if (xdp_prog && !meta.portid) {
+ void *orig_data = rxbuf->frag + pkt_off;
+ unsigned int dma_off;
+ int act;
+
+ xdp_prepare_buff(&xdp,
+ rxbuf->frag + NFP_NET_RX_BUF_HEADROOM,
+ pkt_off - NFP_NET_RX_BUF_HEADROOM,
+ pkt_len, true);
+
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+ pkt_len = xdp.data_end - xdp.data;
+ pkt_off += xdp.data - orig_data;
+
+ switch (act) {
+ case XDP_PASS:
+ meta_len_xdp = xdp.data - xdp.data_meta;
+ break;
+ case XDP_TX:
+ dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
+ if (unlikely(!nfp_nfdk_tx_xdp_buf(dp, rx_ring,
+ tx_ring,
+ rxbuf,
+ dma_off,
+ pkt_len,
+ &xdp_tx_cmpl)))
+ trace_xdp_exception(dp->netdev,
+ xdp_prog, act);
+ continue;
+ default:
+ bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(dp->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ nfp_nfdk_rx_give_one(dp, rx_ring, rxbuf->frag,
+ rxbuf->dma_addr);
+ continue;
+ }
+ }
+
+ if (likely(!meta.portid)) {
+ netdev = dp->netdev;
+ } else if (meta.portid == NFP_META_PORT_ID_CTRL) {
+ struct nfp_net *nn = netdev_priv(dp->netdev);
+
+ nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
+ pkt_len);
+ nfp_nfdk_rx_give_one(dp, rx_ring, rxbuf->frag,
+ rxbuf->dma_addr);
+ continue;
+ } else {
+ struct nfp_net *nn;
+
+ nn = netdev_priv(dp->netdev);
+ netdev = nfp_app_dev_get(nn->app, meta.portid,
+ &redir_egress);
+ if (unlikely(!netdev)) {
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf,
+ NULL);
+ continue;
+ }
+
+ if (nfp_netdev_is_nfp_repr(netdev))
+ nfp_repr_inc_rx_stats(netdev, pkt_len);
+ }
+
+ skb = build_skb(rxbuf->frag, true_bufsz);
+ if (unlikely(!skb)) {
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ continue;
+ }
+ new_frag = nfp_nfdk_napi_alloc_one(dp, &new_dma_addr);
+ if (unlikely(!new_frag)) {
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+ continue;
+ }
+
+ nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
+
+ nfp_nfdk_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
+
+ skb_reserve(skb, pkt_off);
+ skb_put(skb, pkt_len);
+
+ skb->mark = meta.mark;
+ skb_set_hash(skb, meta.hash, meta.hash_type);
+
+ skb_record_rx_queue(skb, rx_ring->idx);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ nfp_nfdk_rx_csum(dp, r_vec, rxd, &meta, skb);
+
+ if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ le16_to_cpu(rxd->rxd.vlan));
+ if (meta_len_xdp)
+ skb_metadata_set(skb, meta_len_xdp);
+
+ if (likely(!redir_egress)) {
+ napi_gro_receive(&rx_ring->r_vec->napi, skb);
+ } else {
+ skb->dev = netdev;
+ skb_reset_network_header(skb);
+ __skb_push(skb, ETH_HLEN);
+ dev_queue_xmit(skb);
+ }
+ }
+
+ if (xdp_prog) {
+ if (tx_ring->wr_ptr_add)
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
+ !xdp_tx_cmpl)
+ if (!nfp_nfdk_xdp_complete(tx_ring))
+ pkts_polled = budget;
+ }
+
+ return pkts_polled;
+}
+
+/**
+ * nfp_nfdk_poll() - napi poll function
+ * @napi: NAPI structure
+ * @budget: NAPI budget
+ *
+ * Return: number of packets polled.
+ */
+int nfp_nfdk_poll(struct napi_struct *napi, int budget)
+{
+ struct nfp_net_r_vector *r_vec =
+ container_of(napi, struct nfp_net_r_vector, napi);
+ unsigned int pkts_polled = 0;
+
+ if (r_vec->tx_ring)
+ nfp_nfdk_tx_complete(r_vec->tx_ring, budget);
+ if (r_vec->rx_ring)
+ pkts_polled = nfp_nfdk_rx(r_vec->rx_ring, budget);
+
+ if (pkts_polled < budget)
+ if (napi_complete_done(napi, pkts_polled))
+ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+
+ if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
+ struct dim_sample dim_sample = {};
+ unsigned int start;
+ u64 pkts, bytes;
+
+ do {
+ start = u64_stats_fetch_begin(&r_vec->rx_sync);
+ pkts = r_vec->rx_pkts;
+ bytes = r_vec->rx_bytes;
+ } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+
+ dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
+ net_dim(&r_vec->rx_dim, dim_sample);
+ }
+
+ if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
+ struct dim_sample dim_sample = {};
+ unsigned int start;
+ u64 pkts, bytes;
+
+ do {
+ start = u64_stats_fetch_begin(&r_vec->tx_sync);
+ pkts = r_vec->tx_pkts;
+ bytes = r_vec->tx_bytes;
+ } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+
+ dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
+ net_dim(&r_vec->tx_dim, dim_sample);
+ }
+
+ return pkts_polled;
+}
+
+/* Control device data path
+ */
+
+bool
+nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+ struct sk_buff *skb, bool old)
+{
+ u32 cnt, tmp_dlen, dlen_type = 0;
+ struct nfp_net_tx_ring *tx_ring;
+ struct nfp_nfdk_tx_buf *txbuf;
+ struct nfp_nfdk_tx_desc *txd;
+ unsigned int dma_len, type;
+ struct nfp_net_dp *dp;
+ dma_addr_t dma_addr;
+ u64 metadata = 0;
+ int wr_idx;
+
+ dp = &r_vec->nfp_net->dp;
+ tx_ring = r_vec->tx_ring;
+
+ if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
+ nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
+ goto err_free;
+ }
+
+ /* Don't bother counting frags, assume the worst */
+ if (unlikely(nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT))) {
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_busy++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ if (!old)
+ __skb_queue_tail(&r_vec->queue, skb);
+ else
+ __skb_queue_head(&r_vec->queue, skb);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (nfp_app_ctrl_has_meta(nn->app)) {
+ if (unlikely(skb_headroom(skb) < 8)) {
+ nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
+ goto err_free;
+ }
+ metadata = NFDK_DESC_TX_CHAIN_META;
+ put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
+ put_unaligned_be32(FIELD_PREP(NFDK_META_LEN, 8) |
+ FIELD_PREP(NFDK_META_FIELDS,
+ NFP_NET_META_PORTID),
+ skb_push(skb, 4));
+ }
+
+ if (nfp_nfdk_tx_maybe_close_block(tx_ring, 0, skb))
+ goto err_free;
+
+ /* DMA map all */
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
+ txd = &tx_ring->ktxds[wr_idx];
+ txbuf = &tx_ring->ktxbufs[wr_idx];
+
+ dma_len = skb_headlen(skb);
+ if (dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
+ type = NFDK_DESC_TX_TYPE_SIMPLE;
+ else
+ type = NFDK_DESC_TX_TYPE_GATHER;
+
+ dma_addr = dma_map_single(dp->dev, skb->data, dma_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dp->dev, dma_addr))
+ goto err_warn_dma;
+
+ txbuf->skb = skb;
+ txbuf++;
+
+ txbuf->dma_addr = dma_addr;
+ txbuf++;
+
+ dma_len -= 1;
+ dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) |
+ FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
+
+ txd->dma_len_type = cpu_to_le16(dlen_type);
+ nfp_desc_set_dma_addr(txd, dma_addr);
+
+ tmp_dlen = dlen_type & NFDK_DESC_TX_DMA_LEN_HEAD;
+ dma_len -= tmp_dlen;
+ dma_addr += tmp_dlen + 1;
+ txd++;
+
+ while (dma_len > 0) {
+ dma_len -= 1;
+ dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN, dma_len);
+ txd->dma_len_type = cpu_to_le16(dlen_type);
+ nfp_desc_set_dma_addr(txd, dma_addr);
+
+ dlen_type &= NFDK_DESC_TX_DMA_LEN;
+ dma_len -= dlen_type;
+ dma_addr += dlen_type + 1;
+ txd++;
+ }
+
+ (txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP);
+
+ /* Metadata desc */
+ txd->raw = cpu_to_le64(metadata);
+ txd++;
+
+ cnt = txd - tx_ring->ktxds - wr_idx;
+ if (unlikely(round_down(wr_idx, NFDK_TX_DESC_BLOCK_CNT) !=
+ round_down(wr_idx + cnt - 1, NFDK_TX_DESC_BLOCK_CNT)))
+ goto err_warn_overflow;
+
+ tx_ring->wr_p += cnt;
+ if (tx_ring->wr_p % NFDK_TX_DESC_BLOCK_CNT)
+ tx_ring->data_pending += skb->len;
+ else
+ tx_ring->data_pending = 0;
+
+ tx_ring->wr_ptr_add += cnt;
+ nfp_net_tx_xmit_more_flush(tx_ring);
+
+ return NETDEV_TX_OK;
+
+err_warn_overflow:
+ WARN_ONCE(1, "unable to fit packet into a descriptor wr_idx:%d head:%d frags:%d cnt:%d",
+ wr_idx, skb_headlen(skb), 0, cnt);
+ txbuf--;
+ dma_unmap_single(dp->dev, txbuf->dma_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ txbuf->raw = 0;
+err_warn_dma:
+ nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
+err_free:
+ u64_stats_update_begin(&r_vec->tx_sync);
+ r_vec->tx_errors++;
+ u64_stats_update_end(&r_vec->tx_sync);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(&r_vec->queue)))
+ if (nfp_nfdk_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
+ return;
+}
+
+static bool
+nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
+{
+ u32 meta_type, meta_tag;
+
+ if (!nfp_app_ctrl_has_meta(nn->app))
+ return !meta_len;
+
+ if (meta_len != 8)
+ return false;
+
+ meta_type = get_unaligned_be32(data);
+ meta_tag = get_unaligned_be32(data + 4);
+
+ return (meta_type == NFP_NET_META_PORTID &&
+ meta_tag == NFP_META_PORT_ID_CTRL);
+}
+
+static bool
+nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
+ struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
+{
+ unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
+ struct nfp_net_rx_buf *rxbuf;
+ struct nfp_net_rx_desc *rxd;
+ dma_addr_t new_dma_addr;
+ struct sk_buff *skb;
+ void *new_frag;
+ int idx;
+
+ idx = D_IDX(rx_ring, rx_ring->rd_p);
+
+ rxd = &rx_ring->rxds[idx];
+ if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
+ return false;
+
+ /* Memory barrier to ensure that we won't do other reads
+ * before the DD bit.
+ */
+ dma_rmb();
+
+ rx_ring->rd_p++;
+
+ rxbuf = &rx_ring->rxbufs[idx];
+ meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
+ data_len = le16_to_cpu(rxd->rxd.data_len);
+ pkt_len = data_len - meta_len;
+
+ pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
+ if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+ pkt_off += meta_len;
+ else
+ pkt_off += dp->rx_offset;
+ meta_off = pkt_off - meta_len;
+
+ /* Stats update */
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_pkts++;
+ r_vec->rx_bytes += pkt_len;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);
+
+ if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
+ nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
+ meta_len);
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ return true;
+ }
+
+ skb = build_skb(rxbuf->frag, dp->fl_bufsz);
+ if (unlikely(!skb)) {
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
+ return true;
+ }
+ new_frag = nfp_nfdk_napi_alloc_one(dp, &new_dma_addr);
+ if (unlikely(!new_frag)) {
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+ return true;
+ }
+
+ nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
+
+ nfp_nfdk_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
+
+ skb_reserve(skb, pkt_off);
+ skb_put(skb, pkt_len);
+
+ nfp_app_ctrl_rx(nn->app, skb);
+
+ return true;
+}
+
+static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
+{
+ struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
+ struct nfp_net *nn = r_vec->nfp_net;
+ struct nfp_net_dp *dp = &nn->dp;
+ unsigned int budget = 512;
+
+ while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
+ continue;
+
+ return budget;
+}
+
+void nfp_nfdk_ctrl_poll(struct tasklet_struct *t)
+{
+ struct nfp_net_r_vector *r_vec = from_tasklet(r_vec, t, tasklet);
+
+ spin_lock(&r_vec->lock);
+ nfp_nfdk_tx_complete(r_vec->tx_ring, 0);
+ __nfp_ctrl_tx_queued(r_vec);
+ spin_unlock(&r_vec->lock);
+
+ if (nfp_ctrl_rx(r_vec)) {
+ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+ } else {
+ tasklet_schedule(&r_vec->tasklet);
+ nn_dp_warn(&r_vec->nfp_net->dp,
+ "control message budget exceeded!\n");
+ }
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h b/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h
new file mode 100644
index 000000000000..c41e0975eb73
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#ifndef _NFP_DP_NFDK_H_
+#define _NFP_DP_NFDK_H_
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+#define NFDK_TX_DESC_PER_SIMPLE_PKT 2
+
+#define NFDK_TX_MAX_DATA_PER_HEAD SZ_4K
+#define NFDK_TX_MAX_DATA_PER_DESC SZ_16K
+#define NFDK_TX_DESC_BLOCK_SZ 256
+#define NFDK_TX_DESC_BLOCK_CNT (NFDK_TX_DESC_BLOCK_SZ / \
+ sizeof(struct nfp_nfdk_tx_desc))
+#define NFDK_TX_DESC_STOP_CNT (NFDK_TX_DESC_BLOCK_CNT * \
+ NFDK_TX_DESC_PER_SIMPLE_PKT)
+#define NFDK_TX_MAX_DATA_PER_BLOCK SZ_64K
+#define NFDK_TX_DESC_GATHER_MAX 17
+
+/* TX descriptor format */
+
+#define NFDK_DESC_TX_MSS_MASK GENMASK(13, 0)
+
+#define NFDK_DESC_TX_CHAIN_META BIT(3)
+#define NFDK_DESC_TX_ENCAP BIT(2)
+#define NFDK_DESC_TX_L4_CSUM BIT(1)
+#define NFDK_DESC_TX_L3_CSUM BIT(0)
+
+#define NFDK_DESC_TX_DMA_LEN_HEAD GENMASK(11, 0)
+#define NFDK_DESC_TX_TYPE_HEAD GENMASK(15, 12)
+#define NFDK_DESC_TX_DMA_LEN GENMASK(13, 0)
+#define NFDK_DESC_TX_TYPE_NOP 0
+#define NFDK_DESC_TX_TYPE_GATHER 1
+#define NFDK_DESC_TX_TYPE_TSO 2
+#define NFDK_DESC_TX_TYPE_SIMPLE 8
+#define NFDK_DESC_TX_EOP BIT(14)
+
+#define NFDK_META_LEN GENMASK(7, 0)
+#define NFDK_META_FIELDS GENMASK(31, 8)
+
+#define D_BLOCK_CPL(idx) (NFDK_TX_DESC_BLOCK_CNT - \
+ (idx) % NFDK_TX_DESC_BLOCK_CNT)
+
+struct nfp_nfdk_tx_desc {
+ union {
+ struct {
+ u8 dma_addr_hi; /* High bits of host buf address */
+ u8 padding; /* Must be zero */
+ __le16 dma_len_type; /* Length to DMA for this desc */
+ __le32 dma_addr_lo; /* Low 32bit of host buf addr */
+ };
+
+ struct {
+ __le16 mss; /* MSS to be used for LSO */
+ u8 lso_hdrlen; /* LSO, TCP payload offset */
+ u8 lso_totsegs; /* LSO, total segments */
+ u8 l3_offset; /* L3 header offset */
+ u8 l4_offset; /* L4 header offset */
+ __le16 lso_meta_res; /* Rsvd bits in TSO metadata */
+ };
+
+ struct {
+ u8 flags; /* TX Flags, see @NFDK_DESC_TX_* */
+ u8 reserved[7]; /* meta byte placeholder */
+ };
+
+ __le32 vals[2];
+ __le64 raw;
+ };
+};
+
+/* The device doesn't make use of the 2 or 3 least significant bits of the address
+ * due to alignment constraints. The driver can make use of those bits to carry
+ * information about the buffer before giving it to the device.
+ *
+ * NOTE: The driver must clear the lower bits before handing the buffer to the
+ * device.
+ *
+ * - NFDK_TX_BUF_INFO_SOP - Start of a packet
+ * Mark the buffer as a start of a packet. This is used in the XDP TX process
+ * to stash the virtual and DMA addresses so that they can be recycled when the TX
+ * operation is completed.
+ */
+#define NFDK_TX_BUF_PTR(val) ((val) & ~(sizeof(void *) - 1))
+#define NFDK_TX_BUF_INFO(val) ((val) & (sizeof(void *) - 1))
+#define NFDK_TX_BUF_INFO_SOP BIT(0)
+
+struct nfp_nfdk_tx_buf {
+ union {
+ /* First slot */
+ union {
+ struct sk_buff *skb;
+ void *frag;
+ unsigned long val;
+ };
+
+ /* 1 + nr_frags next slots */
+ dma_addr_t dma_addr;
+
+ /* TSO (optional) */
+ struct {
+ u32 pkt_cnt;
+ u32 real_len;
+ };
+
+ u64 raw;
+ };
+};
+
+static inline int nfp_nfdk_headlen_to_segs(unsigned int headlen)
+{
+ /* First descriptor fits less data, so adjust for that */
+ return DIV_ROUND_UP(headlen +
+ NFDK_TX_MAX_DATA_PER_DESC -
+ NFDK_TX_MAX_DATA_PER_HEAD,
+ NFDK_TX_MAX_DATA_PER_DESC);
+}
+
+int nfp_nfdk_poll(struct napi_struct *napi, int budget);
+netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev);
+bool
+nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+ struct sk_buff *skb, bool old);
+void nfp_nfdk_ctrl_poll(struct tasklet_struct *t);
+void nfp_nfdk_rx_ring_fill_freelist(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring);
+#endif
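nfp_nfdk_headlen_to_segs() above works out how many descriptors an skb head needs: the first descriptor carries at most NFDK_TX_MAX_DATA_PER_HEAD (4 KiB), every later one up to NFDK_TX_MAX_DATA_PER_DESC (16 KiB). A quick stand-alone check of that arithmetic (a sketch under those two constants, not driver code):

#include <stdio.h>

#define PER_HEAD 4096u		/* NFDK_TX_MAX_DATA_PER_HEAD */
#define PER_DESC 16384u		/* NFDK_TX_MAX_DATA_PER_DESC */

/* Same arithmetic as nfp_nfdk_headlen_to_segs(): ceiling division after
 * compensating for the smaller first descriptor.
 */
static unsigned int headlen_to_segs(unsigned int headlen)
{
	return (headlen + PER_DESC - PER_HEAD + PER_DESC - 1) / PER_DESC;
}

int main(void)
{
	printf("%u\n", headlen_to_segs(1500));	/* 1 */
	printf("%u\n", headlen_to_segs(4096));	/* 1 */
	printf("%u\n", headlen_to_segs(4097));	/* 2 */
	printf("%u\n", headlen_to_segs(20481));	/* 3 */
	return 0;
}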
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/rings.c b/drivers/net/ethernet/netronome/nfp/nfdk/rings.c
new file mode 100644
index 000000000000..301f11108826
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/rings.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <linux/seq_file.h>
+
+#include "../nfp_net.h"
+#include "../nfp_net_dp.h"
+#include "nfdk.h"
+
+static void
+nfp_nfdk_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+ struct device *dev = dp->dev;
+ struct netdev_queue *nd_q;
+
+ while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
+ const skb_frag_t *frag, *fend;
+ unsigned int size, n_descs = 1;
+ struct nfp_nfdk_tx_buf *txbuf;
+ int nr_frags, rd_idx;
+ struct sk_buff *skb;
+
+ rd_idx = D_IDX(tx_ring, tx_ring->rd_p);
+ txbuf = &tx_ring->ktxbufs[rd_idx];
+
+ skb = txbuf->skb;
+ if (!skb) {
+ n_descs = D_BLOCK_CPL(tx_ring->rd_p);
+ goto next;
+ }
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ txbuf++;
+
+ /* Unmap head */
+ size = skb_headlen(skb);
+ dma_unmap_single(dev, txbuf->dma_addr, size, DMA_TO_DEVICE);
+ n_descs += nfp_nfdk_headlen_to_segs(size);
+ txbuf++;
+
+ frag = skb_shinfo(skb)->frags;
+ fend = frag + nr_frags;
+ for (; frag < fend; frag++) {
+ size = skb_frag_size(frag);
+ dma_unmap_page(dev, txbuf->dma_addr,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ n_descs += DIV_ROUND_UP(size,
+ NFDK_TX_MAX_DATA_PER_DESC);
+ txbuf++;
+ }
+
+ if (skb_is_gso(skb))
+ n_descs++;
+
+ dev_kfree_skb_any(skb);
+next:
+ tx_ring->rd_p += n_descs;
+ }
+
+ memset(tx_ring->txds, 0, tx_ring->size);
+ tx_ring->data_pending = 0;
+ tx_ring->wr_p = 0;
+ tx_ring->rd_p = 0;
+ tx_ring->qcp_rd_p = 0;
+ tx_ring->wr_ptr_add = 0;
+
+ if (tx_ring->is_xdp || !dp->netdev)
+ return;
+
+ nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
+ netdev_tx_reset_queue(nd_q);
+}
+
+static void nfp_nfdk_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+
+ kvfree(tx_ring->ktxbufs);
+
+ if (tx_ring->ktxds)
+ dma_free_coherent(dp->dev, tx_ring->size,
+ tx_ring->ktxds, tx_ring->dma);
+
+ tx_ring->cnt = 0;
+ tx_ring->txbufs = NULL;
+ tx_ring->txds = NULL;
+ tx_ring->dma = 0;
+ tx_ring->size = 0;
+}
+
+static int
+nfp_nfdk_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+ struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
+
+ tx_ring->cnt = dp->txd_cnt * NFDK_TX_DESC_PER_SIMPLE_PKT;
+ tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->ktxds));
+ tx_ring->ktxds = dma_alloc_coherent(dp->dev, tx_ring->size,
+ &tx_ring->dma,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!tx_ring->ktxds) {
+ netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
+ tx_ring->cnt);
+ goto err_alloc;
+ }
+
+ tx_ring->ktxbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->ktxbufs),
+ GFP_KERNEL);
+ if (!tx_ring->ktxbufs)
+ goto err_alloc;
+
+ if (!tx_ring->is_xdp && dp->netdev)
+ netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
+ tx_ring->idx);
+
+ return 0;
+
+err_alloc:
+ nfp_nfdk_tx_ring_free(tx_ring);
+ return -ENOMEM;
+}
+
+static void
+nfp_nfdk_tx_ring_bufs_free(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring)
+{
+}
+
+static int
+nfp_nfdk_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring)
+{
+ return 0;
+}
+
+static void
+nfp_nfdk_print_tx_descs(struct seq_file *file,
+ struct nfp_net_r_vector *r_vec,
+ struct nfp_net_tx_ring *tx_ring,
+ u32 d_rd_p, u32 d_wr_p)
+{
+ struct nfp_nfdk_tx_desc *txd;
+ u32 txd_cnt = tx_ring->cnt;
+ int i;
+
+ for (i = 0; i < txd_cnt; i++) {
+ txd = &tx_ring->ktxds[i];
+
+ seq_printf(file, "%04d: 0x%08x 0x%08x 0x%016llx", i,
+ txd->vals[0], txd->vals[1], tx_ring->ktxbufs[i].raw);
+
+ if (i == tx_ring->rd_p % txd_cnt)
+ seq_puts(file, " H_RD");
+ if (i == tx_ring->wr_p % txd_cnt)
+ seq_puts(file, " H_WR");
+ if (i == d_rd_p % txd_cnt)
+ seq_puts(file, " D_RD");
+ if (i == d_wr_p % txd_cnt)
+ seq_puts(file, " D_WR");
+
+ seq_putc(file, '\n');
+ }
+}
+
+#define NFP_NFDK_CFG_CTRL_SUPPORTED \
+ (NFP_NET_CFG_CTRL_ENABLE | NFP_NET_CFG_CTRL_PROMISC | \
+ NFP_NET_CFG_CTRL_L2BC | NFP_NET_CFG_CTRL_L2MC | \
+ NFP_NET_CFG_CTRL_RXCSUM | NFP_NET_CFG_CTRL_TXCSUM | \
+ NFP_NET_CFG_CTRL_RXVLAN | \
+ NFP_NET_CFG_CTRL_GATHER | NFP_NET_CFG_CTRL_LSO | \
+ NFP_NET_CFG_CTRL_CTAG_FILTER | NFP_NET_CFG_CTRL_CMSG_DATA | \
+ NFP_NET_CFG_CTRL_RINGCFG | NFP_NET_CFG_CTRL_IRQMOD | \
+ NFP_NET_CFG_CTRL_TXRWB | \
+ NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE | \
+ NFP_NET_CFG_CTRL_BPF | NFP_NET_CFG_CTRL_LSO2 | \
+ NFP_NET_CFG_CTRL_RSS2 | NFP_NET_CFG_CTRL_CSUM_COMPLETE | \
+ NFP_NET_CFG_CTRL_LIVE_ADDR)
+
+const struct nfp_dp_ops nfp_nfdk_ops = {
+ .version = NFP_NFD_VER_NFDK,
+ .tx_min_desc_per_pkt = NFDK_TX_DESC_PER_SIMPLE_PKT,
+ .cap_mask = NFP_NFDK_CFG_CTRL_SUPPORTED,
+ .poll = nfp_nfdk_poll,
+ .ctrl_poll = nfp_nfdk_ctrl_poll,
+ .xmit = nfp_nfdk_tx,
+ .ctrl_tx_one = nfp_nfdk_ctrl_tx_one,
+ .rx_ring_fill_freelist = nfp_nfdk_rx_ring_fill_freelist,
+ .tx_ring_alloc = nfp_nfdk_tx_ring_alloc,
+ .tx_ring_reset = nfp_nfdk_tx_ring_reset,
+ .tx_ring_free = nfp_nfdk_tx_ring_free,
+ .tx_ring_bufs_alloc = nfp_nfdk_tx_ring_bufs_alloc,
+ .tx_ring_bufs_free = nfp_nfdk_tx_ring_bufs_free,
+ .print_tx_descs = nfp_nfdk_print_tx_descs
+};
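nfp_nfdk_ops is one instance of the nfp_dp_ops table; the core is expected to call ring and TX operations through such a table so the NFD3 and NFDK datapaths can share the rest of the driver. The sketch below shows the generic dispatch pattern only; the struct layout and helper names are illustrative, not the real nfp_dp_ops definition:

#include <stdio.h>

/* Illustrative dispatch table; the real nfp_dp_ops has more hooks. */
struct dp_ops {
	const char *name;
	int (*tx_ring_alloc)(unsigned int txd_cnt);
};

static int nfd3_tx_ring_alloc(unsigned int txd_cnt)
{
	printf("nfd3: %u descriptors\n", txd_cnt);
	return 0;
}

static int nfdk_tx_ring_alloc(unsigned int txd_cnt)
{
	/* NFDK uses two descriptors per simple packet. */
	printf("nfdk: %u descriptors\n", txd_cnt * 2);
	return 0;
}

static const struct dp_ops nfd3_ops = { "nfd3", nfd3_tx_ring_alloc };
static const struct dp_ops nfdk_ops = { "nfdk", nfdk_tx_ring_alloc };

int main(void)
{
	const struct dp_ops *ops = &nfdk_ops;	/* selected e.g. from the fw version */

	return ops->tx_ring_alloc(4096);
}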
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index 3a973282b2bb..09f250e74dfa 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -121,7 +121,7 @@ struct nfp_reprs *
nfp_reprs_get_locked(struct nfp_app *app, enum nfp_repr_type type)
{
return rcu_dereference_protected(app->reprs[type],
- lockdep_is_held(&app->pf->lock));
+ nfp_app_is_locked(app));
}
struct nfp_reprs *
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index 3e9baff07100..dd56207df246 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -75,7 +75,7 @@ extern const struct nfp_app_type app_abm;
* @bpf: BPF ndo offload-related calls
* @xdp_offload: offload an XDP program
* @eswitch_mode_get: get SR-IOV eswitch mode
- * @eswitch_mode_set: set SR-IOV eswitch mode (under pf->lock)
+ * @eswitch_mode_set: set SR-IOV eswitch mode
* @sriov_enable: app-specific sriov initialisation
* @sriov_disable: app-specific sriov clean-up
* @dev_get: get representor or internal port representing netdev
@@ -174,6 +174,16 @@ struct nfp_app {
void *priv;
};
+static inline void assert_nfp_app_locked(struct nfp_app *app)
+{
+ devl_assert_locked(priv_to_devlink(app->pf));
+}
+
+static inline bool nfp_app_is_locked(struct nfp_app *app)
+{
+ return devl_lock_is_held(priv_to_devlink(app->pf));
+}
+
void nfp_check_rhashtable_empty(void *ptr, void *arg);
bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
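nfp_app_is_locked() and assert_nfp_app_locked() above rely on callers holding the devlink instance lock rather than the removed pf->lock. A hedged sketch of the new calling convention; nfp_example_reconfig is a placeholder name, only the devl_* calls are the real API:

#include <net/devlink.h>

/* Sketch only: shows the locking pattern this series converts to. */
static int nfp_example_reconfig(struct nfp_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);

	devl_lock(devlink);
	/* ... work that was previously guarded by pf->lock ... */
	devl_unlock(devlink);

	return 0;
}

Deeper callees can assert ownership with devl_assert_locked(), as assert_nfp_app_locked() does, instead of re-taking the lock.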
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index bea978df7713..405786c00334 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -26,12 +26,11 @@ nfp_devlink_fill_eth_port(struct nfp_port *port,
}
static int
-nfp_devlink_fill_eth_port_from_id(struct nfp_pf *pf, unsigned int port_index,
+nfp_devlink_fill_eth_port_from_id(struct nfp_pf *pf,
+ struct devlink_port *dl_port,
struct nfp_eth_table_port *copy)
{
- struct nfp_port *port;
-
- port = nfp_port_from_id(pf, NFP_PORT_PHYS_PORT, port_index);
+ struct nfp_port *port = container_of(dl_port, struct nfp_port, dl_port);
return nfp_devlink_fill_eth_port(port, copy);
}
@@ -62,7 +61,7 @@ nfp_devlink_set_lanes(struct nfp_pf *pf, unsigned int idx, unsigned int lanes)
}
static int
-nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
+nfp_devlink_port_split(struct devlink *devlink, struct devlink_port *port,
unsigned int count, struct netlink_ext_ack *extack)
{
struct nfp_pf *pf = devlink_priv(devlink);
@@ -70,33 +69,25 @@ nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
unsigned int lanes;
int ret;
- mutex_lock(&pf->lock);
-
rtnl_lock();
- ret = nfp_devlink_fill_eth_port_from_id(pf, port_index, &eth_port);
+ ret = nfp_devlink_fill_eth_port_from_id(pf, port, &eth_port);
rtnl_unlock();
if (ret)
- goto out;
+ return ret;
- if (eth_port.port_lanes % count) {
- ret = -EINVAL;
- goto out;
- }
+ if (eth_port.port_lanes % count)
+ return -EINVAL;
/* Special case the 100G CXP -> 2x40G split */
lanes = eth_port.port_lanes / count;
if (eth_port.lanes == 10 && count == 2)
lanes = 8 / count;
- ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes);
-out:
- mutex_unlock(&pf->lock);
-
- return ret;
+ return nfp_devlink_set_lanes(pf, eth_port.index, lanes);
}
static int
-nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index,
+nfp_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,
struct netlink_ext_ack *extack)
{
struct nfp_pf *pf = devlink_priv(devlink);
@@ -104,29 +95,21 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index,
unsigned int lanes;
int ret;
- mutex_lock(&pf->lock);
-
rtnl_lock();
- ret = nfp_devlink_fill_eth_port_from_id(pf, port_index, &eth_port);
+ ret = nfp_devlink_fill_eth_port_from_id(pf, port, &eth_port);
rtnl_unlock();
if (ret)
- goto out;
+ return ret;
- if (!eth_port.is_split) {
- ret = -EINVAL;
- goto out;
- }
+ if (!eth_port.is_split)
+ return -EINVAL;
/* Special case the 100G CXP -> 2x40G unsplit */
lanes = eth_port.port_lanes;
if (eth_port.port_lanes == 8)
lanes = 10;
- ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes);
-out:
- mutex_unlock(&pf->lock);
-
- return ret;
+ return nfp_devlink_set_lanes(pf, eth_port.index, lanes);
}
static int
@@ -161,13 +144,8 @@ static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
struct nfp_pf *pf = devlink_priv(devlink);
- int ret;
-
- mutex_lock(&pf->lock);
- ret = nfp_app_eswitch_mode_set(pf->app, mode);
- mutex_unlock(&pf->lock);
- return ret;
+ return nfp_app_eswitch_mode_set(pf->app, mode);
}
static const struct nfp_devlink_versions_simple {
@@ -375,12 +353,12 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
devlink = priv_to_devlink(app->pf);
- return devlink_port_register(devlink, &port->dl_port, port->eth_id);
+ return devl_port_register(devlink, &port->dl_port, port->eth_id);
}
void nfp_devlink_port_unregister(struct nfp_port *port)
{
- devlink_port_unregister(&port->dl_port);
+ devl_port_unregister(&port->dl_port);
}
void nfp_devlink_port_type_eth_set(struct nfp_port *port)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index bb3b8a7f6c5d..eeda39e34f84 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -19,6 +19,7 @@
#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
+#include "nfpcore/nfp_dev.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_nsp.h"
@@ -32,17 +33,21 @@
static const char nfp_driver_name[] = "nfp";
static const struct pci_device_id nfp_pci_device_ids[] = {
- { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000,
+ { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP3800,
PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
- PCI_ANY_ID, 0,
+ PCI_ANY_ID, 0, NFP_DEV_NFP3800,
+ },
+ { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000,
+ PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
+ PCI_ANY_ID, 0, NFP_DEV_NFP6000,
},
{ PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP5000,
PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
- PCI_ANY_ID, 0,
+ PCI_ANY_ID, 0, NFP_DEV_NFP6000,
},
- { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000,
+ { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000,
PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
- PCI_ANY_ID, 0,
+ PCI_ANY_ID, 0, NFP_DEV_NFP6000,
},
{ 0, } /* Required last entry. */
};
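Each PCI ID entry now carries an NFP_DEV_* constant in driver_data, which the probe path below uses to index a per-ASIC parameter table (dev_info = &nfp_dev_info[pci_id->driver_data]). A small stand-alone sketch of that lookup pattern; the struct fields and the NFP3800 numbers here are placeholders, only the 40-bit DMA mask and the 0x80000 queue offset for the NFP6000 follow from this diff:

#include <stdio.h>

enum { DEV_NFP3800, DEV_NFP6000 };

/* Illustrative only; the real struct nfp_dev_info lives in nfpcore/nfp_dev.h */
struct dev_info_example {
	unsigned long long dma_mask;
	unsigned int qc_addr_offset;
};

static const struct dev_info_example dev_info[] = {
	[DEV_NFP3800] = { .dma_mask = (1ULL << 48) - 1, .qc_addr_offset = 0 },	/* placeholder values */
	[DEV_NFP6000] = { .dma_mask = (1ULL << 40) - 1, .qc_addr_offset = 0x80000 },
};

int main(void)
{
	unsigned long driver_data = DEV_NFP6000;	/* from the PCI ID table */
	const struct dev_info_example *info = &dev_info[driver_data];

	printf("dma_mask=%llx qc_off=%x\n", info->dma_mask, info->qc_addr_offset);
	return 0;
}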
@@ -222,6 +227,7 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
struct nfp_pf *pf = pci_get_drvdata(pdev);
+ struct devlink *devlink;
int err;
if (num_vfs > pf->limit_vfs) {
@@ -236,7 +242,8 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
return err;
}
- mutex_lock(&pf->lock);
+ devlink = priv_to_devlink(pf);
+ devl_lock(devlink);
err = nfp_app_sriov_enable(pf->app, num_vfs);
if (err) {
@@ -250,11 +257,11 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
dev_dbg(&pdev->dev, "Created %d VFs.\n", pf->num_vfs);
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
return num_vfs;
err_sriov_disable:
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
pci_disable_sriov(pdev);
return err;
#endif
@@ -265,8 +272,10 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
{
#ifdef CONFIG_PCI_IOV
struct nfp_pf *pf = pci_get_drvdata(pdev);
+ struct devlink *devlink;
- mutex_lock(&pf->lock);
+ devlink = priv_to_devlink(pf);
+ devl_lock(devlink);
/* If the VFs are assigned we cannot shut down SR-IOV without
* causing issues, so just leave the hardware available but
@@ -274,7 +283,7 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
*/
if (pci_vfs_assigned(pdev)) {
dev_warn(&pdev->dev, "Disabling while VFs assigned - VFs will not be deallocated\n");
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
return -EPERM;
}
@@ -282,7 +291,7 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
pf->num_vfs = 0;
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
pci_disable_sriov(pdev);
dev_dbg(&pdev->dev, "Removed VFs.\n");
@@ -667,6 +676,7 @@ static int nfp_pf_find_rtsyms(struct nfp_pf *pf)
static int nfp_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
+ const struct nfp_dev_info *dev_info;
struct devlink *devlink;
struct nfp_pf *pf;
int err;
@@ -675,14 +685,15 @@ static int nfp_pci_probe(struct pci_dev *pdev,
pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000_VF)
dev_warn(&pdev->dev, "Binding NFP VF device to the NFP PF driver, the VF driver is called 'nfp_netvf'\n");
+ dev_info = &nfp_dev_info[pci_id->driver_data];
+
err = pci_enable_device(pdev);
if (err < 0)
return err;
pci_set_master(pdev);
- err = dma_set_mask_and_coherent(&pdev->dev,
- DMA_BIT_MASK(NFP_NET_MAX_DMA_BITS));
+ err = dma_set_mask_and_coherent(&pdev->dev, dev_info->dma_mask);
if (err)
goto err_pci_disable;
@@ -700,9 +711,9 @@ static int nfp_pci_probe(struct pci_dev *pdev,
pf = devlink_priv(devlink);
INIT_LIST_HEAD(&pf->vnics);
INIT_LIST_HEAD(&pf->ports);
- mutex_init(&pf->lock);
pci_set_drvdata(pdev, pf);
pf->pdev = pdev;
+ pf->dev_info = dev_info;
pf->wq = alloc_workqueue("nfp-%s", 0, 2, pci_name(pdev));
if (!pf->wq) {
@@ -710,7 +721,7 @@ static int nfp_pci_probe(struct pci_dev *pdev,
goto err_pci_priv_unset;
}
- pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev);
+ pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev, dev_info);
if (IS_ERR(pf->cpp)) {
err = PTR_ERR(pf->cpp);
goto err_disable_msix;
@@ -790,7 +801,6 @@ err_disable_msix:
destroy_workqueue(pf->wq);
err_pci_priv_unset:
pci_set_drvdata(pdev, NULL);
- mutex_destroy(&pf->lock);
devlink_free(devlink);
err_rel_regions:
pci_release_regions(pdev);
@@ -827,7 +837,6 @@ static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw)
kfree(pf->eth_tbl);
kfree(pf->nspi);
- mutex_destroy(&pf->lock);
devlink_free(priv_to_devlink(pf));
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index a7dede946a33..f56ca11de134 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -13,7 +13,6 @@
#include <linux/list.h>
#include <linux/types.h>
#include <linux/msi.h>
-#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
#include <net/devlink.h>
@@ -48,6 +47,7 @@ struct nfp_dumpspec {
/**
* struct nfp_pf - NFP PF-specific device structure
* @pdev: Backpointer to PCI device
+ * @dev_info: NFP ASIC params
* @cpp: Pointer to the CPP handle
* @app: Pointer to the APP handle
* @data_vnic_bar: Pointer to the CPP area for the data vNICs' BARs
@@ -84,10 +84,12 @@ struct nfp_dumpspec {
* @port_refresh_work: Work entry for taking netdevs out
* @shared_bufs: Array of shared buffer structures if FW has any SBs
* @num_shared_bufs: Number of elements in @shared_bufs
- * @lock: Protects all fields which may change after probe
+ *
+ * Fields which may change after probe are protected by the devlink instance lock.
*/
struct nfp_pf {
struct pci_dev *pdev;
+ const struct nfp_dev_info *dev_info;
struct nfp_cpp *cpp;
@@ -139,8 +141,6 @@ struct nfp_pf {
struct nfp_shared_buf *shared_bufs;
unsigned int num_shared_bufs;
-
- struct mutex lock;
};
extern struct pci_driver nfp_netvf_pci_driver;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 0b1865e9f0b5..428783b7018b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -63,9 +63,6 @@
#define NFP_NET_Q0_BAR 2
#define NFP_NET_Q1_BAR 4 /* OBSOLETE */
-/* Max bits in DMA address */
-#define NFP_NET_MAX_DMA_BITS 40
-
/* Default size for MTU and freelist buffer sizes */
#define NFP_NET_DEFAULT_MTU 1500U
@@ -85,11 +82,6 @@
NFP_NET_MAX_TX_RINGS : NFP_NET_MAX_RX_RINGS)
#define NFP_NET_MAX_IRQS (NFP_NET_NON_Q_VECTORS + NFP_NET_MAX_R_VECS)
-#define NFP_NET_MIN_TX_DESCS 256 /* Min. # of Tx descs per ring */
-#define NFP_NET_MIN_RX_DESCS 256 /* Min. # of Rx descs per ring */
-#define NFP_NET_MAX_TX_DESCS (256 * 1024) /* Max. # of Tx descs per ring */
-#define NFP_NET_MAX_RX_DESCS (256 * 1024) /* Max. # of Rx descs per ring */
-
#define NFP_NET_TX_DESCS_DEFAULT 4096 /* Default # of Tx descs per ring */
#define NFP_NET_RX_DESCS_DEFAULT 4096 /* Default # of Rx descs per ring */
@@ -105,10 +97,19 @@
/* Forward declarations */
struct nfp_cpp;
+struct nfp_dev_info;
+struct nfp_dp_ops;
struct nfp_eth_table_port;
struct nfp_net;
struct nfp_net_r_vector;
struct nfp_port;
+struct xsk_buff_pool;
+
+struct nfp_nfd3_tx_desc;
+struct nfp_nfd3_tx_buf;
+
+struct nfp_nfdk_tx_desc;
+struct nfp_nfdk_tx_buf;
/* Convenience macro for wrapping descriptor index on ring size */
#define D_IDX(ring, idx) ((idx) & ((ring)->cnt - 1))
@@ -123,86 +124,25 @@ struct nfp_port;
__d->dma_addr_hi = upper_32_bits(__addr) & 0xff; \
} while (0)
-/* TX descriptor format */
-
-#define PCIE_DESC_TX_EOP BIT(7)
-#define PCIE_DESC_TX_OFFSET_MASK GENMASK(6, 0)
-#define PCIE_DESC_TX_MSS_MASK GENMASK(13, 0)
-
-/* Flags in the host TX descriptor */
-#define PCIE_DESC_TX_CSUM BIT(7)
-#define PCIE_DESC_TX_IP4_CSUM BIT(6)
-#define PCIE_DESC_TX_TCP_CSUM BIT(5)
-#define PCIE_DESC_TX_UDP_CSUM BIT(4)
-#define PCIE_DESC_TX_VLAN BIT(3)
-#define PCIE_DESC_TX_LSO BIT(2)
-#define PCIE_DESC_TX_ENCAP BIT(1)
-#define PCIE_DESC_TX_O_IP4_CSUM BIT(0)
-
-struct nfp_net_tx_desc {
- union {
- struct {
- u8 dma_addr_hi; /* High bits of host buf address */
- __le16 dma_len; /* Length to DMA for this desc */
- u8 offset_eop; /* Offset in buf where pkt starts +
- * highest bit is eop flag.
- */
- __le32 dma_addr_lo; /* Low 32bit of host buf addr */
-
- __le16 mss; /* MSS to be used for LSO */
- u8 lso_hdrlen; /* LSO, TCP payload offset */
- u8 flags; /* TX Flags, see @PCIE_DESC_TX_* */
- union {
- struct {
- u8 l3_offset; /* L3 header offset */
- u8 l4_offset; /* L4 header offset */
- };
- __le16 vlan; /* VLAN tag to add if indicated */
- };
- __le16 data_len; /* Length of frame + meta data */
- } __packed;
- __le32 vals[4];
- __le64 vals8[2];
- };
-};
-
-/**
- * struct nfp_net_tx_buf - software TX buffer descriptor
- * @skb: normal ring, sk_buff associated with this buffer
- * @frag: XDP ring, page frag associated with this buffer
- * @dma_addr: DMA mapping address of the buffer
- * @fidx: Fragment index (-1 for the head and [0..nr_frags-1] for frags)
- * @pkt_cnt: Number of packets to be produced out of the skb associated
- * with this buffer (valid only on the head's buffer).
- * Will be 1 for all non-TSO packets.
- * @real_len: Number of bytes which to be produced out of the skb (valid only
- * on the head's buffer). Equal to skb->len for non-TSO packets.
- */
-struct nfp_net_tx_buf {
- union {
- struct sk_buff *skb;
- void *frag;
- };
- dma_addr_t dma_addr;
- short int fidx;
- u16 pkt_cnt;
- u32 real_len;
-};
-
/**
* struct nfp_net_tx_ring - TX ring structure
* @r_vec: Back pointer to ring vector structure
* @idx: Ring index from Linux's perspective
- * @qcidx: Queue Controller Peripheral (QCP) queue index for the TX queue
+ * @data_pending: number of bytes added to current block (NFDK only)
* @qcp_q: Pointer to base of the QCP TX queue
+ * @txrwb: TX pointer write back area
* @cnt: Size of the queue in number of descriptors
* @wr_p: TX ring write pointer (free running)
* @rd_p: TX ring read pointer (free running)
* @qcp_rd_p: Local copy of QCP TX queue read pointer
* @wr_ptr_add: Accumulated number of buffers to add to QCP write pointer
* (used for .xmit_more delayed kick)
- * @txbufs: Array of transmitted TX buffers, to free on transmit
- * @txds: Virtual address of TX ring in host memory
+ * @txbufs: Array of transmitted TX buffers, to free on transmit (NFD3)
+ * @ktxbufs: Array of transmitted TX buffers, to free on transmit (NFDK)
+ * @txds: Virtual address of TX ring in host memory (NFD3)
+ * @ktxds: Virtual address of TX ring in host memory (NFDK)
+ *
+ * @qcidx: Queue Controller Peripheral (QCP) queue index for the TX queue
* @dma: DMA address of the TX ring
* @size: Size, in bytes, of the TX ring (needed to free)
* @is_xdp: Is this a XDP TX ring?
@@ -210,9 +150,10 @@ struct nfp_net_tx_buf {
struct nfp_net_tx_ring {
struct nfp_net_r_vector *r_vec;
- u32 idx;
- int qcidx;
+ u16 idx;
+ u16 data_pending;
u8 __iomem *qcp_q;
+ u64 *txrwb;
u32 cnt;
u32 wr_p;
@@ -221,8 +162,17 @@ struct nfp_net_tx_ring {
u32 wr_ptr_add;
- struct nfp_net_tx_buf *txbufs;
- struct nfp_net_tx_desc *txds;
+ union {
+ struct nfp_nfd3_tx_buf *txbufs;
+ struct nfp_nfdk_tx_buf *ktxbufs;
+ };
+ union {
+ struct nfp_nfd3_tx_desc *txds;
+ struct nfp_nfdk_tx_desc *ktxds;
+ };
+
+ /* Cold data follows */
+ int qcidx;
dma_addr_t dma;
size_t size;
@@ -315,6 +265,16 @@ struct nfp_net_rx_buf {
};
/**
+ * struct nfp_net_xsk_rx_buf - software RX XSK buffer descriptor
+ * @dma_addr: DMA mapping address of the buffer
+ * @xdp: XSK buffer pool handle (for AF_XDP)
+ */
+struct nfp_net_xsk_rx_buf {
+ dma_addr_t dma_addr;
+ struct xdp_buff *xdp;
+};
+
+/**
* struct nfp_net_rx_ring - RX ring structure
* @r_vec: Back pointer to ring vector structure
* @cnt: Size of the queue in number of descriptors
@@ -324,6 +284,7 @@ struct nfp_net_rx_buf {
* @fl_qcidx: Queue Controller Peripheral (QCP) queue index for the freelist
* @qcp_fl: Pointer to base of the QCP freelist queue
* @rxbufs: Array of transmitted FL/RX buffers
+ * @xsk_rxbufs: Array of transmitted FL/RX buffers (for AF_XDP)
* @rxds: Virtual address of FL/RX ring in host memory
* @xdp_rxq: RX-ring info avail for XDP
* @dma: DMA address of the FL/RX ring
@@ -342,6 +303,7 @@ struct nfp_net_rx_ring {
u8 __iomem *qcp_fl;
struct nfp_net_rx_buf *rxbufs;
+ struct nfp_net_xsk_rx_buf *xsk_rxbufs;
struct nfp_net_rx_desc *rxds;
struct xdp_rxq_info xdp_rxq;
@@ -360,6 +322,7 @@ struct nfp_net_rx_ring {
* @tx_ring: Pointer to TX ring
* @rx_ring: Pointer to RX ring
* @xdp_ring: Pointer to an extra TX ring for XDP
+ * @xsk_pool: XSK buffer pool active on vector queue pair (for AF_XDP)
* @irq_entry: MSI-X table entry (use for talking to the device)
* @event_ctr: Number of interrupt
* @rx_dim: Dynamic interrupt moderation structure for RX
@@ -431,6 +394,7 @@ struct nfp_net_r_vector {
u64 rx_replace_buf_alloc_fail;
struct nfp_net_tx_ring *xdp_ring;
+ struct xsk_buff_pool *xsk_pool;
struct u64_stats_sync tx_sync;
u64 tx_pkts;
@@ -460,13 +424,17 @@ struct nfp_net_fw_version {
u8 minor;
u8 major;
u8 class;
- u8 resv;
+
+	/* This byte is available for further use; currently
+	 * BIT0: dp type, BIT[7:1]: reserved
+	 */
+ u8 extend;
} __packed;
static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
- u8 resv, u8 class, u8 major, u8 minor)
+ u8 extend, u8 class, u8 major, u8 minor)
{
- return fw_ver->resv == resv &&
+ return fw_ver->extend == extend &&
fw_ver->class == class &&
fw_ver->major == major &&
fw_ver->minor == minor;
@@ -494,13 +462,17 @@ struct nfp_stat_pair {
* @rx_rings: Array of pre-allocated RX ring structures
* @ctrl_bar: Pointer to mapped control BAR
*
- * @txd_cnt: Size of the TX ring in number of descriptors
- * @rxd_cnt: Size of the RX ring in number of descriptors
+ * @ops: Callbacks and parameters for this vNIC's NFD version
+ * @txrwb: TX pointer write back area (indexed by queue id)
+ * @txrwb_dma: TX pointer write back area DMA address
+ * @txd_cnt: Size of the TX ring in number of min size packets
+ * @rxd_cnt: Size of the RX ring in number of min size packets
* @num_r_vecs: Number of used ring vectors
* @num_tx_rings: Currently configured number of TX rings
* @num_stack_tx_rings: Number of TX rings used by the stack (not XDP)
* @num_rx_rings: Currently configured number of RX rings
* @mtu: Device MTU
+ * @xsk_pools: XSK buffer pools, @max_r_vecs in size (for AF_XDP).
*/
struct nfp_net_dp {
struct device *dev;
@@ -527,6 +499,11 @@ struct nfp_net_dp {
/* Cold data follows */
+ const struct nfp_dp_ops *ops;
+
+ u64 *txrwb;
+ dma_addr_t txrwb_dma;
+
unsigned int txd_cnt;
unsigned int rxd_cnt;
@@ -537,11 +514,14 @@ struct nfp_net_dp {
unsigned int num_rx_rings;
unsigned int mtu;
+
+ struct xsk_buff_pool **xsk_pools;
};
/**
* struct nfp_net - NFP network device structure
* @dp: Datapath structure
+ * @dev_info: NFP ASIC params
* @id: vNIC id within the PF (0 for VFs)
* @fw_ver: Firmware version
* @cap: Capabilities advertised by the Firmware
@@ -615,6 +595,7 @@ struct nfp_net_dp {
struct nfp_net {
struct nfp_net_dp dp;
+ const struct nfp_dev_info *dev_info;
struct nfp_net_fw_version fw_ver;
u32 id;
@@ -767,7 +748,6 @@ static inline void nn_pci_flush(struct nfp_net *nn)
* either add to a pointer or to read the pointer value.
*/
#define NFP_QCP_QUEUE_ADDR_SZ 0x800
-#define NFP_QCP_QUEUE_AREA_SZ 0x80000
#define NFP_QCP_QUEUE_OFF(_x) ((_x) * NFP_QCP_QUEUE_ADDR_SZ)
#define NFP_QCP_QUEUE_ADD_RPTR 0x0000
#define NFP_QCP_QUEUE_ADD_WPTR 0x0004
@@ -776,50 +756,21 @@ static inline void nn_pci_flush(struct nfp_net *nn)
#define NFP_QCP_QUEUE_STS_HI 0x000c
#define NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask 0x3ffff
-/* The offset of a QCP queues in the PCIe Target */
-#define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))
-
/* nfp_qcp_ptr - Read or Write Pointer of a queue */
enum nfp_qcp_ptr {
NFP_QCP_READ_PTR = 0,
NFP_QCP_WRITE_PTR
};
-/* There appear to be an *undocumented* upper limit on the value which
- * one can add to a queue and that value is either 0x3f or 0x7f. We
- * go with 0x3f as a conservative measure.
- */
-#define NFP_QCP_MAX_ADD 0x3f
-
-static inline void _nfp_qcp_ptr_add(u8 __iomem *q,
- enum nfp_qcp_ptr ptr, u32 val)
-{
- u32 off;
-
- if (ptr == NFP_QCP_READ_PTR)
- off = NFP_QCP_QUEUE_ADD_RPTR;
- else
- off = NFP_QCP_QUEUE_ADD_WPTR;
-
- while (val > NFP_QCP_MAX_ADD) {
- writel(NFP_QCP_MAX_ADD, q + off);
- val -= NFP_QCP_MAX_ADD;
- }
-
- writel(val, q + off);
-}
-
/**
* nfp_qcp_rd_ptr_add() - Add the value to the read pointer of a queue
*
* @q: Base address for queue structure
* @val: Value to add to the queue pointer
- *
- * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
*/
static inline void nfp_qcp_rd_ptr_add(u8 __iomem *q, u32 val)
{
- _nfp_qcp_ptr_add(q, NFP_QCP_READ_PTR, val);
+ writel(val, q + NFP_QCP_QUEUE_ADD_RPTR);
}
/**
@@ -827,12 +778,10 @@ static inline void nfp_qcp_rd_ptr_add(u8 __iomem *q, u32 val)
*
* @q: Base address for queue structure
* @val: Value to add to the queue pointer
- *
- * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
*/
static inline void nfp_qcp_wr_ptr_add(u8 __iomem *q, u32 val)
{
- _nfp_qcp_ptr_add(q, NFP_QCP_WRITE_PTR, val);
+ writel(val, q + NFP_QCP_QUEUE_ADD_WPTR);
}
static inline u32 _nfp_qcp_read(u8 __iomem *q, enum nfp_qcp_ptr ptr)
@@ -875,6 +824,8 @@ static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
return _nfp_qcp_read(q, NFP_QCP_WRITE_PTR);
}
+u32 nfp_qcp_queue_offset(const struct nfp_dev_info *dev_info, u16 queue);
+
static inline bool nfp_net_is_data_vnic(struct nfp_net *nn)
{
WARN_ON_ONCE(!nn->dp.netdev && nn->port);
@@ -921,11 +872,13 @@ static inline void nn_ctrl_bar_unlock(struct nfp_net *nn)
/* Globals */
extern const char nfp_driver_version[];
-extern const struct net_device_ops nfp_net_netdev_ops;
+extern const struct net_device_ops nfp_nfd3_netdev_ops;
+extern const struct net_device_ops nfp_nfdk_netdev_ops;
static inline bool nfp_netdev_is_nfp_net(struct net_device *netdev)
{
- return netdev->netdev_ops == &nfp_net_netdev_ops;
+ return netdev->netdev_ops == &nfp_nfd3_netdev_ops ||
+ netdev->netdev_ops == &nfp_nfdk_netdev_ops;
}
static inline int nfp_net_coalesce_para_check(u32 usecs, u32 pkts)
@@ -941,7 +894,8 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
void __iomem *ctrl_bar);
struct nfp_net *
-nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
+nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
+ void __iomem *ctrl_bar, bool needs_netdev,
unsigned int max_tx_rings, unsigned int max_rx_rings);
void nfp_net_free(struct nfp_net *nn);
@@ -972,6 +926,10 @@ void nfp_net_irqs_disable(struct pci_dev *pdev);
void
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
unsigned int n);
+struct sk_buff *
+nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+ struct sk_buff *skb, u64 *tls_handle, int *nr_frags);
+void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle);
struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 79257ec41987..b412670d89b2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
/*
* nfp_net_common.c
@@ -13,7 +13,6 @@
#include <linux/bitfield.h>
#include <linux/bpf.h>
-#include <linux/bpf_trace.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -38,13 +37,17 @@
#include <net/tls.h>
#include <net/vxlan.h>
+#include <net/xdp_sock_drv.h>
+#include "nfpcore/nfp_dev.h"
#include "nfpcore/nfp_nsp.h"
#include "ccm.h"
#include "nfp_app.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
+#include "nfp_net_dp.h"
#include "nfp_net_sriov.h"
+#include "nfp_net_xsk.h"
#include "nfp_port.h"
#include "crypto/crypto.h"
#include "crypto/fw.h"
@@ -63,33 +66,10 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
put_unaligned_le32(reg, fw_ver);
}
-static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
+u32 nfp_qcp_queue_offset(const struct nfp_dev_info *dev_info, u16 queue)
{
- return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
- dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
- dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-}
-
-static void
-nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
-{
- dma_sync_single_for_device(dp->dev, dma_addr,
- dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
- dp->rx_dma_dir);
-}
-
-static void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr)
-{
- dma_unmap_single_attrs(dp->dev, dma_addr,
- dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
- dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-}
-
-static void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr,
- unsigned int len)
-{
- dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
- len, dp->rx_dma_dir);
+ queue &= dev_info->qc_idx_mask;
+ return dev_info->qc_addr_offset + NFP_QCP_QUEUE_ADDR_SZ * queue;
}
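nfp_qcp_queue_offset() computes what the removed NFP_PCIE_QUEUE() macro used to hard-code; with the NFP6000 values that macro implied (0x80000 base, 0xff index mask) the results match. A quick stand-alone check, treating those two values as assumptions fed in through nfp_dev_info:

#include <stdio.h>

#define QUEUE_ADDR_SZ 0x800u	/* NFP_QCP_QUEUE_ADDR_SZ */

/* Assumed NFP6000 parameters, matching the removed NFP_PCIE_QUEUE() macro. */
static const unsigned int qc_addr_offset = 0x80000;
static const unsigned int qc_idx_mask = 0xff;

static unsigned int qcp_queue_offset(unsigned int queue)
{
	queue &= qc_idx_mask;
	return qc_addr_offset + QUEUE_ADDR_SZ * queue;
}

int main(void)
{
	printf("0x%x\n", qcp_queue_offset(0));		/* 0x80000 */
	printf("0x%x\n", qcp_queue_offset(3));		/* 0x81800 */
	printf("0x%x\n", qcp_queue_offset(259));	/* wraps to queue 3: 0x81800 */
	return 0;
}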
/* Firmware reconfig
@@ -375,19 +355,6 @@ int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd)
*/
/**
- * nfp_net_irq_unmask() - Unmask automasked interrupt
- * @nn: NFP Network structure
- * @entry_nr: MSI-X table entry
- *
- * Clear the ICR for the IRQ entry.
- */
-static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
-{
- nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
- nn_pci_flush(nn);
-}
-
-/**
* nfp_net_irqs_alloc() - allocates MSI-X irqs
* @pdev: PCI device structure
* @irq_entries: Array to be initialized and used to hold the irq entries
@@ -569,49 +536,6 @@ static irqreturn_t nfp_net_irq_exn(int irq, void *data)
}
/**
- * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
- * @tx_ring: TX ring structure
- * @r_vec: IRQ vector servicing this ring
- * @idx: Ring index
- * @is_xdp: Is this an XDP TX ring?
- */
-static void
-nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
- struct nfp_net_r_vector *r_vec, unsigned int idx,
- bool is_xdp)
-{
- struct nfp_net *nn = r_vec->nfp_net;
-
- tx_ring->idx = idx;
- tx_ring->r_vec = r_vec;
- tx_ring->is_xdp = is_xdp;
- u64_stats_init(&tx_ring->r_vec->tx_sync);
-
- tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
- tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
-}
-
-/**
- * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
- * @rx_ring: RX ring structure
- * @r_vec: IRQ vector servicing this ring
- * @idx: Ring index
- */
-static void
-nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
- struct nfp_net_r_vector *r_vec, unsigned int idx)
-{
- struct nfp_net *nn = r_vec->nfp_net;
-
- rx_ring->idx = idx;
- rx_ring->r_vec = r_vec;
- u64_stats_init(&rx_ring->r_vec->rx_sync);
-
- rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
- rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
-}
-
-/**
* nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
* @nn: NFP Network structure
* @ctrl_offset: Control BAR offset where IRQ configuration should be written
@@ -658,178 +582,7 @@ static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
free_irq(nn->irq_entries[vector_idx].vector, nn);
}
-/* Transmit
- *
- * One queue controller peripheral queue is used for transmit. The
- * driver en-queues packets for transmit by advancing the write
- * pointer. The device indicates that packets have transmitted by
- * advancing the read pointer. The driver maintains a local copy of
- * the read and write pointer in @struct nfp_net_tx_ring. The driver
- * keeps @wr_p in sync with the queue controller write pointer and can
- * determine how many packets have been transmitted by comparing its
- * copy of the read pointer @rd_p with the read pointer maintained by
- * the queue controller peripheral.
- */
-
-/**
- * nfp_net_tx_full() - Check if the TX ring is full
- * @tx_ring: TX ring to check
- * @dcnt: Number of descriptors that need to be enqueued (must be >= 1)
- *
- * This function checks, based on the *host copy* of read/write
- * pointer if a given TX ring is full. The real TX queue may have
- * some newly made available slots.
- *
- * Return: True if the ring is full.
- */
-static int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
-{
- return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
-}
-
-/* Wrappers for deciding when to stop and restart TX queues */
-static int nfp_net_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
-{
- return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
-}
-
-static int nfp_net_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
-{
- return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
-}
-
-/**
- * nfp_net_tx_ring_stop() - stop tx ring
- * @nd_q: netdev queue
- * @tx_ring: driver tx queue structure
- *
- * Safely stop TX ring. Remember that while we are running .start_xmit()
- * someone else may be cleaning the TX ring completions so we need to be
- * extra careful here.
- */
-static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
- struct nfp_net_tx_ring *tx_ring)
-{
- netif_tx_stop_queue(nd_q);
-
- /* We can race with the TX completion out of NAPI so recheck */
- smp_mb();
- if (unlikely(nfp_net_tx_ring_should_wake(tx_ring)))
- netif_tx_start_queue(nd_q);
-}
-
-/**
- * nfp_net_tx_tso() - Set up Tx descriptor for LSO
- * @r_vec: per-ring structure
- * @txbuf: Pointer to driver soft TX descriptor
- * @txd: Pointer to HW TX descriptor
- * @skb: Pointer to SKB
- * @md_bytes: Prepend length
- *
- * Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
- * Return error on packet header greater than maximum supported LSO header size.
- */
-static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
- struct nfp_net_tx_buf *txbuf,
- struct nfp_net_tx_desc *txd, struct sk_buff *skb,
- u32 md_bytes)
-{
- u32 l3_offset, l4_offset, hdrlen;
- u16 mss;
-
- if (!skb_is_gso(skb))
- return;
-
- if (!skb->encapsulation) {
- l3_offset = skb_network_offset(skb);
- l4_offset = skb_transport_offset(skb);
- hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
- } else {
- l3_offset = skb_inner_network_offset(skb);
- l4_offset = skb_inner_transport_offset(skb);
- hdrlen = skb_inner_transport_header(skb) - skb->data +
- inner_tcp_hdrlen(skb);
- }
-
- txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
- txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
-
- mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
- txd->l3_offset = l3_offset - md_bytes;
- txd->l4_offset = l4_offset - md_bytes;
- txd->lso_hdrlen = hdrlen - md_bytes;
- txd->mss = cpu_to_le16(mss);
- txd->flags |= PCIE_DESC_TX_LSO;
-
- u64_stats_update_begin(&r_vec->tx_sync);
- r_vec->tx_lso++;
- u64_stats_update_end(&r_vec->tx_sync);
-}
-
-/**
- * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor
- * @dp: NFP Net data path struct
- * @r_vec: per-ring structure
- * @txbuf: Pointer to driver soft TX descriptor
- * @txd: Pointer to TX descriptor
- * @skb: Pointer to SKB
- *
- * This function sets the TX checksum flags in the TX descriptor based
- * on the configuration and the protocol of the packet to be transmitted.
- */
-static void nfp_net_tx_csum(struct nfp_net_dp *dp,
- struct nfp_net_r_vector *r_vec,
- struct nfp_net_tx_buf *txbuf,
- struct nfp_net_tx_desc *txd, struct sk_buff *skb)
-{
- struct ipv6hdr *ipv6h;
- struct iphdr *iph;
- u8 l4_hdr;
-
- if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
- return;
-
- if (skb->ip_summed != CHECKSUM_PARTIAL)
- return;
-
- txd->flags |= PCIE_DESC_TX_CSUM;
- if (skb->encapsulation)
- txd->flags |= PCIE_DESC_TX_ENCAP;
-
- iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
- ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
-
- if (iph->version == 4) {
- txd->flags |= PCIE_DESC_TX_IP4_CSUM;
- l4_hdr = iph->protocol;
- } else if (ipv6h->version == 6) {
- l4_hdr = ipv6h->nexthdr;
- } else {
- nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
- return;
- }
-
- switch (l4_hdr) {
- case IPPROTO_TCP:
- txd->flags |= PCIE_DESC_TX_TCP_CSUM;
- break;
- case IPPROTO_UDP:
- txd->flags |= PCIE_DESC_TX_UDP_CSUM;
- break;
- default:
- nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr);
- return;
- }
-
- u64_stats_update_begin(&r_vec->tx_sync);
- if (skb->encapsulation)
- r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
- else
- r_vec->hw_csum_tx += txbuf->pkt_cnt;
- u64_stats_update_end(&r_vec->tx_sync);
-}
-
-static struct sk_buff *
+struct sk_buff *
nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
struct sk_buff *skb, u64 *tls_handle, int *nr_frags)
{
@@ -901,7 +654,7 @@ nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
return skb;
}
-static void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
+void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
{
#ifdef CONFIG_TLS_DEVICE
struct nfp_net_tls_offload_ctx *ntls;
@@ -923,411 +676,6 @@ static void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
#endif
}
-static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
-{
- wmb();
- nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
- tx_ring->wr_ptr_add = 0;
-}
-
-static int nfp_net_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
-{
- struct metadata_dst *md_dst = skb_metadata_dst(skb);
- unsigned char *data;
- u32 meta_id = 0;
- int md_bytes;
-
- if (likely(!md_dst && !tls_handle))
- return 0;
- if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX)) {
- if (!tls_handle)
- return 0;
- md_dst = NULL;
- }
-
- md_bytes = 4 + !!md_dst * 4 + !!tls_handle * 8;
-
- if (unlikely(skb_cow_head(skb, md_bytes)))
- return -ENOMEM;
-
- meta_id = 0;
- data = skb_push(skb, md_bytes) + md_bytes;
- if (md_dst) {
- data -= 4;
- put_unaligned_be32(md_dst->u.port_info.port_id, data);
- meta_id = NFP_NET_META_PORTID;
- }
- if (tls_handle) {
- /* conn handle is opaque, we just use u64 to be able to quickly
- * compare it to zero
- */
- data -= 8;
- memcpy(data, &tls_handle, sizeof(tls_handle));
- meta_id <<= NFP_NET_META_FIELD_SIZE;
- meta_id |= NFP_NET_META_CONN_HANDLE;
- }
-
- data -= 4;
- put_unaligned_be32(meta_id, data);
-
- return md_bytes;
-}
-
-/**
- * nfp_net_tx() - Main transmit entry point
- * @skb: SKB to transmit
- * @netdev: netdev structure
- *
- * Return: NETDEV_TX_OK on success.
- */
-static netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
-{
- struct nfp_net *nn = netdev_priv(netdev);
- const skb_frag_t *frag;
- int f, nr_frags, wr_idx, md_bytes;
- struct nfp_net_tx_ring *tx_ring;
- struct nfp_net_r_vector *r_vec;
- struct nfp_net_tx_buf *txbuf;
- struct nfp_net_tx_desc *txd;
- struct netdev_queue *nd_q;
- struct nfp_net_dp *dp;
- dma_addr_t dma_addr;
- unsigned int fsize;
- u64 tls_handle = 0;
- u16 qidx;
-
- dp = &nn->dp;
- qidx = skb_get_queue_mapping(skb);
- tx_ring = &dp->tx_rings[qidx];
- r_vec = tx_ring->r_vec;
-
- nr_frags = skb_shinfo(skb)->nr_frags;
-
- if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
- nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
- qidx, tx_ring->wr_p, tx_ring->rd_p);
- nd_q = netdev_get_tx_queue(dp->netdev, qidx);
- netif_tx_stop_queue(nd_q);
- nfp_net_tx_xmit_more_flush(tx_ring);
- u64_stats_update_begin(&r_vec->tx_sync);
- r_vec->tx_busy++;
- u64_stats_update_end(&r_vec->tx_sync);
- return NETDEV_TX_BUSY;
- }
-
- skb = nfp_net_tls_tx(dp, r_vec, skb, &tls_handle, &nr_frags);
- if (unlikely(!skb)) {
- nfp_net_tx_xmit_more_flush(tx_ring);
- return NETDEV_TX_OK;
- }
-
- md_bytes = nfp_net_prep_tx_meta(skb, tls_handle);
- if (unlikely(md_bytes < 0))
- goto err_flush;
-
- /* Start with the head skbuf */
- dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
- DMA_TO_DEVICE);
- if (dma_mapping_error(dp->dev, dma_addr))
- goto err_dma_err;
-
- wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
-
- /* Stash the soft descriptor of the head then initialize it */
- txbuf = &tx_ring->txbufs[wr_idx];
- txbuf->skb = skb;
- txbuf->dma_addr = dma_addr;
- txbuf->fidx = -1;
- txbuf->pkt_cnt = 1;
- txbuf->real_len = skb->len;
-
- /* Build TX descriptor */
- txd = &tx_ring->txds[wr_idx];
- txd->offset_eop = (nr_frags ? 0 : PCIE_DESC_TX_EOP) | md_bytes;
- txd->dma_len = cpu_to_le16(skb_headlen(skb));
- nfp_desc_set_dma_addr(txd, dma_addr);
- txd->data_len = cpu_to_le16(skb->len);
-
- txd->flags = 0;
- txd->mss = 0;
- txd->lso_hdrlen = 0;
-
- /* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
- nfp_net_tx_tso(r_vec, txbuf, txd, skb, md_bytes);
- nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
- if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
- txd->flags |= PCIE_DESC_TX_VLAN;
- txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
- }
-
- /* Gather DMA */
- if (nr_frags > 0) {
- __le64 second_half;
-
- /* all descs must match except for in addr, length and eop */
- second_half = txd->vals8[1];
-
- for (f = 0; f < nr_frags; f++) {
- frag = &skb_shinfo(skb)->frags[f];
- fsize = skb_frag_size(frag);
-
- dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
- fsize, DMA_TO_DEVICE);
- if (dma_mapping_error(dp->dev, dma_addr))
- goto err_unmap;
-
- wr_idx = D_IDX(tx_ring, wr_idx + 1);
- tx_ring->txbufs[wr_idx].skb = skb;
- tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
- tx_ring->txbufs[wr_idx].fidx = f;
-
- txd = &tx_ring->txds[wr_idx];
- txd->dma_len = cpu_to_le16(fsize);
- nfp_desc_set_dma_addr(txd, dma_addr);
- txd->offset_eop = md_bytes |
- ((f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0);
- txd->vals8[1] = second_half;
- }
-
- u64_stats_update_begin(&r_vec->tx_sync);
- r_vec->tx_gather++;
- u64_stats_update_end(&r_vec->tx_sync);
- }
-
- skb_tx_timestamp(skb);
-
- nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
-
- tx_ring->wr_p += nr_frags + 1;
- if (nfp_net_tx_ring_should_stop(tx_ring))
- nfp_net_tx_ring_stop(nd_q, tx_ring);
-
- tx_ring->wr_ptr_add += nr_frags + 1;
- if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, netdev_xmit_more()))
- nfp_net_tx_xmit_more_flush(tx_ring);
-
- return NETDEV_TX_OK;
-
-err_unmap:
- while (--f >= 0) {
- frag = &skb_shinfo(skb)->frags[f];
- dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
- skb_frag_size(frag), DMA_TO_DEVICE);
- tx_ring->txbufs[wr_idx].skb = NULL;
- tx_ring->txbufs[wr_idx].dma_addr = 0;
- tx_ring->txbufs[wr_idx].fidx = -2;
- wr_idx = wr_idx - 1;
- if (wr_idx < 0)
- wr_idx += tx_ring->cnt;
- }
- dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
- skb_headlen(skb), DMA_TO_DEVICE);
- tx_ring->txbufs[wr_idx].skb = NULL;
- tx_ring->txbufs[wr_idx].dma_addr = 0;
- tx_ring->txbufs[wr_idx].fidx = -2;
-err_dma_err:
- nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
-err_flush:
- nfp_net_tx_xmit_more_flush(tx_ring);
- u64_stats_update_begin(&r_vec->tx_sync);
- r_vec->tx_errors++;
- u64_stats_update_end(&r_vec->tx_sync);
- nfp_net_tls_tx_undo(skb, tls_handle);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
-}
-
-/**
- * nfp_net_tx_complete() - Handled completed TX packets
- * @tx_ring: TX ring structure
- * @budget: NAPI budget (only used as bool to determine if in NAPI context)
- */
-static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
-{
- struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
- struct netdev_queue *nd_q;
- u32 done_pkts = 0, done_bytes = 0;
- u32 qcp_rd_p;
- int todo;
-
- if (tx_ring->wr_p == tx_ring->rd_p)
- return;
-
- /* Work out how many descriptors have been transmitted */
- qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
-
- if (qcp_rd_p == tx_ring->qcp_rd_p)
- return;
-
- todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
-
- while (todo--) {
- const skb_frag_t *frag;
- struct nfp_net_tx_buf *tx_buf;
- struct sk_buff *skb;
- int fidx, nr_frags;
- int idx;
-
- idx = D_IDX(tx_ring, tx_ring->rd_p++);
- tx_buf = &tx_ring->txbufs[idx];
-
- skb = tx_buf->skb;
- if (!skb)
- continue;
-
- nr_frags = skb_shinfo(skb)->nr_frags;
- fidx = tx_buf->fidx;
-
- if (fidx == -1) {
- /* unmap head */
- dma_unmap_single(dp->dev, tx_buf->dma_addr,
- skb_headlen(skb), DMA_TO_DEVICE);
-
- done_pkts += tx_buf->pkt_cnt;
- done_bytes += tx_buf->real_len;
- } else {
- /* unmap fragment */
- frag = &skb_shinfo(skb)->frags[fidx];
- dma_unmap_page(dp->dev, tx_buf->dma_addr,
- skb_frag_size(frag), DMA_TO_DEVICE);
- }
-
- /* check for last gather fragment */
- if (fidx == nr_frags - 1)
- napi_consume_skb(skb, budget);
-
- tx_buf->dma_addr = 0;
- tx_buf->skb = NULL;
- tx_buf->fidx = -2;
- }
-
- tx_ring->qcp_rd_p = qcp_rd_p;
-
- u64_stats_update_begin(&r_vec->tx_sync);
- r_vec->tx_bytes += done_bytes;
- r_vec->tx_pkts += done_pkts;
- u64_stats_update_end(&r_vec->tx_sync);
-
- if (!dp->netdev)
- return;
-
- nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
- netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
- if (nfp_net_tx_ring_should_wake(tx_ring)) {
- /* Make sure TX thread will see updated tx_ring->rd_p */
- smp_mb();
-
- if (unlikely(netif_tx_queue_stopped(nd_q)))
- netif_tx_wake_queue(nd_q);
- }
-
- WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
- "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
- tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
-}
-
-static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
-{
- struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- u32 done_pkts = 0, done_bytes = 0;
- bool done_all;
- int idx, todo;
- u32 qcp_rd_p;
-
- /* Work out how many descriptors have been transmitted */
- qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
-
- if (qcp_rd_p == tx_ring->qcp_rd_p)
- return true;
-
- todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
-
- done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
- todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
-
- tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);
-
- done_pkts = todo;
- while (todo--) {
- idx = D_IDX(tx_ring, tx_ring->rd_p);
- tx_ring->rd_p++;
-
- done_bytes += tx_ring->txbufs[idx].real_len;
- }
-
- u64_stats_update_begin(&r_vec->tx_sync);
- r_vec->tx_bytes += done_bytes;
- r_vec->tx_pkts += done_pkts;
- u64_stats_update_end(&r_vec->tx_sync);
-
- WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
- "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
- tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
-
- return done_all;
-}
-
-/**
- * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
- * @dp: NFP Net data path struct
- * @tx_ring: TX ring structure
- *
- * Assumes that the device is stopped, must be idempotent.
- */
-static void
-nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
-{
- const skb_frag_t *frag;
- struct netdev_queue *nd_q;
-
- while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
- struct nfp_net_tx_buf *tx_buf;
- struct sk_buff *skb;
- int idx, nr_frags;
-
- idx = D_IDX(tx_ring, tx_ring->rd_p);
- tx_buf = &tx_ring->txbufs[idx];
-
- skb = tx_ring->txbufs[idx].skb;
- nr_frags = skb_shinfo(skb)->nr_frags;
-
- if (tx_buf->fidx == -1) {
- /* unmap head */
- dma_unmap_single(dp->dev, tx_buf->dma_addr,
- skb_headlen(skb), DMA_TO_DEVICE);
- } else {
- /* unmap fragment */
- frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
- dma_unmap_page(dp->dev, tx_buf->dma_addr,
- skb_frag_size(frag), DMA_TO_DEVICE);
- }
-
- /* check for last gather fragment */
- if (tx_buf->fidx == nr_frags - 1)
- dev_kfree_skb_any(skb);
-
- tx_buf->dma_addr = 0;
- tx_buf->skb = NULL;
- tx_buf->fidx = -2;
-
- tx_ring->qcp_rd_p++;
- tx_ring->rd_p++;
- }
-
- memset(tx_ring->txds, 0, tx_ring->size);
- tx_ring->wr_p = 0;
- tx_ring->rd_p = 0;
- tx_ring->qcp_rd_p = 0;
- tx_ring->wr_ptr_add = 0;
-
- if (tx_ring->is_xdp || !dp->netdev)
- return;
-
- nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
- netdev_tx_reset_queue(nd_q);
-}
-
static void nfp_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct nfp_net *nn = netdev_priv(netdev);
@@ -1335,1008 +683,43 @@ static void nfp_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
nn_warn(nn, "TX watchdog timeout on ring: %u\n", txqueue);
}
-/* Receive processing
- */
+/* Receive processing */
static unsigned int
-nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
+nfp_net_calc_fl_bufsz_data(struct nfp_net_dp *dp)
{
- unsigned int fl_bufsz;
+ unsigned int fl_bufsz = 0;
- fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
- fl_bufsz += dp->rx_dma_off;
if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
fl_bufsz += NFP_NET_MAX_PREPEND;
else
fl_bufsz += dp->rx_offset;
fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;
- fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
- fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-
return fl_bufsz;
}
-static void
-nfp_net_free_frag(void *frag, bool xdp)
-{
- if (!xdp)
- skb_free_frag(frag);
- else
- __free_page(virt_to_page(frag));
-}
-
-/**
- * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
- * @dp: NFP Net data path struct
- * @dma_addr: Pointer to storage for DMA address (output param)
- *
- * This function will allocate a new page frag and map it for DMA.
- *
- * Return: allocated page frag or NULL on failure.
- */
-static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
-{
- void *frag;
-
- if (!dp->xdp_prog) {
- frag = netdev_alloc_frag(dp->fl_bufsz);
- } else {
- struct page *page;
-
- page = alloc_page(GFP_KERNEL);
- frag = page ? page_address(page) : NULL;
- }
- if (!frag) {
- nn_dp_warn(dp, "Failed to alloc receive page frag\n");
- return NULL;
- }
-
- *dma_addr = nfp_net_dma_map_rx(dp, frag);
- if (dma_mapping_error(dp->dev, *dma_addr)) {
- nfp_net_free_frag(frag, dp->xdp_prog);
- nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
- return NULL;
- }
-
- return frag;
-}
-
-static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
-{
- void *frag;
-
- if (!dp->xdp_prog) {
- frag = napi_alloc_frag(dp->fl_bufsz);
- if (unlikely(!frag))
- return NULL;
- } else {
- struct page *page;
-
- page = dev_alloc_page();
- if (unlikely(!page))
- return NULL;
- frag = page_address(page);
- }
-
- *dma_addr = nfp_net_dma_map_rx(dp, frag);
- if (dma_mapping_error(dp->dev, *dma_addr)) {
- nfp_net_free_frag(frag, dp->xdp_prog);
- nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
- return NULL;
- }
-
- return frag;
-}
-
-/**
- * nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings
- * @dp: NFP Net data path struct
- * @rx_ring: RX ring structure
- * @frag: page fragment buffer
- * @dma_addr: DMA address of skb mapping
- */
-static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
- struct nfp_net_rx_ring *rx_ring,
- void *frag, dma_addr_t dma_addr)
-{
- unsigned int wr_idx;
-
- wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
-
- nfp_net_dma_sync_dev_rx(dp, dma_addr);
-
- /* Stash SKB and DMA address away */
- rx_ring->rxbufs[wr_idx].frag = frag;
- rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
-
- /* Fill freelist descriptor */
- rx_ring->rxds[wr_idx].fld.reserved = 0;
- rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
- nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
- dma_addr + dp->rx_dma_off);
-
- rx_ring->wr_p++;
- if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
- /* Update write pointer of the freelist queue. Make
- * sure all writes are flushed before telling the hardware.
- */
- wmb();
- nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
- }
-}
-
-/**
- * nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable
- * @rx_ring: RX ring structure
- *
- * Assumes that the device is stopped, must be idempotent.
- */
-static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
-{
- unsigned int wr_idx, last_idx;
-
- /* wr_p == rd_p means ring was never fed FL bufs. RX rings are always
- * kept at cnt - 1 FL bufs.
- */
- if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0)
- return;
-
- /* Move the empty entry to the end of the list */
- wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
- last_idx = rx_ring->cnt - 1;
- rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
- rx_ring->rxbufs[wr_idx].frag = rx_ring->rxbufs[last_idx].frag;
- rx_ring->rxbufs[last_idx].dma_addr = 0;
- rx_ring->rxbufs[last_idx].frag = NULL;
-
- memset(rx_ring->rxds, 0, rx_ring->size);
- rx_ring->wr_p = 0;
- rx_ring->rd_p = 0;
-}
-
-/**
- * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
- * @dp: NFP Net data path struct
- * @rx_ring: RX ring to remove buffers from
- *
- * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
- * entries. After device is disabled nfp_net_rx_ring_reset() must be called
- * to restore required ring geometry.
- */
-static void
-nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
- struct nfp_net_rx_ring *rx_ring)
-{
- unsigned int i;
-
- for (i = 0; i < rx_ring->cnt - 1; i++) {
- /* NULL skb can only happen when initial filling of the ring
- * fails to allocate enough buffers and calls here to free
- * already allocated ones.
- */
- if (!rx_ring->rxbufs[i].frag)
- continue;
-
- nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
- nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
- rx_ring->rxbufs[i].dma_addr = 0;
- rx_ring->rxbufs[i].frag = NULL;
- }
-}
-
-/**
- * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
- * @dp: NFP Net data path struct
- * @rx_ring: RX ring to allocate buffers for
- */
-static int
-nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
- struct nfp_net_rx_ring *rx_ring)
-{
- struct nfp_net_rx_buf *rxbufs;
- unsigned int i;
-
- rxbufs = rx_ring->rxbufs;
-
- for (i = 0; i < rx_ring->cnt - 1; i++) {
- rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr);
- if (!rxbufs[i].frag) {
- nfp_net_rx_ring_bufs_free(dp, rx_ring);
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-/**
- * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
- * @dp: NFP Net data path struct
- * @rx_ring: RX ring to fill
- */
-static void
-nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
- struct nfp_net_rx_ring *rx_ring)
-{
- unsigned int i;
-
- for (i = 0; i < rx_ring->cnt - 1; i++)
- nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
- rx_ring->rxbufs[i].dma_addr);
-}
-
-/**
- * nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors
- * @flags: RX descriptor flags field in CPU byte order
- */
-static int nfp_net_rx_csum_has_errors(u16 flags)
-{
- u16 csum_all_checked, csum_all_ok;
-
- csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
- csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;
-
- return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
-}
-
-/**
- * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags
- * @dp: NFP Net data path struct
- * @r_vec: per-ring structure
- * @rxd: Pointer to RX descriptor
- * @meta: Parsed metadata prepend
- * @skb: Pointer to SKB
- */
-static void nfp_net_rx_csum(struct nfp_net_dp *dp,
- struct nfp_net_r_vector *r_vec,
- struct nfp_net_rx_desc *rxd,
- struct nfp_meta_parsed *meta, struct sk_buff *skb)
-{
- skb_checksum_none_assert(skb);
-
- if (!(dp->netdev->features & NETIF_F_RXCSUM))
- return;
-
- if (meta->csum_type) {
- skb->ip_summed = meta->csum_type;
- skb->csum = meta->csum;
- u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->hw_csum_rx_complete++;
- u64_stats_update_end(&r_vec->rx_sync);
- return;
- }
-
- if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
- u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->hw_csum_rx_error++;
- u64_stats_update_end(&r_vec->rx_sync);
- return;
- }
-
- /* Assume that the firmware will never report inner CSUM_OK unless outer
- * L4 headers were successfully parsed. FW will always report zero UDP
- * checksum as CSUM_OK.
- */
- if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
- rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
- __skb_incr_checksum_unnecessary(skb);
- u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->hw_csum_rx_ok++;
- u64_stats_update_end(&r_vec->rx_sync);
- }
-
- if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
- rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
- __skb_incr_checksum_unnecessary(skb);
- u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->hw_csum_rx_inner_ok++;
- u64_stats_update_end(&r_vec->rx_sync);
- }
-}
-
-static void
-nfp_net_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
- unsigned int type, __be32 *hash)
-{
- if (!(netdev->features & NETIF_F_RXHASH))
- return;
-
- switch (type) {
- case NFP_NET_RSS_IPV4:
- case NFP_NET_RSS_IPV6:
- case NFP_NET_RSS_IPV6_EX:
- meta->hash_type = PKT_HASH_TYPE_L3;
- break;
- default:
- meta->hash_type = PKT_HASH_TYPE_L4;
- break;
- }
-
- meta->hash = get_unaligned_be32(hash);
-}
-
-static void
-nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
- void *data, struct nfp_net_rx_desc *rxd)
-{
- struct nfp_net_rx_hash *rx_hash = data;
-
- if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
- return;
-
- nfp_net_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
- &rx_hash->hash);
-}
-
-static bool
-nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
- void *data, void *pkt, unsigned int pkt_len, int meta_len)
+static unsigned int nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
{
- u32 meta_info;
-
- meta_info = get_unaligned_be32(data);
- data += 4;
-
- while (meta_info) {
- switch (meta_info & NFP_NET_META_FIELD_MASK) {
- case NFP_NET_META_HASH:
- meta_info >>= NFP_NET_META_FIELD_SIZE;
- nfp_net_set_hash(netdev, meta,
- meta_info & NFP_NET_META_FIELD_MASK,
- (__be32 *)data);
- data += 4;
- break;
- case NFP_NET_META_MARK:
- meta->mark = get_unaligned_be32(data);
- data += 4;
- break;
- case NFP_NET_META_PORTID:
- meta->portid = get_unaligned_be32(data);
- data += 4;
- break;
- case NFP_NET_META_CSUM:
- meta->csum_type = CHECKSUM_COMPLETE;
- meta->csum =
- (__force __wsum)__get_unaligned_cpu32(data);
- data += 4;
- break;
- case NFP_NET_META_RESYNC_INFO:
- if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
- pkt_len))
- return false;
- data += sizeof(struct nfp_net_tls_resync_req);
- break;
- default:
- return true;
- }
-
- meta_info >>= NFP_NET_META_FIELD_SIZE;
- }
-
- return data != pkt;
-}
-
-static void
-nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
- struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
- struct sk_buff *skb)
-{
- u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->rx_drops++;
- /* If we have both skb and rxbuf the replacement buffer allocation
- * must have failed, count this as an alloc failure.
- */
- if (skb && rxbuf)
- r_vec->rx_replace_buf_alloc_fail++;
- u64_stats_update_end(&r_vec->rx_sync);
-
- /* skb is built based on the frag, free_skb() would free the frag
- * so to be able to reuse it we need an extra ref.
- */
- if (skb && rxbuf && skb->head == rxbuf->frag)
- page_ref_inc(virt_to_head_page(rxbuf->frag));
- if (rxbuf)
- nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
- if (skb)
- dev_kfree_skb_any(skb);
-}
-
-static bool
-nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
- struct nfp_net_tx_ring *tx_ring,
- struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
- unsigned int pkt_len, bool *completed)
-{
- unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA;
- struct nfp_net_tx_buf *txbuf;
- struct nfp_net_tx_desc *txd;
- int wr_idx;
-
- /* Reject if xdp_adjust_tail grew the packet beyond the DMA area */
- if (pkt_len + dma_off > dma_map_sz)
- return false;
-
- if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
- if (!*completed) {
- nfp_net_xdp_complete(tx_ring);
- *completed = true;
- }
-
- if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
- nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
- NULL);
- return false;
- }
- }
-
- wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
-
- /* Stash the soft descriptor of the head then initialize it */
- txbuf = &tx_ring->txbufs[wr_idx];
-
- nfp_net_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr);
-
- txbuf->frag = rxbuf->frag;
- txbuf->dma_addr = rxbuf->dma_addr;
- txbuf->fidx = -1;
- txbuf->pkt_cnt = 1;
- txbuf->real_len = pkt_len;
-
- dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
- pkt_len, DMA_BIDIRECTIONAL);
-
- /* Build TX descriptor */
- txd = &tx_ring->txds[wr_idx];
- txd->offset_eop = PCIE_DESC_TX_EOP;
- txd->dma_len = cpu_to_le16(pkt_len);
- nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off);
- txd->data_len = cpu_to_le16(pkt_len);
-
- txd->flags = 0;
- txd->mss = 0;
- txd->lso_hdrlen = 0;
-
- tx_ring->wr_p++;
- tx_ring->wr_ptr_add++;
- return true;
-}
-
-/**
- * nfp_net_rx() - receive up to @budget packets on @rx_ring
- * @rx_ring: RX ring to receive from
- * @budget: NAPI budget
- *
- * Note, this function is separated out from the napi poll function to
- * more cleanly separate packet receive code from other bookkeeping
- * functions performed in the napi poll function.
- *
- * Return: Number of packets received.
- */
-static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
-{
- struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
- struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
- struct nfp_net_tx_ring *tx_ring;
- struct bpf_prog *xdp_prog;
- bool xdp_tx_cmpl = false;
- unsigned int true_bufsz;
- struct sk_buff *skb;
- int pkts_polled = 0;
- struct xdp_buff xdp;
- int idx;
-
- xdp_prog = READ_ONCE(dp->xdp_prog);
- true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
- xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
- &rx_ring->xdp_rxq);
- tx_ring = r_vec->xdp_ring;
-
- while (pkts_polled < budget) {
- unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
- struct nfp_net_rx_buf *rxbuf;
- struct nfp_net_rx_desc *rxd;
- struct nfp_meta_parsed meta;
- bool redir_egress = false;
- struct net_device *netdev;
- dma_addr_t new_dma_addr;
- u32 meta_len_xdp = 0;
- void *new_frag;
-
- idx = D_IDX(rx_ring, rx_ring->rd_p);
-
- rxd = &rx_ring->rxds[idx];
- if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
- break;
-
- /* Memory barrier to ensure that we won't do other reads
- * before the DD bit.
- */
- dma_rmb();
-
- memset(&meta, 0, sizeof(meta));
-
- rx_ring->rd_p++;
- pkts_polled++;
-
- rxbuf = &rx_ring->rxbufs[idx];
- /* < meta_len >
- * <-- [rx_offset] -->
- * ---------------------------------------------------------
- * | [XX] | metadata | packet | XXXX |
- * ---------------------------------------------------------
- * <---------------- data_len --------------->
- *
- * The rx_offset is fixed for all packets, the meta_len can vary
- * on a packet by packet basis. If rx_offset is set to zero
- * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
- * buffer and is immediately followed by the packet (no [XX]).
- */
- meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
- data_len = le16_to_cpu(rxd->rxd.data_len);
- pkt_len = data_len - meta_len;
-
- pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
- if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
- pkt_off += meta_len;
- else
- pkt_off += dp->rx_offset;
- meta_off = pkt_off - meta_len;
-
- /* Stats update */
- u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->rx_pkts++;
- r_vec->rx_bytes += pkt_len;
- u64_stats_update_end(&r_vec->rx_sync);
-
- if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
- (dp->rx_offset && meta_len > dp->rx_offset))) {
- nn_dp_warn(dp, "oversized RX packet metadata %u\n",
- meta_len);
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
- continue;
- }
-
- nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
- data_len);
-
- if (!dp->chained_metadata_format) {
- nfp_net_set_hash_desc(dp->netdev, &meta,
- rxbuf->frag + meta_off, rxd);
- } else if (meta_len) {
- if (unlikely(nfp_net_parse_meta(dp->netdev, &meta,
- rxbuf->frag + meta_off,
- rxbuf->frag + pkt_off,
- pkt_len, meta_len))) {
- nn_dp_warn(dp, "invalid RX packet metadata\n");
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
- NULL);
- continue;
- }
- }
-
- if (xdp_prog && !meta.portid) {
- void *orig_data = rxbuf->frag + pkt_off;
- unsigned int dma_off;
- int act;
-
- xdp_prepare_buff(&xdp,
- rxbuf->frag + NFP_NET_RX_BUF_HEADROOM,
- pkt_off - NFP_NET_RX_BUF_HEADROOM,
- pkt_len, true);
-
- act = bpf_prog_run_xdp(xdp_prog, &xdp);
-
- pkt_len = xdp.data_end - xdp.data;
- pkt_off += xdp.data - orig_data;
-
- switch (act) {
- case XDP_PASS:
- meta_len_xdp = xdp.data - xdp.data_meta;
- break;
- case XDP_TX:
- dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
- if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring,
- tx_ring, rxbuf,
- dma_off,
- pkt_len,
- &xdp_tx_cmpl)))
- trace_xdp_exception(dp->netdev,
- xdp_prog, act);
- continue;
- default:
- bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
- fallthrough;
- case XDP_ABORTED:
- trace_xdp_exception(dp->netdev, xdp_prog, act);
- fallthrough;
- case XDP_DROP:
- nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
- rxbuf->dma_addr);
- continue;
- }
- }
-
- if (likely(!meta.portid)) {
- netdev = dp->netdev;
- } else if (meta.portid == NFP_META_PORT_ID_CTRL) {
- struct nfp_net *nn = netdev_priv(dp->netdev);
-
- nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
- pkt_len);
- nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
- rxbuf->dma_addr);
- continue;
- } else {
- struct nfp_net *nn;
-
- nn = netdev_priv(dp->netdev);
- netdev = nfp_app_dev_get(nn->app, meta.portid,
- &redir_egress);
- if (unlikely(!netdev)) {
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
- NULL);
- continue;
- }
-
- if (nfp_netdev_is_nfp_repr(netdev))
- nfp_repr_inc_rx_stats(netdev, pkt_len);
- }
-
- skb = build_skb(rxbuf->frag, true_bufsz);
- if (unlikely(!skb)) {
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
- continue;
- }
- new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
- if (unlikely(!new_frag)) {
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
- continue;
- }
-
- nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
-
- nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
-
- skb_reserve(skb, pkt_off);
- skb_put(skb, pkt_len);
-
- skb->mark = meta.mark;
- skb_set_hash(skb, meta.hash, meta.hash_type);
-
- skb_record_rx_queue(skb, rx_ring->idx);
- skb->protocol = eth_type_trans(skb, netdev);
-
- nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb);
-
-#ifdef CONFIG_TLS_DEVICE
- if (rxd->rxd.flags & PCIE_DESC_RX_DECRYPTED) {
- skb->decrypted = true;
- u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->hw_tls_rx++;
- u64_stats_update_end(&r_vec->rx_sync);
- }
-#endif
-
- if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- le16_to_cpu(rxd->rxd.vlan));
- if (meta_len_xdp)
- skb_metadata_set(skb, meta_len_xdp);
-
- if (likely(!redir_egress)) {
- napi_gro_receive(&rx_ring->r_vec->napi, skb);
- } else {
- skb->dev = netdev;
- skb_reset_network_header(skb);
- __skb_push(skb, ETH_HLEN);
- dev_queue_xmit(skb);
- }
- }
-
- if (xdp_prog) {
- if (tx_ring->wr_ptr_add)
- nfp_net_tx_xmit_more_flush(tx_ring);
- else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
- !xdp_tx_cmpl)
- if (!nfp_net_xdp_complete(tx_ring))
- pkts_polled = budget;
- }
-
- return pkts_polled;
-}
-
-/**
- * nfp_net_poll() - napi poll function
- * @napi: NAPI structure
- * @budget: NAPI budget
- *
- * Return: number of packets polled.
- */
-static int nfp_net_poll(struct napi_struct *napi, int budget)
-{
- struct nfp_net_r_vector *r_vec =
- container_of(napi, struct nfp_net_r_vector, napi);
- unsigned int pkts_polled = 0;
-
- if (r_vec->tx_ring)
- nfp_net_tx_complete(r_vec->tx_ring, budget);
- if (r_vec->rx_ring)
- pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
-
- if (pkts_polled < budget)
- if (napi_complete_done(napi, pkts_polled))
- nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
-
- if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
- struct dim_sample dim_sample = {};
- unsigned int start;
- u64 pkts, bytes;
-
- do {
- start = u64_stats_fetch_begin(&r_vec->rx_sync);
- pkts = r_vec->rx_pkts;
- bytes = r_vec->rx_bytes;
- } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
-
- dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
- net_dim(&r_vec->rx_dim, dim_sample);
- }
-
- if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
- struct dim_sample dim_sample = {};
- unsigned int start;
- u64 pkts, bytes;
-
- do {
- start = u64_stats_fetch_begin(&r_vec->tx_sync);
- pkts = r_vec->tx_pkts;
- bytes = r_vec->tx_bytes;
- } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
-
- dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
- net_dim(&r_vec->tx_dim, dim_sample);
- }
-
- return pkts_polled;
-}
-
-/* Control device data path
- */
-
-static bool
-nfp_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
- struct sk_buff *skb, bool old)
-{
- unsigned int real_len = skb->len, meta_len = 0;
- struct nfp_net_tx_ring *tx_ring;
- struct nfp_net_tx_buf *txbuf;
- struct nfp_net_tx_desc *txd;
- struct nfp_net_dp *dp;
- dma_addr_t dma_addr;
- int wr_idx;
-
- dp = &r_vec->nfp_net->dp;
- tx_ring = r_vec->tx_ring;
-
- if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
- nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
- goto err_free;
- }
-
- if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
- u64_stats_update_begin(&r_vec->tx_sync);
- r_vec->tx_busy++;
- u64_stats_update_end(&r_vec->tx_sync);
- if (!old)
- __skb_queue_tail(&r_vec->queue, skb);
- else
- __skb_queue_head(&r_vec->queue, skb);
- return true;
- }
-
- if (nfp_app_ctrl_has_meta(nn->app)) {
- if (unlikely(skb_headroom(skb) < 8)) {
- nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
- goto err_free;
- }
- meta_len = 8;
- put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
- put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));
- }
-
- /* Start with the head skbuf */
- dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
- DMA_TO_DEVICE);
- if (dma_mapping_error(dp->dev, dma_addr))
- goto err_dma_warn;
-
- wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
-
- /* Stash the soft descriptor of the head then initialize it */
- txbuf = &tx_ring->txbufs[wr_idx];
- txbuf->skb = skb;
- txbuf->dma_addr = dma_addr;
- txbuf->fidx = -1;
- txbuf->pkt_cnt = 1;
- txbuf->real_len = real_len;
-
- /* Build TX descriptor */
- txd = &tx_ring->txds[wr_idx];
- txd->offset_eop = meta_len | PCIE_DESC_TX_EOP;
- txd->dma_len = cpu_to_le16(skb_headlen(skb));
- nfp_desc_set_dma_addr(txd, dma_addr);
- txd->data_len = cpu_to_le16(skb->len);
-
- txd->flags = 0;
- txd->mss = 0;
- txd->lso_hdrlen = 0;
-
- tx_ring->wr_p++;
- tx_ring->wr_ptr_add++;
- nfp_net_tx_xmit_more_flush(tx_ring);
-
- return false;
-
-err_dma_warn:
- nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n");
-err_free:
- u64_stats_update_begin(&r_vec->tx_sync);
- r_vec->tx_errors++;
- u64_stats_update_end(&r_vec->tx_sync);
- dev_kfree_skb_any(skb);
- return false;
-}
-
-bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
-{
- struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
-
- return nfp_ctrl_tx_one(nn, r_vec, skb, false);
-}
-
-bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
-{
- struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
- bool ret;
-
- spin_lock_bh(&r_vec->lock);
- ret = nfp_ctrl_tx_one(nn, r_vec, skb, false);
- spin_unlock_bh(&r_vec->lock);
-
- return ret;
-}
-
-static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
-{
- struct sk_buff *skb;
-
- while ((skb = __skb_dequeue(&r_vec->queue)))
- if (nfp_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
- return;
-}
-
-static bool
-nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
-{
- u32 meta_type, meta_tag;
-
- if (!nfp_app_ctrl_has_meta(nn->app))
- return !meta_len;
-
- if (meta_len != 8)
- return false;
-
- meta_type = get_unaligned_be32(data);
- meta_tag = get_unaligned_be32(data + 4);
-
- return (meta_type == NFP_NET_META_PORTID &&
- meta_tag == NFP_META_PORT_ID_CTRL);
-}
-
-static bool
-nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
- struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
-{
- unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
- struct nfp_net_rx_buf *rxbuf;
- struct nfp_net_rx_desc *rxd;
- dma_addr_t new_dma_addr;
- struct sk_buff *skb;
- void *new_frag;
- int idx;
-
- idx = D_IDX(rx_ring, rx_ring->rd_p);
-
- rxd = &rx_ring->rxds[idx];
- if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
- return false;
-
- /* Memory barrier to ensure that we won't do other reads
- * before the DD bit.
- */
- dma_rmb();
-
- rx_ring->rd_p++;
-
- rxbuf = &rx_ring->rxbufs[idx];
- meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
- data_len = le16_to_cpu(rxd->rxd.data_len);
- pkt_len = data_len - meta_len;
-
- pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
- if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
- pkt_off += meta_len;
- else
- pkt_off += dp->rx_offset;
- meta_off = pkt_off - meta_len;
-
- /* Stats update */
- u64_stats_update_begin(&r_vec->rx_sync);
- r_vec->rx_pkts++;
- r_vec->rx_bytes += pkt_len;
- u64_stats_update_end(&r_vec->rx_sync);
-
- nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);
-
- if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
- nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
- meta_len);
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
- return true;
- }
-
- skb = build_skb(rxbuf->frag, dp->fl_bufsz);
- if (unlikely(!skb)) {
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
- return true;
- }
- new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
- if (unlikely(!new_frag)) {
- nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
- return true;
- }
-
- nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
-
- nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
-
- skb_reserve(skb, pkt_off);
- skb_put(skb, pkt_len);
-
- nfp_app_ctrl_rx(nn->app, skb);
-
- return true;
-}
+ unsigned int fl_bufsz;
-static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
-{
- struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
- struct nfp_net *nn = r_vec->nfp_net;
- struct nfp_net_dp *dp = &nn->dp;
- unsigned int budget = 512;
+ fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
+ fl_bufsz += dp->rx_dma_off;
+ fl_bufsz += nfp_net_calc_fl_bufsz_data(dp);
- while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
- continue;
+ fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
+ fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- return budget;
+ return fl_bufsz;
}
-static void nfp_ctrl_poll(struct tasklet_struct *t)
+static unsigned int nfp_net_calc_fl_bufsz_xsk(struct nfp_net_dp *dp)
{
- struct nfp_net_r_vector *r_vec = from_tasklet(r_vec, t, tasklet);
+ unsigned int fl_bufsz;
- spin_lock(&r_vec->lock);
- nfp_net_tx_complete(r_vec->tx_ring, 0);
- __nfp_ctrl_tx_queued(r_vec);
- spin_unlock(&r_vec->lock);
+ fl_bufsz = XDP_PACKET_HEADROOM;
+ fl_bufsz += nfp_net_calc_fl_bufsz_data(dp);
- if (nfp_ctrl_rx(r_vec)) {
- nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
- } else {
- tasklet_schedule(&r_vec->tasklet);
- nn_dp_warn(&r_vec->nfp_net->dp,
- "control message budget exceeded!\n");
- }
+ return fl_bufsz;
}
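For orientation, a rough worked example of the split above (not part of the patch; it assumes a 1500-byte MTU, the fixed 32-byte RX offset rather than the dynamic prepend, and an rx_dma_off of 0):

/* nfp_net_calc_fl_bufsz_data(): 32 + ETH_HLEN (14) + VLAN_HLEN * 2 (8) + 1500 = 1554 bytes
 * nfp_net_calc_fl_bufsz():      SKB_DATA_ALIGN(NFP_NET_RX_BUF_HEADROOM + 1554)
 *                               + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 * nfp_net_calc_fl_bufsz_xsk():  XDP_PACKET_HEADROOM + 1554, with no skb_shared_info tail;
 *                               this is the minimum XSK chunk size enforced later in
 *                               nfp_net_check_config().
 */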
/* Setup and Configuration
@@ -2371,7 +754,7 @@ static void nfp_net_vecs_init(struct nfp_net *nn)
__skb_queue_head_init(&r_vec->queue);
spin_lock_init(&r_vec->lock);
- tasklet_setup(&r_vec->tasklet, nfp_ctrl_poll);
+ tasklet_setup(&r_vec->tasklet, nn->dp.ops->ctrl_poll);
tasklet_disable(&r_vec->tasklet);
}
@@ -2379,263 +762,25 @@ static void nfp_net_vecs_init(struct nfp_net *nn)
}
}
-/**
- * nfp_net_tx_ring_free() - Free resources allocated to a TX ring
- * @tx_ring: TX ring to free
- */
-static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
-{
- struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
-
- kvfree(tx_ring->txbufs);
-
- if (tx_ring->txds)
- dma_free_coherent(dp->dev, tx_ring->size,
- tx_ring->txds, tx_ring->dma);
-
- tx_ring->cnt = 0;
- tx_ring->txbufs = NULL;
- tx_ring->txds = NULL;
- tx_ring->dma = 0;
- tx_ring->size = 0;
-}
-
-/**
- * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
- * @dp: NFP Net data path struct
- * @tx_ring: TX Ring structure to allocate
- *
- * Return: 0 on success, negative errno otherwise.
- */
-static int
-nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
-{
- struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
-
- tx_ring->cnt = dp->txd_cnt;
-
- tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
- tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size,
- &tx_ring->dma,
- GFP_KERNEL | __GFP_NOWARN);
- if (!tx_ring->txds) {
- netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
- tx_ring->cnt);
- goto err_alloc;
- }
-
- tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs),
- GFP_KERNEL);
- if (!tx_ring->txbufs)
- goto err_alloc;
-
- if (!tx_ring->is_xdp && dp->netdev)
- netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
- tx_ring->idx);
-
- return 0;
-
-err_alloc:
- nfp_net_tx_ring_free(tx_ring);
- return -ENOMEM;
-}
-
static void
-nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
- struct nfp_net_tx_ring *tx_ring)
-{
- unsigned int i;
-
- if (!tx_ring->is_xdp)
- return;
-
- for (i = 0; i < tx_ring->cnt; i++) {
- if (!tx_ring->txbufs[i].frag)
- return;
-
- nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr);
- __free_page(virt_to_page(tx_ring->txbufs[i].frag));
- }
-}
-
-static int
-nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
- struct nfp_net_tx_ring *tx_ring)
-{
- struct nfp_net_tx_buf *txbufs = tx_ring->txbufs;
- unsigned int i;
-
- if (!tx_ring->is_xdp)
- return 0;
-
- for (i = 0; i < tx_ring->cnt; i++) {
- txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr);
- if (!txbufs[i].frag) {
- nfp_net_tx_ring_bufs_free(dp, tx_ring);
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-static int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
-{
- unsigned int r;
-
- dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
- GFP_KERNEL);
- if (!dp->tx_rings)
- return -ENOMEM;
-
- for (r = 0; r < dp->num_tx_rings; r++) {
- int bias = 0;
-
- if (r >= dp->num_stack_tx_rings)
- bias = dp->num_stack_tx_rings;
-
- nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias],
- r, bias);
-
- if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r]))
- goto err_free_prev;
-
- if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r]))
- goto err_free_ring;
- }
-
- return 0;
-
-err_free_prev:
- while (r--) {
- nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
-err_free_ring:
- nfp_net_tx_ring_free(&dp->tx_rings[r]);
- }
- kfree(dp->tx_rings);
- return -ENOMEM;
-}
-
-static void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
-{
- unsigned int r;
-
- for (r = 0; r < dp->num_tx_rings; r++) {
- nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
- nfp_net_tx_ring_free(&dp->tx_rings[r]);
- }
-
- kfree(dp->tx_rings);
-}
-
-/**
- * nfp_net_rx_ring_free() - Free resources allocated to a RX ring
- * @rx_ring: RX ring to free
- */
-static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
+nfp_net_napi_add(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, int idx)
{
- struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
- struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
-
if (dp->netdev)
- xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
- kvfree(rx_ring->rxbufs);
-
- if (rx_ring->rxds)
- dma_free_coherent(dp->dev, rx_ring->size,
- rx_ring->rxds, rx_ring->dma);
-
- rx_ring->cnt = 0;
- rx_ring->rxbufs = NULL;
- rx_ring->rxds = NULL;
- rx_ring->dma = 0;
- rx_ring->size = 0;
-}
-
-/**
- * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
- * @dp: NFP Net data path struct
- * @rx_ring: RX ring to allocate
- *
- * Return: 0 on success, negative errno otherwise.
- */
-static int
-nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
-{
- int err;
-
- if (dp->netdev) {
- err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
- rx_ring->idx, rx_ring->r_vec->napi.napi_id);
- if (err < 0)
- return err;
- }
-
- rx_ring->cnt = dp->rxd_cnt;
- rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
- rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
- &rx_ring->dma,
- GFP_KERNEL | __GFP_NOWARN);
- if (!rx_ring->rxds) {
- netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
- rx_ring->cnt);
- goto err_alloc;
- }
-
- rx_ring->rxbufs = kvcalloc(rx_ring->cnt, sizeof(*rx_ring->rxbufs),
- GFP_KERNEL);
- if (!rx_ring->rxbufs)
- goto err_alloc;
-
- return 0;
-
-err_alloc:
- nfp_net_rx_ring_free(rx_ring);
- return -ENOMEM;
-}
-
-static int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
-{
- unsigned int r;
-
- dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
- GFP_KERNEL);
- if (!dp->rx_rings)
- return -ENOMEM;
-
- for (r = 0; r < dp->num_rx_rings; r++) {
- nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);
-
- if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
- goto err_free_prev;
-
- if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
- goto err_free_ring;
- }
-
- return 0;
-
-err_free_prev:
- while (r--) {
- nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
-err_free_ring:
- nfp_net_rx_ring_free(&dp->rx_rings[r]);
- }
- kfree(dp->rx_rings);
- return -ENOMEM;
+ netif_napi_add(dp->netdev, &r_vec->napi,
+ nfp_net_has_xsk_pool_slow(dp, idx) ?
+ dp->ops->xsk_poll : dp->ops->poll,
+ NAPI_POLL_WEIGHT);
+ else
+ tasklet_enable(&r_vec->tasklet);
}
-static void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
+static void
+nfp_net_napi_del(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec)
{
- unsigned int r;
-
- for (r = 0; r < dp->num_rx_rings; r++) {
- nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
- nfp_net_rx_ring_free(&dp->rx_rings[r]);
- }
-
- kfree(dp->rx_rings);
+ if (dp->netdev)
+ netif_napi_del(&r_vec->napi);
+ else
+ tasklet_disable(&r_vec->tasklet);
}
static void
@@ -2648,6 +793,17 @@ nfp_net_vector_assign_rings(struct nfp_net_dp *dp,
r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ?
&dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL;
+
+ if (nfp_net_has_xsk_pool_slow(dp, idx) || r_vec->xsk_pool) {
+ r_vec->xsk_pool = dp->xdp_prog ? dp->xsk_pools[idx] : NULL;
+
+ if (r_vec->xsk_pool)
+ xsk_pool_set_rxq_info(r_vec->xsk_pool,
+ &r_vec->rx_ring->xdp_rxq);
+
+ nfp_net_napi_del(dp, r_vec);
+ nfp_net_napi_add(dp, r_vec, idx);
+ }
}
static int
@@ -2656,23 +812,14 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
{
int err;
- /* Setup NAPI */
- if (nn->dp.netdev)
- netif_napi_add(nn->dp.netdev, &r_vec->napi,
- nfp_net_poll, NAPI_POLL_WEIGHT);
- else
- tasklet_enable(&r_vec->tasklet);
+ nfp_net_napi_add(&nn->dp, r_vec, idx);
snprintf(r_vec->name, sizeof(r_vec->name),
"%s-rxtx-%d", nfp_net_name(nn), idx);
err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
r_vec);
if (err) {
- if (nn->dp.netdev)
- netif_napi_del(&r_vec->napi);
- else
- tasklet_disable(&r_vec->tasklet);
-
+ nfp_net_napi_del(&nn->dp, r_vec);
nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
return err;
}
@@ -2690,11 +837,7 @@ static void
nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
{
irq_set_affinity_hint(r_vec->irq_vector, NULL);
- if (nn->dp.netdev)
- netif_napi_del(&r_vec->napi);
- else
- tasklet_disable(&r_vec->tasklet);
-
+ nfp_net_napi_del(&nn->dp, r_vec);
free_irq(r_vec->irq_vector, r_vec);
}
@@ -2768,17 +911,6 @@ static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *addr)
nn_writew(nn, NFP_NET_CFG_MACADDR + 6, get_unaligned_be16(addr + 4));
}
-static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
-{
- nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
- nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
- nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);
-
- nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
- nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
- nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
-}
-
/**
* nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
* @nn: NFP Net device to reconfigure
@@ -2808,8 +940,11 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
if (err)
nn_err(nn, "Could not disable device: %d\n", err);
- for (r = 0; r < nn->dp.num_rx_rings; r++)
+ for (r = 0; r < nn->dp.num_rx_rings; r++) {
nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]);
+ if (nfp_net_has_xsk_pool_slow(&nn->dp, nn->dp.rx_rings[r].idx))
+ nfp_net_xsk_rx_bufs_free(&nn->dp.rx_rings[r]);
+ }
for (r = 0; r < nn->dp.num_tx_rings; r++)
nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]);
for (r = 0; r < nn->dp.num_r_vecs; r++)
@@ -2818,25 +953,6 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
nn->dp.ctrl = new_ctrl;
}
-static void
-nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
- struct nfp_net_rx_ring *rx_ring, unsigned int idx)
-{
- /* Write the DMA address, size and MSI-X info to the device */
- nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
- nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
- nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
-}
-
-static void
-nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
- struct nfp_net_tx_ring *tx_ring, unsigned int idx)
-{
- nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
- nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
- nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
-}
-
/**
* nfp_net_set_config_and_enable() - Write control BAR and enable NFP
* @nn: NFP Net device to reconfigure
@@ -2866,11 +982,11 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn)
for (r = 0; r < nn->dp.num_rx_rings; r++)
nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r);
- nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->dp.num_tx_rings == 64 ?
- 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_tx_rings) - 1);
+ nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE,
+ U64_MAX >> (64 - nn->dp.num_tx_rings));
- nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ?
- 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1);
+ nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE,
+ U64_MAX >> (64 - nn->dp.num_rx_rings));
if (nn->dp.netdev)
nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
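The simplified mask is equivalent to the old special-cased expression for every valid ring count; a quick sanity sketch (not part of the patch):

/* U64_MAX >> (64 - n) leaves the low n bits set:
 *   n = 4  -> 0x000000000000000f == ((u64)1 << 4) - 1
 *   n = 64 -> 0xffffffffffffffff == the old num_*_rings == 64 special case
 * The shift would be undefined for n == 0, but at least one TX and one RX ring
 * is always configured by the time this path runs.
 */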
@@ -3296,20 +1412,39 @@ struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)
*new = nn->dp;
+ new->xsk_pools = kmemdup(new->xsk_pools,
+ array_size(nn->max_r_vecs,
+ sizeof(new->xsk_pools)),
+ GFP_KERNEL);
+ if (!new->xsk_pools) {
+ kfree(new);
+ return NULL;
+ }
+
/* Clear things which need to be recomputed */
new->fl_bufsz = 0;
new->tx_rings = NULL;
new->rx_rings = NULL;
new->num_r_vecs = 0;
new->num_stack_tx_rings = 0;
+ new->txrwb = NULL;
+ new->txrwb_dma = 0;
return new;
}
+static void nfp_net_free_dp(struct nfp_net_dp *dp)
+{
+ kfree(dp->xsk_pools);
+ kfree(dp);
+}
+
static int
nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp,
struct netlink_ext_ack *extack)
{
+ unsigned int r, xsk_min_fl_bufsz;
+
/* XDP-enabled tests */
if (!dp->xdp_prog)
return 0;
@@ -3322,6 +1457,18 @@ nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp,
return -EINVAL;
}
+ xsk_min_fl_bufsz = nfp_net_calc_fl_bufsz_xsk(dp);
+ for (r = 0; r < nn->max_r_vecs; r++) {
+ if (!dp->xsk_pools[r])
+ continue;
+
+ if (xsk_pool_get_rx_frame_size(dp->xsk_pools[r]) < xsk_min_fl_bufsz) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "XSK buffer pool chunk size too small");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -3389,7 +1536,7 @@ int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp,
nfp_net_open_stack(nn);
exit_free_dp:
- kfree(dp);
+ nfp_net_free_dp(dp);
return err;
@@ -3398,7 +1545,7 @@ err_free_rx:
err_cleanup_vecs:
for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
- kfree(dp);
+ nfp_net_free_dp(dp);
return err;
}
@@ -3716,6 +1863,9 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
return nfp_net_xdp_setup_drv(nn, xdp);
case XDP_SETUP_PROG_HW:
return nfp_net_xdp_setup_hw(nn, xdp);
+ case XDP_SETUP_XSK_POOL:
+ return nfp_net_xsk_setup_pool(netdev, xdp->xsk.pool,
+ xdp->xsk.queue_id);
default:
return nfp_app_bpf(nn->app, nn, xdp);
}
@@ -3742,7 +1892,35 @@ static int nfp_net_set_mac_address(struct net_device *netdev, void *addr)
return 0;
}
-const struct net_device_ops nfp_net_netdev_ops = {
+const struct net_device_ops nfp_nfd3_netdev_ops = {
+ .ndo_init = nfp_app_ndo_init,
+ .ndo_uninit = nfp_app_ndo_uninit,
+ .ndo_open = nfp_net_netdev_open,
+ .ndo_stop = nfp_net_netdev_close,
+ .ndo_start_xmit = nfp_net_tx,
+ .ndo_get_stats64 = nfp_net_stat64,
+ .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
+ .ndo_set_vf_mac = nfp_app_set_vf_mac,
+ .ndo_set_vf_vlan = nfp_app_set_vf_vlan,
+ .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
+ .ndo_set_vf_trust = nfp_app_set_vf_trust,
+ .ndo_get_vf_config = nfp_app_get_vf_config,
+ .ndo_set_vf_link_state = nfp_app_set_vf_link_state,
+ .ndo_setup_tc = nfp_port_setup_tc,
+ .ndo_tx_timeout = nfp_net_tx_timeout,
+ .ndo_set_rx_mode = nfp_net_set_rx_mode,
+ .ndo_change_mtu = nfp_net_change_mtu,
+ .ndo_set_mac_address = nfp_net_set_mac_address,
+ .ndo_set_features = nfp_net_set_features,
+ .ndo_features_check = nfp_net_features_check,
+ .ndo_get_phys_port_name = nfp_net_get_phys_port_name,
+ .ndo_bpf = nfp_net_xdp,
+ .ndo_xsk_wakeup = nfp_net_xsk_wakeup,
+ .ndo_get_devlink_port = nfp_devlink_get_devlink_port,
+};
+
+const struct net_device_ops nfp_nfdk_netdev_ops = {
.ndo_init = nfp_app_ndo_init,
.ndo_uninit = nfp_app_ndo_uninit,
.ndo_open = nfp_net_netdev_open,
@@ -3811,10 +1989,10 @@ void nfp_net_info(struct nfp_net *nn)
nn->dp.num_tx_rings, nn->max_tx_rings,
nn->dp.num_rx_rings, nn->max_rx_rings);
nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
- nn->fw_ver.resv, nn->fw_ver.class,
+ nn->fw_ver.extend, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor,
nn->max_mtu);
- nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
nn->cap,
nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
@@ -3832,6 +2010,7 @@ void nfp_net_info(struct nfp_net *nn)
nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "",
nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
+ nn->cap & NFP_NET_CFG_CTRL_TXRWB ? "TXRWB " : "",
nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "",
nn->cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ?
@@ -3843,6 +2022,7 @@ void nfp_net_info(struct nfp_net *nn)
/**
* nfp_net_alloc() - Allocate netdev and related structure
* @pdev: PCI device
+ * @dev_info: NFP ASIC params
* @ctrl_bar: PCI IOMEM with vNIC config memory
* @needs_netdev: Whether to allocate a netdev for this vNIC
* @max_tx_rings: Maximum number of TX rings supported by device
@@ -3855,7 +2035,8 @@ void nfp_net_info(struct nfp_net *nn)
* Return: NFP Net device structure, or ERR_PTR on error.
*/
struct nfp_net *
-nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
+nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
+ void __iomem *ctrl_bar, bool needs_netdev,
unsigned int max_tx_rings, unsigned int max_rx_rings)
{
struct nfp_net *nn;
@@ -3880,7 +2061,28 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
nn->dp.dev = &pdev->dev;
nn->dp.ctrl_bar = ctrl_bar;
+ nn->dev_info = dev_info;
nn->pdev = pdev;
+ nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
+
+ switch (FIELD_GET(NFP_NET_CFG_VERSION_DP_MASK, nn->fw_ver.extend)) {
+ case NFP_NET_CFG_VERSION_DP_NFD3:
+ nn->dp.ops = &nfp_nfd3_ops;
+ break;
+ case NFP_NET_CFG_VERSION_DP_NFDK:
+ if (nn->fw_ver.major < 5) {
+ dev_err(&pdev->dev,
+ "NFDK must use ABI 5 or newer, found: %d\n",
+ nn->fw_ver.major);
+ err = -EINVAL;
+ goto err_free_nn;
+ }
+ nn->dp.ops = &nfp_nfdk_ops;
+ break;
+ default:
+ err = -EINVAL;
+ goto err_free_nn;
+ }
nn->max_tx_rings = max_tx_rings;
nn->max_rx_rings = max_rx_rings;
@@ -3893,6 +2095,14 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings);
nn->dp.num_r_vecs = min_t(unsigned int,
nn->dp.num_r_vecs, num_online_cpus());
+ nn->max_r_vecs = nn->dp.num_r_vecs;
+
+ nn->dp.xsk_pools = kcalloc(nn->max_r_vecs, sizeof(nn->dp.xsk_pools),
+ GFP_KERNEL);
+ if (!nn->dp.xsk_pools) {
+ err = -ENOMEM;
+ goto err_free_nn;
+ }
nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
@@ -3932,6 +2142,7 @@ void nfp_net_free(struct nfp_net *nn)
WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
nfp_ccm_mbox_free(nn);
+ kfree(nn->dp.xsk_pools);
if (nn->dp.netdev)
free_netdev(nn->dp.netdev);
else
@@ -4090,7 +2301,15 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
/* Finalise the netdev setup */
- netdev->netdev_ops = &nfp_net_netdev_ops;
+ switch (nn->dp.ops->version) {
+ case NFP_NFD_VER_NFD3:
+ netdev->netdev_ops = &nfp_nfd3_netdev_ops;
+ break;
+ case NFP_NFD_VER_NFDK:
+ netdev->netdev_ops = &nfp_nfdk_netdev_ops;
+ break;
+ }
+
netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
/* MTU range: 68 - hw-specific max */
@@ -4138,6 +2357,9 @@ static int nfp_net_read_caps(struct nfp_net *nn)
nn->dp.rx_offset = NFP_NET_RX_OFFSET;
}
+ /* Mask out NFD-version-specific features */
+ nn->cap &= nn->dp.ops->cap_mask;
+
/* For control vNICs mask out the capabilities app doesn't want. */
if (!nn->dp.netdev)
nn->cap &= nn->app->type->ctrl_cap_mask;
@@ -4190,6 +2412,10 @@ int nfp_net_init(struct nfp_net *nn)
nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
}
+ /* Enable TX pointer writeback, if supported */
+ if (nn->cap & NFP_NET_CFG_CTRL_TXRWB)
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXRWB;
+
/* Stash the re-configuration queue away. First odd queue in TX Bar */
nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 3d61a8cb60b0..8892a94f00c3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -1,8 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
-/*
- * nfp_net_ctrl.h
+/* nfp_net_ctrl.h
* Netronome network device driver: Control BAR layout
* Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
* Jason McMullan <jason.mcmullan@netronome.com>
@@ -15,30 +14,24 @@
#include <linux/types.h>
-/**
- * Configuration BAR size.
+/* Configuration BAR size.
*
* The configuration BAR is 8K in size, but due to
* THB-350, 32k needs to be reserved.
*/
#define NFP_NET_CFG_BAR_SZ (32 * 1024)
-/**
- * Offset in Freelist buffer where packet starts on RX
- */
+/* Offset in Freelist buffer where packet starts on RX */
#define NFP_NET_RX_OFFSET 32
-/**
- * LSO parameters
+/* LSO parameters
* %NFP_NET_LSO_MAX_HDR_SZ: Maximum header size supported for LSO frames
* %NFP_NET_LSO_MAX_SEGS: Maximum number of segments LSO frame can produce
*/
#define NFP_NET_LSO_MAX_HDR_SZ 255
#define NFP_NET_LSO_MAX_SEGS 64
-/**
- * Prepend field types
- */
+/* Prepend field types */
#define NFP_NET_META_FIELD_SIZE 4
#define NFP_NET_META_HASH 1 /* next field carries hash type */
#define NFP_NET_META_MARK 2
@@ -49,9 +42,7 @@
#define NFP_META_PORT_ID_CTRL ~0U
-/**
- * Hash type pre-pended when a RSS hash was computed
- */
+/* Hash type pre-pended when a RSS hash was computed */
#define NFP_NET_RSS_NONE 0
#define NFP_NET_RSS_IPV4 1
#define NFP_NET_RSS_IPV6 2
@@ -63,16 +54,14 @@
#define NFP_NET_RSS_IPV6_UDP 8
#define NFP_NET_RSS_IPV6_EX_UDP 9
-/**
- * Ring counts
+/* Ring counts
* %NFP_NET_TXR_MAX: Maximum number of TX rings
* %NFP_NET_RXR_MAX: Maximum number of RX rings
*/
#define NFP_NET_TXR_MAX 64
#define NFP_NET_RXR_MAX 64
-/**
- * Read/Write config words (0x0000 - 0x002c)
+/* Read/Write config words (0x0000 - 0x002c)
* %NFP_NET_CFG_CTRL: Global control
* %NFP_NET_CFG_UPDATE: Indicate which fields are updated
* %NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings
@@ -103,7 +92,6 @@
#define NFP_NET_CFG_CTRL_RINGCFG (0x1 << 16) /* Ring runtime changes */
#define NFP_NET_CFG_CTRL_RSS (0x1 << 17) /* RSS (version 1) */
#define NFP_NET_CFG_CTRL_IRQMOD (0x1 << 18) /* Interrupt moderation */
-#define NFP_NET_CFG_CTRL_RINGPRIO (0x1 << 19) /* Ring priorities */
#define NFP_NET_CFG_CTRL_MSIXAUTO (0x1 << 20) /* MSI-X auto-masking */
#define NFP_NET_CFG_CTRL_TXRWB (0x1 << 21) /* Write-back of TX ring*/
#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* VXLAN tunnel support */
@@ -147,8 +135,7 @@
#define NFP_NET_CFG_LSC 0x0020
#define NFP_NET_CFG_MACADDR 0x0024
-/**
- * Read-only words (0x0030 - 0x0050):
+/* Read-only words (0x0030 - 0x0050):
* %NFP_NET_CFG_VERSION: Firmware version number
* %NFP_NET_CFG_STS: Status
* %NFP_NET_CFG_CAP: Capabilities (same bits as %NFP_NET_CFG_CTRL)
@@ -162,7 +149,10 @@
* - define more STS bits
*/
#define NFP_NET_CFG_VERSION 0x0030
-#define NFP_NET_CFG_VERSION_RESERVED_MASK (0xff << 24)
+#define NFP_NET_CFG_VERSION_RESERVED_MASK (0xfe << 24)
+#define NFP_NET_CFG_VERSION_DP_NFD3 0
+#define NFP_NET_CFG_VERSION_DP_NFDK 1
+#define NFP_NET_CFG_VERSION_DP_MASK 1
#define NFP_NET_CFG_VERSION_CLASS_MASK (0xff << 16)
#define NFP_NET_CFG_VERSION_CLASS(x) (((x) & 0xff) << 16)
#define NFP_NET_CFG_VERSION_CLASS_GENERIC 0
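For reference (not part of the patch), the repurposed bit sits at bit 24 of the VERSION word, which is why the reserved mask shrinks from 0xff << 24 to 0xfe << 24:

/* fw_ver.extend is the former "resv" byte (bits 31:24 of NFP_NET_CFG_VERSION).
 * Its low bit now selects the descriptor format and is consumed earlier in this
 * patch as FIELD_GET(NFP_NET_CFG_VERSION_DP_MASK, nn->fw_ver.extend):
 *   0 -> NFD3 datapath (nfp_nfd3_ops)
 *   1 -> NFDK datapath (nfp_nfdk_ops, requires ABI major >= 5)
 */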
@@ -193,36 +183,31 @@
#define NFP_NET_CFG_START_TXQ 0x0048
#define NFP_NET_CFG_START_RXQ 0x004c
-/**
- * Prepend configuration
+/* Prepend configuration
*/
#define NFP_NET_CFG_RX_OFFSET 0x0050
#define NFP_NET_CFG_RX_OFFSET_DYNAMIC 0 /* Prepend mode */
-/**
- * RSS capabilities
+/* RSS capabilities
* %NFP_NET_CFG_RSS_CAP_HFUNC: supported hash functions (same bits as
* %NFP_NET_CFG_RSS_HFUNC)
*/
#define NFP_NET_CFG_RSS_CAP 0x0054
#define NFP_NET_CFG_RSS_CAP_HFUNC 0xff000000
-/**
- * TLV area start
+/* TLV area start
* %NFP_NET_CFG_TLV_BASE: start anchor of the TLV area
*/
#define NFP_NET_CFG_TLV_BASE 0x0058
-/**
- * VXLAN/UDP encap configuration
+/* VXLAN/UDP encap configuration
* %NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports
* %NFP_NET_CFG_VXLAN_SZ: Size of the UDP port table in bytes
*/
#define NFP_NET_CFG_VXLAN_PORT 0x0060
#define NFP_NET_CFG_VXLAN_SZ 0x0008
-/**
- * BPF section
+/* BPF section
* %NFP_NET_CFG_BPF_ABI: BPF ABI version
* %NFP_NET_CFG_BPF_CAP: BPF capabilities
* %NFP_NET_CFG_BPF_MAX_LEN: Maximum size of JITed BPF code in bytes
@@ -247,14 +232,12 @@
#define NFP_NET_CFG_BPF_CFG_MASK 7ULL
#define NFP_NET_CFG_BPF_ADDR_MASK (~NFP_NET_CFG_BPF_CFG_MASK)
-/**
- * 40B reserved for future use (0x0098 - 0x00c0)
+/* 40B reserved for future use (0x0098 - 0x00c0)
*/
#define NFP_NET_CFG_RESERVED 0x0098
#define NFP_NET_CFG_RESERVED_SZ 0x0028
-/**
- * RSS configuration (0x0100 - 0x01ac):
+/* RSS configuration (0x0100 - 0x01ac):
* Used only when NFP_NET_CFG_CTRL_RSS is enabled
* %NFP_NET_CFG_RSS_CFG: RSS configuration word
* %NFP_NET_CFG_RSS_KEY: RSS "secret" key
@@ -281,8 +264,7 @@
NFP_NET_CFG_RSS_KEY_SZ)
#define NFP_NET_CFG_RSS_ITBL_SZ 0x80
-/**
- * TX ring configuration (0x200 - 0x800)
+/* TX ring configuration (0x200 - 0x800)
* %NFP_NET_CFG_TXR_BASE: Base offset for TX ring configuration
* %NFP_NET_CFG_TXR_ADDR: Per TX ring DMA address (8B entries)
* %NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries)
@@ -301,8 +283,7 @@
#define NFP_NET_CFG_TXR_IRQ_MOD(_x) (NFP_NET_CFG_TXR_BASE + 0x500 + \
((_x) * 0x4))
-/**
- * RX ring configuration (0x0800 - 0x0c00)
+/* RX ring configuration (0x0800 - 0x0c00)
* %NFP_NET_CFG_RXR_BASE: Base offset for RX ring configuration
* %NFP_NET_CFG_RXR_ADDR: Per RX ring DMA address (8B entries)
* %NFP_NET_CFG_RXR_SZ: Per RX ring ring size (1B entries)
@@ -318,8 +299,7 @@
#define NFP_NET_CFG_RXR_IRQ_MOD(_x) (NFP_NET_CFG_RXR_BASE + 0x300 + \
((_x) * 0x4))
-/**
- * Interrupt Control/Cause registers (0x0c00 - 0x0d00)
+/* Interrupt Control/Cause registers (0x0c00 - 0x0d00)
* These registers are only used when MSI-X auto-masking is not
* enabled (%NFP_NET_CFG_CTRL_MSIXAUTO not set). The array is index
* by MSI-X entry and are 1B in size. If an entry is zero, the
@@ -334,8 +314,7 @@
#define NFP_NET_CFG_ICR_RXTX 0x1
#define NFP_NET_CFG_ICR_LSC 0x2
-/**
- * General device stats (0x0d00 - 0x0d90)
+/* General device stats (0x0d00 - 0x0d90)
* all counters are 64bit.
*/
#define NFP_NET_CFG_STATS_BASE 0x0d00
@@ -368,8 +347,7 @@
#define NFP_NET_CFG_STATS_APP3_FRAMES (NFP_NET_CFG_STATS_BASE + 0xc0)
#define NFP_NET_CFG_STATS_APP3_BYTES (NFP_NET_CFG_STATS_BASE + 0xc8)
-/**
- * Per ring stats (0x1000 - 0x1800)
+/* Per ring stats (0x1000 - 0x1800)
* options, 64bit per entry
* %NFP_NET_CFG_TXR_STATS: TX ring statistics (Packet and Byte count)
* %NFP_NET_CFG_RXR_STATS: RX ring statistics (Packet and Byte count)
@@ -381,8 +359,7 @@
#define NFP_NET_CFG_RXR_STATS(_x) (NFP_NET_CFG_RXR_STATS_BASE + \
((_x) * 0x10))
-/**
- * General use mailbox area (0x1800 - 0x19ff)
+/* General use mailbox area (0x1800 - 0x19ff)
* 4B used for update command and 4B return code
* followed by a max of 504B of variable length value
*/
@@ -399,8 +376,7 @@
#define NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET 5
#define NFP_NET_CFG_MBOX_CMD_TLV_CMSG 6
-/**
- * VLAN filtering using general use mailbox
+/* VLAN filtering using general use mailbox
* %NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox
* %NFP_NET_CFG_VLAN_FILTER_VID: VLAN ID to filter
* %NFP_NET_CFG_VLAN_FILTER_PROTO: VLAN proto to filter
@@ -411,8 +387,7 @@
#define NFP_NET_CFG_VLAN_FILTER_PROTO (NFP_NET_CFG_VLAN_FILTER + 2)
#define NFP_NET_CFG_VLAN_FILTER_SZ 0x0004
-/**
- * TLV capabilities
+/* TLV capabilities
* %NFP_NET_CFG_TLV_TYPE: Offset of type within the TLV
* %NFP_NET_CFG_TLV_TYPE_REQUIRED: Driver must be able to parse the TLV
* %NFP_NET_CFG_TLV_LENGTH: Offset of length within the TLV
@@ -438,8 +413,7 @@
#define NFP_NET_CFG_TLV_HEADER_TYPE 0x7fff0000
#define NFP_NET_CFG_TLV_HEADER_LENGTH 0x0000ffff
-/**
- * Capability TLV types
+/* Capability TLV types
*
* %NFP_NET_CFG_TLV_TYPE_UNKNOWN:
* Special TLV type to catch bugs, should never be encountered. Drivers should
@@ -512,8 +486,7 @@
struct device;
-/**
- * struct nfp_net_tlv_caps - parsed control BAR TLV capabilities
+/* struct nfp_net_tlv_caps - parsed control BAR TLV capabilities
* @me_freq_mhz: ME clock_freq (MHz)
* @mbox_off: vNIC mailbox area offset
* @mbox_len: vNIC mailbox area length
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index 553c708694e8..d8b735ccf899 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -1,10 +1,11 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include "nfp_net.h"
+#include "nfp_net_dp.h"
static struct dentry *nfp_dir;
@@ -42,13 +43,19 @@ static int nfp_rx_q_show(struct seq_file *file, void *data)
seq_printf(file, "%04d: 0x%08x 0x%08x", i,
rxd->vals[0], rxd->vals[1]);
- frag = READ_ONCE(rx_ring->rxbufs[i].frag);
- if (frag)
- seq_printf(file, " frag=%p", frag);
+ if (!r_vec->xsk_pool) {
+ frag = READ_ONCE(rx_ring->rxbufs[i].frag);
+ if (frag)
+ seq_printf(file, " frag=%p", frag);
- if (rx_ring->rxbufs[i].dma_addr)
- seq_printf(file, " dma_addr=%pad",
- &rx_ring->rxbufs[i].dma_addr);
+ if (rx_ring->rxbufs[i].dma_addr)
+ seq_printf(file, " dma_addr=%pad",
+ &rx_ring->rxbufs[i].dma_addr);
+ } else {
+ if (rx_ring->xsk_rxbufs[i].dma_addr)
+ seq_printf(file, " dma_addr=%pad",
+ &rx_ring->xsk_rxbufs[i].dma_addr);
+ }
if (i == rx_ring->rd_p % rxd_cnt)
seq_puts(file, " H_RD ");
@@ -74,10 +81,8 @@ static int nfp_tx_q_show(struct seq_file *file, void *data)
{
struct nfp_net_r_vector *r_vec = file->private;
struct nfp_net_tx_ring *tx_ring;
- struct nfp_net_tx_desc *txd;
- int d_rd_p, d_wr_p, txd_cnt;
struct nfp_net *nn;
- int i;
+ int d_rd_p, d_wr_p;
rtnl_lock();
@@ -91,49 +96,20 @@ static int nfp_tx_q_show(struct seq_file *file, void *data)
if (!nfp_net_running(nn))
goto out;
- txd_cnt = tx_ring->cnt;
-
d_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
d_wr_p = nfp_qcp_wr_ptr_read(tx_ring->qcp_q);
- seq_printf(file, "TX[%02d,%02d%s]: cnt=%u dma=%pad host=%p H_RD=%u H_WR=%u D_RD=%u D_WR=%u\n",
+ seq_printf(file, "TX[%02d,%02d%s]: cnt=%u dma=%pad host=%p H_RD=%u H_WR=%u D_RD=%u D_WR=%u",
tx_ring->idx, tx_ring->qcidx,
tx_ring == r_vec->tx_ring ? "" : "xdp",
tx_ring->cnt, &tx_ring->dma, tx_ring->txds,
tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p);
+ if (tx_ring->txrwb)
+ seq_printf(file, " TXRWB=%llu", *tx_ring->txrwb);
+ seq_putc(file, '\n');
- for (i = 0; i < txd_cnt; i++) {
- txd = &tx_ring->txds[i];
- seq_printf(file, "%04d: 0x%08x 0x%08x 0x%08x 0x%08x", i,
- txd->vals[0], txd->vals[1],
- txd->vals[2], txd->vals[3]);
-
- if (tx_ring == r_vec->tx_ring) {
- struct sk_buff *skb = READ_ONCE(tx_ring->txbufs[i].skb);
-
- if (skb)
- seq_printf(file, " skb->head=%p skb->data=%p",
- skb->head, skb->data);
- } else {
- seq_printf(file, " frag=%p",
- READ_ONCE(tx_ring->txbufs[i].frag));
- }
-
- if (tx_ring->txbufs[i].dma_addr)
- seq_printf(file, " dma_addr=%pad",
- &tx_ring->txbufs[i].dma_addr);
-
- if (i == tx_ring->rd_p % txd_cnt)
- seq_puts(file, " H_RD");
- if (i == tx_ring->wr_p % txd_cnt)
- seq_puts(file, " H_WR");
- if (i == d_rd_p % txd_cnt)
- seq_puts(file, " D_RD");
- if (i == d_wr_p % txd_cnt)
- seq_puts(file, " D_WR");
-
- seq_putc(file, '\n');
- }
+ nfp_net_debugfs_print_tx_descs(file, &nn->dp, r_vec, tx_ring,
+ d_rd_p, d_wr_p);
out:
rtnl_unlock();
return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_dp.c b/drivers/net/ethernet/netronome/nfp/nfp_net_dp.c
new file mode 100644
index 000000000000..34dd94811df3
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_dp.c
@@ -0,0 +1,442 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
+
+#include "nfp_app.h"
+#include "nfp_net_dp.h"
+#include "nfp_net_xsk.h"
+
+/**
+ * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
+ * @dp: NFP Net data path struct
+ * @dma_addr: Pointer to storage for DMA address (output param)
+ *
+ * This function allocates a new page frag and maps it for DMA.
+ *
+ * Return: allocated page frag or NULL on failure.
+ */
+void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
+{
+ void *frag;
+
+ if (!dp->xdp_prog) {
+ frag = netdev_alloc_frag(dp->fl_bufsz);
+ } else {
+ struct page *page;
+
+ page = alloc_page(GFP_KERNEL);
+ frag = page ? page_address(page) : NULL;
+ }
+ if (!frag) {
+ nn_dp_warn(dp, "Failed to alloc receive page frag\n");
+ return NULL;
+ }
+
+ *dma_addr = nfp_net_dma_map_rx(dp, frag);
+ if (dma_mapping_error(dp->dev, *dma_addr)) {
+ nfp_net_free_frag(frag, dp->xdp_prog);
+ nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
+ return NULL;
+ }
+
+ return frag;
+}
+
+/**
+ * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
+ * @tx_ring: TX ring structure
+ * @dp: NFP Net data path struct
+ * @r_vec: IRQ vector servicing this ring
+ * @idx: Ring index
+ * @is_xdp: Is this an XDP TX ring?
+ */
+static void
+nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring, struct nfp_net_dp *dp,
+ struct nfp_net_r_vector *r_vec, unsigned int idx,
+ bool is_xdp)
+{
+ struct nfp_net *nn = r_vec->nfp_net;
+
+ tx_ring->idx = idx;
+ tx_ring->r_vec = r_vec;
+ tx_ring->is_xdp = is_xdp;
+ u64_stats_init(&tx_ring->r_vec->tx_sync);
+
+ tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
+ tx_ring->txrwb = dp->txrwb ? &dp->txrwb[idx] : NULL;
+ tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
+}
+
+/**
+ * nfp_net_rx_ring_init() - Fill in the boilerplate for an RX ring
+ * @rx_ring: RX ring structure
+ * @r_vec: IRQ vector servicing this ring
+ * @idx: Ring index
+ */
+static void
+nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
+ struct nfp_net_r_vector *r_vec, unsigned int idx)
+{
+ struct nfp_net *nn = r_vec->nfp_net;
+
+ rx_ring->idx = idx;
+ rx_ring->r_vec = r_vec;
+ u64_stats_init(&rx_ring->r_vec->rx_sync);
+
+ rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
+ rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
+}
+
+/**
+ * nfp_net_rx_ring_reset() - Reflect in SW the state of the freelist after disable
+ * @rx_ring: RX ring structure
+ *
+ * Assumes that the device is stopped, must be idempotent.
+ */
+void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
+{
+ unsigned int wr_idx, last_idx;
+
+ /* wr_p == rd_p means ring was never fed FL bufs. RX rings are always
+ * kept at cnt - 1 FL bufs.
+ */
+ if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0)
+ return;
+
+ /* Move the empty entry to the end of the list */
+ wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
+ last_idx = rx_ring->cnt - 1;
+ if (rx_ring->r_vec->xsk_pool) {
+ rx_ring->xsk_rxbufs[wr_idx] = rx_ring->xsk_rxbufs[last_idx];
+ memset(&rx_ring->xsk_rxbufs[last_idx], 0,
+ sizeof(*rx_ring->xsk_rxbufs));
+ } else {
+ rx_ring->rxbufs[wr_idx] = rx_ring->rxbufs[last_idx];
+ memset(&rx_ring->rxbufs[last_idx], 0, sizeof(*rx_ring->rxbufs));
+ }
+
+ memset(rx_ring->rxds, 0, rx_ring->size);
+ rx_ring->wr_p = 0;
+ rx_ring->rd_p = 0;
+}
+
+/**
+ * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
+ * @dp: NFP Net data path struct
+ * @rx_ring: RX ring to remove buffers from
+ *
+ * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
+ * entries. After device is disabled nfp_net_rx_ring_reset() must be called
+ * to restore required ring geometry.
+ */
+static void
+nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring)
+{
+ unsigned int i;
+
+ if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
+ return;
+
+ for (i = 0; i < rx_ring->cnt - 1; i++) {
+ /* A NULL frag can only happen when initial filling of the ring
+ * fails to allocate enough buffers and calls here to free
+ * already allocated ones.
+ */
+ if (!rx_ring->rxbufs[i].frag)
+ continue;
+
+ nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
+ nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
+ rx_ring->rxbufs[i].dma_addr = 0;
+ rx_ring->rxbufs[i].frag = NULL;
+ }
+}
+
+/**
+ * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
+ * @dp: NFP Net data path struct
+ * @rx_ring: RX ring to allocate buffers for
+ */
+static int
+nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring)
+{
+ struct nfp_net_rx_buf *rxbufs;
+ unsigned int i;
+
+ if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
+ return 0;
+
+ rxbufs = rx_ring->rxbufs;
+
+ for (i = 0; i < rx_ring->cnt - 1; i++) {
+ rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr);
+ if (!rxbufs[i].frag) {
+ nfp_net_rx_ring_bufs_free(dp, rx_ring);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
+{
+ unsigned int r;
+
+ dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
+ GFP_KERNEL);
+ if (!dp->tx_rings)
+ return -ENOMEM;
+
+ if (dp->ctrl & NFP_NET_CFG_CTRL_TXRWB) {
+ dp->txrwb = dma_alloc_coherent(dp->dev,
+ dp->num_tx_rings * sizeof(u64),
+ &dp->txrwb_dma, GFP_KERNEL);
+ if (!dp->txrwb)
+ goto err_free_rings;
+ }
+
+ for (r = 0; r < dp->num_tx_rings; r++) {
+ int bias = 0;
+
+ if (r >= dp->num_stack_tx_rings)
+ bias = dp->num_stack_tx_rings;
+
+ nfp_net_tx_ring_init(&dp->tx_rings[r], dp,
+ &nn->r_vecs[r - bias], r, bias);
+
+ if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r]))
+ goto err_free_prev;
+
+ if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r]))
+ goto err_free_ring;
+ }
+
+ return 0;
+
+err_free_prev:
+ while (r--) {
+ nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
+err_free_ring:
+ nfp_net_tx_ring_free(dp, &dp->tx_rings[r]);
+ }
+ if (dp->txrwb)
+ dma_free_coherent(dp->dev, dp->num_tx_rings * sizeof(u64),
+ dp->txrwb, dp->txrwb_dma);
+err_free_rings:
+ kfree(dp->tx_rings);
+ return -ENOMEM;
+}
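The error unwinding above uses labels placed inside the while (r--) loop: a failure in the second setup step jumps straight to freeing the current ring, and the loop then finishes tearing down every earlier ring completely. A minimal standalone sketch of that goto-into-loop pattern, with made-up step/undo helpers standing in for the ring and buffer alloc/free calls:

/* Standalone sketch, not driver code: step_a/step_b mimic ring alloc and
 * buffer alloc, undo_a/undo_b mimic the corresponding free paths.
 */
#include <stdbool.h>
#include <stdio.h>

#define N 4

static bool step_a(unsigned int r) { (void)r; return true; }
static bool step_b(unsigned int r) { return r != 2; }	/* fail on ring 2 */
static void undo_b(unsigned int r) { printf("undo_b(%u)\n", r); }
static void undo_a(unsigned int r) { printf("undo_a(%u)\n", r); }

static int prepare_all(void)
{
	unsigned int r;

	for (r = 0; r < N; r++) {
		if (!step_a(r))
			goto err_free_prev;	/* current ring needs no undo */
		if (!step_b(r))
			goto err_free_ring;	/* undo step_a of current ring */
	}
	return 0;

err_free_prev:
	while (r--) {
		undo_b(r);
err_free_ring:
		undo_a(r);
	}
	return -1;
}

int main(void)
{
	return prepare_all() ? 1 : 0;	/* prints undo_a(2), then full teardown of rings 1 and 0 */
}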
+
+void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
+{
+ unsigned int r;
+
+ for (r = 0; r < dp->num_tx_rings; r++) {
+ nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
+ nfp_net_tx_ring_free(dp, &dp->tx_rings[r]);
+ }
+
+ if (dp->txrwb)
+ dma_free_coherent(dp->dev, dp->num_tx_rings * sizeof(u64),
+ dp->txrwb, dp->txrwb_dma);
+ kfree(dp->tx_rings);
+}
+
+/**
+ * nfp_net_rx_ring_free() - Free resources allocated to an RX ring
+ * @rx_ring: RX ring to free
+ */
+static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
+{
+ struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
+ struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
+
+ if (dp->netdev)
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+
+ if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
+ kvfree(rx_ring->xsk_rxbufs);
+ else
+ kvfree(rx_ring->rxbufs);
+
+ if (rx_ring->rxds)
+ dma_free_coherent(dp->dev, rx_ring->size,
+ rx_ring->rxds, rx_ring->dma);
+
+ rx_ring->cnt = 0;
+ rx_ring->rxbufs = NULL;
+ rx_ring->xsk_rxbufs = NULL;
+ rx_ring->rxds = NULL;
+ rx_ring->dma = 0;
+ rx_ring->size = 0;
+}
+
+/**
+ * nfp_net_rx_ring_alloc() - Allocate resources for an RX ring
+ * @dp: NFP Net data path struct
+ * @rx_ring: RX ring to allocate
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int
+nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
+{
+ enum xdp_mem_type mem_type;
+ size_t rxbuf_sw_desc_sz;
+ int err;
+
+ if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) {
+ mem_type = MEM_TYPE_XSK_BUFF_POOL;
+ rxbuf_sw_desc_sz = sizeof(*rx_ring->xsk_rxbufs);
+ } else {
+ mem_type = MEM_TYPE_PAGE_ORDER0;
+ rxbuf_sw_desc_sz = sizeof(*rx_ring->rxbufs);
+ }
+
+ if (dp->netdev) {
+ err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
+ rx_ring->idx, rx_ring->r_vec->napi.napi_id);
+ if (err < 0)
+ return err;
+
+ err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, mem_type, NULL);
+ if (err)
+ goto err_alloc;
+ }
+
+ rx_ring->cnt = dp->rxd_cnt;
+ rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
+ rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
+ &rx_ring->dma,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!rx_ring->rxds) {
+ netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
+ rx_ring->cnt);
+ goto err_alloc;
+ }
+
+ if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) {
+ rx_ring->xsk_rxbufs = kvcalloc(rx_ring->cnt, rxbuf_sw_desc_sz,
+ GFP_KERNEL);
+ if (!rx_ring->xsk_rxbufs)
+ goto err_alloc;
+ } else {
+ rx_ring->rxbufs = kvcalloc(rx_ring->cnt, rxbuf_sw_desc_sz,
+ GFP_KERNEL);
+ if (!rx_ring->rxbufs)
+ goto err_alloc;
+ }
+
+ return 0;
+
+err_alloc:
+ nfp_net_rx_ring_free(rx_ring);
+ return -ENOMEM;
+}
+
+int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
+{
+ unsigned int r;
+
+ dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
+ GFP_KERNEL);
+ if (!dp->rx_rings)
+ return -ENOMEM;
+
+ for (r = 0; r < dp->num_rx_rings; r++) {
+ nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);
+
+ if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
+ goto err_free_prev;
+
+ if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
+ goto err_free_ring;
+ }
+
+ return 0;
+
+err_free_prev:
+ while (r--) {
+ nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
+err_free_ring:
+ nfp_net_rx_ring_free(&dp->rx_rings[r]);
+ }
+ kfree(dp->rx_rings);
+ return -ENOMEM;
+}
+
+void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
+{
+ unsigned int r;
+
+ for (r = 0; r < dp->num_rx_rings; r++) {
+ nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
+ nfp_net_rx_ring_free(&dp->rx_rings[r]);
+ }
+
+ kfree(dp->rx_rings);
+}
+
+void
+nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
+ struct nfp_net_rx_ring *rx_ring, unsigned int idx)
+{
+ /* Write the DMA address, size and MSI-X info to the device */
+ nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
+ nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
+ nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
+}
+
+void
+nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
+ struct nfp_net_tx_ring *tx_ring, unsigned int idx)
+{
+ nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
+ if (tx_ring->txrwb) {
+ *tx_ring->txrwb = 0;
+ nn_writeq(nn, NFP_NET_CFG_TXR_WB_ADDR(idx),
+ nn->dp.txrwb_dma + idx * sizeof(u64));
+ }
+ nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
+ nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
+}
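The size register above is programmed as ilog2() of the descriptor count, which is why ring sizes are kept at powers of two (see the ethtool roundup further down in this patch). A tiny standalone sketch of that encoding; the loop below is an illustrative ilog2, not the kernel helper:

#include <assert.h>
#include <stdint.h>

/* Illustrative log2 for power-of-two ring sizes. */
static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int l = 0;

	while (v >>= 1)
		l++;
	return l;
}

int main(void)
{
	assert(ilog2_u32(512) == 9);	/* a 512-entry ring is written as 9 */
	assert(ilog2_u32(4096) == 12);
	return 0;
}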
+
+void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
+{
+ nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
+ nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
+ nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);
+
+ nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
+ nn_writeq(nn, NFP_NET_CFG_TXR_WB_ADDR(idx), 0);
+ nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
+ nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
+}
+
+netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ return nn->dp.ops->xmit(skb, netdev);
+}
+
+bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
+{
+ struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
+
+ return nn->dp.ops->ctrl_tx_one(nn, r_vec, skb, false);
+}
+
+bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
+{
+ struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
+ bool ret;
+
+ spin_lock_bh(&r_vec->lock);
+ ret = nn->dp.ops->ctrl_tx_one(nn, r_vec, skb, false);
+ spin_unlock_bh(&r_vec->lock);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_dp.h b/drivers/net/ethernet/netronome/nfp/nfp_net_dp.h
new file mode 100644
index 000000000000..c934cc2d3208
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_dp.h
@@ -0,0 +1,215 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#ifndef _NFP_NET_DP_
+#define _NFP_NET_DP_
+
+#include "nfp_net.h"
+
+static inline dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
+{
+ return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
+ dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
+ dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static inline void
+nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
+{
+ dma_sync_single_for_device(dp->dev, dma_addr,
+ dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
+ dp->rx_dma_dir);
+}
+
+static inline void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp,
+ dma_addr_t dma_addr)
+{
+ dma_unmap_single_attrs(dp->dev, dma_addr,
+ dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
+ dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static inline void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp,
+ dma_addr_t dma_addr,
+ unsigned int len)
+{
+ dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
+ len, dp->rx_dma_dir);
+}
+
+/**
+ * nfp_net_tx_full() - check if the TX ring is full
+ * @tx_ring: TX ring to check
+ * @dcnt: Number of descriptors that need to be enqueued (must be >= 1)
+ *
+ * This function checks, based on the *host copy* of the read/write
+ * pointers, whether a given TX ring is full. The real TX queue may have
+ * some slots freed by the device that the host copy does not yet reflect.
+ *
+ * Return: True if the ring is full.
+ */
+static inline int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
+{
+ return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
+}
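wr_p and rd_p are free-running counters, so the unsigned difference wr_p - rd_p still gives the number of in-flight descriptors after the counters wrap. A standalone sketch of the check above, assuming a 1024-entry ring:

#include <assert.h>
#include <stdint.h>

#define RING_CNT 1024u

/* Same expression as nfp_net_tx_full(), on plain uint32_t counters. */
static int ring_full(uint32_t wr_p, uint32_t rd_p, uint32_t dcnt)
{
	return (wr_p - rd_p) >= (RING_CNT - dcnt);
}

int main(void)
{
	uint32_t rd_p = UINT32_MAX - 4;		/* counters about to wrap */
	uint32_t wr_p = rd_p + 10;		/* wraps past zero */

	assert(wr_p - rd_p == 10);		/* in-flight count survives the wrap */
	assert(!ring_full(wr_p, rd_p, 1));	/* plenty of room left */
	assert(ring_full(rd_p + RING_CNT - 1, rd_p, 1));	/* cnt - 1 in flight: full */
	return 0;
}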
+
+static inline void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
+{
+ wmb(); /* drain writebuffer */
+ nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
+ tx_ring->wr_ptr_add = 0;
+}
+
+static inline u32
+nfp_net_read_tx_cmpl(struct nfp_net_tx_ring *tx_ring, struct nfp_net_dp *dp)
+{
+ if (tx_ring->txrwb)
+ return *tx_ring->txrwb;
+ return nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
+}
+
+static inline void nfp_net_free_frag(void *frag, bool xdp)
+{
+ if (!xdp)
+ skb_free_frag(frag);
+ else
+ __free_page(virt_to_page(frag));
+}
+
+/**
+ * nfp_net_irq_unmask() - Unmask automasked interrupt
+ * @nn: NFP Network structure
+ * @entry_nr: MSI-X table entry
+ *
+ * Clear the ICR for the IRQ entry.
+ */
+static inline void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
+{
+ nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
+ nn_pci_flush(nn);
+}
+
+struct seq_file;
+
+/* Common */
+void
+nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
+ struct nfp_net_rx_ring *rx_ring, unsigned int idx);
+void
+nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
+ struct nfp_net_tx_ring *tx_ring, unsigned int idx);
+void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx);
+
+void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr);
+int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp);
+int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp);
+void nfp_net_rx_rings_free(struct nfp_net_dp *dp);
+void nfp_net_tx_rings_free(struct nfp_net_dp *dp);
+void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring);
+
+enum nfp_nfd_version {
+ NFP_NFD_VER_NFD3,
+ NFP_NFD_VER_NFDK,
+};
+
+/**
+ * struct nfp_dp_ops - Hooks wrapping the different datapath (dp) implementations
+ * @version: Indicate dp type
+ * @tx_min_desc_per_pkt: Minimal TX descs needed for each packet
+ * @cap_mask: Mask of supported features
+ * @poll: Napi poll for normal rx/tx
+ * @xsk_poll: Napi poll when xsk is enabled
+ * @ctrl_poll: Tasklet poll for ctrl rx/tx
+ * @xmit: Xmit for normal path
+ * @ctrl_tx_one: Xmit for ctrl path
+ * @rx_ring_fill_freelist: Give buffers from the ring to FW
+ * @tx_ring_alloc: Allocate resource for a TX ring
+ * @tx_ring_reset: Free any untransmitted buffers and reset pointers
+ * @tx_ring_free: Free resources allocated to a TX ring
+ * @tx_ring_bufs_alloc: Allocate resource for each TX buffer
+ * @tx_ring_bufs_free: Free resources allocated to each TX buffer
+ * @print_tx_descs: Show TX ring's info for debugging purposes
+ */
+struct nfp_dp_ops {
+ enum nfp_nfd_version version;
+ unsigned int tx_min_desc_per_pkt;
+ u32 cap_mask;
+
+ int (*poll)(struct napi_struct *napi, int budget);
+ int (*xsk_poll)(struct napi_struct *napi, int budget);
+ void (*ctrl_poll)(struct tasklet_struct *t);
+ netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *netdev);
+ bool (*ctrl_tx_one)(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+ struct sk_buff *skb, bool old);
+ void (*rx_ring_fill_freelist)(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring);
+ int (*tx_ring_alloc)(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring);
+ void (*tx_ring_reset)(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring);
+ void (*tx_ring_free)(struct nfp_net_tx_ring *tx_ring);
+ int (*tx_ring_bufs_alloc)(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring);
+ void (*tx_ring_bufs_free)(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring);
+
+ void (*print_tx_descs)(struct seq_file *file,
+ struct nfp_net_r_vector *r_vec,
+ struct nfp_net_tx_ring *tx_ring,
+ u32 d_rd_p, u32 d_wr_p);
+};
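The core code only ever calls through these hooks (see the inline wrappers below and nfp_net_tx() in nfp_net_dp.c), so NFD3 and NFDK can plug in their own implementations behind one interface. A standalone sketch of that ops-table indirection with toy names; it only mirrors the dispatch style, not the real nfp_nfd3_ops contents:

#include <stdio.h>

struct dp;

struct dp_ops {
	int (*xmit)(struct dp *dp, int pkt);
};

struct dp {
	const struct dp_ops *ops;
	const char *name;
};

static int backend_a_xmit(struct dp *dp, int pkt)
{
	printf("%s: xmit %d\n", dp->name, pkt);
	return 0;
}

static const struct dp_ops backend_a_ops = { .xmit = backend_a_xmit };

/* Mirrors nn->dp.ops->xmit(skb, netdev) in nfp_net_tx(). */
static int dp_xmit(struct dp *dp, int pkt)
{
	return dp->ops->xmit(dp, pkt);
}

int main(void)
{
	struct dp dp = { .ops = &backend_a_ops, .name = "nfd3-like" };

	return dp_xmit(&dp, 1);
}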
+
+static inline void
+nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+ return dp->ops->tx_ring_reset(dp, tx_ring);
+}
+
+static inline void
+nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
+ struct nfp_net_rx_ring *rx_ring)
+{
+ dp->ops->rx_ring_fill_freelist(dp, rx_ring);
+}
+
+static inline int
+nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+ return dp->ops->tx_ring_alloc(dp, tx_ring);
+}
+
+static inline void
+nfp_net_tx_ring_free(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
+{
+ dp->ops->tx_ring_free(tx_ring);
+}
+
+static inline int
+nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring)
+{
+ return dp->ops->tx_ring_bufs_alloc(dp, tx_ring);
+}
+
+static inline void
+nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
+ struct nfp_net_tx_ring *tx_ring)
+{
+ dp->ops->tx_ring_bufs_free(dp, tx_ring);
+}
+
+static inline void
+nfp_net_debugfs_print_tx_descs(struct seq_file *file, struct nfp_net_dp *dp,
+ struct nfp_net_r_vector *r_vec,
+ struct nfp_net_tx_ring *tx_ring,
+ u32 d_rd_p, u32 d_wr_p)
+{
+ dp->ops->print_tx_descs(file, r_vec, tx_ring, d_rd_p, d_wr_p);
+}
+
+extern const struct nfp_dp_ops nfp_nfd3_ops;
+extern const struct nfp_dp_ops nfp_nfdk_ops;
+
+netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev);
+
+#endif /* _NFP_NET_DP_ */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index e0c27471bcdb..61c8b450aafb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -21,10 +21,12 @@
#include <linux/sfp.h>
#include "nfpcore/nfp.h"
+#include "nfpcore/nfp_dev.h"
#include "nfpcore/nfp_nsp.h"
#include "nfp_app.h"
#include "nfp_main.h"
#include "nfp_net_ctrl.h"
+#include "nfp_net_dp.h"
#include "nfp_net.h"
#include "nfp_port.h"
@@ -217,7 +219,7 @@ nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
struct nfp_net *nn = netdev_priv(netdev);
snprintf(vnic_version, sizeof(vnic_version), "%d.%d.%d.%d",
- nn->fw_ver.resv, nn->fw_ver.class,
+ nn->fw_ver.extend, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor);
strlcpy(drvinfo->bus_info, pci_name(nn->pdev),
sizeof(drvinfo->bus_info));
@@ -386,9 +388,10 @@ static void nfp_net_get_ringparam(struct net_device *netdev,
struct netlink_ext_ack *extack)
{
struct nfp_net *nn = netdev_priv(netdev);
+ u32 qc_max = nn->dev_info->max_qc_size;
- ring->rx_max_pending = NFP_NET_MAX_RX_DESCS;
- ring->tx_max_pending = NFP_NET_MAX_TX_DESCS;
+ ring->rx_max_pending = qc_max;
+ ring->tx_max_pending = qc_max / nn->dp.ops->tx_min_desc_per_pkt;
ring->rx_pending = nn->dp.rxd_cnt;
ring->tx_pending = nn->dp.txd_cnt;
}
@@ -412,19 +415,22 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
+ u32 tx_dpp, qc_min, qc_max, rxd_cnt, txd_cnt;
struct nfp_net *nn = netdev_priv(netdev);
- u32 rxd_cnt, txd_cnt;
/* We don't have separate queues/rings for small/large frames. */
if (ring->rx_mini_pending || ring->rx_jumbo_pending)
return -EINVAL;
+ qc_min = nn->dev_info->min_qc_size;
+ qc_max = nn->dev_info->max_qc_size;
+ tx_dpp = nn->dp.ops->tx_min_desc_per_pkt;
/* Round up to supported values */
rxd_cnt = roundup_pow_of_two(ring->rx_pending);
txd_cnt = roundup_pow_of_two(ring->tx_pending);
- if (rxd_cnt < NFP_NET_MIN_RX_DESCS || rxd_cnt > NFP_NET_MAX_RX_DESCS ||
- txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
+ if (rxd_cnt < qc_min || rxd_cnt > qc_max ||
+ txd_cnt < qc_min / tx_dpp || txd_cnt > qc_max / tx_dpp)
return -EINVAL;
if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt)
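The bounds above now come from the per-chip dev_info rather than fixed NFP_NET_MIN/MAX_*_DESCS constants, and the TX limits are scaled by tx_min_desc_per_pkt. A standalone sketch of the validation using the NFP6000 limits from nfp_dev.c; the tx_min_desc_per_pkt value of 2 is only an assumption for illustration:

#include <stdint.h>
#include <stdio.h>

/* Illustrative round-up to the next power of two. */
static uint32_t roundup_pow_of_two_u32(uint32_t x)
{
	uint32_t v = 1;

	while (v < x)
		v <<= 1;
	return v;
}

int main(void)
{
	uint32_t qc_min = 256, qc_max = 256 * 1024;	/* NFP6000 min/max_qc_size */
	uint32_t tx_dpp = 2;				/* assumed descs per packet */
	uint32_t rxd_cnt = roundup_pow_of_two_u32(1000);	/* -> 1024 */
	uint32_t txd_cnt = roundup_pow_of_two_u32(3000);	/* -> 4096 */
	int ok = rxd_cnt >= qc_min && rxd_cnt <= qc_max &&
		 txd_cnt >= qc_min / tx_dpp && txd_cnt <= qc_max / tx_dpp;

	printf("rxd=%u txd=%u %s\n", rxd_cnt, txd_cnt, ok ? "accepted" : "rejected");
	return !ok;
}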
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 751f76cd4f79..ca4e05650fe6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -22,6 +22,7 @@
#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
+#include "nfpcore/nfp_dev.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_nsp.h"
#include "nfpcore/nfp6000_pcie.h"
@@ -116,13 +117,12 @@ nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);
/* Allocate and initialise the vNIC */
- nn = nfp_net_alloc(pf->pdev, ctrl_bar, needs_netdev,
+ nn = nfp_net_alloc(pf->pdev, pf->dev_info, ctrl_bar, needs_netdev,
n_tx_rings, n_rx_rings);
if (IS_ERR(nn))
return nn;
nn->app = pf->app;
- nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
nn->dp.is_vf = 0;
@@ -307,6 +307,7 @@ err_prev_deinit:
static int
nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
{
+ struct devlink *devlink = priv_to_devlink(pf);
u8 __iomem *ctrl_bar;
int err;
@@ -314,9 +315,9 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
if (IS_ERR(pf->app))
return PTR_ERR(pf->app);
- mutex_lock(&pf->lock);
+ devl_lock(devlink);
err = nfp_app_init(pf->app);
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
if (err)
goto err_free;
@@ -343,9 +344,9 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
err_unmap:
nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
err_app_clean:
- mutex_lock(&pf->lock);
+ devl_lock(devlink);
nfp_app_clean(pf->app);
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
err_free:
nfp_app_free(pf->app);
pf->app = NULL;
@@ -354,14 +355,16 @@ err_free:
static void nfp_net_pf_app_clean(struct nfp_pf *pf)
{
+ struct devlink *devlink = priv_to_devlink(pf);
+
if (pf->ctrl_vnic) {
nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
}
- mutex_lock(&pf->lock);
+ devl_lock(devlink);
nfp_app_clean(pf->app);
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
nfp_app_free(pf->app);
pf->app = NULL;
@@ -495,8 +498,9 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf)
}
cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);
- mem = nfp_cpp_map_area(pf->cpp, "net.qc", cpp_id, NFP_PCIE_QUEUE(0),
- NFP_QCP_QUEUE_AREA_SZ, &pf->qc_area);
+ mem = nfp_cpp_map_area(pf->cpp, "net.qc", cpp_id,
+ nfp_qcp_queue_offset(pf->dev_info, 0),
+ pf->dev_info->qc_area_sz, &pf->qc_area);
if (IS_ERR(mem)) {
nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
err = PTR_ERR(mem);
@@ -546,12 +550,13 @@ nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
{
+ struct devlink *devlink = priv_to_devlink(pf);
struct nfp_eth_table *eth_table;
struct nfp_net *nn, *next;
struct nfp_port *port;
int err;
- lockdep_assert_held(&pf->lock);
+ devl_assert_locked(devlink);
/* Check for nfp_net_pci_remove() racing against us */
if (list_empty(&pf->vnics))
@@ -600,10 +605,11 @@ static void nfp_net_refresh_vnics(struct work_struct *work)
{
struct nfp_pf *pf = container_of(work, struct nfp_pf,
port_refresh_work);
+ struct devlink *devlink = priv_to_devlink(pf);
- mutex_lock(&pf->lock);
+ devl_lock(devlink);
nfp_net_refresh_port_table_sync(pf);
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
}
void nfp_net_refresh_port_table(struct nfp_port *port)
@@ -672,9 +678,11 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
}
nfp_net_get_fw_version(&fw_ver, ctrl_bar);
- if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
+ if (fw_ver.extend & NFP_NET_CFG_VERSION_RESERVED_MASK ||
+ fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
- fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
+ fw_ver.extend, fw_ver.class,
+ fw_ver.major, fw_ver.minor);
err = -EINVAL;
goto err_unmap;
}
@@ -690,7 +698,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
break;
default:
nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n",
- fw_ver.resv, fw_ver.class,
+ fw_ver.extend, fw_ver.class,
fw_ver.major, fw_ver.minor);
err = -EINVAL;
goto err_unmap;
@@ -709,7 +717,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_shared_buf_unreg;
- mutex_lock(&pf->lock);
+ devl_lock(devlink);
pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
/* Allocate the vnics and do basic init */
@@ -729,7 +737,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_stop_app;
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
devlink_register(devlink);
return 0;
@@ -742,7 +750,7 @@ err_free_vnics:
nfp_net_pf_free_vnics(pf);
err_clean_ddir:
nfp_net_debugfs_dir_clean(&pf->ddir);
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
nfp_devlink_params_unregister(pf);
err_shared_buf_unreg:
nfp_shared_buf_unregister(pf);
@@ -756,10 +764,11 @@ err_unmap:
void nfp_net_pci_remove(struct nfp_pf *pf)
{
+ struct devlink *devlink = priv_to_devlink(pf);
struct nfp_net *nn, *next;
devlink_unregister(priv_to_devlink(pf));
- mutex_lock(&pf->lock);
+ devl_lock(devlink);
list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
if (!nfp_net_is_data_vnic(nn))
continue;
@@ -771,7 +780,7 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
/* stop app first, to avoid double free of ctrl vNIC's ddir */
nfp_net_debugfs_dir_clean(&pf->ddir);
- mutex_unlock(&pf->lock);
+ devl_unlock(devlink);
nfp_devlink_params_unregister(pf);
nfp_shared_buf_unregister(pf);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 181ac8e789a3..ba3fa7eac98d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -20,7 +20,7 @@ struct net_device *
nfp_repr_get_locked(struct nfp_app *app, struct nfp_reprs *set, unsigned int id)
{
return rcu_dereference_protected(set->reprs[id],
- lockdep_is_held(&app->pf->lock));
+ nfp_app_is_locked(app));
}
static void
@@ -476,7 +476,7 @@ nfp_reprs_clean_and_free_by_type(struct nfp_app *app, enum nfp_repr_type type)
int i;
reprs = rcu_dereference_protected(app->reprs[type],
- lockdep_is_held(&app->pf->lock));
+ nfp_app_is_locked(app));
if (!reprs)
return;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
index a3db0cbf6425..786be58a907e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
@@ -4,8 +4,7 @@
#ifndef _NFP_NET_SRIOV_H_
#define _NFP_NET_SRIOV_H_
-/**
- * SRIOV VF configuration.
+/* SRIOV VF configuration.
* The configuration memory begins with a mailbox region for communication with
* the firmware followed by individual VF entries.
*/
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c b/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c
new file mode 100644
index 000000000000..86829446c637
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc */
+/* Copyright (C) 2021 Corigine, Inc */
+
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <net/xdp_sock_drv.h>
+#include <trace/events/xdp.h>
+
+#include "nfp_app.h"
+#include "nfp_net.h"
+#include "nfp_net_dp.h"
+#include "nfp_net_xsk.h"
+
+static void
+nfp_net_xsk_rx_bufs_stash(struct nfp_net_rx_ring *rx_ring, unsigned int idx,
+ struct xdp_buff *xdp)
+{
+ unsigned int headroom;
+
+ headroom = xsk_pool_get_headroom(rx_ring->r_vec->xsk_pool);
+
+ rx_ring->rxds[idx].fld.reserved = 0;
+ rx_ring->rxds[idx].fld.meta_len_dd = 0;
+
+ rx_ring->xsk_rxbufs[idx].xdp = xdp;
+ rx_ring->xsk_rxbufs[idx].dma_addr =
+ xsk_buff_xdp_get_frame_dma(xdp) + headroom;
+}
+
+void nfp_net_xsk_rx_unstash(struct nfp_net_xsk_rx_buf *rxbuf)
+{
+ rxbuf->dma_addr = 0;
+ rxbuf->xdp = NULL;
+}
+
+void nfp_net_xsk_rx_free(struct nfp_net_xsk_rx_buf *rxbuf)
+{
+ if (rxbuf->xdp)
+ xsk_buff_free(rxbuf->xdp);
+
+ nfp_net_xsk_rx_unstash(rxbuf);
+}
+
+void nfp_net_xsk_rx_bufs_free(struct nfp_net_rx_ring *rx_ring)
+{
+ unsigned int i;
+
+ if (!rx_ring->cnt)
+ return;
+
+ for (i = 0; i < rx_ring->cnt - 1; i++)
+ nfp_net_xsk_rx_free(&rx_ring->xsk_rxbufs[i]);
+}
+
+void nfp_net_xsk_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring)
+{
+ struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
+ struct xsk_buff_pool *pool = r_vec->xsk_pool;
+ unsigned int wr_idx, wr_ptr_add = 0;
+ struct xdp_buff *xdp;
+
+ while (nfp_net_rx_space(rx_ring)) {
+ wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
+
+ xdp = xsk_buff_alloc(pool);
+ if (!xdp)
+ break;
+
+ nfp_net_xsk_rx_bufs_stash(rx_ring, wr_idx, xdp);
+
+ nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
+ rx_ring->xsk_rxbufs[wr_idx].dma_addr);
+
+ rx_ring->wr_p++;
+ wr_ptr_add++;
+ }
+
+ /* Ensure all records are visible before incrementing write counter. */
+ wmb();
+ nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, wr_ptr_add);
+}
+
+void nfp_net_xsk_rx_drop(struct nfp_net_r_vector *r_vec,
+ struct nfp_net_xsk_rx_buf *xrxbuf)
+{
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_drops++;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ nfp_net_xsk_rx_free(xrxbuf);
+}
+
+static void nfp_net_xsk_pool_unmap(struct device *dev,
+ struct xsk_buff_pool *pool)
+{
+ return xsk_pool_dma_unmap(pool, 0);
+}
+
+static int nfp_net_xsk_pool_map(struct device *dev, struct xsk_buff_pool *pool)
+{
+ return xsk_pool_dma_map(pool, dev, 0);
+}
+
+int nfp_net_xsk_setup_pool(struct net_device *netdev,
+ struct xsk_buff_pool *pool, u16 queue_id)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ struct xsk_buff_pool *prev_pool;
+ struct nfp_net_dp *dp;
+ int err;
+
+ /* NFDK doesn't implement xsk yet. */
+ if (nn->dp.ops->version == NFP_NFD_VER_NFDK)
+ return -EOPNOTSUPP;
+
+ /* Reject on old FWs so we can drop some checks on datapath. */
+ if (nn->dp.rx_offset != NFP_NET_CFG_RX_OFFSET_DYNAMIC)
+ return -EOPNOTSUPP;
+ if (!nn->dp.chained_metadata_format)
+ return -EOPNOTSUPP;
+
+ /* Install */
+ if (pool) {
+ err = nfp_net_xsk_pool_map(nn->dp.dev, pool);
+ if (err)
+ return err;
+ }
+
+ /* Reconfig/swap */
+ dp = nfp_net_clone_dp(nn);
+ if (!dp) {
+ err = -ENOMEM;
+ goto err_unmap;
+ }
+
+ prev_pool = dp->xsk_pools[queue_id];
+ dp->xsk_pools[queue_id] = pool;
+
+ err = nfp_net_ring_reconfig(nn, dp, NULL);
+ if (err)
+ goto err_unmap;
+
+ /* Uninstall */
+ if (prev_pool)
+ nfp_net_xsk_pool_unmap(nn->dp.dev, prev_pool);
+
+ return 0;
+err_unmap:
+ if (pool)
+ nfp_net_xsk_pool_unmap(nn->dp.dev, pool);
+
+ return err;
+}
+
+int nfp_net_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ /* queue_id comes from a zero-copy socket, installed with XDP_SETUP_XSK_POOL,
+ * so it must be within our vector range. Moreover, our napi structs
+ * are statically allocated, so we can always kick them without worrying
+ * whether a reconfig is in progress or the interface is down.
+ */
+ napi_schedule(&nn->r_vecs[queue_id].napi);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.h b/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.h
new file mode 100644
index 000000000000..6d281eb2fc1c
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_xsk.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2018 Netronome Systems, Inc */
+/* Copyright (C) 2021 Corigine, Inc */
+
+#ifndef _NFP_XSK_H_
+#define _NFP_XSK_H_
+
+#include <net/xdp_sock_drv.h>
+
+#define NFP_NET_XSK_TX_BATCH 16 /* XSK TX transmission batch size. */
+
+static inline bool nfp_net_has_xsk_pool_slow(struct nfp_net_dp *dp,
+ unsigned int qid)
+{
+ return dp->xdp_prog && dp->xsk_pools[qid];
+}
+
+static inline int nfp_net_rx_space(struct nfp_net_rx_ring *rx_ring)
+{
+ return rx_ring->cnt - rx_ring->wr_p + rx_ring->rd_p - 1;
+}
+
+static inline int nfp_net_tx_space(struct nfp_net_tx_ring *tx_ring)
+{
+ return tx_ring->cnt - tx_ring->wr_p + tx_ring->rd_p - 1;
+}
+
+void nfp_net_xsk_rx_unstash(struct nfp_net_xsk_rx_buf *rxbuf);
+void nfp_net_xsk_rx_free(struct nfp_net_xsk_rx_buf *rxbuf);
+void nfp_net_xsk_rx_drop(struct nfp_net_r_vector *r_vec,
+ struct nfp_net_xsk_rx_buf *xrxbuf);
+int nfp_net_xsk_setup_pool(struct net_device *netdev, struct xsk_buff_pool *pool,
+ u16 queue_id);
+
+void nfp_net_xsk_rx_bufs_free(struct nfp_net_rx_ring *rx_ring);
+
+void nfp_net_xsk_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring);
+
+int nfp_net_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
+
+#endif /* _NFP_XSK_H_ */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index 87f2268b16d6..a51eb26dd977 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/etherdevice.h>
+#include "nfpcore/nfp_dev.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_main.h"
@@ -36,11 +37,14 @@ struct nfp_net_vf {
static const char nfp_net_driver_name[] = "nfp_netvf";
-#define PCI_DEVICE_NFP6000VF 0x6003
static const struct pci_device_id nfp_netvf_pci_device_ids[] = {
- { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_NFP6000VF,
+ { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP3800_VF,
PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
- PCI_ANY_ID, 0,
+ PCI_ANY_ID, 0, NFP_DEV_NFP3800_VF,
+ },
+ { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000_VF,
+ PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
+ PCI_ANY_ID, 0, NFP_DEV_NFP6000_VF,
},
{ 0, } /* Required last entry. */
};
@@ -65,6 +69,7 @@ static void nfp_netvf_get_mac_addr(struct nfp_net *nn)
static int nfp_netvf_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
+ const struct nfp_dev_info *dev_info;
struct nfp_net_fw_version fw_ver;
int max_tx_rings, max_rx_rings;
u32 tx_bar_off, rx_bar_off;
@@ -78,6 +83,8 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
int stride;
int err;
+ dev_info = &nfp_dev_info[pci_id->driver_data];
+
vf = kzalloc(sizeof(*vf), GFP_KERNEL);
if (!vf)
return -ENOMEM;
@@ -95,8 +102,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- err = dma_set_mask_and_coherent(&pdev->dev,
- DMA_BIT_MASK(NFP_NET_MAX_DMA_BITS));
+ err = dma_set_mask_and_coherent(&pdev->dev, dev_info->dma_mask);
if (err)
goto err_pci_regions;
@@ -116,9 +122,11 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
}
nfp_net_get_fw_version(&fw_ver, ctrl_bar);
- if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
+ if (fw_ver.extend & NFP_NET_CFG_VERSION_RESERVED_MASK ||
+ fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n",
- fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
+ fw_ver.extend, fw_ver.class,
+ fw_ver.major, fw_ver.minor);
err = -EINVAL;
goto err_ctrl_unmap;
}
@@ -138,7 +146,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
break;
default:
dev_err(&pdev->dev, "Unsupported Firmware ABI %d.%d.%d.%d\n",
- fw_ver.resv, fw_ver.class,
+ fw_ver.extend, fw_ver.class,
fw_ver.major, fw_ver.minor);
err = -EINVAL;
goto err_ctrl_unmap;
@@ -167,19 +175,19 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
}
startq = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
- tx_bar_off = NFP_PCIE_QUEUE(startq);
+ tx_bar_off = nfp_qcp_queue_offset(dev_info, startq);
startq = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
- rx_bar_off = NFP_PCIE_QUEUE(startq);
+ rx_bar_off = nfp_qcp_queue_offset(dev_info, startq);
/* Allocate and initialise the netdev */
- nn = nfp_net_alloc(pdev, ctrl_bar, true, max_tx_rings, max_rx_rings);
+ nn = nfp_net_alloc(pdev, dev_info, ctrl_bar, true,
+ max_tx_rings, max_rx_rings);
if (IS_ERR(nn)) {
err = PTR_ERR(nn);
goto err_ctrl_unmap;
}
vf->nn = nn;
- nn->fw_ver = fw_ver;
nn->dp.is_vf = 1;
nn->stride_tx = stride;
nn->stride_rx = stride;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c
index 93c5bfc0510b..4f2308570dcf 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c
@@ -75,23 +75,6 @@ int nfp_port_set_features(struct net_device *netdev, netdev_features_t features)
return 0;
}
-struct nfp_port *
-nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id)
-{
- struct nfp_port *port;
-
- lockdep_assert_held(&pf->lock);
-
- if (type != NFP_PORT_PHYS_PORT)
- return NULL;
-
- list_for_each_entry(port, &pf->ports, port_list)
- if (port->eth_id == id)
- return port;
-
- return NULL;
-}
-
struct nfp_eth_table_port *__nfp_port_get_eth_port(struct nfp_port *port)
{
if (!port)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
index ae4da189d955..d1ebe6c72f7f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -106,8 +106,6 @@ nfp_port_set_features(struct net_device *netdev, netdev_features_t features);
struct nfp_port *nfp_port_from_netdev(struct net_device *netdev);
int nfp_port_get_port_parent_id(struct net_device *netdev,
struct netdev_phys_item_id *ppid);
-struct nfp_port *
-nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id);
struct nfp_eth_table_port *__nfp_port_get_eth_port(struct nfp_port *port);
struct nfp_eth_table_port *nfp_port_get_eth_port(struct nfp_port *port);
@@ -132,8 +130,7 @@ void nfp_devlink_port_unregister(struct nfp_port *port);
void nfp_devlink_port_type_eth_set(struct nfp_port *port);
void nfp_devlink_port_type_clear(struct nfp_port *port);
-/**
- * Mac stats (0x0000 - 0x0200)
+/* Mac stats (0x0000 - 0x0200)
* all counters are 64bit.
*/
#define NFP_MAC_STATS_BASE 0x0000
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index 252fe06f58aa..0d1d39edbbae 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -28,6 +28,7 @@
#include <linux/pci.h>
#include "nfp_cpp.h"
+#include "nfp_dev.h"
#include "nfp6000/nfp6000.h"
@@ -100,11 +101,7 @@
#define NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(bar, x) ((x) << ((bar)->bitsize - 4))
#define NFP_PCIE_P2C_GENERAL_SIZE(bar) (1 << ((bar)->bitsize - 4))
-#define NFP_PCIE_CFG_BAR_PCIETOCPPEXPANSIONBAR(bar, slot) \
- (0x400 + ((bar) * 8 + (slot)) * 4)
-
-#define NFP_PCIE_CPP_BAR_PCIETOCPPEXPANSIONBAR(bar, slot) \
- (((bar) * 8 + (slot)) * 4)
+#define NFP_PCIE_P2C_EXPBAR_OFFSET(bar_index) ((bar_index) * 4)
/* The number of explicit BARs to reserve.
* Minimum is 0, maximum is 4 on the NFP6000.
@@ -145,6 +142,7 @@ struct nfp_bar {
struct nfp6000_pcie {
struct pci_dev *pdev;
struct device *dev;
+ const struct nfp_dev_info *dev_info;
/* PCI BAR management */
spinlock_t bar_lock; /* Protect the PCI2CPP BAR cache */
@@ -269,19 +267,16 @@ compute_bar(const struct nfp6000_pcie *nfp, const struct nfp_bar *bar,
static int
nfp6000_bar_write(struct nfp6000_pcie *nfp, struct nfp_bar *bar, u32 newcfg)
{
- int base, slot;
- int xbar;
+ unsigned int xbar;
- base = bar->index >> 3;
- slot = bar->index & 7;
+ xbar = NFP_PCIE_P2C_EXPBAR_OFFSET(bar->index);
if (nfp->iomem.csr) {
- xbar = NFP_PCIE_CPP_BAR_PCIETOCPPEXPANSIONBAR(base, slot);
writel(newcfg, nfp->iomem.csr + xbar);
/* Readback to ensure BAR is flushed */
readl(nfp->iomem.csr + xbar);
} else {
- xbar = NFP_PCIE_CFG_BAR_PCIETOCPPEXPANSIONBAR(base, slot);
+ xbar += nfp->dev_info->pcie_cfg_expbar_offset;
pci_write_config_dword(nfp->pdev, xbar, newcfg);
}
@@ -622,7 +617,8 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
nfp6000_bar_write(nfp, bar, barcfg_msix_general);
- nfp->expl.data = bar->iomem + NFP_PCIE_SRAM + 0x1000;
+ nfp->expl.data = bar->iomem + NFP_PCIE_SRAM +
+ nfp->dev_info->pcie_expl_offset;
switch (nfp->pdev->device) {
case PCI_DEVICE_ID_NETRONOME_NFP3800:
@@ -1306,18 +1302,20 @@ static const struct nfp_cpp_operations nfp6000_pcie_ops = {
/**
* nfp_cpp_from_nfp6000_pcie() - Build a NFP CPP bus from a NFP6000 PCI device
* @pdev: NFP6000 PCI device
+ * @dev_info: NFP ASIC params
*
* Return: NFP CPP handle
*/
-struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev)
+struct nfp_cpp *
+nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info)
{
struct nfp6000_pcie *nfp;
u16 interface;
int err;
/* Finished with card initialization. */
- dev_info(&pdev->dev,
- "Netronome Flow Processor NFP4000/NFP5000/NFP6000 PCIe Card Probe\n");
+ dev_info(&pdev->dev, "Netronome Flow Processor %s PCIe Card Probe\n",
+ dev_info->chip_names);
pcie_print_link_status(pdev);
nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
@@ -1328,6 +1326,7 @@ struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev)
nfp->dev = &pdev->dev;
nfp->pdev = pdev;
+ nfp->dev_info = dev_info;
init_waitqueue_head(&nfp->bar_waiters);
spin_lock_init(&nfp->bar_lock);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h
index 6d1bffa6eac6..097660b673db 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h
@@ -11,6 +11,7 @@
#include "nfp_cpp.h"
-struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev);
+struct nfp_cpp *
+nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info);
#endif /* NFP6000_PCIE_H */
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index 2dd0f5842873..3d379e937184 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -32,10 +32,6 @@
#define PCI_64BIT_BAR_COUNT 3
-/* NFP hardware vendor/device ids.
- */
-#define PCI_DEVICE_ID_NETRONOME_NFP3800 0x3800
-
#define NFP_CPP_NUM_TARGETS 16
/* Max size of area it should be safe to request */
#define NFP_CPP_SAFE_AREA_SIZE SZ_2M
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
index 85734c6badf5..508ae6b571ca 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
@@ -22,6 +22,7 @@
#include "nfp6000/nfp_xpb.h"
/* NFP6000 PL */
+#define NFP_PL_DEVICE_PART_NFP6000 0x6200
#define NFP_PL_DEVICE_ID 0x00000004
#define NFP_PL_DEVICE_ID_MASK GENMASK(7, 0)
#define NFP_PL_DEVICE_PART_MASK GENMASK(31, 16)
@@ -130,8 +131,12 @@ int nfp_cpp_model_autodetect(struct nfp_cpp *cpp, u32 *model)
return err;
*model = reg & NFP_PL_DEVICE_MODEL_MASK;
- if (*model & NFP_PL_DEVICE_ID_MASK)
- *model -= 0x10;
+ /* Disambiguate the NFP4000/NFP5000/NFP6000 chips */
+ if (FIELD_GET(NFP_PL_DEVICE_PART_MASK, reg) ==
+ NFP_PL_DEVICE_PART_NFP6000) {
+ if (*model & NFP_PL_DEVICE_ID_MASK)
+ *model -= 0x10;
+ }
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c
new file mode 100644
index 000000000000..28384d6d1c6f
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+
+#include "nfp_dev.h"
+
+const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT] = {
+ [NFP_DEV_NFP3800] = {
+ .dma_mask = DMA_BIT_MASK(40),
+ .qc_idx_mask = GENMASK(8, 0),
+ .qc_addr_offset = 0x400000,
+ .min_qc_size = 512,
+ .max_qc_size = SZ_64K,
+
+ .chip_names = "NFP3800",
+ .pcie_cfg_expbar_offset = 0x0a00,
+ .pcie_expl_offset = 0xd000,
+ .qc_area_sz = 0x100000,
+ },
+ [NFP_DEV_NFP3800_VF] = {
+ .dma_mask = DMA_BIT_MASK(40),
+ .qc_idx_mask = GENMASK(8, 0),
+ .qc_addr_offset = 0,
+ .min_qc_size = 512,
+ .max_qc_size = SZ_64K,
+ },
+ [NFP_DEV_NFP6000] = {
+ .dma_mask = DMA_BIT_MASK(40),
+ .qc_idx_mask = GENMASK(7, 0),
+ .qc_addr_offset = 0x80000,
+ .min_qc_size = 256,
+ .max_qc_size = SZ_256K,
+
+ .chip_names = "NFP4000/NFP5000/NFP6000",
+ .pcie_cfg_expbar_offset = 0x0400,
+ .pcie_expl_offset = 0x1000,
+ .qc_area_sz = 0x80000,
+ },
+ [NFP_DEV_NFP6000_VF] = {
+ .dma_mask = DMA_BIT_MASK(40),
+ .qc_idx_mask = GENMASK(7, 0),
+ .qc_addr_offset = 0,
+ .min_qc_size = 256,
+ .max_qc_size = SZ_256K,
+ },
+};
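A standalone sketch of how the table above is consumed: the PCI match entries carry an enum nfp_dev_id in driver_data, the probe indexes the table with it, and the per-chip parameters (DMA mask, queue controller limits) replace the old hard-coded constants (see nfp_netvf_pci_probe() further down in this patch). The values below are copied from the NFP3800/NFP6000 entries; everything else is simplified:

#include <stdint.h>
#include <stdio.h>

enum dev_id { DEV_NFP3800, DEV_NFP6000, DEV_CNT };

struct dev_info {
	uint64_t dma_mask;
	uint32_t min_qc_size;
	uint32_t max_qc_size;
};

static const struct dev_info dev_info[DEV_CNT] = {
	[DEV_NFP3800] = { (1ULL << 40) - 1, 512, 64 * 1024 },
	[DEV_NFP6000] = { (1ULL << 40) - 1, 256, 256 * 1024 },
};

int main(void)
{
	unsigned long driver_data = DEV_NFP6000;	/* from the pci_device_id match */
	const struct dev_info *info = &dev_info[driver_data];

	printf("dma_mask=%#llx qc=[%u,%u]\n",
	       (unsigned long long)info->dma_mask,
	       info->min_qc_size, info->max_qc_size);
	return 0;
}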
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h
new file mode 100644
index 000000000000..d4189869cf7b
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#ifndef _NFP_DEV_H_
+#define _NFP_DEV_H_
+
+#include <linux/types.h>
+
+enum nfp_dev_id {
+ NFP_DEV_NFP3800,
+ NFP_DEV_NFP3800_VF,
+ NFP_DEV_NFP6000,
+ NFP_DEV_NFP6000_VF,
+ NFP_DEV_CNT,
+};
+
+struct nfp_dev_info {
+ /* Required fields */
+ u64 dma_mask;
+ u32 qc_idx_mask;
+ u32 qc_addr_offset;
+ u32 min_qc_size;
+ u32 max_qc_size;
+
+ /* PF-only fields */
+ const char *chip_names;
+ u32 pcie_cfg_expbar_offset;
+ u32 pcie_expl_offset;
+ u32 qc_area_sz;
+};
+
+extern const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT];
+
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 10e7d8b21c46..730fea214b8a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -513,7 +513,7 @@ nfp_nsp_command_buf_dma_sg(struct nfp_nsp *nsp,
dma_size = BIT_ULL(dma_order);
nseg = DIV_ROUND_UP(max_size, chunk_size);
- chunks = kzalloc(array_size(sizeof(*chunks), nseg), GFP_KERNEL);
+ chunks = kcalloc(nseg, sizeof(*chunks), GFP_KERNEL);
if (!chunks)
return -ENOMEM;
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 07a00dd9cfe0..4b3482ce90a1 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -324,8 +324,9 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev)
+ sizeof(*priv->rx_bd_v) *
((i + 1) % RX_BD_NUM));
- skb = netdev_alloc_skb_ip_align(ndev,
- NIXGE_MAX_JUMBO_FRAME_SIZE);
+ skb = __netdev_alloc_skb_ip_align(ndev,
+ NIXGE_MAX_JUMBO_FRAME_SIZE,
+ GFP_KERNEL);
if (!skb)
goto out;
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 12105f62cbdd..03650022d444 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -191,7 +191,7 @@ IV. Notes
Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
-and an AlphaStation to verifty the Alpha port!
+and an AlphaStation to verify the Alpha port!
IVb. References
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 5e25411ff02f..602f4d45d529 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -18,7 +18,7 @@ struct ionic_lif;
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003
-#define DEVCMD_TIMEOUT 10
+#define DEVCMD_TIMEOUT 5
#define IONIC_ADMINQ_TIME_SLICE msecs_to_jiffies(100)
#define IONIC_PHC_UPDATE_NS 10000000000 /* 10s in nanoseconds */
@@ -78,6 +78,9 @@ void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode,
u8 status, int err);
int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_wait);
+int ionic_dev_cmd_wait_nomsg(struct ionic *ionic, unsigned long max_wait);
+void ionic_dev_cmd_dev_err_print(struct ionic *ionic, u8 opcode, u8 status,
+ int err);
int ionic_set_dma_mask(struct ionic *ionic);
int ionic_setup(struct ionic *ionic);
@@ -89,4 +92,6 @@ int ionic_port_identify(struct ionic *ionic);
int ionic_port_init(struct ionic *ionic);
int ionic_port_reset(struct ionic *ionic);
+const char *ionic_vf_attr_to_str(enum ionic_vf_attr attr);
+
#endif /* _IONIC_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index 7e296fa71b36..6ffc62c41165 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -109,8 +109,8 @@ void ionic_bus_unmap_dbpage(struct ionic *ionic, void __iomem *page)
static void ionic_vf_dealloc_locked(struct ionic *ionic)
{
+ struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_STATSADDR };
struct ionic_vf *v;
- dma_addr_t dma = 0;
int i;
if (!ionic->vfs)
@@ -120,9 +120,8 @@ static void ionic_vf_dealloc_locked(struct ionic *ionic)
v = &ionic->vfs[i];
if (v->stats_pa) {
- (void)ionic_set_vf_config(ionic, i,
- IONIC_VF_ATTR_STATSADDR,
- (u8 *)&dma);
+ vfc.stats_pa = 0;
+ (void)ionic_set_vf_config(ionic, i, &vfc);
dma_unmap_single(ionic->dev, v->stats_pa,
sizeof(v->stats), DMA_FROM_DEVICE);
v->stats_pa = 0;
@@ -143,6 +142,7 @@ static void ionic_vf_dealloc(struct ionic *ionic)
static int ionic_vf_alloc(struct ionic *ionic, int num_vfs)
{
+ struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_STATSADDR };
struct ionic_vf *v;
int err = 0;
int i;
@@ -166,9 +166,10 @@ static int ionic_vf_alloc(struct ionic *ionic, int num_vfs)
}
ionic->num_vfs++;
+
/* ignore failures from older FW, we just won't get stats */
- (void)ionic_set_vf_config(ionic, i, IONIC_VF_ATTR_STATSADDR,
- (u8 *)&v->stats_pa);
+ vfc.stats_pa = cpu_to_le64(v->stats_pa);
+ (void)ionic_set_vf_config(ionic, i, &vfc);
}
out:
@@ -331,6 +332,9 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_deregister_lifs;
}
+ mod_timer(&ionic->watchdog_timer,
+ round_jiffies(jiffies + ionic->watchdog_period));
+
return 0;
err_out_deregister_lifs:
@@ -348,7 +352,6 @@ err_out_port_reset:
err_out_reset:
ionic_reset(ionic);
err_out_teardown:
- del_timer_sync(&ionic->watchdog_timer);
pci_clear_master(pdev);
/* Don't fail the probe for these errors, keep
* the hw interface around for inspection
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index d57e80d44c9d..9d0514cfeb5c 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -33,7 +33,8 @@ static void ionic_watchdog_cb(struct timer_list *t)
!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
- if (test_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state)) {
+ if (test_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state) &&
+ !test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work) {
netdev_err(lif->netdev, "rxmode change dropped\n");
@@ -46,6 +47,24 @@ static void ionic_watchdog_cb(struct timer_list *t)
}
}
+static void ionic_watchdog_init(struct ionic *ionic)
+{
+ struct ionic_dev *idev = &ionic->idev;
+
+ timer_setup(&ionic->watchdog_timer, ionic_watchdog_cb, 0);
+ ionic->watchdog_period = IONIC_WATCHDOG_SECS * HZ;
+
+ /* set times to ensure the first check will proceed */
+ atomic_long_set(&idev->last_check_time, jiffies - 2 * HZ);
+ idev->last_hb_time = jiffies - 2 * ionic->watchdog_period;
+ /* init as ready, so no transition if the first check succeeds */
+ idev->last_fw_hb = 0;
+ idev->fw_hb_ready = true;
+ idev->fw_status_ready = true;
+ idev->fw_generation = IONIC_FW_STS_F_GENERATION &
+ ioread8(&idev->dev_info_regs->fw_status);
+}
+
void ionic_init_devinfo(struct ionic *ionic)
{
struct ionic_dev *idev = &ionic->idev;
@@ -109,21 +128,7 @@ int ionic_dev_setup(struct ionic *ionic)
return -EFAULT;
}
- timer_setup(&ionic->watchdog_timer, ionic_watchdog_cb, 0);
- ionic->watchdog_period = IONIC_WATCHDOG_SECS * HZ;
-
- /* set times to ensure the first check will proceed */
- atomic_long_set(&idev->last_check_time, jiffies - 2 * HZ);
- idev->last_hb_time = jiffies - 2 * ionic->watchdog_period;
- /* init as ready, so no transition if the first check succeeds */
- idev->last_fw_hb = 0;
- idev->fw_hb_ready = true;
- idev->fw_status_ready = true;
- idev->fw_generation = IONIC_FW_STS_F_GENERATION &
- ioread8(&idev->dev_info_regs->fw_status);
-
- mod_timer(&ionic->watchdog_timer,
- round_jiffies(jiffies + ionic->watchdog_period));
+ ionic_watchdog_init(ionic);
idev->db_pages = bar->vaddr;
idev->phy_db_pages = bar->bus_addr;
@@ -132,10 +137,21 @@ int ionic_dev_setup(struct ionic *ionic)
}
/* Devcmd Interface */
+bool ionic_is_fw_running(struct ionic_dev *idev)
+{
+ u8 fw_status = ioread8(&idev->dev_info_regs->fw_status);
+
+ /* firmware is useful only if the running bit is set and
+ * fw_status != 0xff (bad PCI read)
+ */
+ return (fw_status != 0xff) && (fw_status & IONIC_FW_STS_F_RUNNING);
+}
+
int ionic_heartbeat_check(struct ionic *ionic)
{
- struct ionic_dev *idev = &ionic->idev;
unsigned long check_time, last_check_time;
+ struct ionic_dev *idev = &ionic->idev;
+ struct ionic_lif *lif = ionic->lif;
bool fw_status_ready = true;
bool fw_hb_ready;
u8 fw_generation;
@@ -155,13 +171,10 @@ do_check_time:
goto do_check_time;
}
- /* firmware is useful only if the running bit is set and
- * fw_status != 0xff (bad PCI read)
- * If fw_status is not ready don't bother with the generation.
- */
fw_status = ioread8(&idev->dev_info_regs->fw_status);
- if (fw_status == 0xff || !(fw_status & IONIC_FW_STS_F_RUNNING)) {
+ /* If fw_status is not ready don't bother with the generation */
+ if (!ionic_is_fw_running(idev)) {
fw_status_ready = false;
} else {
fw_generation = fw_status & IONIC_FW_STS_F_GENERATION;
@@ -176,26 +189,40 @@ do_check_time:
* the down, the next watchdog will see the fw is up
* and the generation value stable, so will trigger
* the fw-up activity.
+ *
+ * If we had already moved to FW_RESET from a RESET event,
+ * it is possible that we never saw the fw_status go to 0,
+ * so we fake the current idev->fw_status_ready here to
+ * force the transition and get FW up again.
*/
- fw_status_ready = false;
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ idev->fw_status_ready = false; /* go to running */
+ else
+ fw_status_ready = false; /* go to down */
}
}
+ dev_dbg(ionic->dev, "fw_status 0x%02x ready %d idev->ready %d last_hb 0x%x state 0x%02lx\n",
+ fw_status, fw_status_ready, idev->fw_status_ready,
+ idev->last_fw_hb, lif->state[0]);
+
/* is this a transition? */
- if (fw_status_ready != idev->fw_status_ready) {
- struct ionic_lif *lif = ionic->lif;
+ if (fw_status_ready != idev->fw_status_ready &&
+ !test_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
bool trigger = false;
idev->fw_status_ready = fw_status_ready;
- if (!fw_status_ready) {
- dev_info(ionic->dev, "FW stopped %u\n", fw_status);
- if (lif && !test_bit(IONIC_LIF_F_FW_RESET, lif->state))
- trigger = true;
- } else {
- dev_info(ionic->dev, "FW running %u\n", fw_status);
- if (lif && test_bit(IONIC_LIF_F_FW_RESET, lif->state))
- trigger = true;
+ if (!fw_status_ready &&
+ !test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
+ !test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
+ dev_info(ionic->dev, "FW stopped 0x%02x\n", fw_status);
+ trigger = true;
+
+ } else if (fw_status_ready &&
+ test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
+ dev_info(ionic->dev, "FW running 0x%02x\n", fw_status);
+ trigger = true;
}
if (trigger) {
@@ -210,12 +237,14 @@ do_check_time:
}
}
- if (!fw_status_ready)
+ if (!idev->fw_status_ready)
return -ENXIO;
- /* wait at least one watchdog period since the last heartbeat */
+ /* Because of some variability in the actual FW heartbeat, we
+ * wait longer than the DEVCMD_TIMEOUT before checking again.
+ */
last_check_time = idev->last_hb_time;
- if (time_before(check_time, last_check_time + ionic->watchdog_period))
+ if (time_before(check_time, last_check_time + DEVCMD_TIMEOUT * 2 * HZ))
return 0;
fw_hb = ioread32(&idev->dev_info_regs->fw_heartbeat);
@@ -392,60 +421,63 @@ void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type)
}
/* VF commands */
-int ionic_set_vf_config(struct ionic *ionic, int vf, u8 attr, u8 *data)
+int ionic_set_vf_config(struct ionic *ionic, int vf,
+ struct ionic_vf_setattr_cmd *vfc)
{
union ionic_dev_cmd cmd = {
.vf_setattr.opcode = IONIC_CMD_VF_SETATTR,
- .vf_setattr.attr = attr,
+ .vf_setattr.attr = vfc->attr,
.vf_setattr.vf_index = cpu_to_le16(vf),
};
int err;
+ memcpy(cmd.vf_setattr.pad, vfc->pad, sizeof(vfc->pad));
+
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_go(&ionic->idev, &cmd);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ return err;
+}
+
+int ionic_dev_cmd_vf_getattr(struct ionic *ionic, int vf, u8 attr,
+ struct ionic_vf_getattr_comp *comp)
+{
+ union ionic_dev_cmd cmd = {
+ .vf_getattr.opcode = IONIC_CMD_VF_GETATTR,
+ .vf_getattr.attr = attr,
+ .vf_getattr.vf_index = cpu_to_le16(vf),
+ };
+ int err;
+
+ if (vf >= ionic->num_vfs)
+ return -EINVAL;
+
switch (attr) {
case IONIC_VF_ATTR_SPOOFCHK:
- cmd.vf_setattr.spoofchk = *data;
- dev_dbg(ionic->dev, "%s: vf %d spoof %d\n",
- __func__, vf, *data);
- break;
case IONIC_VF_ATTR_TRUST:
- cmd.vf_setattr.trust = *data;
- dev_dbg(ionic->dev, "%s: vf %d trust %d\n",
- __func__, vf, *data);
- break;
case IONIC_VF_ATTR_LINKSTATE:
- cmd.vf_setattr.linkstate = *data;
- dev_dbg(ionic->dev, "%s: vf %d linkstate %d\n",
- __func__, vf, *data);
- break;
case IONIC_VF_ATTR_MAC:
- ether_addr_copy(cmd.vf_setattr.macaddr, data);
- dev_dbg(ionic->dev, "%s: vf %d macaddr %pM\n",
- __func__, vf, data);
- break;
case IONIC_VF_ATTR_VLAN:
- cmd.vf_setattr.vlanid = cpu_to_le16(*(u16 *)data);
- dev_dbg(ionic->dev, "%s: vf %d vlan %d\n",
- __func__, vf, *(u16 *)data);
- break;
case IONIC_VF_ATTR_RATE:
- cmd.vf_setattr.maxrate = cpu_to_le32(*(u32 *)data);
- dev_dbg(ionic->dev, "%s: vf %d maxrate %d\n",
- __func__, vf, *(u32 *)data);
break;
case IONIC_VF_ATTR_STATSADDR:
- cmd.vf_setattr.stats_pa = cpu_to_le64(*(u64 *)data);
- dev_dbg(ionic->dev, "%s: vf %d stats_pa 0x%08llx\n",
- __func__, vf, *(u64 *)data);
- break;
default:
return -EINVAL;
}
mutex_lock(&ionic->dev_cmd_lock);
ionic_dev_cmd_go(&ionic->idev, &cmd);
- err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ err = ionic_dev_cmd_wait_nomsg(ionic, DEVCMD_TIMEOUT);
+ memcpy_fromio(comp, &ionic->idev.dev_cmd_regs->comp.vf_getattr,
+ sizeof(*comp));
mutex_unlock(&ionic->dev_cmd_lock);
+ if (err && comp->status != IONIC_RC_ENOSUPP)
+ ionic_dev_cmd_dev_err_print(ionic, cmd.vf_getattr.opcode,
+ comp->status, err);
+
return err;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index e5acf3bd62b2..563c302eb033 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -318,7 +318,10 @@ void ionic_dev_cmd_port_autoneg(struct ionic_dev *idev, u8 an_enable);
void ionic_dev_cmd_port_fec(struct ionic_dev *idev, u8 fec_type);
void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type);
-int ionic_set_vf_config(struct ionic *ionic, int vf, u8 attr, u8 *data);
+int ionic_set_vf_config(struct ionic *ionic, int vf,
+ struct ionic_vf_setattr_cmd *vfc);
+int ionic_dev_cmd_vf_getattr(struct ionic *ionic, int vf, u8 attr,
+ struct ionic_vf_getattr_comp *comp);
void ionic_dev_cmd_queue_identify(struct ionic_dev *idev,
u16 lif_type, u8 qtype, u8 qver);
void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver);
@@ -353,5 +356,6 @@ void ionic_q_rewind(struct ionic_queue *q, struct ionic_desc_info *start);
void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
unsigned int stop_index);
int ionic_heartbeat_check(struct ionic *ionic);
+bool ionic_is_fw_running(struct ionic_dev *idev);
#endif /* _IONIC_DEV_H_ */
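The ionic_is_fw_running() helper declared just above boils the firmware-liveness test down to two conditions on the fw_status register byte: it must not read back as 0xff (a failed PCI read) and the RUNNING bit must be set. A minimal userspace sketch of that predicate follows; the bit value used for the RUNNING flag is an assumption for illustration, not the driver's actual IONIC_FW_STS_F_RUNNING constant.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for IONIC_FW_STS_F_RUNNING; bit 0 is an assumption here. */
    #define FW_STS_F_RUNNING 0x01u

    /* Same shape as ionic_is_fw_running(): reject 0xff (bad PCI read) and
     * require the RUNNING bit in the fw_status byte.
     */
    static bool fw_is_running(uint8_t fw_status)
    {
        return fw_status != 0xff && (fw_status & FW_STS_F_RUNNING);
    }

    int main(void)
    {
        printf("%d %d %d\n",
               fw_is_running(0x01),   /* running       -> 1 */
               fw_is_running(0xff),   /* bad PCI read  -> 0 */
               fw_is_running(0x00));  /* stopped       -> 0 */
        return 0;
    }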
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 386a5cf1e224..01c22701482d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -74,10 +74,10 @@ static void ionic_get_drvinfo(struct net_device *netdev,
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
- strlcpy(drvinfo->driver, IONIC_DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, ionic->idev.dev_info.fw_version,
+ strscpy(drvinfo->driver, IONIC_DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, ionic->idev.dev_info.fw_version,
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, ionic_bus_info(ionic),
+ strscpy(drvinfo->bus_info, ionic_bus_info(ionic),
sizeof(drvinfo->bus_info));
}
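The ethtool hunk above is part of the tree-wide move from strlcpy() to strscpy(). The practical difference is in the return value and in how much of the source is read: strlcpy() returns strlen(src) and always walks the whole source string, while strscpy() NUL-terminates, stops at the destination size, and reports truncation with an error code. Below is a hedged userspace approximation of both semantics; my_strlcpy/my_strscpy are illustrative stand-ins, not the kernel implementations.

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Userspace approximations only, to show the semantic difference:
     * strlcpy() returns strlen(src) and always reads the whole source,
     * strscpy() returns the bytes copied, or -1 (-E2BIG in the kernel)
     * on truncation, and always NUL-terminates a non-empty buffer.
     */
    static size_t my_strlcpy(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);

        if (size) {
            size_t n = len < size - 1 ? len : size - 1;

            memcpy(dst, src, n);
            dst[n] = '\0';
        }
        return len;
    }

    static long my_strscpy(char *dst, const char *src, size_t size)
    {
        size_t i;

        if (!size)
            return -1;
        for (i = 0; i < size - 1 && src[i]; i++)
            dst[i] = src[i];
        dst[i] = '\0';
        return src[i] ? -1 : (long)i;
    }

    int main(void)
    {
        char buf[8];

        printf("%zu\n", my_strlcpy(buf, "firmware-1.2.3", sizeof(buf))); /* 14 */
        printf("%ld\n", my_strscpy(buf, "firmware-1.2.3", sizeof(buf))); /* -1 */
        return 0;
    }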
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 278610ed7227..4a90f611c611 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -759,7 +759,7 @@ enum ionic_txq_desc_opcode {
* IONIC_TXQ_DESC_OPCODE_CSUM_HW:
* Offload 16-bit checksum computation to hardware.
* If @csum_l3 is set then the packet's L3 checksum is
- * updated. Similarly, if @csum_l4 is set the the L4
+ * updated. Similarly, if @csum_l4 is set the L4
* checksum is updated. If @encap is set then encap header
* checksums are also updated.
*
@@ -1368,9 +1368,9 @@ union ionic_port_config {
* @status: link status (enum ionic_port_oper_status)
* @id: port id
* @speed: link speed (in Mbps)
- * @link_down_count: number of times link went from from up to down
+ * @link_down_count: number of times link went from up to down
* @fec_type: fec type (enum ionic_port_fec_type)
- * @xcvr: tranceiver status
+ * @xcvr: transceiver status
*/
struct ionic_port_status {
__le32 id;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 2ff7be17e5af..f3568901eb91 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -12,6 +12,7 @@
#include <linux/pci.h>
#include <linux/cpumask.h>
#include <linux/crash_dump.h>
+#include <linux/vmalloc.h>
#include "ionic.h"
#include "ionic_bus.h"
@@ -393,11 +394,11 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
ionic_qcq_intr_free(lif, qcq);
if (qcq->cq.info) {
- devm_kfree(dev, qcq->cq.info);
+ vfree(qcq->cq.info);
qcq->cq.info = NULL;
}
if (qcq->q.info) {
- devm_kfree(dev, qcq->q.info);
+ vfree(qcq->q.info);
qcq->q.info = NULL;
}
}
@@ -528,8 +529,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
new->q.dev = dev;
new->flags = flags;
- new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info),
- GFP_KERNEL);
+ new->q.info = vzalloc(num_descs * sizeof(*new->q.info));
if (!new->q.info) {
netdev_err(lif->netdev, "Cannot allocate queue info\n");
err = -ENOMEM;
@@ -550,8 +550,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
if (err)
goto err_out;
- new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info),
- GFP_KERNEL);
+ new->cq.info = vzalloc(num_descs * sizeof(*new->cq.info));
if (!new->cq.info) {
netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
err = -ENOMEM;
@@ -640,14 +639,14 @@ err_out_free_cq:
err_out_free_q:
dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
err_out_free_cq_info:
- devm_kfree(dev, new->cq.info);
+ vfree(new->cq.info);
err_out_free_irq:
if (flags & IONIC_QCQ_F_INTR) {
devm_free_irq(dev, new->intr.vector, &new->napi);
ionic_intr_free(lif->ionic, new->intr.index);
}
err_out_free_q_info:
- devm_kfree(dev, new->q.info);
+ vfree(new->q.info);
err_out_free_qcq:
devm_kfree(dev, new);
err_out:
@@ -1112,12 +1111,17 @@ static bool ionic_notifyq_service(struct ionic_cq *cq,
ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
break;
case IONIC_EVENT_RESET:
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work) {
- netdev_err(lif->netdev, "Reset event dropped\n");
- } else {
- work->type = IONIC_DW_TYPE_LIF_RESET;
- ionic_lif_deferred_enqueue(&lif->deferred, work);
+ if (lif->ionic->idev.fw_status_ready &&
+ !test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
+ !test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ netdev_err(lif->netdev, "Reset event dropped\n");
+ clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
+ } else {
+ work->type = IONIC_DW_TYPE_LIF_RESET;
+ ionic_lif_deferred_enqueue(&lif->deferred, work);
+ }
}
break;
default:
@@ -1782,7 +1786,7 @@ static void ionic_lif_quiesce(struct ionic_lif *lif)
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
- netdev_err(lif->netdev, "lif quiesce failed %d\n", err);
+ netdev_dbg(lif->netdev, "lif quiesce failed %d\n", err);
}
static void ionic_txrx_disable(struct ionic_lif *lif)
@@ -2152,6 +2156,76 @@ static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd
}
}
+static int ionic_update_cached_vf_config(struct ionic *ionic, int vf)
+{
+ struct ionic_vf_getattr_comp comp = { 0 };
+ int err;
+ u8 attr;
+
+ attr = IONIC_VF_ATTR_VLAN;
+ err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
+ if (err && comp.status != IONIC_RC_ENOSUPP)
+ goto err_out;
+ if (!err)
+ ionic->vfs[vf].vlanid = comp.vlanid;
+
+ attr = IONIC_VF_ATTR_SPOOFCHK;
+ err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
+ if (err && comp.status != IONIC_RC_ENOSUPP)
+ goto err_out;
+ if (!err)
+ ionic->vfs[vf].spoofchk = comp.spoofchk;
+
+ attr = IONIC_VF_ATTR_LINKSTATE;
+ err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
+ if (err && comp.status != IONIC_RC_ENOSUPP)
+ goto err_out;
+ if (!err) {
+ switch (comp.linkstate) {
+ case IONIC_VF_LINK_STATUS_UP:
+ ionic->vfs[vf].linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ break;
+ case IONIC_VF_LINK_STATUS_DOWN:
+ ionic->vfs[vf].linkstate = IFLA_VF_LINK_STATE_DISABLE;
+ break;
+ case IONIC_VF_LINK_STATUS_AUTO:
+ ionic->vfs[vf].linkstate = IFLA_VF_LINK_STATE_AUTO;
+ break;
+ default:
+ dev_warn(ionic->dev, "Unexpected link state %u\n", comp.linkstate);
+ break;
+ }
+ }
+
+ attr = IONIC_VF_ATTR_RATE;
+ err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
+ if (err && comp.status != IONIC_RC_ENOSUPP)
+ goto err_out;
+ if (!err)
+ ionic->vfs[vf].maxrate = comp.maxrate;
+
+ attr = IONIC_VF_ATTR_TRUST;
+ err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
+ if (err && comp.status != IONIC_RC_ENOSUPP)
+ goto err_out;
+ if (!err)
+ ionic->vfs[vf].trusted = comp.trust;
+
+ attr = IONIC_VF_ATTR_MAC;
+ err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
+ if (err && comp.status != IONIC_RC_ENOSUPP)
+ goto err_out;
+ if (!err)
+ ether_addr_copy(ionic->vfs[vf].macaddr, comp.macaddr);
+
+err_out:
+ if (err)
+ dev_err(ionic->dev, "Failed to get %s for VF %d\n",
+ ionic_vf_attr_to_str(attr), vf);
+
+ return err;
+}
+
static int ionic_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivf)
{
@@ -2167,14 +2241,18 @@ static int ionic_get_vf_config(struct net_device *netdev,
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
} else {
- ivf->vf = vf;
- ivf->vlan = le16_to_cpu(ionic->vfs[vf].vlanid);
- ivf->qos = 0;
- ivf->spoofchk = ionic->vfs[vf].spoofchk;
- ivf->linkstate = ionic->vfs[vf].linkstate;
- ivf->max_tx_rate = le32_to_cpu(ionic->vfs[vf].maxrate);
- ivf->trusted = ionic->vfs[vf].trusted;
- ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
+ ivf->vf = vf;
+ ivf->qos = 0;
+
+ ret = ionic_update_cached_vf_config(ionic, vf);
+ if (!ret) {
+ ivf->vlan = le16_to_cpu(ionic->vfs[vf].vlanid);
+ ivf->spoofchk = ionic->vfs[vf].spoofchk;
+ ivf->linkstate = ionic->vfs[vf].linkstate;
+ ivf->max_tx_rate = le32_to_cpu(ionic->vfs[vf].maxrate);
+ ivf->trusted = ionic->vfs[vf].trusted;
+ ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
+ }
}
up_read(&ionic->vf_op_lock);
@@ -2220,6 +2298,7 @@ static int ionic_get_vf_stats(struct net_device *netdev, int vf,
static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
+ struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_MAC };
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
int ret;
@@ -2235,7 +2314,11 @@ static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
} else {
- ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
+ ether_addr_copy(vfc.macaddr, mac);
+ dev_dbg(ionic->dev, "%s: vf %d macaddr %pM\n",
+ __func__, vf, vfc.macaddr);
+
+ ret = ionic_set_vf_config(ionic, vf, &vfc);
if (!ret)
ether_addr_copy(ionic->vfs[vf].macaddr, mac);
}
@@ -2247,6 +2330,7 @@ static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
u8 qos, __be16 proto)
{
+ struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_VLAN };
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
int ret;
@@ -2269,8 +2353,11 @@ static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
} else {
- ret = ionic_set_vf_config(ionic, vf,
- IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
+ vfc.vlanid = cpu_to_le16(vlan);
+ dev_dbg(ionic->dev, "%s: vf %d vlan %d\n",
+ __func__, vf, le16_to_cpu(vfc.vlanid));
+
+ ret = ionic_set_vf_config(ionic, vf, &vfc);
if (!ret)
ionic->vfs[vf].vlanid = cpu_to_le16(vlan);
}
@@ -2282,6 +2369,7 @@ static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
static int ionic_set_vf_rate(struct net_device *netdev, int vf,
int tx_min, int tx_max)
{
+ struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_RATE };
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
int ret;
@@ -2298,8 +2386,11 @@ static int ionic_set_vf_rate(struct net_device *netdev, int vf,
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
} else {
- ret = ionic_set_vf_config(ionic, vf,
- IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
+ vfc.maxrate = cpu_to_le32(tx_max);
+ dev_dbg(ionic->dev, "%s: vf %d maxrate %d\n",
+ __func__, vf, le32_to_cpu(vfc.maxrate));
+
+ ret = ionic_set_vf_config(ionic, vf, &vfc);
if (!ret)
lif->ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
}
@@ -2310,9 +2401,9 @@ static int ionic_set_vf_rate(struct net_device *netdev, int vf,
static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
{
+ struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_SPOOFCHK };
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
- u8 data = set; /* convert to u8 for config */
int ret;
if (!netif_device_present(netdev))
@@ -2323,10 +2414,13 @@ static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
} else {
- ret = ionic_set_vf_config(ionic, vf,
- IONIC_VF_ATTR_SPOOFCHK, &data);
+ vfc.spoofchk = set;
+ dev_dbg(ionic->dev, "%s: vf %d spoof %d\n",
+ __func__, vf, vfc.spoofchk);
+
+ ret = ionic_set_vf_config(ionic, vf, &vfc);
if (!ret)
- ionic->vfs[vf].spoofchk = data;
+ ionic->vfs[vf].spoofchk = set;
}
up_write(&ionic->vf_op_lock);
@@ -2335,9 +2429,9 @@ static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
{
+ struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_TRUST };
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
- u8 data = set; /* convert to u8 for config */
int ret;
if (!netif_device_present(netdev))
@@ -2348,10 +2442,13 @@ static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
} else {
- ret = ionic_set_vf_config(ionic, vf,
- IONIC_VF_ATTR_TRUST, &data);
+ vfc.trust = set;
+ dev_dbg(ionic->dev, "%s: vf %d trust %d\n",
+ __func__, vf, vfc.trust);
+
+ ret = ionic_set_vf_config(ionic, vf, &vfc);
if (!ret)
- ionic->vfs[vf].trusted = data;
+ ionic->vfs[vf].trusted = set;
}
up_write(&ionic->vf_op_lock);
@@ -2360,20 +2457,21 @@ static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
{
+ struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_LINKSTATE };
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
- u8 data;
+ u8 vfls;
int ret;
switch (set) {
case IFLA_VF_LINK_STATE_ENABLE:
- data = IONIC_VF_LINK_STATUS_UP;
+ vfls = IONIC_VF_LINK_STATUS_UP;
break;
case IFLA_VF_LINK_STATE_DISABLE:
- data = IONIC_VF_LINK_STATUS_DOWN;
+ vfls = IONIC_VF_LINK_STATUS_DOWN;
break;
case IFLA_VF_LINK_STATE_AUTO:
- data = IONIC_VF_LINK_STATUS_AUTO;
+ vfls = IONIC_VF_LINK_STATUS_AUTO;
break;
default:
return -EINVAL;
@@ -2387,8 +2485,11 @@ static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
} else {
- ret = ionic_set_vf_config(ionic, vf,
- IONIC_VF_ATTR_LINKSTATE, &data);
+ vfc.linkstate = vfls;
+ dev_dbg(ionic->dev, "%s: vf %d linkstate %d\n",
+ __func__, vf, vfc.linkstate);
+
+ ret = ionic_set_vf_config(ionic, vf, &vfc);
if (!ret)
ionic->vfs[vf].linkstate = set;
}
@@ -2835,6 +2936,7 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
mutex_unlock(&lif->queue_lock);
+ clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
dev_info(ionic->dev, "FW Down: LIFs stopped\n");
}
@@ -2934,8 +3036,6 @@ void ionic_lif_free(struct ionic_lif *lif)
/* unmap doorbell page */
ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
lif->kern_dbpage = NULL;
- kfree(lif->dbid_inuse);
- lif->dbid_inuse = NULL;
mutex_destroy(&lif->config_lock);
mutex_destroy(&lif->queue_lock);
@@ -3135,22 +3235,12 @@ int ionic_lif_init(struct ionic_lif *lif)
return -EINVAL;
}
- lif->dbid_inuse = bitmap_zalloc(lif->dbid_count, GFP_KERNEL);
- if (!lif->dbid_inuse) {
- dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
- return -ENOMEM;
- }
-
- /* first doorbell id reserved for kernel (dbid aka pid == zero) */
- set_bit(0, lif->dbid_inuse);
lif->kern_pid = 0;
-
dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
if (!lif->kern_dbpage) {
dev_err(dev, "Cannot map dbpage, aborting\n");
- err = -ENOMEM;
- goto err_out_free_dbid;
+ return -ENOMEM;
}
err = ionic_lif_adminq_init(lif);
@@ -3186,15 +3276,13 @@ int ionic_lif_init(struct ionic_lif *lif)
return 0;
err_out_notifyq_deinit:
+ napi_disable(&lif->adminqcq->napi);
ionic_lif_qcq_deinit(lif, lif->notifyqcq);
err_out_adminq_deinit:
ionic_lif_qcq_deinit(lif, lif->adminqcq);
ionic_lif_reset(lif);
ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
lif->kern_dbpage = NULL;
-err_out_free_dbid:
- kfree(lif->dbid_inuse);
- lif->dbid_inuse = NULL;
return err;
}
@@ -3214,7 +3302,7 @@ static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
},
};
- strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
+ strscpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
sizeof(ctx.cmd.lif_setattr.name));
ionic_adminq_post_wait(lif, &ctx);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 9f7ab2f17f93..a53984bf3544 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -135,6 +135,7 @@ enum ionic_lif_state_flags {
IONIC_LIF_F_LINK_CHECK_REQUESTED,
IONIC_LIF_F_FILTER_SYNC_NEEDED,
IONIC_LIF_F_FW_RESET,
+ IONIC_LIF_F_FW_STOPPING,
IONIC_LIF_F_SPLIT_INTR,
IONIC_LIF_F_BROKEN,
IONIC_LIF_F_TX_DIM_INTR,
@@ -213,7 +214,6 @@ struct ionic_lif {
u32 rx_coalesce_hw; /* what the hw is using */
u32 tx_coalesce_usecs; /* what the user asked for */
u32 tx_coalesce_hw; /* what the hw is using */
- unsigned long *dbid_inuse;
unsigned int dbid_count;
struct ionic_phc *phc;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 875f4ec42efe..4029b4e021f8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -188,6 +188,28 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
}
}
+const char *ionic_vf_attr_to_str(enum ionic_vf_attr attr)
+{
+ switch (attr) {
+ case IONIC_VF_ATTR_SPOOFCHK:
+ return "IONIC_VF_ATTR_SPOOFCHK";
+ case IONIC_VF_ATTR_TRUST:
+ return "IONIC_VF_ATTR_TRUST";
+ case IONIC_VF_ATTR_LINKSTATE:
+ return "IONIC_VF_ATTR_LINKSTATE";
+ case IONIC_VF_ATTR_MAC:
+ return "IONIC_VF_ATTR_MAC";
+ case IONIC_VF_ATTR_VLAN:
+ return "IONIC_VF_ATTR_VLAN";
+ case IONIC_VF_ATTR_RATE:
+ return "IONIC_VF_ATTR_RATE";
+ case IONIC_VF_ATTR_STATSADDR:
+ return "IONIC_VF_ATTR_STATSADDR";
+ default:
+ return "IONIC_VF_ATTR_UNKNOWN";
+ }
+}
+
static void ionic_adminq_flush(struct ionic_lif *lif)
{
struct ionic_desc_info *desc_info;
@@ -215,9 +237,13 @@ static void ionic_adminq_flush(struct ionic_lif *lif)
void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode,
u8 status, int err)
{
+ const char *stat_str;
+
+ stat_str = (err == -ETIMEDOUT) ? "TIMEOUT" :
+ ionic_error_to_str(status);
+
netdev_err(lif->netdev, "%s (%d) failed: %s (%d)\n",
- ionic_opcode_to_str(opcode), opcode,
- ionic_error_to_str(status), err);
+ ionic_opcode_to_str(opcode), opcode, stat_str, err);
}
static int ionic_adminq_check_err(struct ionic_lif *lif,
@@ -318,6 +344,7 @@ int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
if (do_msg && !test_bit(IONIC_LIF_F_FW_RESET, lif->state))
netdev_err(netdev, "Posting of %s (%d) failed: %d\n",
name, ctx->cmd.cmd.opcode, err);
+ ctx->comp.comp.status = IONIC_RC_ERROR;
return err;
}
@@ -331,11 +358,15 @@ int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
if (remaining)
break;
- /* interrupt the wait if FW stopped */
- if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
+ /* force a check of FW status and break out if FW reset */
+ (void)ionic_heartbeat_check(lif->ionic);
+ if ((test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
+ !lif->ionic->idev.fw_status_ready) ||
+ test_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
if (do_msg)
- netdev_err(netdev, "%s (%d) interrupted, FW in reset\n",
- name, ctx->cmd.cmd.opcode);
+ netdev_warn(netdev, "%s (%d) interrupted, FW in reset\n",
+ name, ctx->cmd.cmd.opcode);
+ ctx->comp.comp.status = IONIC_RC_ERROR;
return -ENXIO;
}
@@ -370,21 +401,34 @@ int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *
static void ionic_dev_cmd_clean(struct ionic *ionic)
{
- union __iomem ionic_dev_cmd_regs *regs = ionic->idev.dev_cmd_regs;
+ struct ionic_dev *idev = &ionic->idev;
- iowrite32(0, &regs->doorbell);
- memset_io(&regs->cmd, 0, sizeof(regs->cmd));
+ iowrite32(0, &idev->dev_cmd_regs->doorbell);
+ memset_io(&idev->dev_cmd_regs->cmd, 0, sizeof(idev->dev_cmd_regs->cmd));
}
-int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
+void ionic_dev_cmd_dev_err_print(struct ionic *ionic, u8 opcode, u8 status,
+ int err)
+{
+ const char *stat_str;
+
+ stat_str = (err == -ETIMEDOUT) ? "TIMEOUT" :
+ ionic_error_to_str(status);
+
+ dev_err(ionic->dev, "DEV_CMD %s (%d) error, %s (%d) failed\n",
+ ionic_opcode_to_str(opcode), opcode, stat_str, err);
+}
+
+static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds,
+ const bool do_msg)
{
struct ionic_dev *idev = &ionic->idev;
unsigned long start_time;
unsigned long max_wait;
unsigned long duration;
+ int done = 0;
+ bool fw_up;
int opcode;
- int hb = 0;
- int done;
int err;
/* Wait for dev cmd to complete, retrying if we get EAGAIN,
@@ -394,31 +438,24 @@ int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
try_again:
opcode = readb(&idev->dev_cmd_regs->cmd.cmd.opcode);
start_time = jiffies;
- do {
+ for (fw_up = ionic_is_fw_running(idev);
+ !done && fw_up && time_before(jiffies, max_wait);
+ fw_up = ionic_is_fw_running(idev)) {
done = ionic_dev_cmd_done(idev);
if (done)
break;
usleep_range(100, 200);
-
- /* Don't check the heartbeat on FW_CONTROL commands as they are
- * notorious for interrupting the firmware's heartbeat update.
- */
- if (opcode != IONIC_CMD_FW_CONTROL)
- hb = ionic_heartbeat_check(ionic);
- } while (!done && !hb && time_before(jiffies, max_wait));
+ }
duration = jiffies - start_time;
dev_dbg(ionic->dev, "DEVCMD %s (%d) done=%d took %ld secs (%ld jiffies)\n",
ionic_opcode_to_str(opcode), opcode,
done, duration / HZ, duration);
- if (!done && hb) {
- /* It is possible (but unlikely) that FW was busy and missed a
- * heartbeat check but is still alive and will process this
- * request, so don't clean the dev_cmd in this case.
- */
- dev_dbg(ionic->dev, "DEVCMD %s (%d) failed - FW halted\n",
- ionic_opcode_to_str(opcode), opcode);
+ if (!done && !fw_up) {
+ ionic_dev_cmd_clean(ionic);
+ dev_warn(ionic->dev, "DEVCMD %s (%d) interrupted - FW is down\n",
+ ionic_opcode_to_str(opcode), opcode);
return -ENXIO;
}
@@ -444,9 +481,9 @@ try_again:
}
if (!(opcode == IONIC_CMD_FW_CONTROL && err == IONIC_RC_EAGAIN))
- dev_err(ionic->dev, "DEV_CMD %s (%d) error, %s (%d) failed\n",
- ionic_opcode_to_str(opcode), opcode,
- ionic_error_to_str(err), err);
+ if (do_msg)
+ ionic_dev_cmd_dev_err_print(ionic, opcode, err,
+ ionic_error_to_errno(err));
return ionic_error_to_errno(err);
}
@@ -454,6 +491,16 @@ try_again:
return 0;
}
+int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
+{
+ return __ionic_dev_cmd_wait(ionic, max_seconds, true);
+}
+
+int ionic_dev_cmd_wait_nomsg(struct ionic *ionic, unsigned long max_seconds)
+{
+ return __ionic_dev_cmd_wait(ionic, max_seconds, false);
+}
+
int ionic_setup(struct ionic *ionic)
{
int err;
@@ -540,6 +587,9 @@ int ionic_reset(struct ionic *ionic)
struct ionic_dev *idev = &ionic->idev;
int err;
+ if (!ionic_is_fw_running(idev))
+ return 0;
+
mutex_lock(&ionic->dev_cmd_lock);
ionic_dev_cmd_reset(idev);
err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
@@ -612,15 +662,17 @@ int ionic_port_init(struct ionic *ionic)
int ionic_port_reset(struct ionic *ionic)
{
struct ionic_dev *idev = &ionic->idev;
- int err;
+ int err = 0;
if (!idev->port_info)
return 0;
- mutex_lock(&ionic->dev_cmd_lock);
- ionic_dev_cmd_port_reset(idev);
- err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
- mutex_unlock(&ionic->dev_cmd_lock);
+ if (ionic_is_fw_running(idev)) {
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_port_reset(idev);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&ionic->dev_cmd_lock);
+ }
dma_free_coherent(ionic->dev, idev->port_info_sz,
idev->port_info, idev->port_info_pa);
@@ -628,9 +680,6 @@ int ionic_port_reset(struct ionic *ionic)
idev->port_info = NULL;
idev->port_info_pa = 0;
- if (err)
- dev_err(ionic->dev, "Failed to reset port\n");
-
return err;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
index f6e785f949f9..b7363376dfc8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
@@ -376,10 +376,24 @@ static int ionic_lif_filter_add(struct ionic_lif *lif,
spin_unlock_bh(&lif->rx_filters.lock);
- if (err == -ENOSPC) {
- if (le16_to_cpu(ctx.cmd.rx_filter_add.match) == IONIC_RX_FILTER_MATCH_VLAN)
- lif->max_vlans = lif->nvlans;
+ /* store the max_vlans limit that we found */
+ if (err == -ENOSPC &&
+ le16_to_cpu(ctx.cmd.rx_filter_add.match) == IONIC_RX_FILTER_MATCH_VLAN)
+ lif->max_vlans = lif->nvlans;
+
+ /* Prevent unnecessary error messages on recoverable
+ * errors as the filter will get retried on the next
+ * sync attempt.
+ */
+ switch (err) {
+ case -ENOSPC:
+ case -ENXIO:
+ case -ETIMEDOUT:
+ case -EAGAIN:
+ case -EBUSY:
return 0;
+ default:
+ break;
}
ionic_adminq_netdev_err_print(lif, ctx.cmd.cmd.opcode,
@@ -494,9 +508,22 @@ static int ionic_lif_filter_del(struct ionic_lif *lif,
spin_unlock_bh(&lif->rx_filters.lock);
if (state != IONIC_FILTER_STATE_NEW) {
- err = ionic_adminq_post_wait(lif, &ctx);
- if (err && err != -EEXIST)
+ err = ionic_adminq_post_wait_nomsg(lif, &ctx);
+
+ switch (err) {
+ /* ignore these errors */
+ case -EEXIST:
+ case -ENXIO:
+ case -ETIMEDOUT:
+ case -EAGAIN:
+ case -EBUSY:
+ case 0:
+ break;
+ default:
+ ionic_adminq_netdev_err_print(lif, ctx.cmd.cmd.opcode,
+ ctx.comp.comp.status, err);
return err;
+ }
}
return 0;
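The rx-filter add path above deliberately stays quiet on errno values that the periodic filter sync will retry. Condensed into a predicate, the classification looks roughly like the sketch below; this is a standalone illustration, not driver code, and -EEXIST is handled separately on the delete path.

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Mirrors the add-path classification above: these errno values are
     * treated as recoverable and silenced, because the filter is replayed
     * by the next periodic filter sync anyway.
     */
    static bool filter_add_err_is_recoverable(int err)
    {
        switch (err) {
        case 0:
        case -ENOSPC:
        case -ENXIO:
        case -ETIMEDOUT:
        case -EAGAIN:
        case -EBUSY:
            return true;
        default:
            return false;
        }
    }

    int main(void)
    {
        printf("%d %d\n",
               filter_add_err_is_recoverable(-EAGAIN),  /* 1: retried silently */
               filter_add_err_is_recoverable(-EINVAL)); /* 0: logged as error  */
        return 0;
    }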
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index fd6806b4a1b9..9859a4432985 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -151,7 +151,6 @@ static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
IONIC_RX_STAT_DESC(vlan_stripped),
};
-
#define IONIC_NUM_LIF_STATS ARRAY_SIZE(ionic_lif_stats_desc)
#define IONIC_NUM_PORT_STATS ARRAY_SIZE(ionic_port_stats_desc)
#define IONIC_NUM_TX_STATS ARRAY_SIZE(ionic_tx_stats_desc)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 94384f5d2a22..f54035455ad6 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -10,7 +10,6 @@
#include "ionic_lif.h"
#include "ionic_txrx.h"
-
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
ionic_desc_cb cb_func, void *cb_arg)
{
@@ -669,27 +668,37 @@ dma_fail:
return -EIO;
}
+static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info)
+{
+ struct ionic_buf_info *buf_info = desc_info->bufs;
+ struct device *dev = q->dev;
+ unsigned int i;
+
+ if (!desc_info->nbufs)
+ return;
+
+ dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
+ buf_info->len, DMA_TO_DEVICE);
+ buf_info++;
+ for (i = 1; i < desc_info->nbufs; i++, buf_info++)
+ dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
+ buf_info->len, DMA_TO_DEVICE);
+
+ desc_info->nbufs = 0;
+}
+
static void ionic_tx_clean(struct ionic_queue *q,
struct ionic_desc_info *desc_info,
struct ionic_cq_info *cq_info,
void *cb_arg)
{
- struct ionic_buf_info *buf_info = desc_info->bufs;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct ionic_qcq *qcq = q_to_qcq(q);
struct sk_buff *skb = cb_arg;
- struct device *dev = q->dev;
- unsigned int i;
u16 qi;
- if (desc_info->nbufs) {
- dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
- buf_info->len, DMA_TO_DEVICE);
- buf_info++;
- for (i = 1; i < desc_info->nbufs; i++, buf_info++)
- dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
- buf_info->len, DMA_TO_DEVICE);
- }
+ ionic_tx_desc_unmap_bufs(q, desc_info);
if (!skb)
return;
@@ -931,8 +940,11 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
err = ionic_tx_tcp_inner_pseudo_csum(skb);
else
err = ionic_tx_tcp_pseudo_csum(skb);
- if (err)
+ if (err) {
+ /* clean up mapping from ionic_tx_map_skb */
+ ionic_tx_desc_unmap_bufs(q, desc_info);
return err;
+ }
if (encap)
hdrlen = skb_inner_transport_header(skb) - skb->data +
@@ -1003,8 +1015,8 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
return 0;
}
-static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
- struct ionic_desc_info *desc_info)
+static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
+ struct ionic_desc_info *desc_info)
{
struct ionic_txq_desc *desc = desc_info->txq_desc;
struct ionic_buf_info *buf_info = desc_info->bufs;
@@ -1038,12 +1050,10 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
stats->crc32_csum++;
else
stats->csum++;
-
- return 0;
}
-static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
- struct ionic_desc_info *desc_info)
+static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
+ struct ionic_desc_info *desc_info)
{
struct ionic_txq_desc *desc = desc_info->txq_desc;
struct ionic_buf_info *buf_info = desc_info->bufs;
@@ -1074,12 +1084,10 @@ static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
desc->csum_offset = 0;
stats->csum_none++;
-
- return 0;
}
-static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
- struct ionic_desc_info *desc_info)
+static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
+ struct ionic_desc_info *desc_info)
{
struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc;
struct ionic_buf_info *buf_info = &desc_info->bufs[1];
@@ -1093,31 +1101,24 @@ static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
}
stats->frags += skb_shinfo(skb)->nr_frags;
-
- return 0;
}
static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
{
struct ionic_desc_info *desc_info = &q->info[q->head_idx];
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- int err;
if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
return -EIO;
/* set up the initial descriptor */
if (skb->ip_summed == CHECKSUM_PARTIAL)
- err = ionic_tx_calc_csum(q, skb, desc_info);
+ ionic_tx_calc_csum(q, skb, desc_info);
else
- err = ionic_tx_calc_no_csum(q, skb, desc_info);
- if (err)
- return err;
+ ionic_tx_calc_no_csum(q, skb, desc_info);
/* add frags */
- err = ionic_tx_skb_frags(q, skb, desc_info);
- if (err)
- return err;
+ ionic_tx_skb_frags(q, skb, desc_info);
skb_tx_timestamp(skb);
stats->pkts++;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index cc4ec2bb36db..672480c9d195 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -3098,6 +3098,9 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
continue;
}
+ /* Some flows may keep the variable set */
+ p_hwfn->mcp_info->mcp_handling_status = 0;
+
rc = qed_calc_hw_mode(p_hwfn);
if (rc)
return rc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index 0ce37f2460a4..407029a36fa1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -1835,8 +1835,6 @@ struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
if (!allocated_mem)
return NULL;
- memset(allocated_mem, 0, NUM_STORMS * sizeof(struct phys_mem_desc));
-
/* For each Storm, set physical address in RAM */
while (buf_offset < buf_size) {
struct phys_mem_desc *storm_mem_desc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index da1eadabcb41..9fb1fa479d4b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -140,7 +140,7 @@ static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
if (p_hwfn->mcp_info) {
- struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;
+ struct qed_mcp_cmd_elem *p_cmd_elem = NULL, *p_tmp;
kfree(p_hwfn->mcp_info->mfw_mb_cur);
kfree(p_hwfn->mcp_info->mfw_mb_shadow);
@@ -249,6 +249,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
/* Initialize the MFW spinlock */
spin_lock_init(&p_info->cmd_lock);
spin_lock_init(&p_info->link_lock);
+ spin_lock_init(&p_info->unload_lock);
INIT_LIST_HEAD(&p_info->cmd_list);
@@ -614,12 +615,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
usecs);
}
-int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 cmd,
- u32 param,
- u32 *o_mcp_resp,
- u32 *o_mcp_param)
+static int _qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param,
+ bool can_sleep)
{
struct qed_mcp_mb_params mb_params;
int rc;
@@ -627,6 +629,7 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
memset(&mb_params, 0, sizeof(mb_params));
mb_params.cmd = cmd;
mb_params.param = param;
+ mb_params.flags = can_sleep ? QED_MB_FLAG_CAN_SLEEP : 0;
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
@@ -638,6 +641,28 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
return 0;
}
+int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param)
+{
+ return (_qed_mcp_cmd(p_hwfn, p_ptt, cmd, param,
+ o_mcp_resp, o_mcp_param, true));
+}
+
+int qed_mcp_cmd_nosleep(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param)
+{
+ return (_qed_mcp_cmd(p_hwfn, p_ptt, cmd, param,
+ o_mcp_resp, o_mcp_param, false));
+}
+
static int
qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -1071,10 +1096,15 @@ int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return 0;
}
+#define MFW_COMPLETION_MAX_ITER 5000
+#define MFW_COMPLETION_INTERVAL_MS 1
+
int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_mb_params mb_params;
+ u32 cnt = MFW_COMPLETION_MAX_ITER;
u32 wol_param;
+ int rc;
switch (p_hwfn->cdev->wol_config) {
case QED_OV_WOL_DISABLED:
@@ -1097,7 +1127,23 @@ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
mb_params.param = wol_param;
mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
- return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ spin_lock_bh(&p_hwfn->mcp_info->unload_lock);
+ set_bit(QED_MCP_BYPASS_PROC_BIT,
+ &p_hwfn->mcp_info->mcp_handling_status);
+ spin_unlock_bh(&p_hwfn->mcp_info->unload_lock);
+
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+
+ while (test_bit(QED_MCP_IN_PROCESSING_BIT,
+ &p_hwfn->mcp_info->mcp_handling_status) && --cnt)
+ msleep(MFW_COMPLETION_INTERVAL_MS);
+
+ if (!cnt)
+ DP_NOTICE(p_hwfn,
+ "Failed to wait MFW event completion after %d msec\n",
+ MFW_COMPLETION_MAX_ITER * MFW_COMPLETION_INTERVAL_MS);
+
+ return rc;
}
int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -1728,8 +1774,8 @@ static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);
/* Acknowledge the MFW */
- qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
- &param);
+ qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
+ &param);
}
static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -1766,8 +1812,8 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
/* Acknowledge the MFW */
- qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
- &resp, &param);
+ qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
+ &resp, &param);
}
static void qed_mcp_handle_fan_failure(struct qed_hwfn *p_hwfn,
@@ -1997,6 +2043,19 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
"Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
+ spin_lock_bh(&p_hwfn->mcp_info->unload_lock);
+ if (test_bit(QED_MCP_BYPASS_PROC_BIT,
+ &p_hwfn->mcp_info->mcp_handling_status)) {
+ spin_unlock_bh(&p_hwfn->mcp_info->unload_lock);
+ DP_INFO(p_hwfn,
+ "Msg [%d] is bypassed on unload flow\n", i);
+ continue;
+ }
+
+ set_bit(QED_MCP_IN_PROCESSING_BIT,
+ &p_hwfn->mcp_info->mcp_handling_status);
+ spin_unlock_bh(&p_hwfn->mcp_info->unload_lock);
+
switch (i) {
case MFW_DRV_MSG_LINK_CHANGE:
qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
@@ -2050,6 +2109,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
rc = -EINVAL;
}
+
+ clear_bit(QED_MCP_IN_PROCESSING_BIT,
+ &p_hwfn->mcp_info->mcp_handling_status);
}
/* ACK everything */
@@ -3675,8 +3737,8 @@ static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
{
int rc;
- rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
- p_mcp_resp, p_mcp_param);
+ rc = qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD,
+ param, p_mcp_resp, p_mcp_param);
if (rc)
return rc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 369e1892450a..9bd0565fe8ab 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -393,11 +393,12 @@ int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *p_board_config);
/**
- * qed_mcp_cmd(): General function for sending commands to the MCP
+ * qed_mcp_cmd(): Sleepable function for sending commands to the MCP
* mailbox. It acquire mutex lock for the entire
* operation, from sending the request until the MCP
* response. Waiting for MCP response will be checked up
- * to 5 seconds every 5ms.
+ * to 5 seconds every 10ms. Should not be called from atomic
+ * context.
*
* @p_hwfn: HW device data.
* @p_ptt: PTT required for register access.
@@ -417,6 +418,31 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
u32 *o_mcp_param);
/**
+ * qed_mcp_cmd_nosleep(): Function for sending commands to the MCP
+ * mailbox. It acquires the mutex lock for the entire
+ * operation, from sending the request until the MCP
+ * response. Waiting for MCP response will be checked up
+ * to 5 seconds every 10us. Should be called when sleep
+ * is not allowed.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @cmd: command to be sent to the MCP.
+ * @param: Optional param
+ * @o_mcp_resp: The MCP response code (exclude sequence).
+ * @o_mcp_param: Optional parameter provided by the MCP
+ * response
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_cmd_nosleep(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param);
+
+/**
* qed_mcp_drain(): drains the nig, allowing completion to pass in
* case of pauses.
* (Should be called only from sleepable context)
@@ -762,6 +788,14 @@ struct qed_mcp_info {
/* S/N for debug data mailbox commands */
atomic_t dbg_data_seq;
+
+ /* Spinlock used to sync the flag mcp_handling_status with
+ * the mfw events handler
+ */
+ spinlock_t unload_lock;
+ unsigned long mcp_handling_status;
+#define QED_MCP_BYPASS_PROC_BIT 0
+#define QED_MCP_IN_PROCESSING_BIT 1
};
struct qed_mcp_mb_params {
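The qed_mcp changes above add a small handshake so the unload request is not raced by the MFW event handler: unload takes unload_lock, sets a bypass bit, then waits for any in-flight handler (which brackets each message with an in-processing bit) to drain, and handlers that start afterwards skip the message. A compact userspace sketch of the same protocol follows, using C11 atomics in place of the kernel's spinlock and bitops; all names below are illustrative, not the driver's.

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Illustrative stand-ins for QED_MCP_BYPASS_PROC_BIT and
     * QED_MCP_IN_PROCESSING_BIT; the kernel guards these with
     * unload_lock, here atomics stand in for spinlock + bitops.
     */
    #define BYPASS_BIT      (1u << 0)
    #define IN_PROCESSING   (1u << 1)

    static atomic_uint handling_status;

    /* Event-handler side: skip the message if unload already set the
     * bypass bit, otherwise mark a handler as in flight while it runs.
     */
    static bool handler_begin(void)
    {
        unsigned int old = atomic_load(&handling_status);

        do {
            if (old & BYPASS_BIT)
                return false;           /* bypassed on unload flow */
        } while (!atomic_compare_exchange_weak(&handling_status, &old,
                                               old | IN_PROCESSING));
        return true;
    }

    static void handler_end(void)
    {
        atomic_fetch_and(&handling_status, ~IN_PROCESSING);
    }

    /* Unload side: set the bypass bit, then wait for any in-flight
     * handler to drain (the driver msleep()s here with a bounded count).
     */
    static void unload_request(void)
    {
        atomic_fetch_or(&handling_status, BYPASS_BIT);
        while (atomic_load(&handling_status) & IN_PROCESSING)
            ;
    }

    int main(void)
    {
        if (handler_begin())    /* a message handler runs...          */
            handler_end();      /* ...and completes                   */
        unload_request();       /* unload now bypasses later messages */
        return handler_begin() ? 1 : 0; /* 0: message skipped         */
    }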
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 48cf4355bc47..0848b5529d48 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -2984,12 +2984,16 @@ static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
struct qed_filter_accept_flags *flags = &params->accept_flags;
struct qed_public_vf_info *vf_info;
+ u16 tlv_mask;
+
+ tlv_mask = BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM) |
+ BIT(QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN);
/* Untrusted VFs can't even be trusted to know that fact.
* Simply indicate everything is configured fine, and trace
* configuration 'behind their back'.
*/
- if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM)))
+ if (!(*tlvs & tlv_mask))
return 0;
vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
@@ -3006,6 +3010,13 @@ static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
flags->tx_accept_filter &= ~mask;
}
+ if (params->update_accept_any_vlan_flg) {
+ vf_info->accept_any_vlan = params->accept_any_vlan;
+
+ if (vf_info->forced_vlan && !vf_info->is_trusted_configured)
+ params->accept_any_vlan = false;
+ }
+
return 0;
}
@@ -4719,6 +4730,7 @@ static int qed_get_vf_config(struct qed_dev *cdev,
tx_rate = vf_info->tx_rate;
ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
+ ivi->trusted = vf_info->is_trusted_request;
return 0;
}
@@ -5149,6 +5161,12 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
params.update_ctl_frame_check = 1;
params.mac_chk_en = !vf_info->is_trusted_configured;
+ params.update_accept_any_vlan_flg = 0;
+
+ if (vf_info->accept_any_vlan && vf_info->forced_vlan) {
+ params.update_accept_any_vlan_flg = 1;
+ params.accept_any_vlan = vf_info->accept_any_vlan;
+ }
if (vf_info->rx_accept_mode & mask) {
flags->update_rx_mode_config = 1;
@@ -5164,13 +5182,20 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
if (!vf_info->is_trusted_configured) {
flags->rx_accept_filter &= ~mask;
flags->tx_accept_filter &= ~mask;
+ params.accept_any_vlan = false;
}
if (flags->update_rx_mode_config ||
flags->update_tx_mode_config ||
- params.update_ctl_frame_check)
+ params.update_ctl_frame_check ||
+ params.update_accept_any_vlan_flg) {
+ DP_VERBOSE(hwfn, QED_MSG_IOV,
+ "vport update config for %s VF[abs 0x%x rel 0x%x]\n",
+ vf_info->is_trusted_configured ? "trusted" : "untrusted",
+ vf->abs_vf_id, vf->relative_vf_id);
qed_sp_vport_update(hwfn, &params,
QED_SPQ_MODE_EBLOCK, NULL);
+ }
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index f448e3dd6c8b..6ee2493de164 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -62,6 +62,7 @@ struct qed_public_vf_info {
bool is_trusted_request;
u8 rx_accept_mode;
u8 tx_accept_mode;
+ bool accept_any_vlan;
};
struct qed_iov_vf_init_params {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index e10fe071a40f..54a2d653be63 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1355,7 +1355,7 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
memset(data, 0, stats->n_stats * sizeof(u64));
- for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
tx_ring = &adapter->tx_ring[ring];
data = qlcnic_fill_tx_queue_stats(data, tx_ring);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 29cdcb2285b1..bcf3746220df 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -10,6 +10,7 @@
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <linux/printk.h>
+#include <linux/jiffies.h>
#include "qlcnic.h"
@@ -332,7 +333,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
if (ether_addr_equal(tmp_fil->faddr, (u8 *)&src_addr) &&
tmp_fil->vlan_id == vlan_id) {
- if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
+ if (time_is_before_jiffies(QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
qlcnic_change_filter(adapter, &src_addr,
vlan_id, tx_ring);
tmp_fil->ftime = jiffies;
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 3c5494afd3c0..c865a4be05ee 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -435,7 +435,7 @@ qcaspi_receive(struct qcaspi *qca)
qca->rx_skb->protocol = eth_type_trans(
qca->rx_skb, qca->rx_skb->dev);
skb_checksum_none_assert(qca->rx_skb);
- netif_rx_ni(qca->rx_skb);
+ netif_rx(qca->rx_skb);
qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
net_dev->mtu + VLAN_ETH_HLEN);
if (!qca->rx_skb) {
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index 27c4f43176aa..26646cb6a20a 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -108,7 +108,7 @@ qca_tty_receive(struct serdev_device *serdev, const unsigned char *data,
qca->rx_skb->protocol = eth_type_trans(
qca->rx_skb, qca->rx_skb->dev);
skb_checksum_none_assert(qca->rx_skb);
- netif_rx_ni(qca->rx_skb);
+ netif_rx(qca->rx_skb);
qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
netdev->mtu +
VLAN_ETH_HLEN);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index bfbd7847f946..a313242a762e 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -207,7 +207,7 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
dev = skb->dev;
port = rmnet_get_port_rcu(dev);
if (unlikely(!port)) {
- atomic_long_inc(&skb->dev->rx_nohandler);
+ dev_core_stats_rx_nohandler_inc(skb->dev);
kfree_skb(skb);
goto done;
}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 3676976c875b..ba194698cc14 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -298,7 +298,6 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
{
struct rmnet_map_header *map_header;
u32 padding, map_datalen;
- u8 *padbytes;
map_datalen = skb->len - hdrlen;
map_header = (struct rmnet_map_header *)
@@ -323,8 +322,7 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
if (skb_tailroom(skb) < padding)
return NULL;
- padbytes = (u8 *)skb_put(skb, padding);
- memset(padbytes, 0, padding);
+ skb_put_zero(skb, padding);
done:
map_header->pkt_len = htons(map_datalen + padding);
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 19e2621e0645..33f5c5698ccb 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -1397,8 +1397,11 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
rtl_lock_config_regs(tp);
device_set_wakeup_enable(tp_to_dev(tp), wolopts);
- rtl_set_d3_pll_down(tp, !wolopts);
- tp->dev->wol_enabled = wolopts ? 1 : 0;
+
+ if (tp->dash_type == RTL_DASH_NONE) {
+ rtl_set_d3_pll_down(tp, !wolopts);
+ tp->dev->wol_enabled = wolopts ? 1 : 0;
+ }
}
static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -2667,10 +2670,7 @@ static void rtl_enable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38:
rtl_eri_set_bits(tp, 0xd4, 0x0c00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
- rtl_eri_set_bits(tp, 0xd4, 0x1f80);
- break;
- case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80);
break;
default:
@@ -2678,13 +2678,48 @@ static void rtl_enable_exit_l1(struct rtl8169_private *tp)
}
}
+static void rtl_disable_exit_l1(struct rtl8169_private *tp)
+{
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
+ rtl_eri_clear_bits(tp, 0xd4, 0x1f00);
+ break;
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
+ r8168_mac_ocp_modify(tp, 0xc0ac, 0x1f80, 0);
+ break;
+ default:
+ break;
+ }
+}
+
static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
{
/* Don't enable ASPM in the chip if OS can't control ASPM */
if (enable && tp->aspm_manageable) {
RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en);
RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_45 ... RTL_GIGA_MAC_VER_48:
+ case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63:
+ /* reset ephy tx/rx disable timer */
+ r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0);
+ /* chip can trigger L1.2 */
+ r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, BIT(2));
+ break;
+ default:
+ break;
+ }
} else {
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_45 ... RTL_GIGA_MAC_VER_48:
+ case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63:
+ r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0);
+ break;
+ default:
+ break;
+ }
+
RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
}
@@ -4683,7 +4718,7 @@ static void rtl8169_down(struct rtl8169_private *tp)
rtl_pci_commit(tp);
rtl8169_cleanup(tp, true);
-
+ rtl_disable_exit_l1(tp);
rtl_prepare_power_down(tp);
}
@@ -4843,8 +4878,6 @@ static void rtl8169_net_suspend(struct rtl8169_private *tp)
rtl8169_down(tp);
}
-#ifdef CONFIG_PM
-
static int rtl8169_runtime_resume(struct device *dev)
{
struct rtl8169_private *tp = dev_get_drvdata(dev);
@@ -4860,7 +4893,7 @@ static int rtl8169_runtime_resume(struct device *dev)
return 0;
}
-static int __maybe_unused rtl8169_suspend(struct device *device)
+static int rtl8169_suspend(struct device *device)
{
struct rtl8169_private *tp = dev_get_drvdata(device);
@@ -4873,7 +4906,7 @@ static int __maybe_unused rtl8169_suspend(struct device *device)
return 0;
}
-static int __maybe_unused rtl8169_resume(struct device *device)
+static int rtl8169_resume(struct device *device)
{
struct rtl8169_private *tp = dev_get_drvdata(device);
@@ -4908,6 +4941,9 @@ static int rtl8169_runtime_idle(struct device *device)
{
struct rtl8169_private *tp = dev_get_drvdata(device);
+ if (tp->dash_type != RTL_DASH_NONE)
+ return -EBUSY;
+
if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev))
pm_schedule_suspend(device, 10000);
@@ -4915,13 +4951,11 @@ static int rtl8169_runtime_idle(struct device *device)
}
static const struct dev_pm_ops rtl8169_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(rtl8169_suspend, rtl8169_resume)
- SET_RUNTIME_PM_OPS(rtl8169_runtime_suspend, rtl8169_runtime_resume,
- rtl8169_runtime_idle)
+ SYSTEM_SLEEP_PM_OPS(rtl8169_suspend, rtl8169_resume)
+ RUNTIME_PM_OPS(rtl8169_runtime_suspend, rtl8169_runtime_resume,
+ rtl8169_runtime_idle)
};
-#endif /* CONFIG_PM */
-
static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
{
/* WoL fails with 8168b when the receiver is disabled. */
@@ -4950,7 +4984,8 @@ static void rtl_shutdown(struct pci_dev *pdev)
/* Restore original MAC address */
rtl_rar_set(tp, tp->dev->perm_addr);
- if (system_state == SYSTEM_POWER_OFF) {
+ if (system_state == SYSTEM_POWER_OFF &&
+ tp->dash_type == RTL_DASH_NONE) {
if (tp->saved_wolopts)
rtl_wol_shutdown_quirk(tp);
@@ -5255,6 +5290,16 @@ done:
rtl_rar_set(tp, mac_addr);
}
+/* register is set if system vendor successfully tested ASPM 1.2 */
+static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
+{
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_60 &&
+ r8168_mac_ocp_read(tp, 0xc0b2) & 0xf)
+ return true;
+
+ return false;
+}
+
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct rtl8169_private *tp;
@@ -5333,7 +5378,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* Chips from RTL8168h partially have issues with L1.2, but seem
* to work fine with L1 and L1.1.
*/
- if (tp->mac_version >= RTL_GIGA_MAC_VER_45)
+ if (rtl_aspm_is_safe(tp))
+ rc = 0;
+ else if (tp->mac_version >= RTL_GIGA_MAC_VER_45)
rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
else
rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
@@ -5409,7 +5456,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* configure chip for default features */
rtl8169_set_features(dev, dev->features);
- rtl_set_d3_pll_down(tp, true);
+ if (tp->dash_type == RTL_DASH_NONE) {
+ rtl_set_d3_pll_down(tp, true);
+ } else {
+ rtl_set_d3_pll_down(tp, false);
+ dev->wol_enabled = 1;
+ }
jumbo_max = rtl_jumbo_max(tp);
if (jumbo_max)
@@ -5460,9 +5512,7 @@ static struct pci_driver rtl8169_pci_driver = {
.probe = rtl_init_one,
.remove = rtl_remove_one,
.shutdown = rtl_shutdown,
-#ifdef CONFIG_PM
- .driver.pm = &rtl8169_pm_ops,
-#endif
+ .driver.pm = pm_ptr(&rtl8169_pm_ops),
};
module_pci_driver(rtl8169_pci_driver);
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index f7ad5487879b..15c295f90196 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -429,15 +429,6 @@ static const struct phy_reg rtl8168d_1_phy_reg_init_0[] = {
{ 0x0d, 0xf880 }
};
-static const struct phy_reg rtl8168d_1_phy_reg_init_1[] = {
- { 0x1f, 0x0002 },
- { 0x05, 0x669a },
- { 0x1f, 0x0005 },
- { 0x05, 0x8330 },
- { 0x06, 0x669a },
- { 0x1f, 0x0002 }
-};
-
static void rtl8168d_apply_firmware_cond(struct rtl8169_private *tp,
struct phy_device *phydev,
u16 val)
@@ -455,6 +446,29 @@ static void rtl8168d_apply_firmware_cond(struct rtl8169_private *tp,
r8169_apply_firmware(tp);
}
+static void rtl8168d_1_common(struct phy_device *phydev)
+{
+ u16 val;
+
+ phy_write_paged(phydev, 0x0002, 0x05, 0x669a);
+ r8168d_phy_param(phydev, 0x8330, 0xffff, 0x669a);
+ phy_write(phydev, 0x1f, 0x0002);
+
+ val = phy_read(phydev, 0x0d);
+
+ if ((val & 0x00ff) != 0x006c) {
+ static const u16 set[] = {
+ 0x0065, 0x0066, 0x0067, 0x0068,
+ 0x0069, 0x006a, 0x006b, 0x006c
+ };
+ int i;
+
+ val &= 0xff00;
+ for (i = 0; i < ARRAY_SIZE(set); i++)
+ phy_write(phydev, 0x0d, val | set[i]);
+ }
+}
+
static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
@@ -469,25 +483,7 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp,
phy_modify(phydev, 0x0c, 0x5d00, 0xa200);
if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
- int val;
-
- rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_1);
-
- val = phy_read(phydev, 0x0d);
-
- if ((val & 0x00ff) != 0x006c) {
- static const u32 set[] = {
- 0x0065, 0x0066, 0x0067, 0x0068,
- 0x0069, 0x006a, 0x006b, 0x006c
- };
- int i;
-
- phy_write(phydev, 0x1f, 0x0002);
-
- val &= 0xff00;
- for (i = 0; i < ARRAY_SIZE(set); i++)
- phy_write(phydev, 0x0d, val | set[i]);
- }
+ rtl8168d_1_common(phydev);
} else {
phy_write_paged(phydev, 0x0002, 0x05, 0x6662);
r8168d_phy_param(phydev, 0x8330, 0xffff, 0x6662);
@@ -513,24 +509,7 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp,
rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_0);
if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
- int val;
-
- rtl_writephy_batch(phydev, rtl8168d_1_phy_reg_init_1);
-
- val = phy_read(phydev, 0x0d);
- if ((val & 0x00ff) != 0x006c) {
- static const u32 set[] = {
- 0x0065, 0x0066, 0x0067, 0x0068,
- 0x0069, 0x006a, 0x006b, 0x006c
- };
- int i;
-
- phy_write(phydev, 0x1f, 0x0002);
-
- val &= 0xff00;
- for (i = 0; i < ARRAY_SIZE(set); i++)
- phy_write(phydev, 0x0d, val | set[i]);
- }
+ rtl8168d_1_common(phydev);
} else {
phy_write_paged(phydev, 0x0002, 0x05, 0x2642);
r8168d_phy_param(phydev, 0x8330, 0xffff, 0x2642);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index b215cde68e10..525d66f71f02 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -475,7 +475,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
goto error;
for (i = 0; i < priv->num_rx_ring[q]; i++) {
- skb = netdev_alloc_skb(ndev, info->max_rx_len);
+ skb = __netdev_alloc_skb(ndev, info->max_rx_len, GFP_KERNEL);
if (!skb)
goto error;
ravb_set_buffer_align(skb);
@@ -1432,11 +1432,7 @@ static int ravb_phy_init(struct net_device *ndev)
* at this time.
*/
if (soc_device_match(r8a7795es10)) {
- err = phy_set_max_speed(phydev, SPEED_100);
- if (err) {
- netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
- goto err_phy_disconnect;
- }
+ phy_set_max_speed(phydev, SPEED_100);
netdev_info(ndev, "limited PHY to 100Mbit/s\n");
}
@@ -1457,8 +1453,6 @@ static int ravb_phy_init(struct net_device *ndev)
return 0;
-err_phy_disconnect:
- phy_disconnect(phydev);
err_deregister_fixed_link:
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
@@ -2854,7 +2848,6 @@ static int ravb_wol_restore(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- int ret;
if (info->nc_queues)
napi_enable(&priv->napi[RAVB_NC]);
@@ -2863,9 +2856,7 @@ static int ravb_wol_restore(struct net_device *ndev)
/* Disable MagicPacket */
ravb_modify(ndev, ECMR, ECMR_MPDE, 0);
- ret = ravb_close(ndev);
- if (ret < 0)
- return ret;
+ ravb_close(ndev);
return disable_irq_wake(priv->emac_irq);
}
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index d947a628e166..67ade78fb767 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2026,14 +2026,8 @@ static int sh_eth_phy_init(struct net_device *ndev)
}
/* mask with MAC supported features */
- if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) {
- int err = phy_set_max_speed(phydev, SPEED_100);
- if (err) {
- netdev_err(ndev, "failed to limit PHY to 100 Mbit/s\n");
- phy_disconnect(phydev);
- return err;
- }
- }
+ if (mdp->cd->register_type != SH_ETH_REG_GIGABIT)
+ phy_set_max_speed(phydev, SPEED_100);
phy_attached_info(phydev);
@@ -3450,9 +3444,7 @@ static int sh_eth_wol_restore(struct net_device *ndev)
* both be reset and all registers restored. This is what
* happens during suspend and resume without WoL enabled.
*/
- ret = sh_eth_close(ndev);
- if (ret < 0)
- return ret;
+ sh_eth_close(ndev);
ret = sh_eth_open(ndev);
if (ret < 0)
return ret;
@@ -3464,7 +3456,7 @@ static int sh_eth_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct sh_eth_private *mdp = netdev_priv(ndev);
- int ret = 0;
+ int ret;
if (!netif_running(ndev))
return 0;
@@ -3483,7 +3475,7 @@ static int sh_eth_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct sh_eth_private *mdp = netdev_priv(ndev);
- int ret = 0;
+ int ret;
if (!netif_running(ndev))
return 0;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 2881f5b2b5f4..407a1f8e3059 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -127,7 +127,7 @@ bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
/* MAC core supports the EEE feature. */
if (priv->hw_cap.eee) {
/* Check if the PHY supports EEE */
- if (phy_init_eee(ndev->phydev, 1))
+ if (phy_init_eee(ndev->phydev, true))
return false;
priv->eee_active = 1;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index cf366ed2557c..50d535981a35 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3990,6 +3990,30 @@ static unsigned int ef10_check_caps(const struct efx_nic *efx,
}
}
+static unsigned int efx_ef10_recycle_ring_size(const struct efx_nic *efx)
+{
+ unsigned int ret = EFX_RECYCLE_RING_SIZE_10G;
+
+ /* There is no difference between PFs and VFs. The size is based on
+ * the maximum link speed of a given NIC.
+ */
+ switch (efx->pci_dev->device & 0xfff) {
+ case 0x0903: /* Farmingdale can do up to 10G */
+ break;
+ case 0x0923: /* Greenport can do up to 40G */
+ case 0x0a03: /* Medford can do up to 40G */
+ ret *= 4;
+ break;
+ default: /* Medford2 can do up to 100G */
+ ret *= 10;
+ }
+
+ if (IS_ENABLED(CONFIG_PPC64))
+ ret *= 4;
+
+ return ret;
+}
+
#define EF10_OFFLOAD_FEATURES \
(NETIF_F_IP_CSUM | \
NETIF_F_HW_VLAN_CTAG_FILTER | \
@@ -4106,6 +4130,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.check_caps = ef10_check_caps,
.print_additional_fwver = efx_ef10_print_additional_fwver,
.sensor_event = efx_mcdi_sensor_event,
+ .rx_recycle_ring_size = efx_ef10_recycle_ring_size,
};
const struct efx_nic_type efx_hunt_a0_nic_type = {
@@ -4243,4 +4268,5 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.check_caps = ef10_check_caps,
.print_additional_fwver = efx_ef10_print_additional_fwver,
.sensor_event = efx_mcdi_sensor_event,
+ .rx_recycle_ring_size = efx_ef10_recycle_ring_size,
};
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index f79b14a119ae..a07cbf45a326 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -23,6 +23,7 @@
#include "ef100_rx.h"
#include "ef100_tx.h"
#include "ef100_netdev.h"
+#include "rx_common.h"
#define EF100_MAX_VIS 4096
#define EF100_NUM_MCDI_BUFFERS 1
@@ -696,6 +697,12 @@ static unsigned int ef100_check_caps(const struct efx_nic *efx,
}
}
+static unsigned int efx_ef100_recycle_ring_size(const struct efx_nic *efx)
+{
+ /* Maximum link speed for Riverhead is 100G */
+ return 10 * EFX_RECYCLE_RING_SIZE_10G;
+}
+
/* NIC level access functions
*/
#define EF100_OFFLOAD_FEATURES (NETIF_F_HW_CSUM | NETIF_F_RXCSUM | \
@@ -770,6 +777,7 @@ const struct efx_nic_type ef100_pf_nic_type = {
.rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
.rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config,
.rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
+ .rx_recycle_ring_size = efx_ef100_recycle_ring_size,
.reconfigure_mac = ef100_reconfigure_mac,
.reconfigure_port = efx_mcdi_port_reconfigure,
@@ -849,6 +857,7 @@ const struct efx_nic_type ef100_vf_nic_type = {
.rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
.rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
.rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
+ .rx_recycle_ring_size = efx_ef100_recycle_ring_size,
.reconfigure_mac = ef100_reconfigure_mac,
.test_nvram = efx_new_mcdi_nvram_test_all,
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index ead550ae2709..d6fdcdc530ca 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -78,31 +78,48 @@ static const struct efx_channel_type efx_default_channel_type = {
* INTERRUPTS
*************/
-static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
+static unsigned int count_online_cores(struct efx_nic *efx, bool local_node)
{
- cpumask_var_t thread_mask;
+ cpumask_var_t filter_mask;
unsigned int count;
int cpu;
+ if (unlikely(!zalloc_cpumask_var(&filter_mask, GFP_KERNEL))) {
+ netif_warn(efx, probe, efx->net_dev,
+ "RSS disabled due to allocation failure\n");
+ return 1;
+ }
+
+ cpumask_copy(filter_mask, cpu_online_mask);
+ if (local_node) {
+ int numa_node = pcibus_to_node(efx->pci_dev->bus);
+
+ cpumask_and(filter_mask, filter_mask, cpumask_of_node(numa_node));
+ }
+
+ count = 0;
+ for_each_cpu(cpu, filter_mask) {
+ ++count;
+ cpumask_andnot(filter_mask, filter_mask, topology_sibling_cpumask(cpu));
+ }
+
+ free_cpumask_var(filter_mask);
+
+ return count;
+}
+
+static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
+{
+ unsigned int count;
+
if (rss_cpus) {
count = rss_cpus;
} else {
- if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
- netif_warn(efx, probe, efx->net_dev,
- "RSS disabled due to allocation failure\n");
- return 1;
- }
+ count = count_online_cores(efx, true);
- count = 0;
- for_each_online_cpu(cpu) {
- if (!cpumask_test_cpu(cpu, thread_mask)) {
- ++count;
- cpumask_or(thread_mask, thread_mask,
- topology_sibling_cpumask(cpu));
- }
- }
-
- free_cpumask_var(thread_mask);
+ /* If there are no online CPUs in the local node, fall back to any online CPUs */
+ if (count == 0)
+ count = count_online_cores(efx, false);
}
if (count > EFX_MAX_RX_QUEUES) {
@@ -369,12 +386,20 @@ int efx_probe_interrupts(struct efx_nic *efx)
#if defined(CONFIG_SMP)
void efx_set_interrupt_affinity(struct efx_nic *efx)
{
+ int numa_node = pcibus_to_node(efx->pci_dev->bus);
+ const struct cpumask *numa_mask = cpumask_of_node(numa_node);
struct efx_channel *channel;
unsigned int cpu;
+ /* If there are no online CPUs in the local node, fall back to any online CPU */
+ if (cpumask_first_and(cpu_online_mask, numa_mask) >= nr_cpu_ids)
+ numa_mask = cpu_online_mask;
+
+ cpu = -1;
efx_for_each_channel(channel, efx) {
- cpu = cpumask_local_spread(channel->channel,
- pcibus_to_node(efx->pci_dev->bus));
+ cpu = cpumask_next_and(cpu, cpu_online_mask, numa_mask);
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_first_and(cpu_online_mask, numa_mask);
irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
}
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index cc15ee8812d9..c75dc75e2857 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1282,6 +1282,7 @@ struct efx_udp_tunnel {
* @udp_tnl_has_port: Check if a port has been added as UDP tunnel
* @print_additional_fwver: Dump NIC-specific additional FW version info
* @sensor_event: Handle a sensor event from MCDI
+ * @rx_recycle_ring_size: Size of the RX recycle ring
* @revision: Hardware architecture revision
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
@@ -1460,6 +1461,7 @@ struct efx_nic_type {
size_t (*print_additional_fwver)(struct efx_nic *efx, char *buf,
size_t len);
void (*sensor_event)(struct efx_nic *efx, efx_qword_t *ev);
+ unsigned int (*rx_recycle_ring_size)(const struct efx_nic *efx);
int revision;
unsigned int txd_ptr_tbl_base;
diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h
index b9cafe9cd568..0cef35c0c559 100644
--- a/drivers/net/ethernet/sfc/nic_common.h
+++ b/drivers/net/ethernet/sfc/nic_common.h
@@ -195,6 +195,11 @@ static inline void efx_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
efx->type->sensor_event(efx, ev);
}
+static inline unsigned int efx_rx_recycle_ring_size(const struct efx_nic *efx)
+{
+ return efx->type->rx_recycle_ring_size(efx);
+}
+
/* Some statistics are computed as A - B where A and B each increase
* linearly with some hardware counter(s) and the counters are read
* asynchronously. If the counters contributing to B are always read
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index 633ca77a26fd..1b22c7be0088 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -23,13 +23,6 @@ module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
"RX descriptor ring refill threshold (%)");
-/* Number of RX buffers to recycle pages for. When creating the RX page recycle
- * ring, this number is divided by the number of buffers per page to calculate
- * the number of pages to store in the RX page recycle ring.
- */
-#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
-#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
-
/* RX maximum head room required.
*
* This must be at least 1 to prevent overflow, plus one packet-worth
@@ -141,16 +134,7 @@ static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
unsigned int bufs_in_recycle_ring, page_ring_size;
struct efx_nic *efx = rx_queue->efx;
- /* Set the RX recycle ring size */
-#ifdef CONFIG_PPC64
- bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
-#else
- if (iommu_present(&pci_bus_type))
- bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
- else
- bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
-#endif /* CONFIG_PPC64 */
-
+ bufs_in_recycle_ring = efx_rx_recycle_ring_size(efx);
page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
efx->rx_bufs_per_page);
rx_queue->page_ring = kcalloc(page_ring_size,
diff --git a/drivers/net/ethernet/sfc/rx_common.h b/drivers/net/ethernet/sfc/rx_common.h
index 207ccd8ba062..fbd2769307f9 100644
--- a/drivers/net/ethernet/sfc/rx_common.h
+++ b/drivers/net/ethernet/sfc/rx_common.h
@@ -18,6 +18,12 @@
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
EFX_RX_USR_BUF_SIZE)
+/* Number of RX buffers to recycle pages for. When creating the RX page recycle
+ * ring, this number is divided by the number of buffers per page to calculate
+ * the number of pages to store in the RX page recycle ring.
+ */
+#define EFX_RECYCLE_RING_SIZE_10G 256
+
static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
return page_address(buf->page) + buf->page_offset;
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 16347a6d0c47..ce3060e15b54 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -25,6 +25,7 @@
#include "mcdi_port_common.h"
#include "selftest.h"
#include "siena_sriov.h"
+#include "rx_common.h"
/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
@@ -958,6 +959,12 @@ static unsigned int siena_check_caps(const struct efx_nic *efx,
return 0;
}
+static unsigned int efx_siena_recycle_ring_size(const struct efx_nic *efx)
+{
+ /* Maximum link speed is 10G */
+ return EFX_RECYCLE_RING_SIZE_10G;
+}
+
/**************************************************************************
*
* Revision-dependent attributes used by efx.c and nic.c
@@ -1098,4 +1105,5 @@ const struct efx_nic_type siena_a0_nic_type = {
.rx_hash_key_size = 16,
.check_caps = siena_check_caps,
.sensor_event = efx_mcdi_sensor_event,
+ .rx_recycle_ring_size = efx_siena_recycle_ring_size,
};
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 556bd353dd42..b0c5a44785fa 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1044,7 +1044,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
"rx failed to build skb\n");
break;
}
- page_pool_release_page(dring->page_pool, page);
+ skb_mark_for_recycle(skb);
skb_reserve(skb, xdp.data - xdp.data_hard_start);
skb_put(skb, xdp.data_end - xdp.data);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 8e8778cfbbad..63754a9c4ba7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -383,10 +383,10 @@ static int intel_crosststamp(ktime_t *device,
/* Repeat until the timestamps are from the FIFO last segment */
for (i = 0; i < num_snapshot; i++) {
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ read_lock_irqsave(&priv->ptp_lock, flags);
stmmac_get_ptptime(priv, ptpaddr, &ptp_time);
*device = ns_to_ktime(ptp_time);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ read_unlock_irqrestore(&priv->ptp_lock, flags);
get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time);
*system = convert_art_to_tsc(art_time);
}
@@ -721,6 +721,7 @@ static int tgl_common_data(struct pci_dev *pdev,
plat->rx_queues_to_use = 6;
plat->tx_queues_to_use = 4;
plat->clk_ptp_rate = 200000000;
+ plat->speed_mode_2500 = intel_speed_mode_2500;
plat->safety_feat_cfg->tsoee = 1;
plat->safety_feat_cfg->mrxpee = 0;
@@ -740,7 +741,6 @@ static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
{
plat->bus_id = 1;
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
- plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return tgl_common_data(pdev, plat);
@@ -755,7 +755,6 @@ static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
{
plat->bus_id = 2;
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
- plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return tgl_common_data(pdev, plat);
@@ -1160,6 +1159,7 @@ static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G 0xa0ac
#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0 0x7aac
#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1 0x7aad
+#define PCI_DEVICE_ID_INTEL_ADLN_SGMII1G 0x54ac
static const struct pci_device_id intel_eth_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) },
@@ -1177,6 +1177,7 @@ static const struct pci_device_id intel_eth_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) },
{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
+ { PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &tgl_sgmii1g_phy0_info) },
{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index 58c0feaa8131..6ff88df58767 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -9,7 +9,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/stmmac.h>
@@ -40,6 +39,33 @@
#define ETH_FINE_DLY_GTXC BIT(1)
#define ETH_FINE_DLY_RXC BIT(0)
+/* Peri Configuration register for mt8195 */
+#define MT8195_PERI_ETH_CTRL0 0xFD0
+#define MT8195_RMII_CLK_SRC_INTERNAL BIT(28)
+#define MT8195_RMII_CLK_SRC_RXC BIT(27)
+#define MT8195_ETH_INTF_SEL GENMASK(26, 24)
+#define MT8195_RGMII_TXC_PHASE_CTRL BIT(22)
+#define MT8195_EXT_PHY_MODE BIT(21)
+#define MT8195_DLY_GTXC_INV BIT(12)
+#define MT8195_DLY_GTXC_ENABLE BIT(5)
+#define MT8195_DLY_GTXC_STAGES GENMASK(4, 0)
+
+#define MT8195_PERI_ETH_CTRL1 0xFD4
+#define MT8195_DLY_RXC_INV BIT(25)
+#define MT8195_DLY_RXC_ENABLE BIT(18)
+#define MT8195_DLY_RXC_STAGES GENMASK(17, 13)
+#define MT8195_DLY_TXC_INV BIT(12)
+#define MT8195_DLY_TXC_ENABLE BIT(5)
+#define MT8195_DLY_TXC_STAGES GENMASK(4, 0)
+
+#define MT8195_PERI_ETH_CTRL2 0xFD8
+#define MT8195_DLY_RMII_RXC_INV BIT(25)
+#define MT8195_DLY_RMII_RXC_ENABLE BIT(18)
+#define MT8195_DLY_RMII_RXC_STAGES GENMASK(17, 13)
+#define MT8195_DLY_RMII_TXC_INV BIT(12)
+#define MT8195_DLY_RMII_TXC_ENABLE BIT(5)
+#define MT8195_DLY_RMII_TXC_STAGES GENMASK(4, 0)
+
struct mac_delay_struct {
u32 tx_delay;
u32 rx_delay;
@@ -50,19 +76,21 @@ struct mac_delay_struct {
struct mediatek_dwmac_plat_data {
const struct mediatek_dwmac_variant *variant;
struct mac_delay_struct mac_delay;
+ struct clk *rmii_internal_clk;
struct clk_bulk_data *clks;
- struct device_node *np;
struct regmap *peri_regmap;
+ struct device_node *np;
struct device *dev;
phy_interface_t phy_mode;
- int num_clks_to_config;
bool rmii_clk_from_mac;
bool rmii_rxc;
+ bool mac_wol;
};
struct mediatek_dwmac_variant {
int (*dwmac_set_phy_interface)(struct mediatek_dwmac_plat_data *plat);
int (*dwmac_set_delay)(struct mediatek_dwmac_plat_data *plat);
+ void (*dwmac_fix_mac_speed)(void *priv, unsigned int speed);
/* clock ids to be requested */
const char * const *clk_list;
@@ -75,7 +103,11 @@ struct mediatek_dwmac_variant {
/* list of clocks required for mac */
static const char * const mt2712_dwmac_clk_l[] = {
- "axi", "apb", "mac_main", "ptp_ref", "rmii_internal"
+ "axi", "apb", "mac_main", "ptp_ref"
+};
+
+static const char * const mt8195_dwmac_clk_l[] = {
+ "axi", "apb", "mac_cg", "mac_main", "ptp_ref"
};
static int mt2712_set_interface(struct mediatek_dwmac_plat_data *plat)
@@ -84,23 +116,12 @@ static int mt2712_set_interface(struct mediatek_dwmac_plat_data *plat)
int rmii_rxc = plat->rmii_rxc ? RMII_CLK_SRC_RXC : 0;
u32 intf_val = 0;
- /* The clock labeled as "rmii_internal" in mt2712_dwmac_clk_l is needed
- * only in RMII(when MAC provides the reference clock), and useless for
- * RGMII/MII/RMII(when PHY provides the reference clock).
- * num_clks_to_config indicates the real number of clocks should be
- * configured, equals to (plat->variant->num_clks - 1) in default for all the case,
- * then +1 for rmii_clk_from_mac case.
- */
- plat->num_clks_to_config = plat->variant->num_clks - 1;
-
/* select phy interface in top control domain */
switch (plat->phy_mode) {
case PHY_INTERFACE_MODE_MII:
intf_val |= PHY_INTF_MII;
break;
case PHY_INTERFACE_MODE_RMII:
- if (plat->rmii_clk_from_mac)
- plat->num_clks_to_config++;
intf_val |= (PHY_INTF_RMII | rmii_rxc | rmii_clk_from_mac);
break;
case PHY_INTERFACE_MODE_RGMII:
@@ -268,6 +289,193 @@ static const struct mediatek_dwmac_variant mt2712_gmac_variant = {
.tx_delay_max = 17600,
};
+static int mt8195_set_interface(struct mediatek_dwmac_plat_data *plat)
+{
+ int rmii_clk_from_mac = plat->rmii_clk_from_mac ? MT8195_RMII_CLK_SRC_INTERNAL : 0;
+ int rmii_rxc = plat->rmii_rxc ? MT8195_RMII_CLK_SRC_RXC : 0;
+ u32 intf_val = 0;
+
+ /* select phy interface in top control domain */
+ switch (plat->phy_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_MII);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ intf_val |= (rmii_rxc | rmii_clk_from_mac);
+ intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_RMII);
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_RGMII);
+ break;
+ default:
+ dev_err(plat->dev, "phy interface not supported\n");
+ return -EINVAL;
+ }
+
+ /* MT8195 only support external PHY */
+ intf_val |= MT8195_EXT_PHY_MODE;
+
+ regmap_write(plat->peri_regmap, MT8195_PERI_ETH_CTRL0, intf_val);
+
+ return 0;
+}
+
+static void mt8195_delay_ps2stage(struct mediatek_dwmac_plat_data *plat)
+{
+ struct mac_delay_struct *mac_delay = &plat->mac_delay;
+
+ /* 290ps per stage */
+ mac_delay->tx_delay /= 290;
+ mac_delay->rx_delay /= 290;
+}
+
+static void mt8195_delay_stage2ps(struct mediatek_dwmac_plat_data *plat)
+{
+ struct mac_delay_struct *mac_delay = &plat->mac_delay;
+
+ /* 290ps per stage */
+ mac_delay->tx_delay *= 290;
+ mac_delay->rx_delay *= 290;
+}
+
+static int mt8195_set_delay(struct mediatek_dwmac_plat_data *plat)
+{
+ struct mac_delay_struct *mac_delay = &plat->mac_delay;
+ u32 gtxc_delay_val = 0, delay_val = 0, rmii_delay_val = 0;
+
+ mt8195_delay_ps2stage(plat);
+
+ switch (plat->phy_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ delay_val |= FIELD_PREP(MT8195_DLY_TXC_ENABLE, !!mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_TXC_STAGES, mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_TXC_INV, mac_delay->tx_inv);
+
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_INV, mac_delay->rx_inv);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ if (plat->rmii_clk_from_mac) {
+ /* case 1: the MAC provides the RMII reference clock,
+ * and the clock is output on the TXC pin.
+ * The egress timing can be adjusted by RMII_TXC delay macro circuit.
+ * The ingress timing can be adjusted by RMII_RXC delay macro circuit.
+ */
+ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_TXC_ENABLE,
+ !!mac_delay->tx_delay);
+ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_TXC_STAGES,
+ mac_delay->tx_delay);
+ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_TXC_INV,
+ mac_delay->tx_inv);
+
+ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_RXC_ENABLE,
+ !!mac_delay->rx_delay);
+ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_RXC_STAGES,
+ mac_delay->rx_delay);
+ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_RXC_INV,
+ mac_delay->rx_inv);
+ } else {
+ /* case 2: the RMII reference clock comes from the external PHY,
+ * and the property "rmii_rxc" indicates which pin (TXC/RXC)
+ * the reference clock is connected to. The reference clock is a
+ * received signal, so rx_delay/rx_inv are used to indicate
+ * the reference clock timing adjustment.
+ */
+ if (plat->rmii_rxc) {
+ /* the rmii reference clock from outside is connected
+ * to RXC pin, the reference clock will be adjusted
+ * by RXC delay macro circuit.
+ */
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_ENABLE,
+ !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_STAGES,
+ mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_INV,
+ mac_delay->rx_inv);
+ } else {
+ /* the rmii reference clock from outside is connected
+ * to TXC pin, the reference clock will be adjusted
+ * by TXC delay macro circuit.
+ */
+ delay_val |= FIELD_PREP(MT8195_DLY_TXC_ENABLE,
+ !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_TXC_STAGES,
+ mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_TXC_INV,
+ mac_delay->rx_inv);
+ }
+ }
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ gtxc_delay_val |= FIELD_PREP(MT8195_DLY_GTXC_ENABLE, !!mac_delay->tx_delay);
+ gtxc_delay_val |= FIELD_PREP(MT8195_DLY_GTXC_STAGES, mac_delay->tx_delay);
+ gtxc_delay_val |= FIELD_PREP(MT8195_DLY_GTXC_INV, mac_delay->tx_inv);
+
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_INV, mac_delay->rx_inv);
+
+ break;
+ default:
+ dev_err(plat->dev, "phy interface not supported\n");
+ return -EINVAL;
+ }
+
+ regmap_update_bits(plat->peri_regmap,
+ MT8195_PERI_ETH_CTRL0,
+ MT8195_RGMII_TXC_PHASE_CTRL |
+ MT8195_DLY_GTXC_INV |
+ MT8195_DLY_GTXC_ENABLE |
+ MT8195_DLY_GTXC_STAGES,
+ gtxc_delay_val);
+ regmap_write(plat->peri_regmap, MT8195_PERI_ETH_CTRL1, delay_val);
+ regmap_write(plat->peri_regmap, MT8195_PERI_ETH_CTRL2, rmii_delay_val);
+
+ mt8195_delay_stage2ps(plat);
+
+ return 0;
+}
+
+static void mt8195_fix_mac_speed(void *priv, unsigned int speed)
+{
+ struct mediatek_dwmac_plat_data *priv_plat = priv;
+
+ if ((phy_interface_mode_is_rgmii(priv_plat->phy_mode))) {
+ /* Prefer the 2ns fixed delay controlled by TXC_PHASE_CTRL
+ * when the link speed is 1Gbps with an RGMII interface;
+ * fall back to the delay macro circuit for 10/100Mbps link speeds.
+ */
+ if (speed == SPEED_1000)
+ regmap_update_bits(priv_plat->peri_regmap,
+ MT8195_PERI_ETH_CTRL0,
+ MT8195_RGMII_TXC_PHASE_CTRL |
+ MT8195_DLY_GTXC_ENABLE |
+ MT8195_DLY_GTXC_INV |
+ MT8195_DLY_GTXC_STAGES,
+ MT8195_RGMII_TXC_PHASE_CTRL);
+ else
+ mt8195_set_delay(priv_plat);
+ }
+}
+
+static const struct mediatek_dwmac_variant mt8195_gmac_variant = {
+ .dwmac_set_phy_interface = mt8195_set_interface,
+ .dwmac_set_delay = mt8195_set_delay,
+ .dwmac_fix_mac_speed = mt8195_fix_mac_speed,
+ .clk_list = mt8195_dwmac_clk_l,
+ .num_clks = ARRAY_SIZE(mt8195_dwmac_clk_l),
+ .dma_bit_mask = 35,
+ .rx_delay_max = 9280,
+ .tx_delay_max = 9280,
+};
+
static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
{
struct mac_delay_struct *mac_delay = &plat->mac_delay;
@@ -308,6 +516,7 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
mac_delay->rx_inv = of_property_read_bool(plat->np, "mediatek,rxc-inverse");
plat->rmii_rxc = of_property_read_bool(plat->np, "mediatek,rmii-rxc");
plat->rmii_clk_from_mac = of_property_read_bool(plat->np, "mediatek,rmii-clk-from-mac");
+ plat->mac_wol = of_property_read_bool(plat->np, "mediatek,mac-wol");
return 0;
}
@@ -315,18 +524,34 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
static int mediatek_dwmac_clk_init(struct mediatek_dwmac_plat_data *plat)
{
const struct mediatek_dwmac_variant *variant = plat->variant;
- int i, num = variant->num_clks;
+ int i, ret;
- plat->clks = devm_kcalloc(plat->dev, num, sizeof(*plat->clks), GFP_KERNEL);
+ plat->clks = devm_kcalloc(plat->dev, variant->num_clks, sizeof(*plat->clks), GFP_KERNEL);
if (!plat->clks)
return -ENOMEM;
- for (i = 0; i < num; i++)
+ for (i = 0; i < variant->num_clks; i++)
plat->clks[i].id = variant->clk_list[i];
- plat->num_clks_to_config = variant->num_clks;
+ ret = devm_clk_bulk_get(plat->dev, variant->num_clks, plat->clks);
+ if (ret)
+ return ret;
+
+ /* The clock labeled "rmii_internal" is needed only in RMII mode (when
+ * the MAC provides the reference clock) and is useless for RGMII/MII or
+ * for RMII when the PHY provides the reference clock.
+ * So the "rmii_internal" clock is requested and configured only when
+ * the RMII reference clock comes from the MAC.
+ */
+ if (plat->rmii_clk_from_mac) {
+ plat->rmii_internal_clk = devm_clk_get(plat->dev, "rmii_internal");
+ if (IS_ERR(plat->rmii_internal_clk))
+ ret = PTR_ERR(plat->rmii_internal_clk);
+ } else {
+ plat->rmii_internal_clk = NULL;
+ }
- return devm_clk_bulk_get(plat->dev, num, plat->clks);
+ return ret;
}
static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
@@ -335,44 +560,117 @@ static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
const struct mediatek_dwmac_variant *variant = plat->variant;
int ret;
- ret = dma_set_mask_and_coherent(plat->dev, DMA_BIT_MASK(variant->dma_bit_mask));
- if (ret) {
- dev_err(plat->dev, "No suitable DMA available, err = %d\n", ret);
- return ret;
+ if (variant->dwmac_set_phy_interface) {
+ ret = variant->dwmac_set_phy_interface(plat);
+ if (ret) {
+ dev_err(plat->dev, "failed to set phy interface, err = %d\n", ret);
+ return ret;
+ }
}
- ret = variant->dwmac_set_phy_interface(plat);
- if (ret) {
- dev_err(plat->dev, "failed to set phy interface, err = %d\n", ret);
- return ret;
+ if (variant->dwmac_set_delay) {
+ ret = variant->dwmac_set_delay(plat);
+ if (ret) {
+ dev_err(plat->dev, "failed to set delay value, err = %d\n", ret);
+ return ret;
+ }
}
- ret = variant->dwmac_set_delay(plat);
+ ret = clk_bulk_prepare_enable(variant->num_clks, plat->clks);
if (ret) {
- dev_err(plat->dev, "failed to set delay value, err = %d\n", ret);
+ dev_err(plat->dev, "failed to enable clks, err = %d\n", ret);
return ret;
}
- ret = clk_bulk_prepare_enable(plat->num_clks_to_config, plat->clks);
+ ret = clk_prepare_enable(plat->rmii_internal_clk);
if (ret) {
- dev_err(plat->dev, "failed to enable clks, err = %d\n", ret);
- return ret;
+ dev_err(plat->dev, "failed to enable rmii internal clk, err = %d\n", ret);
+ goto err_clk;
}
- pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
-
return 0;
+
+err_clk:
+ clk_bulk_disable_unprepare(variant->num_clks, plat->clks);
+ return ret;
}
static void mediatek_dwmac_exit(struct platform_device *pdev, void *priv)
{
struct mediatek_dwmac_plat_data *plat = priv;
+ const struct mediatek_dwmac_variant *variant = plat->variant;
- clk_bulk_disable_unprepare(plat->num_clks_to_config, plat->clks);
+ clk_disable_unprepare(plat->rmii_internal_clk);
+ clk_bulk_disable_unprepare(variant->num_clks, plat->clks);
+}
- pm_runtime_put_sync(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
+static int mediatek_dwmac_clks_config(void *priv, bool enabled)
+{
+ struct mediatek_dwmac_plat_data *plat = priv;
+ const struct mediatek_dwmac_variant *variant = plat->variant;
+ int ret = 0;
+
+ if (enabled) {
+ ret = clk_bulk_prepare_enable(variant->num_clks, plat->clks);
+ if (ret) {
+ dev_err(plat->dev, "failed to enable clks, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(plat->rmii_internal_clk);
+ if (ret) {
+ dev_err(plat->dev, "failed to enable rmii internal clk, err = %d\n", ret);
+ return ret;
+ }
+ } else {
+ clk_disable_unprepare(plat->rmii_internal_clk);
+ clk_bulk_disable_unprepare(variant->num_clks, plat->clks);
+ }
+
+ return ret;
+}
+
+static int mediatek_dwmac_common_data(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat,
+ struct mediatek_dwmac_plat_data *priv_plat)
+{
+ int i;
+
+ plat->interface = priv_plat->phy_mode;
+ plat->use_phy_wol = priv_plat->mac_wol ? 0 : 1;
+ plat->riwt_off = 1;
+ plat->maxmtu = ETH_DATA_LEN;
+ plat->addr64 = priv_plat->variant->dma_bit_mask;
+ plat->bsp_priv = priv_plat;
+ plat->init = mediatek_dwmac_init;
+ plat->exit = mediatek_dwmac_exit;
+ plat->clks_config = mediatek_dwmac_clks_config;
+ if (priv_plat->variant->dwmac_fix_mac_speed)
+ plat->fix_mac_speed = priv_plat->variant->dwmac_fix_mac_speed;
+
+ plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
+ sizeof(*plat->safety_feat_cfg),
+ GFP_KERNEL);
+ if (!plat->safety_feat_cfg)
+ return -ENOMEM;
+
+ plat->safety_feat_cfg->tsoee = 1;
+ plat->safety_feat_cfg->mrxpee = 0;
+ plat->safety_feat_cfg->mestee = 1;
+ plat->safety_feat_cfg->mrxee = 1;
+ plat->safety_feat_cfg->mtxee = 1;
+ plat->safety_feat_cfg->epsi = 0;
+ plat->safety_feat_cfg->edpp = 1;
+ plat->safety_feat_cfg->prtyen = 1;
+ plat->safety_feat_cfg->tmouten = 1;
+
+ for (i = 0; i < plat->tx_queues_to_use; i++) {
+ /* Default TX Q0 to use TSO and rest TXQ for TBS */
+ if (i > 0)
+ plat->tx_queues_cfg[i].tbs_en = 1;
+ }
+
+ return 0;
}
static int mediatek_dwmac_probe(struct platform_device *pdev)
@@ -411,15 +709,7 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
- plat_dat->interface = priv_plat->phy_mode;
- plat_dat->has_gmac4 = 1;
- plat_dat->has_gmac = 0;
- plat_dat->pmt = 0;
- plat_dat->riwt_off = 1;
- plat_dat->maxmtu = ETH_DATA_LEN;
- plat_dat->bsp_priv = priv_plat;
- plat_dat->init = mediatek_dwmac_init;
- plat_dat->exit = mediatek_dwmac_exit;
+ mediatek_dwmac_common_data(pdev, plat_dat, priv_plat);
mediatek_dwmac_init(pdev, priv_plat);
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
@@ -434,6 +724,8 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
static const struct of_device_id mediatek_dwmac_match[] = {
{ .compatible = "mediatek,mt2712-gmac",
.data = &mt2712_gmac_variant },
+ { .compatible = "mediatek,mt8195-gmac",
+ .data = &mt8195_gmac_variant },
{ }
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 2ffa0a11eea5..0cc28c79cc61 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -78,6 +78,7 @@ struct ethqos_emac_por {
struct ethqos_emac_driver_data {
const struct ethqos_emac_por *por;
unsigned int num_por;
+ bool rgmii_config_looback_en;
};
struct qcom_ethqos {
@@ -90,6 +91,7 @@ struct qcom_ethqos {
const struct ethqos_emac_por *por;
unsigned int num_por;
+ bool rgmii_config_looback_en;
};
static int rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset)
@@ -181,6 +183,22 @@ static const struct ethqos_emac_por emac_v2_3_0_por[] = {
static const struct ethqos_emac_driver_data emac_v2_3_0_data = {
.por = emac_v2_3_0_por,
.num_por = ARRAY_SIZE(emac_v2_3_0_por),
+ .rgmii_config_looback_en = true,
+};
+
+static const struct ethqos_emac_por emac_v2_1_0_por[] = {
+ { .offset = RGMII_IO_MACRO_CONFIG, .value = 0x40C01343 },
+ { .offset = SDCC_HC_REG_DLL_CONFIG, .value = 0x2004642C },
+ { .offset = SDCC_HC_REG_DDR_CONFIG, .value = 0x00000000 },
+ { .offset = SDCC_HC_REG_DLL_CONFIG2, .value = 0x00200000 },
+ { .offset = SDCC_USR_CTL, .value = 0x00010800 },
+ { .offset = RGMII_IO_MACRO_CONFIG2, .value = 0x00002060 },
+};
+
+static const struct ethqos_emac_driver_data emac_v2_1_0_data = {
+ .por = emac_v2_1_0_por,
+ .num_por = ARRAY_SIZE(emac_v2_1_0_por),
+ .rgmii_config_looback_en = false,
};
static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
@@ -297,8 +315,12 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_DLY_EN,
SDCC_DDR_CONFIG_PRG_DLY_EN,
SDCC_HC_REG_DDR_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
- RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
+ if (ethqos->rgmii_config_looback_en)
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
+ else
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ 0, RGMII_IO_MACRO_CONFIG);
break;
case SPEED_100:
@@ -331,8 +353,13 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
SDCC_HC_REG_DDR_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
- RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
+ if (ethqos->rgmii_config_looback_en)
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
+ else
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ 0, RGMII_IO_MACRO_CONFIG);
+
break;
case SPEED_10:
@@ -504,6 +531,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
data = of_device_get_match_data(&pdev->dev);
ethqos->por = data->por;
ethqos->num_por = data->num_por;
+ ethqos->rgmii_config_looback_en = data->rgmii_config_looback_en;
ethqos->rgmii_clk = devm_clk_get(&pdev->dev, "rgmii");
if (IS_ERR(ethqos->rgmii_clk)) {
@@ -558,6 +586,7 @@ static int qcom_ethqos_remove(struct platform_device *pdev)
static const struct of_device_id qcom_ethqos_match[] = {
{ .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_data},
+ { .compatible = "qcom,sm8150-ethqos", .data = &emac_v2_1_0_data},
{ }
};
MODULE_DEVICE_TABLE(of, qcom_ethqos_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 09644ab0d87a..f86cc83003f2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -16,6 +16,7 @@
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/regmap.h>
#include <linux/stmmac.h>
@@ -57,7 +58,6 @@ struct emac_variant {
};
/* struct sunxi_priv_data - hold all sunxi private data
- * @tx_clk: reference to MAC TX clock
* @ephy_clk: reference to the optional EPHY clock for the internal PHY
* @regulator: reference to the optional regulator
* @rst_ephy: reference to the optional EPHY reset for the internal PHY
@@ -68,7 +68,6 @@ struct emac_variant {
* @mux_handle: Internal pointer used by mdio-mux lib
*/
struct sunxi_priv_data {
- struct clk *tx_clk;
struct clk *ephy_clk;
struct regulator *regulator;
struct reset_control *rst_ephy;
@@ -579,22 +578,14 @@ static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
}
}
- ret = clk_prepare_enable(gmac->tx_clk);
- if (ret) {
- dev_err(&pdev->dev, "Could not enable AHB clock\n");
- goto err_disable_regulator;
- }
-
if (gmac->use_internal_phy) {
ret = sun8i_dwmac_power_internal_phy(netdev_priv(ndev));
if (ret)
- goto err_disable_clk;
+ goto err_disable_regulator;
}
return 0;
-err_disable_clk:
- clk_disable_unprepare(gmac->tx_clk);
err_disable_regulator:
if (gmac->regulator)
regulator_disable(gmac->regulator);
@@ -1043,8 +1034,6 @@ static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
if (gmac->variant->soc_has_internal_phy)
sun8i_dwmac_unpower_internal_phy(gmac);
- clk_disable_unprepare(gmac->tx_clk);
-
if (gmac->regulator)
regulator_disable(gmac->regulator);
}
@@ -1167,12 +1156,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
return -EINVAL;
}
- gmac->tx_clk = devm_clk_get(dev, "stmmaceth");
- if (IS_ERR(gmac->tx_clk)) {
- dev_err(dev, "Could not get TX clock\n");
- return PTR_ERR(gmac->tx_clk);
- }
-
/* Optional regulator for PHY */
gmac->regulator = devm_regulator_get_optional(dev, "phy");
if (IS_ERR(gmac->regulator)) {
@@ -1254,6 +1237,12 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
ndev = dev_get_drvdata(&pdev->dev);
priv = netdev_priv(ndev);
+ /* the MAC is runtime suspended after stmmac_dvr_probe(), so we
+ * need to ensure the MAC is resumed before other operations such
+ * as reset.
+ */
+ pm_runtime_get_sync(&pdev->dev);
+
/* The mux must be registered after parent MDIO
* so after stmmac_dvr_probe()
*/
@@ -1272,12 +1261,15 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
goto dwmac_remove;
}
+ pm_runtime_put(&pdev->dev);
+
return 0;
dwmac_mux:
reset_control_put(gmac->rst_ephy);
clk_put(gmac->ephy_clk);
dwmac_remove:
+ pm_runtime_put_noidle(&pdev->dev);
stmmac_dvr_remove(&pdev->dev);
dwmac_exit:
sun8i_dwmac_exit(pdev, gmac);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 5b195d5051d6..57970ae2178d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -263,7 +263,7 @@ struct stmmac_priv {
u32 adv_ts;
int use_riwt;
int irq_wake;
- spinlock_t ptp_lock;
+ rwlock_t ptp_lock;
/* Protects auxiliary snapshot registers from concurrent access. */
struct mutex aux_ts_lock;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index a7ec9f4d46ce..22fea0f67245 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -196,9 +196,9 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
GMAC_TIMESTAMP_ATSNS_SHIFT;
for (i = 0; i < num_snapshot; i++) {
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ read_lock_irqsave(&priv->ptp_lock, flags);
get_ptptime(priv->ptpaddr, &ptp_time);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ read_unlock_irqrestore(&priv->ptp_lock, flags);
event.type = PTP_CLOCK_EXTTS;
event.index = 0;
event.timestamp = ptp_time;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 422e3225f476..4a4b3651ab3e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -938,105 +938,15 @@ static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
priv->pause, tx_cnt);
}
-static void stmmac_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
+static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- int tx_cnt = priv->plat->tx_queues_to_use;
- int max_speed = priv->plat->max_speed;
-
- phylink_set(mac_supported, 10baseT_Half);
- phylink_set(mac_supported, 10baseT_Full);
- phylink_set(mac_supported, 100baseT_Half);
- phylink_set(mac_supported, 100baseT_Full);
- phylink_set(mac_supported, 1000baseT_Half);
- phylink_set(mac_supported, 1000baseT_Full);
- phylink_set(mac_supported, 1000baseKX_Full);
-
- phylink_set(mac_supported, Autoneg);
- phylink_set(mac_supported, Pause);
- phylink_set(mac_supported, Asym_Pause);
- phylink_set_port_modes(mac_supported);
-
- /* Cut down 1G if asked to */
- if ((max_speed > 0) && (max_speed < 1000)) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- } else if (priv->plat->has_gmac4) {
- if (!max_speed || max_speed >= 2500) {
- phylink_set(mac_supported, 2500baseT_Full);
- phylink_set(mac_supported, 2500baseX_Full);
- }
- } else if (priv->plat->has_xgmac) {
- if (!max_speed || (max_speed >= 2500)) {
- phylink_set(mac_supported, 2500baseT_Full);
- phylink_set(mac_supported, 2500baseX_Full);
- }
- if (!max_speed || (max_speed >= 5000)) {
- phylink_set(mac_supported, 5000baseT_Full);
- }
- if (!max_speed || (max_speed >= 10000)) {
- phylink_set(mac_supported, 10000baseSR_Full);
- phylink_set(mac_supported, 10000baseLR_Full);
- phylink_set(mac_supported, 10000baseER_Full);
- phylink_set(mac_supported, 10000baseLRM_Full);
- phylink_set(mac_supported, 10000baseT_Full);
- phylink_set(mac_supported, 10000baseKX4_Full);
- phylink_set(mac_supported, 10000baseKR_Full);
- }
- if (!max_speed || (max_speed >= 25000)) {
- phylink_set(mac_supported, 25000baseCR_Full);
- phylink_set(mac_supported, 25000baseKR_Full);
- phylink_set(mac_supported, 25000baseSR_Full);
- }
- if (!max_speed || (max_speed >= 40000)) {
- phylink_set(mac_supported, 40000baseKR4_Full);
- phylink_set(mac_supported, 40000baseCR4_Full);
- phylink_set(mac_supported, 40000baseSR4_Full);
- phylink_set(mac_supported, 40000baseLR4_Full);
- }
- if (!max_speed || (max_speed >= 50000)) {
- phylink_set(mac_supported, 50000baseCR2_Full);
- phylink_set(mac_supported, 50000baseKR2_Full);
- phylink_set(mac_supported, 50000baseSR2_Full);
- phylink_set(mac_supported, 50000baseKR_Full);
- phylink_set(mac_supported, 50000baseSR_Full);
- phylink_set(mac_supported, 50000baseCR_Full);
- phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
- phylink_set(mac_supported, 50000baseDR_Full);
- }
- if (!max_speed || (max_speed >= 100000)) {
- phylink_set(mac_supported, 100000baseKR4_Full);
- phylink_set(mac_supported, 100000baseSR4_Full);
- phylink_set(mac_supported, 100000baseCR4_Full);
- phylink_set(mac_supported, 100000baseLR4_ER4_Full);
- phylink_set(mac_supported, 100000baseKR2_Full);
- phylink_set(mac_supported, 100000baseSR2_Full);
- phylink_set(mac_supported, 100000baseCR2_Full);
- phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
- phylink_set(mac_supported, 100000baseDR2_Full);
- }
- }
- /* Half-Duplex can only work with single queue */
- if (tx_cnt > 1) {
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 1000baseT_Half);
- }
-
- linkmode_and(supported, supported, mac_supported);
- linkmode_andnot(supported, supported, mask);
-
- linkmode_and(state->advertising, state->advertising, mac_supported);
- linkmode_andnot(state->advertising, state->advertising, mask);
+ if (!priv->hw->xpcs)
+ return NULL;
- /* If PCS is supported, check which modes it supports. */
- if (priv->hw->xpcs)
- xpcs_validate(priv->hw->xpcs, supported, state);
+ return &priv->hw->xpcs->pcs;
}
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
@@ -1175,7 +1085,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
- .validate = stmmac_validate,
+ .validate = phylink_generic_validate,
+ .mac_select_pcs = stmmac_mac_select_pcs,
.mac_config = stmmac_mac_config,
.mac_link_down = stmmac_mac_link_down,
.mac_link_up = stmmac_mac_link_up,
@@ -1255,12 +1166,12 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
{
struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
+ int max_speed = priv->plat->max_speed;
int mode = priv->plat->phy_interface;
struct phylink *phylink;
priv->phylink_config.dev = &priv->dev->dev;
priv->phylink_config.type = PHYLINK_NETDEV;
- priv->phylink_config.pcs_poll = true;
if (priv->plat->mdio_bus_data)
priv->phylink_config.ovr_an_inband =
mdio_bus_data->xpcs_an_inband;
@@ -1268,14 +1179,50 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
if (!fwnode)
fwnode = dev_fwnode(priv->device);
+ /* Set the platform/firmware specified interface mode */
+ __set_bit(mode, priv->phylink_config.supported_interfaces);
+
+ /* If we have an xpcs, it defines which PHY interfaces are supported. */
+ if (priv->hw->xpcs)
+ xpcs_get_interfaces(priv->hw->xpcs,
+ priv->phylink_config.supported_interfaces);
+
+ priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100;
+
+ if (!max_speed || max_speed >= 1000)
+ priv->phylink_config.mac_capabilities |= MAC_1000;
+
+ if (priv->plat->has_gmac4) {
+ if (!max_speed || max_speed >= 2500)
+ priv->phylink_config.mac_capabilities |= MAC_2500FD;
+ } else if (priv->plat->has_xgmac) {
+ if (!max_speed || max_speed >= 2500)
+ priv->phylink_config.mac_capabilities |= MAC_2500FD;
+ if (!max_speed || max_speed >= 5000)
+ priv->phylink_config.mac_capabilities |= MAC_5000FD;
+ if (!max_speed || max_speed >= 10000)
+ priv->phylink_config.mac_capabilities |= MAC_10000FD;
+ if (!max_speed || max_speed >= 25000)
+ priv->phylink_config.mac_capabilities |= MAC_25000FD;
+ if (!max_speed || max_speed >= 40000)
+ priv->phylink_config.mac_capabilities |= MAC_40000FD;
+ if (!max_speed || max_speed >= 50000)
+ priv->phylink_config.mac_capabilities |= MAC_50000FD;
+ if (!max_speed || max_speed >= 100000)
+ priv->phylink_config.mac_capabilities |= MAC_100000FD;
+ }
+
+ /* Half-Duplex can only work with single queue */
+ if (priv->plat->tx_queues_to_use > 1)
+ priv->phylink_config.mac_capabilities &=
+ ~(MAC_10HD | MAC_100HD | MAC_1000HD);
+
phylink = phylink_create(&priv->phylink_config, fwnode,
mode, &stmmac_phylink_mac_ops);
if (IS_ERR(phylink))
return PTR_ERR(phylink);
- if (priv->hw->xpcs)
- phylink_set_pcs(phylink, &priv->hw->xpcs->pcs);
-
priv->phylink = phylink;
return 0;
}
@@ -1721,7 +1668,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_count = priv->plat->rx_queues_to_use;
- u32 queue;
+ int queue;
int ret;
/* RX INITIALIZATION */
@@ -1748,9 +1695,6 @@ err_init_rx_buffers:
rx_q->buf_alloc_num = 0;
rx_q->xsk_pool = NULL;
- if (queue == 0)
- break;
-
queue--;
}
@@ -3328,7 +3272,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
ret = stmmac_init_ptp(priv);
if (ret == -EOPNOTSUPP)
- netdev_warn(priv->dev, "PTP not supported by HW\n");
+ netdev_info(priv->dev, "PTP not supported by HW\n");
else if (ret)
netdev_warn(priv->dev, "PTP init failed\n");
else if (ptp_register)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 1c9f02f9c317..e45fb191d8e6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -39,9 +39,9 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
diff = div_u64(adj, 1000000000ULL);
addend = neg_adj ? (addend - diff) : (addend + diff);
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ write_lock_irqsave(&priv->ptp_lock, flags);
stmmac_config_addend(priv, priv->ptpaddr, addend);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ write_unlock_irqrestore(&priv->ptp_lock, flags);
return 0;
}
@@ -86,9 +86,9 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
mutex_unlock(&priv->plat->est->lock);
}
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ write_lock_irqsave(&priv->ptp_lock, flags);
stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj, xmac);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ write_unlock_irqrestore(&priv->ptp_lock, flags);
/* Calculate the new basetime and re-configure EST after the PTP time adjustment. */
if (est_rst) {
@@ -137,9 +137,9 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
unsigned long flags;
u64 ns = 0;
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ read_lock_irqsave(&priv->ptp_lock, flags);
stmmac_get_systime(priv, priv->ptpaddr, &ns);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ read_unlock_irqrestore(&priv->ptp_lock, flags);
*ts = ns_to_timespec64(ns);
@@ -162,9 +162,9 @@ static int stmmac_set_time(struct ptp_clock_info *ptp,
container_of(ptp, struct stmmac_priv, ptp_clock_ops);
unsigned long flags;
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ write_lock_irqsave(&priv->ptp_lock, flags);
stmmac_init_systime(priv, priv->ptpaddr, ts->tv_sec, ts->tv_nsec);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ write_unlock_irqrestore(&priv->ptp_lock, flags);
return 0;
}
@@ -194,12 +194,12 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
cfg->period.tv_sec = rq->perout.period.sec;
cfg->period.tv_nsec = rq->perout.period.nsec;
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ write_lock_irqsave(&priv->ptp_lock, flags);
ret = stmmac_flex_pps_config(priv, priv->ioaddr,
rq->perout.index, cfg, on,
priv->sub_second_inc,
priv->systime_flags);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ write_unlock_irqrestore(&priv->ptp_lock, flags);
break;
case PTP_CLK_REQ_EXTTS:
priv->plat->ext_snapshot_en = on;
@@ -314,7 +314,7 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num;
stmmac_ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n;
- spin_lock_init(&priv->ptp_lock);
+ rwlock_init(&priv->ptp_lock);
mutex_init(&priv->aux_ts_lock);
priv->ptp_clock_ops = stmmac_ptp_clock_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index be3cb63675a5..9f1759593b94 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -1777,9 +1777,9 @@ static int stmmac_test_tbs(struct stmmac_priv *priv)
if (ret)
return ret;
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ read_lock_irqsave(&priv->ptp_lock, flags);
stmmac_get_systime(priv, priv->ptpaddr, &curr_time);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ read_unlock_irqrestore(&priv->ptp_lock, flags);
if (!curr_time) {
ret = -EOPNOTSUPP;
@@ -1799,9 +1799,9 @@ static int stmmac_test_tbs(struct stmmac_priv *priv)
goto fail_disable;
/* Check if expected time has elapsed */
- spin_lock_irqsave(&priv->ptp_lock, flags);
+ read_lock_irqsave(&priv->ptp_lock, flags);
stmmac_get_systime(priv, priv->ptpaddr, &curr_time);
- spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ read_unlock_irqrestore(&priv->ptp_lock, flags);
if ((curr_time - start_time) < STMMAC_TBS_LT_OFFSET)
ret = -EINVAL;
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index dba9f12efa1c..b04a6a7bf566 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -88,6 +88,7 @@
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>
+#include <linux/jiffies.h>
#define cas_page_map(x) kmap_atomic((x))
#define cas_page_unmap(x) kunmap_atomic((x))
@@ -1234,19 +1235,6 @@ static void cas_init_rx_dma(struct cas *cp)
*/
readl(cp->regs + REG_INTR_STATUS_ALIAS);
writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
- if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
- for (i = 1; i < N_RX_COMP_RINGS; i++)
- readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));
-
- /* 2 is different from 3 and 4 */
- if (N_RX_COMP_RINGS > 1)
- writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
- cp->regs + REG_PLUS_ALIASN_CLEAR(1));
-
- for (i = 2; i < N_RX_COMP_RINGS; i++)
- writel(INTR_RX_DONE_ALT,
- cp->regs + REG_PLUS_ALIASN_CLEAR(i));
- }
/* set up pause thresholds */
val = CAS_BASE(RX_PAUSE_THRESH_OFF,
@@ -3508,9 +3496,6 @@ enable_rx_done:
if (N_RX_DESC_RINGS > 1)
writel(RX_DESC_RINGN_SIZE(1) - 4,
cp->regs + REG_PLUS_RX_KICK1);
-
- for (i = 1; i < N_RX_COMP_RINGS; i++)
- writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
}
}
@@ -4063,8 +4048,8 @@ static void cas_link_timer(struct timer_list *t)
if (link_transition_timeout != 0 &&
cp->link_transition_jiffies_valid &&
- ((jiffies - cp->link_transition_jiffies) >
- (link_transition_timeout))) {
+ time_is_before_jiffies(cp->link_transition_jiffies +
+ link_transition_timeout)) {
/* One-second counter so link-down workaround doesn't
* cause resets to occur so fast as to fool the switch
* into thinking the link is down.
@@ -4679,7 +4664,7 @@ static void cas_set_msglevel(struct net_device *dev, u32 value)
static int cas_get_regs_len(struct net_device *dev)
{
struct cas *cp = netdev_priv(dev);
- return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len: CAS_MAX_REGS;
+ return min_t(int, cp->casreg_len, CAS_MAX_REGS);
}
static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
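
Editor's note: two small cassini cleanups here. The link timer now uses time_is_before_jiffies() (hence the new <linux/jiffies.h> include) so the timeout test stays correct when jiffies wraps, and cas_get_regs_len() expresses its clamp with min_t() instead of an open-coded ternary. The wraparound-safe comparison relies on a signed-difference trick; a small self-contained userspace check of that idea (helper names are mine, not kernel symbols):

    /* build: cc -o wrap wrap.c && ./wrap */
    #include <stdio.h>
    #include <limits.h>

    /* Same signed-difference trick as the kernel's time_after() /
     * time_is_before_jiffies(): correct even when the counter wraps.
     */
    static int counter_after(unsigned long a, unsigned long b)
    {
            return (long)(b - a) < 0;       /* "a is later than b" */
    }

    int main(void)
    {
            unsigned long start = ULONG_MAX - 5;    /* just before the wrap */
            unsigned long now = 10;                 /* shortly after the wrap */

            printf("naive  now > start     : %d\n", now > start);              /* 0: wrong */
            printf("signed counter_after() : %d\n", counter_after(now, start)); /* 1: right */
            return 0;
    }
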
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index ba8ad76313a9..42460c0885fc 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -7909,7 +7909,7 @@ static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
* won't get any interrupts and that's painful to debug.
*/
if (nr64(LDG_NUM(ldn)) != ldg) {
- dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n",
+ dev_err(np->device, "Port %u, mismatched LDG assignment for ldn %d, should be %d is %llu\n",
np->port, ldn, ldg,
(unsigned long long) nr64(LDG_NUM(ldn)));
return -EINVAL;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index d45b6bb86f0b..72acdf802258 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -6,7 +6,7 @@
*/
#include <linux/net_tstamp.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -471,9 +471,7 @@ static void am65_cpsw_get_pauseparam(struct net_device *ndev,
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- pause->autoneg = AUTONEG_DISABLE;
- pause->rx_pause = salve->rx_pause ? true : false;
- pause->tx_pause = salve->tx_pause ? true : false;
+ phylink_ethtool_get_pauseparam(salve->phylink, pause);
}
static int am65_cpsw_set_pauseparam(struct net_device *ndev,
@@ -481,18 +479,7 @@ static int am65_cpsw_set_pauseparam(struct net_device *ndev,
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy)
- return -EINVAL;
-
- if (!phy_validate_pause(salve->phy, pause))
- return -EINVAL;
-
- salve->rx_pause = pause->rx_pause ? true : false;
- salve->tx_pause = pause->tx_pause ? true : false;
-
- phy_set_asym_pause(salve->phy, salve->rx_pause, salve->tx_pause);
-
- return 0;
+ return phylink_ethtool_set_pauseparam(salve->phylink, pause);
}
static void am65_cpsw_get_wol(struct net_device *ndev,
@@ -500,11 +487,7 @@ static void am65_cpsw_get_wol(struct net_device *ndev,
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- wol->supported = 0;
- wol->wolopts = 0;
-
- if (salve->phy)
- phy_ethtool_get_wol(salve->phy, wol);
+ phylink_ethtool_get_wol(salve->phylink, wol);
}
static int am65_cpsw_set_wol(struct net_device *ndev,
@@ -512,10 +495,7 @@ static int am65_cpsw_set_wol(struct net_device *ndev,
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy)
- return -EOPNOTSUPP;
-
- return phy_ethtool_set_wol(salve->phy, wol);
+ return phylink_ethtool_set_wol(salve->phylink, wol);
}
static int am65_cpsw_get_link_ksettings(struct net_device *ndev,
@@ -523,11 +503,7 @@ static int am65_cpsw_get_link_ksettings(struct net_device *ndev,
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy)
- return -EOPNOTSUPP;
-
- phy_ethtool_ksettings_get(salve->phy, ecmd);
- return 0;
+ return phylink_ethtool_ksettings_get(salve->phylink, ecmd);
}
static int
@@ -536,40 +512,28 @@ am65_cpsw_set_link_ksettings(struct net_device *ndev,
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy || phy_is_pseudo_fixed_link(salve->phy))
- return -EOPNOTSUPP;
-
- return phy_ethtool_ksettings_set(salve->phy, ecmd);
+ return phylink_ethtool_ksettings_set(salve->phylink, ecmd);
}
static int am65_cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy || phy_is_pseudo_fixed_link(salve->phy))
- return -EOPNOTSUPP;
-
- return phy_ethtool_get_eee(salve->phy, edata);
+ return phylink_ethtool_get_eee(salve->phylink, edata);
}
static int am65_cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy || phy_is_pseudo_fixed_link(salve->phy))
- return -EOPNOTSUPP;
-
- return phy_ethtool_set_eee(salve->phy, edata);
+ return phylink_ethtool_set_eee(salve->phylink, edata);
}
static int am65_cpsw_nway_reset(struct net_device *ndev)
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
- if (!salve->phy || phy_is_pseudo_fixed_link(salve->phy))
- return -EOPNOTSUPP;
-
- return phy_restart_aneg(salve->phy);
+ return phylink_ethtool_nway_reset(salve->phylink);
}
static int am65_cpsw_get_regs_len(struct net_device *ndev)
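
Editor's note: with the port driven by phylink, the am65-cpsw ethtool handlers no longer open-code "is there a PHY?" and fixed-link guards; the phylink_ethtool_*() helpers perform those checks themselves. A sketch of the resulting shape of such a handler, with placeholder "foo" names rather than the driver's own accessors:

    /* Sketch: an ethtool op that simply forwards to phylink. */
    static int foo_get_link_ksettings(struct net_device *ndev,
                                      struct ethtool_link_ksettings *cmd)
    {
            struct foo_port *port = netdev_priv(ndev);  /* placeholder priv lookup */

            /* phylink validates PHY presence and link mode internally */
            return phylink_ethtool_ksettings_get(port->phylink, cmd);
    }
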
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 8251d7eb001b..d2747e9db286 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -18,7 +18,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -159,69 +159,6 @@ static void am65_cpsw_nuss_get_ver(struct am65_cpsw_common *common)
common->pdata.quirks);
}
-void am65_cpsw_nuss_adjust_link(struct net_device *ndev)
-{
- struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
- struct phy_device *phy = port->slave.phy;
- u32 mac_control = 0;
-
- if (!phy)
- return;
-
- if (phy->link) {
- mac_control = CPSW_SL_CTL_GMII_EN;
-
- if (phy->speed == 1000)
- mac_control |= CPSW_SL_CTL_GIG;
- if (phy->speed == 10 && phy_interface_is_rgmii(phy))
- /* Can be used with in band mode only */
- mac_control |= CPSW_SL_CTL_EXT_EN;
- if (phy->speed == 100 && phy->interface == PHY_INTERFACE_MODE_RMII)
- mac_control |= CPSW_SL_CTL_IFCTL_A;
- if (phy->duplex)
- mac_control |= CPSW_SL_CTL_FULLDUPLEX;
-
- /* RGMII speed is 100M if !CPSW_SL_CTL_GIG*/
-
- /* rx_pause/tx_pause */
- if (port->slave.rx_pause)
- mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
-
- if (port->slave.tx_pause)
- mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
-
- cpsw_sl_ctl_set(port->slave.mac_sl, mac_control);
-
- /* enable forwarding */
- cpsw_ale_control_set(common->ale, port->port_id,
- ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
-
- am65_cpsw_qos_link_up(ndev, phy->speed);
- netif_tx_wake_all_queues(ndev);
- } else {
- int tmo;
-
- /* disable forwarding */
- cpsw_ale_control_set(common->ale, port->port_id,
- ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
-
- cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
-
- tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
- dev_dbg(common->dev, "donw msc_sl %08x tmo %d\n",
- cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS),
- tmo);
-
- cpsw_sl_ctl_reset(port->slave.mac_sl);
-
- am65_cpsw_qos_link_down(ndev);
- netif_tx_stop_all_queues(ndev);
- }
-
- phy_print_status(phy);
-}
-
static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
__be16 proto, u16 vid)
{
@@ -589,15 +526,11 @@ static int am65_cpsw_nuss_ndo_slave_stop(struct net_device *ndev)
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
int ret;
- if (port->slave.phy)
- phy_stop(port->slave.phy);
+ phylink_stop(port->slave.phylink);
netif_tx_stop_all_queues(ndev);
- if (port->slave.phy) {
- phy_disconnect(port->slave.phy);
- port->slave.phy = NULL;
- }
+ phylink_disconnect_phy(port->slave.phylink);
ret = am65_cpsw_nuss_common_stop(common);
if (ret)
@@ -667,25 +600,14 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
if (ret)
goto error_cleanup;
- if (port->slave.phy_node) {
- port->slave.phy = of_phy_connect(ndev,
- port->slave.phy_node,
- &am65_cpsw_nuss_adjust_link,
- 0, port->slave.phy_if);
- if (!port->slave.phy) {
- dev_err(common->dev, "phy %pOF not found on slave %d\n",
- port->slave.phy_node,
- port->port_id);
- ret = -ENODEV;
- goto error_cleanup;
- }
- }
+ ret = phylink_of_phy_connect(port->slave.phylink, port->slave.phy_node, 0);
+ if (ret)
+ goto error_cleanup;
/* restore vlan configurations */
vlan_for_each(ndev, cpsw_restore_vlans, port);
- phy_attached_info(port->slave.phy);
- phy_start(port->slave.phy);
+ phylink_start(port->slave.phylink);
return 0;
@@ -1431,10 +1353,7 @@ static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
return am65_cpsw_nuss_hwtstamp_get(ndev, req);
}
- if (!port->slave.phy)
- return -EOPNOTSUPP;
-
- return phy_mii_ioctl(port->slave.phy, req, cmd);
+ return phylink_mii_ioctl(port->slave.phylink, req, cmd);
}
static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
@@ -1494,6 +1413,81 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
.ndo_get_devlink_port = am65_cpsw_ndo_get_devlink_port,
};
+static void am65_cpsw_nuss_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ /* Currently not used */
+}
+
+static void am65_cpsw_nuss_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
+ phylink_config);
+ struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
+ struct am65_cpsw_common *common = port->common;
+ struct net_device *ndev = port->ndev;
+ int tmo;
+
+ /* disable forwarding */
+ cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+
+ cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
+
+ tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
+ dev_dbg(common->dev, "down msc_sl %08x tmo %d\n",
+ cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS), tmo);
+
+ cpsw_sl_ctl_reset(port->slave.mac_sl);
+
+ am65_cpsw_qos_link_down(ndev);
+ netif_tx_stop_all_queues(ndev);
+}
+
+static void am65_cpsw_nuss_mac_link_up(struct phylink_config *config, struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
+ phylink_config);
+ struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
+ struct am65_cpsw_common *common = port->common;
+ u32 mac_control = CPSW_SL_CTL_GMII_EN;
+ struct net_device *ndev = port->ndev;
+
+ if (speed == SPEED_1000)
+ mac_control |= CPSW_SL_CTL_GIG;
+ if (speed == SPEED_10 && interface == PHY_INTERFACE_MODE_RGMII)
+ /* Can be used with in band mode only */
+ mac_control |= CPSW_SL_CTL_EXT_EN;
+ if (speed == SPEED_100 && interface == PHY_INTERFACE_MODE_RMII)
+ mac_control |= CPSW_SL_CTL_IFCTL_A;
+ if (duplex)
+ mac_control |= CPSW_SL_CTL_FULLDUPLEX;
+
+ /* rx_pause/tx_pause */
+ if (rx_pause)
+ mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
+
+ if (tx_pause)
+ mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
+
+ cpsw_sl_ctl_set(port->slave.mac_sl, mac_control);
+
+ /* enable forwarding */
+ cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+
+ am65_cpsw_qos_link_up(ndev, speed);
+ netif_tx_wake_all_queues(ndev);
+}
+
+static const struct phylink_mac_ops am65_cpsw_phylink_mac_ops = {
+ .validate = phylink_generic_validate,
+ .mac_config = am65_cpsw_nuss_mac_config,
+ .mac_link_down = am65_cpsw_nuss_mac_link_down,
+ .mac_link_up = am65_cpsw_nuss_mac_link_up,
+};
+
static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
{
struct am65_cpsw_common *common = port->common;
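
Editor's note: the mac_link_up/mac_link_down callbacks above recover the port from the phylink_config pointer with two container_of() steps (config -> slave data -> port). container_of() is plain pointer arithmetic on the embedding struct; a self-contained userspace demo of the same idiom, using a simplified macro and made-up types:

    /* build: cc -o cof cof.c && ./cof */
    #include <stdio.h>
    #include <stddef.h>

    /* Simplified version of the kernel macro (no type checking). */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct slave { int cfg; };
    struct port  { int id; struct slave slave; };

    int main(void)
    {
            struct port p = { .id = 7 };
            int *cfg = &p.slave.cfg;        /* pretend a callback only received this */

            struct slave *s  = container_of(cfg, struct slave, cfg);
            struct port  *pp = container_of(s, struct port, slave);

            printf("recovered port id = %d\n", pp->id); /* prints 7 */
            return 0;
    }
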
@@ -1890,27 +1884,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
of_property_read_bool(port_np, "ti,mac-only");
/* get phy/link info */
- if (of_phy_is_fixed_link(port_np)) {
- ret = of_phy_register_fixed_link(port_np);
- if (ret) {
- ret = dev_err_probe(dev, ret,
- "failed to register fixed-link phy %pOF\n",
- port_np);
- goto of_node_put;
- }
- port->slave.phy_node = of_node_get(port_np);
- } else {
- port->slave.phy_node =
- of_parse_phandle(port_np, "phy-handle", 0);
- }
-
- if (!port->slave.phy_node) {
- dev_err(dev,
- "slave[%d] no phy found\n", port_id);
- ret = -ENODEV;
- goto of_node_put;
- }
-
+ port->slave.phy_node = port_np;
ret = of_get_phy_mode(port_np, &port->slave.phy_if);
if (ret) {
dev_err(dev, "%pOF read phy-mode err %d\n",
@@ -1952,12 +1926,25 @@ static void am65_cpsw_pcpu_stats_free(void *data)
free_percpu(stats);
}
+static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_port *port;
+ int i;
+
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+ if (port->slave.phylink)
+ phylink_destroy(port->slave.phylink);
+ }
+}
+
static int
am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
{
struct am65_cpsw_ndev_priv *ndev_priv;
struct device *dev = common->dev;
struct am65_cpsw_port *port;
+ struct phylink *phylink;
int ret;
port = &common->ports[port_idx];
@@ -1995,6 +1982,20 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops;
port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave;
+ /* Configuring Phylink */
+ port->slave.phylink_config.dev = &port->ndev->dev;
+ port->slave.phylink_config.type = PHYLINK_NETDEV;
+ port->slave.phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
+
+ phy_interface_set_rgmii(port->slave.phylink_config.supported_interfaces);
+
+ phylink = phylink_create(&port->slave.phylink_config, dev->fwnode, port->slave.phy_if,
+ &am65_cpsw_phylink_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ port->slave.phylink = phylink;
+
/* Disable TX checksum offload by default due to HW bug */
if (common->pdata.quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM)
port->ndev->features &= ~NETIF_F_HW_CSUM;
@@ -2761,15 +2762,17 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
ret = am65_cpsw_nuss_init_ndevs(common);
if (ret)
- goto err_of_clear;
+ goto err_free_phylink;
ret = am65_cpsw_nuss_register_ndevs(common);
if (ret)
- goto err_of_clear;
+ goto err_free_phylink;
pm_runtime_put(dev);
return 0;
+err_free_phylink:
+ am65_cpsw_nuss_phylink_cleanup(common);
err_of_clear:
of_platform_device_destroy(common->mdio_dev, NULL);
err_pm_clear:
@@ -2792,6 +2795,7 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
return ret;
}
+ am65_cpsw_nuss_phylink_cleanup(common);
am65_cpsw_unregister_devlink(common);
am65_cpsw_unregister_notifiers(common);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index 048ed10143c1..ac945631bf2f 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -10,7 +10,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <net/devlink.h>
@@ -30,13 +30,14 @@ struct am65_cpsw_slave_data {
bool mac_only;
struct cpsw_sl *mac_sl;
struct device_node *phy_node;
- struct phy_device *phy;
phy_interface_t phy_if;
struct phy *ifphy;
bool rx_pause;
bool tx_pause;
u8 mac_addr[ETH_ALEN];
int port_vlan;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
};
struct am65_cpsw_port {
diff --git a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
index 599708a3e81d..d4c56da98a6a 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
@@ -237,15 +237,11 @@ static int am65_cpsw_port_vlans_add(struct am65_cpsw_port *port,
{
bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct net_device *orig_dev = vlan->obj.orig_dev;
- bool cpu_port = netif_is_bridge_master(orig_dev);
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
netdev_dbg(port->ndev, "VID add: %s: vid:%u flags:%X\n",
port->ndev->name, vlan->vid, vlan->flags);
- if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY))
- return 0;
-
return am65_cpsw_port_vlan_add(port, untag, pvid, vlan->vid, orig_dev);
}
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index aa42141be3c0..a557a477d039 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -364,11 +364,9 @@ int cpsw_ethtool_op_begin(struct net_device *ndev)
struct cpsw_common *cpsw = priv->cpsw;
int ret;
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
- pm_runtime_put_noidle(cpsw->dev);
- }
return ret;
}
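
Editor's note: pm_runtime_resume_and_get() bundles pm_runtime_get_sync() with the pm_runtime_put_noidle() that callers previously had to issue by hand on failure, so the error path collapses to a plain return. The before/after shape, sketched as fragments (dev and ret as in the surrounding function):

    /* Old pattern: the usage counter must be dropped manually on failure. */
    ret = pm_runtime_get_sync(dev);
    if (ret < 0) {
            pm_runtime_put_noidle(dev);
            return ret;
    }

    /* New pattern: the helper drops the counter itself if resume fails. */
    ret = pm_runtime_resume_and_get(dev);
    if (ret < 0)
            return ret;
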
diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.c b/drivers/net/ethernet/ti/cpsw_switchdev.c
index a7d97d429e06..ce85f7610273 100644
--- a/drivers/net/ethernet/ti/cpsw_switchdev.c
+++ b/drivers/net/ethernet/ti/cpsw_switchdev.c
@@ -252,15 +252,11 @@ static int cpsw_port_vlans_add(struct cpsw_priv *priv,
{
bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct net_device *orig_dev = vlan->obj.orig_dev;
- bool cpu_port = netif_is_bridge_master(orig_dev);
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
dev_dbg(priv->dev, "VID add: %s: vid:%u flags:%X\n",
priv->ndev->name, vlan->vid, vlan->flags);
- if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY))
- return 0;
-
return cpsw_port_vlan_add(priv, untag, pvid, vlan->vid, orig_dev);
}
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 31df3267a01a..4b6aed78d392 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1604,6 +1604,7 @@ static int emac_dev_stop(struct net_device *ndev)
int irq_num;
struct emac_priv *priv = netdev_priv(ndev);
struct device *emac_dev = &ndev->dev;
+ int ret = 0;
/* inform the upper layers. */
netif_stop_queue(ndev);
@@ -1618,17 +1619,31 @@ static int emac_dev_stop(struct net_device *ndev)
phy_disconnect(ndev->phydev);
/* Free IRQ */
- while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
- for (irq_num = res->start; irq_num <= res->end; irq_num++)
- free_irq(irq_num, priv->ndev);
- i++;
+ if (dev_of_node(&priv->pdev->dev)) {
+ do {
+ ret = platform_get_irq_optional(priv->pdev, i);
+ if (ret < 0 && ret != -ENXIO)
+ break;
+ if (ret > 0) {
+ free_irq(ret, priv->ndev);
+ } else {
+ ret = 0;
+ break;
+ }
+ } while (++i);
+ } else {
+ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
+ for (irq_num = res->start; irq_num <= res->end; irq_num++)
+ free_irq(irq_num, priv->ndev);
+ i++;
+ }
}
if (netif_msg_drv(priv))
dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
pm_runtime_put(&priv->pdev->dev);
- return 0;
+ return ret;
}
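
Editor's note: on DT systems the platform core no longer statically populates IORESOURCE_IRQ resources, so walking them with platform_get_resource() finds nothing; the replacement loop asks platform_get_irq_optional() for index 0, 1, 2, ... and stops at -ENXIO, which that helper returns once the index runs past the last interrupt. A trimmed sketch of the loop, assuming a pdev and an irq handler cookie from the surrounding driver:

    /* Sketch: free every IRQ the platform device provides (DT case). */
    int i = 0, irq;

    while (1) {
            irq = platform_get_irq_optional(pdev, i++);
            if (irq == -ENXIO)              /* ran past the last IRQ */
                    break;
            if (irq < 0)                    /* genuine lookup error */
                    return irq;
            free_irq(irq, cookie);
    }
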
/**
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index b818e4579f6f..16507bff652a 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -2082,7 +2082,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
netcp->tx_pool_region_id = temp[1];
if (netcp->tx_pool_size < MAX_SKB_FRAGS) {
- dev_err(dev, "tx-pool size too small, must be atleast(%ld)\n",
+ dev_err(dev, "tx-pool size too small, must be at least %ld\n",
MAX_SKB_FRAGS);
ret = -ENODEV;
goto quit;
diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
index 25739b182ac7..eb39a45de012 100644
--- a/drivers/net/ethernet/vertexcom/mse102x.c
+++ b/drivers/net/ethernet/vertexcom/mse102x.c
@@ -362,7 +362,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
mse102x_dump_packet(__func__, skb->len, skb->data);
skb->protocol = eth_type_trans(skb, mse->ndev);
- netif_rx_ni(skb);
+ netif_rx(skb);
mse->ndev->stats.rx_packets++;
mse->ndev->stats.rx_bytes += rxlen;
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index ae24d6b86803..4fd7c39e1123 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -883,7 +883,7 @@ static void w5100_rx_work(struct work_struct *work)
struct sk_buff *skb;
while ((skb = w5100_rx_skb(priv->ndev)))
- netif_rx_ni(skb);
+ netif_rx(skb);
w5100_enable_intr(priv);
}
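
Editor's note: both mse102x and w5100 hand packets up from a workqueue, i.e. process context. Since netif_rx() can now be called from any context (it takes care of bottom-half disabling itself), the netif_rx_ni() variant is being retired and plain netif_rx() is the correct call here. Minimal sketch, assuming skb and ndev from the surrounding receive path:

    /* Delivering an skb from process (workqueue) context. */
    skb->protocol = eth_type_trans(skb, ndev);
    netif_rx(skb);          /* safe from process, softirq or hardirq context */
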
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 911b5ef9e680..0014729b8865 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
#
-# Xilink device configuration
+# Xilinx device configuration
#
config NET_VENDOR_XILINX
diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h
index 4a73127e10a6..c6395c406418 100644
--- a/drivers/net/ethernet/xilinx/ll_temac.h
+++ b/drivers/net/ethernet/xilinx/ll_temac.h
@@ -271,7 +271,7 @@ This option defaults to enabled (set) */
#define XTE_TIE_OFFSET 0x000003A4 /* Interrupt enable */
-/** MII Mamagement Control register (MGTCR) */
+/* MII Management Control register (MGTCR) */
#define XTE_MGTDR_OFFSET 0x000003B0 /* MII data */
#define XTE_MIIMAI_OFFSET 0x000003B4 /* MII control */
@@ -283,7 +283,7 @@ This option defaults to enabled (set) */
#define STS_CTRL_APP0_ERR (1 << 31)
#define STS_CTRL_APP0_IRQONEND (1 << 30)
-/* undoccumented */
+/* undocumented */
#define STS_CTRL_APP0_STOPONEND (1 << 29)
#define STS_CTRL_APP0_CMPLT (1 << 28)
#define STS_CTRL_APP0_SOP (1 << 27)
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 64c7e26c3b75..869e362e09c1 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -361,8 +361,9 @@ static int temac_dma_bd_init(struct net_device *ndev)
lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
+ sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num));
- skb = netdev_alloc_skb_ip_align(ndev,
- XTE_MAX_JUMBO_FRAME_SIZE);
+ skb = __netdev_alloc_skb_ip_align(ndev,
+ XTE_MAX_JUMBO_FRAME_SIZE,
+ GFP_KERNEL);
if (!skb)
goto out;
@@ -1008,7 +1009,7 @@ static void ll_temac_recv(struct net_device *ndev)
(skb->len > 64)) {
/* Convert from device endianness (be32) to cpu
- * endiannes, and if necessary swap the bytes
+ * endianness, and if necessary swap the bytes
* (back) for proper IP checksum byte order
* (be16).
*/
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 5b4d153b1492..0f9c88dd1a4a 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -119,11 +119,11 @@
#define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
#define XAXIDMA_IRQ_ALL_MASK 0x00007000 /* All interrupts */
-/* Default TX/RX Threshold and waitbound values for SGDMA mode */
+/* Default TX/RX Threshold and delay timer values for SGDMA mode */
#define XAXIDMA_DFT_TX_THRESHOLD 24
-#define XAXIDMA_DFT_TX_WAITBOUND 254
-#define XAXIDMA_DFT_RX_THRESHOLD 24
-#define XAXIDMA_DFT_RX_WAITBOUND 254
+#define XAXIDMA_DFT_TX_USEC 50
+#define XAXIDMA_DFT_RX_THRESHOLD 1
+#define XAXIDMA_DFT_RX_USEC 50
#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */
#define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */
@@ -385,7 +385,9 @@ struct axidma_bd {
* @phy_node: Pointer to device node structure
* @phylink: Pointer to phylink instance
* @phylink_config: phylink configuration settings
+ * @napi: NAPI control structure
* @pcs_phy: Reference to PCS/PMA PHY if used
+ * @pcs: phylink pcs structure for PCS PHY
* @switch_x_sgmii: Whether switchable 1000BaseX/SGMII mode is enabled in the core
* @axi_clk: AXI4-Lite bus clock
* @misc_clks: Misc ethernet clocks (AXI4-Stream, Ref, MGT clocks)
@@ -394,6 +396,7 @@ struct axidma_bd {
* @regs_start: Resource start for axienet device addresses
* @regs: Base address for the axienet_local device address space
* @dma_regs: Base address for the axidma device address space
+ * @rx_dma_cr: Nominal content of RX DMA control register
* @dma_err_task: Work structure to process Axi DMA errors
* @tx_irq: Axidma TX IRQ number
* @rx_irq: Axidma RX IRQ number
@@ -422,7 +425,9 @@ struct axidma_bd {
* @csum_offload_on_tx_path: Stores the checksum selection on TX side.
* @csum_offload_on_rx_path: Stores the checksum selection on RX side.
* @coalesce_count_rx: Store the irq coalesce on RX side.
+ * @coalesce_usec_rx: IRQ coalesce delay for RX
* @coalesce_count_tx: Store the irq coalesce on TX side.
+ * @coalesce_usec_tx: IRQ coalesce delay for TX
*/
struct axienet_local {
struct net_device *ndev;
@@ -433,7 +438,10 @@ struct axienet_local {
struct phylink *phylink;
struct phylink_config phylink_config;
+ struct napi_struct napi;
+
struct mdio_device *pcs_phy;
+ struct phylink_pcs pcs;
bool switch_x_sgmii;
@@ -447,6 +455,8 @@ struct axienet_local {
void __iomem *regs;
void __iomem *dma_regs;
+ u32 rx_dma_cr;
+
struct work_struct dma_err_task;
int tx_irq;
@@ -474,7 +484,9 @@ struct axienet_local {
int csum_offload_on_rx_path;
u32 coalesce_count_rx;
+ u32 coalesce_usec_rx;
u32 coalesce_count_tx;
+ u32 coalesce_usec_tx;
};
/**
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 377c94ec2486..c7eb05e4a6bf 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -7,7 +7,7 @@
* Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
* Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
* Copyright (c) 2010 - 2011 PetaLogix
- * Copyright (c) 2019 SED Systems, a division of Calian Ltd.
+ * Copyright (c) 2019 - 2022 Calian Advanced Technologies
* Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
*
* This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
@@ -33,7 +33,7 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
-#include <linux/spinlock.h>
+#include <linux/math64.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
@@ -190,7 +190,7 @@ static void axienet_dma_bd_release(struct net_device *ndev)
struct axienet_local *lp = netdev_priv(ndev);
/* If we end up here, tx_bd_v must have been DMA allocated. */
- dma_free_coherent(ndev->dev.parent,
+ dma_free_coherent(lp->dev,
sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
lp->tx_bd_v,
lp->tx_bd_p);
@@ -215,18 +215,90 @@ static void axienet_dma_bd_release(struct net_device *ndev)
*/
if (lp->rx_bd_v[i].cntrl) {
phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
- dma_unmap_single(ndev->dev.parent, phys,
+ dma_unmap_single(lp->dev, phys,
lp->max_frm_size, DMA_FROM_DEVICE);
}
}
- dma_free_coherent(ndev->dev.parent,
+ dma_free_coherent(lp->dev,
sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
lp->rx_bd_v,
lp->rx_bd_p);
}
/**
+ * axienet_usec_to_timer - Calculate IRQ delay timer value
+ * @lp: Pointer to the axienet_local structure
+ * @coalesce_usec: Microseconds to convert into timer value
+ */
+static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
+{
+ u32 result;
+ u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
+
+ if (lp->axi_clk)
+ clk_rate = clk_get_rate(lp->axi_clk);
+
+ /* 1 Timeout Interval = 125 * (clock period of SG clock) */
+ result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
+ (u64)125000000);
+ if (result > 255)
+ result = 255;
+
+ return result;
+}
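+
+/* Editor's note: one DMA delay-timer tick is 125 SG-clock periods, so the
+ * conversion above is ticks = round(usecs * clk_rate / 125000000), clamped
+ * to the 8-bit register field. A small userspace check of that arithmetic
+ * (helper name is mine, not a kernel symbol):
+ *
+ *     #include <stdio.h>
+ *     #include <stdint.h>
+ *
+ *     static uint32_t usec_to_timer(uint64_t usec, uint64_t clk_hz)
+ *     {
+ *             uint64_t t = (usec * clk_hz + 125000000ULL / 2) / 125000000ULL;
+ *
+ *             return t > 255 ? 255 : (uint32_t)t;     // clamp to 8-bit field
+ *     }
+ *
+ *     int main(void)
+ *     {
+ *             printf("%u\n", usec_to_timer(50, 125000000));   // 50: one tick per usec
+ *             printf("%u\n", usec_to_timer(50, 100000000));   // 40: slower SG clock
+ *             printf("%u\n", usec_to_timer(1000, 125000000)); // 255: clamped
+ *             return 0;
+ *     }
+ */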
+
+/**
+ * axienet_dma_start - Set up DMA registers and start DMA operation
+ * @lp: Pointer to the axienet_local structure
+ */
+static void axienet_dma_start(struct axienet_local *lp)
+{
+ u32 tx_cr;
+
+ /* Start updating the Rx channel control register */
+ lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
+ XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
+ /* Only set interrupt delay timer if not generating an interrupt on
+ * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
+ */
+ if (lp->coalesce_count_rx > 1)
+ lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
+ << XAXIDMA_DELAY_SHIFT) |
+ XAXIDMA_IRQ_DELAY_MASK;
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
+
+ /* Start updating the Tx channel control register */
+ tx_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
+ XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
+ /* Only set interrupt delay timer if not generating an interrupt on
+ * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
+ */
+ if (lp->coalesce_count_tx > 1)
+ tx_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
+ << XAXIDMA_DELAY_SHIFT) |
+ XAXIDMA_IRQ_DELAY_MASK;
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, tx_cr);
+
+ /* Populate the tail pointer and bring the Rx Axi DMA engine out of
+ * halted state. This will make the Rx side ready for reception.
+ */
+ axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
+ lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
+ axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
+ (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
+
+ /* Write to the RS (Run-stop) bit in the Tx channel control register.
+ * Tx channel is now ready to run. But only after we write to the
+ * tail pointer register that the Tx channel will start transmitting.
+ */
+ axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
+ tx_cr |= XAXIDMA_CR_RUNSTOP_MASK;
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, tx_cr);
+}
+
+/**
* axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
* @ndev: Pointer to the net_device structure
*
@@ -238,7 +310,6 @@ static void axienet_dma_bd_release(struct net_device *ndev)
*/
static int axienet_dma_bd_init(struct net_device *ndev)
{
- u32 cr;
int i;
struct sk_buff *skb;
struct axienet_local *lp = netdev_priv(ndev);
@@ -249,13 +320,13 @@ static int axienet_dma_bd_init(struct net_device *ndev)
lp->rx_bd_ci = 0;
/* Allocate the Tx and Rx buffer descriptors. */
- lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ lp->tx_bd_v = dma_alloc_coherent(lp->dev,
sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
&lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
return -ENOMEM;
- lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ lp->rx_bd_v = dma_alloc_coherent(lp->dev,
sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
&lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v)
@@ -285,9 +356,9 @@ static int axienet_dma_bd_init(struct net_device *ndev)
goto out;
lp->rx_bd_v[i].skb = skb;
- addr = dma_map_single(ndev->dev.parent, skb->data,
+ addr = dma_map_single(lp->dev, skb->data,
lp->max_frm_size, DMA_FROM_DEVICE);
- if (dma_mapping_error(ndev->dev.parent, addr)) {
+ if (dma_mapping_error(lp->dev, addr)) {
netdev_err(ndev, "DMA mapping error\n");
goto out;
}
@@ -296,50 +367,7 @@ static int axienet_dma_bd_init(struct net_device *ndev)
lp->rx_bd_v[i].cntrl = lp->max_frm_size;
}
- /* Start updating the Rx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
- ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = ((cr & ~XAXIDMA_DELAY_MASK) |
- (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- /* Start updating the Tx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
- ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
- (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- /* Populate the tail pointer and bring the Rx Axi DMA engine out of
- * halted state. This will make the Rx side ready for reception.
- */
- axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
- axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
-
- /* Write to the RS (Run-stop) bit in the Tx channel control register.
- * Tx channel is now ready to run. But only after we write to the
- * tail pointer register that the Tx channel will start transmitting.
- */
- axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
+ axienet_dma_start(lp);
return 0;
out:
@@ -531,13 +559,51 @@ static int __axienet_device_reset(struct axienet_local *lp)
}
/**
+ * axienet_dma_stop - Stop DMA operation
+ * @lp: Pointer to the axienet_local structure
+ */
+static void axienet_dma_stop(struct axienet_local *lp)
+{
+ int count;
+ u32 cr, sr;
+
+ cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ synchronize_irq(lp->rx_irq);
+
+ cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ synchronize_irq(lp->tx_irq);
+
+ /* Give DMAs a chance to halt gracefully */
+ sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
+ msleep(20);
+ sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ }
+
+ sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+ for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
+ msleep(20);
+ sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+ }
+
+ /* Do a reset to ensure DMA is really stopped */
+ axienet_lock_mii(lp);
+ __axienet_device_reset(lp);
+ axienet_unlock_mii(lp);
+}
+
+/**
* axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
* @ndev: Pointer to the net_device structure
*
* This function is called to reset and initialize the Axi Ethernet core. This
* is typically called during initialization. It does a reset of the Axi DMA
* Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
- * areconnected to Axi Ethernet reset lines, this in turn resets the Axi
+ * are connected to Axi Ethernet reset lines, this in turn resets the Axi
* Ethernet core. No separate hardware reset is done for the Axi Ethernet
* core.
* Returns 0 on success or a negative error number otherwise.
@@ -636,7 +702,7 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
/* Ensure we see complete descriptor update */
dma_rmb();
phys = desc_get_phys_addr(lp, cur_p);
- dma_unmap_single(ndev->dev.parent, phys,
+ dma_unmap_single(lp->dev, phys,
(cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
@@ -774,9 +840,9 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
}
- phys = dma_map_single(ndev->dev.parent, skb->data,
+ phys = dma_map_single(lp->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+ if (unlikely(dma_mapping_error(lp->dev, phys))) {
if (net_ratelimit())
netdev_err(ndev, "TX DMA mapping error\n");
ndev->stats.tx_dropped++;
@@ -790,11 +856,11 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
lp->tx_bd_tail = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
frag = &skb_shinfo(skb)->frags[ii];
- phys = dma_map_single(ndev->dev.parent,
+ phys = dma_map_single(lp->dev,
skb_frag_address(frag),
skb_frag_size(frag),
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+ if (unlikely(dma_mapping_error(lp->dev, phys))) {
if (net_ratelimit())
netdev_err(ndev, "TX DMA mapping error\n");
ndev->stats.tx_dropped++;
@@ -833,79 +899,84 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
/**
- * axienet_recv - Is called from Axi DMA Rx Isr to complete the received
- * BD processing.
- * @ndev: Pointer to net_device structure.
+ * axienet_poll - Triggered by RX ISR to complete the received BD processing.
+ * @napi: Pointer to NAPI structure.
+ * @budget: Max number of packets to process.
*
- * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
- * does minimal processing and invokes "netif_rx" to complete further
- * processing.
+ * Return: Number of RX packets processed.
*/
-static void axienet_recv(struct net_device *ndev)
+static int axienet_poll(struct napi_struct *napi, int budget)
{
u32 length;
u32 csumstatus;
u32 size = 0;
- u32 packets = 0;
+ int packets = 0;
dma_addr_t tail_p = 0;
- struct axienet_local *lp = netdev_priv(ndev);
- struct sk_buff *skb, *new_skb;
struct axidma_bd *cur_p;
+ struct sk_buff *skb, *new_skb;
+ struct axienet_local *lp = container_of(napi, struct axienet_local, napi);
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
- while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
+ while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
dma_addr_t phys;
- tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
-
/* Ensure we see complete descriptor update */
dma_rmb();
- phys = desc_get_phys_addr(lp, cur_p);
- dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
- DMA_FROM_DEVICE);
skb = cur_p->skb;
cur_p->skb = NULL;
- length = cur_p->app4 & 0x0000FFFF;
-
- skb_put(skb, length);
- skb->protocol = eth_type_trans(skb, ndev);
- /*skb_checksum_none_assert(skb);*/
- skb->ip_summed = CHECKSUM_NONE;
-
- /* if we're doing Rx csum offload, set it up */
- if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
- csumstatus = (cur_p->app2 &
- XAE_FULL_CSUM_STATUS_MASK) >> 3;
- if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
- (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* skb could be NULL if a previous pass already received the
+ * packet for this slot in the ring, but failed to refill it
+ * with a newly allocated buffer. In this case, don't try to
+ * receive it again.
+ */
+ if (likely(skb)) {
+ length = cur_p->app4 & 0x0000FFFF;
+
+ phys = desc_get_phys_addr(lp, cur_p);
+ dma_unmap_single(lp->dev, phys, lp->max_frm_size,
+ DMA_FROM_DEVICE);
+
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, lp->ndev);
+ /*skb_checksum_none_assert(skb);*/
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* if we're doing Rx csum offload, set it up */
+ if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
+ csumstatus = (cur_p->app2 &
+ XAE_FULL_CSUM_STATUS_MASK) >> 3;
+ if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
+ csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
+ skb->protocol == htons(ETH_P_IP) &&
+ skb->len > 64) {
+ skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
+ skb->ip_summed = CHECKSUM_COMPLETE;
}
- } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
- skb->protocol == htons(ETH_P_IP) &&
- skb->len > 64) {
- skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
- skb->ip_summed = CHECKSUM_COMPLETE;
- }
- netif_rx(skb);
+ napi_gro_receive(napi, skb);
- size += length;
- packets++;
+ size += length;
+ packets++;
+ }
- new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
+ new_skb = napi_alloc_skb(napi, lp->max_frm_size);
if (!new_skb)
- return;
+ break;
- phys = dma_map_single(ndev->dev.parent, new_skb->data,
+ phys = dma_map_single(lp->dev, new_skb->data,
lp->max_frm_size,
DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+ if (unlikely(dma_mapping_error(lp->dev, phys))) {
if (net_ratelimit())
- netdev_err(ndev, "RX DMA mapping error\n");
+ netdev_err(lp->ndev, "RX DMA mapping error\n");
dev_kfree_skb(new_skb);
- return;
+ break;
}
desc_set_phys_addr(lp, phys, cur_p);
@@ -913,16 +984,30 @@ static void axienet_recv(struct net_device *ndev)
cur_p->status = 0;
cur_p->skb = new_skb;
+ /* Only update tail_p to mark this slot as usable after it has
+ * been successfully refilled.
+ */
+ tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
+
if (++lp->rx_bd_ci >= lp->rx_bd_num)
lp->rx_bd_ci = 0;
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
}
- ndev->stats.rx_packets += packets;
- ndev->stats.rx_bytes += size;
+ lp->ndev->stats.rx_packets += packets;
+ lp->ndev->stats.rx_bytes += size;
if (tail_p)
axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+
+ if (packets < budget && napi_complete_done(napi, packets)) {
+ /* Re-enable RX completion interrupts. This should
+ * cause an immediate interrupt if any RX packets are
+ * already pending.
+ */
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
+ }
+ return packets;
}
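
Editor's note: the conversion to NAPI follows the standard contract: the RX interrupt handler masks the RX completion/delay interrupts and calls napi_schedule(); the poll function processes at most budget descriptors, and only when it consumed less than budget and napi_complete_done() accepts the completion does it re-enable the interrupt sources. Skeleton of that contract, with placeholder "foo" helpers rather than the driver's own:

    /* Skeleton of the NAPI poll pattern used above. */
    static int foo_poll(struct napi_struct *napi, int budget)
    {
            struct foo_priv *p = container_of(napi, struct foo_priv, napi);
            int done = 0;

            while (done < budget && foo_rx_one(p))      /* foo_rx_one(): hypothetical */
                    done++;

            if (done < budget && napi_complete_done(napi, done))
                    foo_enable_rx_irq(p);               /* re-arm only when truly done */

            return done;
    }
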
/**
@@ -937,41 +1022,27 @@ static void axienet_recv(struct net_device *ndev)
*/
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
- u32 cr;
unsigned int status;
struct net_device *ndev = _ndev;
struct axienet_local *lp = netdev_priv(ndev);
status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
- if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
- axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
- axienet_start_xmit_done(lp->ndev);
- goto out;
- }
+
if (!(status & XAXIDMA_IRQ_ALL_MASK))
return IRQ_NONE;
- if (status & XAXIDMA_IRQ_ERROR_MASK) {
- dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
- dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
- (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
- (lp->tx_bd_v[lp->tx_bd_ci]).phys);
-
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* Write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* Write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
+
+ if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
+ netdev_err(ndev, "DMA Tx error 0x%x\n", status);
+ netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
+ (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
+ (lp->tx_bd_v[lp->tx_bd_ci]).phys);
schedule_work(&lp->dma_err_task);
- axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
+ } else {
+ axienet_start_xmit_done(lp->ndev);
}
-out:
+
return IRQ_HANDLED;
}
@@ -982,46 +1053,40 @@ out:
*
* Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
*
- * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
+ * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
* processing.
*/
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
- u32 cr;
unsigned int status;
struct net_device *ndev = _ndev;
struct axienet_local *lp = netdev_priv(ndev);
status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
- if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
- axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
- axienet_recv(lp->ndev);
- goto out;
- }
+
if (!(status & XAXIDMA_IRQ_ALL_MASK))
return IRQ_NONE;
- if (status & XAXIDMA_IRQ_ERROR_MASK) {
- dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
- dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
- (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
- (lp->rx_bd_v[lp->rx_bd_ci]).phys);
-
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* Finally write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
+
+ if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
+ netdev_err(ndev, "DMA Rx error 0x%x\n", status);
+ netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
+ (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
+ (lp->rx_bd_v[lp->rx_bd_ci]).phys);
schedule_work(&lp->dma_err_task);
- axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
+ } else {
+ /* Disable further RX completion interrupts and schedule
+ * NAPI receive.
+ */
+ u32 cr = lp->rx_dma_cr;
+
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+
+ napi_schedule(&lp->napi);
}
-out:
+
return IRQ_HANDLED;
}
@@ -1095,6 +1160,8 @@ static int axienet_open(struct net_device *ndev)
/* Enable worker thread for Axi DMA error handling */
INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
+ napi_enable(&lp->napi);
+
/* Enable interrupts for Axi DMA Tx */
ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
ndev->name, ndev);
@@ -1120,6 +1187,7 @@ err_eth_irq:
err_rx_irq:
free_irq(lp->tx_irq, ndev);
err_tx_irq:
+ napi_disable(&lp->napi);
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
cancel_work_sync(&lp->dma_err_task);
@@ -1139,46 +1207,22 @@ err_tx_irq:
*/
static int axienet_stop(struct net_device *ndev)
{
- u32 cr, sr;
- int count;
struct axienet_local *lp = netdev_priv(ndev);
dev_dbg(&ndev->dev, "axienet_close()\n");
+ napi_disable(&lp->napi);
+
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ axienet_dma_stop(lp);
axienet_iow(lp, XAE_IE_OFFSET, 0);
- /* Give DMAs a chance to halt gracefully */
- sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
- for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
- msleep(20);
- sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
- }
-
- sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
- for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
- msleep(20);
- sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
- }
-
- /* Do a reset to ensure DMA is really stopped */
- axienet_lock_mii(lp);
- __axienet_device_reset(lp);
- axienet_unlock_mii(lp);
-
cancel_work_sync(&lp->dma_err_task);
if (lp->eth_irq > 0)
@@ -1449,14 +1493,12 @@ axienet_ethtools_get_coalesce(struct net_device *ndev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- u32 regval = 0;
struct axienet_local *lp = netdev_priv(ndev);
- regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
- >> XAXIDMA_COALESCE_SHIFT;
- regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
- >> XAXIDMA_COALESCE_SHIFT;
+
+ ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
+ ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
+ ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
+ ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
return 0;
}
@@ -1489,8 +1531,12 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
if (ecoalesce->rx_max_coalesced_frames)
lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
+ if (ecoalesce->rx_coalesce_usecs)
+ lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
if (ecoalesce->tx_max_coalesced_frames)
lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
+ if (ecoalesce->tx_coalesce_usecs)
+ lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
return 0;
}
@@ -1521,7 +1567,8 @@ static int axienet_ethtools_nway_reset(struct net_device *dev)
}
static const struct ethtool_ops axienet_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
+ .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USECS,
.get_drvinfo = axienet_ethtools_get_drvinfo,
.get_regs_len = axienet_ethtools_get_regs_len,
.get_regs = axienet_ethtools_get_regs,
@@ -1537,78 +1584,78 @@ static const struct ethtool_ops axienet_ethtool_ops = {
.nway_reset = axienet_ethtools_nway_reset,
};
-static void axienet_mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state)
+static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
{
- struct net_device *ndev = to_net_dev(config->dev);
- struct axienet_local *lp = netdev_priv(ndev);
+ return container_of(pcs, struct axienet_local, pcs);
+}
- switch (state->interface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- phylink_mii_c22_pcs_get_state(lp->pcs_phy, state);
- break;
- default:
- break;
- }
+static void axienet_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
+
+ phylink_mii_c22_pcs_get_state(pcs_phy, state);
}
-static void axienet_mac_an_restart(struct phylink_config *config)
+static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
{
- struct net_device *ndev = to_net_dev(config->dev);
- struct axienet_local *lp = netdev_priv(ndev);
+ struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
- phylink_mii_c22_pcs_an_restart(lp->pcs_phy);
+ phylink_mii_c22_pcs_an_restart(pcs_phy);
}
-static int axienet_mac_prepare(struct phylink_config *config, unsigned int mode,
- phy_interface_t iface)
+static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
{
- struct net_device *ndev = to_net_dev(config->dev);
+ struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
+ struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
struct axienet_local *lp = netdev_priv(ndev);
int ret;
- switch (iface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- if (!lp->switch_x_sgmii)
- return 0;
-
- ret = mdiobus_write(lp->pcs_phy->bus,
- lp->pcs_phy->addr,
- XLNX_MII_STD_SELECT_REG,
- iface == PHY_INTERFACE_MODE_SGMII ?
+ if (lp->switch_x_sgmii) {
+ ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
+ interface == PHY_INTERFACE_MODE_SGMII ?
XLNX_MII_STD_SELECT_SGMII : 0);
- if (ret < 0)
- netdev_warn(ndev, "Failed to switch PHY interface: %d\n",
+ if (ret < 0) {
+ netdev_warn(ndev,
+ "Failed to switch PHY interface: %d\n",
ret);
- return ret;
- default:
- return 0;
+ return ret;
+ }
}
+
+ ret = phylink_mii_c22_pcs_config(pcs_phy, mode, interface, advertising);
+ if (ret < 0)
+ netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
+
+ return ret;
}
-static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
- const struct phylink_link_state *state)
+static const struct phylink_pcs_ops axienet_pcs_ops = {
+ .pcs_get_state = axienet_pcs_get_state,
+ .pcs_config = axienet_pcs_config,
+ .pcs_an_restart = axienet_pcs_an_restart,
+};
+
+static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
{
struct net_device *ndev = to_net_dev(config->dev);
struct axienet_local *lp = netdev_priv(ndev);
- int ret;
- switch (state->interface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- ret = phylink_mii_c22_pcs_config(lp->pcs_phy, mode,
- state->interface,
- state->advertising);
- if (ret < 0)
- netdev_warn(ndev, "Failed to configure PCS: %d\n",
- ret);
- break;
+ if (interface == PHY_INTERFACE_MODE_1000BASEX ||
+ interface == PHY_INTERFACE_MODE_SGMII)
+ return &lp->pcs;
- default:
- break;
- }
+ return NULL;
+}
+
+static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ /* nothing meaningful to do */
}
static void axienet_mac_link_down(struct phylink_config *config,
@@ -1663,9 +1710,7 @@ static void axienet_mac_link_up(struct phylink_config *config,
static const struct phylink_mac_ops axienet_phylink_ops = {
.validate = phylink_generic_validate,
- .mac_pcs_get_state = axienet_mac_pcs_get_state,
- .mac_an_restart = axienet_mac_an_restart,
- .mac_prepare = axienet_mac_prepare,
+ .mac_select_pcs = axienet_mac_select_pcs,
.mac_config = axienet_mac_config,
.mac_link_down = axienet_mac_link_down,
.mac_link_up = axienet_mac_link_up,
@@ -1680,29 +1725,26 @@ static const struct phylink_mac_ops axienet_phylink_ops = {
*/
static void axienet_dma_err_handler(struct work_struct *work)
{
+ u32 i;
u32 axienet_status;
- u32 cr, i;
+ struct axidma_bd *cur_p;
struct axienet_local *lp = container_of(work, struct axienet_local,
dma_err_task);
struct net_device *ndev = lp->ndev;
- struct axidma_bd *cur_p;
+
+ napi_disable(&lp->napi);
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- /* When we do an Axi Ethernet reset, it resets the complete core
- * including the MDIO. MDIO must be disabled before resetting.
- * Hold MDIO bus lock to avoid MDIO accesses during the reset.
- */
- axienet_lock_mii(lp);
- __axienet_device_reset(lp);
- axienet_unlock_mii(lp);
+
+ axienet_dma_stop(lp);
for (i = 0; i < lp->tx_bd_num; i++) {
cur_p = &lp->tx_bd_v[i];
if (cur_p->cntrl) {
dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
- dma_unmap_single(ndev->dev.parent, addr,
+ dma_unmap_single(lp->dev, addr,
(cur_p->cntrl &
XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
@@ -1735,50 +1777,7 @@ static void axienet_dma_err_handler(struct work_struct *work)
lp->tx_bd_tail = 0;
lp->rx_bd_ci = 0;
- /* Start updating the Rx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
- (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = ((cr & ~XAXIDMA_DELAY_MASK) |
- (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Finally write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- /* Start updating the Tx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
- (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
- (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Finally write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- /* Populate the tail pointer and bring the Rx Axi DMA engine out of
- * halted state. This will make the Rx side ready for reception.
- */
- axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
- axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
-
- /* Write to the RS (Run-stop) bit in the Tx channel control register.
- * Tx channel is now ready to run. But only after we write to the
- * tail pointer register that the Tx channel will start transmitting
- */
- axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
+ axienet_dma_start(lp);
axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
axienet_status &= ~XAE_RCW1_RX_MASK;
@@ -1799,6 +1798,7 @@ static void axienet_dma_err_handler(struct work_struct *work)
axienet_set_mac_address(ndev, NULL);
axienet_set_multicast_list(ndev);
axienet_setoptions(ndev, lp->options);
+ napi_enable(&lp->napi);
}
/**
@@ -1847,6 +1847,8 @@ static int axienet_probe(struct platform_device *pdev)
lp->rx_bd_num = RX_BD_NUM_DEFAULT;
lp->tx_bd_num = TX_BD_NUM_DEFAULT;
+ netif_napi_add(ndev, &lp->napi, axienet_poll, NAPI_POLL_WEIGHT);
+
lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
if (!lp->axi_clk) {
/* For backward compatibility, if named AXI clock is not present,
@@ -2053,7 +2055,9 @@ static int axienet_probe(struct platform_device *pdev)
}
lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
+ lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
+ lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
/* Reset core now that clocks are enabled, prior to accessing MDIO */
ret = __axienet_device_reset(lp);
@@ -2079,12 +2083,12 @@ static int axienet_probe(struct platform_device *pdev)
ret = -EPROBE_DEFER;
goto cleanup_mdio;
}
- lp->phylink_config.pcs_poll = true;
+ lp->pcs.ops = &axienet_pcs_ops;
+ lp->pcs.poll = true;
}
lp->phylink_config.dev = &ndev->dev;
lp->phylink_config.type = PHYLINK_NETDEV;
- lp->phylink_config.legacy_pre_march2020 = true;
lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
MAC_10FD | MAC_100FD | MAC_1000FD;
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 77fa2cb03aca..57a24f62e353 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -498,7 +498,7 @@ static void xemaclite_update_address(struct net_local *drvdata,
* @dev: Pointer to the network device instance
* @address: Void pointer to the sockaddr structure
*
- * This function copies the HW address from the sockaddr strucutre to the
+ * This function copies the HW address from the sockaddr structure to the
* net_device structure and updates the address in HW.
*
* Return: Error if the net device is busy or 0 if the addr is set
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index ebd287039a54..5805e4a56385 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -1514,10 +1514,9 @@ acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level,
{
struct acpi_device *device;
bool *found = context;
- int result;
- result = acpi_bus_get_device(obj_handle, &device);
- if (result)
+ device = acpi_fetch_acpi_dev(obj_handle);
+ if (!device)
return AE_OK;
if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID))
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index c1fdd721a730..7db6c135ac6c 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -56,6 +56,7 @@ struct geneve_config {
bool use_udp6_rx_checksums;
bool ttl_inherit;
enum ifla_geneve_df df;
+ bool inner_proto_inherit;
};
/* Pseudo network device */
@@ -251,17 +252,24 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
}
}
- skb_reset_mac_header(skb);
- skb->protocol = eth_type_trans(skb, geneve->dev);
- skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
-
if (tun_dst)
skb_dst_set(skb, &tun_dst->dst);
- /* Ignore packet loops (and multicast echo) */
- if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) {
- geneve->dev->stats.rx_errors++;
- goto drop;
+ if (gnvh->proto_type == htons(ETH_P_TEB)) {
+ skb_reset_mac_header(skb);
+ skb->protocol = eth_type_trans(skb, geneve->dev);
+ skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+
+ /* Ignore packet loops (and multicast echo) */
+ if (ether_addr_equal(eth_hdr(skb)->h_source,
+ geneve->dev->dev_addr)) {
+ geneve->dev->stats.rx_errors++;
+ goto drop;
+ }
+ } else {
+ skb_reset_mac_header(skb);
+ skb->dev = geneve->dev;
+ skb->pkt_type = PACKET_HOST;
}
oiph = skb_network_header(skb);
@@ -345,6 +353,7 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
struct genevehdr *geneveh;
struct geneve_dev *geneve;
struct geneve_sock *gs;
+ __be16 inner_proto;
int opts_len;
/* Need UDP and Geneve header to be present */
@@ -356,7 +365,11 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (unlikely(geneveh->ver != GENEVE_VER))
goto drop;
- if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
+ inner_proto = geneveh->proto_type;
+
+ if (unlikely((inner_proto != htons(ETH_P_TEB) &&
+ inner_proto != htons(ETH_P_IP) &&
+ inner_proto != htons(ETH_P_IPV6))))
goto drop;
gs = rcu_dereference_sk_user_data(sk);
@@ -367,9 +380,14 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (!geneve)
goto drop;
+ if (unlikely((!geneve->cfg.inner_proto_inherit &&
+ inner_proto != htons(ETH_P_TEB)))) {
+ geneve->dev->stats.rx_dropped++;
+ goto drop;
+ }
+
opts_len = geneveh->opt_len * 4;
- if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
- htons(ETH_P_TEB),
+ if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len, inner_proto,
!net_eq(geneve->net, dev_net(geneve->dev)))) {
geneve->dev->stats.rx_dropped++;
goto drop;
@@ -717,7 +735,8 @@ static int geneve_stop(struct net_device *dev)
}
static void geneve_build_header(struct genevehdr *geneveh,
- const struct ip_tunnel_info *info)
+ const struct ip_tunnel_info *info,
+ __be16 inner_proto)
{
geneveh->ver = GENEVE_VER;
geneveh->opt_len = info->options_len / 4;
@@ -725,7 +744,7 @@ static void geneve_build_header(struct genevehdr *geneveh,
geneveh->critical = !!(info->key.tun_flags & TUNNEL_CRIT_OPT);
geneveh->rsvd1 = 0;
tunnel_id_to_vni(info->key.tun_id, geneveh->vni);
- geneveh->proto_type = htons(ETH_P_TEB);
+ geneveh->proto_type = inner_proto;
geneveh->rsvd2 = 0;
if (info->key.tun_flags & TUNNEL_GENEVE_OPT)
@@ -734,10 +753,12 @@ static void geneve_build_header(struct genevehdr *geneveh,
static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb,
const struct ip_tunnel_info *info,
- bool xnet, int ip_hdr_len)
+ bool xnet, int ip_hdr_len,
+ bool inner_proto_inherit)
{
bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
struct genevehdr *gnvh;
+ __be16 inner_proto;
int min_headroom;
int err;
@@ -755,8 +776,9 @@ static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb,
goto free_dst;
gnvh = __skb_push(skb, sizeof(*gnvh) + info->options_len);
- geneve_build_header(gnvh, info);
- skb_set_inner_protocol(skb, htons(ETH_P_TEB));
+ inner_proto = inner_proto_inherit ? skb->protocol : htons(ETH_P_TEB);
+ geneve_build_header(gnvh, info, inner_proto);
+ skb_set_inner_protocol(skb, inner_proto);
return 0;
free_dst:
@@ -925,7 +947,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
}
skb->protocol = eth_type_trans(skb, geneve->dev);
- netif_rx(skb);
+ __netif_rx(skb);
dst_release(&rt->dst);
return -EMSGSIZE;
}
@@ -959,7 +981,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
}
}
- err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr));
+ err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr),
+ geneve->cfg.inner_proto_inherit);
if (unlikely(err))
return err;
@@ -1021,7 +1044,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
}
skb->protocol = eth_type_trans(skb, geneve->dev);
- netif_rx(skb);
+ __netif_rx(skb);
dst_release(dst);
return -EMSGSIZE;
}
@@ -1038,7 +1061,8 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
ttl = key->ttl;
ttl = ttl ? : ip6_dst_hoplimit(dst);
}
- err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr));
+ err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr),
+ geneve->cfg.inner_proto_inherit);
if (unlikely(err))
return err;
@@ -1238,6 +1262,7 @@ static void geneve_setup(struct net_device *dev)
}
static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
+ [IFLA_GENEVE_UNSPEC] = { .strict_start_type = IFLA_GENEVE_INNER_PROTO_INHERIT },
[IFLA_GENEVE_ID] = { .type = NLA_U32 },
[IFLA_GENEVE_REMOTE] = { .len = sizeof_field(struct iphdr, daddr) },
[IFLA_GENEVE_REMOTE6] = { .len = sizeof(struct in6_addr) },
@@ -1251,6 +1276,7 @@ static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
[IFLA_GENEVE_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
[IFLA_GENEVE_TTL_INHERIT] = { .type = NLA_U8 },
[IFLA_GENEVE_DF] = { .type = NLA_U8 },
+ [IFLA_GENEVE_INNER_PROTO_INHERIT] = { .type = NLA_FLAG },
};
static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -1388,6 +1414,14 @@ static int geneve_configure(struct net *net, struct net_device *dev,
dst_cache_reset(&geneve->cfg.info.dst_cache);
memcpy(&geneve->cfg, cfg, sizeof(*cfg));
+ if (geneve->cfg.inner_proto_inherit) {
+ dev->header_ops = NULL;
+ dev->type = ARPHRD_NONE;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->flags = IFF_NOARP;
+ }
+
err = register_netdevice(dev);
if (err)
return err;
@@ -1561,10 +1595,18 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
#endif
}
+ if (data[IFLA_GENEVE_INNER_PROTO_INHERIT]) {
+ if (changelink) {
+ attrtype = IFLA_GENEVE_INNER_PROTO_INHERIT;
+ goto change_notsup;
+ }
+ cfg->inner_proto_inherit = true;
+ }
+
return 0;
change_notsup:
NL_SET_ERR_MSG_ATTR(extack, data[attrtype],
- "Changing VNI, Port, endpoint IP address family, external, and UDP checksum attributes are not supported");
+ "Changing VNI, Port, endpoint IP address family, external, inner_proto_inherit, and UDP checksum attributes are not supported");
return -EOPNOTSUPP;
}
@@ -1740,6 +1782,7 @@ static size_t geneve_get_size(const struct net_device *dev)
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_TX */
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_RX */
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL_INHERIT */
+ nla_total_size(0) + /* IFLA_GENEVE_INNER_PROTO_INHERIT */
0;
}
@@ -1799,6 +1842,10 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (nla_put_u8(skb, IFLA_GENEVE_TTL_INHERIT, ttl_inherit))
goto nla_put_failure;
+ if (geneve->cfg.inner_proto_inherit &&
+ nla_put_flag(skb, IFLA_GENEVE_INNER_PROTO_INHERIT))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
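
The IFLA_GENEVE_INNER_PROTO_INHERIT support above lets a GENEVE device carry bare IPv4/IPv6 packets instead of Ethernet frames: on receive, ETH_P_IP and ETH_P_IPV6 inner protocols are accepted only when the flag is set, and on transmit the header's proto_type follows skb->protocol rather than always being ETH_P_TEB. A minimal userspace sketch of the receive-side acceptance rule (the function and constants below are illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>          /* htons() */

    #define ETH_P_TEB  0x6558       /* transparent Ethernet bridging */
    #define ETH_P_IP   0x0800
    #define ETH_P_IPV6 0x86DD

    /* TEB is always accepted; bare IP only when the device was created
     * with inner_proto_inherit, mirroring the geneve_udp_encap_recv() checks. */
    static bool geneve_rx_proto_ok(uint16_t proto_be, bool inner_proto_inherit)
    {
        if (proto_be == htons(ETH_P_TEB))
            return true;
        if (proto_be != htons(ETH_P_IP) && proto_be != htons(ETH_P_IPV6))
            return false;                    /* unknown inner protocol */
        return inner_proto_inherit;          /* bare IP needs the flag */
    }

    int main(void)
    {
        printf("TEB, no flag:  %d\n", geneve_rx_proto_ok(htons(ETH_P_TEB), false)); /* 1 */
        printf("IPv4, no flag: %d\n", geneve_rx_proto_ok(htons(ETH_P_IP), false));  /* 0 */
        printf("IPv4, flag:    %d\n", geneve_rx_proto_ok(htons(ETH_P_IP), true));   /* 1 */
        return 0;
    }
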
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 24e5c54d06c1..a208e2b1a9af 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -66,13 +66,23 @@ struct gtp_dev {
struct sock *sk0;
struct sock *sk1u;
+ u8 sk_created;
struct net_device *dev;
+ struct net *net;
unsigned int role;
unsigned int hash_size;
struct hlist_head *tid_hash;
struct hlist_head *addr_hash;
+
+ u8 restart_count;
+};
+
+struct echo_info {
+ struct in_addr ms_addr_ip4;
+ struct in_addr peer_addr_ip4;
+ u8 gtp_version;
};
static unsigned int gtp_net_id __read_mostly;
@@ -83,6 +93,16 @@ struct gtp_net {
static u32 gtp_h_initval;
+static struct genl_family gtp_genl_family;
+
+enum gtp_multicast_groups {
+ GTP_GENL_MCGRP,
+};
+
+static const struct genl_multicast_group gtp_genl_mcgrps[] = {
+ [GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME },
+};
+
static void pdp_context_delete(struct pdp_ctx *pctx);
static inline u32 gtp0_hashfn(u64 tid)
@@ -207,7 +227,7 @@ static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
dev_sw_netstats_rx_add(pctx->dev, skb->len);
- netif_rx(skb);
+ __netif_rx(skb);
return 0;
err:
@@ -215,6 +235,174 @@ err:
return -1;
}
+static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
+ const struct sock *sk,
+ __be32 daddr, __be32 saddr)
+{
+ memset(fl4, 0, sizeof(*fl4));
+ fl4->flowi4_oif = sk->sk_bound_dev_if;
+ fl4->daddr = daddr;
+ fl4->saddr = saddr;
+ fl4->flowi4_tos = RT_CONN_FLAGS(sk);
+ fl4->flowi4_proto = sk->sk_protocol;
+
+ return ip_route_output_key(sock_net(sk), fl4);
+}
+
+/* GSM TS 09.60. 7.3
+ * In all Path Management messages:
+ * - TID is not used and shall be set to 0.
+ * - Flow Label is not used and shall be set to 0.
+ * In signalling messages:
+ * - number: this field is not yet used in signalling messages.
+ * It shall be set to 255 by the sender and shall be ignored
+ * by the receiver.
+ * Returns true if the echo req was correct, false otherwise.
+ */
+static bool gtp0_validate_echo_hdr(struct gtp0_header *gtp0)
+{
+ return !(gtp0->tid || (gtp0->flags ^ 0x1e) ||
+ gtp0->number != 0xff || gtp0->flow);
+}
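
The 0x1e flags value checked above decodes, per GSM TS 09.60, as version 0 in the top three bits, the PT bit set (GTP rather than GTP'), and the three mandatory spare bits set. A standalone sketch of the same echo-request validation, using a simplified stand-in for the kernel's struct gtp0_header (field widths assumed from the spec):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified GTP v0 header; multi-byte fields are network byte order
     * on the wire, but only zero/non-zero matters for this check. */
    struct gtp0_hdr {
        uint8_t  flags;
        uint8_t  type;
        uint16_t length;
        uint16_t seq;
        uint16_t flow;
        uint8_t  number;
        uint8_t  spare[3];
        uint64_t tid;
    };

    /* Same predicate as gtp0_validate_echo_hdr(): v0 flags, TID == 0,
     * Flow Label == 0 and number == 0xff. */
    static bool gtp0_echo_hdr_ok(const struct gtp0_hdr *h)
    {
        return h->flags == 0x1e && !h->tid && !h->flow && h->number == 0xff;
    }

    int main(void)
    {
        struct gtp0_hdr h = { .flags = 0x1e, .number = 0xff };

        printf("valid echo header: %d\n", gtp0_echo_hdr_ok(&h));  /* 1 */
        h.tid = 42;
        printf("non-zero TID:      %d\n", gtp0_echo_hdr_ok(&h));  /* 0 */
        return 0;
    }
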
+
+/* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */
+static void gtp0_build_echo_msg(struct gtp0_header *hdr, __u8 msg_type)
+{
+ int len_pkt, len_hdr;
+
+ hdr->flags = 0x1e; /* v0, GTP-non-prime. */
+ hdr->type = msg_type;
+ /* GSM TS 09.60 7.3: in all Path Management messages, Flow Label
+ * and TID are not used and shall be set to 0.
+ */
+ hdr->flow = 0;
+ hdr->tid = 0;
+ hdr->number = 0xff;
+ hdr->spare[0] = 0xff;
+ hdr->spare[1] = 0xff;
+ hdr->spare[2] = 0xff;
+
+ len_pkt = sizeof(struct gtp0_packet);
+ len_hdr = sizeof(struct gtp0_header);
+
+ if (msg_type == GTP_ECHO_RSP)
+ hdr->length = htons(len_pkt - len_hdr);
+ else
+ hdr->length = 0;
+}
+
+static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
+{
+ struct gtp0_packet *gtp_pkt;
+ struct gtp0_header *gtp0;
+ struct rtable *rt;
+ struct flowi4 fl4;
+ struct iphdr *iph;
+ __be16 seq;
+
+ gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
+
+ if (!gtp0_validate_echo_hdr(gtp0))
+ return -1;
+
+ seq = gtp0->seq;
+
+ /* pull GTP and UDP headers */
+ skb_pull_data(skb, sizeof(struct gtp0_header) + sizeof(struct udphdr));
+
+ gtp_pkt = skb_push(skb, sizeof(struct gtp0_packet));
+ memset(gtp_pkt, 0, sizeof(struct gtp0_packet));
+
+ gtp0_build_echo_msg(&gtp_pkt->gtp0_h, GTP_ECHO_RSP);
+
+ /* GSM TS 09.60. 7.3 The Sequence Number in a signalling response
+ * message shall be copied from the signalling request message
+ * that the GSN is replying to.
+ */
+ gtp_pkt->gtp0_h.seq = seq;
+
+ gtp_pkt->ie.tag = GTPIE_RECOVERY;
+ gtp_pkt->ie.val = gtp->restart_count;
+
+ iph = ip_hdr(skb);
+
+ /* find route to the sender,
+ * src address becomes dst address and vice versa.
+ */
+ rt = ip4_route_output_gtp(&fl4, gtp->sk0, iph->saddr, iph->daddr);
+ if (IS_ERR(rt)) {
+ netdev_dbg(gtp->dev, "no route for echo response from %pI4\n",
+ &iph->saddr);
+ return -1;
+ }
+
+ udp_tunnel_xmit_skb(rt, gtp->sk0, skb,
+ fl4.saddr, fl4.daddr,
+ iph->tos,
+ ip4_dst_hoplimit(&rt->dst),
+ 0,
+ htons(GTP0_PORT), htons(GTP0_PORT),
+ !net_eq(sock_net(gtp->sk1u),
+ dev_net(gtp->dev)),
+ false);
+ return 0;
+}
+
+static int gtp_genl_fill_echo(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
+ int flags, u32 type, struct echo_info echo)
+{
+ void *genlh;
+
+ genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
+ type);
+ if (!genlh)
+ goto failure;
+
+ if (nla_put_u32(skb, GTPA_VERSION, echo.gtp_version) ||
+ nla_put_be32(skb, GTPA_PEER_ADDRESS, echo.peer_addr_ip4.s_addr) ||
+ nla_put_be32(skb, GTPA_MS_ADDRESS, echo.ms_addr_ip4.s_addr))
+ goto failure;
+
+ genlmsg_end(skb, genlh);
+ return 0;
+
+failure:
+ genlmsg_cancel(skb, genlh);
+ return -EMSGSIZE;
+}
+
+static int gtp0_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
+{
+ struct gtp0_header *gtp0;
+ struct echo_info echo;
+ struct sk_buff *msg;
+ struct iphdr *iph;
+ int ret;
+
+ gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
+
+ if (!gtp0_validate_echo_hdr(gtp0))
+ return -1;
+
+ iph = ip_hdr(skb);
+ echo.ms_addr_ip4.s_addr = iph->daddr;
+ echo.peer_addr_ip4.s_addr = iph->saddr;
+ echo.gtp_version = GTP_V0;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ return ret;
+ }
+
+ return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev),
+ msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
+}
+
/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
@@ -231,6 +419,16 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
if ((gtp0->flags >> 5) != GTP_V0)
return 1;
+ /* If the sockets were created in the kernel, it means that
+ * there is no daemon running in userspace which would
+ * handle echo requests.
+ */
+ if (gtp0->type == GTP_ECHO_REQ && gtp->sk_created)
+ return gtp0_send_echo_resp(gtp, skb);
+
+ if (gtp0->type == GTP_ECHO_RSP && gtp->sk_created)
+ return gtp0_handle_echo_resp(gtp, skb);
+
if (gtp0->type != GTP_TPDU)
return 1;
@@ -243,6 +441,131 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
return gtp_rx(pctx, skb, hdrlen, gtp->role);
}
+/* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */
+static void gtp1u_build_echo_msg(struct gtp1_header_long *hdr, __u8 msg_type)
+{
+ int len_pkt, len_hdr;
+
+ /* S flag must be set to 1 */
+ hdr->flags = 0x32; /* v1, GTP-non-prime. */
+ hdr->type = msg_type;
+ /* 3GPP TS 29.281 5.1 - TEID has to be set to 0 */
+ hdr->tid = 0;
+
+ /* seq, npdu and next are counted in the length of the GTP packet;
+ * that's why the size of gtp1_header is subtracted,
+ * not the size of gtp1_header_long.
+ */
+
+ len_hdr = sizeof(struct gtp1_header);
+
+ if (msg_type == GTP_ECHO_RSP) {
+ len_pkt = sizeof(struct gtp1u_packet);
+ hdr->length = htons(len_pkt - len_hdr);
+ } else {
+ /* GTP_ECHO_REQ does not carry a GTP Information Element,
+ * which is why gtp1_header_long is used here.
+ */
+ len_pkt = sizeof(struct gtp1_header_long);
+ hdr->length = htons(len_pkt - len_hdr);
+ }
+}
+
+static int gtp1u_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
+{
+ struct gtp1_header_long *gtp1u;
+ struct gtp1u_packet *gtp_pkt;
+ struct rtable *rt;
+ struct flowi4 fl4;
+ struct iphdr *iph;
+
+ gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr));
+
+ /* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response,
+ * Error Indication and Supported Extension Headers Notification
+ * messages, the S flag shall be set to 1 and TEID shall be set to 0.
+ */
+ if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid)
+ return -1;
+
+ /* pull GTP and UDP headers */
+ skb_pull_data(skb,
+ sizeof(struct gtp1_header_long) + sizeof(struct udphdr));
+
+ gtp_pkt = skb_push(skb, sizeof(struct gtp1u_packet));
+ memset(gtp_pkt, 0, sizeof(struct gtp1u_packet));
+
+ gtp1u_build_echo_msg(&gtp_pkt->gtp1u_h, GTP_ECHO_RSP);
+
+ /* 3GPP TS 29.281 7.7.2 - The Restart Counter value in the
+ * Recovery information element shall not be used, i.e. it shall
+ * be set to zero by the sender and shall be ignored by the receiver.
+ * The Recovery information element is mandatory due to backwards
+ * compatibility reasons.
+ */
+ gtp_pkt->ie.tag = GTPIE_RECOVERY;
+ gtp_pkt->ie.val = 0;
+
+ iph = ip_hdr(skb);
+
+ /* find route to the sender,
+ * src address becomes dst address and vice versa.
+ */
+ rt = ip4_route_output_gtp(&fl4, gtp->sk1u, iph->saddr, iph->daddr);
+ if (IS_ERR(rt)) {
+ netdev_dbg(gtp->dev, "no route for echo response from %pI4\n",
+ &iph->saddr);
+ return -1;
+ }
+
+ udp_tunnel_xmit_skb(rt, gtp->sk1u, skb,
+ fl4.saddr, fl4.daddr,
+ iph->tos,
+ ip4_dst_hoplimit(&rt->dst),
+ 0,
+ htons(GTP1U_PORT), htons(GTP1U_PORT),
+ !net_eq(sock_net(gtp->sk1u),
+ dev_net(gtp->dev)),
+ false);
+ return 0;
+}
+
+static int gtp1u_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
+{
+ struct gtp1_header_long *gtp1u;
+ struct echo_info echo;
+ struct sk_buff *msg;
+ struct iphdr *iph;
+ int ret;
+
+ gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr));
+
+ /* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response,
+ * Error Indication and Supported Extension Headers Notification
+ * messages, the S flag shall be set to 1 and TEID shall be set to 0.
+ */
+ if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid)
+ return -1;
+
+ iph = ip_hdr(skb);
+ echo.ms_addr_ip4.s_addr = iph->daddr;
+ echo.peer_addr_ip4.s_addr = iph->saddr;
+ echo.gtp_version = GTP_V1;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ return ret;
+ }
+
+ return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev),
+ msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
+}
+
static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
unsigned int hdrlen = sizeof(struct udphdr) +
@@ -258,6 +581,16 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
if ((gtp1->flags >> 5) != GTP_V1)
return 1;
+ /* If the sockets were created in the kernel, it means that
+ * there is no daemon running in userspace which would
+ * handle echo requests.
+ */
+ if (gtp1->type == GTP_ECHO_REQ && gtp->sk_created)
+ return gtp1u_send_echo_resp(gtp, skb);
+
+ if (gtp1->type == GTP_ECHO_RSP && gtp->sk_created)
+ return gtp1u_handle_echo_resp(gtp, skb);
+
if (gtp1->type != GTP_TPDU)
return 1;
@@ -320,8 +653,16 @@ static void gtp_encap_disable_sock(struct sock *sk)
static void gtp_encap_disable(struct gtp_dev *gtp)
{
- gtp_encap_disable_sock(gtp->sk0);
- gtp_encap_disable_sock(gtp->sk1u);
+ if (gtp->sk_created) {
+ udp_tunnel_sock_release(gtp->sk0->sk_socket);
+ udp_tunnel_sock_release(gtp->sk1u->sk_socket);
+ gtp->sk_created = false;
+ gtp->sk0 = NULL;
+ gtp->sk1u = NULL;
+ } else {
+ gtp_encap_disable_sock(gtp->sk0);
+ gtp_encap_disable_sock(gtp->sk1u);
+ }
}
/* UDP encapsulation receive handler. See net/ipv4/udp.c.
@@ -388,20 +729,6 @@ static void gtp_dev_uninit(struct net_device *dev)
free_percpu(dev->tstats);
}
-static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
- const struct sock *sk,
- __be32 daddr)
-{
- memset(fl4, 0, sizeof(*fl4));
- fl4->flowi4_oif = sk->sk_bound_dev_if;
- fl4->daddr = daddr;
- fl4->saddr = inet_sk(sk)->inet_saddr;
- fl4->flowi4_tos = RT_CONN_FLAGS(sk);
- fl4->flowi4_proto = sk->sk_protocol;
-
- return ip_route_output_key(sock_net(sk), fl4);
-}
-
static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
int payload_len = skb->len;
@@ -507,7 +834,8 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
}
netdev_dbg(dev, "found PDP context %p\n", pctx);
- rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr);
+ rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr,
+ inet_sk(pctx->sk)->inet_saddr);
if (IS_ERR(rt)) {
netdev_dbg(dev, "no route to SSGN %pI4\n",
&pctx->peer_addr_ip4.s_addr);
@@ -656,17 +984,69 @@ static void gtp_destructor(struct net_device *dev)
kfree(gtp->tid_hash);
}
+static struct sock *gtp_create_sock(int type, struct gtp_dev *gtp)
+{
+ struct udp_tunnel_sock_cfg tuncfg = {};
+ struct udp_port_cfg udp_conf = {
+ .local_ip.s_addr = htonl(INADDR_ANY),
+ .family = AF_INET,
+ };
+ struct net *net = gtp->net;
+ struct socket *sock;
+ int err;
+
+ if (type == UDP_ENCAP_GTP0)
+ udp_conf.local_udp_port = htons(GTP0_PORT);
+ else if (type == UDP_ENCAP_GTP1U)
+ udp_conf.local_udp_port = htons(GTP1U_PORT);
+ else
+ return ERR_PTR(-EINVAL);
+
+ err = udp_sock_create(net, &udp_conf, &sock);
+ if (err)
+ return ERR_PTR(err);
+
+ tuncfg.sk_user_data = gtp;
+ tuncfg.encap_type = type;
+ tuncfg.encap_rcv = gtp_encap_recv;
+ tuncfg.encap_destroy = NULL;
+
+ setup_udp_tunnel_sock(net, sock, &tuncfg);
+
+ return sock->sk;
+}
+
+static int gtp_create_sockets(struct gtp_dev *gtp, struct nlattr *data[])
+{
+ struct sock *sk1u = NULL;
+ struct sock *sk0 = NULL;
+
+ sk0 = gtp_create_sock(UDP_ENCAP_GTP0, gtp);
+ if (IS_ERR(sk0))
+ return PTR_ERR(sk0);
+
+ sk1u = gtp_create_sock(UDP_ENCAP_GTP1U, gtp);
+ if (IS_ERR(sk1u)) {
+ udp_tunnel_sock_release(sk0->sk_socket);
+ return PTR_ERR(sk1u);
+ }
+
+ gtp->sk_created = true;
+ gtp->sk0 = sk0;
+ gtp->sk1u = sk1u;
+
+ return 0;
+}
+
static int gtp_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
+ unsigned int role = GTP_ROLE_GGSN;
struct gtp_dev *gtp;
struct gtp_net *gn;
int hashsize, err;
- if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
- return -EINVAL;
-
gtp = netdev_priv(dev);
if (!data[IFLA_GTP_PDP_HASHSIZE]) {
@@ -677,11 +1057,28 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
hashsize = 1024;
}
+ if (data[IFLA_GTP_ROLE]) {
+ role = nla_get_u32(data[IFLA_GTP_ROLE]);
+ if (role > GTP_ROLE_SGSN)
+ return -EINVAL;
+ }
+ gtp->role = role;
+
+ if (!data[IFLA_GTP_RESTART_COUNT])
+ gtp->restart_count = 0;
+ else
+ gtp->restart_count = nla_get_u8(data[IFLA_GTP_RESTART_COUNT]);
+
+ gtp->net = src_net;
+
err = gtp_hashtable_new(gtp, hashsize);
if (err < 0)
return err;
- err = gtp_encap_enable(gtp, data);
+ if (data[IFLA_GTP_CREATE_SOCKETS])
+ err = gtp_create_sockets(gtp, data);
+ else
+ err = gtp_encap_enable(gtp, data);
if (err < 0)
goto out_hashtable;
@@ -726,6 +1123,8 @@ static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
[IFLA_GTP_FD1] = { .type = NLA_U32 },
[IFLA_GTP_PDP_HASHSIZE] = { .type = NLA_U32 },
[IFLA_GTP_ROLE] = { .type = NLA_U32 },
+ [IFLA_GTP_CREATE_SOCKETS] = { .type = NLA_U8 },
+ [IFLA_GTP_RESTART_COUNT] = { .type = NLA_U8 },
};
static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -740,7 +1139,8 @@ static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
static size_t gtp_get_size(const struct net_device *dev)
{
return nla_total_size(sizeof(__u32)) + /* IFLA_GTP_PDP_HASHSIZE */
- nla_total_size(sizeof(__u32)); /* IFLA_GTP_ROLE */
+ nla_total_size(sizeof(__u32)) + /* IFLA_GTP_ROLE */
+ nla_total_size(sizeof(__u8)); /* IFLA_GTP_RESTART_COUNT */
}
static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
@@ -751,6 +1151,8 @@ static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_GTP_ROLE, gtp->role))
goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_GTP_RESTART_COUNT, gtp->restart_count))
+ goto nla_put_failure;
return 0;
@@ -848,7 +1250,9 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
{
struct sock *sk1u = NULL;
struct sock *sk0 = NULL;
- unsigned int role = GTP_ROLE_GGSN;
+
+ if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
+ return -EINVAL;
if (data[IFLA_GTP_FD0]) {
u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
@@ -868,18 +1272,8 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
}
}
- if (data[IFLA_GTP_ROLE]) {
- role = nla_get_u32(data[IFLA_GTP_ROLE]);
- if (role > GTP_ROLE_SGSN) {
- gtp_encap_disable_sock(sk0);
- gtp_encap_disable_sock(sk1u);
- return -EINVAL;
- }
- }
-
gtp->sk0 = sk0;
gtp->sk1u = sk1u;
- gtp->role = role;
return 0;
}
@@ -1183,16 +1577,6 @@ out_unlock:
return err;
}
-static struct genl_family gtp_genl_family;
-
-enum gtp_multicast_groups {
- GTP_GENL_MCGRP,
-};
-
-static const struct genl_multicast_group gtp_genl_mcgrps[] = {
- [GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME },
-};
-
static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
int flags, u32 type, struct pdp_ctx *pctx)
{
@@ -1336,6 +1720,95 @@ out:
return skb->len;
}
+static int gtp_genl_send_echo_req(struct sk_buff *skb, struct genl_info *info)
+{
+ struct sk_buff *skb_to_send;
+ __be32 src_ip, dst_ip;
+ unsigned int version;
+ struct gtp_dev *gtp;
+ struct flowi4 fl4;
+ struct rtable *rt;
+ struct sock *sk;
+ __be16 port;
+ int len;
+
+ if (!info->attrs[GTPA_VERSION] ||
+ !info->attrs[GTPA_LINK] ||
+ !info->attrs[GTPA_PEER_ADDRESS] ||
+ !info->attrs[GTPA_MS_ADDRESS])
+ return -EINVAL;
+
+ version = nla_get_u32(info->attrs[GTPA_VERSION]);
+ dst_ip = nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
+ src_ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
+
+ gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
+ if (!gtp)
+ return -ENODEV;
+
+ if (!gtp->sk_created)
+ return -EOPNOTSUPP;
+ if (!(gtp->dev->flags & IFF_UP))
+ return -ENETDOWN;
+
+ if (version == GTP_V0) {
+ struct gtp0_header *gtp0_h;
+
+ len = LL_RESERVED_SPACE(gtp->dev) + sizeof(struct gtp0_header) +
+ sizeof(struct iphdr) + sizeof(struct udphdr);
+
+ skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len);
+ if (!skb_to_send)
+ return -ENOMEM;
+
+ sk = gtp->sk0;
+ port = htons(GTP0_PORT);
+
+ gtp0_h = skb_push(skb_to_send, sizeof(struct gtp0_header));
+ memset(gtp0_h, 0, sizeof(struct gtp0_header));
+ gtp0_build_echo_msg(gtp0_h, GTP_ECHO_REQ);
+ } else if (version == GTP_V1) {
+ struct gtp1_header_long *gtp1u_h;
+
+ len = LL_RESERVED_SPACE(gtp->dev) +
+ sizeof(struct gtp1_header_long) +
+ sizeof(struct iphdr) + sizeof(struct udphdr);
+
+ skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len);
+ if (!skb_to_send)
+ return -ENOMEM;
+
+ sk = gtp->sk1u;
+ port = htons(GTP1U_PORT);
+
+ gtp1u_h = skb_push(skb_to_send,
+ sizeof(struct gtp1_header_long));
+ memset(gtp1u_h, 0, sizeof(struct gtp1_header_long));
+ gtp1u_build_echo_msg(gtp1u_h, GTP_ECHO_REQ);
+ } else {
+ return -ENODEV;
+ }
+
+ rt = ip4_route_output_gtp(&fl4, sk, dst_ip, src_ip);
+ if (IS_ERR(rt)) {
+ netdev_dbg(gtp->dev, "no route for echo request to %pI4\n",
+ &dst_ip);
+ kfree_skb(skb_to_send);
+ return -ENODEV;
+ }
+
+ udp_tunnel_xmit_skb(rt, sk, skb_to_send,
+ fl4.saddr, fl4.daddr,
+ fl4.flowi4_tos,
+ ip4_dst_hoplimit(&rt->dst),
+ 0,
+ port, port,
+ !net_eq(sock_net(sk),
+ dev_net(gtp->dev)),
+ false);
+ return 0;
+}
+
static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
[GTPA_LINK] = { .type = NLA_U32, },
[GTPA_VERSION] = { .type = NLA_U32, },
@@ -1368,6 +1841,12 @@ static const struct genl_small_ops gtp_genl_ops[] = {
.dumpit = gtp_genl_dump_pdp,
.flags = GENL_ADMIN_PERM,
},
+ {
+ .cmd = GTP_CMD_ECHOREQ,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = gtp_genl_send_echo_req,
+ .flags = GENL_ADMIN_PERM,
+ },
};
static struct genl_family gtp_genl_family __ro_after_init = {
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index a03d0b474641..3e69079ed694 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -982,10 +982,10 @@ static int baycom_setmode(struct baycom_state *bc, const char *modestr)
bc->cfg.extmodem = 0;
if (strstr(modestr,"extmodem"))
bc->cfg.extmodem = 1;
- if (strstr(modestr,"noloopback"))
- bc->cfg.loopback = 0;
if (strstr(modestr,"loopback"))
bc->cfg.loopback = 1;
+ if (strstr(modestr, "noloopback"))
+ bc->cfg.loopback = 0;
if ((cp = strstr(modestr,"fclk="))) {
bc->cfg.fclk = simple_strtoul(cp+5, NULL, 0);
if (bc->cfg.fclk < 1000000)
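
The reordering above matters because strstr(modestr, "loopback") also matches inside "noloopback"; with the old order a "noloopback" request was immediately overridden. Checking "noloopback" last lets it win. A tiny standalone demonstration of the substring pitfall:

    #include <stdio.h>
    #include <string.h>

    static int parse_loopback(const char *modestr)
    {
        int loopback = -1;                    /* -1: not mentioned */

        if (strstr(modestr, "loopback"))
            loopback = 1;
        if (strstr(modestr, "noloopback"))
            loopback = 0;                     /* must be tested last to win */
        return loopback;
    }

    int main(void)
    {
        printf("\"loopback\"   -> %d\n", parse_loopback("loopback"));   /* 1 */
        printf("\"noloopback\" -> %d\n", parse_loopback("noloopback")); /* 0 */
        return 0;
    }
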
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index 7e527499d3ad..a2a12208e3ad 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -28,6 +28,7 @@
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
+#include <linux/jiffies.h>
#include <net/ax25.h>
#include "z8530.h"
@@ -377,7 +378,7 @@ static int __init dmascc_init(void)
udelay(2000000 / TMR_0_HZ);
/* Timing loop */
- while (jiffies - time < 13) {
+ while (time_is_after_jiffies(time + 13)) {
for (i = 0; i < hw[h].num_devs; i++)
if (base[i] && counting[i]) {
/* Read back Timer 1: latch; read LSB; read MSB */
@@ -525,7 +526,7 @@ static int __init setup_adapter(int card_base, int type, int n)
/* Wait and detect IRQ */
time = jiffies;
- while (jiffies - time < 2 + HZ / TMR_0_HZ);
+ while (time_is_after_jiffies(time + 2 + HZ / TMR_0_HZ));
irq = probe_irq_off(irqs);
/* Clear pending interrupt, disable interrupts */
@@ -1353,7 +1354,7 @@ static void es_isr(struct scc_priv *priv)
/* Switch state */
write_scc(priv, R15, 0);
if (priv->tx_count &&
- (jiffies - priv->tx_start) < priv->param.txtimeout) {
+ time_is_after_jiffies(priv->tx_start + priv->param.txtimeout)) {
priv->state = TX_PAUSE;
start_timer(priv, priv->param.txpause, 0);
} else {
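
The dmascc changes above replace open-coded "jiffies - time < N" tests with time_is_after_jiffies(), which remains correct across a jiffies wraparound because the comparison is done on a signed difference. A minimal sketch of that idiom (the helper name is illustrative, not the kernel macro):

    #include <stdio.h>
    #include <limits.h>

    /* Wraparound-safe "deadline not yet reached" test: the same
     * signed-difference trick behind time_before()/time_is_after_jiffies(). */
    static int before_deadline(unsigned long now, unsigned long deadline)
    {
        return (long)(now - deadline) < 0;
    }

    int main(void)
    {
        unsigned long now = ULONG_MAX - 2;    /* counter about to wrap */
        unsigned long deadline = now + 13;    /* wraps past zero */

        printf("naive now < deadline: %d\n", now < deadline);                 /* 0: wrong */
        printf("wrap-safe check:      %d\n", before_deadline(now, deadline)); /* 1 */
        return 0;
    }
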
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index afa81a9480cc..9442f751ad3a 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -154,19 +154,15 @@ static void free_netvsc_device(struct rcu_head *head)
kfree(nvdev->extension);
- if (nvdev->recv_original_buf) {
- hv_unmap_memory(nvdev->recv_buf);
+ if (nvdev->recv_original_buf)
vfree(nvdev->recv_original_buf);
- } else {
+ else
vfree(nvdev->recv_buf);
- }
- if (nvdev->send_original_buf) {
- hv_unmap_memory(nvdev->send_buf);
+ if (nvdev->send_original_buf)
vfree(nvdev->send_original_buf);
- } else {
+ else
vfree(nvdev->send_buf);
- }
bitmap_free(nvdev->send_section_map);
@@ -765,6 +761,12 @@ void netvsc_device_remove(struct hv_device *device)
netvsc_teardown_send_gpadl(device, net_device, ndev);
}
+ if (net_device->recv_original_buf)
+ hv_unmap_memory(net_device->recv_buf);
+
+ if (net_device->send_original_buf)
+ hv_unmap_memory(net_device->send_buf);
+
/* Release all resources */
free_netvsc_device_rcu(net_device);
}
@@ -1628,7 +1630,6 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
case VM_PKT_DATA_USING_XFER_PAGES:
return netvsc_receive(ndev, net_device, nvchan, desc);
- break;
case VM_PKT_DATA_INBAND:
netvsc_receive_inband(ndev, net_device, desc);
@@ -1821,6 +1822,12 @@ cleanup:
netif_napi_del(&net_device->chan_table[0].napi);
cleanup2:
+ if (net_device->recv_original_buf)
+ hv_unmap_memory(net_device->recv_buf);
+
+ if (net_device->send_original_buf)
+ hv_unmap_memory(net_device->send_buf);
+
free_netvsc_device(&net_device->rcu);
return ERR_PTR(ret);
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index 2f5e7b31032a..07bafbf94680 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -74,81 +74,6 @@ struct atusb_chip_data {
int (*set_txpower)(struct ieee802154_hw*, s32);
};
-/* ----- USB commands without data ----------------------------------------- */
-
-/* To reduce the number of error checks in the code, we record the first error
- * in atusb->err and reject all subsequent requests until the error is cleared.
- */
-
-static int atusb_control_msg(struct atusb *atusb, unsigned int pipe,
- __u8 request, __u8 requesttype,
- __u16 value, __u16 index,
- void *data, __u16 size, int timeout)
-{
- struct usb_device *usb_dev = atusb->usb_dev;
- int ret;
-
- if (atusb->err)
- return atusb->err;
-
- ret = usb_control_msg(usb_dev, pipe, request, requesttype,
- value, index, data, size, timeout);
- if (ret < size) {
- ret = ret < 0 ? ret : -ENODATA;
-
- atusb->err = ret;
- dev_err(&usb_dev->dev,
- "%s: req 0x%02x val 0x%x idx 0x%x, error %d\n",
- __func__, request, value, index, ret);
- }
- return ret;
-}
-
-static int atusb_command(struct atusb *atusb, u8 cmd, u8 arg)
-{
- struct usb_device *usb_dev = atusb->usb_dev;
-
- dev_dbg(&usb_dev->dev, "%s: cmd = 0x%x\n", __func__, cmd);
- return atusb_control_msg(atusb, usb_sndctrlpipe(usb_dev, 0),
- cmd, ATUSB_REQ_TO_DEV, arg, 0, NULL, 0, 1000);
-}
-
-static int atusb_write_reg(struct atusb *atusb, u8 reg, u8 value)
-{
- struct usb_device *usb_dev = atusb->usb_dev;
-
- dev_dbg(&usb_dev->dev, "%s: 0x%02x <- 0x%02x\n", __func__, reg, value);
- return atusb_control_msg(atusb, usb_sndctrlpipe(usb_dev, 0),
- ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
- value, reg, NULL, 0, 1000);
-}
-
-static int atusb_read_reg(struct atusb *atusb, u8 reg)
-{
- struct usb_device *usb_dev = atusb->usb_dev;
- int ret;
- u8 *buffer;
- u8 value;
-
- buffer = kmalloc(1, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
- dev_dbg(&usb_dev->dev, "%s: reg = 0x%x\n", __func__, reg);
- ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
- ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
- 0, reg, buffer, 1, 1000);
-
- if (ret >= 0) {
- value = buffer[0];
- kfree(buffer);
- return value;
- } else {
- kfree(buffer);
- return ret;
- }
-}
-
static int atusb_write_subreg(struct atusb *atusb, u8 reg, u8 mask,
u8 shift, u8 value)
{
@@ -158,7 +83,10 @@ static int atusb_write_subreg(struct atusb *atusb, u8 reg, u8 mask,
dev_dbg(&usb_dev->dev, "%s: 0x%02x <- 0x%02x\n", __func__, reg, value);
- orig = atusb_read_reg(atusb, reg);
+ ret = usb_control_msg_recv(usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
+ 0, reg, &orig, 1, 1000, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
/* Write the value only into that part of the register which is allowed
* by the mask. All other bits stay as before.
@@ -167,7 +95,8 @@ static int atusb_write_subreg(struct atusb *atusb, u8 reg, u8 mask,
tmp |= (value << shift) & mask;
if (tmp != orig)
- ret = atusb_write_reg(atusb, reg, tmp);
+ ret = usb_control_msg_send(usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ tmp, reg, NULL, 0, 1000, GFP_KERNEL);
return ret;
}
@@ -176,12 +105,16 @@ static int atusb_read_subreg(struct atusb *lp,
unsigned int addr, unsigned int mask,
unsigned int shift)
{
- int rc;
+ int reg, ret;
+
+ ret = usb_control_msg_recv(lp->usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
+ 0, addr, &reg, 1, 1000, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
- rc = atusb_read_reg(lp, addr);
- rc = (rc & mask) >> shift;
+ reg = (reg & mask) >> shift;
- return rc;
+ return reg;
}
static int atusb_get_and_clear_error(struct atusb *atusb)
@@ -419,16 +352,22 @@ static int atusb_set_hw_addr_filt(struct ieee802154_hw *hw,
u16 addr = le16_to_cpu(filt->short_addr);
dev_vdbg(dev, "%s called for saddr\n", __func__);
- atusb_write_reg(atusb, RG_SHORT_ADDR_0, addr);
- atusb_write_reg(atusb, RG_SHORT_ADDR_1, addr >> 8);
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ addr, RG_SHORT_ADDR_0, NULL, 0, 1000, GFP_KERNEL);
+
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ addr >> 8, RG_SHORT_ADDR_1, NULL, 0, 1000, GFP_KERNEL);
}
if (changed & IEEE802154_AFILT_PANID_CHANGED) {
u16 pan = le16_to_cpu(filt->pan_id);
dev_vdbg(dev, "%s called for pan id\n", __func__);
- atusb_write_reg(atusb, RG_PAN_ID_0, pan);
- atusb_write_reg(atusb, RG_PAN_ID_1, pan >> 8);
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ pan, RG_PAN_ID_0, NULL, 0, 1000, GFP_KERNEL);
+
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ pan >> 8, RG_PAN_ID_1, NULL, 0, 1000, GFP_KERNEL);
}
if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
@@ -437,7 +376,9 @@ static int atusb_set_hw_addr_filt(struct ieee802154_hw *hw,
memcpy(addr, &filt->ieee_addr, IEEE802154_EXTENDED_ADDR_LEN);
dev_vdbg(dev, "%s called for IEEE addr\n", __func__);
for (i = 0; i < 8; i++)
- atusb_write_reg(atusb, RG_IEEE_ADDR_0 + i, addr[i]);
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ addr[i], RG_IEEE_ADDR_0 + i, NULL, 0,
+ 1000, GFP_KERNEL);
}
if (changed & IEEE802154_AFILT_PANC_CHANGED) {
@@ -459,7 +400,8 @@ static int atusb_start(struct ieee802154_hw *hw)
dev_dbg(&usb_dev->dev, "%s\n", __func__);
schedule_delayed_work(&atusb->work, 0);
- atusb_command(atusb, ATUSB_RX_MODE, 1);
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_RX_MODE, ATUSB_REQ_TO_DEV, 1, 0,
+ NULL, 0, 1000, GFP_KERNEL);
ret = atusb_get_and_clear_error(atusb);
if (ret < 0)
usb_kill_anchored_urbs(&atusb->idle_urbs);
@@ -473,7 +415,8 @@ static void atusb_stop(struct ieee802154_hw *hw)
dev_dbg(&usb_dev->dev, "%s\n", __func__);
usb_kill_anchored_urbs(&atusb->idle_urbs);
- atusb_command(atusb, ATUSB_RX_MODE, 0);
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_RX_MODE, ATUSB_REQ_TO_DEV, 0, 0,
+ NULL, 0, 1000, GFP_KERNEL);
atusb_get_and_clear_error(atusb);
}
@@ -580,9 +523,11 @@ atusb_set_cca_mode(struct ieee802154_hw *hw, const struct wpan_phy_cca *cca)
static int hulusb_set_cca_ed_level(struct atusb *lp, int rssi_base_val)
{
- unsigned int cca_ed_thres;
+ int cca_ed_thres;
cca_ed_thres = atusb_read_subreg(lp, SR_CCA_ED_THRES);
+ if (cca_ed_thres < 0)
+ return cca_ed_thres;
switch (rssi_base_val) {
case -98:
@@ -799,18 +744,13 @@ static int atusb_get_and_show_revision(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
char *hw_name;
- unsigned char *buffer;
+ unsigned char buffer[3];
int ret;
- buffer = kmalloc(3, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
/* Get a couple of the ATMega Firmware values */
- ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
- ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
- buffer, 3, 1000);
- if (ret >= 0) {
+ ret = usb_control_msg_recv(atusb->usb_dev, 0, ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
+ buffer, 3, 1000, GFP_KERNEL);
+ if (!ret) {
atusb->fw_ver_maj = buffer[0];
atusb->fw_ver_min = buffer[1];
atusb->fw_hw_type = buffer[2];
@@ -849,7 +789,6 @@ static int atusb_get_and_show_revision(struct atusb *atusb)
dev_info(&usb_dev->dev, "Please update to version 0.2 or newer");
}
- kfree(buffer);
return ret;
}
@@ -863,7 +802,6 @@ static int atusb_get_and_show_build(struct atusb *atusb)
if (!build)
return -ENOMEM;
- /* We cannot call atusb_control_msg() here, since this request may read various length data */
ret = usb_control_msg(atusb->usb_dev, usb_rcvctrlpipe(usb_dev, 0), ATUSB_BUILD,
ATUSB_REQ_FROM_DEV, 0, 0, build, ATUSB_BUILD_SIZE, 1000);
if (ret >= 0) {
@@ -881,14 +819,27 @@ static int atusb_get_and_conf_chip(struct atusb *atusb)
u8 man_id_0, man_id_1, part_num, version_num;
const char *chip;
struct ieee802154_hw *hw = atusb->hw;
+ int ret;
- man_id_0 = atusb_read_reg(atusb, RG_MAN_ID_0);
- man_id_1 = atusb_read_reg(atusb, RG_MAN_ID_1);
- part_num = atusb_read_reg(atusb, RG_PART_NUM);
- version_num = atusb_read_reg(atusb, RG_VERSION_NUM);
+ ret = usb_control_msg_recv(usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
+ 0, RG_MAN_ID_0, &man_id_0, 1, 1000, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
- if (atusb->err)
- return atusb->err;
+ ret = usb_control_msg_recv(usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
+ 0, RG_MAN_ID_1, &man_id_1, 1, 1000, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ ret = usb_control_msg_recv(usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
+ 0, RG_PART_NUM, &part_num, 1, 1000, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ ret = usb_control_msg_recv(usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
+ 0, RG_VERSION_NUM, &version_num, 1, 1000, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS;
@@ -969,7 +920,7 @@ fail:
static int atusb_set_extended_addr(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
- unsigned char *buffer;
+ unsigned char buffer[IEEE802154_EXTENDED_ADDR_LEN];
__le64 extended_addr;
u64 addr;
int ret;
@@ -982,18 +933,12 @@ static int atusb_set_extended_addr(struct atusb *atusb)
return 0;
}
- buffer = kmalloc(IEEE802154_EXTENDED_ADDR_LEN, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
/* Firmware is new enough so we fetch the address from EEPROM */
- ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
- ATUSB_EUI64_READ, ATUSB_REQ_FROM_DEV, 0, 0,
- buffer, IEEE802154_EXTENDED_ADDR_LEN, 1000);
+ ret = usb_control_msg_recv(atusb->usb_dev, 0, ATUSB_EUI64_READ, ATUSB_REQ_FROM_DEV, 0, 0,
+ buffer, IEEE802154_EXTENDED_ADDR_LEN, 1000, GFP_KERNEL);
if (ret < 0) {
dev_err(&usb_dev->dev, "failed to fetch extended address, random address set\n");
ieee802154_random_extended_addr(&atusb->hw->phy->perm_extended_addr);
- kfree(buffer);
return ret;
}
@@ -1009,7 +954,6 @@ static int atusb_set_extended_addr(struct atusb *atusb)
&addr);
}
- kfree(buffer);
return ret;
}
@@ -1051,7 +995,8 @@ static int atusb_probe(struct usb_interface *interface,
hw->parent = &usb_dev->dev;
- atusb_command(atusb, ATUSB_RF_RESET, 0);
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_RF_RESET, ATUSB_REQ_TO_DEV, 0, 0,
+ NULL, 0, 1000, GFP_KERNEL);
atusb_get_and_conf_chip(atusb);
atusb_get_and_show_revision(atusb);
atusb_get_and_show_build(atusb);
@@ -1076,7 +1021,9 @@ static int atusb_probe(struct usb_interface *interface,
* explicitly. Any resets after that will send us straight to TRX_OFF,
* making the command below redundant.
*/
- atusb_write_reg(atusb, RG_TRX_STATE, STATE_FORCE_TRX_OFF);
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ STATE_FORCE_TRX_OFF, RG_TRX_STATE, NULL, 0, 1000, GFP_KERNEL);
+
msleep(1); /* reset => TRX_OFF, tTR13 = 37 us */
#if 0
@@ -1104,7 +1051,8 @@ static int atusb_probe(struct usb_interface *interface,
atusb_write_subreg(atusb, SR_RX_SAFE_MODE, 1);
#endif
- atusb_write_reg(atusb, RG_IRQ_MASK, 0xff);
+ usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ 0xff, RG_IRQ_MASK, NULL, 0, 1000, GFP_KERNEL);
ret = atusb_get_and_clear_error(atusb);
if (!ret)
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index 36f1c5aa98fc..38c217bd7c82 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -791,7 +791,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
phy->idx = idx;
INIT_LIST_HEAD(&phy->edges);
- hw->flags = IEEE802154_HW_PROMISCUOUS;
+ hw->flags = IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_RX_DROP_BAD_CKSUM;
hw->parent = dev;
err = ieee802154_register_hw(hw);
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
index 1544564bc283..87e1d43c118c 100644
--- a/drivers/net/ipa/gsi_trans.c
+++ b/drivers/net/ipa/gsi_trans.c
@@ -320,6 +320,17 @@ gsi_trans_tre_release(struct gsi_trans_info *trans_info, u32 tre_count)
atomic_add(tre_count, &trans_info->tre_avail);
}
+/* Return true if no transactions are allocated, false otherwise */
+bool gsi_channel_trans_idle(struct gsi *gsi, u32 channel_id)
+{
+ u32 tre_max = gsi_channel_tre_max(gsi, channel_id);
+ struct gsi_trans_info *trans_info;
+
+ trans_info = &gsi->channel[channel_id].trans_info;
+
+ return atomic_read(&trans_info->tre_avail) == tre_max;
+}
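
gsi_channel_trans_idle() leans on the pool's credit counter: allocating a transaction takes TREs out of tre_avail and completing it gives them back, so the channel is idle exactly when the counter has returned to the channel maximum. A small sketch of that bookkeeping pattern with C11 atomics (names and sizes are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define TRE_MAX 64U

    static atomic_uint tre_avail = TRE_MAX;

    /* Reserve 'count' credits for a transaction; fail when the ring is full. */
    static bool trans_alloc(unsigned int count)
    {
        unsigned int avail = atomic_load(&tre_avail);

        while (avail >= count)
            if (atomic_compare_exchange_weak(&tre_avail, &avail, avail - count))
                return true;
        return false;
    }

    /* Completion path returns the credits. */
    static void trans_complete(unsigned int count)
    {
        atomic_fetch_add(&tre_avail, count);
    }

    /* Idle means every credit is back, i.e. no transaction is outstanding. */
    static bool channel_idle(void)
    {
        return atomic_load(&tre_avail) == TRE_MAX;
    }

    int main(void)
    {
        printf("idle at start: %d\n", channel_idle());  /* 1 */
        trans_alloc(3);
        printf("after alloc:   %d\n", channel_idle());  /* 0 */
        trans_complete(3);
        printf("after done:    %d\n", channel_idle());  /* 1 */
        return 0;
    }
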
+
/* Allocate a GSI transaction on a channel */
struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
u32 tre_count,
diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h
index 17fd1822d8a9..af379b49299e 100644
--- a/drivers/net/ipa/gsi_trans.h
+++ b/drivers/net/ipa/gsi_trans.h
@@ -130,6 +130,16 @@ void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr);
void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool);
/**
+ * gsi_channel_trans_idle() - Return whether no transactions are allocated
+ * @gsi: GSI pointer
+ * @channel_id: Channel the transaction is associated with
+ *
+ * Return: True if no transactions are allocated, false otherwise
+ *
+ */
+bool gsi_channel_trans_idle(struct gsi *gsi, u32 channel_id);
+
+/**
* gsi_channel_trans_alloc() - Allocate a GSI transaction on a channel
* @gsi: GSI pointer
* @channel_id: Channel the transaction is associated with
diff --git a/drivers/net/ipa/ipa_data-v3.1.c b/drivers/net/ipa/ipa_data-v3.1.c
index 06ddb85f39b2..8ff351aefd23 100644
--- a/drivers/net/ipa/ipa_data-v3.1.c
+++ b/drivers/net/ipa/ipa_data-v3.1.c
@@ -101,6 +101,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.status_enable = true,
.rx = {
+ .buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
},
},
@@ -148,6 +149,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.qmap = true,
.aggregation = true,
.rx = {
+ .buffer_size = 8192,
.aggr_close_eof = true,
},
},
diff --git a/drivers/net/ipa/ipa_data-v3.5.1.c b/drivers/net/ipa/ipa_data-v3.5.1.c
index 760c22bbdf70..d1c466abddb2 100644
--- a/drivers/net/ipa/ipa_data-v3.5.1.c
+++ b/drivers/net/ipa/ipa_data-v3.5.1.c
@@ -92,6 +92,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.status_enable = true,
.rx = {
+ .buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
},
},
@@ -140,6 +141,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.qmap = true,
.aggregation = true,
.rx = {
+ .buffer_size = 8192,
.aggr_close_eof = true,
},
},
diff --git a/drivers/net/ipa/ipa_data-v4.11.c b/drivers/net/ipa/ipa_data-v4.11.c
index fea91451a0c3..b1991cc6f0ca 100644
--- a/drivers/net/ipa/ipa_data-v4.11.c
+++ b/drivers/net/ipa/ipa_data-v4.11.c
@@ -86,6 +86,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.status_enable = true,
.rx = {
+ .buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
},
},
@@ -133,6 +134,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.qmap = true,
.aggregation = true,
.rx = {
+ .buffer_size = 32768,
.aggr_close_eof = true,
},
},
diff --git a/drivers/net/ipa/ipa_data-v4.2.c b/drivers/net/ipa/ipa_data-v4.2.c
index 2a231e79d5e1..1190a43e8743 100644
--- a/drivers/net/ipa/ipa_data-v4.2.c
+++ b/drivers/net/ipa/ipa_data-v4.2.c
@@ -82,6 +82,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.status_enable = true,
.rx = {
+ .buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
},
},
@@ -130,6 +131,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.qmap = true,
.aggregation = true,
.rx = {
+ .buffer_size = 8192,
.aggr_close_eof = true,
},
},
diff --git a/drivers/net/ipa/ipa_data-v4.5.c b/drivers/net/ipa/ipa_data-v4.5.c
index 2da2c4194f2e..944f72b0f285 100644
--- a/drivers/net/ipa/ipa_data-v4.5.c
+++ b/drivers/net/ipa/ipa_data-v4.5.c
@@ -95,6 +95,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.status_enable = true,
.rx = {
+ .buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
},
},
@@ -142,6 +143,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.qmap = true,
.aggregation = true,
.rx = {
+ .buffer_size = 8192,
.aggr_close_eof = true,
},
},
diff --git a/drivers/net/ipa/ipa_data-v4.9.c b/drivers/net/ipa/ipa_data-v4.9.c
index 2421b5abb5d4..16786bff7ef8 100644
--- a/drivers/net/ipa/ipa_data-v4.9.c
+++ b/drivers/net/ipa/ipa_data-v4.9.c
@@ -87,6 +87,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.status_enable = true,
.rx = {
+ .buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
},
},
@@ -134,6 +135,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.qmap = true,
.aggregation = true,
.rx = {
+ .buffer_size = 8192,
.aggr_close_eof = true,
},
},
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index 6d329e9ce5d2..dbbeecf6df29 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -112,6 +112,7 @@ struct ipa_endpoint_tx_data {
/**
* struct ipa_endpoint_rx_data - configuration data for RX endpoints
+ * @buffer_size: requested receive buffer size (bytes)
* @pad_align: power-of-2 boundary to which packet payload is aligned
* @aggr_close_eof: whether aggregation closes on end-of-frame
*
@@ -125,6 +126,7 @@ struct ipa_endpoint_tx_data {
* a "frame" consisting of several transfers has ended.
*/
struct ipa_endpoint_rx_data {
+ u32 buffer_size;
u32 pad_align;
bool aggr_close_eof;
};
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 68291a3efd04..888e94278a84 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -25,10 +25,8 @@
#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
-#define IPA_REPLENISH_BATCH 16
-
-/* RX buffer is 1 page (or a power-of-2 contiguous pages) */
-#define IPA_RX_BUFFER_SIZE 8192 /* PAGE_SIZE > 4096 wastes a LOT */
+/* Hardware is told about receive buffers once a "batch" has been queued */
+#define IPA_REPLENISH_BATCH 16 /* Must be non-zero */
/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
@@ -75,6 +73,14 @@ struct ipa_status {
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22)
#define IPA_STATUS_FLAGS2_TAG_FMASK GENMASK_ULL(63, 16)
+static u32 aggr_byte_limit_max(enum ipa_version version)
+{
+ if (version < IPA_VERSION_4_5)
+ return field_max(aggr_byte_limit_fmask(true));
+
+ return field_max(aggr_byte_limit_fmask(false));
+}
+
static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *all_data,
const struct ipa_gsi_endpoint_data *data)
@@ -87,6 +93,9 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
return true;
if (!data->toward_ipa) {
+ u32 buffer_size;
+ u32 limit;
+
if (data->endpoint.filter_support) {
dev_err(dev, "filtering not supported for "
"RX endpoint %u\n",
@@ -94,6 +103,41 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
return false;
}
+ /* Nothing more to check for non-AP RX */
+ if (data->ee_id != GSI_EE_AP)
+ return true;
+
+ buffer_size = data->endpoint.config.rx.buffer_size;
+ /* The buffer size must hold an MTU plus overhead */
+ limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
+ if (buffer_size < limit) {
+ dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
+ data->endpoint_id, buffer_size, limit);
+ return false;
+ }
+
+ /* For an endpoint supporting receive aggregation, the
+ * aggregation byte limit defines the point at which an
+ * aggregation window will close. It is programmed into the
+ * IPA hardware as a number of KB. We don't use "hard byte
+ * limit" aggregation, so we need to supply enough space in
+ * a receive buffer to hold a complete MTU plus normal skb
+ * overhead *after* that aggregation byte limit has been
+ * crossed.
+ *
+ * This check just ensures the receive buffer size doesn't
+ * exceed what's representable in the aggregation limit field.
+ */
+ if (data->endpoint.config.aggregation) {
+ limit += SZ_1K * aggr_byte_limit_max(ipa->version);
+ if (buffer_size > limit) {
+ dev_err(dev, "RX buffer size too large for aggregated RX endpoint %u (%u > %u)\n",
+ data->endpoint_id, buffer_size, limit);
+
+ return false;
+ }
+ }
+
return true; /* Nothing more to check for RX */
}
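
The new per-endpoint buffer_size is validated against two bounds: it must be large enough for one MTU plus skb overhead, and on aggregating endpoints it must not exceed the largest programmable aggregation byte limit (in KB) plus that same headroom. A standalone sketch of the arithmetic; IPA_MTU, the overhead and the field maximum below are placeholder values, the real ones depend on the IPA version and PAGE_SIZE:

    #include <stdbool.h>
    #include <stdio.h>

    /* Placeholder values standing in for the kernel's derived constants. */
    #define IPA_MTU                 1500U
    #define IPA_RX_BUFFER_OVERHEAD  640U
    #define AGGR_BYTE_LIMIT_MAX_KB  63U
    #define SZ_1K                   1024U

    static bool rx_buffer_size_ok(unsigned int buffer_size, bool aggregation)
    {
        unsigned int lower = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

        if (buffer_size < lower)
            return false;           /* cannot hold a full MTU */

        if (aggregation) {
            unsigned int upper = lower + SZ_1K * AGGR_BYTE_LIMIT_MAX_KB;

            if (buffer_size > upper)
                return false;       /* not representable in the aggr limit field */
        }
        return true;
    }

    int main(void)
    {
        printf("8 KiB, aggregated: %d\n", rx_buffer_size_ok(8192, true));  /* 1 */
        printf("1 KiB, plain:      %d\n", rx_buffer_size_ok(1024, false)); /* 0 */
        return 0;
    }
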
@@ -156,21 +200,12 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
return true;
}
-static u32 aggr_byte_limit_max(enum ipa_version version)
-{
- if (version < IPA_VERSION_4_5)
- return field_max(aggr_byte_limit_fmask(true));
-
- return field_max(aggr_byte_limit_fmask(false));
-}
-
static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data)
{
const struct ipa_gsi_endpoint_data *dp = data;
struct device *dev = &ipa->pdev->dev;
enum ipa_endpoint_name name;
- u32 limit;
if (count > IPA_ENDPOINT_COUNT) {
dev_err(dev, "too many endpoints specified (%u > %u)\n",
@@ -178,26 +213,6 @@ static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
return false;
}
- /* The aggregation byte limit defines the point at which an
- * aggregation window will close. It is programmed into the
- * IPA hardware as a number of KB. We don't use "hard byte
- * limit" aggregation, which means that we need to supply
- * enough space in a receive buffer to hold a complete MTU
- * plus normal skb overhead *after* that aggregation byte
- * limit has been crossed.
- *
- * This check ensures we don't define a receive buffer size
- * that would exceed what we can represent in the field that
- * is used to program its size.
- */
- limit = aggr_byte_limit_max(ipa->version) * SZ_1K;
- limit += IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
- if (limit < IPA_RX_BUFFER_SIZE) {
- dev_err(dev, "buffer size too big for aggregation (%u > %u)\n",
- IPA_RX_BUFFER_SIZE, limit);
- return false;
- }
-
/* Make sure needed endpoints have defined data */
if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
dev_err(dev, "command TX endpoint not defined\n");
@@ -723,13 +738,15 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
if (endpoint->data->aggregation) {
if (!endpoint->toward_ipa) {
+ const struct ipa_endpoint_rx_data *rx_data;
bool close_eof;
u32 limit;
+ rx_data = &endpoint->data->rx;
val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
- limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
+ limit = ipa_aggr_size_kb(rx_data->buffer_size);
val |= aggr_byte_limit_encoded(version, limit);
limit = IPA_AGGR_TIME_LIMIT;
@@ -737,7 +754,7 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
/* AGGR_PKT_LIMIT is 0 (unlimited) */
- close_eof = endpoint->data->rx.aggr_close_eof;
+ close_eof = rx_data->aggr_close_eof;
val |= aggr_sw_eof_active_encoded(version, close_eof);
/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
@@ -1020,134 +1037,98 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
iowrite32(val, ipa->reg_virt + offset);
}
-static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
+static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
+ struct gsi_trans *trans)
{
- struct gsi_trans *trans;
- bool doorbell = false;
struct page *page;
+ u32 buffer_size;
u32 offset;
u32 len;
int ret;
- page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
+ buffer_size = endpoint->data->rx.buffer_size;
+ page = dev_alloc_pages(get_order(buffer_size));
if (!page)
return -ENOMEM;
- trans = ipa_endpoint_trans_alloc(endpoint, 1);
- if (!trans)
- goto err_free_pages;
-
/* Offset the buffer to make space for skb headroom */
offset = NET_SKB_PAD;
- len = IPA_RX_BUFFER_SIZE - offset;
+ len = buffer_size - offset;
ret = gsi_trans_page_add(trans, page, len, offset);
if (ret)
- goto err_trans_free;
- trans->data = page; /* transaction owns page now */
-
- if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
- doorbell = true;
- endpoint->replenish_ready = 0;
- }
-
- gsi_trans_commit(trans, doorbell);
-
- return 0;
-
-err_trans_free:
- gsi_trans_free(trans);
-err_free_pages:
- __free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
+ __free_pages(page, get_order(buffer_size));
+ else
+ trans->data = page; /* transaction owns page now */
- return -ENOMEM;
+ return ret;
}
/**
* ipa_endpoint_replenish() - Replenish endpoint receive buffers
* @endpoint: Endpoint to be replenished
- * @add_one: Whether this is replacing a just-consumed buffer
*
* The IPA hardware can hold a fixed number of receive buffers for an RX
* endpoint, based on the number of entries in the underlying channel ring
* buffer. If an endpoint's "backlog" is non-zero, it indicates how many
* more receive buffers can be supplied to the hardware. Replenishing for
- * an endpoint can be disabled, in which case requests to replenish a
- * buffer are "saved", and transferred to the backlog once it is re-enabled
- * again.
+ * an endpoint can be disabled, in which case buffers are not queued to
+ * the hardware.
*/
-static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one)
+static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
{
- struct gsi *gsi;
- u32 backlog;
- int delta;
+ struct gsi_trans *trans;
- if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags)) {
- if (add_one)
- atomic_inc(&endpoint->replenish_saved);
+ if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
return;
- }
- /* If already active, just update the backlog */
- if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags)) {
- if (add_one)
- atomic_inc(&endpoint->replenish_backlog);
+ /* Skip it if it's already active */
+ if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
return;
- }
- while (atomic_dec_not_zero(&endpoint->replenish_backlog))
- if (ipa_endpoint_replenish_one(endpoint))
+ while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
+ bool doorbell;
+
+ if (ipa_endpoint_replenish_one(endpoint, trans))
goto try_again_later;
- clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
- if (add_one)
- atomic_inc(&endpoint->replenish_backlog);
+ /* Ring the doorbell if we've got a full batch */
+ doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
+ gsi_trans_commit(trans, doorbell);
+ }
+
+ clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
return;
try_again_later:
+ gsi_trans_free(trans);
clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
- /* The last one didn't succeed, so fix the backlog */
- delta = add_one ? 2 : 1;
- backlog = atomic_add_return(delta, &endpoint->replenish_backlog);
-
/* Whenever a receive buffer transaction completes we'll try to
* replenish again. It's unlikely, but if we fail to supply even
* one buffer, nothing will trigger another replenish attempt.
- * Receive buffer transactions use one TRE, so schedule work to
- * try replenishing again if our backlog is *all* available TREs.
+ * If the hardware has no receive buffers queued, schedule work to
+ * try replenishing again.
*/
- gsi = &endpoint->ipa->gsi;
- if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
+ if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
schedule_delayed_work(&endpoint->replenish_work,
msecs_to_jiffies(1));
}
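The rework above drops the atomic backlog/saved counters: a single IPA_REPLENISH_ACTIVE bit guarantees only one context refills the ring, transactions are allocated until the channel runs out, and the doorbell is rung once per IPA_REPLENISH_BATCH commits via the free-running replenish_count. A stripped-down sketch of the single-refiller guard, with example_refill_one() standing in for the per-buffer work:

static void example_replenish(struct ipa_endpoint *endpoint)
{
	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
		return;

	/* Lock-free guard: only one context refills at a time */
	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
		return;

	while (!example_refill_one(endpoint))
		;	/* keep going until allocation or refill fails */

	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
}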
static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
- struct gsi *gsi = &endpoint->ipa->gsi;
- u32 max_backlog;
- u32 saved;
-
set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
- while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
- atomic_add(saved, &endpoint->replenish_backlog);
/* Start replenishing if hardware currently has no buffers */
- max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
- if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
- ipa_endpoint_replenish(endpoint, false);
+ if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
+ ipa_endpoint_replenish(endpoint);
}
static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
- u32 backlog;
-
clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
- while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
- atomic_add(backlog, &endpoint->replenish_saved);
}
static void ipa_endpoint_replenish_work(struct work_struct *work)
@@ -1157,7 +1138,7 @@ static void ipa_endpoint_replenish_work(struct work_struct *work)
endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
- ipa_endpoint_replenish(endpoint, false);
+ ipa_endpoint_replenish(endpoint);
}
static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
@@ -1183,15 +1164,16 @@ static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
struct page *page, u32 len)
{
+ u32 buffer_size = endpoint->data->rx.buffer_size;
struct sk_buff *skb;
/* Nothing to do if there's no netdev */
if (!endpoint->netdev)
return false;
- WARN_ON(len > SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE - NET_SKB_PAD));
+ WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));
- skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
+ skb = build_skb(page_address(page), buffer_size);
if (skb) {
/* Reserve the headroom and account for the data */
skb_reserve(skb, NET_SKB_PAD);
@@ -1289,8 +1271,9 @@ static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
struct page *page, u32 total_len)
{
+ u32 buffer_size = endpoint->data->rx.buffer_size;
void *data = page_address(page) + NET_SKB_PAD;
- u32 unused = IPA_RX_BUFFER_SIZE - total_len;
+ u32 unused = buffer_size - total_len;
u32 resid = total_len;
while (resid) {
@@ -1360,10 +1343,8 @@ static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
{
struct page *page;
- ipa_endpoint_replenish(endpoint, true);
-
if (trans->cancelled)
- return;
+ goto done;
/* Parse or build a socket buffer using the actual received length */
page = trans->data;
@@ -1371,6 +1352,8 @@ static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
ipa_endpoint_status_parse(endpoint, page, trans->len);
else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
trans->data = NULL; /* Pages have been consumed */
+done:
+ ipa_endpoint_replenish(endpoint);
}
void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
@@ -1398,8 +1381,11 @@ void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
} else {
struct page *page = trans->data;
- if (page)
- __free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
+ if (page) {
+ u32 buffer_size = endpoint->data->rx.buffer_size;
+
+ __free_pages(page, get_order(buffer_size));
+ }
}
}
@@ -1704,9 +1690,6 @@ static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
*/
clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
- atomic_set(&endpoint->replenish_saved,
- gsi_channel_tre_max(gsi, endpoint->channel_id));
- atomic_set(&endpoint->replenish_backlog, 0);
INIT_DELAYED_WORK(&endpoint->replenish_work,
ipa_endpoint_replenish_work);
}
@@ -1882,6 +1865,8 @@ u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
enum ipa_endpoint_name name;
u32 filter_map;
+ BUILD_BUG_ON(!IPA_REPLENISH_BATCH);
+
if (!ipa_endpoint_data_valid(ipa, count, data))
return 0; /* Error */
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
index 0313cdc607de..12fd5b16c18e 100644
--- a/drivers/net/ipa/ipa_endpoint.h
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -65,9 +65,7 @@ enum ipa_replenish_flag {
* @evt_ring_id: GSI event ring used by the endpoint
* @netdev: Network device pointer, if endpoint uses one
* @replenish_flags: Replenishing state flags
- * @replenish_ready: Number of replenish transactions without doorbell
- * @replenish_saved: Replenish requests held while disabled
- * @replenish_backlog: Number of buffers needed to fill hardware queue
+ * @replenish_count: Total number of replenish transactions committed
* @replenish_work: Work item used for repeated replenish failures
*/
struct ipa_endpoint {
@@ -86,9 +84,7 @@ struct ipa_endpoint {
/* Receive buffer replenishing for RX endpoints */
DECLARE_BITMAP(replenish_flags, IPA_REPLENISH_COUNT);
- u32 replenish_ready;
- atomic_t replenish_saved;
- atomic_t replenish_backlog;
+ u64 replenish_count;
struct delayed_work replenish_work; /* global wq */
};
diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c
index f2989aac47a6..db5ac7552286 100644
--- a/drivers/net/ipa/ipa_power.c
+++ b/drivers/net/ipa/ipa_power.c
@@ -35,18 +35,6 @@
#define IPA_AUTOSUSPEND_DELAY 500 /* milliseconds */
/**
- * struct ipa_interconnect - IPA interconnect information
- * @path: Interconnect path
- * @average_bandwidth: Average interconnect bandwidth (KB/second)
- * @peak_bandwidth: Peak interconnect bandwidth (KB/second)
- */
-struct ipa_interconnect {
- struct icc_path *path;
- u32 average_bandwidth;
- u32 peak_bandwidth;
-};
-
-/**
* enum ipa_power_flag - IPA power flags
* @IPA_POWER_FLAG_RESUMED: Whether resume from suspend has been signaled
* @IPA_POWER_FLAG_SYSTEM: Hardware is system (not runtime) suspended
@@ -79,164 +67,78 @@ struct ipa_power {
spinlock_t spinlock; /* used with STOPPED/STARTED power flags */
DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
u32 interconnect_count;
- struct ipa_interconnect *interconnect;
+ struct icc_bulk_data interconnect[];
};
-static int ipa_interconnect_init_one(struct device *dev,
- struct ipa_interconnect *interconnect,
- const struct ipa_interconnect_data *data)
-{
- struct icc_path *path;
-
- path = of_icc_get(dev, data->name);
- if (IS_ERR(path)) {
- int ret = PTR_ERR(path);
-
- dev_err_probe(dev, ret, "error getting %s interconnect\n",
- data->name);
-
- return ret;
- }
-
- interconnect->path = path;
- interconnect->average_bandwidth = data->average_bandwidth;
- interconnect->peak_bandwidth = data->peak_bandwidth;
-
- return 0;
-}
-
-static void ipa_interconnect_exit_one(struct ipa_interconnect *interconnect)
-{
- icc_put(interconnect->path);
- memset(interconnect, 0, sizeof(*interconnect));
-}
-
/* Initialize interconnects required for IPA operation */
-static int ipa_interconnect_init(struct ipa_power *power, struct device *dev,
+static int ipa_interconnect_init(struct ipa_power *power,
const struct ipa_interconnect_data *data)
{
- struct ipa_interconnect *interconnect;
- u32 count;
- int ret;
-
- count = power->interconnect_count;
- interconnect = kcalloc(count, sizeof(*interconnect), GFP_KERNEL);
- if (!interconnect)
- return -ENOMEM;
- power->interconnect = interconnect;
-
- while (count--) {
- ret = ipa_interconnect_init_one(dev, interconnect, data++);
- if (ret)
- goto out_unwind;
- interconnect++;
- }
-
- return 0;
-
-out_unwind:
- while (interconnect-- > power->interconnect)
- ipa_interconnect_exit_one(interconnect);
- kfree(power->interconnect);
- power->interconnect = NULL;
-
- return ret;
-}
-
-/* Inverse of ipa_interconnect_init() */
-static void ipa_interconnect_exit(struct ipa_power *power)
-{
- struct ipa_interconnect *interconnect;
-
- interconnect = power->interconnect + power->interconnect_count;
- while (interconnect-- > power->interconnect)
- ipa_interconnect_exit_one(interconnect);
- kfree(power->interconnect);
- power->interconnect = NULL;
-}
-
-/* Currently we only use one bandwidth level, so just "enable" interconnects */
-static int ipa_interconnect_enable(struct ipa *ipa)
-{
- struct ipa_interconnect *interconnect;
- struct ipa_power *power = ipa->power;
+ struct icc_bulk_data *interconnect;
int ret;
u32 i;
- interconnect = power->interconnect;
+ /* Initialize our interconnect data array for bulk operations */
+ interconnect = &power->interconnect[0];
for (i = 0; i < power->interconnect_count; i++) {
- ret = icc_set_bw(interconnect->path,
- interconnect->average_bandwidth,
- interconnect->peak_bandwidth);
- if (ret) {
- dev_err(&ipa->pdev->dev,
- "error %d enabling %s interconnect\n",
- ret, icc_get_name(interconnect->path));
- goto out_unwind;
- }
+ /* interconnect->path is filled in by of_icc_bulk_get() */
+ interconnect->name = data->name;
+ interconnect->avg_bw = data->average_bandwidth;
+ interconnect->peak_bw = data->peak_bandwidth;
+ data++;
interconnect++;
}
- return 0;
+ ret = of_icc_bulk_get(power->dev, power->interconnect_count,
+ power->interconnect);
+ if (ret)
+ return ret;
-out_unwind:
- while (interconnect-- > power->interconnect)
- (void)icc_set_bw(interconnect->path, 0, 0);
+ /* All interconnects are initially disabled */
+ icc_bulk_disable(power->interconnect_count, power->interconnect);
+
+ /* Set the bandwidth values to be used when enabled */
+ ret = icc_bulk_set_bw(power->interconnect_count, power->interconnect);
+ if (ret)
+ icc_bulk_put(power->interconnect_count, power->interconnect);
return ret;
}
-/* To disable an interconnect, we just its bandwidth to 0 */
-static int ipa_interconnect_disable(struct ipa *ipa)
+/* Inverse of ipa_interconnect_init() */
+static void ipa_interconnect_exit(struct ipa_power *power)
{
- struct ipa_interconnect *interconnect;
- struct ipa_power *power = ipa->power;
- struct device *dev = &ipa->pdev->dev;
- int result = 0;
- u32 count;
- int ret;
-
- count = power->interconnect_count;
- interconnect = power->interconnect + count;
- while (count--) {
- interconnect--;
- ret = icc_set_bw(interconnect->path, 0, 0);
- if (ret) {
- dev_err(dev, "error %d disabling %s interconnect\n",
- ret, icc_get_name(interconnect->path));
- /* Try to disable all; record only the first error */
- if (!result)
- result = ret;
- }
- }
-
- return result;
+ icc_bulk_put(power->interconnect_count, power->interconnect);
}
/* Enable IPA power, enabling interconnects and the core clock */
static int ipa_power_enable(struct ipa *ipa)
{
+ struct ipa_power *power = ipa->power;
int ret;
- ret = ipa_interconnect_enable(ipa);
+ ret = icc_bulk_enable(power->interconnect_count, power->interconnect);
if (ret)
return ret;
- ret = clk_prepare_enable(ipa->power->core);
+ ret = clk_prepare_enable(power->core);
if (ret) {
- dev_err(&ipa->pdev->dev, "error %d enabling core clock\n", ret);
- (void)ipa_interconnect_disable(ipa);
+ dev_err(power->dev, "error %d enabling core clock\n", ret);
+ icc_bulk_disable(power->interconnect_count,
+ power->interconnect);
}
return ret;
}
/* Inverse of ipa_power_enable() */
-static int ipa_power_disable(struct ipa *ipa)
+static void ipa_power_disable(struct ipa *ipa)
{
- clk_disable_unprepare(ipa->power->core);
+ struct ipa_power *power = ipa->power;
- return ipa_interconnect_disable(ipa);
+ clk_disable_unprepare(power->core);
+
+ icc_bulk_disable(power->interconnect_count, power->interconnect);
}
static int ipa_runtime_suspend(struct device *dev)
@@ -250,7 +152,9 @@ static int ipa_runtime_suspend(struct device *dev)
gsi_suspend(&ipa->gsi);
}
- return ipa_power_disable(ipa);
+ ipa_power_disable(ipa);
+
+ return 0;
}
static int ipa_runtime_resume(struct device *dev)
@@ -453,6 +357,7 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
{
struct ipa_power *power;
struct clk *clk;
+ size_t size;
int ret;
clk = clk_get(dev, "core");
@@ -469,7 +374,8 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
goto err_clk_put;
}
- power = kzalloc(sizeof(*power), GFP_KERNEL);
+ size = struct_size(power, interconnect, data->interconnect_count);
+ power = kzalloc(size, GFP_KERNEL);
if (!power) {
ret = -ENOMEM;
goto err_clk_put;
@@ -479,7 +385,7 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
spin_lock_init(&power->spinlock);
power->interconnect_count = data->interconnect_count;
- ret = ipa_interconnect_init(power, dev, data->interconnect_data);
+ ret = ipa_interconnect_init(power, data->interconnect_data);
if (ret)
goto err_kfree;
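The conversion above replaces the driver's private interconnect array with the interconnect framework's bulk helpers: the icc_bulk_data entries live in a flexible array sized with struct_size(), are looked up together with of_icc_bulk_get(), and are enabled/disabled as a group. A condensed sketch of that shape (example_power, the path names and the two-path count are illustrative, not taken from the patch):

#include <linux/err.h>
#include <linux/interconnect.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct example_power {
	struct device *dev;
	u32 interconnect_count;
	struct icc_bulk_data interconnect[];	/* flexible array member */
};

static struct example_power *example_power_init(struct device *dev)
{
	struct example_power *power;
	int ret;

	/* struct_size() accounts for the trailing flexible array */
	power = kzalloc(struct_size(power, interconnect, 2), GFP_KERNEL);
	if (!power)
		return ERR_PTR(-ENOMEM);
	power->dev = dev;
	power->interconnect_count = 2;
	power->interconnect[0].name = "memory";
	power->interconnect[1].name = "config";

	/* Fills in ->path for every entry, or fails as a whole */
	ret = of_icc_bulk_get(dev, power->interconnect_count,
			      power->interconnect);
	if (ret) {
		kfree(power);
		return ERR_PTR(ret);
	}

	return power;
}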
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index c613900c3811..6ffb27419e64 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -555,7 +555,7 @@ static void ipvlan_multicast_enqueue(struct ipvl_port *port,
schedule_work(&port->wq);
} else {
spin_unlock(&port->backlog.lock);
- atomic_long_inc(&skb->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(skb->dev);
kfree_skb(skb);
}
}
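The one-line change above moves drop accounting from the old atomic_long rx_dropped field onto the per-CPU core stats helpers; dev_core_stats_rx_dropped_inc() lazily allocates the per-CPU counters and the result is folded into the device's rx_dropped by dev_get_stats(). A sketch of the call pattern on a drop path (example_rx_drop() is illustrative):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void example_rx_drop(struct net_device *dev, struct sk_buff *skb)
{
	/* Per-CPU drop counter, no shared atomic cache line */
	dev_core_stats_rx_dropped_inc(dev);
	kfree_skb(skb);
}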
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index ed0edf5884ef..720394c0639b 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -74,11 +74,11 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
skb_tx_timestamp(skb);
/* do not fool net_timestamp_check() with various clock bases */
- skb->tstamp = 0;
+ skb_clear_tstamp(skb);
skb_orphan(skb);
- /* Before queueing this packet to netif_rx(),
+ /* Before queueing this packet to __netif_rx(),
* make sure dst is refcounted.
*/
skb_dst_force(skb);
@@ -86,7 +86,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
skb->protocol = eth_type_trans(skb, dev);
len = skb->len;
- if (likely(netif_rx(skb) == NET_RX_SUCCESS))
+ if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
dev_lstats_add(dev, len);
return NETDEV_TX_OK;
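loopback_xmit() runs in the transmit path with bottom halves already disabled, so it can call the new __netif_rx() directly; netif_rx() stays the general-purpose entry point and disables BHs around the same work. A small sketch of the distinction (example_deliver() is illustrative):

#include <linux/netdevice.h>

static void example_deliver(struct sk_buff *skb, bool in_bh_context)
{
	if (in_bh_context)
		__netif_rx(skb);	/* caller already has BHs disabled */
	else
		netif_rx(skb);		/* safe from process context too */
}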
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 3d0874331763..832f09ac075e 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -1033,7 +1033,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
else
nskb->pkt_type = PACKET_MULTICAST;
- netif_rx(nskb);
+ __netif_rx(nskb);
}
continue;
}
@@ -1056,7 +1056,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
nskb->dev = ndev;
- if (netif_rx(nskb) == NET_RX_SUCCESS) {
+ if (__netif_rx(nskb) == NET_RX_SUCCESS) {
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsUntagged++;
u64_stats_update_end(&secy_stats->syncp);
@@ -1288,7 +1288,7 @@ nosci:
macsec_reset_skb(nskb, macsec->secy.netdev);
- ret = netif_rx(nskb);
+ ret = __netif_rx(nskb);
if (ret == NET_RX_SUCCESS) {
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsUnknownSCI++;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 6ef5f77be4d0..069e8824c264 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -285,7 +285,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
if (likely(nskb))
err = macvlan_broadcast_one(nskb, vlan, eth,
mode == MACVLAN_MODE_BRIDGE) ?:
- netif_rx_ni(nskb);
+ netif_rx(nskb);
macvlan_count_rx(vlan, skb->len + ETH_HLEN,
err == NET_RX_SUCCESS, true);
}
@@ -371,7 +371,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
free_nskb:
kfree_skb(nskb);
err:
- atomic_long_inc(&skb->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(skb->dev);
}
static void macvlan_flush_sources(struct macvlan_port *port,
@@ -410,7 +410,7 @@ static void macvlan_forward_source_one(struct sk_buff *skb,
if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, dev->dev_addr))
nskb->pkt_type = PACKET_HOST;
- ret = netif_rx(nskb);
+ ret = __netif_rx(nskb);
macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, false);
}
@@ -468,7 +468,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
/* forward to original port. */
vlan = src;
ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?:
- netif_rx(skb);
+ __netif_rx(skb);
handle_res = RX_HANDLER_CONSUMED;
goto out;
}
@@ -889,7 +889,7 @@ static void macvlan_set_lockdep_class(struct net_device *dev)
static int macvlan_init(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
- const struct net_device *lowerdev = vlan->lowerdev;
+ struct net_device *lowerdev = vlan->lowerdev;
struct macvlan_port *port = vlan->port;
dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
@@ -911,6 +911,9 @@ static int macvlan_init(struct net_device *dev)
port->count += 1;
+ /* Get macvlan's reference to lowerdev */
+ dev_hold_track(lowerdev, &vlan->dev_tracker, GFP_KERNEL);
+
return 0;
}
@@ -1173,6 +1176,14 @@ static const struct net_device_ops macvlan_netdev_ops = {
.ndo_features_check = passthru_features_check,
};
+static void macvlan_dev_free(struct net_device *dev)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+
+ /* Get rid of the macvlan's reference to lowerdev */
+ dev_put_track(vlan->lowerdev, &vlan->dev_tracker);
+}
+
void macvlan_common_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -1184,6 +1195,7 @@ void macvlan_common_setup(struct net_device *dev)
dev->priv_flags |= IFF_UNICAST_FLT | IFF_CHANGE_PROTO_DOWN;
dev->netdev_ops = &macvlan_netdev_ops;
dev->needs_free_netdev = true;
+ dev->priv_destructor = macvlan_dev_free;
dev->header_ops = &macvlan_hard_header_ops;
dev->ethtool_ops = &macvlan_ethtool_ops;
}
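The macvlan change above takes a tracked reference on the lower device in ndo_init and releases it from the new priv_destructor, so the reference lives exactly as long as the netdev does. A minimal sketch of the dev_hold_track()/dev_put_track() pairing, assuming a private struct with its own netdevice_tracker (all names here are illustrative):

#include <linux/netdevice.h>

struct example_priv {
	struct net_device *lowerdev;
	netdevice_tracker dev_tracker;
};

static int example_init(struct net_device *dev, struct net_device *lower)
{
	struct example_priv *priv = netdev_priv(dev);

	priv->lowerdev = lower;
	/* Tracked reference: leaks are attributable when
	 * CONFIG_NET_DEV_REFCNT_TRACKER is enabled.
	 */
	dev_hold_track(lower, &priv->dev_tracker, GFP_KERNEL);

	return 0;
}

static void example_destructor(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	dev_put_track(priv->lowerdev, &priv->dev_tracker);
}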
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 6b12902a803f..cecf8c63096c 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -133,11 +133,17 @@ static void macvtap_setup(struct net_device *dev)
dev->tx_queue_len = TUN_READQ_SIZE;
}
+static struct net *macvtap_link_net(const struct net_device *dev)
+{
+ return dev_net(macvlan_dev_real_dev(dev));
+}
+
static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
.kind = "macvtap",
.setup = macvtap_setup,
.newlink = macvtap_newlink,
.dellink = macvtap_dellink,
+ .get_link_net = macvtap_link_net,
.priv_size = sizeof(struct macvtap_dev),
};
diff --git a/drivers/net/mctp/Kconfig b/drivers/net/mctp/Kconfig
index 2929471395ae..dc71657d9184 100644
--- a/drivers/net/mctp/Kconfig
+++ b/drivers/net/mctp/Kconfig
@@ -21,6 +21,18 @@ config MCTP_SERIAL
Say y here if you need to connect to MCTP endpoints over serial. To
compile as a module, use m; the module will be called mctp-serial.
+config MCTP_TRANSPORT_I2C
+ tristate "MCTP SMBus/I2C transport"
+ # i2c-mux is optional, but we must build as a module if i2c-mux is a module
+ depends on I2C_MUX || !I2C_MUX
+ depends on I2C
+ depends on I2C_SLAVE
+ select MCTP_FLOWS
+ help
+ Provides a driver to access MCTP devices over SMBus/I2C transport,
+ from DMTF specification DSP0237. An MCTP protocol network device is
+ created for each I2C bus that has been assigned an mctp-i2c device.
+
endmenu
endif
diff --git a/drivers/net/mctp/Makefile b/drivers/net/mctp/Makefile
index d32622613ce4..1ca3e6028f77 100644
--- a/drivers/net/mctp/Makefile
+++ b/drivers/net/mctp/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_MCTP_SERIAL) += mctp-serial.o
+obj-$(CONFIG_MCTP_TRANSPORT_I2C) += mctp-i2c.o
diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c
new file mode 100644
index 000000000000..baf7afac7857
--- /dev/null
+++ b/drivers/net/mctp/mctp-i2c.c
@@ -0,0 +1,1082 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Management Controller Transport Protocol (MCTP)
+ * Implements DMTF specification
+ * "DSP0237 Management Component Transport Protocol (MCTP) SMBus/I2C
+ * Transport Binding"
+ * https://www.dmtf.org/sites/default/files/standards/documents/DSP0237_1.2.0.pdf
+ *
+ * A netdev is created for each I2C bus that handles MCTP. In the case of an I2C
+ * mux topology a single I2C client is attached to the root of the mux topology,
+ * shared between all mux I2C busses underneath. For non-mux cases an I2C client
+ * is attached per netdev.
+ *
+ * mctp-i2c-controller.yml devicetree binding has further details.
+ *
+ * Copyright (c) 2022 Code Construct
+ * Copyright (c) 2022 Google
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/if_arp.h>
+#include <net/mctp.h>
+#include <net/mctpdevice.h>
+
+/* byte_count is limited to u8 */
+#define MCTP_I2C_MAXBLOCK 255
+/* One byte is taken by source_slave */
+#define MCTP_I2C_MAXMTU (MCTP_I2C_MAXBLOCK - 1)
+#define MCTP_I2C_MINMTU (64 + 4)
+/* Allow space for dest_address, command, byte_count, data, PEC */
+#define MCTP_I2C_BUFSZ (3 + MCTP_I2C_MAXBLOCK + 1)
+#define MCTP_I2C_MINLEN 8
+#define MCTP_I2C_COMMANDCODE 0x0f
+#define MCTP_I2C_TX_WORK_LEN 100
+/* Sufficient for 64kB at min mtu */
+#define MCTP_I2C_TX_QUEUE_LEN 1100
+
+#define MCTP_I2C_OF_PROP "mctp-controller"
+
+enum {
+ MCTP_I2C_FLOW_STATE_NEW = 0,
+ MCTP_I2C_FLOW_STATE_ACTIVE,
+};
+
+/* List of all struct mctp_i2c_client
+ * Lock protects driver_clients and also prevents adding/removing adapters
+ * during mctp_i2c_client probe/remove.
+ */
+static DEFINE_MUTEX(driver_clients_lock);
+static LIST_HEAD(driver_clients);
+
+struct mctp_i2c_client;
+
+/* The netdev structure. One of these per I2C adapter. */
+struct mctp_i2c_dev {
+ struct net_device *ndev;
+ struct i2c_adapter *adapter;
+ struct mctp_i2c_client *client;
+ struct list_head list; /* For mctp_i2c_client.devs */
+
+ size_t rx_pos;
+ u8 rx_buffer[MCTP_I2C_BUFSZ];
+ struct completion rx_done;
+
+ struct task_struct *tx_thread;
+ wait_queue_head_t tx_wq;
+ struct sk_buff_head tx_queue;
+ u8 tx_scratch[MCTP_I2C_BUFSZ];
+
+ /* A fake entry in our tx queue to perform an unlock operation */
+ struct sk_buff unlock_marker;
+
+ /* Spinlock protects i2c_lock_count, release_count, allow_rx */
+ spinlock_t lock;
+ int i2c_lock_count;
+ int release_count;
+ /* Indicates that the netif is ready to receive incoming packets */
+ bool allow_rx;
+
+};
+
+/* The i2c client structure. One per hardware i2c bus at the top of the
+ * mux tree, shared by multiple netdevs
+ */
+struct mctp_i2c_client {
+ struct i2c_client *client;
+ u8 lladdr;
+
+ struct mctp_i2c_dev *sel;
+ struct list_head devs;
+ spinlock_t sel_lock; /* Protects sel and devs */
+
+ struct list_head list; /* For driver_clients */
+};
+
+/* Header on the wire. */
+struct mctp_i2c_hdr {
+ u8 dest_slave;
+ u8 command;
+ /* Count of bytes following byte_count, excluding PEC */
+ u8 byte_count;
+ u8 source_slave;
+};
+
+static int mctp_i2c_recv(struct mctp_i2c_dev *midev);
+static int mctp_i2c_slave_cb(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val);
+static void mctp_i2c_ndo_uninit(struct net_device *dev);
+static int mctp_i2c_ndo_open(struct net_device *dev);
+
+static struct i2c_adapter *mux_root_adapter(struct i2c_adapter *adap)
+{
+#if IS_ENABLED(CONFIG_I2C_MUX)
+ return i2c_root_adapter(&adap->dev);
+#else
+ /* In non-mux config all i2c adapters are root adapters */
+ return adap;
+#endif
+}
+
+/* Creates a new i2c slave device attached to the root adapter.
+ * Sets up the slave callback.
+ * Must be called with a client on a root adapter.
+ */
+static struct mctp_i2c_client *mctp_i2c_new_client(struct i2c_client *client)
+{
+ struct mctp_i2c_client *mcli = NULL;
+ struct i2c_adapter *root = NULL;
+ int rc;
+
+ if (client->flags & I2C_CLIENT_TEN) {
+ dev_err(&client->dev, "failed, MCTP requires a 7-bit I2C address, addr=0x%x\n",
+ client->addr);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ root = mux_root_adapter(client->adapter);
+ if (!root) {
+ dev_err(&client->dev, "failed to find root adapter\n");
+ rc = -ENOENT;
+ goto err;
+ }
+ if (root != client->adapter) {
+ dev_err(&client->dev,
+ "A mctp-i2c-controller client cannot be placed on an I2C mux adapter.\n"
+ " It should be placed on the mux tree root adapter\n"
+ " then set mctp-controller property on adapters to attach\n");
+ rc = -EINVAL;
+ goto err;
+ }
+
+ mcli = kzalloc(sizeof(*mcli), GFP_KERNEL);
+ if (!mcli) {
+ rc = -ENOMEM;
+ goto err;
+ }
+ spin_lock_init(&mcli->sel_lock);
+ INIT_LIST_HEAD(&mcli->devs);
+ INIT_LIST_HEAD(&mcli->list);
+ mcli->lladdr = client->addr & 0xff;
+ mcli->client = client;
+ i2c_set_clientdata(client, mcli);
+
+ rc = i2c_slave_register(mcli->client, mctp_i2c_slave_cb);
+ if (rc < 0) {
+ dev_err(&client->dev, "i2c register failed %d\n", rc);
+ mcli->client = NULL;
+ i2c_set_clientdata(client, NULL);
+ goto err;
+ }
+
+ return mcli;
+err:
+ if (mcli) {
+ if (mcli->client)
+ i2c_unregister_device(mcli->client);
+ kfree(mcli);
+ }
+ return ERR_PTR(rc);
+}
+
+static void mctp_i2c_free_client(struct mctp_i2c_client *mcli)
+{
+ int rc;
+
+ WARN_ON(!mutex_is_locked(&driver_clients_lock));
+ WARN_ON(!list_empty(&mcli->devs));
+ WARN_ON(mcli->sel); /* sanity check, no locking */
+
+ rc = i2c_slave_unregister(mcli->client);
+ /* Leak if it fails; we can't propagate errors upwards */
+ if (rc < 0)
+ dev_err(&mcli->client->dev, "i2c unregister failed %d\n", rc);
+ else
+ kfree(mcli);
+}
+
+/* Switch the mctp i2c device to receive responses.
+ * Call with sel_lock held
+ */
+static void __mctp_i2c_device_select(struct mctp_i2c_client *mcli,
+ struct mctp_i2c_dev *midev)
+{
+ assert_spin_locked(&mcli->sel_lock);
+ if (midev)
+ dev_hold(midev->ndev);
+ if (mcli->sel)
+ dev_put(mcli->sel->ndev);
+ mcli->sel = midev;
+}
+
+/* Switch the mctp i2c device to receive responses */
+static void mctp_i2c_device_select(struct mctp_i2c_client *mcli,
+ struct mctp_i2c_dev *midev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mcli->sel_lock, flags);
+ __mctp_i2c_device_select(mcli, midev);
+ spin_unlock_irqrestore(&mcli->sel_lock, flags);
+}
+
+static int mctp_i2c_slave_cb(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val)
+{
+ struct mctp_i2c_client *mcli = i2c_get_clientdata(client);
+ struct mctp_i2c_dev *midev = NULL;
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&mcli->sel_lock, flags);
+ midev = mcli->sel;
+ if (midev)
+ dev_hold(midev->ndev);
+ spin_unlock_irqrestore(&mcli->sel_lock, flags);
+
+ if (!midev)
+ return 0;
+
+ switch (event) {
+ case I2C_SLAVE_WRITE_RECEIVED:
+ if (midev->rx_pos < MCTP_I2C_BUFSZ) {
+ midev->rx_buffer[midev->rx_pos] = *val;
+ midev->rx_pos++;
+ } else {
+ midev->ndev->stats.rx_over_errors++;
+ }
+
+ break;
+ case I2C_SLAVE_WRITE_REQUESTED:
+ /* dest_slave as first byte */
+ midev->rx_buffer[0] = mcli->lladdr << 1;
+ midev->rx_pos = 1;
+ break;
+ case I2C_SLAVE_STOP:
+ rc = mctp_i2c_recv(midev);
+ break;
+ default:
+ break;
+ }
+
+ dev_put(midev->ndev);
+ return rc;
+}
+
+/* Processes incoming data that has been accumulated by the slave cb */
+static int mctp_i2c_recv(struct mctp_i2c_dev *midev)
+{
+ struct net_device *ndev = midev->ndev;
+ struct mctp_i2c_hdr *hdr;
+ struct mctp_skb_cb *cb;
+ struct sk_buff *skb;
+ unsigned long flags;
+ u8 pec, calc_pec;
+ size_t recvlen;
+ int status;
+
+ /* + 1 for the PEC */
+ if (midev->rx_pos < MCTP_I2C_MINLEN + 1) {
+ ndev->stats.rx_length_errors++;
+ return -EINVAL;
+ }
+ /* recvlen excludes PEC */
+ recvlen = midev->rx_pos - 1;
+
+ hdr = (void *)midev->rx_buffer;
+ if (hdr->command != MCTP_I2C_COMMANDCODE) {
+ ndev->stats.rx_dropped++;
+ return -EINVAL;
+ }
+
+ if (hdr->byte_count + offsetof(struct mctp_i2c_hdr, source_slave) != recvlen) {
+ ndev->stats.rx_length_errors++;
+ return -EINVAL;
+ }
+
+ pec = midev->rx_buffer[midev->rx_pos - 1];
+ calc_pec = i2c_smbus_pec(0, midev->rx_buffer, recvlen);
+ if (pec != calc_pec) {
+ ndev->stats.rx_crc_errors++;
+ return -EINVAL;
+ }
+
+ skb = netdev_alloc_skb(ndev, recvlen);
+ if (!skb) {
+ ndev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
+ skb->protocol = htons(ETH_P_MCTP);
+ skb_put_data(skb, midev->rx_buffer, recvlen);
+ skb_reset_mac_header(skb);
+ skb_pull(skb, sizeof(struct mctp_i2c_hdr));
+ skb_reset_network_header(skb);
+
+ cb = __mctp_cb(skb);
+ cb->halen = 1;
+ cb->haddr[0] = hdr->source_slave >> 1;
+
+ /* We need to ensure that the netif is not used once netdev
+ * unregister occurs
+ */
+ spin_lock_irqsave(&midev->lock, flags);
+ if (midev->allow_rx) {
+ reinit_completion(&midev->rx_done);
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ status = netif_rx(skb);
+ complete(&midev->rx_done);
+ } else {
+ status = NET_RX_DROP;
+ spin_unlock_irqrestore(&midev->lock, flags);
+ }
+
+ if (status == NET_RX_SUCCESS) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += recvlen;
+ } else {
+ ndev->stats.rx_dropped++;
+ }
+ return 0;
+}
+
+enum mctp_i2c_flow_state {
+ MCTP_I2C_TX_FLOW_INVALID,
+ MCTP_I2C_TX_FLOW_NONE,
+ MCTP_I2C_TX_FLOW_NEW,
+ MCTP_I2C_TX_FLOW_EXISTING,
+};
+
+static enum mctp_i2c_flow_state
+mctp_i2c_get_tx_flow_state(struct mctp_i2c_dev *midev, struct sk_buff *skb)
+{
+ enum mctp_i2c_flow_state state;
+ struct mctp_sk_key *key;
+ struct mctp_flow *flow;
+ unsigned long flags;
+
+ flow = skb_ext_find(skb, SKB_EXT_MCTP);
+ if (!flow)
+ return MCTP_I2C_TX_FLOW_NONE;
+
+ key = flow->key;
+ if (!key)
+ return MCTP_I2C_TX_FLOW_NONE;
+
+ spin_lock_irqsave(&key->lock, flags);
+ /* If the key is present but invalid, we're unlikely to be able
+ * to handle the flow at all; just drop now
+ */
+ if (!key->valid) {
+ state = MCTP_I2C_TX_FLOW_INVALID;
+
+ } else if (key->dev_flow_state == MCTP_I2C_FLOW_STATE_NEW) {
+ key->dev_flow_state = MCTP_I2C_FLOW_STATE_ACTIVE;
+ state = MCTP_I2C_TX_FLOW_NEW;
+ } else {
+ state = MCTP_I2C_TX_FLOW_EXISTING;
+ }
+
+ spin_unlock_irqrestore(&key->lock, flags);
+
+ return state;
+}
+
+/* We're not contending with ourselves here; we only need to exclude other
+ * i2c clients from using the bus. refcounts are simply to prevent
+ * recursive locking.
+ */
+static void mctp_i2c_lock_nest(struct mctp_i2c_dev *midev)
+{
+ unsigned long flags;
+ bool lock;
+
+ spin_lock_irqsave(&midev->lock, flags);
+ lock = midev->i2c_lock_count == 0;
+ midev->i2c_lock_count++;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ if (lock)
+ i2c_lock_bus(midev->adapter, I2C_LOCK_SEGMENT);
+}
+
+static void mctp_i2c_unlock_nest(struct mctp_i2c_dev *midev)
+{
+ unsigned long flags;
+ bool unlock;
+
+ spin_lock_irqsave(&midev->lock, flags);
+ if (!WARN_ONCE(midev->i2c_lock_count == 0, "lock count underflow!"))
+ midev->i2c_lock_count--;
+ unlock = midev->i2c_lock_count == 0;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ if (unlock)
+ i2c_unlock_bus(midev->adapter, I2C_LOCK_SEGMENT);
+}
+
+/* Unlocks the bus if was previously locked, used for cleanup */
+static void mctp_i2c_unlock_reset(struct mctp_i2c_dev *midev)
+{
+ unsigned long flags;
+ bool unlock;
+
+ spin_lock_irqsave(&midev->lock, flags);
+ unlock = midev->i2c_lock_count > 0;
+ midev->i2c_lock_count = 0;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ if (unlock)
+ i2c_unlock_bus(midev->adapter, I2C_LOCK_SEGMENT);
+}
+
+static void mctp_i2c_xmit(struct mctp_i2c_dev *midev, struct sk_buff *skb)
+{
+ struct net_device_stats *stats = &midev->ndev->stats;
+ enum mctp_i2c_flow_state fs;
+ struct mctp_i2c_hdr *hdr;
+ struct i2c_msg msg = {0};
+ u8 *pecp;
+ int rc;
+
+ fs = mctp_i2c_get_tx_flow_state(midev, skb);
+
+ hdr = (void *)skb_mac_header(skb);
+ /* Sanity check that packet contents match skb length,
+ * and can't exceed MCTP_I2C_BUFSZ
+ */
+ if (skb->len != hdr->byte_count + 3) {
+ dev_warn_ratelimited(&midev->adapter->dev,
+ "Bad tx length %d vs skb %u\n",
+ hdr->byte_count + 3, skb->len);
+ return;
+ }
+
+ if (skb_tailroom(skb) >= 1) {
+ /* Linear case with space, we can just append the PEC */
+ skb_put(skb, 1);
+ } else {
+ /* Otherwise need to copy the buffer */
+ skb_copy_bits(skb, 0, midev->tx_scratch, skb->len);
+ hdr = (void *)midev->tx_scratch;
+ }
+
+ pecp = (void *)&hdr->source_slave + hdr->byte_count;
+ *pecp = i2c_smbus_pec(0, (u8 *)hdr, hdr->byte_count + 3);
+ msg.buf = (void *)&hdr->command;
+ /* command, bytecount, data, pec */
+ msg.len = 2 + hdr->byte_count + 1;
+ msg.addr = hdr->dest_slave >> 1;
+
+ switch (fs) {
+ case MCTP_I2C_TX_FLOW_NONE:
+ /* no flow: full lock & unlock */
+ mctp_i2c_lock_nest(midev);
+ mctp_i2c_device_select(midev->client, midev);
+ rc = __i2c_transfer(midev->adapter, &msg, 1);
+ mctp_i2c_unlock_nest(midev);
+ break;
+
+ case MCTP_I2C_TX_FLOW_NEW:
+ /* new flow: lock, tx, but don't unlock; that will happen
+ * on flow release
+ */
+ mctp_i2c_lock_nest(midev);
+ mctp_i2c_device_select(midev->client, midev);
+ fallthrough;
+
+ case MCTP_I2C_TX_FLOW_EXISTING:
+ /* existing flow: we already have the lock; just tx */
+ rc = __i2c_transfer(midev->adapter, &msg, 1);
+ break;
+
+ case MCTP_I2C_TX_FLOW_INVALID:
+ return;
+ }
+
+ if (rc < 0) {
+ dev_warn_ratelimited(&midev->adapter->dev,
+ "__i2c_transfer failed %d\n", rc);
+ stats->tx_errors++;
+ } else {
+ stats->tx_bytes += skb->len;
+ stats->tx_packets++;
+ }
+}
+
+static void mctp_i2c_flow_release(struct mctp_i2c_dev *midev)
+{
+ unsigned long flags;
+ bool unlock;
+
+ spin_lock_irqsave(&midev->lock, flags);
+ if (midev->release_count > midev->i2c_lock_count) {
+ WARN_ONCE(1, "release count overflow");
+ midev->release_count = midev->i2c_lock_count;
+ }
+
+ midev->i2c_lock_count -= midev->release_count;
+ unlock = midev->i2c_lock_count == 0 && midev->release_count > 0;
+ midev->release_count = 0;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ if (unlock)
+ i2c_unlock_bus(midev->adapter, I2C_LOCK_SEGMENT);
+}
+
+static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, const void *daddr,
+ const void *saddr, unsigned int len)
+{
+ struct mctp_i2c_hdr *hdr;
+ struct mctp_hdr *mhdr;
+ u8 lldst, llsrc;
+
+ if (len > MCTP_I2C_MAXMTU)
+ return -EMSGSIZE;
+
+ lldst = *((u8 *)daddr);
+ llsrc = *((u8 *)saddr);
+
+ skb_push(skb, sizeof(struct mctp_i2c_hdr));
+ skb_reset_mac_header(skb);
+ hdr = (void *)skb_mac_header(skb);
+ mhdr = mctp_hdr(skb);
+ hdr->dest_slave = (lldst << 1) & 0xff;
+ hdr->command = MCTP_I2C_COMMANDCODE;
+ hdr->byte_count = len + 1;
+ hdr->source_slave = ((llsrc << 1) & 0xff) | 0x01;
+ mhdr->ver = 0x01;
+
+ return 0;
+}
+
+static int mctp_i2c_tx_thread(void *data)
+{
+ struct mctp_i2c_dev *midev = data;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ for (;;) {
+ if (kthread_should_stop())
+ break;
+
+ spin_lock_irqsave(&midev->tx_queue.lock, flags);
+ skb = __skb_dequeue(&midev->tx_queue);
+ if (netif_queue_stopped(midev->ndev))
+ netif_wake_queue(midev->ndev);
+ spin_unlock_irqrestore(&midev->tx_queue.lock, flags);
+
+ if (skb == &midev->unlock_marker) {
+ mctp_i2c_flow_release(midev);
+
+ } else if (skb) {
+ mctp_i2c_xmit(midev, skb);
+ kfree_skb(skb);
+
+ } else {
+ wait_event_idle(midev->tx_wq,
+ !skb_queue_empty(&midev->tx_queue) ||
+ kthread_should_stop());
+ }
+ }
+
+ return 0;
+}
+
+static netdev_tx_t mctp_i2c_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct mctp_i2c_dev *midev = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&midev->tx_queue.lock, flags);
+ if (skb_queue_len(&midev->tx_queue) >= MCTP_I2C_TX_WORK_LEN) {
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&midev->tx_queue.lock, flags);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ __skb_queue_tail(&midev->tx_queue, skb);
+ if (skb_queue_len(&midev->tx_queue) == MCTP_I2C_TX_WORK_LEN)
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&midev->tx_queue.lock, flags);
+
+ wake_up(&midev->tx_wq);
+ return NETDEV_TX_OK;
+}
+
+static void mctp_i2c_release_flow(struct mctp_dev *mdev,
+ struct mctp_sk_key *key)
+
+{
+ struct mctp_i2c_dev *midev = netdev_priv(mdev->dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&midev->lock, flags);
+ midev->release_count++;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ /* Ensure we have a release operation queued, through the fake
+ * marker skb
+ */
+ spin_lock(&midev->tx_queue.lock);
+ if (!midev->unlock_marker.next)
+ __skb_queue_tail(&midev->tx_queue, &midev->unlock_marker);
+ spin_unlock(&midev->tx_queue.lock);
+
+ wake_up(&midev->tx_wq);
+}
+
+static const struct net_device_ops mctp_i2c_ops = {
+ .ndo_start_xmit = mctp_i2c_start_xmit,
+ .ndo_uninit = mctp_i2c_ndo_uninit,
+ .ndo_open = mctp_i2c_ndo_open,
+};
+
+static const struct header_ops mctp_i2c_headops = {
+ .create = mctp_i2c_header_create,
+};
+
+static const struct mctp_netdev_ops mctp_i2c_mctp_ops = {
+ .release_flow = mctp_i2c_release_flow,
+};
+
+static void mctp_i2c_net_setup(struct net_device *dev)
+{
+ dev->type = ARPHRD_MCTP;
+
+ dev->mtu = MCTP_I2C_MAXMTU;
+ dev->min_mtu = MCTP_I2C_MINMTU;
+ dev->max_mtu = MCTP_I2C_MAXMTU;
+ dev->tx_queue_len = MCTP_I2C_TX_QUEUE_LEN;
+
+ dev->hard_header_len = sizeof(struct mctp_i2c_hdr);
+ dev->addr_len = 1;
+
+ dev->netdev_ops = &mctp_i2c_ops;
+ dev->header_ops = &mctp_i2c_headops;
+}
+
+/* Populates the mctp_i2c_dev priv struct for a netdev.
+ * Returns an error pointer on failure.
+ */
+static struct mctp_i2c_dev *mctp_i2c_midev_init(struct net_device *dev,
+ struct mctp_i2c_client *mcli,
+ struct i2c_adapter *adap)
+{
+ struct mctp_i2c_dev *midev = netdev_priv(dev);
+ unsigned long flags;
+
+ midev->tx_thread = kthread_create(mctp_i2c_tx_thread, midev,
+ "%s/tx", dev->name);
+ if (IS_ERR(midev->tx_thread))
+ return ERR_CAST(midev->tx_thread);
+
+ midev->ndev = dev;
+ get_device(&adap->dev);
+ midev->adapter = adap;
+ get_device(&mcli->client->dev);
+ midev->client = mcli;
+ INIT_LIST_HEAD(&midev->list);
+ spin_lock_init(&midev->lock);
+ midev->i2c_lock_count = 0;
+ midev->release_count = 0;
+ init_completion(&midev->rx_done);
+ complete(&midev->rx_done);
+ init_waitqueue_head(&midev->tx_wq);
+ skb_queue_head_init(&midev->tx_queue);
+
+ /* Add to the parent mcli */
+ spin_lock_irqsave(&mcli->sel_lock, flags);
+ list_add(&midev->list, &mcli->devs);
+ /* Select a device by default */
+ if (!mcli->sel)
+ __mctp_i2c_device_select(mcli, midev);
+ spin_unlock_irqrestore(&mcli->sel_lock, flags);
+
+ /* Start the worker thread */
+ wake_up_process(midev->tx_thread);
+
+ return midev;
+}
+
+/* Counterpart of mctp_i2c_midev_init */
+static void mctp_i2c_midev_free(struct mctp_i2c_dev *midev)
+{
+ struct mctp_i2c_client *mcli = midev->client;
+ unsigned long flags;
+
+ if (midev->tx_thread) {
+ kthread_stop(midev->tx_thread);
+ midev->tx_thread = NULL;
+ }
+
+ /* Unconditionally unlock on close */
+ mctp_i2c_unlock_reset(midev);
+
+ /* Remove the netdev from the parent i2c client. */
+ spin_lock_irqsave(&mcli->sel_lock, flags);
+ list_del(&midev->list);
+ if (mcli->sel == midev) {
+ struct mctp_i2c_dev *first;
+
+ first = list_first_entry_or_null(&mcli->devs, struct mctp_i2c_dev, list);
+ __mctp_i2c_device_select(mcli, first);
+ }
+ spin_unlock_irqrestore(&mcli->sel_lock, flags);
+
+ skb_queue_purge(&midev->tx_queue);
+ put_device(&midev->adapter->dev);
+ put_device(&mcli->client->dev);
+}
+
+/* Stops, unregisters, and frees midev */
+static void mctp_i2c_unregister(struct mctp_i2c_dev *midev)
+{
+ unsigned long flags;
+
+ /* Stop tx thread prior to unregister; it uses netif_() functions */
+ kthread_stop(midev->tx_thread);
+ midev->tx_thread = NULL;
+
+ /* Prevent any new rx in mctp_i2c_recv(), let any pending work finish */
+ spin_lock_irqsave(&midev->lock, flags);
+ midev->allow_rx = false;
+ spin_unlock_irqrestore(&midev->lock, flags);
+ wait_for_completion(&midev->rx_done);
+
+ mctp_unregister_netdev(midev->ndev);
+ /* midev has been freed now by mctp_i2c_ndo_uninit callback */
+
+ free_netdev(midev->ndev);
+}
+
+static void mctp_i2c_ndo_uninit(struct net_device *dev)
+{
+ struct mctp_i2c_dev *midev = netdev_priv(dev);
+
+ /* Perform cleanup here to ensure that mcli->sel isn't holding
+ * a reference that would prevent unregister_netdevice()
+ * from completing.
+ */
+ mctp_i2c_midev_free(midev);
+}
+
+static int mctp_i2c_ndo_open(struct net_device *dev)
+{
+ struct mctp_i2c_dev *midev = netdev_priv(dev);
+ unsigned long flags;
+
+ /* i2c rx handler can only pass packets once the netdev is registered */
+ spin_lock_irqsave(&midev->lock, flags);
+ midev->allow_rx = true;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ return 0;
+}
+
+static int mctp_i2c_add_netdev(struct mctp_i2c_client *mcli,
+ struct i2c_adapter *adap)
+{
+ struct mctp_i2c_dev *midev = NULL;
+ struct net_device *ndev = NULL;
+ struct i2c_adapter *root;
+ unsigned long flags;
+ char namebuf[30];
+ int rc;
+
+ root = mux_root_adapter(adap);
+ if (root != mcli->client->adapter) {
+ dev_err(&mcli->client->dev,
+ "I2C adapter %s is not a child bus of %s\n",
+ mcli->client->adapter->name, root->name);
+ return -EINVAL;
+ }
+
+ WARN_ON(!mutex_is_locked(&driver_clients_lock));
+ snprintf(namebuf, sizeof(namebuf), "mctpi2c%d", adap->nr);
+ ndev = alloc_netdev(sizeof(*midev), namebuf, NET_NAME_ENUM, mctp_i2c_net_setup);
+ if (!ndev) {
+ dev_err(&mcli->client->dev, "alloc netdev failed\n");
+ rc = -ENOMEM;
+ goto err;
+ }
+ dev_net_set(ndev, current->nsproxy->net_ns);
+ SET_NETDEV_DEV(ndev, &adap->dev);
+ dev_addr_set(ndev, &mcli->lladdr);
+
+ midev = mctp_i2c_midev_init(ndev, mcli, adap);
+ if (IS_ERR(midev)) {
+ rc = PTR_ERR(midev);
+ midev = NULL;
+ goto err;
+ }
+
+ rc = mctp_register_netdev(ndev, &mctp_i2c_mctp_ops);
+ if (rc < 0) {
+ dev_err(&mcli->client->dev,
+ "register netdev \"%s\" failed %d\n",
+ ndev->name, rc);
+ goto err;
+ }
+
+ spin_lock_irqsave(&midev->lock, flags);
+ midev->allow_rx = false;
+ spin_unlock_irqrestore(&midev->lock, flags);
+
+ return 0;
+err:
+ if (midev)
+ mctp_i2c_midev_free(midev);
+ if (ndev)
+ free_netdev(ndev);
+ return rc;
+}
+
+/* Removes any netdev for adap. mcli is the parent root i2c client */
+static void mctp_i2c_remove_netdev(struct mctp_i2c_client *mcli,
+ struct i2c_adapter *adap)
+{
+ struct mctp_i2c_dev *midev = NULL, *m = NULL;
+ unsigned long flags;
+
+ WARN_ON(!mutex_is_locked(&driver_clients_lock));
+ spin_lock_irqsave(&mcli->sel_lock, flags);
+ /* List size is limited by number of MCTP netdevs on a single hardware bus */
+ list_for_each_entry(m, &mcli->devs, list)
+ if (m->adapter == adap) {
+ midev = m;
+ break;
+ }
+ spin_unlock_irqrestore(&mcli->sel_lock, flags);
+
+ if (midev)
+ mctp_i2c_unregister(midev);
+}
+
+/* Determines whether a device is an i2c adapter.
+ * Optionally returns the root i2c_adapter
+ */
+static struct i2c_adapter *mctp_i2c_get_adapter(struct device *dev,
+ struct i2c_adapter **ret_root)
+{
+ struct i2c_adapter *root, *adap;
+
+ if (dev->type != &i2c_adapter_type)
+ return NULL;
+ adap = to_i2c_adapter(dev);
+ root = mux_root_adapter(adap);
+ WARN_ONCE(!root, "MCTP I2C failed to find root adapter for %s\n",
+ dev_name(dev));
+ if (!root)
+ return NULL;
+ if (ret_root)
+ *ret_root = root;
+ return adap;
+}
+
+/* Determines whether a device is an i2c adapter with the "mctp-controller"
+ * devicetree property set. If adap is not an OF node, returns match_no_of
+ */
+static bool mctp_i2c_adapter_match(struct i2c_adapter *adap, bool match_no_of)
+{
+ if (!adap->dev.of_node)
+ return match_no_of;
+ return of_property_read_bool(adap->dev.of_node, MCTP_I2C_OF_PROP);
+}
+
+/* Called for each existing i2c device (adapter or client) when a
+ * new mctp-i2c client is probed.
+ */
+static int mctp_i2c_client_try_attach(struct device *dev, void *data)
+{
+ struct i2c_adapter *adap = NULL, *root = NULL;
+ struct mctp_i2c_client *mcli = data;
+
+ adap = mctp_i2c_get_adapter(dev, &root);
+ if (!adap)
+ return 0;
+ if (mcli->client->adapter != root)
+ return 0;
+ /* Must either have mctp-controller property on the adapter, or
+ * be a root adapter if it's non-devicetree
+ */
+ if (!mctp_i2c_adapter_match(adap, adap == root))
+ return 0;
+
+ return mctp_i2c_add_netdev(mcli, adap);
+}
+
+static void mctp_i2c_notify_add(struct device *dev)
+{
+ struct mctp_i2c_client *mcli = NULL, *m = NULL;
+ struct i2c_adapter *root = NULL, *adap = NULL;
+ int rc;
+
+ adap = mctp_i2c_get_adapter(dev, &root);
+ if (!adap)
+ return;
+ /* Check for mctp-controller property on the adapter */
+ if (!mctp_i2c_adapter_match(adap, false))
+ return;
+
+ /* Find an existing mcli for adap's root */
+ mutex_lock(&driver_clients_lock);
+ list_for_each_entry(m, &driver_clients, list) {
+ if (m->client->adapter == root) {
+ mcli = m;
+ break;
+ }
+ }
+
+ if (mcli) {
+ rc = mctp_i2c_add_netdev(mcli, adap);
+ if (rc < 0)
+ dev_warn(dev, "Failed adding mctp-i2c net device\n");
+ }
+ mutex_unlock(&driver_clients_lock);
+}
+
+static void mctp_i2c_notify_del(struct device *dev)
+{
+ struct i2c_adapter *root = NULL, *adap = NULL;
+ struct mctp_i2c_client *mcli = NULL;
+
+ adap = mctp_i2c_get_adapter(dev, &root);
+ if (!adap)
+ return;
+
+ mutex_lock(&driver_clients_lock);
+ list_for_each_entry(mcli, &driver_clients, list) {
+ if (mcli->client->adapter == root) {
+ mctp_i2c_remove_netdev(mcli, adap);
+ break;
+ }
+ }
+ mutex_unlock(&driver_clients_lock);
+}
+
+static int mctp_i2c_probe(struct i2c_client *client)
+{
+ struct mctp_i2c_client *mcli = NULL;
+ int rc;
+
+ mutex_lock(&driver_clients_lock);
+ mcli = mctp_i2c_new_client(client);
+ if (IS_ERR(mcli)) {
+ rc = PTR_ERR(mcli);
+ mcli = NULL;
+ goto out;
+ } else {
+ list_add(&mcli->list, &driver_clients);
+ }
+
+ /* Add a netdev for adapters that have a 'mctp-controller' property */
+ i2c_for_each_dev(mcli, mctp_i2c_client_try_attach);
+ rc = 0;
+out:
+ mutex_unlock(&driver_clients_lock);
+ return rc;
+}
+
+static int mctp_i2c_remove(struct i2c_client *client)
+{
+ struct mctp_i2c_client *mcli = i2c_get_clientdata(client);
+ struct mctp_i2c_dev *midev = NULL, *tmp = NULL;
+
+ mutex_lock(&driver_clients_lock);
+ list_del(&mcli->list);
+ /* Remove all child adapter netdevs */
+ list_for_each_entry_safe(midev, tmp, &mcli->devs, list)
+ mctp_i2c_unregister(midev);
+
+ mctp_i2c_free_client(mcli);
+ mutex_unlock(&driver_clients_lock);
+ /* Callers ignore return code */
+ return 0;
+}
+
+/* We look for a 'mctp-controller' property on I2C busses as they are
+ * added/deleted, creating/removing netdevs as required.
+ */
+static int mctp_i2c_notifier_call(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ mctp_i2c_notify_add(dev);
+ break;
+ case BUS_NOTIFY_DEL_DEVICE:
+ mctp_i2c_notify_del(dev);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block mctp_i2c_notifier = {
+ .notifier_call = mctp_i2c_notifier_call,
+};
+
+static const struct i2c_device_id mctp_i2c_id[] = {
+ { "mctp-i2c-interface", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, mctp_i2c_id);
+
+static const struct of_device_id mctp_i2c_of_match[] = {
+ { .compatible = "mctp-i2c-controller" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mctp_i2c_of_match);
+
+static struct i2c_driver mctp_i2c_driver = {
+ .driver = {
+ .name = "mctp-i2c-interface",
+ .of_match_table = mctp_i2c_of_match,
+ },
+ .probe_new = mctp_i2c_probe,
+ .remove = mctp_i2c_remove,
+ .id_table = mctp_i2c_id,
+};
+
+static __init int mctp_i2c_mod_init(void)
+{
+ int rc;
+
+ pr_info("MCTP I2C interface driver\n");
+ rc = i2c_add_driver(&mctp_i2c_driver);
+ if (rc < 0)
+ return rc;
+ rc = bus_register_notifier(&i2c_bus_type, &mctp_i2c_notifier);
+ if (rc < 0) {
+ i2c_del_driver(&mctp_i2c_driver);
+ return rc;
+ }
+ return 0;
+}
+
+static __exit void mctp_i2c_mod_exit(void)
+{
+ int rc;
+
+ rc = bus_unregister_notifier(&i2c_bus_type, &mctp_i2c_notifier);
+ if (rc < 0)
+ pr_warn("MCTP I2C could not unregister notifier, %d\n", rc);
+ i2c_del_driver(&mctp_i2c_driver);
+}
+
+module_init(mctp_i2c_mod_init);
+module_exit(mctp_i2c_mod_exit);
+
+MODULE_DESCRIPTION("MCTP I2C device");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Matt Johnston <matt@codeconstruct.com.au>");
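In the transmit path above the SMBus PEC covers everything from dest_slave through the payload, which is where the recurring "+ 3" comes from: dest_slave, command and byte_count are the three fixed bytes ahead of the counted data. A small sketch of that framing, reusing the struct mctp_i2c_hdr defined in the new driver (the helper name is illustrative):

static u8 example_mctp_i2c_pec(struct mctp_i2c_hdr *hdr)
{
	/* dest_slave + command + byte_count = 3 bytes, followed by
	 * byte_count bytes (source_slave plus MCTP payload); the PEC
	 * itself is not included in the CRC.
	 */
	return i2c_smbus_pec(0, (u8 *)hdr, hdr->byte_count + 3);
}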
diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c
index 62723a7faa2d..7cd103fd34ef 100644
--- a/drivers/net/mctp/mctp-serial.c
+++ b/drivers/net/mctp/mctp-serial.c
@@ -286,7 +286,7 @@ static void mctp_serial_rx(struct mctp_serial *dev)
cb = __mctp_cb(skb);
cb->halen = 0;
- netif_rx_ni(skb);
+ netif_rx(skb);
dev->netdev->stats.rx_packets++;
dev->netdev->stats.rx_bytes += dev->rxlen;
}
diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
index 64fb76c1e395..c483ba67c21f 100644
--- a/drivers/net/mdio/mdio-mscc-miim.c
+++ b/drivers/net/mdio/mdio-mscc-miim.c
@@ -15,6 +15,7 @@
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#define MSCC_MIIM_REG_STATUS 0x0
@@ -36,11 +37,19 @@
#define PHY_CFG_PHY_RESET (BIT(5) | BIT(6) | BIT(7) | BIT(8))
#define MSCC_PHY_REG_PHY_STATUS 0x4
+#define LAN966X_CUPHY_COMMON_CFG 0x0
+#define CUPHY_COMMON_CFG_RESET_N BIT(0)
+
+struct mscc_miim_info {
+ unsigned int phy_reset_offset;
+ unsigned int phy_reset_bits;
+};
+
struct mscc_miim_dev {
struct regmap *regs;
int mii_status_offset;
struct regmap *phy_regs;
- int phy_reset_offset;
+ const struct mscc_miim_info *info;
};
/* When high resolution timers aren't built-in: we can't use usleep_range() as
@@ -157,27 +166,29 @@ out:
static int mscc_miim_reset(struct mii_bus *bus)
{
struct mscc_miim_dev *miim = bus->priv;
- int offset = miim->phy_reset_offset;
+ unsigned int offset, bits;
int ret;
- if (miim->phy_regs) {
- ret = regmap_write(miim->phy_regs,
- MSCC_PHY_REG_PHY_CFG + offset, 0);
- if (ret < 0) {
- WARN_ONCE(1, "mscc reset set error %d\n", ret);
- return ret;
- }
+ if (!miim->phy_regs)
+ return 0;
- ret = regmap_write(miim->phy_regs,
- MSCC_PHY_REG_PHY_CFG + offset, 0x1ff);
- if (ret < 0) {
- WARN_ONCE(1, "mscc reset clear error %d\n", ret);
- return ret;
- }
+ offset = miim->info->phy_reset_offset;
+ bits = miim->info->phy_reset_bits;
+
+ ret = regmap_update_bits(miim->phy_regs, offset, bits, 0);
+ if (ret < 0) {
+ WARN_ONCE(1, "mscc reset set error %d\n", ret);
+ return ret;
+ }
- mdelay(500);
+ ret = regmap_update_bits(miim->phy_regs, offset, bits, bits);
+ if (ret < 0) {
+ WARN_ONCE(1, "mscc reset clear error %d\n", ret);
+ return ret;
}
+ mdelay(500);
+
return 0;
}
@@ -272,7 +283,10 @@ static int mscc_miim_probe(struct platform_device *pdev)
miim = bus->priv;
miim->phy_regs = phy_regmap;
- miim->phy_reset_offset = 0;
+
+ miim->info = device_get_match_data(&pdev->dev);
+ if (!miim->info)
+ return -EINVAL;
ret = of_mdiobus_register(bus, pdev->dev.of_node);
if (ret < 0) {
@@ -294,8 +308,25 @@ static int mscc_miim_remove(struct platform_device *pdev)
return 0;
}
+static const struct mscc_miim_info mscc_ocelot_miim_info = {
+ .phy_reset_offset = MSCC_PHY_REG_PHY_CFG,
+ .phy_reset_bits = PHY_CFG_PHY_ENA | PHY_CFG_PHY_COMMON_RESET |
+ PHY_CFG_PHY_RESET,
+};
+
+static const struct mscc_miim_info microchip_lan966x_miim_info = {
+ .phy_reset_offset = LAN966X_CUPHY_COMMON_CFG,
+ .phy_reset_bits = CUPHY_COMMON_CFG_RESET_N,
+};
+
static const struct of_device_id mscc_miim_match[] = {
- { .compatible = "mscc,ocelot-miim" },
+ {
+ .compatible = "mscc,ocelot-miim",
+ .data = &mscc_ocelot_miim_info
+ }, {
+ .compatible = "microchip,lan966x-miim",
+ .data = &microchip_lan966x_miim_info
+ },
{ }
};
MODULE_DEVICE_TABLE(of, mscc_miim_match);
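The mdio-mscc-miim change above is the usual match-data pattern: per-compatible reset parameters hang off the of_device_id table and are fetched at probe time with device_get_match_data(). A compact sketch of that pattern with illustrative names:

#include <linux/platform_device.h>
#include <linux/property.h>

struct example_info {
	unsigned int phy_reset_offset;
	unsigned int phy_reset_bits;
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_info *info;

	/* Returns the .data pointer of the matched of_device_id entry */
	info = device_get_match_data(&pdev->dev);
	if (!info)
		return -EINVAL;

	/* ... use info->phy_reset_offset / info->phy_reset_bits ... */

	return 0;
}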
diff --git a/drivers/net/mdio/mdio-mux.c b/drivers/net/mdio/mdio-mux.c
index ebd001f0eece..a881e3523328 100644
--- a/drivers/net/mdio/mdio-mux.c
+++ b/drivers/net/mdio/mdio-mux.c
@@ -168,8 +168,8 @@ int mdio_mux_init(struct device *dev,
cb->mii_bus->priv = cb;
cb->mii_bus->name = "mdio_mux";
- snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%x.%x",
- pb->parent_id, v);
+ snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x.%x",
+ cb->mii_bus->name, pb->parent_id, v);
cb->mii_bus->parent = dev;
cb->mii_bus->read = mdio_mux_read;
cb->mii_bus->write = mdio_mux_write;
diff --git a/drivers/net/mdio/mdio-xgene.c b/drivers/net/mdio/mdio-xgene.c
index 7ab4e26db08c..7aafc221b5cf 100644
--- a/drivers/net/mdio/mdio-xgene.c
+++ b/drivers/net/mdio/mdio-xgene.c
@@ -285,7 +285,8 @@ static acpi_status acpi_register_phy(acpi_handle handle, u32 lvl,
const union acpi_object *obj;
u32 phy_addr;
- if (acpi_bus_get_device(handle, &adev))
+ adev = acpi_fetch_acpi_dev(handle);
+ if (!adev)
return AE_OK;
if (acpi_dev_get_property(adev, "phy-channel", ACPI_TYPE_INTEGER, &obj))
diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c
index aaa628f859fd..0b1b6f650104 100644
--- a/drivers/net/mhi_net.c
+++ b/drivers/net/mhi_net.c
@@ -225,7 +225,7 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
u64_stats_inc(&mhi_netdev->stats.rx_packets);
u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
- netif_rx(skb);
+ __netif_rx(skb);
}
/* Refill if RX buffers queue becomes low */
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index 86ec5aae4289..21a0435c02de 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -89,7 +89,7 @@ static int net_failover_close(struct net_device *dev)
static netdev_tx_t net_failover_drop_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- atomic_long_inc(&dev->tx_dropped);
+ dev_core_stats_tx_dropped_inc(dev);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile
index a1cbfa44a1e1..5735e5b1a2cb 100644
--- a/drivers/net/netdevsim/Makefile
+++ b/drivers/net/netdevsim/Makefile
@@ -3,7 +3,7 @@
obj-$(CONFIG_NETDEVSIM) += netdevsim.o
netdevsim-objs := \
- netdev.o dev.o ethtool.o fib.o bus.o health.o udp_tunnels.o
+ netdev.o dev.o ethtool.o fib.o bus.o health.o hwstats.o udp_tunnels.o
ifeq ($(CONFIG_BPF_SYSCALL),y)
netdevsim-objs += \
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 08d7b465a0de..57a3ac893792 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -59,7 +59,7 @@ static struct dentry *nsim_dev_ddir;
unsigned int nsim_dev_get_vfs(struct nsim_dev *nsim_dev)
{
WARN_ON(!lockdep_rtnl_is_held() &&
- !lockdep_is_held(&nsim_dev->vfs_lock));
+ !devl_lock_is_held(priv_to_devlink(nsim_dev)));
return nsim_dev->nsim_bus_dev->num_vfs;
}
@@ -275,7 +275,7 @@ static ssize_t nsim_bus_dev_max_vfs_write(struct file *file,
return -ENOMEM;
nsim_dev = file->private_data;
- mutex_lock(&nsim_dev->vfs_lock);
+ devl_lock(priv_to_devlink(nsim_dev));
/* Reject if VFs are configured */
if (nsim_dev_get_vfs(nsim_dev)) {
ret = -EBUSY;
@@ -285,7 +285,7 @@ static ssize_t nsim_bus_dev_max_vfs_write(struct file *file,
*ppos += count;
ret = count;
}
- mutex_unlock(&nsim_dev->vfs_lock);
+ devl_unlock(priv_to_devlink(nsim_dev));
kfree(vfconfigs);
return ret;
@@ -339,6 +339,7 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
debugfs_create_bool("fail_trap_policer_counter_get", 0600,
nsim_dev->ddir,
&nsim_dev->fail_trap_policer_counter_get);
+ /* caution, dev_max_vfs write takes devlink lock */
debugfs_create_file("max_vfs", 0600, nsim_dev->ddir,
nsim_dev, &nsim_dev_max_vfs_fops);
@@ -567,6 +568,9 @@ static void nsim_dev_dummy_region_exit(struct nsim_dev *nsim_dev)
devlink_region_destroy(nsim_dev->dummy_region);
}
+static int
+__nsim_dev_port_add(struct nsim_dev *nsim_dev, enum nsim_dev_port_type type,
+ unsigned int port_index);
static void __nsim_dev_port_del(struct nsim_dev_port *nsim_dev_port);
static int nsim_esw_legacy_enable(struct nsim_dev *nsim_dev,
@@ -575,12 +579,10 @@ static int nsim_esw_legacy_enable(struct nsim_dev *nsim_dev,
struct devlink *devlink = priv_to_devlink(nsim_dev);
struct nsim_dev_port *nsim_dev_port, *tmp;
- devlink_rate_nodes_destroy(devlink);
- mutex_lock(&nsim_dev->port_list_lock);
+ devl_rate_nodes_destroy(devlink);
list_for_each_entry_safe(nsim_dev_port, tmp, &nsim_dev->port_list, list)
if (nsim_dev_port_is_vf(nsim_dev_port))
__nsim_dev_port_del(nsim_dev_port);
- mutex_unlock(&nsim_dev->port_list_lock);
nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_LEGACY;
return 0;
}
@@ -588,11 +590,11 @@ static int nsim_esw_legacy_enable(struct nsim_dev *nsim_dev,
static int nsim_esw_switchdev_enable(struct nsim_dev *nsim_dev,
struct netlink_ext_ack *extack)
{
- struct nsim_bus_dev *nsim_bus_dev = nsim_dev->nsim_bus_dev;
+ struct nsim_dev_port *nsim_dev_port, *tmp;
int i, err;
for (i = 0; i < nsim_dev_get_vfs(nsim_dev); i++) {
- err = nsim_drv_port_add(nsim_bus_dev, NSIM_DEV_PORT_TYPE_VF, i);
+ err = __nsim_dev_port_add(nsim_dev, NSIM_DEV_PORT_TYPE_VF, i);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to initialize VFs' netdevsim ports");
pr_err("Failed to initialize VF id=%d. %d.\n", i, err);
@@ -603,8 +605,9 @@ static int nsim_esw_switchdev_enable(struct nsim_dev *nsim_dev,
return 0;
err_port_add_vfs:
- for (i--; i >= 0; i--)
- nsim_drv_port_del(nsim_bus_dev, NSIM_DEV_PORT_TYPE_VF, i);
+ list_for_each_entry_safe(nsim_dev_port, tmp, &nsim_dev->port_list, list)
+ if (nsim_dev_port_is_vf(nsim_dev_port))
+ __nsim_dev_port_del(nsim_dev_port);
return err;
}
@@ -612,22 +615,16 @@ static int nsim_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
- int err = 0;
- mutex_lock(&nsim_dev->vfs_lock);
if (mode == nsim_dev->esw_mode)
- goto unlock;
+ return 0;
if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
- err = nsim_esw_legacy_enable(nsim_dev, extack);
- else if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
- err = nsim_esw_switchdev_enable(nsim_dev, extack);
- else
- err = -EINVAL;
+ return nsim_esw_legacy_enable(nsim_dev, extack);
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ return nsim_esw_switchdev_enable(nsim_dev, extack);
-unlock:
- mutex_unlock(&nsim_dev->vfs_lock);
- return err;
+ return -EINVAL;
}
static int nsim_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
@@ -835,14 +832,14 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
/* For each running port and enabled packet trap, generate a UDP
* packet with a random 5-tuple and report it.
*/
- mutex_lock(&nsim_dev->port_list_lock);
+ devl_lock(priv_to_devlink(nsim_dev));
list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list) {
if (!netif_running(nsim_dev_port->ns->netdev))
continue;
nsim_dev_trap_report(nsim_dev_port);
}
- mutex_unlock(&nsim_dev->port_list_lock);
+ devl_unlock(priv_to_devlink(nsim_dev));
schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw,
msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
@@ -924,6 +921,7 @@ static void nsim_dev_traps_exit(struct devlink *devlink)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ /* caution, trap work takes devlink lock */
cancel_delayed_work_sync(&nsim_dev->trap_data->trap_report_dw);
devlink_traps_unregister(devlink, nsim_traps_arr,
ARRAY_SIZE(nsim_traps_arr));
@@ -1380,8 +1378,8 @@ static int __nsim_dev_port_add(struct nsim_dev *nsim_dev, enum nsim_dev_port_typ
memcpy(attrs.switch_id.id, nsim_dev->switch_id.id, nsim_dev->switch_id.id_len);
attrs.switch_id.id_len = nsim_dev->switch_id.id_len;
devlink_port_attrs_set(devlink_port, &attrs);
- err = devlink_port_register(priv_to_devlink(nsim_dev), devlink_port,
- nsim_dev_port->port_index);
+ err = devl_port_register(priv_to_devlink(nsim_dev), devlink_port,
+ nsim_dev_port->port_index);
if (err)
goto err_port_free;
@@ -1396,8 +1394,8 @@ static int __nsim_dev_port_add(struct nsim_dev *nsim_dev, enum nsim_dev_port_typ
}
if (nsim_dev_port_is_vf(nsim_dev_port)) {
- err = devlink_rate_leaf_create(&nsim_dev_port->devlink_port,
- nsim_dev_port);
+ err = devl_rate_leaf_create(&nsim_dev_port->devlink_port,
+ nsim_dev_port);
if (err)
goto err_nsim_destroy;
}
@@ -1412,7 +1410,7 @@ err_nsim_destroy:
err_port_debugfs_exit:
nsim_dev_port_debugfs_exit(nsim_dev_port);
err_dl_port_unregister:
- devlink_port_unregister(devlink_port);
+ devl_port_unregister(devlink_port);
err_port_free:
kfree(nsim_dev_port);
return err;
@@ -1424,11 +1422,11 @@ static void __nsim_dev_port_del(struct nsim_dev_port *nsim_dev_port)
list_del(&nsim_dev_port->list);
if (nsim_dev_port_is_vf(nsim_dev_port))
- devlink_rate_leaf_destroy(&nsim_dev_port->devlink_port);
+ devl_rate_leaf_destroy(&nsim_dev_port->devlink_port);
devlink_port_type_clear(devlink_port);
nsim_destroy(nsim_dev_port->ns);
nsim_dev_port_debugfs_exit(nsim_dev_port);
- devlink_port_unregister(devlink_port);
+ devl_port_unregister(devlink_port);
kfree(nsim_dev_port);
}
@@ -1436,11 +1434,11 @@ static void nsim_dev_port_del_all(struct nsim_dev *nsim_dev)
{
struct nsim_dev_port *nsim_dev_port, *tmp;
- mutex_lock(&nsim_dev->port_list_lock);
+ devl_lock(priv_to_devlink(nsim_dev));
list_for_each_entry_safe(nsim_dev_port, tmp,
&nsim_dev->port_list, list)
__nsim_dev_port_del(nsim_dev_port);
- mutex_unlock(&nsim_dev->port_list_lock);
+ devl_unlock(priv_to_devlink(nsim_dev));
}
static int nsim_dev_port_add_all(struct nsim_dev *nsim_dev,
@@ -1449,7 +1447,9 @@ static int nsim_dev_port_add_all(struct nsim_dev *nsim_dev,
int i, err;
for (i = 0; i < port_count; i++) {
+ devl_lock(priv_to_devlink(nsim_dev));
err = __nsim_dev_port_add(nsim_dev, NSIM_DEV_PORT_TYPE_PF, i);
+ devl_unlock(priv_to_devlink(nsim_dev));
if (err)
goto err_port_del_all;
}
@@ -1470,7 +1470,6 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
devlink = priv_to_devlink(nsim_dev);
nsim_dev = devlink_priv(devlink);
INIT_LIST_HEAD(&nsim_dev->port_list);
- mutex_init(&nsim_dev->port_list_lock);
nsim_dev->fw_update_status = true;
nsim_dev->fw_update_overwrite_mask = 0;
@@ -1498,10 +1497,14 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
if (err)
goto err_health_exit;
- err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+ err = nsim_dev_hwstats_init(nsim_dev);
if (err)
goto err_psample_exit;
+ err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+ if (err)
+ goto err_hwstats_exit;
+
nsim_dev->take_snapshot = debugfs_create_file("take_snapshot",
0200,
nsim_dev->ddir,
@@ -1509,6 +1512,8 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
&nsim_dev_take_snapshot_fops);
return 0;
+err_hwstats_exit:
+ nsim_dev_hwstats_exit(nsim_dev);
err_psample_exit:
nsim_dev_psample_exit(nsim_dev);
err_health_exit:
@@ -1537,8 +1542,6 @@ int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev)
nsim_dev->switch_id.id_len = sizeof(nsim_dev->switch_id.id);
get_random_bytes(nsim_dev->switch_id.id, nsim_dev->switch_id.id_len);
INIT_LIST_HEAD(&nsim_dev->port_list);
- mutex_init(&nsim_dev->vfs_lock);
- mutex_init(&nsim_dev->port_list_lock);
nsim_dev->fw_update_status = true;
nsim_dev->fw_update_overwrite_mask = 0;
nsim_dev->max_macs = NSIM_DEV_MAX_MACS_DEFAULT;
@@ -1595,15 +1598,21 @@ int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev)
if (err)
goto err_bpf_dev_exit;
- err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+ err = nsim_dev_hwstats_init(nsim_dev);
if (err)
goto err_psample_exit;
+ err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+ if (err)
+ goto err_hwstats_exit;
+
nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_LEGACY;
devlink_set_features(devlink, DEVLINK_F_RELOAD);
devlink_register(devlink);
return 0;
+err_hwstats_exit:
+ nsim_dev_hwstats_exit(nsim_dev);
err_psample_exit:
nsim_dev_psample_exit(nsim_dev);
err_bpf_dev_exit:
@@ -1639,21 +1648,21 @@ static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev)
return;
debugfs_remove(nsim_dev->take_snapshot);
- mutex_lock(&nsim_dev->vfs_lock);
+ devl_lock(devlink);
if (nsim_dev_get_vfs(nsim_dev)) {
nsim_bus_dev_set_vfs(nsim_dev->nsim_bus_dev, 0);
if (nsim_esw_mode_is_switchdev(nsim_dev))
nsim_esw_legacy_enable(nsim_dev, NULL);
}
- mutex_unlock(&nsim_dev->vfs_lock);
+ devl_unlock(devlink);
nsim_dev_port_del_all(nsim_dev);
+ nsim_dev_hwstats_exit(nsim_dev);
nsim_dev_psample_exit(nsim_dev);
nsim_dev_health_exit(nsim_dev);
nsim_fib_destroy(devlink, nsim_dev->fib_data);
nsim_dev_traps_exit(devlink);
nsim_dev_dummy_region_exit(nsim_dev);
- mutex_destroy(&nsim_dev->port_list_lock);
}
void nsim_drv_remove(struct nsim_bus_dev *nsim_bus_dev)
@@ -1693,12 +1702,12 @@ int nsim_drv_port_add(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type
struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
int err;
- mutex_lock(&nsim_dev->port_list_lock);
+ devl_lock(priv_to_devlink(nsim_dev));
if (__nsim_dev_port_lookup(nsim_dev, type, port_index))
err = -EEXIST;
else
err = __nsim_dev_port_add(nsim_dev, type, port_index);
- mutex_unlock(&nsim_dev->port_list_lock);
+ devl_unlock(priv_to_devlink(nsim_dev));
return err;
}
@@ -1709,13 +1718,13 @@ int nsim_drv_port_del(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type
struct nsim_dev_port *nsim_dev_port;
int err = 0;
- mutex_lock(&nsim_dev->port_list_lock);
+ devl_lock(priv_to_devlink(nsim_dev));
nsim_dev_port = __nsim_dev_port_lookup(nsim_dev, type, port_index);
if (!nsim_dev_port)
err = -ENOENT;
else
__nsim_dev_port_del(nsim_dev_port);
- mutex_unlock(&nsim_dev->port_list_lock);
+ devl_unlock(priv_to_devlink(nsim_dev));
return err;
}
@@ -1723,9 +1732,10 @@ int nsim_drv_configure_vfs(struct nsim_bus_dev *nsim_bus_dev,
unsigned int num_vfs)
{
struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
+ struct devlink *devlink = priv_to_devlink(nsim_dev);
int ret = 0;
- mutex_lock(&nsim_dev->vfs_lock);
+ devl_lock(devlink);
if (nsim_bus_dev->num_vfs == num_vfs)
goto exit_unlock;
if (nsim_bus_dev->num_vfs && num_vfs) {
@@ -1751,7 +1761,7 @@ int nsim_drv_configure_vfs(struct nsim_bus_dev *nsim_bus_dev,
}
exit_unlock:
- mutex_unlock(&nsim_dev->vfs_lock);
+ devl_unlock(devlink);
return ret;
}
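
Throughout this file the devl_*() calls are the variants that assume the devlink instance lock is already held, which is why the driver-private vfs_lock and port_list_lock can go away; a minimal sketch of the pattern, assuming the devlink API of this series (the function name is hypothetical):

#include <net/devlink.h>

/* Hypothetical: register a port under the devlink instance lock instead
 * of a driver-private mutex.
 */
static int example_port_add(struct devlink *devlink,
			    struct devlink_port *devlink_port,
			    unsigned int port_index)
{
	int err;

	devl_lock(devlink);
	err = devl_port_register(devlink, devlink_port, port_index);
	devl_unlock(devlink);
	return err;
}
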
diff --git a/drivers/net/netdevsim/hwstats.c b/drivers/net/netdevsim/hwstats.c
new file mode 100644
index 000000000000..605a38e16db0
--- /dev/null
+++ b/drivers/net/netdevsim/hwstats.c
@@ -0,0 +1,486 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/debugfs.h>
+
+#include "netdevsim.h"
+
+#define NSIM_DEV_HWSTATS_TRAFFIC_MS 100
+
+static struct list_head *
+nsim_dev_hwstats_get_list_head(struct nsim_dev_hwstats *hwstats,
+ enum netdev_offload_xstats_type type)
+{
+ switch (type) {
+ case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
+ return &hwstats->l3_list;
+ }
+
+ WARN_ON_ONCE(1);
+ return NULL;
+}
+
+static void nsim_dev_hwstats_traffic_bump(struct nsim_dev_hwstats *hwstats,
+ enum netdev_offload_xstats_type type)
+{
+ struct nsim_dev_hwstats_netdev *hwsdev;
+ struct list_head *hwsdev_list;
+
+ hwsdev_list = nsim_dev_hwstats_get_list_head(hwstats, type);
+ if (WARN_ON(!hwsdev_list))
+ return;
+
+ list_for_each_entry(hwsdev, hwsdev_list, list) {
+ if (hwsdev->enabled) {
+ hwsdev->stats.rx_packets += 1;
+ hwsdev->stats.tx_packets += 2;
+ hwsdev->stats.rx_bytes += 100;
+ hwsdev->stats.tx_bytes += 300;
+ }
+ }
+}
+
+static void nsim_dev_hwstats_traffic_work(struct work_struct *work)
+{
+ struct nsim_dev_hwstats *hwstats;
+
+ hwstats = container_of(work, struct nsim_dev_hwstats, traffic_dw.work);
+ mutex_lock(&hwstats->hwsdev_list_lock);
+ nsim_dev_hwstats_traffic_bump(hwstats, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
+ mutex_unlock(&hwstats->hwsdev_list_lock);
+
+ schedule_delayed_work(&hwstats->traffic_dw,
+ msecs_to_jiffies(NSIM_DEV_HWSTATS_TRAFFIC_MS));
+}
+
+static struct nsim_dev_hwstats_netdev *
+nsim_dev_hwslist_find_hwsdev(struct list_head *hwsdev_list,
+ int ifindex)
+{
+ struct nsim_dev_hwstats_netdev *hwsdev;
+
+ list_for_each_entry(hwsdev, hwsdev_list, list) {
+ if (hwsdev->netdev->ifindex == ifindex)
+ return hwsdev;
+ }
+
+ return NULL;
+}
+
+static int nsim_dev_hwsdev_enable(struct nsim_dev_hwstats_netdev *hwsdev,
+ struct netlink_ext_ack *extack)
+{
+ if (hwsdev->fail_enable) {
+ hwsdev->fail_enable = false;
+ NL_SET_ERR_MSG_MOD(extack, "Stats enablement set to fail");
+ return -ECANCELED;
+ }
+
+ hwsdev->enabled = true;
+ return 0;
+}
+
+static void nsim_dev_hwsdev_disable(struct nsim_dev_hwstats_netdev *hwsdev)
+{
+ hwsdev->enabled = false;
+ memset(&hwsdev->stats, 0, sizeof(hwsdev->stats));
+}
+
+static int
+nsim_dev_hwsdev_report_delta(struct nsim_dev_hwstats_netdev *hwsdev,
+ struct netdev_notifier_offload_xstats_info *info)
+{
+ netdev_offload_xstats_report_delta(info->report_delta, &hwsdev->stats);
+ memset(&hwsdev->stats, 0, sizeof(hwsdev->stats));
+ return 0;
+}
+
+static void
+nsim_dev_hwsdev_report_used(struct nsim_dev_hwstats_netdev *hwsdev,
+ struct netdev_notifier_offload_xstats_info *info)
+{
+ if (hwsdev->enabled)
+ netdev_offload_xstats_report_used(info->report_used);
+}
+
+static int nsim_dev_hwstats_event_off_xstats(struct nsim_dev_hwstats *hwstats,
+ struct net_device *dev,
+ unsigned long event, void *ptr)
+{
+ struct netdev_notifier_offload_xstats_info *info;
+ struct nsim_dev_hwstats_netdev *hwsdev;
+ struct list_head *hwsdev_list;
+ int err = 0;
+
+ info = ptr;
+ hwsdev_list = nsim_dev_hwstats_get_list_head(hwstats, info->type);
+ if (!hwsdev_list)
+ return 0;
+
+ mutex_lock(&hwstats->hwsdev_list_lock);
+
+ hwsdev = nsim_dev_hwslist_find_hwsdev(hwsdev_list, dev->ifindex);
+ if (!hwsdev)
+ goto out;
+
+ switch (event) {
+ case NETDEV_OFFLOAD_XSTATS_ENABLE:
+ err = nsim_dev_hwsdev_enable(hwsdev, info->info.extack);
+ break;
+ case NETDEV_OFFLOAD_XSTATS_DISABLE:
+ nsim_dev_hwsdev_disable(hwsdev);
+ break;
+ case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
+ nsim_dev_hwsdev_report_used(hwsdev, info);
+ break;
+ case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
+ err = nsim_dev_hwsdev_report_delta(hwsdev, info);
+ break;
+ }
+
+out:
+ mutex_unlock(&hwstats->hwsdev_list_lock);
+ return err;
+}
+
+static void nsim_dev_hwsdev_fini(struct nsim_dev_hwstats_netdev *hwsdev)
+{
+ dev_put(hwsdev->netdev);
+ kfree(hwsdev);
+}
+
+static void
+__nsim_dev_hwstats_event_unregister(struct nsim_dev_hwstats *hwstats,
+ struct net_device *dev,
+ enum netdev_offload_xstats_type type)
+{
+ struct nsim_dev_hwstats_netdev *hwsdev;
+ struct list_head *hwsdev_list;
+
+ hwsdev_list = nsim_dev_hwstats_get_list_head(hwstats, type);
+ if (WARN_ON(!hwsdev_list))
+ return;
+
+ hwsdev = nsim_dev_hwslist_find_hwsdev(hwsdev_list, dev->ifindex);
+ if (!hwsdev)
+ return;
+
+ list_del(&hwsdev->list);
+ nsim_dev_hwsdev_fini(hwsdev);
+}
+
+static void nsim_dev_hwstats_event_unregister(struct nsim_dev_hwstats *hwstats,
+ struct net_device *dev)
+{
+ mutex_lock(&hwstats->hwsdev_list_lock);
+ __nsim_dev_hwstats_event_unregister(hwstats, dev,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3);
+ mutex_unlock(&hwstats->hwsdev_list_lock);
+}
+
+static int nsim_dev_hwstats_event(struct nsim_dev_hwstats *hwstats,
+ struct net_device *dev,
+ unsigned long event, void *ptr)
+{
+ switch (event) {
+ case NETDEV_OFFLOAD_XSTATS_ENABLE:
+ case NETDEV_OFFLOAD_XSTATS_DISABLE:
+ case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
+ case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
+ return nsim_dev_hwstats_event_off_xstats(hwstats, dev,
+ event, ptr);
+ case NETDEV_UNREGISTER:
+ nsim_dev_hwstats_event_unregister(hwstats, dev);
+ break;
+ }
+
+ return 0;
+}
+
+static int nsim_dev_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct nsim_dev_hwstats *hwstats;
+ int err = 0;
+
+ hwstats = container_of(nb, struct nsim_dev_hwstats, netdevice_nb);
+ err = nsim_dev_hwstats_event(hwstats, dev, event, ptr);
+ if (err)
+ return notifier_from_errno(err);
+
+ return NOTIFY_OK;
+}
+
+static int
+nsim_dev_hwstats_enable_ifindex(struct nsim_dev_hwstats *hwstats,
+ int ifindex,
+ enum netdev_offload_xstats_type type,
+ struct list_head *hwsdev_list)
+{
+ struct nsim_dev_hwstats_netdev *hwsdev;
+ struct nsim_dev *nsim_dev;
+ struct net_device *netdev;
+ bool notify = false;
+ struct net *net;
+ int err = 0;
+
+ nsim_dev = container_of(hwstats, struct nsim_dev, hwstats);
+ net = nsim_dev_net(nsim_dev);
+
+ rtnl_lock();
+ mutex_lock(&hwstats->hwsdev_list_lock);
+ hwsdev = nsim_dev_hwslist_find_hwsdev(hwsdev_list, ifindex);
+ if (hwsdev)
+ goto out_unlock_list;
+
+ netdev = dev_get_by_index(net, ifindex);
+ if (!netdev) {
+ err = -ENODEV;
+ goto out_unlock_list;
+ }
+
+ hwsdev = kzalloc(sizeof(*hwsdev), GFP_KERNEL);
+ if (!hwsdev) {
+ err = -ENOMEM;
+ goto out_put_netdev;
+ }
+
+ hwsdev->netdev = netdev;
+ list_add_tail(&hwsdev->list, hwsdev_list);
+ mutex_unlock(&hwstats->hwsdev_list_lock);
+
+ if (netdev_offload_xstats_enabled(netdev, type)) {
+ nsim_dev_hwsdev_enable(hwsdev, NULL);
+ notify = true;
+ }
+
+ if (notify)
+ rtnl_offload_xstats_notify(netdev);
+ rtnl_unlock();
+ return err;
+
+out_put_netdev:
+ dev_put(netdev);
+out_unlock_list:
+ mutex_unlock(&hwstats->hwsdev_list_lock);
+ rtnl_unlock();
+ return err;
+}
+
+static int
+nsim_dev_hwstats_disable_ifindex(struct nsim_dev_hwstats *hwstats,
+ int ifindex,
+ enum netdev_offload_xstats_type type,
+ struct list_head *hwsdev_list)
+{
+ struct nsim_dev_hwstats_netdev *hwsdev;
+ int err = 0;
+
+ rtnl_lock();
+ mutex_lock(&hwstats->hwsdev_list_lock);
+ hwsdev = nsim_dev_hwslist_find_hwsdev(hwsdev_list, ifindex);
+ if (hwsdev)
+ list_del(&hwsdev->list);
+ mutex_unlock(&hwstats->hwsdev_list_lock);
+
+ if (!hwsdev) {
+ err = -ENOENT;
+ goto unlock_out;
+ }
+
+ if (netdev_offload_xstats_enabled(hwsdev->netdev, type)) {
+ netdev_offload_xstats_push_delta(hwsdev->netdev, type,
+ &hwsdev->stats);
+ rtnl_offload_xstats_notify(hwsdev->netdev);
+ }
+ nsim_dev_hwsdev_fini(hwsdev);
+
+unlock_out:
+ rtnl_unlock();
+ return err;
+}
+
+static int
+nsim_dev_hwstats_fail_ifindex(struct nsim_dev_hwstats *hwstats,
+ int ifindex,
+ enum netdev_offload_xstats_type type,
+ struct list_head *hwsdev_list)
+{
+ struct nsim_dev_hwstats_netdev *hwsdev;
+ int err = 0;
+
+ mutex_lock(&hwstats->hwsdev_list_lock);
+
+ hwsdev = nsim_dev_hwslist_find_hwsdev(hwsdev_list, ifindex);
+ if (!hwsdev) {
+ err = -ENOENT;
+ goto err_hwsdev_list_unlock;
+ }
+
+ hwsdev->fail_enable = true;
+
+err_hwsdev_list_unlock:
+ mutex_unlock(&hwstats->hwsdev_list_lock);
+ return err;
+}
+
+enum nsim_dev_hwstats_do {
+ NSIM_DEV_HWSTATS_DO_DISABLE,
+ NSIM_DEV_HWSTATS_DO_ENABLE,
+ NSIM_DEV_HWSTATS_DO_FAIL,
+};
+
+struct nsim_dev_hwstats_fops {
+ const struct file_operations fops;
+ enum nsim_dev_hwstats_do action;
+ enum netdev_offload_xstats_type type;
+};
+
+static ssize_t
+nsim_dev_hwstats_do_write(struct file *file,
+ const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct nsim_dev_hwstats *hwstats = file->private_data;
+ struct nsim_dev_hwstats_fops *hwsfops;
+ struct list_head *hwsdev_list;
+ int ifindex;
+ int err;
+
+ hwsfops = container_of(debugfs_real_fops(file),
+ struct nsim_dev_hwstats_fops, fops);
+
+ err = kstrtoint_from_user(data, count, 0, &ifindex);
+ if (err)
+ return err;
+
+ hwsdev_list = nsim_dev_hwstats_get_list_head(hwstats, hwsfops->type);
+ if (WARN_ON(!hwsdev_list))
+ return -EINVAL;
+
+ switch (hwsfops->action) {
+ case NSIM_DEV_HWSTATS_DO_DISABLE:
+ err = nsim_dev_hwstats_disable_ifindex(hwstats, ifindex,
+ hwsfops->type,
+ hwsdev_list);
+ break;
+ case NSIM_DEV_HWSTATS_DO_ENABLE:
+ err = nsim_dev_hwstats_enable_ifindex(hwstats, ifindex,
+ hwsfops->type,
+ hwsdev_list);
+ break;
+ case NSIM_DEV_HWSTATS_DO_FAIL:
+ err = nsim_dev_hwstats_fail_ifindex(hwstats, ifindex,
+ hwsfops->type,
+ hwsdev_list);
+ break;
+ }
+ if (err)
+ return err;
+
+ return count;
+}
+
+#define NSIM_DEV_HWSTATS_FOPS(ACTION, TYPE) \
+ { \
+ .fops = { \
+ .open = simple_open, \
+ .write = nsim_dev_hwstats_do_write, \
+ .llseek = generic_file_llseek, \
+ .owner = THIS_MODULE, \
+ }, \
+ .action = ACTION, \
+ .type = TYPE, \
+ }
+
+static const struct nsim_dev_hwstats_fops nsim_dev_hwstats_l3_disable_fops =
+ NSIM_DEV_HWSTATS_FOPS(NSIM_DEV_HWSTATS_DO_DISABLE,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3);
+
+static const struct nsim_dev_hwstats_fops nsim_dev_hwstats_l3_enable_fops =
+ NSIM_DEV_HWSTATS_FOPS(NSIM_DEV_HWSTATS_DO_ENABLE,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3);
+
+static const struct nsim_dev_hwstats_fops nsim_dev_hwstats_l3_fail_fops =
+ NSIM_DEV_HWSTATS_FOPS(NSIM_DEV_HWSTATS_DO_FAIL,
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3);
+
+#undef NSIM_DEV_HWSTATS_FOPS
+
+int nsim_dev_hwstats_init(struct nsim_dev *nsim_dev)
+{
+ struct nsim_dev_hwstats *hwstats = &nsim_dev->hwstats;
+ struct net *net = nsim_dev_net(nsim_dev);
+ int err;
+
+ mutex_init(&hwstats->hwsdev_list_lock);
+ INIT_LIST_HEAD(&hwstats->l3_list);
+
+ hwstats->netdevice_nb.notifier_call = nsim_dev_netdevice_event;
+ err = register_netdevice_notifier_net(net, &hwstats->netdevice_nb);
+ if (err)
+ goto err_mutex_destroy;
+
+ hwstats->ddir = debugfs_create_dir("hwstats", nsim_dev->ddir);
+ if (IS_ERR(hwstats->ddir)) {
+ err = PTR_ERR(hwstats->ddir);
+ goto err_unregister_notifier;
+ }
+
+ hwstats->l3_ddir = debugfs_create_dir("l3", hwstats->ddir);
+ if (IS_ERR(hwstats->l3_ddir)) {
+ err = PTR_ERR(hwstats->l3_ddir);
+ goto err_remove_hwstats_recursive;
+ }
+
+ debugfs_create_file("enable_ifindex", 0600, hwstats->l3_ddir, hwstats,
+ &nsim_dev_hwstats_l3_enable_fops.fops);
+ debugfs_create_file("disable_ifindex", 0600, hwstats->l3_ddir, hwstats,
+ &nsim_dev_hwstats_l3_disable_fops.fops);
+ debugfs_create_file("fail_next_enable", 0600, hwstats->l3_ddir, hwstats,
+ &nsim_dev_hwstats_l3_fail_fops.fops);
+
+ INIT_DELAYED_WORK(&hwstats->traffic_dw,
+ &nsim_dev_hwstats_traffic_work);
+ schedule_delayed_work(&hwstats->traffic_dw,
+ msecs_to_jiffies(NSIM_DEV_HWSTATS_TRAFFIC_MS));
+ return 0;
+
+err_remove_hwstats_recursive:
+ debugfs_remove_recursive(hwstats->ddir);
+err_unregister_notifier:
+ unregister_netdevice_notifier_net(net, &hwstats->netdevice_nb);
+err_mutex_destroy:
+ mutex_destroy(&hwstats->hwsdev_list_lock);
+ return err;
+}
+
+static void nsim_dev_hwsdev_list_wipe(struct nsim_dev_hwstats *hwstats,
+ enum netdev_offload_xstats_type type)
+{
+ struct nsim_dev_hwstats_netdev *hwsdev, *tmp;
+ struct list_head *hwsdev_list;
+
+ hwsdev_list = nsim_dev_hwstats_get_list_head(hwstats, type);
+ if (WARN_ON(!hwsdev_list))
+ return;
+
+ mutex_lock(&hwstats->hwsdev_list_lock);
+ list_for_each_entry_safe(hwsdev, tmp, hwsdev_list, list) {
+ list_del(&hwsdev->list);
+ nsim_dev_hwsdev_fini(hwsdev);
+ }
+ mutex_unlock(&hwstats->hwsdev_list_lock);
+}
+
+void nsim_dev_hwstats_exit(struct nsim_dev *nsim_dev)
+{
+ struct nsim_dev_hwstats *hwstats = &nsim_dev->hwstats;
+ struct net *net = nsim_dev_net(nsim_dev);
+
+ cancel_delayed_work_sync(&hwstats->traffic_dw);
+ debugfs_remove_recursive(hwstats->ddir);
+ unregister_netdevice_notifier_net(net, &hwstats->netdevice_nb);
+ nsim_dev_hwsdev_list_wipe(hwstats, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
+ mutex_destroy(&hwstats->hwsdev_list_lock);
+}
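
For orientation, a hedged sketch of how a driver answers the two REPORT_* events with the same helpers used above; everything not visible in the hunk (the function name and the my_hw_stats accumulator) is hypothetical:

#include <linux/netdevice.h>

/* Sketch: respond to the REPORT_* notifier events with the helpers used
 * by nsim_dev_hwsdev_report_used()/_report_delta() above.
 */
static int example_xstats_report(unsigned long event,
				 struct netdev_notifier_offload_xstats_info *info,
				 struct rtnl_hw_stats64 *my_hw_stats)
{
	switch (event) {
	case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
		netdev_offload_xstats_report_used(info->report_used);
		break;
	case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
		netdev_offload_xstats_report_delta(info->report_delta,
						   my_hw_stats);
		break;
	}
	return 0;
}
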
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index c49771f27f17..0b122872b2c9 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -184,6 +184,28 @@ struct nsim_dev_health {
int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink);
void nsim_dev_health_exit(struct nsim_dev *nsim_dev);
+struct nsim_dev_hwstats_netdev {
+ struct list_head list;
+ struct net_device *netdev;
+ struct rtnl_hw_stats64 stats;
+ bool enabled;
+ bool fail_enable;
+};
+
+struct nsim_dev_hwstats {
+ struct dentry *ddir;
+ struct dentry *l3_ddir;
+
+ struct mutex hwsdev_list_lock; /* protects hwsdev list(s) */
+ struct list_head l3_list;
+
+ struct notifier_block netdevice_nb;
+ struct delayed_work traffic_dw;
+};
+
+int nsim_dev_hwstats_init(struct nsim_dev *nsim_dev);
+void nsim_dev_hwstats_exit(struct nsim_dev *nsim_dev);
+
#if IS_ENABLED(CONFIG_PSAMPLE)
int nsim_dev_psample_init(struct nsim_dev *nsim_dev);
void nsim_dev_psample_exit(struct nsim_dev *nsim_dev);
@@ -239,7 +261,6 @@ struct nsim_dev {
struct dentry *take_snapshot;
struct dentry *nodes_ddir;
- struct mutex vfs_lock; /* Protects vfconfigs */
struct nsim_vf_config *vfconfigs;
struct bpf_offload_dev *bpf_dev;
@@ -252,7 +273,6 @@ struct nsim_dev {
struct list_head bpf_bound_maps;
struct netdev_phys_item_id switch_id;
struct list_head port_list;
- struct mutex port_list_lock; /* protects port list */
bool fw_update_status;
u32 fw_update_overwrite_mask;
u32 max_macs;
@@ -261,6 +281,7 @@ struct nsim_dev {
bool fail_reload;
struct devlink_region *dummy_region;
struct nsim_dev_health health;
+ struct nsim_dev_hwstats hwstats;
struct flow_action_cookie *fa_cookie;
spinlock_t fa_cookie_lock; /* protects fa_cookie */
bool fail_trap_group_set;
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 98ca6b18415e..80bdc07f2cd3 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -119,7 +119,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
skb->protocol = eth_type_trans(skb, ndev);
skb->ip_summed = CHECKSUM_NONE;
- if (netif_rx(skb) == NET_RX_DROP) {
+ if (__netif_rx(skb) == NET_RX_DROP) {
ndev->stats.rx_errors++;
ndev->stats.rx_dropped++;
} else {
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index cd6742e6ba8b..61418d4dc0cd 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -632,35 +632,43 @@ static void xpcs_resolve_pma(struct dw_xpcs *xpcs,
}
}
-void xpcs_validate(struct dw_xpcs *xpcs, unsigned long *supported,
- struct phylink_link_state *state)
+static int xpcs_validate(struct phylink_pcs *pcs, unsigned long *supported,
+ const struct phylink_link_state *state)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(xpcs_supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(xpcs_supported) = { 0, };
const struct xpcs_compat *compat;
+ struct dw_xpcs *xpcs;
int i;
- /* phylink expects us to report all supported modes with
- * PHY_INTERFACE_MODE_NA, just don't limit the supported and
- * advertising masks and exit.
- */
- if (state->interface == PHY_INTERFACE_MODE_NA)
- return;
-
- linkmode_zero(xpcs_supported);
-
+ xpcs = phylink_pcs_to_xpcs(pcs);
compat = xpcs_find_compat(xpcs->id, state->interface);
- /* Populate the supported link modes for this
- * PHY interface type
+ /* Populate the supported link modes for this PHY interface type.
+ * FIXME: what about the port modes and autoneg bit? This masks
+ * all those away.
*/
if (compat)
for (i = 0; compat->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++)
set_bit(compat->supported[i], xpcs_supported);
linkmode_and(supported, supported, xpcs_supported);
- linkmode_and(state->advertising, state->advertising, xpcs_supported);
+
+ return 0;
+}
+
+void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces)
+{
+ int i, j;
+
+ for (i = 0; i < DW_XPCS_INTERFACE_MAX; i++) {
+ const struct xpcs_compat *compat = &xpcs->id->compat[i];
+
+ for (j = 0; j < compat->num_interfaces; j++)
+ if (compat->interface[j] < PHY_INTERFACE_MODE_MAX)
+ __set_bit(compat->interface[j], interfaces);
+ }
}
-EXPORT_SYMBOL_GPL(xpcs_validate);
+EXPORT_SYMBOL_GPL(xpcs_get_interfaces);
int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable)
{
@@ -1106,6 +1114,7 @@ static const struct xpcs_id xpcs_id_list[] = {
};
static const struct phylink_pcs_ops xpcs_phylink_ops = {
+ .pcs_validate = xpcs_validate,
.pcs_config = xpcs_config,
.pcs_get_state = xpcs_get_state,
.pcs_link_up = xpcs_link_up,
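
A short sketch of how a MAC driver is expected to consume the newly exported xpcs_get_interfaces(); the struct and function names other than the xpcs and phylink symbols are hypothetical, assuming phylink's supported_interfaces bitmap of the same era:

#include <linux/phylink.h>
#include <linux/pcs/pcs-xpcs.h>

struct example_priv {
	struct phylink_config phylink_config;
	struct dw_xpcs *xpcs;
};

/* Advertise the PCS's interface modes to phylink up front instead of
 * relying on validate() being called with PHY_INTERFACE_MODE_NA.
 */
static void example_fill_interfaces(struct example_priv *priv)
{
	xpcs_get_interfaces(priv->xpcs,
			    priv->phylink_config.supported_interfaces);
}
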
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 902495afcb38..ea7571a2b39b 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -220,6 +220,7 @@ config MEDIATEK_GE_PHY
config MICREL_PHY
tristate "Micrel PHYs"
+ depends on PTP_1588_CLOCK_OPTIONAL
help
Supports the KSZ9021, VSC8201, KS8001 PHYs.
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
index 968dd43a2b1e..a8db1a19011b 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia_main.c
@@ -533,9 +533,7 @@ static int aqcs109_config_init(struct phy_device *phydev)
* PMA speed ability bits are the same for all members of the family,
* AQCS109 however supports speeds up to 2.5G only.
*/
- ret = phy_set_max_speed(phydev, SPEED_2500);
- if (ret)
- return ret;
+ phy_set_max_speed(phydev, SPEED_2500);
return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
}
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 29aa811af430..73926006d319 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -19,6 +19,8 @@
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/consumer.h>
+#include <linux/phylink.h>
+#include <linux/sfp.h>
#include <dt-bindings/net/qca-ar803x.h>
#define AT803X_SPECIFIC_FUNCTION_CONTROL 0x10
@@ -51,6 +53,8 @@
#define AT803X_INTR_ENABLE_PAGE_RECEIVED BIT(12)
#define AT803X_INTR_ENABLE_LINK_FAIL BIT(11)
#define AT803X_INTR_ENABLE_LINK_SUCCESS BIT(10)
+#define AT803X_INTR_ENABLE_LINK_FAIL_BX BIT(8)
+#define AT803X_INTR_ENABLE_LINK_SUCCESS_BX BIT(7)
#define AT803X_INTR_ENABLE_WIRESPEED_DOWNGRADE BIT(5)
#define AT803X_INTR_ENABLE_POLARITY_CHANGED BIT(1)
#define AT803X_INTR_ENABLE_WOL BIT(0)
@@ -85,7 +89,17 @@
#define AT803X_DEBUG_DATA 0x1E
#define AT803X_MODE_CFG_MASK 0x0F
-#define AT803X_MODE_CFG_SGMII 0x01
+#define AT803X_MODE_CFG_BASET_RGMII 0x00
+#define AT803X_MODE_CFG_BASET_SGMII 0x01
+#define AT803X_MODE_CFG_BX1000_RGMII_50OHM 0x02
+#define AT803X_MODE_CFG_BX1000_RGMII_75OHM 0x03
+#define AT803X_MODE_CFG_BX1000_CONV_50OHM 0x04
+#define AT803X_MODE_CFG_BX1000_CONV_75OHM 0x05
+#define AT803X_MODE_CFG_FX100_RGMII_50OHM 0x06
+#define AT803X_MODE_CFG_FX100_CONV_50OHM 0x07
+#define AT803X_MODE_CFG_RGMII_AUTO_MDET 0x0B
+#define AT803X_MODE_CFG_FX100_RGMII_75OHM 0x0E
+#define AT803X_MODE_CFG_FX100_CONV_75OHM 0x0F
#define AT803X_PSSR 0x11 /*PHY-Specific Status Register*/
#define AT803X_PSSR_MR_AN_COMPLETE 0x0200
@@ -283,6 +297,8 @@ struct at803x_priv {
u16 clk_25m_mask;
u8 smarteee_lpi_tw_1g;
u8 smarteee_lpi_tw_100m;
+ bool is_fiber;
+ bool is_1000basex;
struct regulator_dev *vddio_rdev;
struct regulator_dev *vddh_rdev;
struct regulator *vddio;
@@ -650,6 +666,55 @@ static int at8031_register_regulators(struct phy_device *phydev)
return 0;
}
+static int at803x_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+{
+ struct phy_device *phydev = upstream;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
+ phy_interface_t iface;
+
+ linkmode_zero(phy_support);
+ phylink_set(phy_support, 1000baseX_Full);
+ phylink_set(phy_support, 1000baseT_Full);
+ phylink_set(phy_support, Autoneg);
+ phylink_set(phy_support, Pause);
+ phylink_set(phy_support, Asym_Pause);
+
+ linkmode_zero(sfp_support);
+ sfp_parse_support(phydev->sfp_bus, id, sfp_support);
+ /* Some modules support 10G modes in addition to the modes we support.
+ * Mask out the unsupported modes so the correct interface is picked.
+ */
+ linkmode_and(sfp_support, phy_support, sfp_support);
+
+ if (linkmode_empty(sfp_support)) {
+ dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
+ return -EINVAL;
+ }
+
+ iface = sfp_select_interface(phydev->sfp_bus, sfp_support);
+
+ /* Only 1000Base-X is supported by AR8031/8033 as the downstream SerDes
+ * interface for use with SFP modules.
+ * However, some copper modules detected as having a preferred SGMII
+ * interface do default to and function in 1000Base-X mode, so just
+ * print a warning and allow such modules, as they may have some chance
+ * of working.
+ */
+ if (iface == PHY_INTERFACE_MODE_SGMII)
+ dev_warn(&phydev->mdio.dev, "module may not function if 1000Base-X not supported\n");
+ else if (iface != PHY_INTERFACE_MODE_1000BASEX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct sfp_upstream_ops at803x_sfp_ops = {
+ .attach = phy_sfp_attach,
+ .detach = phy_sfp_detach,
+ .module_insert = at803x_sfp_insert,
+};
+
static int at803x_parse_dt(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
@@ -757,6 +822,11 @@ static int at803x_parse_dt(struct phy_device *phydev)
phydev_err(phydev, "failed to get VDDIO regulator\n");
return PTR_ERR(priv->vddio);
}
+
+ /* Only AR8031/8033 support 1000Base-X for SFP modules */
+ ret = phy_sfp_probe(phydev, &at803x_sfp_ops);
+ if (ret < 0)
+ return ret;
}
return 0;
@@ -784,16 +854,24 @@ static int at803x_probe(struct phy_device *phydev)
return ret;
}
- /* Some bootloaders leave the fiber page selected.
- * Switch to the copper page, as otherwise we read
- * the PHY capabilities from the fiber side.
- */
if (phydev->drv->phy_id == ATH8031_PHY_ID) {
- phy_lock_mdio_bus(phydev);
- ret = at803x_write_page(phydev, AT803X_PAGE_COPPER);
- phy_unlock_mdio_bus(phydev);
- if (ret)
+ int ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+ int mode_cfg;
+
+ if (ccr < 0)
goto err;
+ mode_cfg = ccr & AT803X_MODE_CFG_MASK;
+
+ switch (mode_cfg) {
+ case AT803X_MODE_CFG_BX1000_RGMII_50OHM:
+ case AT803X_MODE_CFG_BX1000_RGMII_75OHM:
+ priv->is_1000basex = true;
+ fallthrough;
+ case AT803X_MODE_CFG_FX100_RGMII_50OHM:
+ case AT803X_MODE_CFG_FX100_RGMII_75OHM:
+ priv->is_fiber = true;
+ break;
+ }
}
return 0;
@@ -815,6 +893,7 @@ static void at803x_remove(struct phy_device *phydev)
static int at803x_get_features(struct phy_device *phydev)
{
+ struct at803x_priv *priv = phydev->priv;
int err;
err = genphy_read_abilities(phydev);
@@ -841,12 +920,13 @@ static int at803x_get_features(struct phy_device *phydev)
* As a result of that, ESTATUS_1000_XFULL is set
* to 1 even when operating in copper TP mode.
*
- * Remove this mode from the supported link modes,
- * as this driver currently only supports copper
- * operation.
+ * Remove this mode from the supported link modes
+ * when not operating in 1000BaseX mode.
*/
- linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
- phydev->supported);
+ if (!priv->is_1000basex)
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ phydev->supported);
+
return 0;
}
@@ -910,8 +990,27 @@ static int at8031_pll_config(struct phy_device *phydev)
static int at803x_config_init(struct phy_device *phydev)
{
+ struct at803x_priv *priv = phydev->priv;
int ret;
+ if (phydev->drv->phy_id == ATH8031_PHY_ID) {
+ /* Some bootloaders leave the fiber page selected.
+ * Switch to the appropriate page (fiber or copper), as otherwise we
+ * read the PHY capabilities from the wrong page.
+ */
+ phy_lock_mdio_bus(phydev);
+ ret = at803x_write_page(phydev,
+ priv->is_fiber ? AT803X_PAGE_FIBER :
+ AT803X_PAGE_COPPER);
+ phy_unlock_mdio_bus(phydev);
+ if (ret)
+ return ret;
+
+ ret = at8031_pll_config(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
/* The RX and TX delay default is:
* after HW reset: RX delay enabled and TX delay disabled
* after SW reset: RX delay enabled, while TX delay retains the
@@ -941,12 +1040,6 @@ static int at803x_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
- if (phydev->drv->phy_id == ATH8031_PHY_ID) {
- ret = at8031_pll_config(phydev);
- if (ret < 0)
- return ret;
- }
-
/* Ar803x extended next page bit is enabled by default. Cisco
* multigig switches read this bit and attempt to negotiate 10Gbps
* rates even if the next page bit is disabled. This is incorrect
@@ -967,6 +1060,7 @@ static int at803x_ack_interrupt(struct phy_device *phydev)
static int at803x_config_intr(struct phy_device *phydev)
{
+ struct at803x_priv *priv = phydev->priv;
int err;
int value;
@@ -983,6 +1077,10 @@ static int at803x_config_intr(struct phy_device *phydev)
value |= AT803X_INTR_ENABLE_DUPLEX_CHANGED;
value |= AT803X_INTR_ENABLE_LINK_FAIL;
value |= AT803X_INTR_ENABLE_LINK_SUCCESS;
+ if (priv->is_fiber) {
+ value |= AT803X_INTR_ENABLE_LINK_FAIL_BX;
+ value |= AT803X_INTR_ENABLE_LINK_SUCCESS_BX;
+ }
err = phy_write(phydev, AT803X_INTR_ENABLE, value);
} else {
@@ -1115,8 +1213,12 @@ static int at803x_read_specific_status(struct phy_device *phydev)
static int at803x_read_status(struct phy_device *phydev)
{
+ struct at803x_priv *priv = phydev->priv;
int err, old_link = phydev->link;
+ if (priv->is_1000basex)
+ return genphy_c37_read_status(phydev);
+
/* Update the link, but return if there was an error */
err = genphy_update_link(phydev);
if (err)
@@ -1170,6 +1272,7 @@ static int at803x_config_mdix(struct phy_device *phydev, u8 ctrl)
static int at803x_config_aneg(struct phy_device *phydev)
{
+ struct at803x_priv *priv = phydev->priv;
int ret;
ret = at803x_config_mdix(phydev, phydev->mdix_ctrl);
@@ -1186,6 +1289,9 @@ static int at803x_config_aneg(struct phy_device *phydev)
return ret;
}
+ if (priv->is_1000basex)
+ return genphy_c37_config_aneg(phydev);
+
/* Do not restart auto-negotiation by default (ret stays 0)
* when calling __genphy_config_aneg later.
*/
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index c2d1a85ec559..ef8b14135133 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -886,7 +886,7 @@ out:
spin_unlock_irqrestore(&dp83640->rx_lock, flags);
if (shhwtstamps)
- netif_rx_ni(skb);
+ netif_rx(skb);
}
static void decode_txts(struct dp83640_private *dp83640,
@@ -970,17 +970,6 @@ static void decode_status_frame(struct dp83640_private *dp83640,
}
}
-static int is_sync(struct sk_buff *skb, int type)
-{
- struct ptp_header *hdr;
-
- hdr = ptp_parse_header(skb, type);
- if (!hdr)
- return 0;
-
- return ptp_get_msgtype(hdr, type) == PTP_MSGTYPE_SYNC;
-}
-
static void dp83640_free_clocks(void)
{
struct dp83640_clock *clock;
@@ -1329,7 +1318,7 @@ static void rx_timestamp_work(struct work_struct *work)
break;
}
- netif_rx_ni(skb);
+ netif_rx(skb);
}
if (!skb_queue_empty(&dp83640->rx_queue))
@@ -1380,7 +1369,7 @@ static bool dp83640_rxtstamp(struct mii_timestamper *mii_ts,
skb_queue_tail(&dp83640->rx_queue, skb);
schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
} else {
- netif_rx_ni(skb);
+ netif_rx(skb);
}
return true;
@@ -1396,7 +1385,7 @@ static void dp83640_txtstamp(struct mii_timestamper *mii_ts,
switch (dp83640->hwts_tx_en) {
case HWTSTAMP_TX_ONESTEP_SYNC:
- if (is_sync(skb, type)) {
+ if (ptp_msg_is_sync(skb, type)) {
kfree_skb(skb);
return;
}
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index a7ebcdab415b..19b11e896460 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -28,6 +28,10 @@
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/ptp_clock.h>
+#include <linux/ptp_classify.h>
+#include <linux/net_tstamp.h>
/* Operation Mode Strap Override */
#define MII_KSZPHY_OMSO 0x16
@@ -79,6 +83,119 @@
#define LAN8814_INTR_CTRL_REG_POLARITY BIT(1)
#define LAN8814_INTR_CTRL_REG_INTR_ENABLE BIT(0)
+/* Represents a 1 ppm adjustment in 2^32 format, where
+ * each nsec contains 4 clock cycles.
+ * The value is calculated as follows: (1/1000000)/((2^-32)/4)
+ */
+#define LAN8814_1PPM_FORMAT 17179
+
+#define PTP_RX_MOD 0x024F
+#define PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_ BIT(3)
+#define PTP_RX_TIMESTAMP_EN 0x024D
+#define PTP_TX_TIMESTAMP_EN 0x028D
+
+#define PTP_TIMESTAMP_EN_SYNC_ BIT(0)
+#define PTP_TIMESTAMP_EN_DREQ_ BIT(1)
+#define PTP_TIMESTAMP_EN_PDREQ_ BIT(2)
+#define PTP_TIMESTAMP_EN_PDRES_ BIT(3)
+
+#define PTP_RX_LATENCY_1000 0x0224
+#define PTP_TX_LATENCY_1000 0x0225
+
+#define PTP_RX_LATENCY_100 0x0222
+#define PTP_TX_LATENCY_100 0x0223
+
+#define PTP_RX_LATENCY_10 0x0220
+#define PTP_TX_LATENCY_10 0x0221
+
+#define PTP_TX_PARSE_L2_ADDR_EN 0x0284
+#define PTP_RX_PARSE_L2_ADDR_EN 0x0244
+
+#define PTP_TX_PARSE_IP_ADDR_EN 0x0285
+#define PTP_RX_PARSE_IP_ADDR_EN 0x0245
+#define LTC_HARD_RESET 0x023F
+#define LTC_HARD_RESET_ BIT(0)
+
+#define TSU_HARD_RESET 0x02C1
+#define TSU_HARD_RESET_ BIT(0)
+
+#define PTP_CMD_CTL 0x0200
+#define PTP_CMD_CTL_PTP_DISABLE_ BIT(0)
+#define PTP_CMD_CTL_PTP_ENABLE_ BIT(1)
+#define PTP_CMD_CTL_PTP_CLOCK_READ_ BIT(3)
+#define PTP_CMD_CTL_PTP_CLOCK_LOAD_ BIT(4)
+#define PTP_CMD_CTL_PTP_LTC_STEP_SEC_ BIT(5)
+#define PTP_CMD_CTL_PTP_LTC_STEP_NSEC_ BIT(6)
+
+#define PTP_CLOCK_SET_SEC_MID 0x0206
+#define PTP_CLOCK_SET_SEC_LO 0x0207
+#define PTP_CLOCK_SET_NS_HI 0x0208
+#define PTP_CLOCK_SET_NS_LO 0x0209
+
+#define PTP_CLOCK_READ_SEC_MID 0x022A
+#define PTP_CLOCK_READ_SEC_LO 0x022B
+#define PTP_CLOCK_READ_NS_HI 0x022C
+#define PTP_CLOCK_READ_NS_LO 0x022D
+
+#define PTP_OPERATING_MODE 0x0241
+#define PTP_OPERATING_MODE_STANDALONE_ BIT(0)
+
+#define PTP_TX_MOD 0x028F
+#define PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_ BIT(12)
+#define PTP_TX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_ BIT(3)
+
+#define PTP_RX_PARSE_CONFIG 0x0242
+#define PTP_RX_PARSE_CONFIG_LAYER2_EN_ BIT(0)
+#define PTP_RX_PARSE_CONFIG_IPV4_EN_ BIT(1)
+#define PTP_RX_PARSE_CONFIG_IPV6_EN_ BIT(2)
+
+#define PTP_TX_PARSE_CONFIG 0x0282
+#define PTP_TX_PARSE_CONFIG_LAYER2_EN_ BIT(0)
+#define PTP_TX_PARSE_CONFIG_IPV4_EN_ BIT(1)
+#define PTP_TX_PARSE_CONFIG_IPV6_EN_ BIT(2)
+
+#define PTP_CLOCK_RATE_ADJ_HI 0x020C
+#define PTP_CLOCK_RATE_ADJ_LO 0x020D
+#define PTP_CLOCK_RATE_ADJ_DIR_ BIT(15)
+
+#define PTP_LTC_STEP_ADJ_HI 0x0212
+#define PTP_LTC_STEP_ADJ_LO 0x0213
+#define PTP_LTC_STEP_ADJ_DIR_ BIT(15)
+
+#define LAN8814_INTR_STS_REG 0x0033
+#define LAN8814_INTR_STS_REG_1588_TSU0_ BIT(0)
+#define LAN8814_INTR_STS_REG_1588_TSU1_ BIT(1)
+#define LAN8814_INTR_STS_REG_1588_TSU2_ BIT(2)
+#define LAN8814_INTR_STS_REG_1588_TSU3_ BIT(3)
+
+#define PTP_CAP_INFO 0x022A
+#define PTP_CAP_INFO_TX_TS_CNT_GET_(reg_val) (((reg_val) & 0x0f00) >> 8)
+#define PTP_CAP_INFO_RX_TS_CNT_GET_(reg_val) ((reg_val) & 0x000f)
+
+#define PTP_TX_EGRESS_SEC_HI 0x0296
+#define PTP_TX_EGRESS_SEC_LO 0x0297
+#define PTP_TX_EGRESS_NS_HI 0x0294
+#define PTP_TX_EGRESS_NS_LO 0x0295
+#define PTP_TX_MSG_HEADER2 0x0299
+
+#define PTP_RX_INGRESS_SEC_HI 0x0256
+#define PTP_RX_INGRESS_SEC_LO 0x0257
+#define PTP_RX_INGRESS_NS_HI 0x0254
+#define PTP_RX_INGRESS_NS_LO 0x0255
+#define PTP_RX_MSG_HEADER2 0x0259
+
+#define PTP_TSU_INT_EN 0x0200
+#define PTP_TSU_INT_EN_PTP_TX_TS_OVRFL_EN_ BIT(3)
+#define PTP_TSU_INT_EN_PTP_TX_TS_EN_ BIT(2)
+#define PTP_TSU_INT_EN_PTP_RX_TS_OVRFL_EN_ BIT(1)
+#define PTP_TSU_INT_EN_PTP_RX_TS_EN_ BIT(0)
+
+#define PTP_TSU_INT_STS 0x0201
+#define PTP_TSU_INT_STS_PTP_TX_TS_OVRFL_INT_ BIT(3)
+#define PTP_TSU_INT_STS_PTP_TX_TS_EN_ BIT(2)
+#define PTP_TSU_INT_STS_PTP_RX_TS_OVRFL_INT_ BIT(1)
+#define PTP_TSU_INT_STS_PTP_RX_TS_EN_ BIT(0)
+
/* PHY Control 1 */
#define MII_KSZPHY_CTRL_1 0x1e
#define KSZ8081_CTRL1_MDIX_STAT BIT(4)
@@ -108,6 +225,7 @@
#define MII_KSZPHY_TX_DATA_PAD_SKEW 0x106
#define PS_TO_REG 200
+#define FIFO_SIZE 8
struct kszphy_hw_stat {
const char *string;
@@ -128,7 +246,57 @@ struct kszphy_type {
bool has_rmii_ref_clk_sel;
};
+/* Shared structure between the PHYs of the same package. */
+struct lan8814_shared_priv {
+ struct phy_device *phydev;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+
+ /* Reference count of how many ports in the package have
+ * timestamping enabled
+ */
+ u8 ref;
+
+ /* Lock for ptp_clock and ref */
+ struct mutex shared_lock;
+};
+
+struct lan8814_ptp_rx_ts {
+ struct list_head list;
+ u32 seconds;
+ u32 nsec;
+ u16 seq_id;
+};
+
+struct kszphy_latencies {
+ u16 rx_10;
+ u16 tx_10;
+ u16 rx_100;
+ u16 tx_100;
+ u16 rx_1000;
+ u16 tx_1000;
+};
+
+struct kszphy_ptp_priv {
+ struct mii_timestamper mii_ts;
+ struct phy_device *phydev;
+
+ struct sk_buff_head tx_queue;
+ struct sk_buff_head rx_queue;
+
+ struct list_head rx_ts_list;
+ /* Lock for Rx ts fifo */
+ spinlock_t rx_ts_lock;
+
+ int hwts_tx_type;
+ enum hwtstamp_rx_filters rx_filter;
+ int layer;
+ int version;
+};
+
struct kszphy_priv {
+ struct kszphy_ptp_priv ptp_priv;
+ struct kszphy_latencies latencies;
const struct kszphy_type *type;
int led_mode;
bool rmii_ref_clk_sel;
@@ -136,6 +304,14 @@ struct kszphy_priv {
u64 stats[ARRAY_SIZE(kszphy_hw_stats)];
};
+static struct kszphy_latencies lan8814_latencies = {
+ .rx_10 = 0x22AA,
+ .tx_10 = 0x2E4A,
+ .rx_100 = 0x092A,
+ .tx_100 = 0x02C1,
+ .rx_1000 = 0x01AD,
+ .tx_1000 = 0x00C9,
+};
static const struct kszphy_type ksz8021_type = {
.led_mode_reg = MII_KSZPHY_CTRL_2,
.has_broadcast_disable = true,
@@ -1596,11 +1772,13 @@ static int lanphy_read_page_reg(struct phy_device *phydev, int page, u32 addr)
{
u32 data;
- phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
- phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr);
- phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL,
- (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC));
- data = phy_read(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA);
+ phy_lock_mdio_bus(phydev);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL,
+ (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC));
+ data = __phy_read(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA);
+ phy_unlock_mdio_bus(phydev);
return data;
}
@@ -1608,43 +1786,670 @@ static int lanphy_read_page_reg(struct phy_device *phydev, int page, u32 addr)
static int lanphy_write_page_reg(struct phy_device *phydev, int page, u16 addr,
u16 val)
{
- phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
- phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr);
- phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL,
- (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC));
+ phy_lock_mdio_bus(phydev);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL,
+ page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC);
- val = phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, val);
- if (val) {
+ val = __phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, val);
+ if (val != 0)
phydev_err(phydev, "Error: phy_write has returned error %d\n",
val);
- return val;
+ phy_unlock_mdio_bus(phydev);
+ return val;
+}
+
+static int lan8814_config_ts_intr(struct phy_device *phydev, bool enable)
+{
+ u16 val = 0;
+
+ if (enable)
+ val = PTP_TSU_INT_EN_PTP_TX_TS_EN_ |
+ PTP_TSU_INT_EN_PTP_TX_TS_OVRFL_EN_ |
+ PTP_TSU_INT_EN_PTP_RX_TS_EN_ |
+ PTP_TSU_INT_EN_PTP_RX_TS_OVRFL_EN_;
+
+ return lanphy_write_page_reg(phydev, 5, PTP_TSU_INT_EN, val);
+}
+
+static void lan8814_ptp_rx_ts_get(struct phy_device *phydev,
+ u32 *seconds, u32 *nano_seconds, u16 *seq_id)
+{
+ *seconds = lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_SEC_HI);
+ *seconds = (*seconds << 16) |
+ lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_SEC_LO);
+
+ *nano_seconds = lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_NS_HI);
+ *nano_seconds = ((*nano_seconds & 0x3fff) << 16) |
+ lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_NS_LO);
+
+ *seq_id = lanphy_read_page_reg(phydev, 5, PTP_RX_MSG_HEADER2);
+}
+
+static void lan8814_ptp_tx_ts_get(struct phy_device *phydev,
+ u32 *seconds, u32 *nano_seconds, u16 *seq_id)
+{
+ *seconds = lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_SEC_HI);
+ *seconds = *seconds << 16 |
+ lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_SEC_LO);
+
+ *nano_seconds = lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_NS_HI);
+ *nano_seconds = ((*nano_seconds & 0x3fff) << 16) |
+ lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_NS_LO);
+
+ *seq_id = lanphy_read_page_reg(phydev, 5, PTP_TX_MSG_HEADER2);
+}
+
+static int lan8814_ts_info(struct mii_timestamper *mii_ts, struct ethtool_ts_info *info)
+{
+ struct kszphy_ptp_priv *ptp_priv = container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
+ struct phy_device *phydev = ptp_priv->phydev;
+ struct lan8814_shared_priv *shared = phydev->shared->priv;
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = ptp_clock_index(shared->ptp_clock);
+
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON) |
+ (1 << HWTSTAMP_TX_ONESTEP_SYNC);
+
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+static void lan8814_flush_fifo(struct phy_device *phydev, bool egress)
+{
+ int i;
+
+ for (i = 0; i < FIFO_SIZE; ++i)
+ lanphy_read_page_reg(phydev, 5,
+ egress ? PTP_TX_MSG_HEADER2 : PTP_RX_MSG_HEADER2);
+
+ /* Read to clear overflow status bit */
+ lanphy_read_page_reg(phydev, 5, PTP_TSU_INT_STS);
+}
+
+static int lan8814_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
+{
+ struct kszphy_ptp_priv *ptp_priv =
+ container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
+ struct phy_device *phydev = ptp_priv->phydev;
+ struct lan8814_shared_priv *shared = phydev->shared->priv;
+ struct lan8814_ptp_rx_ts *rx_ts, *tmp;
+ struct hwtstamp_config config;
+ int txcfg = 0, rxcfg = 0;
+ int pkt_ts_enable;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ ptp_priv->hwts_tx_type = config.tx_type;
+ ptp_priv->rx_filter = config.rx_filter;
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ ptp_priv->layer = 0;
+ ptp_priv->version = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ ptp_priv->layer = PTP_CLASS_L4;
+ ptp_priv->version = PTP_CLASS_V2;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ ptp_priv->layer = PTP_CLASS_L2;
+ ptp_priv->version = PTP_CLASS_V2;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ ptp_priv->layer = PTP_CLASS_L4 | PTP_CLASS_L2;
+ ptp_priv->version = PTP_CLASS_V2;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (ptp_priv->layer & PTP_CLASS_L2) {
+ rxcfg = PTP_RX_PARSE_CONFIG_LAYER2_EN_;
+ txcfg = PTP_TX_PARSE_CONFIG_LAYER2_EN_;
+ } else if (ptp_priv->layer & PTP_CLASS_L4) {
+ rxcfg |= PTP_RX_PARSE_CONFIG_IPV4_EN_ | PTP_RX_PARSE_CONFIG_IPV6_EN_;
+ txcfg |= PTP_TX_PARSE_CONFIG_IPV4_EN_ | PTP_TX_PARSE_CONFIG_IPV6_EN_;
}
+ lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_RX_PARSE_CONFIG, rxcfg);
+ lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_PARSE_CONFIG, txcfg);
+
+ pkt_ts_enable = PTP_TIMESTAMP_EN_SYNC_ | PTP_TIMESTAMP_EN_DREQ_ |
+ PTP_TIMESTAMP_EN_PDREQ_ | PTP_TIMESTAMP_EN_PDRES_;
+ lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_RX_TIMESTAMP_EN, pkt_ts_enable);
+ lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_TIMESTAMP_EN, pkt_ts_enable);
+
+ if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD,
+ PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
+
+ if (config.rx_filter != HWTSTAMP_FILTER_NONE)
+ lan8814_config_ts_intr(ptp_priv->phydev, true);
+ else
+ lan8814_config_ts_intr(ptp_priv->phydev, false);
+
+ mutex_lock(&shared->shared_lock);
+ if (config.rx_filter != HWTSTAMP_FILTER_NONE)
+ shared->ref++;
+ else
+ shared->ref--;
+
+ if (shared->ref)
+ lanphy_write_page_reg(ptp_priv->phydev, 4, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_ENABLE_);
+ else
+ lanphy_write_page_reg(ptp_priv->phydev, 4, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_DISABLE_);
+ mutex_unlock(&shared->shared_lock);
+
+ /* In case of multiple starts and stops, these need to be cleared */
+ list_for_each_entry_safe(rx_ts, tmp, &ptp_priv->rx_ts_list, list) {
+ list_del(&rx_ts->list);
+ kfree(rx_ts);
+ }
+ skb_queue_purge(&ptp_priv->rx_queue);
+ skb_queue_purge(&ptp_priv->tx_queue);
+
+ lan8814_flush_fifo(ptp_priv->phydev, false);
+ lan8814_flush_fifo(ptp_priv->phydev, true);
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
+}
+
+static void lan8814_txtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct kszphy_ptp_priv *ptp_priv = container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
+
+ switch (ptp_priv->hwts_tx_type) {
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ if (ptp_msg_is_sync(skb, type)) {
+ kfree_skb(skb);
+ return;
+ }
+ fallthrough;
+ case HWTSTAMP_TX_ON:
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ skb_queue_tail(&ptp_priv->tx_queue, skb);
+ break;
+ case HWTSTAMP_TX_OFF:
+ default:
+ kfree_skb(skb);
+ break;
+ }
+}
+
+static void lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig)
+{
+ struct ptp_header *ptp_header;
+ u32 type;
+
+ skb_push(skb, ETH_HLEN);
+ type = ptp_classify_raw(skb);
+ ptp_header = ptp_parse_header(skb, type);
+ skb_pull_inline(skb, ETH_HLEN);
+
+ *sig = (__force u16)(ntohs(ptp_header->sequence_id));
+}
+
+static bool lan8814_match_rx_ts(struct kszphy_ptp_priv *ptp_priv,
+ struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct lan8814_ptp_rx_ts *rx_ts, *tmp;
+ unsigned long flags;
+ bool ret = false;
+ u16 skb_sig;
+
+ lan8814_get_sig_rx(skb, &skb_sig);
+
+ /* Iterate over all RX timestamps and match them against the received skb */
+ spin_lock_irqsave(&ptp_priv->rx_ts_lock, flags);
+ list_for_each_entry_safe(rx_ts, tmp, &ptp_priv->rx_ts_list, list) {
+ /* Check if we found the signature we were looking for. */
+ if (memcmp(&skb_sig, &rx_ts->seq_id, sizeof(rx_ts->seq_id)))
+ continue;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds,
+ rx_ts->nsec);
+ list_del(&rx_ts->list);
+ kfree(rx_ts);
+
+ ret = true;
+ break;
+ }
+ spin_unlock_irqrestore(&ptp_priv->rx_ts_lock, flags);
+
+ if (ret)
+ netif_rx(skb);
+ return ret;
+}
+
+static bool lan8814_rxtstamp(struct mii_timestamper *mii_ts, struct sk_buff *skb, int type)
+{
+ struct kszphy_ptp_priv *ptp_priv =
+ container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
+
+ if (ptp_priv->rx_filter == HWTSTAMP_FILTER_NONE ||
+ type == PTP_CLASS_NONE)
+ return false;
+
+ if ((type & ptp_priv->version) == 0 || (type & ptp_priv->layer) == 0)
+ return false;
+
+ /* If we failed to match a timestamp, queue the skb until its
+ * timestamp arrives
+ */
+ if (!lan8814_match_rx_ts(ptp_priv, skb))
+ skb_queue_tail(&ptp_priv->rx_queue, skb);
+
+ return true;
+}
+
+static void lan8814_ptp_clock_set(struct phy_device *phydev,
+ u32 seconds, u32 nano_seconds)
+{
+ u32 sec_low, sec_high, nsec_low, nsec_high;
+
+ sec_low = seconds & 0xffff;
+ sec_high = (seconds >> 16) & 0xffff;
+ nsec_low = nano_seconds & 0xffff;
+ nsec_high = (nano_seconds >> 16) & 0x3fff;
+
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_LO, sec_low);
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_MID, sec_high);
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_LO, nsec_low);
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_HI, nsec_high);
+
+ lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_LOAD_);
+}
+
+static void lan8814_ptp_clock_get(struct phy_device *phydev,
+ u32 *seconds, u32 *nano_seconds)
+{
+ lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_READ_);
+
+ *seconds = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_MID);
+ *seconds = (*seconds << 16) |
+ lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_LO);
+
+ *nano_seconds = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_HI);
+ *nano_seconds = ((*nano_seconds & 0x3fff) << 16) |
+ lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_LO);
+}
+
+static int lan8814_ptpci_gettime64(struct ptp_clock_info *ptpci,
+ struct timespec64 *ts)
+{
+ struct lan8814_shared_priv *shared = container_of(ptpci, struct lan8814_shared_priv,
+ ptp_clock_info);
+ struct phy_device *phydev = shared->phydev;
+ u32 nano_seconds;
+ u32 seconds;
+
+ mutex_lock(&shared->shared_lock);
+ lan8814_ptp_clock_get(phydev, &seconds, &nano_seconds);
+ mutex_unlock(&shared->shared_lock);
+ ts->tv_sec = seconds;
+ ts->tv_nsec = nano_seconds;
+
return 0;
}
-static int lan8814_config_init(struct phy_device *phydev)
+static int lan8814_ptpci_settime64(struct ptp_clock_info *ptpci,
+ const struct timespec64 *ts)
{
- int val;
+ struct lan8814_shared_priv *shared = container_of(ptpci, struct lan8814_shared_priv,
+ ptp_clock_info);
+ struct phy_device *phydev = shared->phydev;
- /* Reset the PHY */
- val = lanphy_read_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET);
- val |= LAN8814_QSGMII_SOFT_RESET_BIT;
- lanphy_write_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET, val);
+ mutex_lock(&shared->shared_lock);
+ lan8814_ptp_clock_set(phydev, ts->tv_sec, ts->tv_nsec);
+ mutex_unlock(&shared->shared_lock);
- /* Disable ANEG with QSGMII PCS Host side */
- val = lanphy_read_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG);
- val &= ~LAN8814_QSGMII_PCS1G_ANEG_CONFIG_ANEG_ENA;
- lanphy_write_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG, val);
+ return 0;
+}
- /* MDI-X setting for swap A,B transmit */
- val = lanphy_read_page_reg(phydev, 2, LAN8814_ALIGN_SWAP);
- val &= ~LAN8814_ALIGN_TX_A_B_SWAP_MASK;
- val |= LAN8814_ALIGN_TX_A_B_SWAP;
- lanphy_write_page_reg(phydev, 2, LAN8814_ALIGN_SWAP, val);
+static void lan8814_ptp_clock_step(struct phy_device *phydev,
+ s64 time_step_ns)
+{
+ u32 nano_seconds_step;
+ u64 abs_time_step_ns;
+ u32 unsigned_seconds;
+ u32 nano_seconds;
+ u32 remainder;
+ s32 seconds;
+
+ if (time_step_ns > 15000000000LL) {
+ /* convert to clock set */
+ lan8814_ptp_clock_get(phydev, &unsigned_seconds, &nano_seconds);
+ unsigned_seconds += div_u64_rem(time_step_ns, 1000000000LL,
+ &remainder);
+ nano_seconds += remainder;
+ if (nano_seconds >= 1000000000) {
+ unsigned_seconds++;
+ nano_seconds -= 1000000000;
+ }
+ lan8814_ptp_clock_set(phydev, unsigned_seconds, nano_seconds);
+ return;
+ } else if (time_step_ns < -15000000000LL) {
+ /* convert to clock set */
+ time_step_ns = -time_step_ns;
+
+ lan8814_ptp_clock_get(phydev, &unsigned_seconds, &nano_seconds);
+ unsigned_seconds -= div_u64_rem(time_step_ns, 1000000000LL,
+ &remainder);
+ nano_seconds_step = remainder;
+ if (nano_seconds < nano_seconds_step) {
+ unsigned_seconds--;
+ nano_seconds += 1000000000;
+ }
+ nano_seconds -= nano_seconds_step;
+ lan8814_ptp_clock_set(phydev, unsigned_seconds,
+ nano_seconds);
+ return;
+ }
+
+ /* do clock step */
+ if (time_step_ns >= 0) {
+ abs_time_step_ns = (u64)time_step_ns;
+ seconds = (s32)div_u64_rem(abs_time_step_ns, 1000000000,
+ &remainder);
+ nano_seconds = remainder;
+ } else {
+ abs_time_step_ns = (u64)(-time_step_ns);
+ seconds = -((s32)div_u64_rem(abs_time_step_ns, 1000000000,
+ &remainder));
+ nano_seconds = remainder;
+ if (nano_seconds > 0) {
+ /* Subtracting nanoseconds is not allowed;
+ * borrow one second and add the complement
+ * to the nanoseconds instead
+ */
+ seconds--;
+ nano_seconds = (1000000000 - nano_seconds);
+ }
+ }
+
+ if (nano_seconds > 0) {
+ /* add 8 ns to cover the likely normal increment */
+ nano_seconds += 8;
+ }
+
+ if (nano_seconds >= 1000000000) {
+ /* carry into seconds */
+ seconds++;
+ nano_seconds -= 1000000000;
+ }
+
+ while (seconds) {
+ if (seconds > 0) {
+ u32 adjustment_value = (u32)seconds;
+ u16 adjustment_value_lo, adjustment_value_hi;
+
+ if (adjustment_value > 0xF)
+ adjustment_value = 0xF;
+
+ adjustment_value_lo = adjustment_value & 0xffff;
+ adjustment_value_hi = (adjustment_value >> 16) & 0x3fff;
+
+ lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_LO,
+ adjustment_value_lo);
+ lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI,
+ PTP_LTC_STEP_ADJ_DIR_ |
+ adjustment_value_hi);
+ seconds -= ((s32)adjustment_value);
+ } else {
+ u32 adjustment_value = (u32)(-seconds);
+ u16 adjustment_value_lo, adjustment_value_hi;
+
+ if (adjustment_value > 0xF)
+ adjustment_value = 0xF;
+
+ adjustment_value_lo = adjustment_value & 0xffff;
+ adjustment_value_hi = (adjustment_value >> 16) & 0x3fff;
+
+ lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_LO,
+ adjustment_value_lo);
+ lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI,
+ adjustment_value_hi);
+ seconds += ((s32)adjustment_value);
+ }
+ lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_LTC_STEP_SEC_);
+ }
+ if (nano_seconds) {
+ u16 nano_seconds_lo;
+ u16 nano_seconds_hi;
+
+ nano_seconds_lo = nano_seconds & 0xffff;
+ nano_seconds_hi = (nano_seconds >> 16) & 0x3fff;
+
+ lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_LO,
+ nano_seconds_lo);
+ lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI,
+ PTP_LTC_STEP_ADJ_DIR_ |
+ nano_seconds_hi);
+ lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_LTC_STEP_NSEC_);
+ }
+}
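/* Illustration only, not part of this patch: a minimal standalone sketch
 * (hypothetical helper name) of the sign normalization that
 * lan8814_ptp_clock_step() performs for small steps. The hardware cannot
 * subtract nanoseconds, so a negative step becomes one extra borrowed
 * second plus a positive nanosecond remainder; the driver additionally
 * adds 8 ns to cover the free-running increment, which is omitted here.
 */
#include <stdint.h>
#include <stdio.h>

static void split_step(int64_t step_ns, int32_t *sec, uint32_t *nsec)
{
	uint64_t abs_ns = step_ns >= 0 ? (uint64_t)step_ns : (uint64_t)(-step_ns);

	*sec = (int32_t)(abs_ns / 1000000000ULL);	/* whole seconds */
	*nsec = (uint32_t)(abs_ns % 1000000000ULL);	/* remainder */

	if (step_ns < 0) {
		*sec = -*sec;
		if (*nsec) {
			/* borrow a second instead of subtracting ns */
			(*sec)--;
			*nsec = 1000000000U - *nsec;
		}
	}
}

int main(void)
{
	int32_t sec;
	uint32_t nsec;

	split_step(-1300000000LL, &sec, &nsec);
	printf("%d s + %u ns\n", sec, nsec);	/* prints: -2 s + 700000000 ns */
	return 0;
}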
+
+static int lan8814_ptpci_adjtime(struct ptp_clock_info *ptpci, s64 delta)
+{
+ struct lan8814_shared_priv *shared = container_of(ptpci, struct lan8814_shared_priv,
+ ptp_clock_info);
+ struct phy_device *phydev = shared->phydev;
+
+ mutex_lock(&shared->shared_lock);
+ lan8814_ptp_clock_step(phydev, delta);
+ mutex_unlock(&shared->shared_lock);
+
+ return 0;
+}
+
+static int lan8814_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm)
+{
+ struct lan8814_shared_priv *shared = container_of(ptpci, struct lan8814_shared_priv,
+ ptp_clock_info);
+ struct phy_device *phydev = shared->phydev;
+ u16 kszphy_rate_adj_lo, kszphy_rate_adj_hi;
+ bool positive = true;
+ u32 kszphy_rate_adj;
+
+ if (scaled_ppm < 0) {
+ scaled_ppm = -scaled_ppm;
+ positive = false;
+ }
+
+ kszphy_rate_adj = LAN8814_1PPM_FORMAT * (scaled_ppm >> 16);
+ kszphy_rate_adj += (LAN8814_1PPM_FORMAT * (0xffff & scaled_ppm)) >> 16;
+
+ kszphy_rate_adj_lo = kszphy_rate_adj & 0xffff;
+ kszphy_rate_adj_hi = (kszphy_rate_adj >> 16) & 0x3fff;
+
+ if (positive)
+ kszphy_rate_adj_hi |= PTP_CLOCK_RATE_ADJ_DIR_;
+
+ mutex_lock(&shared->shared_lock);
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_RATE_ADJ_HI, kszphy_rate_adj_hi);
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_RATE_ADJ_LO, kszphy_rate_adj_lo);
+ mutex_unlock(&shared->shared_lock);
return 0;
}
+static void lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
+{
+ struct ptp_header *ptp_header;
+ u32 type;
+
+ type = ptp_classify_raw(skb);
+ ptp_header = ptp_parse_header(skb, type);
+
+ *sig = (__force u16)(ntohs(ptp_header->sequence_id));
+}
+
+static void lan8814_dequeue_tx_skb(struct kszphy_ptp_priv *ptp_priv)
+{
+ struct phy_device *phydev = ptp_priv->phydev;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sk_buff *skb, *skb_tmp;
+ unsigned long flags;
+ u32 seconds, nsec;
+ bool ret = false;
+ u16 skb_sig;
+ u16 seq_id;
+
+ lan8814_ptp_tx_ts_get(phydev, &seconds, &nsec, &seq_id);
+
+ spin_lock_irqsave(&ptp_priv->tx_queue.lock, flags);
+ skb_queue_walk_safe(&ptp_priv->tx_queue, skb, skb_tmp) {
+ lan8814_get_sig_tx(skb, &skb_sig);
+
+ if (memcmp(&skb_sig, &seq_id, sizeof(seq_id)))
+ continue;
+
+ __skb_unlink(skb, &ptp_priv->tx_queue);
+ ret = true;
+ break;
+ }
+ spin_unlock_irqrestore(&ptp_priv->tx_queue.lock, flags);
+
+ if (ret) {
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ktime_set(seconds, nsec);
+ skb_complete_tx_timestamp(skb, &shhwtstamps);
+ }
+}
+
+static void lan8814_get_tx_ts(struct kszphy_ptp_priv *ptp_priv)
+{
+ struct phy_device *phydev = ptp_priv->phydev;
+ u32 reg;
+
+ do {
+ lan8814_dequeue_tx_skb(ptp_priv);
+
+ /* If other timestamps are available in the FIFO,
+ * process them.
+ */
+ reg = lanphy_read_page_reg(phydev, 5, PTP_CAP_INFO);
+ } while (PTP_CAP_INFO_TX_TS_CNT_GET_(reg) > 0);
+}
+
+static bool lan8814_match_skb(struct kszphy_ptp_priv *ptp_priv,
+ struct lan8814_ptp_rx_ts *rx_ts)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct sk_buff *skb, *skb_tmp;
+ unsigned long flags;
+ bool ret = false;
+ u16 skb_sig;
+
+ spin_lock_irqsave(&ptp_priv->rx_queue.lock, flags);
+ skb_queue_walk_safe(&ptp_priv->rx_queue, skb, skb_tmp) {
+ lan8814_get_sig_rx(skb, &skb_sig);
+
+ if (memcmp(&skb_sig, &rx_ts->seq_id, sizeof(rx_ts->seq_id)))
+ continue;
+
+ __skb_unlink(skb, &ptp_priv->rx_queue);
+
+ ret = true;
+ break;
+ }
+ spin_unlock_irqrestore(&ptp_priv->rx_queue.lock, flags);
+
+ if (ret) {
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds, rx_ts->nsec);
+ netif_rx(skb);
+ }
+
+ return ret;
+}
+
+static void lan8814_get_rx_ts(struct kszphy_ptp_priv *ptp_priv)
+{
+ struct phy_device *phydev = ptp_priv->phydev;
+ struct lan8814_ptp_rx_ts *rx_ts;
+ unsigned long flags;
+ u32 reg;
+
+ do {
+ rx_ts = kzalloc(sizeof(*rx_ts), GFP_KERNEL);
+ if (!rx_ts)
+ return;
+
+ lan8814_ptp_rx_ts_get(phydev, &rx_ts->seconds, &rx_ts->nsec,
+ &rx_ts->seq_id);
+
+ /* If we failed to match an skb, keep the timestamp in the list
+ * until the frame arrives
+ */
+ if (!lan8814_match_skb(ptp_priv, rx_ts)) {
+ spin_lock_irqsave(&ptp_priv->rx_ts_lock, flags);
+ list_add(&rx_ts->list, &ptp_priv->rx_ts_list);
+ spin_unlock_irqrestore(&ptp_priv->rx_ts_lock, flags);
+ } else {
+ kfree(rx_ts);
+ }
+
+ /* If other timestamps are available in the FIFO,
+ * process them.
+ */
+ reg = lanphy_read_page_reg(phydev, 5, PTP_CAP_INFO);
+ } while (PTP_CAP_INFO_RX_TS_CNT_GET_(reg) > 0);
+}
+
+static void lan8814_handle_ptp_interrupt(struct phy_device *phydev)
+{
+ struct kszphy_priv *priv = phydev->priv;
+ struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv;
+ u16 status;
+
+ status = lanphy_read_page_reg(phydev, 5, PTP_TSU_INT_STS);
+ if (status & PTP_TSU_INT_STS_PTP_TX_TS_EN_)
+ lan8814_get_tx_ts(ptp_priv);
+
+ if (status & PTP_TSU_INT_STS_PTP_RX_TS_EN_)
+ lan8814_get_rx_ts(ptp_priv);
+
+ if (status & PTP_TSU_INT_STS_PTP_TX_TS_OVRFL_INT_) {
+ lan8814_flush_fifo(phydev, true);
+ skb_queue_purge(&ptp_priv->tx_queue);
+ }
+
+ if (status & PTP_TSU_INT_STS_PTP_RX_TS_OVRFL_INT_) {
+ lan8814_flush_fifo(phydev, false);
+ skb_queue_purge(&ptp_priv->rx_queue);
+ }
+}
+
static int lan8804_config_init(struct phy_device *phydev)
{
int val;
@@ -1666,17 +2471,31 @@ static int lan8804_config_init(struct phy_device *phydev)
static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
{
+ u16 tsu_irq_status;
int irq_status;
irq_status = phy_read(phydev, LAN8814_INTS);
- if (irq_status < 0)
- return IRQ_NONE;
+ if (irq_status > 0 && (irq_status & LAN8814_INT_LINK))
+ phy_trigger_machine(phydev);
- if (!(irq_status & LAN8814_INT_LINK))
+ if (irq_status < 0) {
+ phy_error(phydev);
return IRQ_NONE;
+ }
- phy_trigger_machine(phydev);
+ while (1) {
+ tsu_irq_status = lanphy_read_page_reg(phydev, 4,
+ LAN8814_INTR_STS_REG);
+ if (tsu_irq_status > 0 &&
+ (tsu_irq_status & (LAN8814_INTR_STS_REG_1588_TSU0_ |
+ LAN8814_INTR_STS_REG_1588_TSU1_ |
+ LAN8814_INTR_STS_REG_1588_TSU2_ |
+ LAN8814_INTR_STS_REG_1588_TSU3_)))
+ lan8814_handle_ptp_interrupt(phydev);
+ else
+ break;
+ }
return IRQ_HANDLED;
}
@@ -1716,6 +2535,223 @@ static int lan8814_config_intr(struct phy_device *phydev)
return err;
}
+static void lan8814_ptp_init(struct phy_device *phydev)
+{
+ struct kszphy_priv *priv = phydev->priv;
+ struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv;
+ u32 temp;
+
+ lanphy_write_page_reg(phydev, 5, TSU_HARD_RESET, TSU_HARD_RESET_);
+
+ temp = lanphy_read_page_reg(phydev, 5, PTP_TX_MOD);
+ temp |= PTP_TX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_;
+ lanphy_write_page_reg(phydev, 5, PTP_TX_MOD, temp);
+
+ temp = lanphy_read_page_reg(phydev, 5, PTP_RX_MOD);
+ temp |= PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_;
+ lanphy_write_page_reg(phydev, 5, PTP_RX_MOD, temp);
+
+ lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_CONFIG, 0);
+ lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_CONFIG, 0);
+
+ /* Remove the default register configs related to L2 and IP */
+ lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_L2_ADDR_EN, 0);
+ lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_L2_ADDR_EN, 0);
+ lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_IP_ADDR_EN, 0);
+ lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_IP_ADDR_EN, 0);
+
+ skb_queue_head_init(&ptp_priv->tx_queue);
+ skb_queue_head_init(&ptp_priv->rx_queue);
+ INIT_LIST_HEAD(&ptp_priv->rx_ts_list);
+ spin_lock_init(&ptp_priv->rx_ts_lock);
+
+ ptp_priv->phydev = phydev;
+
+ ptp_priv->mii_ts.rxtstamp = lan8814_rxtstamp;
+ ptp_priv->mii_ts.txtstamp = lan8814_txtstamp;
+ ptp_priv->mii_ts.hwtstamp = lan8814_hwtstamp;
+ ptp_priv->mii_ts.ts_info = lan8814_ts_info;
+
+ phydev->mii_ts = &ptp_priv->mii_ts;
+}
+
+static int lan8814_ptp_probe_once(struct phy_device *phydev)
+{
+ struct lan8814_shared_priv *shared = phydev->shared->priv;
+
+ /* Initialise the shared lock for the clock */
+ mutex_init(&shared->shared_lock);
+
+ shared->ptp_clock_info.owner = THIS_MODULE;
+ snprintf(shared->ptp_clock_info.name, 30, "%s", phydev->drv->name);
+ shared->ptp_clock_info.max_adj = 31249999;
+ shared->ptp_clock_info.n_alarm = 0;
+ shared->ptp_clock_info.n_ext_ts = 0;
+ shared->ptp_clock_info.n_pins = 0;
+ shared->ptp_clock_info.pps = 0;
+ shared->ptp_clock_info.pin_config = NULL;
+ shared->ptp_clock_info.adjfine = lan8814_ptpci_adjfine;
+ shared->ptp_clock_info.adjtime = lan8814_ptpci_adjtime;
+ shared->ptp_clock_info.gettime64 = lan8814_ptpci_gettime64;
+ shared->ptp_clock_info.settime64 = lan8814_ptpci_settime64;
+ shared->ptp_clock_info.getcrosststamp = NULL;
+
+ shared->ptp_clock = ptp_clock_register(&shared->ptp_clock_info,
+ &phydev->mdio.dev);
+ if (IS_ERR_OR_NULL(shared->ptp_clock)) {
+ phydev_err(phydev, "ptp_clock_register failed %lu\n",
+ PTR_ERR(shared->ptp_clock));
+ return -EINVAL;
+ }
+
+ phydev_dbg(phydev, "successfully registered ptp clock\n");
+
+ shared->phydev = phydev;
+
+ /* EP.4 is shared between all the PHYs in the package and can be
+ * accessed by any of them
+ */
+ lanphy_write_page_reg(phydev, 4, LTC_HARD_RESET, LTC_HARD_RESET_);
+ lanphy_write_page_reg(phydev, 4, PTP_OPERATING_MODE,
+ PTP_OPERATING_MODE_STANDALONE_);
+
+ return 0;
+}
+
+static int lan8814_read_status(struct phy_device *phydev)
+{
+ struct kszphy_priv *priv = phydev->priv;
+ struct kszphy_latencies *latencies = &priv->latencies;
+ int err;
+ int regval;
+
+ err = genphy_read_status(phydev);
+ if (err)
+ return err;
+
+ switch (phydev->speed) {
+ case SPEED_1000:
+ lanphy_write_page_reg(phydev, 5, PTP_RX_LATENCY_1000,
+ latencies->rx_1000);
+ lanphy_write_page_reg(phydev, 5, PTP_TX_LATENCY_1000,
+ latencies->tx_1000);
+ break;
+ case SPEED_100:
+ lanphy_write_page_reg(phydev, 5, PTP_RX_LATENCY_100,
+ latencies->rx_100);
+ lanphy_write_page_reg(phydev, 5, PTP_TX_LATENCY_100,
+ latencies->tx_100);
+ break;
+ case SPEED_10:
+ lanphy_write_page_reg(phydev, 5, PTP_RX_LATENCY_10,
+ latencies->rx_10);
+ lanphy_write_page_reg(phydev, 5, PTP_TX_LATENCY_10,
+ latencies->tx_10);
+ break;
+ default:
+ break;
+ }
+
+ /* Make sure the PHY is not broken. Read idle error count,
+ * and reset the PHY if it is maxed out.
+ */
+ regval = phy_read(phydev, MII_STAT1000);
+ if ((regval & 0xFF) == 0xFF) {
+ phy_init_hw(phydev);
+ phydev->link = 0;
+ if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
+ phydev->drv->config_intr(phydev);
+ return genphy_config_aneg(phydev);
+ }
+
+ return 0;
+}
+
+static int lan8814_config_init(struct phy_device *phydev)
+{
+ int val;
+
+ /* Reset the PHY */
+ val = lanphy_read_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET);
+ val |= LAN8814_QSGMII_SOFT_RESET_BIT;
+ lanphy_write_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET, val);
+
+ /* Disable ANEG with QSGMII PCS Host side */
+ val = lanphy_read_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG);
+ val &= ~LAN8814_QSGMII_PCS1G_ANEG_CONFIG_ANEG_ENA;
+ lanphy_write_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG, val);
+
+ /* MDI-X setting for swap A,B transmit */
+ val = lanphy_read_page_reg(phydev, 2, LAN8814_ALIGN_SWAP);
+ val &= ~LAN8814_ALIGN_TX_A_B_SWAP_MASK;
+ val |= LAN8814_ALIGN_TX_A_B_SWAP;
+ lanphy_write_page_reg(phydev, 2, LAN8814_ALIGN_SWAP, val);
+
+ return 0;
+}
+
+static void lan8814_parse_latency(struct phy_device *phydev)
+{
+ const struct device_node *np = phydev->mdio.dev.of_node;
+ struct kszphy_priv *priv = phydev->priv;
+ struct kszphy_latencies *latency = &priv->latencies;
+ u32 val;
+
+ if (!of_property_read_u32(np, "lan8814,latency_rx_10", &val))
+ latency->rx_10 = val;
+ if (!of_property_read_u32(np, "lan8814,latency_tx_10", &val))
+ latency->tx_10 = val;
+ if (!of_property_read_u32(np, "lan8814,latency_rx_100", &val))
+ latency->rx_100 = val;
+ if (!of_property_read_u32(np, "lan8814,latency_tx_100", &val))
+ latency->tx_100 = val;
+ if (!of_property_read_u32(np, "lan8814,latency_rx_1000", &val))
+ latency->rx_1000 = val;
+ if (!of_property_read_u32(np, "lan8814,latency_tx_1000", &val))
+ latency->tx_1000 = val;
+}
+
+static int lan8814_probe(struct phy_device *phydev)
+{
+ const struct device_node *np = phydev->mdio.dev.of_node;
+ struct kszphy_priv *priv;
+ u16 addr;
+ int err;
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->led_mode = -1;
+
+ priv->latencies = lan8814_latencies;
+
+ phydev->priv = priv;
+
+ if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) ||
+ !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING) ||
+ of_property_read_bool(np, "lan8814,ignore-ts"))
+ return 0;
+
+ /* The strap-in value for the PHY address; the register read below
+ * returns the starting PHY address
+ */
+ addr = lanphy_read_page_reg(phydev, 4, 0) & 0x1F;
+ devm_phy_package_join(&phydev->mdio.dev, phydev,
+ addr, sizeof(struct lan8814_shared_priv));
+
+ if (phy_package_init_once(phydev)) {
+ err = lan8814_ptp_probe_once(phydev);
+ if (err)
+ return err;
+ }
+
+ lan8814_parse_latency(phydev);
+ lan8814_ptp_init(phydev);
+
+ return 0;
+}
+
static struct phy_driver ksphy_driver[] = {
{
.phy_id = PHY_ID_KS8737,
@@ -1890,10 +2926,9 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip INDY Gigabit Quad PHY",
.config_init = lan8814_config_init,
- .driver_data = &ksz9021_type,
- .probe = kszphy_probe,
+ .probe = lan8814_probe,
.soft_reset = genphy_soft_reset,
- .read_status = ksz9031_read_status,
+ .read_status = lan8814_read_status,
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
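For reference, the adjfine callback above maps the PTP core's scaled_ppm
(ppm with a 16-bit binary fraction) onto the PHY's rate-adjust register in
two steps: integer ppm first, then the fractional part scaled back down.
A minimal standalone sketch of that arithmetic follows; the per-ppm
constant 17179 (roughly 2^34 / 10^6) is an assumption standing in for
LAN8814_1PPM_FORMAT, whose definition is not shown in this hunk, and the
sign handling the driver does separately is skipped.

#include <stdint.h>
#include <stdio.h>

/* Assumed per-1-ppm register count; stand-in for LAN8814_1PPM_FORMAT. */
#define ONE_PPM_FORMAT	17179

static uint32_t scaled_ppm_to_rate_adj(long scaled_ppm)
{
	uint32_t rate;

	/* integer ppm part, then the 16-bit fractional part */
	rate = ONE_PPM_FORMAT * (uint32_t)(scaled_ppm >> 16);
	rate += (ONE_PPM_FORMAT * (uint32_t)(scaled_ppm & 0xffff)) >> 16;

	return rate;
}

int main(void)
{
	/* 12.5 ppm -> 12.5 * 2^16 = 819200 -> ~12.5 * 17179 */
	printf("%u\n", scaled_ppm_to_rate_adj(819200));	/* prints 214737 */
	return 0;
}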
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index bc50224d43dd..389df3f4293c 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -8,11 +8,17 @@
#include <linux/phy.h>
#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
+#include <linux/bitfield.h>
+
+#define PHY_ID_LAN87XX 0x0007c150
+#define PHY_ID_LAN937X 0x0007c180
/* External Register Control Register */
#define LAN87XX_EXT_REG_CTL (0x14)
#define LAN87XX_EXT_REG_CTL_RD_CTL (0x1000)
#define LAN87XX_EXT_REG_CTL_WR_CTL (0x0800)
+#define LAN87XX_REG_BANK_SEL_MASK GENMASK(10, 8)
+#define LAN87XX_REG_ADDR_MASK GENMASK(7, 0)
/* External Register Read Data Register */
#define LAN87XX_EXT_REG_RD_DATA (0x15)
@@ -37,6 +43,7 @@
#define PHYACC_ATTR_MODE_READ 0
#define PHYACC_ATTR_MODE_WRITE 1
#define PHYACC_ATTR_MODE_MODIFY 2
+#define PHYACC_ATTR_MODE_POLL 3
#define PHYACC_ATTR_BANK_SMI 0
#define PHYACC_ATTR_BANK_MISC 1
@@ -50,8 +57,33 @@
#define LAN87XX_CABLE_TEST_OPEN 1
#define LAN87XX_CABLE_TEST_SAME_SHORT 2
+/* T1 Registers */
+#define T1_AFE_PORT_CFG1_REG 0x0B
+#define T1_POWER_DOWN_CONTROL_REG 0x1A
+#define T1_SLV_FD_MULT_CFG_REG 0x18
+#define T1_CDR_CFG_PRE_LOCK_REG 0x05
+#define T1_CDR_CFG_POST_LOCK_REG 0x06
+#define T1_LCK_STG2_MUFACT_CFG_REG 0x1A
+#define T1_LCK_STG3_MUFACT_CFG_REG 0x1B
+#define T1_POST_LCK_MUFACT_CFG_REG 0x1C
+#define T1_TX_RX_FIFO_CFG_REG 0x02
+#define T1_TX_LPF_FIR_CFG_REG 0x55
+#define T1_SQI_CONFIG_REG 0x2E
+#define T1_MDIO_CONTROL2_REG 0x10
+#define T1_INTERRUPT_SOURCE_REG 0x18
+#define T1_INTERRUPT2_SOURCE_REG 0x08
+#define T1_EQ_FD_STG1_FRZ_CFG 0x69
+#define T1_EQ_FD_STG2_FRZ_CFG 0x6A
+#define T1_EQ_FD_STG3_FRZ_CFG 0x6B
+#define T1_EQ_FD_STG4_FRZ_CFG 0x6C
+#define T1_EQ_WT_FD_LCK_FRZ_CFG 0x6D
+#define T1_PST_EQ_LCK_STG1_FRZ_CFG 0x6E
+
+#define T1_MODE_STAT_REG 0x11
+#define T1_LINK_UP_MSK BIT(0)
+
#define DRIVER_AUTHOR "Nisar Sayed <nisar.sayed@microchip.com>"
-#define DRIVER_DESC "Microchip LAN87XX T1 PHY driver"
+#define DRIVER_DESC "Microchip LAN87XX/LAN937x T1 PHY driver"
struct access_ereg_val {
u8 mode;
@@ -61,6 +93,37 @@ struct access_ereg_val {
u16 mask;
};
+static int lan937x_dsp_workaround(struct phy_device *phydev, u16 ereg, u8 bank)
+{
+ u8 prev_bank;
+ int rc = 0;
+ u16 val;
+
+ mutex_lock(&phydev->lock);
+ /* Read previous selected bank */
+ rc = phy_read(phydev, LAN87XX_EXT_REG_CTL);
+ if (rc < 0)
+ goto out_unlock;
+
+ /* store the prev_bank */
+ prev_bank = FIELD_GET(LAN87XX_REG_BANK_SEL_MASK, rc);
+
+ if (bank != prev_bank && bank == PHYACC_ATTR_BANK_DSP) {
+ val = ereg & ~LAN87XX_REG_ADDR_MASK;
+
+ val &= ~LAN87XX_EXT_REG_CTL_WR_CTL;
+ val |= LAN87XX_EXT_REG_CTL_RD_CTL;
+
+ /* access twice for DSP bank change; this is the dummy access */
+ rc = phy_write(phydev, LAN87XX_EXT_REG_CTL, val);
+ }
+
+out_unlock:
+ mutex_unlock(&phydev->lock);
+
+ return rc;
+}
+
static int access_ereg(struct phy_device *phydev, u8 mode, u8 bank,
u8 offset, u16 val)
{
@@ -89,6 +152,13 @@ static int access_ereg(struct phy_device *phydev, u8 mode, u8 bank,
ereg |= (bank << 8) | offset;
+ /* DSP bank access workaround for lan937x */
+ if (phydev->phy_id == PHY_ID_LAN937X) {
+ rc = lan937x_dsp_workaround(phydev, ereg, bank);
+ if (rc < 0)
+ return rc;
+ }
+
rc = phy_write(phydev, LAN87XX_EXT_REG_CTL, ereg);
if (rc < 0)
return rc;
@@ -117,6 +187,15 @@ static int access_ereg_modify_changed(struct phy_device *phydev,
return rc;
}
+static int access_smi_poll_timeout(struct phy_device *phydev,
+ u8 offset, u16 mask, u16 clr)
+{
+ int val;
+
+ return phy_read_poll_timeout(phydev, offset, val, (val & mask) == clr,
+ 150, 30000, true);
+}
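/* Illustration only, not part of this patch: a user-space sketch of the
 * poll-until-cleared pattern that access_smi_poll_timeout() builds on via
 * phy_read_poll_timeout() - re-read a register until the masked bits hit
 * the expected value or a ~30 ms budget runs out, sleeping ~150 us per
 * try. read_reg() below is a simulated register, not a kernel API.
 */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int read_reg(void)
{
	static int reads;

	/* pretend the self-clearing bit (0x0080) drops after a few reads */
	return (reads++ < 3) ? 0x0094 : 0x0014;
}

static int poll_reg(uint16_t mask, uint16_t expected)
{
	int tries = 30000 / 150;	/* 30 ms budget, 150 us per try */
	int val;

	while (tries--) {
		val = read_reg();
		if (((uint16_t)val & mask) == expected)
			return 0;
		usleep(150);
	}
	return -1;			/* timed out */
}

int main(void)
{
	printf("poll result: %d\n", poll_reg(0x0080, 0x0000));	/* 0 */
	return 0;
}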
+
static int lan87xx_config_rgmii_delay(struct phy_device *phydev)
{
int rc;
@@ -157,68 +236,159 @@ static int lan87xx_config_rgmii_delay(struct phy_device *phydev)
static int lan87xx_phy_init(struct phy_device *phydev)
{
static const struct access_ereg_val init[] = {
- /* TX Amplitude = 5 */
- {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_AFE, 0x0B,
- 0x000A, 0x001E},
- /* Clear SMI interrupts */
- {PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_SMI, 0x18,
- 0, 0},
- /* Clear MISC interrupts */
- {PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_MISC, 0x08,
- 0, 0},
- /* Turn on TC10 Ring Oscillator (ROSC) */
- {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_MISC, 0x20,
- 0x0020, 0x0020},
- /* WUR Detect Length to 1.2uS, LPC Detect Length to 1.09uS */
- {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_PCS, 0x20,
- 0x283C, 0},
- /* Wake_In Debounce Length to 39uS, Wake_Out Length to 79uS */
- {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x21,
- 0x274F, 0},
- /* Enable Auto Wake Forward to Wake_Out, ROSC on, Sleep,
- * and Wake_In to wake PHY
- */
- {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x20,
- 0x80A7, 0},
- /* Enable WUP Auto Fwd, Enable Wake on MDI, Wakeup Debouncer
- * to 128 uS
- */
- {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_MISC, 0x24,
- 0xF110, 0},
- /* Enable HW Init */
- {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_SMI, 0x1A,
- 0x0100, 0x0100},
+ /* TXPD/TXAMP6 Configs */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_AFE,
+ T1_AFE_PORT_CFG1_REG, 0x002D, 0 },
+ /* HW_Init Hi and Force_ED */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI,
+ T1_POWER_DOWN_CONTROL_REG, 0x0308, 0 },
+ /* Equalizer Full Duplex Freeze - T1 Slave */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_EQ_FD_STG1_FRZ_CFG, 0x0002, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_EQ_FD_STG2_FRZ_CFG, 0x0002, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_EQ_FD_STG3_FRZ_CFG, 0x0002, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_EQ_FD_STG4_FRZ_CFG, 0x0002, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_EQ_WT_FD_LCK_FRZ_CFG, 0x0002, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_PST_EQ_LCK_STG1_FRZ_CFG, 0x0002, 0 },
+ /* Slave Full Duplex Multi Configs */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_SLV_FD_MULT_CFG_REG, 0x0D53, 0 },
+ /* CDR Pre and Post Lock Configs */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_CDR_CFG_PRE_LOCK_REG, 0x0AB2, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_CDR_CFG_POST_LOCK_REG, 0x0AB3, 0 },
+ /* Lock Stage 2-3 Multi Factor Config */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_LCK_STG2_MUFACT_CFG_REG, 0x0AEA, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_LCK_STG3_MUFACT_CFG_REG, 0x0AEB, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_POST_LCK_MUFACT_CFG_REG, 0x0AEB, 0 },
+ /* Pointer delay */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_RX_FIFO_CFG_REG, 0x1C00, 0 },
+ /* Tx IIR edits */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1000, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1861, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1061, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1922, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1122, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1983, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1183, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1944, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1144, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x18c5, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x10c5, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1846, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1046, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1807, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1007, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1808, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1008, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1809, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1009, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x180A, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x100A, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x180B, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x100B, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x180C, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x100C, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x180D, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x100D, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x180E, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x100E, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x180F, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x100F, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1810, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1010, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1811, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1011, 0 },
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_TX_LPF_FIR_CFG_REG, 0x1000, 0 },
+ /* SQI enable */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
+ T1_SQI_CONFIG_REG, 0x9572, 0 },
+ /* Flag LPS and WUR as idle errors */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI,
+ T1_MDIO_CONTROL2_REG, 0x0014, 0 },
+ /* HW_Init toggle, undo force ED, TXPD off */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI,
+ T1_POWER_DOWN_CONTROL_REG, 0x0200, 0 },
+ /* Reset PCS to trigger hardware initialization */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI,
+ T1_MDIO_CONTROL2_REG, 0x0094, 0 },
+ /* Poll until the hardware is initialized */
+ { PHYACC_ATTR_MODE_POLL, PHYACC_ATTR_BANK_SMI,
+ T1_MDIO_CONTROL2_REG, 0x0080, 0 },
+ /* Tx AMP - 0x06 */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_AFE,
+ T1_AFE_PORT_CFG1_REG, 0x000C, 0 },
+ /* Read INTERRUPT_SOURCE Register */
+ { PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_SMI,
+ T1_INTERRUPT_SOURCE_REG, 0, 0 },
+ /* Read INTERRUPT_SOURCE Register */
+ { PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_MISC,
+ T1_INTERRUPT2_SOURCE_REG, 0, 0 },
+ /* HW_Init Hi */
+ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI,
+ T1_POWER_DOWN_CONTROL_REG, 0x0300, 0 },
};
int rc, i;
- /* Start manual initialization procedures in Managed Mode */
- rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI,
- 0x1a, 0x0000, 0x0100);
+ /* PHY soft reset */
+ rc = genphy_soft_reset(phydev);
if (rc < 0)
return rc;
- /* Soft Reset the SMI block */
- rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI,
- 0x00, 0x8000, 0x8000);
- if (rc < 0)
- return rc;
-
- /* Check to see if the self-clearing bit is cleared */
- usleep_range(1000, 2000);
- rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
- PHYACC_ATTR_BANK_SMI, 0x00, 0);
- if (rc < 0)
- return rc;
- if ((rc & 0x8000) != 0)
- return -ETIMEDOUT;
-
/* PHY Initialization */
for (i = 0; i < ARRAY_SIZE(init); i++) {
- if (init[i].mode == PHYACC_ATTR_MODE_MODIFY) {
- rc = access_ereg_modify_changed(phydev, init[i].bank,
- init[i].offset,
- init[i].val,
- init[i].mask);
+ if (init[i].mode == PHYACC_ATTR_MODE_POLL &&
+ init[i].bank == PHYACC_ATTR_BANK_SMI) {
+ rc = access_smi_poll_timeout(phydev,
+ init[i].offset,
+ init[i].val,
+ init[i].mask);
} else {
rc = access_ereg(phydev, init[i].mode, init[i].bank,
init[i].offset, init[i].val);
@@ -504,22 +674,86 @@ static int lan87xx_cable_test_get_status(struct phy_device *phydev,
return 0;
}
+static int lan87xx_read_status(struct phy_device *phydev)
+{
+ int rc = 0;
+
+ rc = phy_read(phydev, T1_MODE_STAT_REG);
+ if (rc < 0)
+ return rc;
+
+ if (rc & T1_LINK_UP_MSK)
+ phydev->link = 1;
+ else
+ phydev->link = 0;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ rc = genphy_read_master_slave(phydev);
+ if (rc < 0)
+ return rc;
+
+ rc = genphy_read_status_fixed(phydev);
+ if (rc < 0)
+ return rc;
+
+ return rc;
+}
+
+static int lan87xx_config_aneg(struct phy_device *phydev)
+{
+ u16 ctl = 0;
+ int rc;
+
+ switch (phydev->master_slave_set) {
+ case MASTER_SLAVE_CFG_MASTER_FORCE:
+ ctl |= CTL1000_AS_MASTER;
+ break;
+ case MASTER_SLAVE_CFG_SLAVE_FORCE:
+ break;
+ case MASTER_SLAVE_CFG_UNKNOWN:
+ case MASTER_SLAVE_CFG_UNSUPPORTED:
+ return 0;
+ default:
+ phydev_warn(phydev, "Unsupported Master/Slave mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ rc = phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl);
+ if (rc == 1)
+ rc = genphy_soft_reset(phydev);
+
+ return rc;
+}
+
static struct phy_driver microchip_t1_phy_driver[] = {
{
- .phy_id = 0x0007c150,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_LAN87XX),
.name = "Microchip LAN87xx T1",
.flags = PHY_POLL_CABLE_TEST,
-
.features = PHY_BASIC_T1_FEATURES,
-
.config_init = lan87xx_config_init,
-
.config_intr = lan87xx_phy_config_intr,
.handle_interrupt = lan87xx_handle_interrupt,
-
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .config_aneg = lan87xx_config_aneg,
+ .read_status = lan87xx_read_status,
+ .cable_test_start = lan87xx_cable_test_start,
+ .cable_test_get_status = lan87xx_cable_test_get_status,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_LAN937X),
+ .name = "Microchip LAN937x T1",
+ .features = PHY_BASIC_T1_FEATURES,
+ .config_init = lan87xx_config_init,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = lan87xx_config_aneg,
+ .read_status = lan87xx_read_status,
.cable_test_start = lan87xx_cable_test_start,
.cable_test_get_status = lan87xx_cable_test_get_status,
}
@@ -528,7 +762,8 @@ static struct phy_driver microchip_t1_phy_driver[] = {
module_phy_driver(microchip_t1_phy_driver);
static struct mdio_device_id __maybe_unused microchip_t1_tbl[] = {
- { 0x0007c150, 0xfffffff0 },
+ { PHY_ID_MATCH_MODEL(PHY_ID_LAN87XX) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_LAN937X) },
{ }
};
diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
index 34f829845d06..cf728bfd83e2 100644
--- a/drivers/net/phy/mscc/mscc_ptp.c
+++ b/drivers/net/phy/mscc/mscc_ptp.c
@@ -1212,7 +1212,7 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
ts.tv_sec--;
shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns);
- netif_rx_ni(skb);
+ netif_rx(skb);
return true;
}
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index 06fdbae509a7..047c581457e3 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -478,7 +478,7 @@ static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
shhwtstamps_rx = skb_hwtstamps(skb);
shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
- netif_rx_ni(skb);
+ netif_rx(skb);
}
if (priv->extts) {
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 271fc01f7f7f..2001f3329133 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -243,7 +243,7 @@ size_t phy_speeds(unsigned int *speeds, size_t size,
return count;
}
-static int __set_linkmode_max_speed(u32 max_speed, unsigned long *addr)
+static void __set_linkmode_max_speed(u32 max_speed, unsigned long *addr)
{
const struct phy_setting *p;
int i;
@@ -254,13 +254,11 @@ static int __set_linkmode_max_speed(u32 max_speed, unsigned long *addr)
else
break;
}
-
- return 0;
}
-static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
+static void __set_phy_supported(struct phy_device *phydev, u32 max_speed)
{
- return __set_linkmode_max_speed(max_speed, phydev->supported);
+ __set_linkmode_max_speed(max_speed, phydev->supported);
}
/**
@@ -273,17 +271,11 @@ static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
* is connected to a 1G PHY. This function allows the MAC to indicate its
* maximum speed, and so limit what the PHY will advertise.
*/
-int phy_set_max_speed(struct phy_device *phydev, u32 max_speed)
+void phy_set_max_speed(struct phy_device *phydev, u32 max_speed)
{
- int err;
-
- err = __set_phy_supported(phydev, max_speed);
- if (err)
- return err;
+ __set_phy_supported(phydev, max_speed);
phy_advertise_supported(phydev);
-
- return 0;
}
EXPORT_SYMBOL(phy_set_max_speed);
@@ -440,7 +432,9 @@ int phy_speed_down_core(struct phy_device *phydev)
if (min_common_speed == SPEED_UNKNOWN)
return -EINVAL;
- return __set_linkmode_max_speed(min_common_speed, phydev->advertising);
+ __set_linkmode_max_speed(min_common_speed, phydev->advertising);
+
+ return 0;
}
static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index ce0bb5951b81..8406ac739def 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -2051,17 +2051,11 @@ static int genphy_setup_master_slave(struct phy_device *phydev)
CTL1000_PREFER_MASTER), ctl);
}
-static int genphy_read_master_slave(struct phy_device *phydev)
+int genphy_read_master_slave(struct phy_device *phydev)
{
int cfg, state;
int val;
- if (!phydev->is_gigabit_capable) {
- phydev->master_slave_get = MASTER_SLAVE_CFG_UNSUPPORTED;
- phydev->master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED;
- return 0;
- }
-
phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN;
phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
@@ -2102,6 +2096,7 @@ static int genphy_read_master_slave(struct phy_device *phydev)
return 0;
}
+EXPORT_SYMBOL(genphy_read_master_slave);
/**
* genphy_restart_aneg - Enable and Restart Autonegotiation
@@ -2396,14 +2391,18 @@ int genphy_read_status(struct phy_device *phydev)
if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
return 0;
+ phydev->master_slave_get = MASTER_SLAVE_CFG_UNSUPPORTED;
+ phydev->master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED;
phydev->speed = SPEED_UNKNOWN;
phydev->duplex = DUPLEX_UNKNOWN;
phydev->pause = 0;
phydev->asym_pause = 0;
- err = genphy_read_master_slave(phydev);
- if (err < 0)
- return err;
+ if (phydev->is_gigabit_capable) {
+ err = genphy_read_master_slave(phydev);
+ if (err < 0)
+ return err;
+ }
err = genphy_read_lpa(phydev);
if (err < 0)
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 420201858564..06943889d747 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -74,6 +74,7 @@ struct phylink {
struct work_struct resolve;
bool mac_link_dropped;
+ bool using_mac_select_pcs;
struct sfp_bus *sfp_bus;
bool sfp_may_have_phy;
@@ -132,17 +133,6 @@ void phylink_set_port_modes(unsigned long *mask)
}
EXPORT_SYMBOL_GPL(phylink_set_port_modes);
-void phylink_set_10g_modes(unsigned long *mask)
-{
- phylink_set(mask, 10000baseT_Full);
- phylink_set(mask, 10000baseCR_Full);
- phylink_set(mask, 10000baseSR_Full);
- phylink_set(mask, 10000baseLR_Full);
- phylink_set(mask, 10000baseLRM_Full);
- phylink_set(mask, 10000baseER_Full);
-}
-EXPORT_SYMBOL_GPL(phylink_set_10g_modes);
-
static int phylink_is_empty_linkmode(const unsigned long *linkmode)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = { 0, };
@@ -427,7 +417,7 @@ static int phylink_validate_mac_and_pcs(struct phylink *pl,
int ret;
/* Get the PCS for this interface mode */
- if (pl->mac_ops->mac_select_pcs) {
+ if (pl->using_mac_select_pcs) {
pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface);
if (IS_ERR(pcs))
return PTR_ERR(pcs);
@@ -802,7 +792,7 @@ static void phylink_major_config(struct phylink *pl, bool restart,
phylink_dbg(pl, "major config %s\n", phy_modes(state->interface));
- if (pl->mac_ops->mac_select_pcs) {
+ if (pl->using_mac_select_pcs) {
pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface);
if (IS_ERR(pcs)) {
phylink_err(pl,
@@ -825,8 +815,18 @@ static void phylink_major_config(struct phylink *pl, bool restart,
/* If we have a new PCS, switch to the new PCS after preparing the MAC
* for the change.
*/
- if (pcs)
- phylink_set_pcs(pl, pcs);
+ if (pcs) {
+ pl->pcs = pcs;
+ pl->pcs_ops = pcs->ops;
+
+ if (!pl->phylink_disable_state &&
+ pl->cfg_link_an_mode == MLO_AN_INBAND) {
+ if (pcs->poll)
+ mod_timer(&pl->link_poll, jiffies + HZ);
+ else
+ del_timer(&pl->link_poll);
+ }
+ }
phylink_mac_config(pl, state);
@@ -1182,9 +1182,8 @@ static int phylink_register_sfp(struct phylink *pl,
bus = sfp_bus_find_fwnode(fwnode);
if (IS_ERR(bus)) {
- ret = PTR_ERR(bus);
- phylink_err(pl, "unable to attach SFP bus: %d\n", ret);
- return ret;
+ phylink_err(pl, "unable to attach SFP bus: %pe\n", bus);
+ return PTR_ERR(bus);
}
pl->sfp_bus = bus;
@@ -1216,11 +1215,17 @@ struct phylink *phylink_create(struct phylink_config *config,
phy_interface_t iface,
const struct phylink_mac_ops *mac_ops)
{
+ bool using_mac_select_pcs = false;
struct phylink *pl;
int ret;
- /* Validate the supplied configuration */
if (mac_ops->mac_select_pcs &&
+ mac_ops->mac_select_pcs(config, PHY_INTERFACE_MODE_NA) !=
+ ERR_PTR(-EOPNOTSUPP))
+ using_mac_select_pcs = true;
+
+ /* Validate the supplied configuration */
+ if (using_mac_select_pcs &&
phy_interface_empty(config->supported_interfaces)) {
dev_err(config->dev,
"phylink: error: empty supported_interfaces but mac_select_pcs() method present\n");
@@ -1244,6 +1249,7 @@ struct phylink *phylink_create(struct phylink_config *config,
return ERR_PTR(-EINVAL);
}
+ pl->using_mac_select_pcs = using_mac_select_pcs;
pl->phy_state.interface = iface;
pl->link_interface = iface;
if (iface == PHY_INTERFACE_MODE_MOCA)
@@ -1290,36 +1296,6 @@ struct phylink *phylink_create(struct phylink_config *config,
EXPORT_SYMBOL_GPL(phylink_create);
/**
- * phylink_set_pcs() - set the current PCS for phylink to use
- * @pl: a pointer to a &struct phylink returned from phylink_create()
- * @pcs: a pointer to the &struct phylink_pcs
- *
- * Bind the MAC PCS to phylink. This may be called after phylink_create().
- * If it is desired to dynamically change the PCS, then the preferred method
- * is to use mac_select_pcs(), but it may also be called in mac_prepare()
- * or mac_config().
- *
- * Please note that there are behavioural changes with the mac_config()
- * callback if a PCS is present (denoting a newer setup) so removing a PCS
- * is not supported, and if a PCS is going to be used, it must be registered
- * by calling phylink_set_pcs() at the latest in the first mac_config() call.
- */
-void phylink_set_pcs(struct phylink *pl, struct phylink_pcs *pcs)
-{
- pl->pcs = pcs;
- pl->pcs_ops = pcs->ops;
-
- if (!pl->phylink_disable_state &&
- pl->cfg_link_an_mode == MLO_AN_INBAND) {
- if (pl->config->pcs_poll || pcs->poll)
- mod_timer(&pl->link_poll, jiffies + HZ);
- else
- del_timer(&pl->link_poll);
- }
-}
-EXPORT_SYMBOL_GPL(phylink_set_pcs);
-
-/**
* phylink_destroy() - cleanup and destroy the phylink instance
* @pl: a pointer to a &struct phylink returned from phylink_create()
*
@@ -1403,11 +1379,11 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
ret = phylink_validate(pl, supported, &config);
if (ret) {
- phylink_warn(pl, "validation of %s with support %*pb and advertisement %*pb failed: %d\n",
+ phylink_warn(pl, "validation of %s with support %*pb and advertisement %*pb failed: %pe\n",
phy_modes(config.interface),
__ETHTOOL_LINK_MODE_MASK_NBITS, phy->supported,
__ETHTOOL_LINK_MODE_MASK_NBITS, config.advertising,
- ret);
+ ERR_PTR(ret));
return ret;
}
@@ -1684,7 +1660,6 @@ void phylink_start(struct phylink *pl)
poll |= pl->config->poll_fixed_state;
break;
case MLO_AN_INBAND:
- poll |= pl->config->pcs_poll;
if (pl->pcs)
poll |= pl->pcs->poll;
break;
@@ -2607,8 +2582,9 @@ static int phylink_sfp_config(struct phylink *pl, u8 mode,
/* Ignore errors if we're expecting a PHY to attach later */
ret = phylink_validate(pl, support, &config);
if (ret) {
- phylink_err(pl, "validation with support %*pb failed: %d\n",
- __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
+ phylink_err(pl, "validation with support %*pb failed: %pe\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, support,
+ ERR_PTR(ret));
return ret;
}
@@ -2624,10 +2600,12 @@ static int phylink_sfp_config(struct phylink *pl, u8 mode,
linkmode_copy(support1, support);
ret = phylink_validate(pl, support1, &config);
if (ret) {
- phylink_err(pl, "validation of %s/%s with support %*pb failed: %d\n",
+ phylink_err(pl,
+ "validation of %s/%s with support %*pb failed: %pe\n",
phylink_an_mode_str(mode),
phy_modes(config.interface),
- __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
+ __ETHTOOL_LINK_MODE_MASK_NBITS, support,
+ ERR_PTR(ret));
return ret;
}
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index c1512c9925a6..15aa5ac1ff49 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -75,6 +75,12 @@ static const struct sfp_quirk sfp_quirks[] = {
.part = "MA5671A",
.modes = sfp_quirk_2500basex,
}, {
+ // Lantech 8330-262D-E can operate at 2500base-X, but
+ // incorrectly reports 2500MBd NRZ in its EEPROM
+ .vendor = "Lantech",
+ .part = "8330-262D-E",
+ .modes = sfp_quirk_2500basex,
+ }, {
.vendor = "UBNT",
.part = "UF-INSTANT",
.modes = sfp_quirk_ubnt_uf_instant,
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 4720b24ca51b..4dfb79807823 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -471,8 +471,8 @@ static unsigned int sfp_soft_get_state(struct sfp *sfp)
state |= SFP_F_TX_FAULT;
} else {
dev_err_ratelimited(sfp->dev,
- "failed to read SFP soft status: %d\n",
- ret);
+ "failed to read SFP soft status: %pe\n",
+ ERR_PTR(ret));
/* Preserve the current state */
state = sfp->state;
}
@@ -1311,7 +1311,8 @@ static void sfp_hwmon_probe(struct work_struct *work)
mod_delayed_work(system_wq, &sfp->hwmon_probe,
T_PROBE_RETRY_SLOW);
} else {
- dev_warn(sfp->dev, "hwmon probe failed: %d\n", err);
+ dev_warn(sfp->dev, "hwmon probe failed: %pe\n",
+ ERR_PTR(err));
}
return;
}
@@ -1516,14 +1517,15 @@ static int sfp_sm_probe_phy(struct sfp *sfp, bool is_c45)
if (phy == ERR_PTR(-ENODEV))
return PTR_ERR(phy);
if (IS_ERR(phy)) {
- dev_err(sfp->dev, "mdiobus scan returned %ld\n", PTR_ERR(phy));
+ dev_err(sfp->dev, "mdiobus scan returned %pe\n", phy);
return PTR_ERR(phy);
}
err = phy_device_register(phy);
if (err) {
phy_device_free(phy);
- dev_err(sfp->dev, "phy_device_register failed: %d\n", err);
+ dev_err(sfp->dev, "phy_device_register failed: %pe\n",
+ ERR_PTR(err));
return err;
}
@@ -1531,7 +1533,7 @@ static int sfp_sm_probe_phy(struct sfp *sfp, bool is_c45)
if (err) {
phy_device_remove(phy);
phy_device_free(phy);
- dev_err(sfp->dev, "sfp_add_phy failed: %d\n", err);
+ dev_err(sfp->dev, "sfp_add_phy failed: %pe\n", ERR_PTR(err));
return err;
}
@@ -1708,7 +1710,7 @@ static int sfp_sm_mod_hpower(struct sfp *sfp, bool enable)
err = sfp_read(sfp, true, SFP_EXT_STATUS, &val, sizeof(val));
if (err != sizeof(val)) {
- dev_err(sfp->dev, "Failed to read EEPROM: %d\n", err);
+ dev_err(sfp->dev, "Failed to read EEPROM: %pe\n", ERR_PTR(err));
return -EAGAIN;
}
@@ -1726,7 +1728,8 @@ static int sfp_sm_mod_hpower(struct sfp *sfp, bool enable)
err = sfp_write(sfp, true, SFP_EXT_STATUS, &val, sizeof(val));
if (err != sizeof(val)) {
- dev_err(sfp->dev, "Failed to write EEPROM: %d\n", err);
+ dev_err(sfp->dev, "Failed to write EEPROM: %pe\n",
+ ERR_PTR(err));
return -EAGAIN;
}
@@ -1778,7 +1781,9 @@ static int sfp_cotsworks_fixup_check(struct sfp *sfp, struct sfp_eeprom_id *id)
id->base.connector = SFF8024_CONNECTOR_LC;
err = sfp_write(sfp, false, SFP_PHYS_ID, &id->base, 3);
if (err != 3) {
- dev_err(sfp->dev, "Failed to rewrite module EEPROM: %d\n", err);
+ dev_err(sfp->dev,
+ "Failed to rewrite module EEPROM: %pe\n",
+ ERR_PTR(err));
return err;
}
@@ -1789,7 +1794,9 @@ static int sfp_cotsworks_fixup_check(struct sfp *sfp, struct sfp_eeprom_id *id)
check = sfp_check(&id->base, sizeof(id->base) - 1);
err = sfp_write(sfp, false, SFP_CC_BASE, &check, 1);
if (err != 1) {
- dev_err(sfp->dev, "Failed to update base structure checksum in fiber module EEPROM: %d\n", err);
+ dev_err(sfp->dev,
+ "Failed to update base structure checksum in fiber module EEPROM: %pe\n",
+ ERR_PTR(err));
return err;
}
}
@@ -1814,12 +1821,13 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base));
if (ret < 0) {
if (report)
- dev_err(sfp->dev, "failed to read EEPROM: %d\n", ret);
+ dev_err(sfp->dev, "failed to read EEPROM: %pe\n",
+ ERR_PTR(ret));
return -EAGAIN;
}
if (ret != sizeof(id.base)) {
- dev_err(sfp->dev, "EEPROM short read: %d\n", ret);
+ dev_err(sfp->dev, "EEPROM short read: %pe\n", ERR_PTR(ret));
return -EAGAIN;
}
@@ -1839,13 +1847,15 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base));
if (ret < 0) {
if (report)
- dev_err(sfp->dev, "failed to read EEPROM: %d\n",
- ret);
+ dev_err(sfp->dev,
+ "failed to read EEPROM: %pe\n",
+ ERR_PTR(ret));
return -EAGAIN;
}
if (ret != sizeof(id.base)) {
- dev_err(sfp->dev, "EEPROM short read: %d\n", ret);
+ dev_err(sfp->dev, "EEPROM short read: %pe\n",
+ ERR_PTR(ret));
return -EAGAIN;
}
}
@@ -1887,12 +1897,13 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
ret = sfp_read(sfp, false, SFP_CC_BASE + 1, &id.ext, sizeof(id.ext));
if (ret < 0) {
if (report)
- dev_err(sfp->dev, "failed to read EEPROM: %d\n", ret);
+ dev_err(sfp->dev, "failed to read EEPROM: %pe\n",
+ ERR_PTR(ret));
return -EAGAIN;
}
if (ret != sizeof(id.ext)) {
- dev_err(sfp->dev, "EEPROM short read: %d\n", ret);
+ dev_err(sfp->dev, "EEPROM short read: %pe\n", ERR_PTR(ret));
return -EAGAIN;
}
@@ -2046,7 +2057,8 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
err = sfp_hwmon_insert(sfp);
if (err)
- dev_warn(sfp->dev, "hwmon probe failed: %d\n", err);
+ dev_warn(sfp->dev, "hwmon probe failed: %pe\n",
+ ERR_PTR(err));
sfp_sm_mod_next(sfp, SFP_MOD_WAITDEV, 0);
fallthrough;
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index 0d491b4d6667..dafd3e9ebbf8 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -676,7 +676,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl,
case PLIP_PK_DONE:
/* Inform the upper layer for the arrival of a packet. */
rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
- netif_rx_ni(rcv->skb);
+ netif_rx(rcv->skb);
dev->stats.rx_bytes += rcv->length.h;
dev->stats.rx_packets++;
rcv->skb = NULL;
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 1a95f3beb784..39e61e07e489 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -109,7 +109,7 @@ static int rionet_rx_clean(struct net_device *ndev)
skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
rnet->rx_skb[i]->protocol =
eth_type_trans(rnet->rx_skb[i], ndev);
- error = netif_rx(rnet->rx_skb[i]);
+ error = __netif_rx(rnet->rx_skb[i]);
if (error == NET_RX_DROP) {
ndev->stats.rx_dropped++;
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index 57a6d598467b..c3f8020571ad 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -872,7 +872,7 @@ printk("cm0: IP identification: %02x%02x fragment offset: %02x%02x\n", buffer[3
/* datagram completed: send to upper level */
skb_trim(skb, dlen);
- netif_rx(skb);
+ __netif_rx(skb);
stats->rx_bytes+=dlen;
stats->rx_packets++;
lp->rx_skb[ns] = NULL;
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 98f586f910fb..88396ff99f03 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -368,7 +368,7 @@ static void sl_bump(struct slip *sl)
skb_put_data(skb, sl->rbuff, count);
skb_reset_mac_header(skb);
skb->protocol = htons(ETH_P_IP);
- netif_rx_ni(skb);
+ netif_rx(skb);
dev->stats.rx_packets++;
}
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 8e3a28ba6b28..c3d42062559d 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -322,6 +322,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
struct tap_dev *tap;
struct tap_queue *q;
netdev_features_t features = TAP_FEATURES;
+ enum skb_drop_reason drop_reason;
tap = tap_dev_get_rcu(dev);
if (!tap)
@@ -343,12 +344,16 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
struct sk_buff *segs = __skb_gso_segment(skb, features, false);
struct sk_buff *next;
- if (IS_ERR(segs))
+ if (IS_ERR(segs)) {
+ drop_reason = SKB_DROP_REASON_SKB_GSO_SEG;
goto drop;
+ }
if (!segs) {
- if (ptr_ring_produce(&q->ring, skb))
+ if (ptr_ring_produce(&q->ring, skb)) {
+ drop_reason = SKB_DROP_REASON_FULL_RING;
goto drop;
+ }
goto wake_up;
}
@@ -356,8 +361,9 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
skb_list_walk_safe(segs, skb, next) {
skb_mark_not_on_list(skb);
if (ptr_ring_produce(&q->ring, skb)) {
- kfree_skb(skb);
- kfree_skb_list(next);
+ drop_reason = SKB_DROP_REASON_FULL_RING;
+ kfree_skb_reason(skb, drop_reason);
+ kfree_skb_list_reason(next, drop_reason);
break;
}
}
@@ -369,10 +375,14 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
*/
if (skb->ip_summed == CHECKSUM_PARTIAL &&
!(features & NETIF_F_CSUM_MASK) &&
- skb_checksum_help(skb))
+ skb_checksum_help(skb)) {
+ drop_reason = SKB_DROP_REASON_SKB_CSUM;
goto drop;
- if (ptr_ring_produce(&q->ring, skb))
+ }
+ if (ptr_ring_produce(&q->ring, skb)) {
+ drop_reason = SKB_DROP_REASON_FULL_RING;
goto drop;
+ }
}
wake_up:
@@ -383,7 +393,7 @@ drop:
/* Count errors/drops only here, thus don't care about args. */
if (tap->count_rx_dropped)
tap->count_rx_dropped(tap);
- kfree_skb(skb);
+ kfree_skb_reason(skb, drop_reason);
return RX_HANDLER_CONSUMED;
}
EXPORT_SYMBOL_GPL(tap_handle_frame);
@@ -632,6 +642,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
int depth;
bool zerocopy = false;
size_t linear;
+ enum skb_drop_reason drop_reason;
if (q->flags & IFF_VNET_HDR) {
vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
@@ -696,8 +707,10 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
else
err = skb_copy_datagram_from_iter(skb, 0, from, len);
- if (err)
+ if (err) {
+ drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
goto err_kfree;
+ }
skb_set_network_header(skb, ETH_HLEN);
skb_reset_mac_header(skb);
@@ -706,8 +719,10 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
if (vnet_hdr_len) {
err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
tap_is_little_endian(q));
- if (err)
+ if (err) {
+ drop_reason = SKB_DROP_REASON_DEV_HDR;
goto err_kfree;
+ }
}
skb_probe_transport_header(skb);
@@ -738,7 +753,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
return total_len;
err_kfree:
- kfree_skb(skb);
+ kfree_skb_reason(skb, drop_reason);
err:
rcu_read_lock();
@@ -1198,7 +1213,8 @@ static int tap_sendmsg(struct socket *sock, struct msghdr *m,
struct xdp_buff *xdp;
int i;
- if (ctl && (ctl->type == TUN_MSG_PTR)) {
+ if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
+ ctl && ctl->type == TUN_MSG_PTR) {
for (i = 0; i < ctl->num; i++) {
xdp = &((struct xdp_buff *)ctl->ptr)[i];
tap_get_user_xdp(q, xdp);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 8b2adc56b92a..b07dde6f0abf 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -734,6 +734,11 @@ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
port = team_port_get_rcu(skb->dev);
team = port->team;
if (!team_port_enabled(port)) {
+ if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
+ /* link-local packets are mostly useful when the stack receives
+ * them on the link they arrive on.
+ */
+ return RX_HANDLER_PASS;
/* allow exact match delivery for disabled ports */
res = RX_HANDLER_EXACT;
} else {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index fed85447701a..276a0e42ca8e 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1058,6 +1058,7 @@ static unsigned int run_ebpf_filter(struct tun_struct *tun,
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
+ enum skb_drop_reason drop_reason;
int txq = skb->queue_mapping;
struct netdev_queue *queue;
struct tun_file *tfile;
@@ -1067,8 +1068,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
tfile = rcu_dereference(tun->tfiles[txq]);
/* Drop packet if interface is not attached */
- if (!tfile)
+ if (!tfile) {
+ drop_reason = SKB_DROP_REASON_DEV_READY;
goto drop;
+ }
if (!rcu_dereference(tun->steering_prog))
tun_automq_xmit(tun, skb);
@@ -1078,19 +1081,32 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
/* Drop if the filter does not like it.
* This is a noop if the filter is disabled.
* Filter can be enabled only for the TAP devices. */
- if (!check_filter(&tun->txflt, skb))
+ if (!check_filter(&tun->txflt, skb)) {
+ drop_reason = SKB_DROP_REASON_TAP_TXFILTER;
goto drop;
+ }
if (tfile->socket.sk->sk_filter &&
- sk_filter(tfile->socket.sk, skb))
+ sk_filter(tfile->socket.sk, skb)) {
+ drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
goto drop;
+ }
len = run_ebpf_filter(tun, skb, len);
- if (len == 0 || pskb_trim(skb, len))
+ if (len == 0) {
+ drop_reason = SKB_DROP_REASON_TAP_FILTER;
goto drop;
+ }
+
+ if (pskb_trim(skb, len)) {
+ drop_reason = SKB_DROP_REASON_NOMEM;
+ goto drop;
+ }
- if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
+ if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) {
+ drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
goto drop;
+ }
skb_tx_timestamp(skb);
@@ -1101,8 +1117,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
nf_reset_ct(skb);
- if (ptr_ring_produce(&tfile->tx_ring, skb))
+ if (ptr_ring_produce(&tfile->tx_ring, skb)) {
+ drop_reason = SKB_DROP_REASON_FULL_RING;
goto drop;
+ }
/* NETIF_F_LLTX requires to do our own update of trans_start */
queue = netdev_get_tx_queue(dev, txq);
@@ -1117,9 +1135,9 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
drop:
- atomic_long_inc(&dev->tx_dropped);
+ dev_core_stats_tx_dropped_inc(dev);
skb_tx_error(skb);
- kfree_skb(skb);
+ kfree_skb_reason(skb, drop_reason);
rcu_read_unlock();
return NET_XMIT_DROP;
}
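
Each failure path in tun_net_xmit() now records a specific enum skb_drop_reason and frees with kfree_skb_reason(), while the dropped-packet counter moves to the per-cpu core stats; a condensed sketch of the same pattern in a hypothetical xmit handler:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch of the drop-reason pattern above; demo_xmit() is hypothetical. */
static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;

	if (!netif_carrier_ok(dev)) {
		reason = SKB_DROP_REASON_DEV_READY;
		goto drop;
	}
	return NETDEV_TX_OK;

drop:
	dev_core_stats_tx_dropped_inc(dev);	/* per-cpu core stats */
	kfree_skb_reason(skb, reason);		/* reason visible to drop monitor */
	return NET_XMIT_DROP;
}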
@@ -1273,7 +1291,7 @@ resample:
void *frame = tun_xdp_to_ptr(xdp);
if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
- atomic_long_inc(&dev->tx_dropped);
+ dev_core_stats_tx_dropped_inc(dev);
break;
}
nxmit++;
@@ -1608,7 +1626,7 @@ static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
trace_xdp_exception(tun->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
- atomic_long_inc(&tun->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(tun->dev);
break;
}
@@ -1717,6 +1735,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
u32 rxhash = 0;
int skb_xdp = 1;
bool frags = tun_napi_frags_enabled(tfile);
+ enum skb_drop_reason drop_reason;
if (!(tun->flags & IFF_NO_PI)) {
if (len < sizeof(pi))
@@ -1778,7 +1797,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
*/
skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
if (IS_ERR(skb)) {
- atomic_long_inc(&tun->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(tun->dev);
return PTR_ERR(skb);
}
if (!skb)
@@ -1807,7 +1826,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EAGAIN)
- atomic_long_inc(&tun->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(tun->dev);
if (frags)
mutex_unlock(&tfile->napi_mutex);
return PTR_ERR(skb);
@@ -1820,9 +1839,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (err) {
err = -EFAULT;
+ drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
drop:
- atomic_long_inc(&tun->dev->rx_dropped);
- kfree_skb(skb);
+ dev_core_stats_rx_dropped_inc(tun->dev);
+ kfree_skb_reason(skb, drop_reason);
if (frags) {
tfile->napi.skb = NULL;
mutex_unlock(&tfile->napi_mutex);
@@ -1856,7 +1876,7 @@ drop:
pi.proto = htons(ETH_P_IPV6);
break;
default:
- atomic_long_inc(&tun->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(tun->dev);
kfree_skb(skb);
return -EINVAL;
}
@@ -1869,6 +1889,7 @@ drop:
case IFF_TAP:
if (frags && !pskb_may_pull(skb, ETH_HLEN)) {
err = -ENOMEM;
+ drop_reason = SKB_DROP_REASON_HDR_TRUNC;
goto drop;
}
skb->protocol = eth_type_trans(skb, tun->dev);
@@ -1922,6 +1943,7 @@ drop:
if (unlikely(!(tun->dev->flags & IFF_UP))) {
err = -EIO;
rcu_read_unlock();
+ drop_reason = SKB_DROP_REASON_DEV_READY;
goto drop;
}
@@ -1934,7 +1956,7 @@ drop:
skb_headlen(skb));
if (unlikely(headlen > skb_headlen(skb))) {
- atomic_long_inc(&tun->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(tun->dev);
napi_free_frags(&tfile->napi);
rcu_read_unlock();
mutex_unlock(&tfile->napi_mutex);
@@ -1962,7 +1984,7 @@ drop:
} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
tun_rx_batched(tun, tfile, skb, more);
} else {
- netif_rx_ni(skb);
+ netif_rx(skb);
}
rcu_read_unlock();
@@ -2388,9 +2410,10 @@ static int tun_xdp_one(struct tun_struct *tun,
struct virtio_net_hdr *gso = &hdr->gso;
struct bpf_prog *xdp_prog;
struct sk_buff *skb = NULL;
+ struct sk_buff_head *queue;
u32 rxhash = 0, act;
int buflen = hdr->buflen;
- int err = 0;
+ int ret = 0;
bool skb_xdp = false;
struct page *page;
@@ -2405,13 +2428,13 @@ static int tun_xdp_one(struct tun_struct *tun,
xdp_set_data_meta_invalid(xdp);
act = bpf_prog_run_xdp(xdp_prog, xdp);
- err = tun_xdp_act(tun, xdp_prog, xdp, act);
- if (err < 0) {
+ ret = tun_xdp_act(tun, xdp_prog, xdp, act);
+ if (ret < 0) {
put_page(virt_to_head_page(xdp->data));
- return err;
+ return ret;
}
- switch (err) {
+ switch (ret) {
case XDP_REDIRECT:
*flush = true;
fallthrough;
@@ -2435,7 +2458,7 @@ static int tun_xdp_one(struct tun_struct *tun,
build:
skb = build_skb(xdp->data_hard_start, buflen);
if (!skb) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto out;
}
@@ -2445,7 +2468,7 @@ build:
if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
atomic_long_inc(&tun->rx_frame_errors);
kfree_skb(skb);
- err = -EINVAL;
+ ret = -EINVAL;
goto out;
}
@@ -2455,16 +2478,27 @@ build:
skb_record_rx_queue(skb, tfile->queue_index);
if (skb_xdp) {
- err = do_xdp_generic(xdp_prog, skb);
- if (err != XDP_PASS)
+ ret = do_xdp_generic(xdp_prog, skb);
+ if (ret != XDP_PASS) {
+ ret = 0;
goto out;
+ }
}
if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
!tfile->detached)
rxhash = __skb_get_hash_symmetric(skb);
- netif_receive_skb(skb);
+ if (tfile->napi_enabled) {
+ queue = &tfile->sk.sk_write_queue;
+ spin_lock(&queue->lock);
+ __skb_queue_tail(queue, skb);
+ spin_unlock(&queue->lock);
+ ret = 1;
+ } else {
+ netif_receive_skb(skb);
+ ret = 0;
+ }
/* No need to disable preemption here since this function is
* always called with bh disabled
@@ -2475,7 +2509,7 @@ build:
tun_flow_update(tun, rxhash, tfile);
out:
- return err;
+ return ret;
}
static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
@@ -2489,10 +2523,11 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
if (!tun)
return -EBADFD;
- if (ctl && (ctl->type == TUN_MSG_PTR)) {
+ if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
+ ctl && ctl->type == TUN_MSG_PTR) {
struct tun_page tpage;
int n = ctl->num;
- int flush = 0;
+ int flush = 0, queued = 0;
memset(&tpage, 0, sizeof(tpage));
@@ -2501,12 +2536,17 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
for (i = 0; i < n; i++) {
xdp = &((struct xdp_buff *)ctl->ptr)[i];
- tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
+ ret = tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
+ if (ret > 0)
+ queued += ret;
}
if (flush)
xdp_do_flush();
+ if (tfile->napi_enabled && queued > 0)
+ napi_schedule(&tfile->napi);
+
rcu_read_unlock();
local_bh_enable();
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index b554054a7560..e62fc4f2aee0 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -358,6 +358,7 @@ config USB_NET_SMSC95XX
select BITREVERSE
select CRC16
select CRC32
+ imply NET_SELFTESTS
help
This option adds support for SMSC LAN95XX based USB 2.0
10/100 Ethernet adapters.
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index 2a1e31defe71..2c81236c6c7c 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -158,6 +158,8 @@
#define AX_EEPROM_MAGIC 0xdeadbeef
#define AX_EEPROM_LEN 0x200
+#define AX_EMBD_PHY_ADDR 0x10
+
/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
struct asix_data {
u8 multi_filter[AX_MCAST_FILTER_SIZE];
@@ -177,14 +179,16 @@ struct asix_rx_fixup_info {
struct asix_common_private {
void (*resume)(struct usbnet *dev);
void (*suspend)(struct usbnet *dev);
+ int (*reset)(struct usbnet *dev, int in_pm);
u16 presvd_phy_advertise;
u16 presvd_phy_bmcr;
struct asix_rx_fixup_info rx_fixup_info;
struct mii_bus *mdio;
struct phy_device *phydev;
+ struct phy_device *phydev_int;
u16 phy_addr;
- char phy_name[20];
bool embd_phy;
+ u8 chipcode;
};
extern const struct driver_info ax88172a_info;
@@ -192,8 +196,8 @@ extern const struct driver_info ax88172a_info;
/* ASIX specific flags */
#define FLAG_EEPROM_MAC (1UL << 0) /* init device MAC from eeprom */
-int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
- u16 size, void *data, int in_pm);
+int __must_check asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data, int in_pm);
int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
u16 size, void *data, int in_pm);
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 71682970be58..632fa6c1d5e3 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -11,8 +11,8 @@
#define AX_HOST_EN_RETRIES 30
-int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
- u16 size, void *data, int in_pm)
+int __must_check asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data, int in_pm)
{
int ret;
int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
@@ -27,9 +27,12 @@ int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
ret = fn(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, index, data, size);
- if (unlikely(ret < 0))
+ if (unlikely(ret < size)) {
+ ret = ret < 0 ? ret : -ENODATA;
+
netdev_warn(dev->net, "Failed to read reg index 0x%04x: %d\n",
index, ret);
+ }
return ret;
}
@@ -79,7 +82,7 @@ static int asix_check_host_enable(struct usbnet *dev, int in_pm)
0, 0, 1, &smsr, in_pm);
if (ret == -ENODEV)
break;
- else if (ret < sizeof(smsr))
+ else if (ret < 0)
continue;
else if (smsr & AX_HOST_EN)
break;
@@ -488,7 +491,8 @@ void asix_set_multicast(struct net_device *net)
asix_write_cmd_async(dev, AX_CMD_WRITE_RX_CTL, rx_ctl, 0, 0, NULL);
}
-int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
+static int __asix_mdio_read(struct net_device *netdev, int phy_id, int loc,
+ bool in_pm)
{
struct usbnet *dev = netdev_priv(netdev);
__le16 res;
@@ -496,18 +500,18 @@ int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
mutex_lock(&dev->phy_mutex);
- ret = asix_check_host_enable(dev, 0);
+ ret = asix_check_host_enable(dev, in_pm);
if (ret == -ENODEV || ret == -ETIMEDOUT) {
mutex_unlock(&dev->phy_mutex);
return ret;
}
ret = asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id, (__u16)loc, 2,
- &res, 0);
+ &res, in_pm);
if (ret < 0)
goto out;
- ret = asix_set_hw_mii(dev, 0);
+ ret = asix_set_hw_mii(dev, in_pm);
out:
mutex_unlock(&dev->phy_mutex);
@@ -517,8 +521,13 @@ out:
return ret < 0 ? ret : le16_to_cpu(res);
}
+int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
+{
+ return __asix_mdio_read(netdev, phy_id, loc, false);
+}
+
static int __asix_mdio_write(struct net_device *netdev, int phy_id, int loc,
- int val)
+ int val, bool in_pm)
{
struct usbnet *dev = netdev_priv(netdev);
__le16 res = cpu_to_le16(val);
@@ -529,16 +538,16 @@ static int __asix_mdio_write(struct net_device *netdev, int phy_id, int loc,
mutex_lock(&dev->phy_mutex);
- ret = asix_check_host_enable(dev, 0);
+ ret = asix_check_host_enable(dev, in_pm);
if (ret == -ENODEV)
goto out;
ret = asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id, (__u16)loc, 2,
- &res, 0);
+ &res, in_pm);
if (ret < 0)
goto out;
- ret = asix_set_hw_mii(dev, 0);
+ ret = asix_set_hw_mii(dev, in_pm);
out:
mutex_unlock(&dev->phy_mutex);
@@ -547,7 +556,7 @@ out:
void asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val)
{
- __asix_mdio_write(netdev, phy_id, loc, val);
+ __asix_mdio_write(netdev, phy_id, loc, val, false);
}
/* MDIO read and write wrappers for phylib */
@@ -555,63 +564,25 @@ int asix_mdio_bus_read(struct mii_bus *bus, int phy_id, int regnum)
{
struct usbnet *priv = bus->priv;
- return asix_mdio_read(priv->net, phy_id, regnum);
+ return __asix_mdio_read(priv->net, phy_id, regnum, false);
}
int asix_mdio_bus_write(struct mii_bus *bus, int phy_id, int regnum, u16 val)
{
struct usbnet *priv = bus->priv;
- return __asix_mdio_write(priv->net, phy_id, regnum, val);
+ return __asix_mdio_write(priv->net, phy_id, regnum, val, false);
}
int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc)
{
- struct usbnet *dev = netdev_priv(netdev);
- __le16 res;
- int ret;
-
- mutex_lock(&dev->phy_mutex);
-
- ret = asix_check_host_enable(dev, 1);
- if (ret == -ENODEV || ret == -ETIMEDOUT) {
- mutex_unlock(&dev->phy_mutex);
- return ret;
- }
-
- asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id,
- (__u16)loc, 2, &res, 1);
- asix_set_hw_mii(dev, 1);
- mutex_unlock(&dev->phy_mutex);
-
- netdev_dbg(dev->net, "asix_mdio_read_nopm() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
- phy_id, loc, le16_to_cpu(res));
-
- return le16_to_cpu(res);
+ return __asix_mdio_read(netdev, phy_id, loc, true);
}
void
asix_mdio_write_nopm(struct net_device *netdev, int phy_id, int loc, int val)
{
- struct usbnet *dev = netdev_priv(netdev);
- __le16 res = cpu_to_le16(val);
- int ret;
-
- netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
- phy_id, loc, val);
-
- mutex_lock(&dev->phy_mutex);
-
- ret = asix_check_host_enable(dev, 1);
- if (ret == -ENODEV) {
- mutex_unlock(&dev->phy_mutex);
- return;
- }
-
- asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id,
- (__u16)loc, 2, &res, 1);
- asix_set_hw_mii(dev, 1);
- mutex_unlock(&dev->phy_mutex);
+ __asix_mdio_write(netdev, phy_id, loc, val, true);
}
void asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
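
The _nopm MDIO helpers above are reduced to one-line wrappers around __asix_mdio_read()/__asix_mdio_write(), which take a single in_pm flag and pick the suspend-safe accessors internally. A generic sketch of that wrapper pattern, with hypothetical names:

/* Hypothetical illustration of folding duplicated PM/non-PM register
 * accessors behind one flagged helper, as done for the asix MDIO ops.
 */
static int __demo_reg_read(struct demo_dev *dev, int reg, bool in_pm)
{
	/* in_pm selects the accessor that is safe during suspend/resume */
	return in_pm ? demo_read_cmd_nopm(dev, reg)
		     : demo_read_cmd(dev, reg);
}

static int demo_reg_read(struct demo_dev *dev, int reg)
{
	return __demo_reg_read(dev, reg, false);
}

static int demo_reg_read_nopm(struct demo_dev *dev, int reg)
{
	return __demo_reg_read(dev, reg, true);
}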
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 4514d35ef4c4..38e47a93fb83 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -450,7 +450,6 @@ static int ax88772a_hw_reset(struct usbnet *dev, int in_pm)
struct asix_data *data = (struct asix_data *)&dev->data;
struct asix_common_private *priv = dev->driver_priv;
u16 rx_ctl, phy14h, phy15h, phy16h;
- u8 chipcode = 0;
int ret;
ret = asix_write_gpio(dev, AX_GPIO_RSE, 5, in_pm);
@@ -493,12 +492,7 @@ static int ax88772a_hw_reset(struct usbnet *dev, int in_pm)
goto out;
}
- ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0,
- 0, 1, &chipcode, in_pm);
- if (ret < 0)
- goto out;
-
- if ((chipcode & AX_CHIPCODE_MASK) == AX_AX88772B_CHIPCODE) {
+ if (priv->chipcode == AX_AX88772B_CHIPCODE) {
ret = asix_write_cmd(dev, AX_QCTCTRL, 0x8000, 0x8001,
0, NULL, in_pm);
if (ret < 0) {
@@ -506,7 +500,7 @@ static int ax88772a_hw_reset(struct usbnet *dev, int in_pm)
ret);
goto out;
}
- } else if ((chipcode & AX_CHIPCODE_MASK) == AX_AX88772A_CHIPCODE) {
+ } else if (priv->chipcode == AX_AX88772A_CHIPCODE) {
/* Check if the PHY registers have default settings */
phy14h = asix_mdio_read_nopm(dev->net, dev->mii.phy_id,
AX88772A_PHY14H);
@@ -625,27 +619,13 @@ static void ax88772_resume(struct usbnet *dev)
int i;
for (i = 0; i < 3; i++)
- if (!ax88772_hw_reset(dev, 1))
+ if (!priv->reset(dev, 1))
break;
if (netif_running(dev->net))
phy_start(priv->phydev);
}
-static void ax88772a_resume(struct usbnet *dev)
-{
- struct asix_common_private *priv = dev->driver_priv;
- int i;
-
- for (i = 0; i < 3; i++) {
- if (!ax88772a_hw_reset(dev, 1))
- break;
- }
-
- if (netif_running(dev->net))
- phy_start(priv->phydev);
-}
-
static int asix_resume(struct usb_interface *intf)
{
struct usbnet *dev = usb_get_intfdata(intf);
@@ -681,15 +661,16 @@ static int ax88772_init_phy(struct usbnet *dev)
struct asix_common_private *priv = dev->driver_priv;
int ret;
- snprintf(priv->phy_name, sizeof(priv->phy_name), PHY_ID_FMT,
- priv->mdio->id, priv->phy_addr);
+ priv->phydev = mdiobus_get_phy(priv->mdio, priv->phy_addr);
+ if (!priv->phydev) {
+ netdev_err(dev->net, "Could not find PHY\n");
+ return -ENODEV;
+ }
- priv->phydev = phy_connect(dev->net, priv->phy_name, &asix_adjust_link,
- PHY_INTERFACE_MODE_INTERNAL);
- if (IS_ERR(priv->phydev)) {
- netdev_err(dev->net, "Could not connect to PHY device %s\n",
- priv->phy_name);
- ret = PTR_ERR(priv->phydev);
+ ret = phy_connect_direct(dev->net, priv->phydev, &asix_adjust_link,
+ PHY_INTERFACE_MODE_INTERNAL);
+ if (ret) {
+ netdev_err(dev->net, "Could not connect PHY\n");
return ret;
}
@@ -698,13 +679,29 @@ static int ax88772_init_phy(struct usbnet *dev)
phy_attached_info(priv->phydev);
+ if (priv->embd_phy)
+ return 0;
+
+ /* In case the main PHY is not the embedded PHY and the MAC is the
+ * RMII clock provider, we need to suspend the embedded PHY while
+ * keeping the PLL enabled (AX_SWRESET_IPPD == 0).
+ */
+ priv->phydev_int = mdiobus_get_phy(priv->mdio, AX_EMBD_PHY_ADDR);
+ if (!priv->phydev_int) {
+ netdev_err(dev->net, "Could not find internal PHY\n");
+ return -ENODEV;
+ }
+
+ priv->phydev_int->mac_managed_pm = 1;
+ phy_suspend(priv->phydev_int);
+
return 0;
}
static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
{
- u8 buf[ETH_ALEN] = {0}, chipcode = 0;
struct asix_common_private *priv;
+ u8 buf[ETH_ALEN] = {0};
int ret, i;
priv = devm_kzalloc(&dev->udev->dev, sizeof(*priv), GFP_KERNEL);
@@ -753,14 +750,25 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
return ret;
priv->phy_addr = ret;
- priv->embd_phy = ((priv->phy_addr & 0x1f) == 0x10);
+ priv->embd_phy = ((priv->phy_addr & 0x1f) == AX_EMBD_PHY_ADDR);
- asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0);
- chipcode &= AX_CHIPCODE_MASK;
+ ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1,
+ &priv->chipcode, 0);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Failed to read STATMNGSTS_REG: %d\n", ret);
+ return ret;
+ }
+
+ priv->chipcode &= AX_CHIPCODE_MASK;
- ret = (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) :
- ax88772a_hw_reset(dev, 0);
+ priv->resume = ax88772_resume;
+ priv->suspend = ax88772_suspend;
+ if (priv->chipcode == AX_AX88772_CHIPCODE)
+ priv->reset = ax88772_hw_reset;
+ else
+ priv->reset = ax88772a_hw_reset;
+ ret = priv->reset(dev, 0);
if (ret < 0) {
netdev_dbg(dev->net, "Failed to reset AX88772: %d\n", ret);
return ret;
@@ -775,13 +783,6 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
priv->presvd_phy_bmcr = 0;
priv->presvd_phy_advertise = 0;
- if (chipcode == AX_AX88772_CHIPCODE) {
- priv->resume = ax88772_resume;
- priv->suspend = ax88772_suspend;
- } else {
- priv->resume = ax88772a_resume;
- priv->suspend = ax88772_suspend;
- }
ret = ax88772_init_mdio(dev);
if (ret)
@@ -858,7 +859,6 @@ static int marvell_phy_init(struct usbnet *dev)
reg = asix_mdio_read(dev->net, dev->mii.phy_id,
MII_MARVELL_LED_CTRL);
netdev_dbg(dev->net, "MII_MARVELL_LED_CTRL (2) = 0x%04x\n", reg);
- reg &= 0xfc0f;
}
return 0;
@@ -920,11 +920,21 @@ static int ax88178_reset(struct usbnet *dev)
int gpio0 = 0;
u32 phyid;
- asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status, 0);
+ ret = asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status, 0);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Failed to read GPIOS: %d\n", ret);
+ return ret;
+ }
+
netdev_dbg(dev->net, "GPIO Status: 0x%04x\n", status);
asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0, 0, 0, NULL, 0);
- asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom, 0);
+ ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom, 0);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Failed to read EEPROM: %d\n", ret);
+ return ret;
+ }
+
asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0, 0, 0, NULL, 0);
netdev_dbg(dev->net, "EEPROM index 0x17 is 0x%04x\n", eeprom);
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index c0b8b4aa78f3..c89639381eca 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -21,6 +21,7 @@
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/ipv6_stubs.h>
+#include <net/ndisc.h>
/* alternative VLAN for IP session 0 if not untagged */
#define MBIM_IPS0_VID 4094
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index 13a9a83b8538..46af78caf457 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -56,7 +56,7 @@
struct gl_packet {
__le32 packet_length;
- char packet_data [1];
+ char packet_data[];
};
struct gl_header {
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index f97813a4e8d1..f8221a7acf62 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2319,7 +2319,7 @@ static struct hso_device *hso_create_device(struct usb_interface *intf,
{
struct hso_device *hso_dev;
- hso_dev = kzalloc(sizeof(*hso_dev), GFP_ATOMIC);
+ hso_dev = kzalloc(sizeof(*hso_dev), GFP_KERNEL);
if (!hso_dev)
return NULL;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index a0f29482294d..4ef61f6b85df 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -20,6 +20,8 @@
#include <linux/of_net.h>
#include <linux/mdio.h>
#include <linux/phy.h>
+#include <net/selftests.h>
+
#include "smsc95xx.h"
#define SMSC_CHIPNAME "smsc95xx"
@@ -739,6 +741,26 @@ static u32 smsc95xx_get_link(struct net_device *net)
return net->phydev->link;
}
+static void smsc95xx_ethtool_get_strings(struct net_device *netdev, u32 sset,
+ u8 *data)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ net_selftest_get_strings(data);
+ break;
+ }
+}
+
+static int smsc95xx_ethtool_get_sset_count(struct net_device *ndev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return net_selftest_get_count();
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct ethtool_ops smsc95xx_ethtool_ops = {
.get_link = smsc95xx_get_link,
.nway_reset = phy_ethtool_nway_reset,
@@ -755,6 +777,9 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_ts_info = ethtool_op_get_ts_info,
+ .self_test = net_selftest,
+ .get_strings = smsc95xx_ethtool_get_strings,
+ .get_sset_count = smsc95xx_ethtool_get_sset_count,
};
static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index d29fb9759cc9..1b5714926d81 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -287,7 +287,7 @@ static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
{
return __dev_forward_skb(dev, skb) ?: xdp ?
veth_xdp_rx(rq, skb) :
- netif_rx(skb);
+ __netif_rx(skb);
}
/* return true if the specified skb has chances of GRO aggregation
@@ -433,21 +433,6 @@ static void veth_set_multicast_list(struct net_device *dev)
{
}
-static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
- int buflen)
-{
- struct sk_buff *skb;
-
- skb = build_skb(head, buflen);
- if (!skb)
- return NULL;
-
- skb_reserve(skb, headroom);
- skb_put(skb, len);
-
- return skb;
-}
-
static int veth_select_rxq(struct net_device *dev)
{
return smp_processor_id() % dev->real_num_rx_queues;
@@ -494,7 +479,7 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
struct xdp_frame *frame = frames[i];
void *ptr = veth_xdp_to_ptr(frame);
- if (unlikely(frame->len > max_len ||
+ if (unlikely(xdp_get_frame_len(frame) > max_len ||
__ptr_ring_produce(&rq->xdp_ring, ptr)))
break;
nxmit++;
@@ -695,72 +680,143 @@ static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames,
}
}
-static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
- struct sk_buff *skb,
- struct veth_xdp_tx_bq *bq,
- struct veth_stats *stats)
+static void veth_xdp_get(struct xdp_buff *xdp)
{
- u32 pktlen, headroom, act, metalen, frame_sz;
- void *orig_data, *orig_data_end;
- struct bpf_prog *xdp_prog;
- int mac_len, delta, off;
- struct xdp_buff xdp;
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ int i;
- skb_prepare_for_gro(skb);
+ get_page(virt_to_page(xdp->data));
+ if (likely(!xdp_buff_has_frags(xdp)))
+ return;
- rcu_read_lock();
- xdp_prog = rcu_dereference(rq->xdp_prog);
- if (unlikely(!xdp_prog)) {
- rcu_read_unlock();
- goto out;
- }
+ for (i = 0; i < sinfo->nr_frags; i++)
+ __skb_frag_ref(&sinfo->frags[i]);
+}
- mac_len = skb->data - skb_mac_header(skb);
- pktlen = skb->len + mac_len;
- headroom = skb_headroom(skb) - mac_len;
+static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
+ struct xdp_buff *xdp,
+ struct sk_buff **pskb)
+{
+ struct sk_buff *skb = *pskb;
+ u32 frame_sz;
if (skb_shared(skb) || skb_head_is_locked(skb) ||
- skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) {
+ skb_shinfo(skb)->nr_frags) {
+ u32 size, len, max_head_size, off;
struct sk_buff *nskb;
- int size, head_off;
- void *head, *start;
struct page *page;
+ int i, head_off;
- size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- if (size > PAGE_SIZE)
+ /* We need a private copy of the skb and data buffers since
+ * the eBPF program can modify it. We segment the original skb
+ * into order-0 pages without linearizing it.
+ *
+ * Make sure we have enough space for the linear and paged areas.
+ */
+ max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE -
+ VETH_XDP_HEADROOM);
+ if (skb->len > PAGE_SIZE * MAX_SKB_FRAGS + max_head_size)
goto drop;
+ /* Allocate skb head */
page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
if (!page)
goto drop;
- head = page_address(page);
- start = head + VETH_XDP_HEADROOM;
- if (skb_copy_bits(skb, -mac_len, start, pktlen)) {
- page_frag_free(head);
+ nskb = build_skb(page_address(page), PAGE_SIZE);
+ if (!nskb) {
+ put_page(page);
goto drop;
}
- nskb = veth_build_skb(head, VETH_XDP_HEADROOM + mac_len,
- skb->len, PAGE_SIZE);
- if (!nskb) {
- page_frag_free(head);
+ skb_reserve(nskb, VETH_XDP_HEADROOM);
+ size = min_t(u32, skb->len, max_head_size);
+ if (skb_copy_bits(skb, 0, nskb->data, size)) {
+ consume_skb(nskb);
goto drop;
}
+ skb_put(nskb, size);
skb_copy_header(nskb, skb);
head_off = skb_headroom(nskb) - skb_headroom(skb);
skb_headers_offset_update(nskb, head_off);
+
+ /* Allocate paged area of new skb */
+ off = size;
+ len = skb->len - off;
+
+ for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
+ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
+ if (!page) {
+ consume_skb(nskb);
+ goto drop;
+ }
+
+ size = min_t(u32, len, PAGE_SIZE);
+ skb_add_rx_frag(nskb, i, page, 0, size, PAGE_SIZE);
+ if (skb_copy_bits(skb, off, page_address(page),
+ size)) {
+ consume_skb(nskb);
+ goto drop;
+ }
+
+ len -= size;
+ off += size;
+ }
+
consume_skb(skb);
skb = nskb;
+ } else if (skb_headroom(skb) < XDP_PACKET_HEADROOM &&
+ pskb_expand_head(skb, VETH_XDP_HEADROOM, 0, GFP_ATOMIC)) {
+ goto drop;
}
/* SKB "head" area always have tailroom for skb_shared_info */
frame_sz = skb_end_pointer(skb) - skb->head;
frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- xdp_init_buff(&xdp, frame_sz, &rq->xdp_rxq);
- xdp_prepare_buff(&xdp, skb->head, skb->mac_header, pktlen, true);
+ xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
+ xdp_prepare_buff(xdp, skb->head, skb_headroom(skb),
+ skb_headlen(skb), true);
+
+ if (skb_is_nonlinear(skb)) {
+ skb_shinfo(skb)->xdp_frags_size = skb->data_len;
+ xdp_buff_set_frags_flag(xdp);
+ } else {
+ xdp_buff_clear_frags_flag(xdp);
+ }
+ *pskb = skb;
+
+ return 0;
+drop:
+ consume_skb(skb);
+ *pskb = NULL;
+
+ return -ENOMEM;
+}
+
+static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
+ struct sk_buff *skb,
+ struct veth_xdp_tx_bq *bq,
+ struct veth_stats *stats)
+{
+ void *orig_data, *orig_data_end;
+ struct bpf_prog *xdp_prog;
+ struct xdp_buff xdp;
+ u32 act, metalen;
+ int off;
+
+ skb_prepare_for_gro(skb);
+
+ rcu_read_lock();
+ xdp_prog = rcu_dereference(rq->xdp_prog);
+ if (unlikely(!xdp_prog)) {
+ rcu_read_unlock();
+ goto out;
+ }
+
+ __skb_push(skb, skb->data - skb_mac_header(skb));
+ if (veth_convert_skb_to_xdp_buff(rq, &xdp, &skb))
+ goto drop;
orig_data = xdp.data;
orig_data_end = xdp.data_end;
@@ -771,7 +827,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
case XDP_PASS:
break;
case XDP_TX:
- get_page(virt_to_page(xdp.data));
+ veth_xdp_get(&xdp);
consume_skb(skb);
xdp.rxq->mem = rq->xdp_mem;
if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
@@ -783,7 +839,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
rcu_read_unlock();
goto xdp_xmit;
case XDP_REDIRECT:
- get_page(virt_to_page(xdp.data));
+ veth_xdp_get(&xdp);
consume_skb(skb);
xdp.rxq->mem = rq->xdp_mem;
if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
@@ -806,18 +862,27 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
rcu_read_unlock();
/* check if bpf_xdp_adjust_head was used */
- delta = orig_data - xdp.data;
- off = mac_len + delta;
+ off = orig_data - xdp.data;
if (off > 0)
__skb_push(skb, off);
else if (off < 0)
__skb_pull(skb, -off);
- skb->mac_header -= delta;
+
+ skb_reset_mac_header(skb);
/* check if bpf_xdp_adjust_tail was used */
off = xdp.data_end - orig_data_end;
if (off != 0)
__skb_put(skb, off); /* positive on grow, negative on shrink */
+
+ /* XDP frag metadata (e.g. nr_frags) is updated by eBPF helpers
+ * (e.g. bpf_xdp_adjust_tail), so we need to update data_len here.
+ */
+ if (xdp_buff_has_frags(&xdp))
+ skb->data_len = skb_shinfo(skb)->xdp_frags_size;
+ else
+ skb->data_len = 0;
+
skb->protocol = eth_type_trans(skb, rq->dev);
metalen = xdp.data - xdp.data_meta;
@@ -833,7 +898,7 @@ xdp_drop:
return NULL;
err_xdp:
rcu_read_unlock();
- page_frag_free(xdp.data);
+ xdp_return_buff(&xdp);
xdp_xmit:
return NULL;
}
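
veth_convert_skb_to_xdp_buff() above caps the copied linear area at SKB_WITH_OVERHEAD(PAGE_SIZE - VETH_XDP_HEADROOM) and then allows at most MAX_SKB_FRAGS additional order-0 pages, so anything larger than that budget is dropped rather than linearized. A hedged sketch of the same size check, using the macros from the patch but a hypothetical helper name:

/* Sketch only: the skb fits the XDP buffer layout built above when its
 * total length stays within one head page plus MAX_SKB_FRAGS frag pages.
 */
static bool demo_fits_xdp_buff(const struct sk_buff *skb)
{
	u32 max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE - VETH_XDP_HEADROOM);

	return skb->len <= PAGE_SIZE * MAX_SKB_FRAGS + max_head_size;
}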
@@ -855,7 +920,7 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget,
/* ndo_xdp_xmit */
struct xdp_frame *frame = veth_ptr_to_xdp(ptr);
- stats->xdp_bytes += frame->len;
+ stats->xdp_bytes += xdp_get_frame_len(frame);
frame = veth_xdp_rcv_one(rq, frame, bq, stats);
if (frame) {
/* XDP_PASS */
@@ -1463,9 +1528,14 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
goto err;
}
- max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
- peer->hard_header_len -
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ max_mtu = SKB_WITH_OVERHEAD(PAGE_SIZE - VETH_XDP_HEADROOM) -
+ peer->hard_header_len;
+ /* Allow increasing the max_mtu if the program supports
+ * XDP fragments.
+ */
+ if (prog->aux->xdp_has_frags)
+ max_mtu += PAGE_SIZE * MAX_SKB_FRAGS;
+
if (peer->mtu > max_mtu) {
NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
err = -ERANGE;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index a801ea40908f..11f26b00a226 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -3449,8 +3449,7 @@ static __init int virtio_net_driver_init(void)
NULL, virtnet_cpu_dead);
if (ret)
goto err_dead;
-
- ret = register_virtio_driver(&virtio_net_driver);
+ ret = register_virtio_driver(&virtio_net_driver);
if (ret)
goto err_virtio;
return 0;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index e0b1ab99a359..85e362461d71 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -418,7 +418,7 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
skb->protocol = eth_type_trans(skb, dev);
- if (likely(netif_rx(skb) == NET_RX_SUCCESS))
+ if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
vrf_rx_stats(dev, len);
else
this_cpu_inc(dev->dstats->rx_drps);
@@ -472,14 +472,13 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
memset(&fl6, 0, sizeof(fl6));
/* needed to match OIF rule */
- fl6.flowi6_oif = dev->ifindex;
+ fl6.flowi6_l3mdev = dev->ifindex;
fl6.flowi6_iif = LOOPBACK_IFINDEX;
fl6.daddr = iph->daddr;
fl6.saddr = iph->saddr;
fl6.flowlabel = ip6_flowinfo(iph);
fl6.flowi6_mark = skb->mark;
fl6.flowi6_proto = iph->nexthdr;
- fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
dst = ip6_dst_lookup_flow(net, NULL, &fl6, NULL);
if (IS_ERR(dst) || dst == dst_null)
@@ -551,10 +550,10 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
memset(&fl4, 0, sizeof(fl4));
/* needed to match OIF rule */
- fl4.flowi4_oif = vrf_dev->ifindex;
+ fl4.flowi4_l3mdev = vrf_dev->ifindex;
fl4.flowi4_iif = LOOPBACK_IFINDEX;
fl4.flowi4_tos = RT_TOS(ip4h->tos);
- fl4.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF;
+ fl4.flowi4_flags = FLOWI_FLAG_ANYSRC;
fl4.flowi4_proto = ip4h->protocol;
fl4.daddr = ip4h->daddr;
fl4.saddr = ip4h->saddr;
diff --git a/drivers/net/vxlan/Makefile b/drivers/net/vxlan/Makefile
new file mode 100644
index 000000000000..d4c255499b72
--- /dev/null
+++ b/drivers/net/vxlan/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the vxlan driver
+#
+
+obj-$(CONFIG_VXLAN) += vxlan.o
+
+vxlan-objs := vxlan_core.o vxlan_multicast.o vxlan_vnifilter.o
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan/vxlan_core.c
index 359d16780dbb..de97ff98d36e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -34,10 +34,10 @@
#include <net/ip6_checksum.h>
#endif
+#include "vxlan_private.h"
+
#define VXLAN_VERSION "0.1"
-#define PORT_HASH_BITS 8
-#define PORT_HASH_SIZE (1<<PORT_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */
@@ -53,41 +53,15 @@ static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
-static unsigned int vxlan_net_id;
-static struct rtnl_link_ops vxlan_link_ops;
+unsigned int vxlan_net_id;
-static const u8 all_zeros_mac[ETH_ALEN + 2];
+const u8 all_zeros_mac[ETH_ALEN + 2];
+static struct rtnl_link_ops vxlan_link_ops;
static int vxlan_sock_add(struct vxlan_dev *vxlan);
static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
-/* per-network namespace private data for this module */
-struct vxlan_net {
- struct list_head vxlan_list;
- struct hlist_head sock_list[PORT_HASH_SIZE];
- spinlock_t sock_lock;
- struct notifier_block nexthop_notifier_block;
-};
-
-/* Forwarding table entry */
-struct vxlan_fdb {
- struct hlist_node hlist; /* linked list of entries */
- struct rcu_head rcu;
- unsigned long updated; /* jiffies */
- unsigned long used;
- struct list_head remotes;
- u8 eth_addr[ETH_ALEN];
- u16 state; /* see ndm_state */
- __be32 vni;
- u16 flags; /* see ndm_flags and below */
- struct list_head nh_list;
- struct nexthop __rcu *nh;
- struct vxlan_dev __rcu *vdev;
-};
-
-#define NTF_VXLAN_ADDED_BY_USER 0x100
-
/* salt for hash table */
static u32 vxlan_salt __read_mostly;
@@ -98,17 +72,6 @@ static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
}
#if IS_ENABLED(CONFIG_IPV6)
-static inline
-bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
-{
- if (a->sa.sa_family != b->sa.sa_family)
- return false;
- if (a->sa.sa_family == AF_INET6)
- return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
- else
- return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
-}
-
static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
if (nla_len(nla) >= sizeof(struct in6_addr)) {
@@ -135,12 +98,6 @@ static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
#else /* !CONFIG_IPV6 */
-static inline
-bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
-{
- return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
-}
-
static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
if (nla_len(nla) >= sizeof(struct in6_addr)) {
@@ -161,37 +118,6 @@ static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
}
#endif
-/* Virtual Network hash table head */
-static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni)
-{
- return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)];
-}
-
-/* Socket hash table head */
-static inline struct hlist_head *vs_head(struct net *net, __be16 port)
-{
- struct vxlan_net *vn = net_generic(net, vxlan_net_id);
-
- return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
-}
-
-/* First remote destination for a forwarding entry.
- * Guaranteed to be non-NULL because remotes are never deleted.
- */
-static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
-{
- if (rcu_access_pointer(fdb->nh))
- return NULL;
- return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
-}
-
-static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
-{
- if (rcu_access_pointer(fdb->nh))
- return NULL;
- return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
-}
-
/* Find VXLAN socket based on network namespace, address family, UDP port,
* enabled unshareable flags and socket device binding (see l3mdev with
* non-default VRF).
@@ -213,18 +139,29 @@ static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
return NULL;
}
-static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, int ifindex,
- __be32 vni)
+static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs,
+ int ifindex, __be32 vni,
+ struct vxlan_vni_node **vninode)
{
+ struct vxlan_vni_node *vnode;
struct vxlan_dev_node *node;
/* For flow based devices, map all packets to VNI 0 */
- if (vs->flags & VXLAN_F_COLLECT_METADATA)
+ if (vs->flags & VXLAN_F_COLLECT_METADATA &&
+ !(vs->flags & VXLAN_F_VNIFILTER))
vni = 0;
hlist_for_each_entry_rcu(node, vni_head(vs, vni), hlist) {
- if (node->vxlan->default_dst.remote_vni != vni)
+ if (!node->vxlan)
+ continue;
+ vnode = NULL;
+ if (node->vxlan->cfg.flags & VXLAN_F_VNIFILTER) {
+ vnode = vxlan_vnifilter_lookup(node->vxlan, vni);
+ if (!vnode)
+ continue;
+ } else if (node->vxlan->default_dst.remote_vni != vni) {
continue;
+ }
if (IS_ENABLED(CONFIG_IPV6)) {
const struct vxlan_config *cfg = &node->vxlan->cfg;
@@ -234,6 +171,8 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, int ifindex,
continue;
}
+ if (vninode)
+ *vninode = vnode;
return node->vxlan;
}
@@ -251,7 +190,7 @@ static struct vxlan_dev *vxlan_find_vni(struct net *net, int ifindex,
if (!vs)
return NULL;
- return vxlan_vs_find_vni(vs, ifindex, vni);
+ return vxlan_vs_find_vni(vs, ifindex, vni, NULL);
}
/* Fill in neighbour message in skbuff. */
@@ -493,7 +432,7 @@ static u32 eth_hash(const unsigned char *addr)
return hash_64(value, FDB_HASH_BITS);
}
-static u32 eth_vni_hash(const unsigned char *addr, __be32 vni)
+u32 eth_vni_hash(const unsigned char *addr, __be32 vni)
{
/* use 1 byte of OUI and 3 bytes of NIC */
u32 key = get_unaligned((u32 *)(addr + 2));
@@ -501,7 +440,7 @@ static u32 eth_vni_hash(const unsigned char *addr, __be32 vni)
return jhash_2words(key, vni, vxlan_salt) & (FDB_HASH_SIZE - 1);
}
-static u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni)
+u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni)
{
if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)
return eth_vni_hash(mac, vni);
@@ -872,37 +811,35 @@ static int vxlan_fdb_nh_update(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
goto err_inval;
}
- if (nh) {
- if (!nexthop_get(nh)) {
- NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
- nh = NULL;
- goto err_inval;
- }
- if (!nexthop_is_fdb(nh)) {
- NL_SET_ERR_MSG(extack, "Nexthop is not a fdb nexthop");
- goto err_inval;
- }
+ if (!nexthop_get(nh)) {
+ NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
+ nh = NULL;
+ goto err_inval;
+ }
+ if (!nexthop_is_fdb(nh)) {
+ NL_SET_ERR_MSG(extack, "Nexthop is not a fdb nexthop");
+ goto err_inval;
+ }
+
+ if (!nexthop_is_multipath(nh)) {
+ NL_SET_ERR_MSG(extack, "Nexthop is not a multipath group");
+ goto err_inval;
+ }
- if (!nexthop_is_multipath(nh)) {
- NL_SET_ERR_MSG(extack, "Nexthop is not a multipath group");
+ /* check nexthop group family */
+ switch (vxlan->default_dst.remote_ip.sa.sa_family) {
+ case AF_INET:
+ if (!nexthop_has_v4(nh)) {
+ err = -EAFNOSUPPORT;
+ NL_SET_ERR_MSG(extack, "Nexthop group family not supported");
goto err_inval;
}
-
- /* check nexthop group family */
- switch (vxlan->default_dst.remote_ip.sa.sa_family) {
- case AF_INET:
- if (!nexthop_has_v4(nh)) {
- err = -EAFNOSUPPORT;
- NL_SET_ERR_MSG(extack, "Nexthop group family not supported");
- goto err_inval;
- }
- break;
- case AF_INET6:
- if (nexthop_has_v4(nh)) {
- err = -EAFNOSUPPORT;
- NL_SET_ERR_MSG(extack, "Nexthop group family not supported");
- goto err_inval;
- }
+ break;
+ case AF_INET6:
+ if (nexthop_has_v4(nh)) {
+ err = -EAFNOSUPPORT;
+ NL_SET_ERR_MSG(extack, "Nexthop group family not supported");
+ goto err_inval;
}
}
@@ -920,12 +857,12 @@ err_inval:
return err;
}
-static int vxlan_fdb_create(struct vxlan_dev *vxlan,
- const u8 *mac, union vxlan_addr *ip,
- __u16 state, __be16 port, __be32 src_vni,
- __be32 vni, __u32 ifindex, __u16 ndm_flags,
- u32 nhid, struct vxlan_fdb **fdb,
- struct netlink_ext_ack *extack)
+int vxlan_fdb_create(struct vxlan_dev *vxlan,
+ const u8 *mac, union vxlan_addr *ip,
+ __u16 state, __be16 port, __be32 src_vni,
+ __be32 vni, __u32 ifindex, __u16 ndm_flags,
+ u32 nhid, struct vxlan_fdb **fdb,
+ struct netlink_ext_ack *extack)
{
struct vxlan_rdst *rd = NULL;
struct vxlan_fdb *f;
@@ -1150,13 +1087,13 @@ err_notify:
}
/* Add new entry to forwarding table -- assumes lock held */
-static int vxlan_fdb_update(struct vxlan_dev *vxlan,
- const u8 *mac, union vxlan_addr *ip,
- __u16 state, __u16 flags,
- __be16 port, __be32 src_vni, __be32 vni,
- __u32 ifindex, __u16 ndm_flags, u32 nhid,
- bool swdev_notify,
- struct netlink_ext_ack *extack)
+int vxlan_fdb_update(struct vxlan_dev *vxlan,
+ const u8 *mac, union vxlan_addr *ip,
+ __u16 state, __u16 flags,
+ __be16 port, __be32 src_vni, __be32 vni,
+ __u32 ifindex, __u16 ndm_flags, u32 nhid,
+ bool swdev_notify,
+ struct netlink_ext_ack *extack)
{
struct vxlan_fdb *f;
@@ -1307,10 +1244,10 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return err;
}
-static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
- const unsigned char *addr, union vxlan_addr ip,
- __be16 port, __be32 src_vni, __be32 vni,
- u32 ifindex, bool swdev_notify)
+int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
+ const unsigned char *addr, union vxlan_addr ip,
+ __be16 port, __be32 src_vni, __be32 vni,
+ u32 ifindex, bool swdev_notify)
{
struct vxlan_rdst *rd = NULL;
struct vxlan_fdb *f;
@@ -1519,56 +1456,6 @@ static bool vxlan_snoop(struct net_device *dev,
return false;
}
-/* See if multicast group is already in use by other ID */
-static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
-{
- struct vxlan_dev *vxlan;
- struct vxlan_sock *sock4;
-#if IS_ENABLED(CONFIG_IPV6)
- struct vxlan_sock *sock6;
-#endif
- unsigned short family = dev->default_dst.remote_ip.sa.sa_family;
-
- sock4 = rtnl_dereference(dev->vn4_sock);
-
- /* The vxlan_sock is only used by dev, leaving group has
- * no effect on other vxlan devices.
- */
- if (family == AF_INET && sock4 && refcount_read(&sock4->refcnt) == 1)
- return false;
-#if IS_ENABLED(CONFIG_IPV6)
- sock6 = rtnl_dereference(dev->vn6_sock);
- if (family == AF_INET6 && sock6 && refcount_read(&sock6->refcnt) == 1)
- return false;
-#endif
-
- list_for_each_entry(vxlan, &vn->vxlan_list, next) {
- if (!netif_running(vxlan->dev) || vxlan == dev)
- continue;
-
- if (family == AF_INET &&
- rtnl_dereference(vxlan->vn4_sock) != sock4)
- continue;
-#if IS_ENABLED(CONFIG_IPV6)
- if (family == AF_INET6 &&
- rtnl_dereference(vxlan->vn6_sock) != sock6)
- continue;
-#endif
-
- if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
- &dev->default_dst.remote_ip))
- continue;
-
- if (vxlan->default_dst.remote_ifindex !=
- dev->default_dst.remote_ifindex)
- continue;
-
- return true;
- }
-
- return false;
-}
-
static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
{
struct vxlan_net *vn;
@@ -1602,7 +1489,10 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan)
RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
synchronize_net();
- vxlan_vs_del_dev(vxlan);
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
+ vxlan_vs_del_vnigrp(vxlan);
+ else
+ vxlan_vs_del_dev(vxlan);
if (__vxlan_sock_release_prep(sock4)) {
udp_tunnel_sock_release(sock4->sock);
@@ -1617,76 +1507,6 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan)
#endif
}
-/* Update multicast group membership when first VNI on
- * multicast address is brought up
- */
-static int vxlan_igmp_join(struct vxlan_dev *vxlan)
-{
- struct sock *sk;
- union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
- int ifindex = vxlan->default_dst.remote_ifindex;
- int ret = -EINVAL;
-
- if (ip->sa.sa_family == AF_INET) {
- struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
- struct ip_mreqn mreq = {
- .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
- .imr_ifindex = ifindex,
- };
-
- sk = sock4->sock->sk;
- lock_sock(sk);
- ret = ip_mc_join_group(sk, &mreq);
- release_sock(sk);
-#if IS_ENABLED(CONFIG_IPV6)
- } else {
- struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
-
- sk = sock6->sock->sk;
- lock_sock(sk);
- ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
- &ip->sin6.sin6_addr);
- release_sock(sk);
-#endif
- }
-
- return ret;
-}
-
-/* Inverse of vxlan_igmp_join when last VNI is brought down */
-static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
-{
- struct sock *sk;
- union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
- int ifindex = vxlan->default_dst.remote_ifindex;
- int ret = -EINVAL;
-
- if (ip->sa.sa_family == AF_INET) {
- struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
- struct ip_mreqn mreq = {
- .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
- .imr_ifindex = ifindex,
- };
-
- sk = sock4->sock->sk;
- lock_sock(sk);
- ret = ip_mc_leave_group(sk, &mreq);
- release_sock(sk);
-#if IS_ENABLED(CONFIG_IPV6)
- } else {
- struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
-
- sk = sock6->sock->sk;
- lock_sock(sk);
- ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
- &ip->sin6.sin6_addr);
- release_sock(sk);
-#endif
- }
-
- return ret;
-}
-
static bool vxlan_remcsum(struct vxlanhdr *unparsed,
struct sk_buff *skb, u32 vxflags)
{
@@ -1828,6 +1648,7 @@ static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{
+ struct vxlan_vni_node *vninode = NULL;
struct vxlan_dev *vxlan;
struct vxlan_sock *vs;
struct vxlanhdr unparsed;
@@ -1860,7 +1681,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
- vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni);
+ vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni, &vninode);
if (!vxlan)
goto drop;
@@ -1930,6 +1751,8 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
++vxlan->dev->stats.rx_frame_errors;
++vxlan->dev->stats.rx_errors;
+ vxlan_vnifilter_count(vxlan, vni, vninode,
+ VXLAN_VNI_STATS_RX_ERRORS, 0);
goto drop;
}
@@ -1937,11 +1760,14 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
if (unlikely(!(vxlan->dev->flags & IFF_UP))) {
rcu_read_unlock();
- atomic_long_inc(&vxlan->dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(vxlan->dev);
+ vxlan_vnifilter_count(vxlan, vni, vninode,
+ VXLAN_VNI_STATS_RX_DROPS, 0);
goto drop;
}
dev_sw_netstats_rx_add(vxlan->dev, skb->len);
+ vxlan_vnifilter_count(vxlan, vni, vninode, VXLAN_VNI_STATS_RX, skb->len);
gro_cells_receive(&vxlan->gro_cells, skb);
rcu_read_unlock();
@@ -1975,7 +1801,7 @@ static int vxlan_err_lookup(struct sock *sk, struct sk_buff *skb)
return -ENOENT;
vni = vxlan_vni(hdr->vx_vni);
- vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni);
+ vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni, NULL);
if (!vxlan)
return -ENOENT;
@@ -2049,8 +1875,12 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
reply->ip_summed = CHECKSUM_UNNECESSARY;
reply->pkt_type = PACKET_HOST;
- if (netif_rx_ni(reply) == NET_RX_DROP)
+ if (netif_rx(reply) == NET_RX_DROP) {
dev->stats.rx_dropped++;
+ vxlan_vnifilter_count(vxlan, vni, NULL,
+ VXLAN_VNI_STATS_RX_DROPS, 0);
+ }
+
} else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
union vxlan_addr ipa = {
.sin.sin_addr.s_addr = tip,
@@ -2204,9 +2034,11 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
if (reply == NULL)
goto out;
- if (netif_rx_ni(reply) == NET_RX_DROP)
+ if (netif_rx(reply) == NET_RX_DROP) {
dev->stats.rx_dropped++;
-
+ vxlan_vnifilter_count(vxlan, vni, NULL,
+ VXLAN_VNI_STATS_RX_DROPS, 0);
+ }
} else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
union vxlan_addr ipa = {
.sin6.sin6_addr = msg->target,
@@ -2540,15 +2372,20 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
tx_stats->tx_packets++;
tx_stats->tx_bytes += len;
u64_stats_update_end(&tx_stats->syncp);
+ vxlan_vnifilter_count(src_vxlan, vni, NULL, VXLAN_VNI_STATS_TX, len);
- if (netif_rx(skb) == NET_RX_SUCCESS) {
+ if (__netif_rx(skb) == NET_RX_SUCCESS) {
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->rx_packets++;
rx_stats->rx_bytes += len;
u64_stats_update_end(&rx_stats->syncp);
+ vxlan_vnifilter_count(dst_vxlan, vni, NULL, VXLAN_VNI_STATS_RX,
+ len);
} else {
drop:
dev->stats.rx_dropped++;
+ vxlan_vnifilter_count(dst_vxlan, vni, NULL,
+ VXLAN_VNI_STATS_RX_DROPS, 0);
}
rcu_read_unlock();
}
@@ -2578,6 +2415,8 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
vxlan->cfg.flags);
if (!dst_vxlan) {
dev->stats.tx_errors++;
+ vxlan_vnifilter_count(vxlan, vni, NULL,
+ VXLAN_VNI_STATS_TX_ERRORS, 0);
kfree_skb(skb);
return -ENOENT;
@@ -2601,15 +2440,19 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
union vxlan_addr remote_ip, local_ip;
struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md;
+ unsigned int pkt_len = skb->len;
__be16 src_port = 0, dst_port;
struct dst_entry *ndst = NULL;
- __be32 vni, label;
__u8 tos, ttl;
int ifindex;
int err;
u32 flags = vxlan->cfg.flags;
bool udp_sum = false;
bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));
+ __be32 vni = 0;
+#if IS_ENABLED(CONFIG_IPV6)
+ __be32 label;
+#endif
info = skb_tunnel_info(skb);
@@ -2647,7 +2490,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX);
else
udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
+#if IS_ENABLED(CONFIG_IPV6)
label = vxlan->cfg.label;
+#endif
} else {
if (!info) {
WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
@@ -2674,7 +2519,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
ttl = info->key.ttl;
tos = info->key.tos;
+#if IS_ENABLED(CONFIG_IPV6)
label = info->key.label;
+#endif
udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
}
src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
@@ -2821,12 +2668,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
label, src_port, dst_port, !udp_sum);
#endif
}
+ vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX, pkt_len);
out_unlock:
rcu_read_unlock();
return;
drop:
dev->stats.tx_dropped++;
+ vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
return;
@@ -2838,6 +2687,7 @@ tx_error:
dev->stats.tx_carrier_errors++;
dst_release(ndst);
dev->stats.tx_errors++;
+ vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_ERRORS, 0);
kfree_skb(skb);
}
@@ -2870,6 +2720,8 @@ static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev,
drop:
dev->stats.tx_dropped++;
+ vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
+ VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
}
@@ -2944,6 +2796,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
vxlan_fdb_miss(vxlan, eth->h_dest);
dev->stats.tx_dropped++;
+ vxlan_vnifilter_count(vxlan, vni, NULL,
+ VXLAN_VNI_STATS_TX_DROPS, 0);
kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -3044,6 +2898,9 @@ static int vxlan_init(struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
int err;
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
+ vxlan_vnigroup_init(vxlan);
+
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
@@ -3073,6 +2930,9 @@ static void vxlan_uninit(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
+ vxlan_vnigroup_uninit(vxlan);
+
gro_cells_destroy(&vxlan->gro_cells);
vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
@@ -3090,14 +2950,10 @@ static int vxlan_open(struct net_device *dev)
if (ret < 0)
return ret;
- if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
- ret = vxlan_igmp_join(vxlan);
- if (ret == -EADDRINUSE)
- ret = 0;
- if (ret) {
- vxlan_sock_release(vxlan);
- return ret;
- }
+ ret = vxlan_multicast_join(vxlan);
+ if (ret) {
+ vxlan_sock_release(vxlan);
+ return ret;
}
if (vxlan->cfg.age_interval)
@@ -3134,19 +2990,15 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
static int vxlan_stop(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
- int ret = 0;
- if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
- !vxlan_group_used(vn, vxlan))
- ret = vxlan_igmp_leave(vxlan);
+ vxlan_multicast_leave(vxlan);
del_timer_sync(&vxlan->age_timer);
vxlan_flush(vxlan, false);
vxlan_sock_release(vxlan);
- return ret;
+ return 0;
}
/* Stub, nothing needs to be done. */
@@ -3369,6 +3221,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG },
[IFLA_VXLAN_TTL_INHERIT] = { .type = NLA_FLAG },
[IFLA_VXLAN_DF] = { .type = NLA_U8 },
+ [IFLA_VXLAN_VNIFILTER] = { .type = NLA_U8 },
};
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -3554,6 +3407,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
{
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA;
struct vxlan_sock *vs = NULL;
struct vxlan_dev_node *node;
int l3mdev_index = 0;
@@ -3589,7 +3443,12 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
rcu_assign_pointer(vxlan->vn4_sock, vs);
node = &vxlan->hlist4;
}
- vxlan_vs_add_dev(vs, vxlan, node);
+
+ if (metadata && (vxlan->cfg.flags & VXLAN_F_VNIFILTER))
+ vxlan_vs_add_vnigrp(vxlan, vs, ipv6);
+ else
+ vxlan_vs_add_dev(vs, vxlan, node);
+
return 0;
}
@@ -3616,13 +3475,42 @@ static int vxlan_sock_add(struct vxlan_dev *vxlan)
return ret;
}
+int vxlan_vni_in_use(struct net *src_net, struct vxlan_dev *vxlan,
+ struct vxlan_config *conf, __be32 vni)
+{
+ struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
+ struct vxlan_dev *tmp;
+
+ list_for_each_entry(tmp, &vn->vxlan_list, next) {
+ if (tmp == vxlan)
+ continue;
+ if (tmp->cfg.flags & VXLAN_F_VNIFILTER) {
+ if (!vxlan_vnifilter_lookup(tmp, vni))
+ continue;
+ } else if (tmp->cfg.vni != vni) {
+ continue;
+ }
+ if (tmp->cfg.dst_port != conf->dst_port)
+ continue;
+ if ((tmp->cfg.flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)) !=
+ (conf->flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)))
+ continue;
+
+ if ((conf->flags & VXLAN_F_IPV6_LINKLOCAL) &&
+ tmp->cfg.remote_ifindex != conf->remote_ifindex)
+ continue;
+
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf,
struct net_device **lower,
struct vxlan_dev *old,
struct netlink_ext_ack *extack)
{
- struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
- struct vxlan_dev *tmp;
bool use_ipv6 = false;
if (conf->flags & VXLAN_F_GPE) {
@@ -3755,22 +3643,7 @@ static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf,
if (!conf->age_interval)
conf->age_interval = FDB_AGE_DEFAULT;
- list_for_each_entry(tmp, &vn->vxlan_list, next) {
- if (tmp == old)
- continue;
-
- if (tmp->cfg.vni != conf->vni)
- continue;
- if (tmp->cfg.dst_port != conf->dst_port)
- continue;
- if ((tmp->cfg.flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)) !=
- (conf->flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)))
- continue;
-
- if ((conf->flags & VXLAN_F_IPV6_LINKLOCAL) &&
- tmp->cfg.remote_ifindex != conf->remote_ifindex)
- continue;
-
+ if (vxlan_vni_in_use(src_net, old, conf, conf->vni)) {
NL_SET_ERR_MSG(extack,
"A VXLAN device with the specified VNI already exists");
return -EEXIST;
@@ -4226,6 +4099,21 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
if (data[IFLA_VXLAN_DF])
conf->df = nla_get_u8(data[IFLA_VXLAN_DF]);
+ if (data[IFLA_VXLAN_VNIFILTER]) {
+ err = vxlan_nl2flag(conf, data, IFLA_VXLAN_VNIFILTER,
+ VXLAN_F_VNIFILTER, changelink, false,
+ extack);
+ if (err)
+ return err;
+
+ if ((conf->flags & VXLAN_F_VNIFILTER) &&
+ !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_VNIFILTER],
+ "vxlan vnifilter only valid in collect metadata mode");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -4301,6 +4189,19 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
dst->remote_ifindex,
true);
spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+
+ /* If this is a VNI filtering device, also update the fdb entries of
+ * all VNIs that were using the default remote IP.
+ */
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) {
+ err = vxlan_vnilist_update_group(vxlan, &dst->remote_ip,
+ &conf.remote_ip, extack);
+ if (err) {
+ netdev_adjacent_change_abort(dst->remote_dev,
+ lowerdev, dev);
+ return err;
+ }
+ }
}
if (conf.age_interval != vxlan->cfg.age_interval)
@@ -4446,6 +4347,11 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
goto nla_put_failure;
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER &&
+ nla_put_u8(skb, IFLA_VXLAN_VNIFILTER,
+ !!(vxlan->cfg.flags & VXLAN_F_VNIFILTER)))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
@@ -4805,6 +4711,8 @@ static int __init vxlan_init_module(void)
if (rc)
goto out4;
+ vxlan_vnifilter_init();
+
return 0;
out4:
unregister_switchdev_notifier(&vxlan_switchdev_notifier_block);
@@ -4819,6 +4727,7 @@ late_initcall(vxlan_init_module);
static void __exit vxlan_cleanup_module(void)
{
+ vxlan_vnifilter_uninit();
rtnl_link_unregister(&vxlan_link_ops);
unregister_switchdev_notifier(&vxlan_switchdev_notifier_block);
unregister_netdevice_notifier(&vxlan_notifier_block);
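
Across the vxlan datapath the patch follows one convention for the new per-VNI statistics: every place that already bumps a netdev counter also calls vxlan_vnifilter_count() with the matching VXLAN_VNI_STATS_* type (the vninode argument may be NULL when the caller has not looked it up). Condensed from the drop paths above into a hypothetical helper:

/* Condensed from the drop paths above: account the drop on the netdev
 * and against the VNI in one place (vninode is NULL when not known).
 */
static void demo_count_tx_drop(struct vxlan_dev *vxlan, struct net_device *dev,
			       __be32 vni)
{
	dev->stats.tx_dropped++;
	vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0);
}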
diff --git a/drivers/net/vxlan/vxlan_multicast.c b/drivers/net/vxlan/vxlan_multicast.c
new file mode 100644
index 000000000000..a7f2d67dc61b
--- /dev/null
+++ b/drivers/net/vxlan/vxlan_multicast.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Vxlan multicast group handling
+ *
+ */
+#include <linux/kernel.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+#include <linux/igmp.h>
+#include <net/vxlan.h>
+
+#include "vxlan_private.h"
+
+/* Update multicast group membership when the first VNI on a
+ * multicast address is brought up
+ */
+int vxlan_igmp_join(struct vxlan_dev *vxlan, union vxlan_addr *rip,
+ int rifindex)
+{
+ union vxlan_addr *ip = (rip ? : &vxlan->default_dst.remote_ip);
+ int ifindex = (rifindex ? : vxlan->default_dst.remote_ifindex);
+ int ret = -EINVAL;
+ struct sock *sk;
+
+ if (ip->sa.sa_family == AF_INET) {
+ struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
+ struct ip_mreqn mreq = {
+ .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
+ .imr_ifindex = ifindex,
+ };
+
+ sk = sock4->sock->sk;
+ lock_sock(sk);
+ ret = ip_mc_join_group(sk, &mreq);
+ release_sock(sk);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
+
+ sk = sock6->sock->sk;
+ lock_sock(sk);
+ ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
+ &ip->sin6.sin6_addr);
+ release_sock(sk);
+#endif
+ }
+
+ return ret;
+}
+
+int vxlan_igmp_leave(struct vxlan_dev *vxlan, union vxlan_addr *rip,
+ int rifindex)
+{
+ union vxlan_addr *ip = (rip ? : &vxlan->default_dst.remote_ip);
+ int ifindex = (rifindex ? : vxlan->default_dst.remote_ifindex);
+ int ret = -EINVAL;
+ struct sock *sk;
+
+ if (ip->sa.sa_family == AF_INET) {
+ struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
+ struct ip_mreqn mreq = {
+ .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
+ .imr_ifindex = ifindex,
+ };
+
+ sk = sock4->sock->sk;
+ lock_sock(sk);
+ ret = ip_mc_leave_group(sk, &mreq);
+ release_sock(sk);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
+
+ sk = sock6->sock->sk;
+ lock_sock(sk);
+ ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
+ &ip->sin6.sin6_addr);
+ release_sock(sk);
+#endif
+ }
+
+ return ret;
+}
+
+static bool vxlan_group_used_match(union vxlan_addr *ip, int ifindex,
+ union vxlan_addr *rip, int rifindex)
+{
+ if (!vxlan_addr_multicast(rip))
+ return false;
+
+ if (!vxlan_addr_equal(rip, ip))
+ return false;
+
+ if (rifindex != ifindex)
+ return false;
+
+ return true;
+}
+
+static bool vxlan_group_used_by_vnifilter(struct vxlan_dev *vxlan,
+ union vxlan_addr *ip, int ifindex)
+{
+ struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
+ struct vxlan_vni_node *v, *tmp;
+
+ if (vxlan_group_used_match(ip, ifindex,
+ &vxlan->default_dst.remote_ip,
+ vxlan->default_dst.remote_ifindex))
+ return true;
+
+ list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
+ if (!vxlan_addr_multicast(&v->remote_ip))
+ continue;
+
+ if (vxlan_group_used_match(ip, ifindex,
+ &v->remote_ip,
+ vxlan->default_dst.remote_ifindex))
+ return true;
+ }
+
+ return false;
+}
+
+/* See if the multicast group is already in use by another ID */
+bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev,
+ __be32 vni, union vxlan_addr *rip, int rifindex)
+{
+ union vxlan_addr *ip = (rip ? : &dev->default_dst.remote_ip);
+ int ifindex = (rifindex ? : dev->default_dst.remote_ifindex);
+ struct vxlan_dev *vxlan;
+ struct vxlan_sock *sock4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct vxlan_sock *sock6;
+#endif
+ unsigned short family = dev->default_dst.remote_ip.sa.sa_family;
+
+ sock4 = rtnl_dereference(dev->vn4_sock);
+
+ /* The vxlan_sock is only used by dev; leaving the group has
+ * no effect on other vxlan devices.
+ */
+ if (family == AF_INET && sock4 && refcount_read(&sock4->refcnt) == 1)
+ return false;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ sock6 = rtnl_dereference(dev->vn6_sock);
+ if (family == AF_INET6 && sock6 && refcount_read(&sock6->refcnt) == 1)
+ return false;
+#endif
+
+ list_for_each_entry(vxlan, &vn->vxlan_list, next) {
+ if (!netif_running(vxlan->dev) || vxlan == dev)
+ continue;
+
+ if (family == AF_INET &&
+ rtnl_dereference(vxlan->vn4_sock) != sock4)
+ continue;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (family == AF_INET6 &&
+ rtnl_dereference(vxlan->vn6_sock) != sock6)
+ continue;
+#endif
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) {
+ if (!vxlan_group_used_by_vnifilter(vxlan, ip, ifindex))
+ continue;
+ } else {
+ if (!vxlan_group_used_match(ip, ifindex,
+ &vxlan->default_dst.remote_ip,
+ vxlan->default_dst.remote_ifindex))
+ continue;
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
+static int vxlan_multicast_join_vnigrp(struct vxlan_dev *vxlan)
+{
+ struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
+ struct vxlan_vni_node *v, *tmp, *vgood = NULL;
+ int ret = 0;
+
+ list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
+ if (!vxlan_addr_multicast(&v->remote_ip))
+ continue;
+ /* skip if address is same as default address */
+ if (vxlan_addr_equal(&v->remote_ip,
+ &vxlan->default_dst.remote_ip))
+ continue;
+ ret = vxlan_igmp_join(vxlan, &v->remote_ip, 0);
+ if (ret == -EADDRINUSE)
+ ret = 0;
+ if (ret)
+ goto out;
+ vgood = v;
+ }
+out:
+ if (ret) {
+ list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
+ if (!vxlan_addr_multicast(&v->remote_ip))
+ continue;
+ if (vxlan_addr_equal(&v->remote_ip,
+ &vxlan->default_dst.remote_ip))
+ continue;
+ vxlan_igmp_leave(vxlan, &v->remote_ip, 0);
+ if (v == vgood)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int vxlan_multicast_leave_vnigrp(struct vxlan_dev *vxlan)
+{
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
+ struct vxlan_vni_node *v, *tmp;
+ int last_err = 0, ret;
+
+ list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
+ if (vxlan_addr_multicast(&v->remote_ip) &&
+ !vxlan_group_used(vn, vxlan, v->vni, &v->remote_ip,
+ 0)) {
+ ret = vxlan_igmp_leave(vxlan, &v->remote_ip, 0);
+ if (ret)
+ last_err = ret;
+ }
+ }
+
+ return last_err;
+}
+
+int vxlan_multicast_join(struct vxlan_dev *vxlan)
+{
+ int ret = 0;
+
+ if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
+ ret = vxlan_igmp_join(vxlan, &vxlan->default_dst.remote_ip,
+ vxlan->default_dst.remote_ifindex);
+ if (ret == -EADDRINUSE)
+ ret = 0;
+ if (ret)
+ return ret;
+ }
+
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
+ return vxlan_multicast_join_vnigrp(vxlan);
+
+ return 0;
+}
+
+int vxlan_multicast_leave(struct vxlan_dev *vxlan)
+{
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ int ret = 0;
+
+ if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
+ !vxlan_group_used(vn, vxlan, 0, NULL, 0)) {
+ ret = vxlan_igmp_leave(vxlan, &vxlan->default_dst.remote_ip,
+ vxlan->default_dst.remote_ifindex);
+ if (ret)
+ return ret;
+ }
+
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
+ return vxlan_multicast_leave_vnigrp(vxlan);
+
+ return 0;
+}
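
The ip_mc_join_group()/ip_mc_leave_group() and ipv6_sock_mc_join()/ipv6_sock_mc_drop() calls above are the in-kernel counterparts of the IP_ADD_MEMBERSHIP/IP_DROP_MEMBERSHIP socket options. A minimal userspace sketch of the same join/leave sequence (illustrative only; the group 239.1.1.1 and the interface name "eth0" are placeholders):

    /* Hedged sketch: userspace analogue of the kernel-side IGMP join/leave above.
     * struct ip_mreqn is Linux-specific; group and ifname are placeholders.
     */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <arpa/inet.h>
    #include <net/if.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    int main(void)
    {
            struct ip_mreqn mreq;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return 1;

            memset(&mreq, 0, sizeof(mreq));
            inet_pton(AF_INET, "239.1.1.1", &mreq.imr_multiaddr); /* multicast group */
            mreq.imr_ifindex = if_nametoindex("eth0");            /* 0 lets the kernel choose */

            /* join: mirrors ip_mc_join_group(sk, &mreq) in vxlan_igmp_join() */
            if (setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) < 0)
                    perror("join");

            /* ... receive group traffic here ... */

            /* leave: mirrors ip_mc_leave_group(sk, &mreq) in vxlan_igmp_leave() */
            if (setsockopt(fd, IPPROTO_IP, IP_DROP_MEMBERSHIP, &mreq, sizeof(mreq)) < 0)
                    perror("leave");

            close(fd);
            return 0;
    }
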
diff --git a/drivers/net/vxlan/vxlan_private.h b/drivers/net/vxlan/vxlan_private.h
new file mode 100644
index 000000000000..599c3b4fdd5e
--- /dev/null
+++ b/drivers/net/vxlan/vxlan_private.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Vxlan private header file
+ *
+ */
+
+#ifndef _VXLAN_PRIVATE_H
+#define _VXLAN_PRIVATE_H
+
+#include <linux/rhashtable.h>
+
+extern unsigned int vxlan_net_id;
+extern const u8 all_zeros_mac[ETH_ALEN + 2];
+extern const struct rhashtable_params vxlan_vni_rht_params;
+
+#define PORT_HASH_BITS 8
+#define PORT_HASH_SIZE (1 << PORT_HASH_BITS)
+
+/* per-network namespace private data for this module */
+struct vxlan_net {
+ struct list_head vxlan_list;
+ struct hlist_head sock_list[PORT_HASH_SIZE];
+ spinlock_t sock_lock;
+ struct notifier_block nexthop_notifier_block;
+};
+
+/* Forwarding table entry */
+struct vxlan_fdb {
+ struct hlist_node hlist; /* linked list of entries */
+ struct rcu_head rcu;
+ unsigned long updated; /* jiffies */
+ unsigned long used;
+ struct list_head remotes;
+ u8 eth_addr[ETH_ALEN];
+ u16 state; /* see ndm_state */
+ __be32 vni;
+ u16 flags; /* see ndm_flags and below */
+ struct list_head nh_list;
+ struct nexthop __rcu *nh;
+ struct vxlan_dev __rcu *vdev;
+};
+
+#define NTF_VXLAN_ADDED_BY_USER 0x100
+
+/* Virtual Network hash table head */
+static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni)
+{
+ return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)];
+}
+
+/* Socket hash table head */
+static inline struct hlist_head *vs_head(struct net *net, __be16 port)
+{
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+
+ return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
+}
+
+/* First remote destination for a forwarding entry.
+ * Guaranteed to be non-NULL because remotes are never deleted.
+ */
+static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
+{
+ if (rcu_access_pointer(fdb->nh))
+ return NULL;
+ return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
+}
+
+static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
+{
+ if (rcu_access_pointer(fdb->nh))
+ return NULL;
+ return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline
+bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
+{
+ if (a->sa.sa_family != b->sa.sa_family)
+ return false;
+ if (a->sa.sa_family == AF_INET6)
+ return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
+ else
+ return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
+}
+
+#else /* !CONFIG_IPV6 */
+
+static inline
+bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
+{
+ return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
+}
+
+#endif
+
+static inline struct vxlan_vni_node *
+vxlan_vnifilter_lookup(struct vxlan_dev *vxlan, __be32 vni)
+{
+ struct vxlan_vni_group *vg;
+
+ vg = rcu_dereference_rtnl(vxlan->vnigrp);
+ if (!vg)
+ return NULL;
+
+ return rhashtable_lookup_fast(&vg->vni_hash, &vni,
+ vxlan_vni_rht_params);
+}
+
+/* vxlan_core.c */
+int vxlan_fdb_create(struct vxlan_dev *vxlan,
+ const u8 *mac, union vxlan_addr *ip,
+ __u16 state, __be16 port, __be32 src_vni,
+ __be32 vni, __u32 ifindex, __u16 ndm_flags,
+ u32 nhid, struct vxlan_fdb **fdb,
+ struct netlink_ext_ack *extack);
+int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
+ const unsigned char *addr, union vxlan_addr ip,
+ __be16 port, __be32 src_vni, __be32 vni,
+ u32 ifindex, bool swdev_notify);
+u32 eth_vni_hash(const unsigned char *addr, __be32 vni);
+u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni);
+int vxlan_fdb_update(struct vxlan_dev *vxlan,
+ const u8 *mac, union vxlan_addr *ip,
+ __u16 state, __u16 flags,
+ __be16 port, __be32 src_vni, __be32 vni,
+ __u32 ifindex, __u16 ndm_flags, u32 nhid,
+ bool swdev_notify, struct netlink_ext_ack *extack);
+int vxlan_vni_in_use(struct net *src_net, struct vxlan_dev *vxlan,
+ struct vxlan_config *conf, __be32 vni);
+
+/* vxlan_vnifilter.c */
+int vxlan_vnigroup_init(struct vxlan_dev *vxlan);
+void vxlan_vnigroup_uninit(struct vxlan_dev *vxlan);
+
+void vxlan_vnifilter_init(void);
+void vxlan_vnifilter_uninit(void);
+void vxlan_vnifilter_count(struct vxlan_dev *vxlan, __be32 vni,
+ struct vxlan_vni_node *vninode,
+ int type, unsigned int len);
+
+void vxlan_vs_add_vnigrp(struct vxlan_dev *vxlan,
+ struct vxlan_sock *vs,
+ bool ipv6);
+void vxlan_vs_del_vnigrp(struct vxlan_dev *vxlan);
+int vxlan_vnilist_update_group(struct vxlan_dev *vxlan,
+ union vxlan_addr *old_remote_ip,
+ union vxlan_addr *new_remote_ip,
+ struct netlink_ext_ack *extack);
+
+
+/* vxlan_multicast.c */
+int vxlan_multicast_join(struct vxlan_dev *vxlan);
+int vxlan_multicast_leave(struct vxlan_dev *vxlan);
+bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev,
+ __be32 vni, union vxlan_addr *rip, int rifindex);
+int vxlan_igmp_join(struct vxlan_dev *vxlan, union vxlan_addr *rip,
+ int rifindex);
+int vxlan_igmp_leave(struct vxlan_dev *vxlan, union vxlan_addr *rip,
+ int rifindex);
+#endif
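
vni_head() and vs_head() above bucket VNIs and UDP ports with the kernel's hash_32() multiplicative hash. A standalone sketch of the same bucketing (illustrative; the 0x61C88647 constant mirrors GOLDEN_RATIO_32 from include/linux/hash.h, and VNI_HASH_BITS is assumed to be 10 as in include/net/vxlan.h):

    /* Hedged sketch: multiplicative-hash bucketing as used by vni_head()/vs_head(). */
    #include <stdint.h>
    #include <stdio.h>

    #define PORT_HASH_BITS  8
    #define VNI_HASH_BITS   10      /* assumed value of VNI_HASH_BITS in include/net/vxlan.h */

    static uint32_t hash_32(uint32_t val, unsigned int bits)
    {
            /* same golden-ratio multiply as include/linux/hash.h (GOLDEN_RATIO_32) */
            return (val * 0x61C88647u) >> (32 - bits);
    }

    int main(void)
    {
            unsigned int vni = 1010;   /* host order here; the driver hashes the __be32 value */
            unsigned int port = 4789;  /* IANA VXLAN port, already ntohs()ed as in vs_head() */

            printf("vni %u  -> bucket %u of %u\n",
                   vni, hash_32(vni, VNI_HASH_BITS), 1u << VNI_HASH_BITS);
            printf("port %u -> bucket %u of %u\n",
                   port, hash_32(port, PORT_HASH_BITS), 1u << PORT_HASH_BITS);
            return 0;
    }
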
diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c
new file mode 100644
index 000000000000..9f28d0b6a6b2
--- /dev/null
+++ b/drivers/net/vxlan/vxlan_vnifilter.c
@@ -0,0 +1,999 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Vxlan vni filter for collect metadata mode
+ *
+ * Authors: Roopa Prabhu <roopa@nvidia.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/etherdevice.h>
+#include <linux/rhashtable.h>
+#include <net/rtnetlink.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+#include <net/vxlan.h>
+
+#include "vxlan_private.h"
+
+static inline int vxlan_vni_cmp(struct rhashtable_compare_arg *arg,
+ const void *ptr)
+{
+ const struct vxlan_vni_node *vnode = ptr;
+ __be32 vni = *(__be32 *)arg->key;
+
+ return vnode->vni != vni;
+}
+
+const struct rhashtable_params vxlan_vni_rht_params = {
+ .head_offset = offsetof(struct vxlan_vni_node, vnode),
+ .key_offset = offsetof(struct vxlan_vni_node, vni),
+ .key_len = sizeof(__be32),
+ .nelem_hint = 3,
+ .max_size = VXLAN_N_VID,
+ .obj_cmpfn = vxlan_vni_cmp,
+ .automatic_shrinking = true,
+};
+
+static void vxlan_vs_add_del_vninode(struct vxlan_dev *vxlan,
+ struct vxlan_vni_node *v,
+ bool del)
+{
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ struct vxlan_dev_node *node;
+ struct vxlan_sock *vs;
+
+ spin_lock(&vn->sock_lock);
+ if (del) {
+ if (!hlist_unhashed(&v->hlist4.hlist))
+ hlist_del_init_rcu(&v->hlist4.hlist);
+#if IS_ENABLED(CONFIG_IPV6)
+ if (!hlist_unhashed(&v->hlist6.hlist))
+ hlist_del_init_rcu(&v->hlist6.hlist);
+#endif
+ goto out;
+ }
+
+#if IS_ENABLED(CONFIG_IPV6)
+ vs = rtnl_dereference(vxlan->vn6_sock);
+ if (vs && v) {
+ node = &v->hlist6;
+ hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni));
+ }
+#endif
+ vs = rtnl_dereference(vxlan->vn4_sock);
+ if (vs && v) {
+ node = &v->hlist4;
+ hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni));
+ }
+out:
+ spin_unlock(&vn->sock_lock);
+}
+
+void vxlan_vs_add_vnigrp(struct vxlan_dev *vxlan,
+ struct vxlan_sock *vs,
+ bool ipv6)
+{
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
+ struct vxlan_vni_node *v, *tmp;
+ struct vxlan_dev_node *node;
+
+ if (!vg)
+ return;
+
+ spin_lock(&vn->sock_lock);
+ list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
+#if IS_ENABLED(CONFIG_IPV6)
+ if (ipv6)
+ node = &v->hlist6;
+ else
+#endif
+ node = &v->hlist4;
+ node->vxlan = vxlan;
+ hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni));
+ }
+ spin_unlock(&vn->sock_lock);
+}
+
+void vxlan_vs_del_vnigrp(struct vxlan_dev *vxlan)
+{
+ struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ struct vxlan_vni_node *v, *tmp;
+
+ if (!vg)
+ return;
+
+ spin_lock(&vn->sock_lock);
+ list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
+ hlist_del_init_rcu(&v->hlist4.hlist);
+#if IS_ENABLED(CONFIG_IPV6)
+ hlist_del_init_rcu(&v->hlist6.hlist);
+#endif
+ }
+ spin_unlock(&vn->sock_lock);
+}
+
+static void vxlan_vnifilter_stats_get(const struct vxlan_vni_node *vninode,
+ struct vxlan_vni_stats *dest)
+{
+ int i;
+
+ memset(dest, 0, sizeof(*dest));
+ for_each_possible_cpu(i) {
+ struct vxlan_vni_stats_pcpu *pstats;
+ struct vxlan_vni_stats temp;
+ unsigned int start;
+
+ pstats = per_cpu_ptr(vninode->stats, i);
+ do {
+ start = u64_stats_fetch_begin_irq(&pstats->syncp);
+ memcpy(&temp, &pstats->stats, sizeof(temp));
+ } while (u64_stats_fetch_retry_irq(&pstats->syncp, start));
+
+ dest->rx_packets += temp.rx_packets;
+ dest->rx_bytes += temp.rx_bytes;
+ dest->rx_drops += temp.rx_drops;
+ dest->rx_errors += temp.rx_errors;
+ dest->tx_packets += temp.tx_packets;
+ dest->tx_bytes += temp.tx_bytes;
+ dest->tx_drops += temp.tx_drops;
+ dest->tx_errors += temp.tx_errors;
+ }
+}
+
+static void vxlan_vnifilter_stats_add(struct vxlan_vni_node *vninode,
+ int type, unsigned int len)
+{
+ struct vxlan_vni_stats_pcpu *pstats = this_cpu_ptr(vninode->stats);
+
+ u64_stats_update_begin(&pstats->syncp);
+ switch (type) {
+ case VXLAN_VNI_STATS_RX:
+ pstats->stats.rx_bytes += len;
+ pstats->stats.rx_packets++;
+ break;
+ case VXLAN_VNI_STATS_RX_DROPS:
+ pstats->stats.rx_drops++;
+ break;
+ case VXLAN_VNI_STATS_RX_ERRORS:
+ pstats->stats.rx_errors++;
+ break;
+ case VXLAN_VNI_STATS_TX:
+ pstats->stats.tx_bytes += len;
+ pstats->stats.tx_packets++;
+ break;
+ case VXLAN_VNI_STATS_TX_DROPS:
+ pstats->stats.tx_drops++;
+ break;
+ case VXLAN_VNI_STATS_TX_ERRORS:
+ pstats->stats.tx_errors++;
+ break;
+ }
+ u64_stats_update_end(&pstats->syncp);
+}
+
+void vxlan_vnifilter_count(struct vxlan_dev *vxlan, __be32 vni,
+ struct vxlan_vni_node *vninode,
+ int type, unsigned int len)
+{
+ struct vxlan_vni_node *vnode;
+
+ if (!(vxlan->cfg.flags & VXLAN_F_VNIFILTER))
+ return;
+
+ if (vninode) {
+ vnode = vninode;
+ } else {
+ vnode = vxlan_vnifilter_lookup(vxlan, vni);
+ if (!vnode)
+ return;
+ }
+
+ vxlan_vnifilter_stats_add(vnode, type, len);
+}
+
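
vxlan_vnifilter_stats_get() relies on the retry loop behind u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() to read the per-cpu 64-bit counters without tearing. A minimal single-writer seqcount-style sketch of the same protocol (illustrative userspace C; the memory barriers of the real kernel primitives are omitted):

    /* Hedged sketch: seqcount-style torn-read avoidance behind the per-VNI stats.
     * Single writer, no memory barriers; the real u64_stats/seqcount code has both.
     */
    #include <stdint.h>
    #include <stdio.h>

    struct stats { uint64_t rx_packets, rx_bytes; };

    static unsigned int seq;        /* even = stable, odd = write in progress */
    static struct stats counters;

    static void writer_add(unsigned int len)
    {
            seq++;                          /* u64_stats_update_begin() analogue */
            counters.rx_packets++;
            counters.rx_bytes += len;
            seq++;                          /* u64_stats_update_end() analogue */
    }

    static struct stats reader_snapshot(void)
    {
            struct stats snap;
            unsigned int start;

            do {
                    start = seq;            /* u64_stats_fetch_begin_irq() analogue */
                    snap = counters;
            } while ((start & 1) || seq != start);  /* retry if a write raced us */

            return snap;
    }

    int main(void)
    {
            struct stats s;

            writer_add(1500);
            s = reader_snapshot();
            printf("%llu packets, %llu bytes\n",
                   (unsigned long long)s.rx_packets, (unsigned long long)s.rx_bytes);
            return 0;
    }
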
+static u32 vnirange(struct vxlan_vni_node *vbegin,
+ struct vxlan_vni_node *vend)
+{
+ return (be32_to_cpu(vend->vni) - be32_to_cpu(vbegin->vni));
+}
+
+static size_t vxlan_vnifilter_entry_nlmsg_size(void)
+{
+ return NLMSG_ALIGN(sizeof(struct tunnel_msg))
+ + nla_total_size(0) /* VXLAN_VNIFILTER_ENTRY */
+ + nla_total_size(sizeof(u32)) /* VXLAN_VNIFILTER_ENTRY_START */
+ + nla_total_size(sizeof(u32)) /* VXLAN_VNIFILTER_ENTRY_END */
+ + nla_total_size(sizeof(struct in6_addr));/* VXLAN_VNIFILTER_ENTRY_GROUP{6} */
+}
+
+static int __vnifilter_entry_fill_stats(struct sk_buff *skb,
+ const struct vxlan_vni_node *vbegin)
+{
+ struct vxlan_vni_stats vstats;
+ struct nlattr *vstats_attr;
+
+ vstats_attr = nla_nest_start(skb, VXLAN_VNIFILTER_ENTRY_STATS);
+ if (!vstats_attr)
+ goto out_stats_err;
+
+ vxlan_vnifilter_stats_get(vbegin, &vstats);
+ if (nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_BYTES,
+ vstats.rx_bytes, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_PKTS,
+ vstats.rx_packets, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_DROPS,
+ vstats.rx_drops, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_ERRORS,
+ vstats.rx_errors, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_BYTES,
+ vstats.tx_bytes, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_PKTS,
+ vstats.tx_packets, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_DROPS,
+ vstats.tx_drops, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_ERRORS,
+ vstats.tx_errors, VNIFILTER_ENTRY_STATS_PAD))
+ goto out_stats_err;
+
+ nla_nest_end(skb, vstats_attr);
+
+ return 0;
+
+out_stats_err:
+ nla_nest_cancel(skb, vstats_attr);
+ return -EMSGSIZE;
+}
+
+static bool vxlan_fill_vni_filter_entry(struct sk_buff *skb,
+ struct vxlan_vni_node *vbegin,
+ struct vxlan_vni_node *vend,
+ bool fill_stats)
+{
+ struct nlattr *ventry;
+ u32 vs = be32_to_cpu(vbegin->vni);
+ u32 ve = 0;
+
+ if (vbegin != vend)
+ ve = be32_to_cpu(vend->vni);
+
+ ventry = nla_nest_start(skb, VXLAN_VNIFILTER_ENTRY);
+ if (!ventry)
+ return false;
+
+ if (nla_put_u32(skb, VXLAN_VNIFILTER_ENTRY_START, vs))
+ goto out_err;
+
+ if (ve && nla_put_u32(skb, VXLAN_VNIFILTER_ENTRY_END, ve))
+ goto out_err;
+
+ if (!vxlan_addr_any(&vbegin->remote_ip)) {
+ if (vbegin->remote_ip.sa.sa_family == AF_INET) {
+ if (nla_put_in_addr(skb, VXLAN_VNIFILTER_ENTRY_GROUP,
+ vbegin->remote_ip.sin.sin_addr.s_addr))
+ goto out_err;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ if (nla_put_in6_addr(skb, VXLAN_VNIFILTER_ENTRY_GROUP6,
+ &vbegin->remote_ip.sin6.sin6_addr))
+ goto out_err;
+#endif
+ }
+ }
+
+ if (fill_stats && __vnifilter_entry_fill_stats(skb, vbegin))
+ goto out_err;
+
+ nla_nest_end(skb, ventry);
+
+ return true;
+
+out_err:
+ nla_nest_cancel(skb, ventry);
+
+ return false;
+}
+
+static void vxlan_vnifilter_notify(const struct vxlan_dev *vxlan,
+ struct vxlan_vni_node *vninode, int cmd)
+{
+ struct tunnel_msg *tmsg;
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
+ struct net *net = dev_net(vxlan->dev);
+ int err = -ENOBUFS;
+
+ skb = nlmsg_new(vxlan_vnifilter_entry_nlmsg_size(), GFP_KERNEL);
+ if (!skb)
+ goto out_err;
+
+ err = -EMSGSIZE;
+ nlh = nlmsg_put(skb, 0, 0, cmd, sizeof(*tmsg), 0);
+ if (!nlh)
+ goto out_err;
+ tmsg = nlmsg_data(nlh);
+ memset(tmsg, 0, sizeof(*tmsg));
+ tmsg->family = AF_BRIDGE;
+ tmsg->ifindex = vxlan->dev->ifindex;
+
+ if (!vxlan_fill_vni_filter_entry(skb, vninode, vninode, false))
+ goto out_err;
+
+ nlmsg_end(skb, nlh);
+ rtnl_notify(skb, net, 0, RTNLGRP_TUNNEL, NULL, GFP_KERNEL);
+
+ return;
+
+out_err:
+ rtnl_set_sk_err(net, RTNLGRP_TUNNEL, err);
+
+ kfree_skb(skb);
+}
+
+static int vxlan_vnifilter_dump_dev(const struct net_device *dev,
+ struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct vxlan_vni_node *tmp, *v, *vbegin = NULL, *vend = NULL;
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct tunnel_msg *new_tmsg, *tmsg;
+ int idx = 0, s_idx = cb->args[1];
+ struct vxlan_vni_group *vg;
+ struct nlmsghdr *nlh;
+ bool dump_stats;
+ int err = 0;
+
+ if (!(vxlan->cfg.flags & VXLAN_F_VNIFILTER))
+ return -EINVAL;
+
+ /* RCU needed because of the vni locking rules (rcu || rtnl) */
+ vg = rcu_dereference(vxlan->vnigrp);
+ if (!vg || !vg->num_vnis)
+ return 0;
+
+ tmsg = nlmsg_data(cb->nlh);
+ dump_stats = !!(tmsg->flags & TUNNEL_MSG_FLAG_STATS);
+
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ RTM_NEWTUNNEL, sizeof(*new_tmsg), NLM_F_MULTI);
+ if (!nlh)
+ return -EMSGSIZE;
+ new_tmsg = nlmsg_data(nlh);
+ memset(new_tmsg, 0, sizeof(*new_tmsg));
+ new_tmsg->family = PF_BRIDGE;
+ new_tmsg->ifindex = dev->ifindex;
+
+ list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
+ if (idx < s_idx) {
+ idx++;
+ continue;
+ }
+ if (!vbegin) {
+ vbegin = v;
+ vend = v;
+ continue;
+ }
+ if (!dump_stats && vnirange(vend, v) == 1 &&
+ vxlan_addr_equal(&v->remote_ip, &vend->remote_ip)) {
+ goto update_end;
+ } else {
+ if (!vxlan_fill_vni_filter_entry(skb, vbegin, vend,
+ dump_stats)) {
+ err = -EMSGSIZE;
+ break;
+ }
+ idx += vnirange(vbegin, vend) + 1;
+ vbegin = v;
+ }
+update_end:
+ vend = v;
+ }
+
+ if (!err && vbegin) {
+ if (!vxlan_fill_vni_filter_entry(skb, vbegin, vend, dump_stats))
+ err = -EMSGSIZE;
+ }
+
+ cb->args[1] = err ? idx : 0;
+
+ nlmsg_end(skb, nlh);
+
+ return err;
+}
+
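
vxlan_vnifilter_dump_dev() above folds consecutive VNIs that share a remote into one START/END netlink entry; the coalescing itself is plain run-length encoding over the sorted per-device VNI list, sketched here in isolation (illustrative only; the sample VNI values are made up):

    /* Hedged sketch: the VNI range coalescing performed by the dump path. */
    #include <stdio.h>

    int main(void)
    {
            /* sorted VNIs as kept on vg->vni_list; 1010-1012 share a remote, 2000 stands alone */
            unsigned int vnis[] = { 1010, 1011, 1012, 2000 };
            unsigned int n = sizeof(vnis) / sizeof(vnis[0]);
            unsigned int begin = vnis[0], end = vnis[0];

            for (unsigned int i = 1; i <= n; i++) {
                    if (i < n && vnis[i] == end + 1) {      /* vnirange(vend, v) == 1 */
                            end = vnis[i];
                            continue;
                    }
                    if (begin == end)
                            printf("entry: vni %u\n", begin);         /* START only */
                    else
                            printf("entry: vni %u-%u\n", begin, end); /* START + END */
                    if (i < n)
                            begin = end = vnis[i];
            }
            return 0;
    }
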
+static int vxlan_vnifilter_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int idx = 0, err = 0, s_idx = cb->args[0];
+ struct net *net = sock_net(skb->sk);
+ struct tunnel_msg *tmsg;
+ struct net_device *dev;
+
+ tmsg = nlmsg_data(cb->nlh);
+
+ if (tmsg->flags & ~TUNNEL_MSG_VALID_USER_FLAGS) {
+ NL_SET_ERR_MSG(cb->extack, "Invalid tunnelmsg flags in ancillary header");
+ return -EINVAL;
+ }
+
+ rcu_read_lock();
+ if (tmsg->ifindex) {
+ dev = dev_get_by_index_rcu(net, tmsg->ifindex);
+ if (!dev) {
+ err = -ENODEV;
+ goto out_err;
+ }
+ err = vxlan_vnifilter_dump_dev(dev, skb, cb);
+ /* if the dump completed without an error we return 0 here */
+ if (err != -EMSGSIZE)
+ goto out_err;
+ } else {
+ for_each_netdev_rcu(net, dev) {
+ if (!netif_is_vxlan(dev))
+ continue;
+ if (idx < s_idx)
+ goto skip;
+ err = vxlan_vnifilter_dump_dev(dev, skb, cb);
+ if (err == -EMSGSIZE)
+ break;
+skip:
+ idx++;
+ }
+ }
+ cb->args[0] = idx;
+ rcu_read_unlock();
+
+ return skb->len;
+
+out_err:
+ rcu_read_unlock();
+
+ return err;
+}
+
+static const struct nla_policy vni_filter_entry_policy[VXLAN_VNIFILTER_ENTRY_MAX + 1] = {
+ [VXLAN_VNIFILTER_ENTRY_START] = { .type = NLA_U32 },
+ [VXLAN_VNIFILTER_ENTRY_END] = { .type = NLA_U32 },
+ [VXLAN_VNIFILTER_ENTRY_GROUP] = { .type = NLA_BINARY,
+ .len = sizeof_field(struct iphdr, daddr) },
+ [VXLAN_VNIFILTER_ENTRY_GROUP6] = { .type = NLA_BINARY,
+ .len = sizeof(struct in6_addr) },
+};
+
+static const struct nla_policy vni_filter_policy[VXLAN_VNIFILTER_MAX + 1] = {
+ [VXLAN_VNIFILTER_ENTRY] = { .type = NLA_NESTED },
+};
+
+static int vxlan_update_default_fdb_entry(struct vxlan_dev *vxlan, __be32 vni,
+ union vxlan_addr *old_remote_ip,
+ union vxlan_addr *remote_ip,
+ struct netlink_ext_ack *extack)
+{
+ struct vxlan_rdst *dst = &vxlan->default_dst;
+ u32 hash_index;
+ int err = 0;
+
+ hash_index = fdb_head_index(vxlan, all_zeros_mac, vni);
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ if (remote_ip && !vxlan_addr_any(remote_ip)) {
+ err = vxlan_fdb_update(vxlan, all_zeros_mac,
+ remote_ip,
+ NUD_REACHABLE | NUD_PERMANENT,
+ NLM_F_APPEND | NLM_F_CREATE,
+ vxlan->cfg.dst_port,
+ vni,
+ vni,
+ dst->remote_ifindex,
+ NTF_SELF, 0, true, extack);
+ if (err) {
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ return err;
+ }
+ }
+
+ if (old_remote_ip && !vxlan_addr_any(old_remote_ip)) {
+ __vxlan_fdb_delete(vxlan, all_zeros_mac,
+ *old_remote_ip,
+ vxlan->cfg.dst_port,
+ vni, vni,
+ dst->remote_ifindex,
+ true);
+ }
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+
+ return err;
+}
+
+static int vxlan_vni_update_group(struct vxlan_dev *vxlan,
+ struct vxlan_vni_node *vninode,
+ union vxlan_addr *group,
+ bool create, bool *changed,
+ struct netlink_ext_ack *extack)
+{
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ struct vxlan_rdst *dst = &vxlan->default_dst;
+ union vxlan_addr *newrip = NULL, *oldrip = NULL;
+ union vxlan_addr old_remote_ip;
+ int ret = 0;
+
+ memcpy(&old_remote_ip, &vninode->remote_ip, sizeof(old_remote_ip));
+
+ /* if a per-vni remote ip is not present, use the vxlan dev
+ * default dst remote ip for the fdb entry
+ */
+ if (group && !vxlan_addr_any(group)) {
+ newrip = group;
+ } else {
+ if (!vxlan_addr_any(&dst->remote_ip))
+ newrip = &dst->remote_ip;
+ }
+
+ /* if old rip exists, and no newrip,
+ * explicitly delete old rip
+ */
+ if (!newrip && !vxlan_addr_any(&old_remote_ip))
+ oldrip = &old_remote_ip;
+
+ if (!newrip && !oldrip)
+ return 0;
+
+ if (!create && oldrip && newrip && vxlan_addr_equal(oldrip, newrip))
+ return 0;
+
+ ret = vxlan_update_default_fdb_entry(vxlan, vninode->vni,
+ oldrip, newrip,
+ extack);
+ if (ret)
+ goto out;
+
+ if (group)
+ memcpy(&vninode->remote_ip, group, sizeof(vninode->remote_ip));
+
+ if (vxlan->dev->flags & IFF_UP) {
+ if (vxlan_addr_multicast(&old_remote_ip) &&
+ !vxlan_group_used(vn, vxlan, vninode->vni,
+ &old_remote_ip,
+ vxlan->default_dst.remote_ifindex)) {
+ ret = vxlan_igmp_leave(vxlan, &old_remote_ip,
+ 0);
+ if (ret)
+ goto out;
+ }
+
+ if (vxlan_addr_multicast(&vninode->remote_ip)) {
+ ret = vxlan_igmp_join(vxlan, &vninode->remote_ip, 0);
+ if (ret == -EADDRINUSE)
+ ret = 0;
+ if (ret)
+ goto out;
+ }
+ }
+
+ *changed = true;
+
+ return 0;
+out:
+ return ret;
+}
+
+int vxlan_vnilist_update_group(struct vxlan_dev *vxlan,
+ union vxlan_addr *old_remote_ip,
+ union vxlan_addr *new_remote_ip,
+ struct netlink_ext_ack *extack)
+{
+ struct list_head *headp, *hpos;
+ struct vxlan_vni_group *vg;
+ struct vxlan_vni_node *vent;
+ int ret;
+
+ vg = rtnl_dereference(vxlan->vnigrp);
+
+ headp = &vg->vni_list;
+ list_for_each_prev(hpos, headp) {
+ vent = list_entry(hpos, struct vxlan_vni_node, vlist);
+ if (vxlan_addr_any(&vent->remote_ip)) {
+ ret = vxlan_update_default_fdb_entry(vxlan, vent->vni,
+ old_remote_ip,
+ new_remote_ip,
+ extack);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void vxlan_vni_delete_group(struct vxlan_dev *vxlan,
+ struct vxlan_vni_node *vninode)
+{
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ struct vxlan_rdst *dst = &vxlan->default_dst;
+
+ /* if per vni remote_ip not present, delete the
+ * default dst remote_ip previously added for this vni
+ */
+ if (!vxlan_addr_any(&vninode->remote_ip) ||
+ !vxlan_addr_any(&dst->remote_ip))
+ __vxlan_fdb_delete(vxlan, all_zeros_mac,
+ (vxlan_addr_any(&vninode->remote_ip) ?
+ dst->remote_ip : vninode->remote_ip),
+ vxlan->cfg.dst_port,
+ vninode->vni, vninode->vni,
+ dst->remote_ifindex,
+ true);
+
+ if (vxlan->dev->flags & IFF_UP) {
+ if (vxlan_addr_multicast(&vninode->remote_ip) &&
+ !vxlan_group_used(vn, vxlan, vninode->vni,
+ &vninode->remote_ip,
+ dst->remote_ifindex)) {
+ vxlan_igmp_leave(vxlan, &vninode->remote_ip, 0);
+ }
+ }
+}
+
+static int vxlan_vni_update(struct vxlan_dev *vxlan,
+ struct vxlan_vni_group *vg,
+ __be32 vni, union vxlan_addr *group,
+ bool *changed,
+ struct netlink_ext_ack *extack)
+{
+ struct vxlan_vni_node *vninode;
+ int ret;
+
+ vninode = rhashtable_lookup_fast(&vg->vni_hash, &vni,
+ vxlan_vni_rht_params);
+ if (!vninode)
+ return 0;
+
+ ret = vxlan_vni_update_group(vxlan, vninode, group, false, changed,
+ extack);
+ if (ret)
+ return ret;
+
+ if (changed)
+ vxlan_vnifilter_notify(vxlan, vninode, RTM_NEWTUNNEL);
+
+ return 0;
+}
+
+static void __vxlan_vni_add_list(struct vxlan_vni_group *vg,
+ struct vxlan_vni_node *v)
+{
+ struct list_head *headp, *hpos;
+ struct vxlan_vni_node *vent;
+
+ headp = &vg->vni_list;
+ list_for_each_prev(hpos, headp) {
+ vent = list_entry(hpos, struct vxlan_vni_node, vlist);
+ if (be32_to_cpu(v->vni) < be32_to_cpu(vent->vni))
+ continue;
+ else
+ break;
+ }
+ list_add_rcu(&v->vlist, hpos);
+ vg->num_vnis++;
+}
+
+static void __vxlan_vni_del_list(struct vxlan_vni_group *vg,
+ struct vxlan_vni_node *v)
+{
+ list_del_rcu(&v->vlist);
+ vg->num_vnis--;
+}
+
+static struct vxlan_vni_node *vxlan_vni_alloc(struct vxlan_dev *vxlan,
+ __be32 vni)
+{
+ struct vxlan_vni_node *vninode;
+
+ vninode = kzalloc(sizeof(*vninode), GFP_ATOMIC);
+ if (!vninode)
+ return NULL;
+ vninode->stats = netdev_alloc_pcpu_stats(struct vxlan_vni_stats_pcpu);
+ if (!vninode->stats) {
+ kfree(vninode);
+ return NULL;
+ }
+ vninode->vni = vni;
+ vninode->hlist4.vxlan = vxlan;
+#if IS_ENABLED(CONFIG_IPV6)
+ vninode->hlist6.vxlan = vxlan;
+#endif
+
+ return vninode;
+}
+
+static int vxlan_vni_add(struct vxlan_dev *vxlan,
+ struct vxlan_vni_group *vg,
+ u32 vni, union vxlan_addr *group,
+ struct netlink_ext_ack *extack)
+{
+ struct vxlan_vni_node *vninode;
+ __be32 v = cpu_to_be32(vni);
+ bool changed = false;
+ int err = 0;
+
+ if (vxlan_vnifilter_lookup(vxlan, v))
+ return vxlan_vni_update(vxlan, vg, v, group, &changed, extack);
+
+ err = vxlan_vni_in_use(vxlan->net, vxlan, &vxlan->cfg, v);
+ if (err) {
+ NL_SET_ERR_MSG(extack, "VNI in use");
+ return err;
+ }
+
+ vninode = vxlan_vni_alloc(vxlan, v);
+ if (!vninode)
+ return -ENOMEM;
+
+ err = rhashtable_lookup_insert_fast(&vg->vni_hash,
+ &vninode->vnode,
+ vxlan_vni_rht_params);
+ if (err) {
+ kfree(vninode);
+ return err;
+ }
+
+ __vxlan_vni_add_list(vg, vninode);
+
+ if (vxlan->dev->flags & IFF_UP)
+ vxlan_vs_add_del_vninode(vxlan, vninode, false);
+
+ err = vxlan_vni_update_group(vxlan, vninode, group, true, &changed,
+ extack);
+
+ if (changed)
+ vxlan_vnifilter_notify(vxlan, vninode, RTM_NEWTUNNEL);
+
+ return err;
+}
+
+static void vxlan_vni_node_rcu_free(struct rcu_head *rcu)
+{
+ struct vxlan_vni_node *v;
+
+ v = container_of(rcu, struct vxlan_vni_node, rcu);
+ free_percpu(v->stats);
+ kfree(v);
+}
+
+static int vxlan_vni_del(struct vxlan_dev *vxlan,
+ struct vxlan_vni_group *vg,
+ u32 vni, struct netlink_ext_ack *extack)
+{
+ struct vxlan_vni_node *vninode;
+ __be32 v = cpu_to_be32(vni);
+ int err = 0;
+
+ vg = rtnl_dereference(vxlan->vnigrp);
+
+ vninode = rhashtable_lookup_fast(&vg->vni_hash, &v,
+ vxlan_vni_rht_params);
+ if (!vninode) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ vxlan_vni_delete_group(vxlan, vninode);
+
+ err = rhashtable_remove_fast(&vg->vni_hash,
+ &vninode->vnode,
+ vxlan_vni_rht_params);
+ if (err)
+ goto out;
+
+ __vxlan_vni_del_list(vg, vninode);
+
+ vxlan_vnifilter_notify(vxlan, vninode, RTM_DELTUNNEL);
+
+ if (vxlan->dev->flags & IFF_UP)
+ vxlan_vs_add_del_vninode(vxlan, vninode, true);
+
+ call_rcu(&vninode->rcu, vxlan_vni_node_rcu_free);
+
+ return 0;
+out:
+ return err;
+}
+
+static int vxlan_vni_add_del(struct vxlan_dev *vxlan, __u32 start_vni,
+ __u32 end_vni, union vxlan_addr *group,
+ int cmd, struct netlink_ext_ack *extack)
+{
+ struct vxlan_vni_group *vg;
+ int v, err = 0;
+
+ vg = rtnl_dereference(vxlan->vnigrp);
+
+ for (v = start_vni; v <= end_vni; v++) {
+ switch (cmd) {
+ case RTM_NEWTUNNEL:
+ err = vxlan_vni_add(vxlan, vg, v, group, extack);
+ break;
+ case RTM_DELTUNNEL:
+ err = vxlan_vni_del(vxlan, vg, v, extack);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+ if (err)
+ goto out;
+ }
+
+ return 0;
+out:
+ return err;
+}
+
+static int vxlan_process_vni_filter(struct vxlan_dev *vxlan,
+ struct nlattr *nlvnifilter,
+ int cmd, struct netlink_ext_ack *extack)
+{
+ struct nlattr *vattrs[VXLAN_VNIFILTER_ENTRY_MAX + 1];
+ u32 vni_start = 0, vni_end = 0;
+ union vxlan_addr group;
+ int err;
+
+ err = nla_parse_nested(vattrs,
+ VXLAN_VNIFILTER_ENTRY_MAX,
+ nlvnifilter, vni_filter_entry_policy,
+ extack);
+ if (err)
+ return err;
+
+ if (vattrs[VXLAN_VNIFILTER_ENTRY_START]) {
+ vni_start = nla_get_u32(vattrs[VXLAN_VNIFILTER_ENTRY_START]);
+ vni_end = vni_start;
+ }
+
+ if (vattrs[VXLAN_VNIFILTER_ENTRY_END])
+ vni_end = nla_get_u32(vattrs[VXLAN_VNIFILTER_ENTRY_END]);
+
+ if (!vni_start && !vni_end) {
+ NL_SET_ERR_MSG_ATTR(extack, nlvnifilter,
+ "vni start nor end found in vni entry");
+ return -EINVAL;
+ }
+
+ if (vattrs[VXLAN_VNIFILTER_ENTRY_GROUP]) {
+ group.sin.sin_addr.s_addr =
+ nla_get_in_addr(vattrs[VXLAN_VNIFILTER_ENTRY_GROUP]);
+ group.sa.sa_family = AF_INET;
+ } else if (vattrs[VXLAN_VNIFILTER_ENTRY_GROUP6]) {
+ group.sin6.sin6_addr =
+ nla_get_in6_addr(vattrs[VXLAN_VNIFILTER_ENTRY_GROUP6]);
+ group.sa.sa_family = AF_INET6;
+ } else {
+ memset(&group, 0, sizeof(group));
+ }
+
+ if (vxlan_addr_multicast(&group) && !vxlan->default_dst.remote_ifindex) {
+ NL_SET_ERR_MSG(extack,
+ "Local interface required for multicast remote group");
+
+ return -EINVAL;
+ }
+
+ err = vxlan_vni_add_del(vxlan, vni_start, vni_end, &group, cmd,
+ extack);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void vxlan_vnigroup_uninit(struct vxlan_dev *vxlan)
+{
+ struct vxlan_vni_node *v, *tmp;
+ struct vxlan_vni_group *vg;
+
+ vg = rtnl_dereference(vxlan->vnigrp);
+ list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
+ rhashtable_remove_fast(&vg->vni_hash, &v->vnode,
+ vxlan_vni_rht_params);
+ hlist_del_init_rcu(&v->hlist4.hlist);
+#if IS_ENABLED(CONFIG_IPV6)
+ hlist_del_init_rcu(&v->hlist6.hlist);
+#endif
+ __vxlan_vni_del_list(vg, v);
+ vxlan_vnifilter_notify(vxlan, v, RTM_DELTUNNEL);
+ call_rcu(&v->rcu, vxlan_vni_node_rcu_free);
+ }
+ rhashtable_destroy(&vg->vni_hash);
+ kfree(vg);
+}
+
+int vxlan_vnigroup_init(struct vxlan_dev *vxlan)
+{
+ struct vxlan_vni_group *vg;
+ int ret;
+
+ vg = kzalloc(sizeof(*vg), GFP_KERNEL);
+ if (!vg)
+ return -ENOMEM;
+ ret = rhashtable_init(&vg->vni_hash, &vxlan_vni_rht_params);
+ if (ret) {
+ kfree(vg);
+ return ret;
+ }
+ INIT_LIST_HEAD(&vg->vni_list);
+ rcu_assign_pointer(vxlan->vnigrp, vg);
+
+ return 0;
+}
+
+static int vxlan_vnifilter_process(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = sock_net(skb->sk);
+ struct tunnel_msg *tmsg;
+ struct vxlan_dev *vxlan;
+ struct net_device *dev;
+ struct nlattr *attr;
+ int err, vnis = 0;
+ int rem;
+
+ /* this should validate the header and check for remaining bytes */
+ err = nlmsg_parse(nlh, sizeof(*tmsg), NULL, VXLAN_VNIFILTER_MAX,
+ vni_filter_policy, extack);
+ if (err < 0)
+ return err;
+
+ tmsg = nlmsg_data(nlh);
+ dev = __dev_get_by_index(net, tmsg->ifindex);
+ if (!dev)
+ return -ENODEV;
+
+ if (!netif_is_vxlan(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "The device is not a vxlan device");
+ return -EINVAL;
+ }
+
+ vxlan = netdev_priv(dev);
+
+ if (!(vxlan->cfg.flags & VXLAN_F_VNIFILTER))
+ return -EOPNOTSUPP;
+
+ nlmsg_for_each_attr(attr, nlh, sizeof(*tmsg), rem) {
+ switch (nla_type(attr)) {
+ case VXLAN_VNIFILTER_ENTRY:
+ err = vxlan_process_vni_filter(vxlan, attr,
+ nlh->nlmsg_type, extack);
+ break;
+ default:
+ continue;
+ }
+ vnis++;
+ if (err)
+ break;
+ }
+
+ if (!vnis) {
+ NL_SET_ERR_MSG_MOD(extack, "No vnis found to process");
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+void vxlan_vnifilter_init(void)
+{
+ rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETTUNNEL, NULL,
+ vxlan_vnifilter_dump, 0);
+ rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWTUNNEL,
+ vxlan_vnifilter_process, NULL, 0);
+ rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELTUNNEL,
+ vxlan_vnifilter_process, NULL, 0);
+}
+
+void vxlan_vnifilter_uninit(void)
+{
+ rtnl_unregister(PF_BRIDGE, RTM_GETTUNNEL);
+ rtnl_unregister(PF_BRIDGE, RTM_NEWTUNNEL);
+ rtnl_unregister(PF_BRIDGE, RTM_DELTUNNEL);
+}
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 6a142dc85c37..76c6b4f89890 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -57,6 +57,7 @@
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/uaccess.h>
+#include <linux/jiffies.h>
//#include <asm/spinlock.h>
#define DRIVER_MAJOR_VERSION 1
@@ -1968,7 +1969,7 @@ static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue)
printk("%s: Xmitter busy|\n", dev->name);
sc->extra_stats.tx_tbusy_calls++;
- if (jiffies - dev_trans_start(dev) < TX_TIMEOUT)
+ if (time_is_before_jiffies(dev_trans_start(dev) + TX_TIMEOUT))
goto bug_out;
/*
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 141c1b5a7b1f..9cabd342d156 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -104,7 +104,7 @@ static void ar5523_cmd_rx_cb(struct urb *urb)
}
if (urb->actual_length < sizeof(struct ar5523_cmd_hdr)) {
- ar5523_err(ar, "RX USB to short.\n");
+ ar5523_err(ar, "RX USB too short.\n");
goto skip;
}
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 8f5b8eb368fa..9e1f483e1362 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -75,6 +75,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA988X_BOARD_DATA_SZ,
.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
@@ -111,6 +112,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA988X_BOARD_DATA_SZ,
.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
@@ -148,6 +150,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA9887_BOARD_DATA_SZ,
.board_ext_size = QCA9887_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
@@ -184,6 +187,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca6174_sdio_ops,
.hw_clk = qca6174_clk,
.target_cpu_freq = 176000000,
@@ -216,6 +220,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
@@ -252,6 +257,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
@@ -288,6 +294,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
@@ -325,6 +332,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca6174_ops,
.hw_clk = qca6174_clk,
.target_cpu_freq = 176000000,
@@ -370,6 +378,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
},
.sw_decrypt_mcast_mgmt = true,
+ .rx_desc_ops = &qca99x0_rx_desc_ops,
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 4,
@@ -415,6 +424,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.ext_board_size = QCA99X0_EXT_BOARD_DATA_SZ,
},
.sw_decrypt_mcast_mgmt = true,
+ .rx_desc_ops = &qca99x0_rx_desc_ops,
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 12,
@@ -461,6 +471,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
},
.sw_decrypt_mcast_mgmt = true,
+ .rx_desc_ops = &qca99x0_rx_desc_ops,
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 12,
@@ -501,6 +512,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
@@ -537,6 +549,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca6174_ops,
.hw_clk = qca6174_clk,
.target_cpu_freq = 176000000,
@@ -575,6 +588,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca6174_ops,
.hw_clk = qca6174_clk,
.target_cpu_freq = 176000000,
@@ -611,6 +625,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA4019_BOARD_EXT_DATA_SZ,
},
.sw_decrypt_mcast_mgmt = true,
+ .rx_desc_ops = &qca99x0_rx_desc_ops,
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 4,
@@ -643,6 +658,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.dir = WCN3990_HW_1_0_FW_DIR,
},
.sw_decrypt_mcast_mgmt = true,
+ .rx_desc_ops = &wcn3990_rx_desc_ops,
.hw_ops = &wcn3990_ops,
.decap_align_bytes = 1,
.num_peers = TARGET_HL_TLV_NUM_PEERS,
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 127b4e4980ef..907e1e13871a 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -131,6 +131,159 @@ static const enum htt_t2h_msg_type htt_10_4_t2h_msg_types[] = {
HTT_T2H_MSG_TYPE_PEER_STATS,
};
+const struct ath10k_htt_rx_desc_ops qca988x_rx_desc_ops = {
+ .rx_desc_size = sizeof(struct htt_rx_desc_v1),
+ .rx_desc_msdu_payload_offset = offsetof(struct htt_rx_desc_v1, msdu_payload)
+};
+
+static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc = container_of(rxd,
+ struct htt_rx_desc_v1,
+ base);
+
+ return MS(__le32_to_cpu(rx_desc->msdu_end.qca99x0.info1),
+ RX_MSDU_END_INFO1_L3_HDR_PAD);
+}
+
+static bool ath10k_qca99x0_rx_desc_msdu_limit_error(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc = container_of(rxd,
+ struct htt_rx_desc_v1,
+ base);
+
+ return !!(rx_desc->msdu_end.common.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_MSDU_LIMIT_ERR));
+}
+
+const struct ath10k_htt_rx_desc_ops qca99x0_rx_desc_ops = {
+ .rx_desc_size = sizeof(struct htt_rx_desc_v1),
+ .rx_desc_msdu_payload_offset = offsetof(struct htt_rx_desc_v1, msdu_payload),
+
+ .rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_msdu_limit_error = ath10k_qca99x0_rx_desc_msdu_limit_error,
+};
+
+static void ath10k_rx_desc_wcn3990_get_offsets(struct htt_rx_ring_rx_desc_offsets *off)
+{
+#define desc_offset(x) (offsetof(struct htt_rx_desc_v2, x) / 4)
+ off->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+ off->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+ off->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+ off->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+ off->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+ off->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+ off->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+ off->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+ off->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+ off->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+#undef desc_offset
+}
+
+static struct htt_rx_desc *
+ath10k_rx_desc_wcn3990_from_raw_buffer(void *buff)
+{
+ return &((struct htt_rx_desc_v2 *)buff)->base;
+}
+
+static struct rx_attention *
+ath10k_rx_desc_wcn3990_get_attention(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->attention;
+}
+
+static struct rx_frag_info_common *
+ath10k_rx_desc_wcn3990_get_frag_info(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->frag_info.common;
+}
+
+static struct rx_mpdu_start *
+ath10k_rx_desc_wcn3990_get_mpdu_start(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->mpdu_start;
+}
+
+static struct rx_mpdu_end *
+ath10k_rx_desc_wcn3990_get_mpdu_end(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->mpdu_end;
+}
+
+static struct rx_msdu_start_common *
+ath10k_rx_desc_wcn3990_get_msdu_start(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->msdu_start.common;
+}
+
+static struct rx_msdu_end_common *
+ath10k_rx_desc_wcn3990_get_msdu_end(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->msdu_end.common;
+}
+
+static struct rx_ppdu_start *
+ath10k_rx_desc_wcn3990_get_ppdu_start(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->ppdu_start;
+}
+
+static struct rx_ppdu_end_common *
+ath10k_rx_desc_wcn3990_get_ppdu_end(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->ppdu_end.common;
+}
+
+static u8 *
+ath10k_rx_desc_wcn3990_get_rx_hdr_status(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return rx_desc->rx_hdr_status;
+}
+
+static u8 *
+ath10k_rx_desc_wcn3990_get_msdu_payload(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return rx_desc->msdu_payload;
+}
+
+const struct ath10k_htt_rx_desc_ops wcn3990_rx_desc_ops = {
+ .rx_desc_size = sizeof(struct htt_rx_desc_v2),
+ .rx_desc_msdu_payload_offset = offsetof(struct htt_rx_desc_v2, msdu_payload),
+
+ .rx_desc_from_raw_buffer = ath10k_rx_desc_wcn3990_from_raw_buffer,
+ .rx_desc_get_offsets = ath10k_rx_desc_wcn3990_get_offsets,
+ .rx_desc_get_attention = ath10k_rx_desc_wcn3990_get_attention,
+ .rx_desc_get_frag_info = ath10k_rx_desc_wcn3990_get_frag_info,
+ .rx_desc_get_mpdu_start = ath10k_rx_desc_wcn3990_get_mpdu_start,
+ .rx_desc_get_mpdu_end = ath10k_rx_desc_wcn3990_get_mpdu_end,
+ .rx_desc_get_msdu_start = ath10k_rx_desc_wcn3990_get_msdu_start,
+ .rx_desc_get_msdu_end = ath10k_rx_desc_wcn3990_get_msdu_end,
+ .rx_desc_get_ppdu_start = ath10k_rx_desc_wcn3990_get_ppdu_start,
+ .rx_desc_get_ppdu_end = ath10k_rx_desc_wcn3990_get_ppdu_end,
+ .rx_desc_get_rx_hdr_status = ath10k_rx_desc_wcn3990_get_rx_hdr_status,
+ .rx_desc_get_msdu_payload = ath10k_rx_desc_wcn3990_get_msdu_payload,
+};
+
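
These ops tables, together with the ath10k_htt_rx_desc_* inline wrappers added to htt.h below, follow an optional-ops-with-default-fallback pattern: a NULL hook means "fall back to the v1 descriptor layout". A compact standalone sketch of that dispatch style (illustrative; struct desc, struct desc_ops and the values are hypothetical):

    /* Hedged sketch: optional ops with a built-in default, as used for rx_desc_ops. */
    #include <stddef.h>
    #include <stdio.h>

    struct desc { int l3_pad; };

    struct desc_ops {
            size_t desc_size;                               /* mandatory */
            int (*get_l3_pad)(const struct desc *d);        /* optional; NULL -> default */
    };

    static int v2_get_l3_pad(const struct desc *d)
    {
            return d->l3_pad;
    }

    static const struct desc_ops v1_ops = { .desc_size = 64 };
    static const struct desc_ops v2_ops = { .desc_size = 96, .get_l3_pad = v2_get_l3_pad };

    /* mirrors ath10k_htt_rx_desc_get_l3_pad_bytes(): call the hook if set, else return 0 */
    static int get_l3_pad(const struct desc_ops *ops, const struct desc *d)
    {
            if (ops->get_l3_pad)
                    return ops->get_l3_pad(d);
            return 0;
    }

    int main(void)
    {
            struct desc d = { .l3_pad = 2 };

            printf("v1 pad %d, v2 pad %d\n", get_l3_pad(&v1_ops, &d), get_l3_pad(&v2_ops, &d));
            return 0;
    }
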
int ath10k_htt_connect(struct ath10k_htt *htt)
{
struct ath10k_htc_svc_conn_req conn_req;
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 9a3a8907389b..f06cf39204e2 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -240,14 +240,7 @@ enum htt_rx_ring_flags {
#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
#define HTT_RX_RING_FILL_LEVEL_DUAL_MAC (HTT_RX_RING_SIZE - 1)
-struct htt_rx_ring_setup_ring32 {
- __le32 fw_idx_shadow_reg_paddr;
- __le32 rx_ring_base_paddr;
- __le16 rx_ring_len; /* in 4-byte words */
- __le16 rx_ring_bufsize; /* rx skb size - in bytes */
- __le16 flags; /* %HTT_RX_RING_FLAGS_ */
- __le16 fw_idx_init_val;
-
+struct htt_rx_ring_rx_desc_offsets {
/* the following offsets are in 4-byte units */
__le16 mac80211_hdr_offset;
__le16 msdu_payload_offset;
@@ -261,6 +254,17 @@ struct htt_rx_ring_setup_ring32 {
__le16 frag_info_offset;
} __packed;
+struct htt_rx_ring_setup_ring32 {
+ __le32 fw_idx_shadow_reg_paddr;
+ __le32 rx_ring_base_paddr;
+ __le16 rx_ring_len; /* in 4-byte words */
+ __le16 rx_ring_bufsize; /* rx skb size - in bytes */
+ __le16 flags; /* %HTT_RX_RING_FLAGS_ */
+ __le16 fw_idx_init_val;
+
+ struct htt_rx_ring_rx_desc_offsets offsets;
+} __packed;
+
struct htt_rx_ring_setup_ring64 {
__le64 fw_idx_shadow_reg_paddr;
__le64 rx_ring_base_paddr;
@@ -269,17 +273,7 @@ struct htt_rx_ring_setup_ring64 {
__le16 flags; /* %HTT_RX_RING_FLAGS_ */
__le16 fw_idx_init_val;
- /* the following offsets are in 4-byte units */
- __le16 mac80211_hdr_offset;
- __le16 msdu_payload_offset;
- __le16 ppdu_start_offset;
- __le16 ppdu_end_offset;
- __le16 mpdu_start_offset;
- __le16 mpdu_end_offset;
- __le16 msdu_start_offset;
- __le16 msdu_end_offset;
- __le16 rx_attention_offset;
- __le16 frag_info_offset;
+ struct htt_rx_ring_rx_desc_offsets offsets;
} __packed;
struct htt_rx_ring_setup_hdr {
@@ -2075,12 +2069,22 @@ static inline bool ath10k_htt_rx_proc_rx_frag_ind(struct ath10k_htt *htt,
return htt->rx_ops->htt_rx_proc_rx_frag_ind(htt, rx, skb);
}
+/* The driver strongly assumes that the rx header status is 64 bytes long,
+ * so all possible rx_desc structures must respect this assumption.
+ */
#define RX_HTT_HDR_STATUS_LEN 64
-/* This structure layout is programmed via rx ring setup
+/* The rx descriptor structure layout is programmed via rx ring setup
* so that FW knows how to transfer the rx descriptor to the host.
- * Buffers like this are placed on the rx ring.
+ * Unfortunately, QCA6174's firmware does not currently behave correctly when
+ * the structure layout of the rx descriptor is modified beyond what it expects
+ * (even if it is correctly programmed during the rx ring setup).
+ * Therefore we must keep two different memory layouts, abstract the rx descriptor
+ * representation and use ath10k_rx_desc_ops to access rx descriptor data correctly.
*/
+
+/* base struct used for abstracting the rx descriptor representation */
struct htt_rx_desc {
union {
/* This field is filled on the host using the msdu buffer
@@ -2089,6 +2093,13 @@ struct htt_rx_desc {
struct fw_rx_desc_base fw_desc;
u32 pad;
} __packed;
+} __packed;
+
+/* rx descriptor for wcn3990 and possibly extensible for newer cards
+ * Buffers like this are placed on the rx ring.
+ */
+struct htt_rx_desc_v2 {
+ struct htt_rx_desc base;
struct {
struct rx_attention attention;
struct rx_frag_info frag_info;
@@ -2103,6 +2114,240 @@ struct htt_rx_desc {
u8 msdu_payload[];
};
+/* Dedicated rx descriptor for QCA6174, QCA988x and QCA99x0, to make sure their
+ * firmware works correctly. We keep a single rx descriptor for all three of these
+ * card families because testing showed it to be the most stable solution;
+ * e.g. a rx descriptor dedicated to QCA6174 alone occasionally caused firmware
+ * crashes during testing.
+ * Buffers like this are placed on the rx ring.
+ */
+struct htt_rx_desc_v1 {
+ struct htt_rx_desc base;
+ struct {
+ struct rx_attention attention;
+ struct rx_frag_info_v1 frag_info;
+ struct rx_mpdu_start mpdu_start;
+ struct rx_msdu_start_v1 msdu_start;
+ struct rx_msdu_end_v1 msdu_end;
+ struct rx_mpdu_end mpdu_end;
+ struct rx_ppdu_start ppdu_start;
+ struct rx_ppdu_end_v1 ppdu_end;
+ } __packed;
+ u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
+ u8 msdu_payload[];
+};
+
+/* rx_desc abstraction */
+struct ath10k_htt_rx_desc_ops {
+ /* These fields are mandatory; they must be specified in any instance */
+
+ /* sizeof() of the rx_desc structure used by this hw */
+ size_t rx_desc_size;
+
+ /* offset of msdu_payload inside the rx_desc structure used by this hw */
+ size_t rx_desc_msdu_payload_offset;
+
+ /* These fields are optional.
+ * When a field is not provided, the default implementation is used
+ * (see the ath10k_rx_desc_* operations below for more info about the defaults)
+ */
+ bool (*rx_desc_get_msdu_limit_error)(struct htt_rx_desc *rxd);
+ int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd);
+
+ /* Safely cast from a void* buffer containing an rx descriptor
+ * to the proper rx_desc structure
+ */
+ struct htt_rx_desc *(*rx_desc_from_raw_buffer)(void *buff);
+
+ void (*rx_desc_get_offsets)(struct htt_rx_ring_rx_desc_offsets *offs);
+ struct rx_attention *(*rx_desc_get_attention)(struct htt_rx_desc *rxd);
+ struct rx_frag_info_common *(*rx_desc_get_frag_info)(struct htt_rx_desc *rxd);
+ struct rx_mpdu_start *(*rx_desc_get_mpdu_start)(struct htt_rx_desc *rxd);
+ struct rx_mpdu_end *(*rx_desc_get_mpdu_end)(struct htt_rx_desc *rxd);
+ struct rx_msdu_start_common *(*rx_desc_get_msdu_start)(struct htt_rx_desc *rxd);
+ struct rx_msdu_end_common *(*rx_desc_get_msdu_end)(struct htt_rx_desc *rxd);
+ struct rx_ppdu_start *(*rx_desc_get_ppdu_start)(struct htt_rx_desc *rxd);
+ struct rx_ppdu_end_common *(*rx_desc_get_ppdu_end)(struct htt_rx_desc *rxd);
+ u8 *(*rx_desc_get_rx_hdr_status)(struct htt_rx_desc *rxd);
+ u8 *(*rx_desc_get_msdu_payload)(struct htt_rx_desc *rxd);
+};
+
+extern const struct ath10k_htt_rx_desc_ops qca988x_rx_desc_ops;
+extern const struct ath10k_htt_rx_desc_ops qca99x0_rx_desc_ops;
+extern const struct ath10k_htt_rx_desc_ops wcn3990_rx_desc_ops;
+
+static inline int
+ath10k_htt_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ if (hw->rx_desc_ops->rx_desc_get_l3_pad_bytes)
+ return hw->rx_desc_ops->rx_desc_get_l3_pad_bytes(rxd);
+ return 0;
+}
+
+static inline bool
+ath10k_htt_rx_desc_msdu_limit_error(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ if (hw->rx_desc_ops->rx_desc_get_msdu_limit_error)
+ return hw->rx_desc_ops->rx_desc_get_msdu_limit_error(rxd);
+ return false;
+}
+
+/* The default implementation of all these getters uses the old rx_desc,
+ * so that it is easier to define the ath10k_htt_rx_desc_ops instances.
+ * If new wireless cards must be supported, it would probably be better to
+ * switch the default implementation to the new rx_desc, since this would
+ * make extending it easier.
+ */
+static inline struct htt_rx_desc *
+ath10k_htt_rx_desc_from_raw_buffer(struct ath10k_hw_params *hw, void *buff)
+{
+ if (hw->rx_desc_ops->rx_desc_from_raw_buffer)
+ return hw->rx_desc_ops->rx_desc_from_raw_buffer(buff);
+ return &((struct htt_rx_desc_v1 *)buff)->base;
+}
+
+static inline void
+ath10k_htt_rx_desc_get_offsets(struct ath10k_hw_params *hw,
+ struct htt_rx_ring_rx_desc_offsets *off)
+{
+ if (hw->rx_desc_ops->rx_desc_get_offsets) {
+ hw->rx_desc_ops->rx_desc_get_offsets(off);
+ } else {
+#define desc_offset(x) (offsetof(struct htt_rx_desc_v1, x) / 4)
+ off->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+ off->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+ off->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+ off->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+ off->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+ off->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+ off->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+ off->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+ off->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+ off->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+#undef desc_offset
+ }
+}
+
+static inline struct rx_attention *
+ath10k_htt_rx_desc_get_attention(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_attention)
+ return hw->rx_desc_ops->rx_desc_get_attention(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->attention;
+}
+
+static inline struct rx_frag_info_common *
+ath10k_htt_rx_desc_get_frag_info(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_frag_info)
+ return hw->rx_desc_ops->rx_desc_get_frag_info(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->frag_info.common;
+}
+
+static inline struct rx_mpdu_start *
+ath10k_htt_rx_desc_get_mpdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_mpdu_start)
+ return hw->rx_desc_ops->rx_desc_get_mpdu_start(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->mpdu_start;
+}
+
+static inline struct rx_mpdu_end *
+ath10k_htt_rx_desc_get_mpdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_mpdu_end)
+ return hw->rx_desc_ops->rx_desc_get_mpdu_end(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->mpdu_end;
+}
+
+static inline struct rx_msdu_start_common *
+ath10k_htt_rx_desc_get_msdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_msdu_start)
+ return hw->rx_desc_ops->rx_desc_get_msdu_start(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->msdu_start.common;
+}
+
+static inline struct rx_msdu_end_common *
+ath10k_htt_rx_desc_get_msdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_msdu_end)
+ return hw->rx_desc_ops->rx_desc_get_msdu_end(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->msdu_end.common;
+}
+
+static inline struct rx_ppdu_start *
+ath10k_htt_rx_desc_get_ppdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_ppdu_start)
+ return hw->rx_desc_ops->rx_desc_get_ppdu_start(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->ppdu_start;
+}
+
+static inline struct rx_ppdu_end_common *
+ath10k_htt_rx_desc_get_ppdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_ppdu_end)
+ return hw->rx_desc_ops->rx_desc_get_ppdu_end(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->ppdu_end.common;
+}
+
+static inline u8 *
+ath10k_htt_rx_desc_get_rx_hdr_status(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_rx_hdr_status)
+ return hw->rx_desc_ops->rx_desc_get_rx_hdr_status(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return rx_desc->rx_hdr_status;
+}
+
+static inline u8 *
+ath10k_htt_rx_desc_get_msdu_payload(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_msdu_payload)
+ return hw->rx_desc_ops->rx_desc_get_msdu_payload(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return rx_desc->msdu_payload;
+}
+
#define HTT_RX_DESC_HL_INFO_SEQ_NUM_MASK 0x00000fff
#define HTT_RX_DESC_HL_INFO_SEQ_NUM_LSB 0
#define HTT_RX_DESC_HL_INFO_ENCRYPTED_MASK 0x00001000
@@ -2136,7 +2381,14 @@ struct htt_rx_chan_info {
* rounded up to a cache line size.
*/
#define HTT_RX_BUF_SIZE 2048
-#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
+
+/* HTT_RX_MSDU_SIZE can no longer be computed statically because it
+ * depends on the underlying device's rx_desc representation.
+ */
+static inline int ath10k_htt_rx_msdu_size(struct ath10k_hw_params *hw)
+{
+ return HTT_RX_BUF_SIZE - (int)hw->rx_desc_ops->rx_desc_size;
+}
/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
* aggregated traffic more nicely.
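
The inline helpers added to htt.h above all follow the same pattern: dispatch through hw->rx_desc_ops when the chip provides a callback, otherwise fall back to the legacy struct htt_rx_desc_v1 layout via container_of(). As a rough sketch only (this is not part of the patch; the real per-chip definitions live elsewhere in this series and may differ), a legacy chip that is content with the fallbacks could be described with little more than its descriptor size and payload offset:

const struct ath10k_htt_rx_desc_ops qca988x_rx_desc_ops_sketch = {
	/* size of the legacy descriptor that precedes the MSDU payload */
	.rx_desc_size = sizeof(struct htt_rx_desc_v1),
	/* offset used by ath10k_htt_rx_amsdu_pop() to expose the payload */
	.rx_desc_msdu_payload_offset =
		offsetof(struct htt_rx_desc_v1, msdu_payload),
	/* all getters left NULL: the inline helpers fall back to v1 */
};

With such an instance plugged into ath10k_hw_params, ath10k_htt_rx_msdu_size() and HTT_RX_BUF_TO_RX_DESC() resolve the correct per-chip layout without any change to the rx path callers.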
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index adbaeb67eedf..771252dd6d4e 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -21,7 +21,10 @@
#define HTT_RX_RING_REFILL_RESCHED_MS 5
-static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
+/* shortcut to interpret a raw memory buffer as an rx descriptor */
+#define HTT_RX_BUF_TO_RX_DESC(hw, buf) ath10k_htt_rx_desc_from_raw_buffer(hw, buf)
+
+static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb);
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
@@ -128,6 +131,7 @@ static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
+ struct ath10k_hw_params *hw = &htt->ar->hw_params;
struct htt_rx_desc *rx_desc;
struct ath10k_skb_rxcb *rxcb;
struct sk_buff *skb;
@@ -163,8 +167,8 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
skb->data);
/* Clear rx_desc attention word before posting to Rx ring */
- rx_desc = (struct htt_rx_desc *)skb->data;
- rx_desc->attention.flags = __cpu_to_le32(0);
+ rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, skb->data);
+ ath10k_htt_rx_desc_get_attention(hw, rx_desc)->flags = __cpu_to_le32(0);
paddr = dma_map_single(htt->ar->dev, skb->data,
skb->len + skb_tailroom(skb),
@@ -343,9 +347,14 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
struct sk_buff_head *amsdu)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
int msdu_len, msdu_chaining = 0;
struct sk_buff *msdu;
struct htt_rx_desc *rx_desc;
+ struct rx_attention *rx_desc_attention;
+ struct rx_frag_info_common *rx_desc_frag_info_common;
+ struct rx_msdu_start_common *rx_desc_msdu_start_common;
+ struct rx_msdu_end_common *rx_desc_msdu_end_common;
lockdep_assert_held(&htt->rx_ring.lock);
@@ -360,13 +369,18 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
__skb_queue_tail(amsdu, msdu);
- rx_desc = (struct htt_rx_desc *)msdu->data;
+ rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
+ rx_desc_attention = ath10k_htt_rx_desc_get_attention(hw, rx_desc);
+ rx_desc_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw,
+ rx_desc);
+ rx_desc_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rx_desc);
+ rx_desc_frag_info_common = ath10k_htt_rx_desc_get_frag_info(hw, rx_desc);
/* FIXME: we must report msdu payload since this is what caller
* expects now
*/
- skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
- skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));
+ skb_put(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);
+ skb_pull(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);
/*
* Sanity check - confirm the HW is finished filling in the
@@ -376,24 +390,24 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
* To prevent the case that we handle a stale Rx descriptor,
* just assert for now until we have a way to recover.
*/
- if (!(__le32_to_cpu(rx_desc->attention.flags)
+ if (!(__le32_to_cpu(rx_desc_attention->flags)
& RX_ATTENTION_FLAGS_MSDU_DONE)) {
__skb_queue_purge(amsdu);
return -EIO;
}
- msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
+ msdu_len_invalid = !!(__le32_to_cpu(rx_desc_attention->flags)
& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
- msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
+ msdu_len = MS(__le32_to_cpu(rx_desc_msdu_start_common->info0),
RX_MSDU_START_INFO0_MSDU_LENGTH);
- msdu_chained = rx_desc->frag_info.ring2_more_count;
+ msdu_chained = rx_desc_frag_info_common->ring2_more_count;
if (msdu_len_invalid)
msdu_len = 0;
skb_trim(msdu, 0);
- skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
+ skb_put(msdu, min(msdu_len, ath10k_htt_rx_msdu_size(hw)));
msdu_len -= msdu->len;
/* Note: Chained buffers do not contain rx descriptor */
@@ -411,11 +425,12 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
msdu_chaining = 1;
}
- last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
+ last_msdu = __le32_to_cpu(rx_desc_msdu_end_common->info0) &
RX_MSDU_END_INFO0_LAST_MSDU;
- trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
- sizeof(*rx_desc) - sizeof(u32));
+ /* FIXME: why are we skipping the first part of the rx_desc? */
+ trace_ath10k_htt_rx_desc(ar, (void *)rx_desc + sizeof(u32),
+ hw->rx_desc_ops->rx_desc_size - sizeof(u32));
if (last_msdu)
break;
@@ -480,6 +495,7 @@ static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt,
struct htt_rx_in_ord_msdu_desc **msdu_desc)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
u32 paddr;
struct sk_buff *frag_buf;
struct sk_buff *prev_frag_buf;
@@ -488,12 +504,12 @@ static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt,
struct htt_rx_desc *rxd;
int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);
- rxd = (void *)msdu->data;
- trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
+ trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
- skb_put(msdu, sizeof(struct htt_rx_desc));
- skb_pull(msdu, sizeof(struct htt_rx_desc));
- skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE));
+ skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
amsdu_len -= msdu->len;
last_frag = ind_desc->reserved;
@@ -556,6 +572,7 @@ ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt,
struct htt_rx_in_ord_msdu_desc_ext **msdu_desc)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
u64 paddr;
struct sk_buff *frag_buf;
struct sk_buff *prev_frag_buf;
@@ -564,12 +581,12 @@ ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt,
struct htt_rx_desc *rxd;
int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);
- rxd = (void *)msdu->data;
- trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
+ trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
- skb_put(msdu, sizeof(struct htt_rx_desc));
- skb_pull(msdu, sizeof(struct htt_rx_desc));
- skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE));
+ skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
amsdu_len -= msdu->len;
last_frag = ind_desc->reserved;
@@ -631,8 +648,10 @@ static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
struct sk_buff_head *list)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
struct htt_rx_desc *rxd;
+ struct rx_attention *rxd_attention;
struct sk_buff *msdu;
int msdu_count, ret;
bool is_offload;
@@ -667,15 +686,16 @@ static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
__skb_queue_tail(list, msdu);
if (!is_offload) {
- rxd = (void *)msdu->data;
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
- trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+ trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
- skb_put(msdu, sizeof(*rxd));
- skb_pull(msdu, sizeof(*rxd));
+ skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
- if (!(__le32_to_cpu(rxd->attention.flags) &
+ if (!(__le32_to_cpu(rxd_attention->flags) &
RX_ATTENTION_FLAGS_MSDU_DONE)) {
ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
return -EIO;
@@ -693,8 +713,10 @@ static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
struct sk_buff_head *list)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
struct htt_rx_desc *rxd;
+ struct rx_attention *rxd_attention;
struct sk_buff *msdu;
int msdu_count, ret;
bool is_offload;
@@ -728,15 +750,16 @@ static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
__skb_queue_tail(list, msdu);
if (!is_offload) {
- rxd = (void *)msdu->data;
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
- trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+ trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
- skb_put(msdu, sizeof(*rxd));
- skb_pull(msdu, sizeof(*rxd));
+ skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
- if (!(__le32_to_cpu(rxd->attention.flags) &
+ if (!(__le32_to_cpu(rxd_attention->flags) &
RX_ATTENTION_FLAGS_MSDU_DONE)) {
ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
return -EIO;
@@ -944,16 +967,32 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
struct ieee80211_rx_status *status,
struct htt_rx_desc *rxd)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct rx_attention *rxd_attention;
+ struct rx_mpdu_start *rxd_mpdu_start;
+ struct rx_mpdu_end *rxd_mpdu_end;
+ struct rx_msdu_start_common *rxd_msdu_start_common;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
+ struct rx_ppdu_start *rxd_ppdu_start;
struct ieee80211_supported_band *sband;
u8 cck, rate, bw, sgi, mcs, nss;
+ u8 *rxd_msdu_payload;
u8 preamble = 0;
u8 group_id;
u32 info1, info2, info3;
u32 stbc, nsts_su;
- info1 = __le32_to_cpu(rxd->ppdu_start.info1);
- info2 = __le32_to_cpu(rxd->ppdu_start.info2);
- info3 = __le32_to_cpu(rxd->ppdu_start.info3);
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+ rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
+ rxd_mpdu_end = ath10k_htt_rx_desc_get_mpdu_end(hw, rxd);
+ rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
+ rxd_msdu_payload = ath10k_htt_rx_desc_get_msdu_payload(hw, rxd);
+
+ info1 = __le32_to_cpu(rxd_ppdu_start->info1);
+ info2 = __le32_to_cpu(rxd_ppdu_start->info2);
+ info3 = __le32_to_cpu(rxd_ppdu_start->info3);
preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);
@@ -1022,24 +1061,24 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
if (mcs > 0x09) {
ath10k_warn(ar, "invalid MCS received %u\n", mcs);
ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
- __le32_to_cpu(rxd->attention.flags),
- __le32_to_cpu(rxd->mpdu_start.info0),
- __le32_to_cpu(rxd->mpdu_start.info1),
- __le32_to_cpu(rxd->msdu_start.common.info0),
- __le32_to_cpu(rxd->msdu_start.common.info1),
- rxd->ppdu_start.info0,
- __le32_to_cpu(rxd->ppdu_start.info1),
- __le32_to_cpu(rxd->ppdu_start.info2),
- __le32_to_cpu(rxd->ppdu_start.info3),
- __le32_to_cpu(rxd->ppdu_start.info4));
+ __le32_to_cpu(rxd_attention->flags),
+ __le32_to_cpu(rxd_mpdu_start->info0),
+ __le32_to_cpu(rxd_mpdu_start->info1),
+ __le32_to_cpu(rxd_msdu_start_common->info0),
+ __le32_to_cpu(rxd_msdu_start_common->info1),
+ rxd_ppdu_start->info0,
+ __le32_to_cpu(rxd_ppdu_start->info1),
+ __le32_to_cpu(rxd_ppdu_start->info2),
+ __le32_to_cpu(rxd_ppdu_start->info3),
+ __le32_to_cpu(rxd_ppdu_start->info4));
ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
- __le32_to_cpu(rxd->msdu_end.common.info0),
- __le32_to_cpu(rxd->mpdu_end.info0));
+ __le32_to_cpu(rxd_msdu_end_common->info0),
+ __le32_to_cpu(rxd_mpdu_end->info0));
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
"rx desc msdu payload: ",
- rxd->msdu_payload, 50);
+ rxd_msdu_payload, 50);
}
status->rate_idx = mcs;
@@ -1059,6 +1098,10 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct rx_attention *rxd_attention;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
+ struct rx_mpdu_start *rxd_mpdu_start;
struct ath10k_peer *peer;
struct ath10k_vif *arvif;
struct cfg80211_chan_def def;
@@ -1069,15 +1112,19 @@ ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
if (!rxd)
return NULL;
- if (rxd->attention.flags &
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
+
+ if (rxd_attention->flags &
__cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
return NULL;
- if (!(rxd->msdu_end.common.info0 &
+ if (!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
return NULL;
- peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ peer_id = MS(__le32_to_cpu(rxd_mpdu_start->info0),
RX_MPDU_START_INFO0_PEER_IDX);
peer = ath10k_peer_find_by_id(ar, peer_id);
@@ -1167,14 +1214,16 @@ static void ath10k_htt_rx_h_signal(struct ath10k *ar,
struct ieee80211_rx_status *status,
struct htt_rx_desc *rxd)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct rx_ppdu_start *rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
int i;
for (i = 0; i < IEEE80211_MAX_CHAINS ; i++) {
status->chains &= ~BIT(i);
- if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
+ if (rxd_ppdu_start->rssi_chains[i].pri20_mhz != 0x80) {
status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
- rxd->ppdu_start.rssi_chains[i].pri20_mhz;
+ rxd_ppdu_start->rssi_chains[i].pri20_mhz;
status->chains |= BIT(i);
}
@@ -1182,7 +1231,7 @@ static void ath10k_htt_rx_h_signal(struct ath10k *ar,
/* FIXME: Get real NF */
status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
- rxd->ppdu_start.rssi_comb;
+ rxd_ppdu_start->rssi_comb;
status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}
@@ -1190,13 +1239,18 @@ static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
struct ieee80211_rx_status *status,
struct htt_rx_desc *rxd)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct rx_ppdu_end_common *rxd_ppdu_end_common;
+
+ rxd_ppdu_end_common = ath10k_htt_rx_desc_get_ppdu_end(hw, rxd);
+
/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
* means all prior MSDUs in a PPDU are reported to mac80211 without the
* TSF. Is it worth holding frames until end of PPDU is known?
*
* FIXME: Can we get/compute 64bit TSF?
*/
- status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
+ status->mactime = __le32_to_cpu(rxd_ppdu_end_common->tsf_timestamp);
status->flag |= RX_FLAG_MACTIME_END;
}
@@ -1206,7 +1260,9 @@ static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
u32 vdev_id)
{
struct sk_buff *first;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_attention *rxd_attention;
bool is_first_ppdu;
bool is_last_ppdu;
@@ -1214,11 +1270,14 @@ static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
return;
first = skb_peek(amsdu);
- rxd = (void *)first->data - sizeof(*rxd);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)first->data - hw->rx_desc_ops->rx_desc_size);
- is_first_ppdu = !!(rxd->attention.flags &
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+
+ is_first_ppdu = !!(rxd_attention->flags &
__cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
- is_last_ppdu = !!(rxd->attention.flags &
+ is_last_ppdu = !!(rxd_attention->flags &
__cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));
if (is_first_ppdu) {
@@ -1357,7 +1416,9 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
const u8 first_hdr[64])
{
struct ieee80211_hdr *hdr;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
size_t hdr_len;
size_t crypto_len;
bool is_first;
@@ -1366,10 +1427,13 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
int bytes_aligned = ar->hw_params.decap_align_bytes;
u8 *qos;
- rxd = (void *)msdu->data - sizeof(*rxd);
- is_first = !!(rxd->msdu_end.common.info0 &
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ is_first = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
- is_last = !!(rxd->msdu_end.common.info0 &
+ is_last = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
/* Delivered decapped frame:
@@ -1387,7 +1451,7 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
* error packets. If limit exceeds, hw sends all remaining MSDUs as
* a single last MSDU with this msdu limit error set.
*/
- msdu_limit_err = ath10k_rx_desc_msdu_limit_error(&ar->hw_params, rxd);
+ msdu_limit_err = ath10k_htt_rx_desc_msdu_limit_error(hw, rxd);
/* If MSDU limit error happens, then don't warn on, the partial raw MSDU
* without first MSDU is expected in that case, and handled later here.
@@ -1479,6 +1543,7 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
const u8 first_hdr[64],
enum htt_rx_mpdu_encrypt_type enctype)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct ieee80211_hdr *hdr;
struct htt_rx_desc *rxd;
size_t hdr_len;
@@ -1499,9 +1564,10 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
*/
/* pull decapped header and copy SA & DA */
- rxd = (void *)msdu->data - sizeof(*rxd);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw, (void *)msdu->data -
+ hw->rx_desc_ops->rx_desc_size);
- l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+ l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
skb_put(msdu, l3_pad_bytes);
hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);
@@ -1537,18 +1603,25 @@ static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
enum htt_rx_mpdu_encrypt_type enctype)
{
struct ieee80211_hdr *hdr;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
+ u8 *rxd_rx_hdr_status;
size_t hdr_len, crypto_len;
void *rfc1042;
bool is_first, is_last, is_amsdu;
int bytes_aligned = ar->hw_params.decap_align_bytes;
- rxd = (void *)msdu->data - sizeof(*rxd);
- hdr = (void *)rxd->rx_hdr_status;
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ rxd_rx_hdr_status = ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
+ hdr = (void *)rxd_rx_hdr_status;
- is_first = !!(rxd->msdu_end.common.info0 &
+ is_first = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
- is_last = !!(rxd->msdu_end.common.info0 &
+ is_last = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
is_amsdu = !(is_first && is_last);
@@ -1574,6 +1647,7 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
const u8 first_hdr[64],
enum htt_rx_mpdu_encrypt_type enctype)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct ieee80211_hdr *hdr;
struct ethhdr *eth;
size_t hdr_len;
@@ -1593,8 +1667,10 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
if (WARN_ON_ONCE(!rfc1042))
return;
- rxd = (void *)msdu->data - sizeof(*rxd);
- l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
skb_put(msdu, l3_pad_bytes);
skb_pull(msdu, l3_pad_bytes);
@@ -1635,6 +1711,7 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
const u8 first_hdr[64],
enum htt_rx_mpdu_encrypt_type enctype)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct ieee80211_hdr *hdr;
size_t hdr_len;
int l3_pad_bytes;
@@ -1647,8 +1724,10 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
* [payload]
*/
- rxd = (void *)msdu->data - sizeof(*rxd);
- l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
skb_put(msdu, l3_pad_bytes);
skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);
@@ -1673,7 +1752,9 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
enum htt_rx_mpdu_encrypt_type enctype,
bool is_decrypted)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_msdu_start_common *rxd_msdu_start_common;
enum rx_msdu_decap_format decap;
/* First msdu's decapped header:
@@ -1687,8 +1768,11 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
* [rfc1042/llc]
*/
- rxd = (void *)msdu->data - sizeof(*rxd);
- decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
+ decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
RX_MSDU_START_INFO1_DECAP_FORMAT);
switch (decap) {
@@ -1710,17 +1794,23 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
}
}
-static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
+static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb)
{
struct htt_rx_desc *rxd;
+ struct rx_attention *rxd_attention;
+ struct rx_msdu_start_common *rxd_msdu_start_common;
u32 flags, info;
bool is_ip4, is_ip6;
bool is_tcp, is_udp;
bool ip_csum_ok, tcpudp_csum_ok;
- rxd = (void *)skb->data - sizeof(*rxd);
- flags = __le32_to_cpu(rxd->attention.flags);
- info = __le32_to_cpu(rxd->msdu_start.common.info1);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)skb->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+ rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
+ flags = __le32_to_cpu(rxd_attention->flags);
+ info = __le32_to_cpu(rxd_msdu_start_common->info1);
is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
@@ -1741,9 +1831,10 @@ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
return CHECKSUM_UNNECESSARY;
}
-static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
+static void ath10k_htt_rx_h_csum_offload(struct ath10k_hw_params *hw,
+ struct sk_buff *msdu)
{
- msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
+ msdu->ip_summed = ath10k_htt_rx_get_csum_state(hw, msdu);
}
static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
@@ -1835,7 +1926,11 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
struct sk_buff *first;
struct sk_buff *last;
struct sk_buff *msdu, *temp;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_attention *rxd_attention;
+ struct rx_mpdu_start *rxd_mpdu_start;
+
struct ieee80211_hdr *hdr;
enum htt_rx_mpdu_encrypt_type enctype;
u8 first_hdr[64];
@@ -1853,18 +1948,22 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
return;
first = skb_peek(amsdu);
- rxd = (void *)first->data - sizeof(*rxd);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)first->data - hw->rx_desc_ops->rx_desc_size);
- is_mgmt = !!(rxd->attention.flags &
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+ rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
+
+ is_mgmt = !!(rxd_attention->flags &
__cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
- enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
* decapped header. It'll be used for undecapping of each MSDU.
*/
- hdr = (void *)rxd->rx_hdr_status;
+ hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
if (rx_hdr)
@@ -1882,8 +1981,11 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
/* Some attention flags are valid only in the last MSDU. */
last = skb_peek_tail(amsdu);
- rxd = (void *)last->data - sizeof(*rxd);
- attention = __le32_to_cpu(rxd->attention.flags);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)last->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+ attention = __le32_to_cpu(rxd_attention->flags);
has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
@@ -1971,7 +2073,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
continue;
}
- ath10k_htt_rx_h_csum_offload(msdu);
+ ath10k_htt_rx_h_csum_offload(&ar->hw_params, msdu);
if (frag && !fill_crypt_header &&
enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
@@ -2083,12 +2185,19 @@ static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
unsigned long *unchain_cnt)
{
struct sk_buff *first;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_msdu_start_common *rxd_msdu_start_common;
+ struct rx_frag_info_common *rxd_frag_info;
enum rx_msdu_decap_format decap;
first = skb_peek(amsdu);
- rxd = (void *)first->data - sizeof(*rxd);
- decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)first->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
+ rxd_frag_info = ath10k_htt_rx_desc_get_frag_info(hw, rxd);
+ decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
RX_MSDU_START_INFO1_DECAP_FORMAT);
/* FIXME: Current unchaining logic can only handle simple case of raw
@@ -2097,7 +2206,7 @@ static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
* try re-constructing such frames - it'll be pretty much garbage.
*/
if (decap != RX_MSDU_DECAP_RAW ||
- skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
+ skb_queue_len(amsdu) != 1 + rxd_frag_info->ring2_more_count) {
*drop_cnt += skb_queue_len(amsdu);
__skb_queue_purge(amsdu);
return;
@@ -2112,7 +2221,10 @@ static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
u8 *subframe_hdr;
struct sk_buff *first;
bool is_first, is_last;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
+ struct rx_mpdu_start *rxd_mpdu_start;
struct ieee80211_hdr *hdr;
size_t hdr_len, crypto_len;
enum htt_rx_mpdu_encrypt_type enctype;
@@ -2120,12 +2232,16 @@ static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
first = skb_peek(amsdu);
- rxd = (void *)first->data - sizeof(*rxd);
- hdr = (void *)rxd->rx_hdr_status;
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)first->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
+ hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
- is_first = !!(rxd->msdu_end.common.info0 &
+ is_first = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
- is_last = !!(rxd->msdu_end.common.info0 &
+ is_last = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
/* Return in case of non-aggregated msdu */
@@ -2136,7 +2252,7 @@ static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
if (!is_first)
return false;
- enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
hdr_len = ieee80211_hdrlen(hdr->frame_control);
@@ -3028,11 +3144,13 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
spin_unlock_bh(&ar->data_lock);
}
-static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
+static int ath10k_htt_rx_extract_amsdu(struct ath10k_hw_params *hw,
+ struct sk_buff_head *list,
struct sk_buff_head *amsdu)
{
struct sk_buff *msdu;
struct htt_rx_desc *rxd;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
if (skb_queue_empty(list))
return -ENOBUFS;
@@ -3043,15 +3161,22 @@ static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
while ((msdu = __skb_dequeue(list))) {
__skb_queue_tail(amsdu, msdu);
- rxd = (void *)msdu->data - sizeof(*rxd);
- if (rxd->msdu_end.common.info0 &
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data -
+ hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ if (rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
break;
}
msdu = skb_peek_tail(amsdu);
- rxd = (void *)msdu->data - sizeof(*rxd);
- if (!(rxd->msdu_end.common.info0 &
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ if (!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
skb_queue_splice_init(amsdu, list);
return -EAGAIN;
@@ -3194,7 +3319,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
while (!skb_queue_empty(&list)) {
__skb_queue_head_init(&amsdu);
- ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
+ ret = ath10k_htt_rx_extract_amsdu(&ar->hw_params, &list, &amsdu);
switch (ret) {
case 0:
/* Note: The in-order indication may report interleaved
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index b793eac2cfac..9842a4b2f78f 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -796,47 +796,26 @@ static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt)
return 0;
}
-static void ath10k_htt_fill_rx_desc_offset_32(void *rx_ring)
+static void ath10k_htt_fill_rx_desc_offset_32(struct ath10k_hw_params *hw, void *rx_ring)
{
struct htt_rx_ring_setup_ring32 *ring =
(struct htt_rx_ring_setup_ring32 *)rx_ring;
-#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
- ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
- ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
- ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
- ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
- ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
- ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
- ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
- ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
- ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
- ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
-#undef desc_offset
+ ath10k_htt_rx_desc_get_offsets(hw, &ring->offsets);
}
-static void ath10k_htt_fill_rx_desc_offset_64(void *rx_ring)
+static void ath10k_htt_fill_rx_desc_offset_64(struct ath10k_hw_params *hw, void *rx_ring)
{
struct htt_rx_ring_setup_ring64 *ring =
(struct htt_rx_ring_setup_ring64 *)rx_ring;
-#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
- ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
- ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
- ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
- ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
- ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
- ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
- ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
- ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
- ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
- ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
-#undef desc_offset
+ ath10k_htt_rx_desc_get_offsets(hw, &ring->offsets);
}
static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct sk_buff *skb;
struct htt_cmd *cmd;
struct htt_rx_ring_setup_ring32 *ring;
@@ -896,7 +875,7 @@ static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
ring->flags = __cpu_to_le16(flags);
ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
- ath10k_htt_fill_rx_desc_offset_32(ring);
+ ath10k_htt_fill_rx_desc_offset_32(hw, ring);
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
@@ -909,6 +888,7 @@ static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct sk_buff *skb;
struct htt_cmd *cmd;
struct htt_rx_ring_setup_ring64 *ring;
@@ -965,7 +945,7 @@ static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
ring->flags = __cpu_to_le16(flags);
ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
- ath10k_htt_fill_rx_desc_offset_64(ring);
+ ath10k_htt_fill_rx_desc_offset_64(hw, ring);
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 57c58af64a57..e52e41a70321 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -11,6 +11,7 @@
#include "hif.h"
#include "wmi-ops.h"
#include "bmi.h"
+#include "rx_desc.h"
const struct ath10k_hw_regs qca988x_regs = {
.rtc_soc_base_address = 0x00004000,
@@ -1134,21 +1135,7 @@ const struct ath10k_hw_ops qca988x_ops = {
.is_rssi_enable = ath10k_htt_tx_rssi_enable,
};
-static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
-{
- return MS(__le32_to_cpu(rxd->msdu_end.qca99x0.info1),
- RX_MSDU_END_INFO1_L3_HDR_PAD);
-}
-
-static bool ath10k_qca99x0_rx_desc_msdu_limit_error(struct htt_rx_desc *rxd)
-{
- return !!(rxd->msdu_end.common.info0 &
- __cpu_to_le32(RX_MSDU_END_INFO0_MSDU_LIMIT_ERR));
-}
-
const struct ath10k_hw_ops qca99x0_ops = {
- .rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
- .rx_desc_get_msdu_limit_error = ath10k_qca99x0_rx_desc_msdu_limit_error,
.is_rssi_enable = ath10k_htt_tx_rssi_enable,
};
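
The two qca99x0 helpers removed from hw.c above do not disappear: the same logic now belongs behind the per-chip ath10k_htt_rx_desc_ops. A hedged sketch of how they could be reimplemented against the v1 descriptor layout (illustrative only; the actual callbacks added elsewhere in this patch may be structured differently):

static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
{
	/* the legacy chips embed struct htt_rx_desc inside htt_rx_desc_v1 */
	struct htt_rx_desc_v1 *rx_desc =
		container_of(rxd, struct htt_rx_desc_v1, base);

	return MS(__le32_to_cpu(rx_desc->msdu_end.qca99x0.info1),
		  RX_MSDU_END_INFO1_L3_HDR_PAD);
}

static bool ath10k_qca99x0_rx_desc_msdu_limit_error(struct htt_rx_desc *rxd)
{
	struct htt_rx_desc_v1 *rx_desc =
		container_of(rxd, struct htt_rx_desc_v1, base);

	return !!(rx_desc->msdu_end.common.info0 &
		  __cpu_to_le32(RX_MSDU_END_INFO0_MSDU_LIMIT_ERR));
}

The bodies mirror the deleted ath10k_hw_ops callbacks; only the container_of() step is new, because the generic struct htt_rx_desc no longer carries the qca99x0-specific unions (they moved to the *_v1 variants in rx_desc.h).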
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 591ef7416b61..5215a6816d71 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -510,6 +510,8 @@ struct ath10k_hw_clk_params {
u32 outdiv;
};
+struct htt_rx_desc_ops;
+
struct ath10k_hw_params {
u32 id;
u16 dev_id;
@@ -562,6 +564,9 @@ struct ath10k_hw_params {
*/
bool sw_decrypt_mcast_mgmt;
+ /* Rx descriptor abstraction */
+ const struct ath10k_htt_rx_desc_ops *rx_desc_ops;
+
const struct ath10k_hw_ops *hw_ops;
/* Number of bytes used for alignment in rx_hdr_status of rx desc. */
@@ -630,16 +635,14 @@ struct ath10k_hw_params {
bool dynamic_sar_support;
};
-struct htt_rx_desc;
struct htt_resp;
struct htt_data_tx_completion_ext;
+struct htt_rx_ring_rx_desc_offsets;
/* Defines needed for Rx descriptor abstraction */
struct ath10k_hw_ops {
- int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd);
void (*set_coverage_class)(struct ath10k *ar, s16 value);
int (*enable_pll_clk)(struct ath10k *ar);
- bool (*rx_desc_get_msdu_limit_error)(struct htt_rx_desc *rxd);
int (*tx_data_rssi_pad_bytes)(struct htt_resp *htt);
int (*is_rssi_enable)(struct htt_resp *resp);
};
@@ -653,24 +656,6 @@ extern const struct ath10k_hw_ops wcn3990_ops;
extern const struct ath10k_hw_clk_params qca6174_clk[];
static inline int
-ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
- struct htt_rx_desc *rxd)
-{
- if (hw->hw_ops->rx_desc_get_l3_pad_bytes)
- return hw->hw_ops->rx_desc_get_l3_pad_bytes(rxd);
- return 0;
-}
-
-static inline bool
-ath10k_rx_desc_msdu_limit_error(struct ath10k_hw_params *hw,
- struct htt_rx_desc *rxd)
-{
- if (hw->hw_ops->rx_desc_get_msdu_limit_error)
- return hw->hw_ops->rx_desc_get_msdu_limit_error(rxd);
- return false;
-}
-
-static inline int
ath10k_tx_data_rssi_get_pad_bytes(struct ath10k_hw_params *hw,
struct htt_resp *htt)
{
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index 705b6295e466..6ce2a8b1060d 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -196,17 +196,31 @@ struct rx_attention {
* descriptor.
*/
-struct rx_frag_info {
+struct rx_frag_info_common {
u8 ring0_more_count;
u8 ring1_more_count;
u8 ring2_more_count;
u8 ring3_more_count;
+} __packed;
+
+struct rx_frag_info_wcn3990 {
u8 ring4_more_count;
u8 ring5_more_count;
u8 ring6_more_count;
u8 ring7_more_count;
} __packed;
+struct rx_frag_info {
+ struct rx_frag_info_common common;
+ union {
+ struct rx_frag_info_wcn3990 wcn3990;
+ } __packed;
+} __packed;
+
+struct rx_frag_info_v1 {
+ struct rx_frag_info_common common;
+} __packed;
+
/*
* ring0_more_count
* Indicates the number of more buffers associated with RX DMA
@@ -474,11 +488,17 @@ struct rx_msdu_start_wcn3990 {
struct rx_msdu_start {
struct rx_msdu_start_common common;
union {
- struct rx_msdu_start_qca99x0 qca99x0;
struct rx_msdu_start_wcn3990 wcn3990;
} __packed;
} __packed;
+struct rx_msdu_start_v1 {
+ struct rx_msdu_start_common common;
+ union {
+ struct rx_msdu_start_qca99x0 qca99x0;
+ } __packed;
+} __packed;
+
/*
* msdu_length
* MSDU length in bytes after decapsulation. This field is
@@ -612,11 +632,17 @@ struct rx_msdu_end_wcn3990 {
struct rx_msdu_end {
struct rx_msdu_end_common common;
union {
- struct rx_msdu_end_qca99x0 qca99x0;
struct rx_msdu_end_wcn3990 wcn3990;
} __packed;
} __packed;
+struct rx_msdu_end_v1 {
+ struct rx_msdu_end_common common;
+ union {
+ struct rx_msdu_end_qca99x0 qca99x0;
+ } __packed;
+} __packed;
+
/*
*ip_hdr_chksum
* This can include the IP header checksum or the pseudo header
@@ -1136,11 +1162,17 @@ struct rx_ppdu_end_wcn3990 {
struct rx_ppdu_end {
struct rx_ppdu_end_common common;
union {
+ struct rx_ppdu_end_wcn3990 wcn3990;
+ } __packed;
+} __packed;
+
+struct rx_ppdu_end_v1 {
+ struct rx_ppdu_end_common common;
+ union {
struct rx_ppdu_end_qca988x qca988x;
struct rx_ppdu_end_qca6174 qca6174;
struct rx_ppdu_end_qca99x0 qca99x0;
struct rx_ppdu_end_qca9984 qca9984;
- struct rx_ppdu_end_wcn3990 wcn3990;
} __packed;
} __packed;
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 9513ab696fff..8328966a0471 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -1306,13 +1306,10 @@ static int ath10k_snoc_resource_init(struct ath10k *ar)
}
for (i = 0; i < CE_COUNT; i++) {
- res = platform_get_resource(ar_snoc->dev, IORESOURCE_IRQ, i);
- if (!res) {
- ath10k_err(ar, "failed to get IRQ%d\n", i);
- ret = -ENODEV;
- goto out;
- }
- ar_snoc->ce_irqs[i].irq_line = res->start;
+ ret = platform_get_irq(ar_snoc->dev, i);
+ if (ret < 0)
+ return ret;
+ ar_snoc->ce_irqs[i].irq_line = ret;
}
ret = device_property_read_u32(&pdev->dev, "qcom,xo-cal-data",
@@ -1323,10 +1320,8 @@ static int ath10k_snoc_resource_init(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_SNOC, "xo cal data %x\n",
ar_snoc->xo_cal_data);
}
- ret = 0;
-out:
- return ret;
+ return 0;
}
static void ath10k_snoc_quirks_init(struct ath10k *ar)
@@ -1556,11 +1551,11 @@ static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size)
node = of_parse_phandle(dev->of_node, "memory-region", 0);
if (node) {
ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
if (ret) {
dev_err(dev, "failed to resolve msa fixed region\n");
return ret;
}
- of_node_put(node);
ar->msa.paddr = r.start;
ar->msa.mem_size = resource_size(&r);
diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h
index 25e0ad36ddb1..b4733b5ded34 100644
--- a/drivers/net/wireless/ath/ath10k/swap.h
+++ b/drivers/net/wireless/ath/ath10k/swap.h
@@ -17,7 +17,7 @@ struct ath10k_fw_file;
struct ath10k_swap_code_seg_tlv {
__le32 address;
__le32 length;
- u8 data[0];
+ u8 data[];
} __packed;
struct ath10k_swap_code_seg_tail {
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 6f8b64218894..10123974c3da 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -125,7 +125,7 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
tx_done->ack_rssi != ATH10K_INVALID_RSSI) {
info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
tx_done->ack_rssi;
- info->status.is_valid_ack_signal = true;
+ info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
}
ieee80211_tx_status(htt->ar->hw, msdu);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 7c1c2658cb5f..cd438f76f284 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -2427,7 +2427,7 @@ wmi_process_mgmt_tx_comp(struct ath10k *ar, struct mgmt_tx_compl_params *param)
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
param->ack_rssi;
- info->status.is_valid_ack_signal = true;
+ info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
}
ieee80211_tx_status_irqsafe(ar->hw, msdu);
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
index 7d65c115669f..20b9aa8ddf7d 100644
--- a/drivers/net/wireless/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -337,14 +337,15 @@ static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
if (patterns[i].mask[j / 8] & BIT(j % 8))
bitmask[j] = 0xff;
old_pattern.mask = bitmask;
- new_pattern = old_pattern;
if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) {
- if (patterns[i].pkt_offset < ETH_HLEN)
+ if (patterns[i].pkt_offset < ETH_HLEN) {
ath10k_wow_convert_8023_to_80211(&new_pattern,
&old_pattern);
- else
+ } else {
+ new_pattern = old_pattern;
new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
+ }
}
if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index 3fb0aa000825..f407d4af2074 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -391,6 +391,8 @@ static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
for (j = 0; j < irq_grp->num_irq; j++)
free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
+
+ netif_napi_del(&irq_grp->napi);
}
}
@@ -466,7 +468,7 @@ static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static int ath11k_ahb_ext_irq_config(struct ath11k_base *ab)
+static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab)
{
struct ath11k_hw_params *hw = &ab->hw_params;
int i, j;
@@ -574,7 +576,7 @@ static int ath11k_ahb_config_irq(struct ath11k_base *ab)
}
/* Configure external interrupts */
- ret = ath11k_ahb_ext_irq_config(ab);
+ ret = ath11k_ahb_config_ext_irq(ab);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h
index 8255b6cfab0c..9644ff909502 100644
--- a/drivers/net/wireless/ath/ath11k/ce.h
+++ b/drivers/net/wireless/ath/ath11k/ce.h
@@ -145,7 +145,7 @@ struct ath11k_ce_ring {
u32 hal_ring_id;
/* keep last */
- struct sk_buff *skb[0];
+ struct sk_buff *skb[];
};
struct ath11k_ce_pipe {
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 293563b3f784..71eb7d04c3bf 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -97,6 +98,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.wakeup_mhi = false,
.supports_rssi_stats = false,
.fw_wmi_diag_event = false,
+ .current_cc_support = false,
+ .dbr_debug_support = true,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -161,6 +164,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.wakeup_mhi = false,
.supports_rssi_stats = false,
.fw_wmi_diag_event = false,
+ .current_cc_support = false,
+ .dbr_debug_support = true,
},
{
.name = "qca6390 hw2.0",
@@ -224,6 +229,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.wakeup_mhi = true,
.supports_rssi_stats = true,
.fw_wmi_diag_event = true,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
},
{
.name = "qcn9074 hw1.0",
@@ -287,6 +294,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.wakeup_mhi = false,
.supports_rssi_stats = false,
.fw_wmi_diag_event = false,
+ .current_cc_support = false,
+ .dbr_debug_support = true,
},
{
.name = "wcn6855 hw2.0",
@@ -350,6 +359,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.wakeup_mhi = true,
.supports_rssi_stats = true,
.fw_wmi_diag_event = true,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
},
{
.name = "wcn6855 hw2.1",
@@ -412,6 +423,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.wakeup_mhi = true,
.supports_rssi_stats = true,
.fw_wmi_diag_event = true,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
},
};
@@ -1404,6 +1417,8 @@ EXPORT_SYMBOL(ath11k_core_deinit);
void ath11k_core_free(struct ath11k_base *ab)
{
+ destroy_workqueue(ab->workqueue);
+
kfree(ab);
}
EXPORT_SYMBOL(ath11k_core_free);
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 9e88ccca5ca7..c0228e91a596 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -263,6 +263,9 @@ struct ath11k_vif {
bool bcca_zero_sent;
bool do_not_send_tmpl;
struct ieee80211_chanctx_conf chanctx;
+#ifdef CONFIG_ATH11K_DEBUGFS
+ struct dentry *debugfs_twt;
+#endif /* CONFIG_ATH11K_DEBUGFS */
};
struct ath11k_vif_iter {
@@ -441,6 +444,8 @@ struct ath11k_dbg_htt_stats {
spinlock_t lock;
};
+#define MAX_MODULE_ID_BITMAP_WORDS 16
+
struct ath11k_debug {
struct dentry *debugfs_pdev;
struct ath11k_dbg_htt_stats htt_stats;
@@ -454,6 +459,9 @@ struct ath11k_debug {
u32 pktlog_peer_valid;
u8 pktlog_peer_addr[ETH_ALEN];
u32 rx_filter;
+ u32 mem_offset;
+ u32 module_id_bitmap[MAX_MODULE_ID_BITMAP_WORDS];
+ struct ath11k_debug_dbr *dbr_debug[WMI_DIRECT_BUF_MAX];
};
struct ath11k_per_peer_tx_stats {
@@ -603,6 +611,8 @@ struct ath11k {
struct completion finish_11d_ch_list;
bool pending_11d;
bool regdom_set_by_user;
+ int hw_rate_code;
+ u8 twt_enabled;
};
struct ath11k_band_cap {
@@ -806,7 +816,7 @@ struct ath11k_base {
} id;
/* must be last */
- u8 drv_priv[0] __aligned(sizeof(void *));
+ u8 drv_priv[] __aligned(sizeof(void *));
};
struct ath11k_fw_stats_pdev {
diff --git a/drivers/net/wireless/ath/ath11k/dbring.c b/drivers/net/wireless/ath/ath11k/dbring.c
index eda67ebfc4c2..2107ec05d14f 100644
--- a/drivers/net/wireless/ath/ath11k/dbring.c
+++ b/drivers/net/wireless/ath/ath11k/dbring.c
@@ -37,7 +37,8 @@ static void ath11k_dbring_fill_magic_value(struct ath11k *ar,
static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
struct ath11k_dbring *ring,
- struct ath11k_dbring_element *buff)
+ struct ath11k_dbring_element *buff,
+ enum wmi_direct_buffer_module id)
{
struct ath11k_base *ab = ar->ab;
struct hal_srng *srng;
@@ -84,6 +85,7 @@ static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);
+ ath11k_debugfs_add_dbring_entry(ar, id, ATH11K_DBG_DBR_EVENT_REPLENISH, srng);
ath11k_hal_srng_access_end(ab, srng);
return 0;
@@ -101,7 +103,8 @@ err:
}
static int ath11k_dbring_fill_bufs(struct ath11k *ar,
- struct ath11k_dbring *ring)
+ struct ath11k_dbring *ring,
+ enum wmi_direct_buffer_module id)
{
struct ath11k_dbring_element *buff;
struct hal_srng *srng;
@@ -129,7 +132,7 @@ static int ath11k_dbring_fill_bufs(struct ath11k *ar,
kfree(buff);
break;
}
- ret = ath11k_dbring_bufs_replenish(ar, ring, buff);
+ ret = ath11k_dbring_bufs_replenish(ar, ring, buff, id);
if (ret) {
ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n",
num_remain, req_entries);
@@ -210,7 +213,7 @@ int ath11k_dbring_buf_setup(struct ath11k *ar,
ring->hp_addr = ath11k_hal_srng_get_hp_addr(ar->ab, srng);
ring->tp_addr = ath11k_hal_srng_get_tp_addr(ar->ab, srng);
- ret = ath11k_dbring_fill_bufs(ar, ring);
+ ret = ath11k_dbring_fill_bufs(ar, ring, db_cap->id);
return ret;
}
@@ -270,7 +273,7 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
struct ath11k_buffer_addr desc;
u8 *vaddr_unalign;
u32 num_entry, num_buff_reaped;
- u8 pdev_idx, rbm;
+ u8 pdev_idx, rbm, module_id;
u32 cookie;
int buf_id;
int size;
@@ -278,6 +281,7 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
int ret = 0;
pdev_idx = ev->fixed.pdev_id;
+ module_id = ev->fixed.module_id;
if (pdev_idx >= ab->num_radios) {
ath11k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
@@ -346,6 +350,9 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
DMA_FROM_DEVICE);
+ ath11k_debugfs_add_dbring_entry(ar, module_id,
+ ATH11K_DBG_DBR_EVENT_RX, srng);
+
if (ring->handler) {
vaddr_unalign = buff->payload;
handler_data.data = PTR_ALIGN(vaddr_unalign,
@@ -357,7 +364,7 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
buff->paddr = 0;
memset(buff->payload, 0, size);
- ath11k_dbring_bufs_replenish(ar, ring, buff);
+ ath11k_dbring_bufs_replenish(ar, ring, buff, module_id);
}
spin_unlock_bh(&srng->lock);
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index 198ade90b725..a82266c8befc 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -52,6 +52,45 @@ static const char *htt_bp_lmac_ring[HTT_SW_LMAC_RING_IDX_MAX] = {
"MONITOR_DEST_RING",
};
+void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
+ enum wmi_direct_buffer_module id,
+ enum ath11k_dbg_dbr_event event,
+ struct hal_srng *srng)
+{
+ struct ath11k_debug_dbr *dbr_debug;
+ struct ath11k_dbg_dbr_data *dbr_data;
+ struct ath11k_dbg_dbr_entry *entry;
+
+ if (id >= WMI_DIRECT_BUF_MAX || event >= ATH11K_DBG_DBR_EVENT_MAX)
+ return;
+
+ dbr_debug = ar->debug.dbr_debug[id];
+ if (!dbr_debug)
+ return;
+
+ if (!dbr_debug->dbr_debug_enabled)
+ return;
+
+ dbr_data = &dbr_debug->dbr_dbg_data;
+
+ spin_lock_bh(&dbr_data->lock);
+
+ if (dbr_data->entries) {
+ entry = &dbr_data->entries[dbr_data->dbr_debug_idx];
+ entry->hp = srng->u.src_ring.hp;
+ entry->tp = *srng->u.src_ring.tp_addr;
+ entry->timestamp = jiffies;
+ entry->event = event;
+
+ dbr_data->dbr_debug_idx++;
+ if (dbr_data->dbr_debug_idx ==
+ dbr_data->num_ring_debug_entries)
+ dbr_data->dbr_debug_idx = 0;
+ }
+
+ spin_unlock_bh(&dbr_data->lock);
+}
+
static void ath11k_fw_stats_pdevs_free(struct list_head *head)
{
struct ath11k_fw_stats_pdev *i, *tmp;
@@ -666,6 +705,12 @@ static ssize_t ath11k_write_extd_rx_stats(struct file *file,
goto exit;
}
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) {
+ ar->debug.extd_rx_stats = enable;
+ ret = count;
+ goto exit;
+ }
+
if (enable) {
rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START;
rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START;
@@ -870,6 +915,69 @@ static const struct file_operations fops_soc_dp_stats = {
.llseek = default_llseek,
};
+static ssize_t ath11k_write_fw_dbglog(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[128] = {0};
+ struct ath11k_fw_dbglog dbglog;
+ unsigned int param, mod_id_index, is_end;
+ u64 value;
+ int ret, num;
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (ret <= 0)
+ return ret;
+
+ num = sscanf(buf, "%u %llx %u %u", &param, &value, &mod_id_index, &is_end);
+
+ if (num < 2)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if (param == WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP ||
+ param == WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP) {
+ if (num != 4 || mod_id_index > (MAX_MODULE_ID_BITMAP_WORDS - 1)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ar->debug.module_id_bitmap[mod_id_index] = upper_32_bits(value);
+ if (!is_end) {
+ ret = count;
+ goto out;
+ }
+ } else {
+ if (num != 2) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ dbglog.param = param;
+ dbglog.value = lower_32_bits(value);
+ ret = ath11k_wmi_fw_dbglog_cfg(ar, ar->debug.module_id_bitmap, &dbglog);
+ if (ret) {
+ ath11k_warn(ar->ab, "fw dbglog config failed from debugfs: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_fw_dbglog = {
+ .write = ath11k_write_fw_dbglog,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
{
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
@@ -1107,6 +1215,169 @@ static const struct file_operations fops_simulate_radar = {
.open = simple_open
};
+static ssize_t ath11k_debug_dump_dbr_entries(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_dbg_dbr_data *dbr_dbg_data = file->private_data;
+ static const char * const event_id_to_string[] = {"empty", "Rx", "Replenish"};
+ int size = ATH11K_DEBUG_DBR_ENTRIES_MAX * 100;
+ char *buf;
+ int i, ret;
+ int len = 0;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, size - len,
+ "-----------------------------------------\n");
+ len += scnprintf(buf + len, size - len,
+ "| idx | hp | tp | timestamp | event |\n");
+ len += scnprintf(buf + len, size - len,
+ "-----------------------------------------\n");
+
+ spin_lock_bh(&dbr_dbg_data->lock);
+
+ for (i = 0; i < dbr_dbg_data->num_ring_debug_entries; i++) {
+ len += scnprintf(buf + len, size - len,
+ "|%4u|%8u|%8u|%11llu|%8s|\n", i,
+ dbr_dbg_data->entries[i].hp,
+ dbr_dbg_data->entries[i].tp,
+ dbr_dbg_data->entries[i].timestamp,
+ event_id_to_string[dbr_dbg_data->entries[i].event]);
+ }
+
+ spin_unlock_bh(&dbr_dbg_data->lock);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return ret;
+}
+
+static const struct file_operations fops_debug_dump_dbr_entries = {
+ .read = ath11k_debug_dump_dbr_entries,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void ath11k_debugfs_dbr_dbg_destroy(struct ath11k *ar, int dbr_id)
+{
+ struct ath11k_debug_dbr *dbr_debug;
+ struct ath11k_dbg_dbr_data *dbr_dbg_data;
+
+ if (!ar->debug.dbr_debug[dbr_id])
+ return;
+
+ dbr_debug = ar->debug.dbr_debug[dbr_id];
+ dbr_dbg_data = &dbr_debug->dbr_dbg_data;
+
+ debugfs_remove_recursive(dbr_debug->dbr_debugfs);
+ kfree(dbr_dbg_data->entries);
+ kfree(dbr_debug);
+ ar->debug.dbr_debug[dbr_id] = NULL;
+}
+
+static int ath11k_debugfs_dbr_dbg_init(struct ath11k *ar, int dbr_id)
+{
+ struct ath11k_debug_dbr *dbr_debug;
+ struct ath11k_dbg_dbr_data *dbr_dbg_data;
+ static const char * const dbr_id_to_str[] = {"spectral", "CFR"};
+
+ if (ar->debug.dbr_debug[dbr_id])
+ return 0;
+
+ ar->debug.dbr_debug[dbr_id] = kzalloc(sizeof(*dbr_debug),
+ GFP_KERNEL);
+
+ if (!ar->debug.dbr_debug[dbr_id])
+ return -ENOMEM;
+
+ dbr_debug = ar->debug.dbr_debug[dbr_id];
+ dbr_dbg_data = &dbr_debug->dbr_dbg_data;
+
+ if (dbr_debug->dbr_debugfs)
+ return 0;
+
+ dbr_debug->dbr_debugfs = debugfs_create_dir(dbr_id_to_str[dbr_id],
+ ar->debug.debugfs_pdev);
+ if (IS_ERR_OR_NULL(dbr_debug->dbr_debugfs)) {
+ if (IS_ERR(dbr_debug->dbr_debugfs))
+ return PTR_ERR(dbr_debug->dbr_debugfs);
+ return -ENOMEM;
+ }
+
+ dbr_debug->dbr_debug_enabled = true;
+ dbr_dbg_data->num_ring_debug_entries = ATH11K_DEBUG_DBR_ENTRIES_MAX;
+ dbr_dbg_data->dbr_debug_idx = 0;
+ dbr_dbg_data->entries = kcalloc(ATH11K_DEBUG_DBR_ENTRIES_MAX,
+ sizeof(struct ath11k_dbg_dbr_entry),
+ GFP_KERNEL);
+ if (!dbr_dbg_data->entries)
+ return -ENOMEM;
+
+ spin_lock_init(&dbr_dbg_data->lock);
+
+ debugfs_create_file("dump_dbr_debug", 0444, dbr_debug->dbr_debugfs,
+ dbr_dbg_data, &fops_debug_dump_dbr_entries);
+
+ return 0;
+}
+
+static ssize_t ath11k_debugfs_write_enable_dbr_dbg(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32] = {0};
+ u32 dbr_id, enable;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (ret < 0)
+ goto out;
+
+ buf[ret] = '\0';
+ ret = sscanf(buf, "%u %u", &dbr_id, &enable);
+ if (ret != 2 || dbr_id > 1 || enable > 1) {
+ ret = -EINVAL;
+ ath11k_warn(ar->ab, "usage: echo <dbr_id> <val> dbr_id:0-Spectral 1-CFR val:0-disable 1-enable\n");
+ goto out;
+ }
+
+ if (enable) {
+ ret = ath11k_debugfs_dbr_dbg_init(ar, dbr_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "db ring module debugfs init failed: %d\n",
+ ret);
+ goto out;
+ }
+ } else {
+ ath11k_debugfs_dbr_dbg_destroy(ar, dbr_id);
+ }
+
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_dbr_debug = {
+ .write = ath11k_debugfs_write_enable_dbr_dbg,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
int ath11k_debugfs_register(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
@@ -1136,6 +1407,9 @@ int ath11k_debugfs_register(struct ath11k *ar)
debugfs_create_file("pktlog_filter", 0644,
ar->debug.debugfs_pdev, ar,
&fops_pktlog_filter);
+ debugfs_create_file("fw_dbglog_config", 0600,
+ ar->debug.debugfs_pdev, ar,
+ &fops_fw_dbglog);
if (ar->hw->wiphy->bands[NL80211_BAND_5GHZ]) {
debugfs_create_file("dfs_simulate_radar", 0200,
@@ -1146,9 +1420,250 @@ int ath11k_debugfs_register(struct ath11k *ar)
&ar->dfs_block_radar_events);
}
+ if (ab->hw_params.dbr_debug_support)
+ debugfs_create_file("enable_dbr_debug", 0200, ar->debug.debugfs_pdev,
+ ar, &fops_dbr_debug);
+
return 0;
}
void ath11k_debugfs_unregister(struct ath11k *ar)
{
+ struct ath11k_debug_dbr *dbr_debug;
+ struct ath11k_dbg_dbr_data *dbr_dbg_data;
+ int i;
+
+ for (i = 0; i < WMI_DIRECT_BUF_MAX; i++) {
+ dbr_debug = ar->debug.dbr_debug[i];
+ if (!dbr_debug)
+ continue;
+
+ dbr_dbg_data = &dbr_debug->dbr_dbg_data;
+ kfree(dbr_dbg_data->entries);
+ debugfs_remove_recursive(dbr_debug->dbr_debugfs);
+ kfree(dbr_debug);
+ ar->debug.dbr_debug[i] = NULL;
+ }
+}
+
+static ssize_t ath11k_write_twt_add_dialog(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_vif *arvif = file->private_data;
+ struct wmi_twt_add_dialog_params params = { 0 };
+ u8 buf[128] = {0};
+ int ret;
+
+ if (arvif->ar->twt_enabled == 0) {
+ ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (ret < 0)
+ return ret;
+
+ buf[ret] = '\0';
+ ret = sscanf(buf,
+ "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u %u %u %u %u %hhu %hhu %hhu %hhu %hhu",
+ &params.peer_macaddr[0],
+ &params.peer_macaddr[1],
+ &params.peer_macaddr[2],
+ &params.peer_macaddr[3],
+ &params.peer_macaddr[4],
+ &params.peer_macaddr[5],
+ &params.dialog_id,
+ &params.wake_intvl_us,
+ &params.wake_intvl_mantis,
+ &params.wake_dura_us,
+ &params.sp_offset_us,
+ &params.twt_cmd,
+ &params.flag_bcast,
+ &params.flag_trigger,
+ &params.flag_flow_type,
+ &params.flag_protection);
+ if (ret != 16)
+ return -EINVAL;
+
+ params.vdev_id = arvif->vdev_id;
+
+ ret = ath11k_wmi_send_twt_add_dialog_cmd(arvif->ar, &params);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t ath11k_write_twt_del_dialog(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_vif *arvif = file->private_data;
+ struct wmi_twt_del_dialog_params params = { 0 };
+ u8 buf[64] = {0};
+ int ret;
+
+ if (arvif->ar->twt_enabled == 0) {
+ ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (ret < 0)
+ return ret;
+
+ buf[ret] = '\0';
+ ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u",
+ &params.peer_macaddr[0],
+ &params.peer_macaddr[1],
+ &params.peer_macaddr[2],
+ &params.peer_macaddr[3],
+ &params.peer_macaddr[4],
+ &params.peer_macaddr[5],
+ &params.dialog_id);
+ if (ret != 7)
+ return -EINVAL;
+
+ params.vdev_id = arvif->vdev_id;
+
+ ret = ath11k_wmi_send_twt_del_dialog_cmd(arvif->ar, &params);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t ath11k_write_twt_pause_dialog(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_vif *arvif = file->private_data;
+ struct wmi_twt_pause_dialog_params params = { 0 };
+ u8 buf[64] = {0};
+ int ret;
+
+ if (arvif->ar->twt_enabled == 0) {
+ ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (ret < 0)
+ return ret;
+
+ buf[ret] = '\0';
+ ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u",
+ &params.peer_macaddr[0],
+ &params.peer_macaddr[1],
+ &params.peer_macaddr[2],
+ &params.peer_macaddr[3],
+ &params.peer_macaddr[4],
+ &params.peer_macaddr[5],
+ &params.dialog_id);
+ if (ret != 7)
+ return -EINVAL;
+
+ params.vdev_id = arvif->vdev_id;
+
+ ret = ath11k_wmi_send_twt_pause_dialog_cmd(arvif->ar, &params);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t ath11k_write_twt_resume_dialog(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_vif *arvif = file->private_data;
+ struct wmi_twt_resume_dialog_params params = { 0 };
+ u8 buf[64] = {0};
+ int ret;
+
+ if (arvif->ar->twt_enabled == 0) {
+ ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (ret < 0)
+ return ret;
+
+ buf[ret] = '\0';
+ ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u %u %u",
+ &params.peer_macaddr[0],
+ &params.peer_macaddr[1],
+ &params.peer_macaddr[2],
+ &params.peer_macaddr[3],
+ &params.peer_macaddr[4],
+ &params.peer_macaddr[5],
+ &params.dialog_id,
+ &params.sp_offset_us,
+ &params.next_twt_size);
+ if (ret != 9)
+ return -EINVAL;
+
+ params.vdev_id = arvif->vdev_id;
+
+ ret = ath11k_wmi_send_twt_resume_dialog_cmd(arvif->ar, &params);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations ath11k_fops_twt_add_dialog = {
+ .write = ath11k_write_twt_add_dialog,
+ .open = simple_open
+};
+
+static const struct file_operations ath11k_fops_twt_del_dialog = {
+ .write = ath11k_write_twt_del_dialog,
+ .open = simple_open
+};
+
+static const struct file_operations ath11k_fops_twt_pause_dialog = {
+ .write = ath11k_write_twt_pause_dialog,
+ .open = simple_open
+};
+
+static const struct file_operations ath11k_fops_twt_resume_dialog = {
+ .write = ath11k_write_twt_resume_dialog,
+ .open = simple_open
+};
+
+int ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
+{
+ if (arvif->vif->type == NL80211_IFTYPE_AP && !arvif->debugfs_twt) {
+ arvif->debugfs_twt = debugfs_create_dir("twt",
+ arvif->vif->debugfs_dir);
+ if (!arvif->debugfs_twt || IS_ERR(arvif->debugfs_twt)) {
+ ath11k_warn(arvif->ar->ab,
+ "failed to create directory %p\n",
+ arvif->debugfs_twt);
+ arvif->debugfs_twt = NULL;
+ return -1;
+ }
+
+ debugfs_create_file("add_dialog", 0200, arvif->debugfs_twt,
+ arvif, &ath11k_fops_twt_add_dialog);
+
+ debugfs_create_file("del_dialog", 0200, arvif->debugfs_twt,
+ arvif, &ath11k_fops_twt_del_dialog);
+
+ debugfs_create_file("pause_dialog", 0200, arvif->debugfs_twt,
+ arvif, &ath11k_fops_twt_pause_dialog);
+
+ debugfs_create_file("resume_dialog", 0200, arvif->debugfs_twt,
+ arvif, &ath11k_fops_twt_resume_dialog);
+ }
+ return 0;
+}
+
+void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
+{
+ debugfs_remove_recursive(arvif->debugfs_twt);
+ arvif->debugfs_twt = NULL;
}
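Editor's note: each TWT handler above parses a space-separated line that starts with the peer MAC address; add_dialog expects 16 fields in total, matching its sscanf() format. A hedged userspace sketch of driving add_dialog follows; the debugfs path and the parameter values are illustrative only and depend on the phy/netdev naming on the running system.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* assumed mac80211 vif debugfs location; adjust phy0/wlan0 as needed */
	const char *path =
		"/sys/kernel/debug/ieee80211/phy0/netdev:wlan0/twt/add_dialog";
	/* mac dialog_id wake_intvl_us wake_intvl_mantis wake_dura_us
	 * sp_offset_us twt_cmd flag_bcast flag_trigger flag_flow_type
	 * flag_protection -- 16 fields, matching the sscanf() above
	 */
	const char *cmd = "aa:bb:cc:dd:ee:ff 1 102400 100 8192 0 4 0 0 0 0";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}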
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h
index 4c0740394c95..30c00cb28311 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs.h
@@ -47,6 +47,36 @@ enum ath11k_dbg_htt_ext_stats_type {
ATH11K_DBG_HTT_NUM_EXT_STATS,
};
+#define ATH11K_DEBUG_DBR_ENTRIES_MAX 512
+
+enum ath11k_dbg_dbr_event {
+ ATH11K_DBG_DBR_EVENT_INVALID,
+ ATH11K_DBG_DBR_EVENT_RX,
+ ATH11K_DBG_DBR_EVENT_REPLENISH,
+ ATH11K_DBG_DBR_EVENT_MAX,
+};
+
+struct ath11k_dbg_dbr_entry {
+ u32 hp;
+ u32 tp;
+ u64 timestamp;
+ enum ath11k_dbg_dbr_event event;
+};
+
+struct ath11k_dbg_dbr_data {
+ /* protects ath11k_db_ring_debug data */
+ spinlock_t lock;
+ struct ath11k_dbg_dbr_entry *entries;
+ u32 dbr_debug_idx;
+ u32 num_ring_debug_entries;
+};
+
+struct ath11k_debug_dbr {
+ struct ath11k_dbg_dbr_data dbr_dbg_data;
+ struct dentry *dbr_debugfs;
+ bool dbr_debug_enabled;
+};
+
struct debug_htt_stats_req {
bool done;
u8 pdev_id;
@@ -88,6 +118,7 @@ enum ath11k_pktlog_mode {
};
enum ath11k_pktlog_enum {
+ ATH11K_PKTLOG_TYPE_INVALID = 0,
ATH11K_PKTLOG_TYPE_TX_CTRL = 1,
ATH11K_PKTLOG_TYPE_TX_STAT = 2,
ATH11K_PKTLOG_TYPE_TX_MSDU_ID = 3,
@@ -107,6 +138,130 @@ enum ath11k_dbg_aggr_mode {
ATH11K_DBG_AGGR_MODE_MAX,
};
+enum fw_dbglog_wlan_module_id {
+ WLAN_MODULE_ID_MIN = 0,
+ WLAN_MODULE_INF = WLAN_MODULE_ID_MIN,
+ WLAN_MODULE_WMI,
+ WLAN_MODULE_STA_PWRSAVE,
+ WLAN_MODULE_WHAL,
+ WLAN_MODULE_COEX,
+ WLAN_MODULE_ROAM,
+ WLAN_MODULE_RESMGR_CHAN_MANAGER,
+ WLAN_MODULE_RESMGR,
+ WLAN_MODULE_VDEV_MGR,
+ WLAN_MODULE_SCAN,
+ WLAN_MODULE_RATECTRL,
+ WLAN_MODULE_AP_PWRSAVE,
+ WLAN_MODULE_BLOCKACK,
+ WLAN_MODULE_MGMT_TXRX,
+ WLAN_MODULE_DATA_TXRX,
+ WLAN_MODULE_HTT,
+ WLAN_MODULE_HOST,
+ WLAN_MODULE_BEACON,
+ WLAN_MODULE_OFFLOAD,
+ WLAN_MODULE_WAL,
+ WLAN_WAL_MODULE_DE,
+ WLAN_MODULE_PCIELP,
+ WLAN_MODULE_RTT,
+ WLAN_MODULE_RESOURCE,
+ WLAN_MODULE_DCS,
+ WLAN_MODULE_CACHEMGR,
+ WLAN_MODULE_ANI,
+ WLAN_MODULE_P2P,
+ WLAN_MODULE_CSA,
+ WLAN_MODULE_NLO,
+ WLAN_MODULE_CHATTER,
+ WLAN_MODULE_WOW,
+ WLAN_MODULE_WAL_VDEV,
+ WLAN_MODULE_WAL_PDEV,
+ WLAN_MODULE_TEST,
+ WLAN_MODULE_STA_SMPS,
+ WLAN_MODULE_SWBMISS,
+ WLAN_MODULE_WMMAC,
+ WLAN_MODULE_TDLS,
+ WLAN_MODULE_HB,
+ WLAN_MODULE_TXBF,
+ WLAN_MODULE_BATCH_SCAN,
+ WLAN_MODULE_THERMAL_MGR,
+ WLAN_MODULE_PHYERR_DFS,
+ WLAN_MODULE_RMC,
+ WLAN_MODULE_STATS,
+ WLAN_MODULE_NAN,
+ WLAN_MODULE_IBSS_PWRSAVE,
+ WLAN_MODULE_HIF_UART,
+ WLAN_MODULE_LPI,
+ WLAN_MODULE_EXTSCAN,
+ WLAN_MODULE_UNIT_TEST,
+ WLAN_MODULE_MLME,
+ WLAN_MODULE_SUPPL,
+ WLAN_MODULE_ERE,
+ WLAN_MODULE_OCB,
+ WLAN_MODULE_RSSI_MONITOR,
+ WLAN_MODULE_WPM,
+ WLAN_MODULE_CSS,
+ WLAN_MODULE_PPS,
+ WLAN_MODULE_SCAN_CH_PREDICT,
+ WLAN_MODULE_MAWC,
+ WLAN_MODULE_CMC_QMIC,
+ WLAN_MODULE_EGAP,
+ WLAN_MODULE_NAN20,
+ WLAN_MODULE_QBOOST,
+ WLAN_MODULE_P2P_LISTEN_OFFLOAD,
+ WLAN_MODULE_HALPHY,
+ WLAN_WAL_MODULE_ENQ,
+ WLAN_MODULE_GNSS,
+ WLAN_MODULE_WAL_MEM,
+ WLAN_MODULE_SCHED_ALGO,
+ WLAN_MODULE_TX,
+ WLAN_MODULE_RX,
+ WLAN_MODULE_WLM,
+ WLAN_MODULE_RU_ALLOCATOR,
+ WLAN_MODULE_11K_OFFLOAD,
+ WLAN_MODULE_STA_TWT,
+ WLAN_MODULE_AP_TWT,
+ WLAN_MODULE_UL_OFDMA,
+ WLAN_MODULE_HPCS_PULSE,
+ WLAN_MODULE_DTF,
+ WLAN_MODULE_QUIET_IE,
+ WLAN_MODULE_SHMEM_MGR,
+ WLAN_MODULE_CFIR,
+ WLAN_MODULE_CODE_COVER,
+ WLAN_MODULE_SHO,
+ WLAN_MODULE_MLO_MGR,
+ WLAN_MODULE_PEER_INIT,
+ WLAN_MODULE_STA_MLO_PS,
+
+ WLAN_MODULE_ID_MAX,
+ WLAN_MODULE_ID_INVALID = WLAN_MODULE_ID_MAX,
+};
+
+enum fw_dbglog_log_level {
+ ATH11K_FW_DBGLOG_ML = 0,
+ ATH11K_FW_DBGLOG_VERBOSE = 0,
+ ATH11K_FW_DBGLOG_INFO,
+ ATH11K_FW_DBGLOG_INFO_LVL_1,
+ ATH11K_FW_DBGLOG_INFO_LVL_2,
+ ATH11K_FW_DBGLOG_WARN,
+ ATH11K_FW_DBGLOG_ERR,
+ ATH11K_FW_DBGLOG_LVL_MAX
+};
+
+struct ath11k_fw_dbglog {
+ enum wmi_debug_log_param param;
+ union {
+ struct {
+ /* log_level values are given in enum fw_dbglog_log_level */
+ u16 log_level;
+ /* module_id values are given in enum fw_dbglog_wlan_module_id */
+ u16 module_id;
+ };
+ /* value carries log_level and module_id together, a vdev_id, a vdev_id
+ * bitmap, or a bare log_level, depending on param
+ */
+ u32 value;
+ };
+};
+
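Editor's note: the union above lets the same 32-bit value either be addressed as a whole or as a log_level/module_id pair. A small sketch of that aliasing, assuming a little-endian host and mirroring the layout with fixed-width userspace types (not the driver's own code):

#include <stdio.h>
#include <stdint.h>

struct fw_dbglog {
	union {
		struct {
			uint16_t log_level; /* enum fw_dbglog_log_level */
			uint16_t module_id; /* enum fw_dbglog_wlan_module_id */
		};
		uint32_t value;
	};
};

int main(void)
{
	struct fw_dbglog d = { 0 };

	d.log_level = 1; /* ATH11K_FW_DBGLOG_INFO */
	d.module_id = 1; /* WLAN_MODULE_WMI */

	/* On a little-endian host the two u16 fields alias the low/high
	 * halves of value, so one 32-bit word carries both settings.
	 */
	printf("value = 0x%08x\n", d.value);
	return 0;
}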
#ifdef CONFIG_ATH11K_DEBUGFS
int ath11k_debugfs_soc_create(struct ath11k_base *ab);
void ath11k_debugfs_soc_destroy(struct ath11k_base *ab);
@@ -151,6 +306,13 @@ static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
return ar->debug.rx_filter;
}
+int ath11k_debugfs_add_interface(struct ath11k_vif *arvif);
+void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif);
+void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
+ enum wmi_direct_buffer_module id,
+ enum ath11k_dbg_dbr_event event,
+ struct hal_srng *srng);
+
#else
static inline int ath11k_debugfs_soc_create(struct ath11k_base *ab)
{
@@ -224,6 +386,22 @@ static inline int ath11k_debugfs_get_fw_stats(struct ath11k *ar,
return 0;
}
-#endif /* CONFIG_MAC80211_DEBUGFS*/
+static inline int ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
+{
+ return 0;
+}
+
+static inline void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
+{
+}
+
+static inline void
+ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
+ enum wmi_direct_buffer_module id,
+ enum ath11k_dbg_dbr_event event,
+ struct hal_srng *srng)
+{
+}
+#endif /* CONFIG_ATH11K_DEBUGFS*/
#endif /* _ATH11K_DEBUGFS_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index 409d6cc5a1d5..e9dfa209098b 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -115,6 +115,8 @@ struct ath11k_pdev_mon_stats {
u32 dest_mpdu_drop;
u32 dup_mon_linkdesc_cnt;
u32 dup_mon_buf_cnt;
+ u32 dest_mon_stuck;
+ u32 dest_mon_not_reaped;
};
struct dp_full_mon_mpdu {
@@ -167,6 +169,7 @@ struct ath11k_mon_data {
struct ath11k_pdev_dp {
u32 mac_id;
+ u32 mon_dest_ring_stuck_cnt;
atomic_t num_tx_pending;
wait_queue_head_t tx_empty_waitq;
struct dp_rxdma_ring rx_refill_buf_ring;
@@ -1170,12 +1173,12 @@ struct ath11k_htt_ppdu_stats_msg {
u32 ppdu_id;
u32 timestamp;
u32 rsvd;
- u8 data[0];
+ u8 data[];
} __packed;
struct htt_tlv {
u32 header;
- u8 value[0];
+ u8 value[];
} __packed;
#define HTT_TLV_TAG GENMASK(11, 0)
@@ -1362,7 +1365,7 @@ struct htt_ppdu_stats_usr_cmn_array {
* tx_ppdu_stats_info is variable length, with length =
* number_of_ppdu_stats * sizeof (struct htt_tx_ppdu_stats_info)
*/
- struct htt_tx_ppdu_stats_info tx_ppdu_info[0];
+ struct htt_tx_ppdu_stats_info tx_ppdu_info[];
} __packed;
struct htt_ppdu_user_stats {
@@ -1424,7 +1427,7 @@ struct htt_ppdu_stats_info {
*/
struct htt_pktlog_msg {
u32 hdr;
- u8 payload[0];
+ u8 payload[];
};
/**
@@ -1645,7 +1648,7 @@ struct ath11k_htt_extd_stats_msg {
u32 info0;
u64 cookie;
u32 info1;
- u8 data[0];
+ u8 data[];
} __packed;
#define HTT_MAC_ADDR_L32_0 GENMASK(7, 0)
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index c212a789421e..049774cc158c 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -43,6 +43,13 @@ static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
}
static inline
+bool ath11k_dp_rx_h_msdu_start_ldpc_support(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_ldpc_support(desc);
+}
+
+static inline
u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
@@ -2313,7 +2320,7 @@ static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
u8 bw;
u8 rate_mcs, nss;
u8 sgi;
- bool is_cck;
+ bool is_cck, is_ldpc;
pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc);
bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc);
@@ -2355,6 +2362,9 @@ static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
if (sgi)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ is_ldpc = ath11k_dp_rx_h_msdu_start_ldpc_support(ar->ab, rx_desc);
+ if (is_ldpc)
+ rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
break;
case RX_MSDU_START_PKT_TYPE_11AX:
rx_status->rate_idx = rate_mcs;
@@ -2642,9 +2652,9 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
spin_lock_bh(&srng->lock);
+try_again:
ath11k_hal_srng_access_begin(ab, srng);
-try_again:
while (likely(desc =
(struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
srng))) {
@@ -3080,79 +3090,6 @@ move_next:
return num_buffs_reaped;
}
-int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
- struct napi_struct *napi, int budget)
-{
- struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
- enum hal_rx_mon_status hal_status;
- struct sk_buff *skb;
- struct sk_buff_head skb_list;
- struct hal_rx_mon_ppdu_info ppdu_info;
- struct ath11k_peer *peer;
- struct ath11k_sta *arsta;
- int num_buffs_reaped = 0;
- u32 rx_buf_sz;
- u16 log_type = 0;
-
- __skb_queue_head_init(&skb_list);
-
- num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
- &skb_list);
- if (!num_buffs_reaped)
- goto exit;
-
- memset(&ppdu_info, 0, sizeof(ppdu_info));
- ppdu_info.peer_id = HAL_INVALID_PEERID;
-
- while ((skb = __skb_dequeue(&skb_list))) {
- if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
- log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
- rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
- } else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
- log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
- rx_buf_sz = DP_RX_BUFFER_SIZE;
- }
-
- if (log_type)
- trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
-
- hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb);
-
- if (ppdu_info.peer_id == HAL_INVALID_PEERID ||
- hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
- dev_kfree_skb_any(skb);
- continue;
- }
-
- rcu_read_lock();
- spin_lock_bh(&ab->base_lock);
- peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id);
-
- if (!peer || !peer->sta) {
- ath11k_dbg(ab, ATH11K_DBG_DATA,
- "failed to find the peer with peer_id %d\n",
- ppdu_info.peer_id);
- goto next_skb;
- }
-
- arsta = (struct ath11k_sta *)peer->sta->drv_priv;
- ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info);
-
- if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
- trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
-
-next_skb:
- spin_unlock_bh(&ab->base_lock);
- rcu_read_unlock();
-
- dev_kfree_skb_any(skb);
- memset(&ppdu_info, 0, sizeof(ppdu_info));
- ppdu_info.peer_id = HAL_INVALID_PEERID;
- }
-exit:
- return num_buffs_reaped;
-}
-
static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
{
struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
@@ -4870,7 +4807,6 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
{
struct ath11k_base *ab = ar->ab;
struct sk_buff *msdu, *prev_buf;
- u32 wifi_hdr_len;
struct hal_rx_desc *rx_desc;
char *hdr_desc;
u8 *dest, decap_format;
@@ -4912,38 +4848,27 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
- __le16 qos_field;
u8 qos_pkt = 0;
rx_desc = (struct hal_rx_desc *)head_msdu->data;
hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
/* Base size */
- wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr);
wh = (struct ieee80211_hdr_3addr *)hdr_desc;
- if (ieee80211_is_data_qos(wh->frame_control)) {
- struct ieee80211_qos_hdr *qwh =
- (struct ieee80211_qos_hdr *)hdr_desc;
-
- qos_field = qwh->qos_ctrl;
+ if (ieee80211_is_data_qos(wh->frame_control))
qos_pkt = 1;
- }
+
msdu = head_msdu;
while (msdu) {
- rx_desc = (struct hal_rx_desc *)msdu->data;
- hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
-
+ ath11k_dp_rx_msdus_set_payload(ar, msdu);
if (qos_pkt) {
dest = skb_push(msdu, sizeof(__le16));
if (!dest)
goto err_merge_fail;
- memcpy(dest, hdr_desc, wifi_hdr_len);
- memcpy(dest + wifi_hdr_len,
- (u8 *)&qos_field, sizeof(__le16));
+ memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr));
}
- ath11k_dp_rx_msdus_set_payload(ar, msdu);
prev_buf = msdu;
msdu = msdu->next;
}
@@ -4967,8 +4892,98 @@ err_merge_fail:
return NULL;
}
+static void
+ath11k_dp_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status,
+ u8 *rtap_buf)
+{
+ u32 rtap_len = 0;
+
+ put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
+}
+
+static void
+ath11k_dp_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status,
+ u8 *rtap_buf)
+{
+ u32 rtap_len = 0;
+
+ put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[0];
+ rtap_len += 1;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[1];
+ rtap_len += 1;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[2];
+ rtap_len += 1;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[3];
+}
+
+static void ath11k_update_radiotap(struct ath11k *ar,
+ struct hal_rx_mon_ppdu_info *ppduinfo,
+ struct sk_buff *mon_skb,
+ struct ieee80211_rx_status *rxs)
+{
+ struct ieee80211_supported_band *sband;
+ u8 *ptr = NULL;
+
+ rxs->flag |= RX_FLAG_MACTIME_START;
+ rxs->signal = ppduinfo->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;
+
+ if (ppduinfo->nss)
+ rxs->nss = ppduinfo->nss;
+
+ if (ppduinfo->he_mu_flags) {
+ rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
+ rxs->encoding = RX_ENC_HE;
+ ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
+ ath11k_dp_rx_update_radiotap_he_mu(ppduinfo, ptr);
+ } else if (ppduinfo->he_flags) {
+ rxs->flag |= RX_FLAG_RADIOTAP_HE;
+ rxs->encoding = RX_ENC_HE;
+ ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he));
+ ath11k_dp_rx_update_radiotap_he(ppduinfo, ptr);
+ rxs->rate_idx = ppduinfo->rate;
+ } else if (ppduinfo->vht_flags) {
+ rxs->encoding = RX_ENC_VHT;
+ rxs->rate_idx = ppduinfo->rate;
+ } else if (ppduinfo->ht_flags) {
+ rxs->encoding = RX_ENC_HT;
+ rxs->rate_idx = ppduinfo->rate;
+ } else {
+ rxs->encoding = RX_ENC_LEGACY;
+ sband = &ar->mac.sbands[rxs->band];
+ rxs->rate_idx = ath11k_mac_hw_rate_to_idx(sband, ppduinfo->rate,
+ ppduinfo->cck_flag);
+ }
+
+ rxs->mactime = ppduinfo->tsft;
+}
+
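Editor's note: ath11k_dp_rx_update_radiotap_he() above serializes the six accumulated he_data words into the buffer pushed in front of the monitor frame, one little-endian 16-bit word each via put_unaligned_le16(). A userspace sketch of the same serialization (the data words are made up):

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for put_unaligned_le16(). */
static void put_le16(uint16_t v, uint8_t *p)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

int main(void)
{
	/* hypothetical he_data1..he_data6 words */
	uint16_t data[6] = { 0x0002, 0x003f, 0x1234, 0x0000, 0x00a5, 0x0101 };
	uint8_t rtap[12];

	for (int i = 0; i < 6; i++)
		put_le16(data[i], &rtap[2 * i]);

	/* 12 bytes, one little-endian word per he_data field, in order */
	for (int i = 0; i < 12; i++)
		printf("%02x ", rtap[i]);
	printf("\n");
	return 0;
}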
static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
struct sk_buff *head_msdu,
+ struct hal_rx_mon_ppdu_info *ppduinfo,
struct sk_buff *tail_msdu,
struct napi_struct *napi)
{
@@ -5003,7 +5018,7 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
} else {
rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
}
- rxs->flag |= RX_FLAG_ONLY_MONITOR;
+ ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs);
ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
mon_skb = skb_next;
@@ -5022,6 +5037,12 @@ mon_deliver_fail:
return -EINVAL;
}
+/* Destination ring processing is considered stuck if the destination ring
+ * does not move while the status ring advances through 16 PPDUs. As a
+ * workaround, the current destination ring PPDU is skipped.
+ */
+#define MON_DEST_RING_STUCK_MAX_CNT 16
+
static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
u32 quota, struct napi_struct *napi)
{
@@ -5035,6 +5056,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
u32 ring_id;
struct ath11k_pdev_mon_stats *rx_mon_stats;
u32 npackets = 0;
+ u32 mpdu_rx_bufs_used;
if (ar->ab->hw_params.rxdma1_enable)
ring_id = dp->rxdma_mon_dst_ring.ring_id;
@@ -5064,20 +5086,44 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
head_msdu = NULL;
tail_msdu = NULL;
- rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
- &head_msdu,
- &tail_msdu,
- &npackets, &ppdu_id);
+ mpdu_rx_bufs_used = ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
+ &head_msdu,
+ &tail_msdu,
+ &npackets, &ppdu_id);
+
+ rx_bufs_used += mpdu_rx_bufs_used;
+
+ if (mpdu_rx_bufs_used) {
+ dp->mon_dest_ring_stuck_cnt = 0;
+ } else {
+ dp->mon_dest_ring_stuck_cnt++;
+ rx_mon_stats->dest_mon_not_reaped++;
+ }
+
+ if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
+ rx_mon_stats->dest_mon_stuck++;
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
+ pmon->mon_ppdu_info.ppdu_id, ppdu_id,
+ dp->mon_dest_ring_stuck_cnt,
+ rx_mon_stats->dest_mon_not_reaped,
+ rx_mon_stats->dest_mon_stuck);
+ pmon->mon_ppdu_info.ppdu_id = ppdu_id;
+ continue;
+ }
if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
- "dest_rx: new ppdu_id %x != status ppdu_id %x",
- ppdu_id, pmon->mon_ppdu_info.ppdu_id);
+ "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
+ ppdu_id, pmon->mon_ppdu_info.ppdu_id,
+ rx_mon_stats->dest_mon_not_reaped,
+ rx_mon_stats->dest_mon_stuck);
break;
}
if (head_msdu && tail_msdu) {
ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
+ &pmon->mon_ppdu_info,
tail_msdu, napi);
rx_mon_stats->dest_mpdu_done++;
}
@@ -5106,36 +5152,92 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
}
}
-static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
- int mac_id, u32 quota,
- struct napi_struct *napi)
+int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget)
{
- struct ath11k_pdev_dp *dp = &ar->dp;
- struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
- struct hal_rx_mon_ppdu_info *ppdu_info;
- struct sk_buff *status_skb;
- u32 tlv_status = HAL_TLV_STATUS_BUF_DONE;
- struct ath11k_pdev_mon_stats *rx_mon_stats;
+ struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
+ enum hal_rx_mon_status hal_status;
+ struct sk_buff *skb;
+ struct sk_buff_head skb_list;
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
+ int num_buffs_reaped = 0;
+ u32 rx_buf_sz;
+ u16 log_type;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&ar->dp.mon_data;
+ struct ath11k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
+ struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
- ppdu_info = &pmon->mon_ppdu_info;
- rx_mon_stats = &pmon->rx_mon_stats;
+ __skb_queue_head_init(&skb_list);
- if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START)
- return;
+ num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
+ &skb_list);
+ if (!num_buffs_reaped)
+ goto exit;
- while (!skb_queue_empty(&pmon->rx_status_q)) {
- status_skb = skb_dequeue(&pmon->rx_status_q);
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
- tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info,
- status_skb);
- if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
+ while ((skb = __skb_dequeue(&skb_list))) {
+ if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
+ log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
+ rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
+ } else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
+ log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
+ rx_buf_sz = DP_RX_BUFFER_SIZE;
+ } else {
+ log_type = ATH11K_PKTLOG_TYPE_INVALID;
+ rx_buf_sz = 0;
+ }
+
+ if (log_type != ATH11K_PKTLOG_TYPE_INVALID)
+ trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
+
+ memset(ppdu_info, 0, sizeof(struct hal_rx_mon_ppdu_info));
+ hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb);
+
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
+ pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
+ hal_status == HAL_TLV_STATUS_PPDU_DONE) {
rx_mon_stats->status_ppdu_done++;
pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
- ath11k_dp_rx_mon_dest_process(ar, mac_id, quota, napi);
+ ath11k_dp_rx_mon_dest_process(ar, mac_id, budget, napi);
pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
}
- dev_kfree_skb_any(status_skb);
+
+ if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
+ hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ rcu_read_lock();
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id);
+
+ if (!peer || !peer->sta) {
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
+ "failed to find the peer with peer_id %d\n",
+ ppdu_info->peer_id);
+ goto next_skb;
+ }
+
+ arsta = (struct ath11k_sta *)peer->sta->drv_priv;
+ ath11k_dp_rx_update_peer_stats(arsta, ppdu_info);
+
+ if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
+ trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
+
+next_skb:
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+
+ dev_kfree_skb_any(skb);
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
}
+exit:
+ return num_buffs_reaped;
}
static u32
@@ -5352,6 +5454,7 @@ static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar,
tail_msdu = mon_mpdu->tail;
if (head_msdu && tail_msdu) {
ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu,
+ &pmon->mon_ppdu_info,
tail_msdu, napi);
rx_mon_stats->dest_mpdu_done++;
ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n");
@@ -5489,22 +5592,6 @@ reap_status_ring:
return quota;
}
-static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
- struct napi_struct *napi, int budget)
-{
- struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
- struct ath11k_pdev_dp *dp = &ar->dp;
- struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
- int num_buffs_reaped = 0;
-
- num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, mac_id, &budget,
- &pmon->rx_status_q);
- if (num_buffs_reaped)
- ath11k_dp_rx_mon_status_process_tlv(ar, mac_id, budget, napi);
-
- return num_buffs_reaped;
-}
-
int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
struct napi_struct *napi, int budget)
{
@@ -5514,8 +5601,6 @@ int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
ab->hw_params.full_monitor_mode)
ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget);
- else if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags))
- ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
else
ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index 91d6244b6543..00a45819907e 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -351,7 +351,8 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
ts->ack_rssi;
- info->status.is_valid_ack_signal = true;
+ info->status.flags |=
+ IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
} else {
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
}
@@ -426,7 +427,7 @@ void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts)
struct ath11k_sta *arsta;
struct ieee80211_sta *sta;
u16 rate, ru_tones;
- u8 mcs, rate_idx, ofdma;
+ u8 mcs, rate_idx = 0, ofdma;
int ret;
spin_lock_bh(&ab->base_lock);
@@ -518,9 +519,13 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
struct sk_buff *msdu,
struct hal_tx_status *ts)
{
+ struct ieee80211_tx_status status = { 0 };
struct ath11k_base *ab = ar->ab;
struct ieee80211_tx_info *info;
struct ath11k_skb_cb *skb_cb;
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
+ struct rate_info rate;
if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
/* Must not happen */
@@ -552,7 +557,7 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
ts->ack_rssi;
- info->status.is_valid_ack_signal = true;
+ info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
}
if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
@@ -583,12 +588,26 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
ath11k_dp_tx_cache_peer_stats(ar, msdu, ts);
}
- /* NOTE: Tx rate status reporting. Tx completion status does not have
- * necessary information (for example nss) to build the tx rate.
- * Might end up reporting it out-of-band from HTT stats.
- */
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, ts->peer_id);
+ if (!peer || !peer->sta) {
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
+ "dp_tx: failed to find the peer with peer_id %d\n",
+ ts->peer_id);
+ spin_unlock_bh(&ab->base_lock);
+ dev_kfree_skb_any(msdu);
+ return;
+ }
+ arsta = (struct ath11k_sta *)peer->sta->drv_priv;
+ status.sta = peer->sta;
+ status.skb = msdu;
+ status.info = info;
+ rate = arsta->last_txrate;
+ status.rate = &rate;
- ieee80211_tx_status(ar->hw, msdu);
+ spin_unlock_bh(&ab->base_lock);
+
+ ieee80211_tx_status_ext(ar->hw, &status);
}
static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
diff --git a/drivers/net/wireless/ath/ath11k/hal_desc.h b/drivers/net/wireless/ath/ath11k/hal_desc.h
index 406767672844..24e72e75a8c7 100644
--- a/drivers/net/wireless/ath/ath11k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath11k/hal_desc.h
@@ -474,6 +474,7 @@ enum hal_tlv_tag {
#define HAL_TLV_HDR_TAG GENMASK(9, 1)
#define HAL_TLV_HDR_LEN GENMASK(25, 10)
+#define HAL_TLV_USR_ID GENMASK(31, 26)
#define HAL_TLV_ALIGN 4
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
index a3b353a4b5f7..4bb1fbaed0c9 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -453,10 +453,12 @@ void ath11k_hal_reo_status_queue_stats(struct ath11k_base *ab, u32 *reo_desc,
desc->info0));
ath11k_dbg(ab, ATH11k_DBG_HAL, "pn = [%08x, %08x, %08x, %08x]\n",
desc->pn[0], desc->pn[1], desc->pn[2], desc->pn[3]);
- ath11k_dbg(ab, ATH11k_DBG_HAL, "last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n",
+ ath11k_dbg(ab, ATH11k_DBG_HAL,
+ "last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n",
desc->last_rx_enqueue_timestamp,
desc->last_rx_dequeue_timestamp);
- ath11k_dbg(ab, ATH11k_DBG_HAL, "rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n",
+ ath11k_dbg(ab, ATH11k_DBG_HAL,
+ "rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n",
desc->rx_bitmap[0], desc->rx_bitmap[1], desc->rx_bitmap[2],
desc->rx_bitmap[3], desc->rx_bitmap[4], desc->rx_bitmap[5],
desc->rx_bitmap[6], desc->rx_bitmap[7]);
@@ -802,12 +804,75 @@ void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
}
}
+#define HAL_MAX_UL_MU_USERS 37
+static inline void
+ath11k_hal_rx_handle_ofdma_info(void *rx_tlv,
+ struct hal_rx_user_status *rx_user_status)
+{
+ struct hal_rx_ppdu_end_user_stats *ppdu_end_user =
+ (struct hal_rx_ppdu_end_user_stats *)rx_tlv;
+
+ rx_user_status->ul_ofdma_user_v0_word0 = __le32_to_cpu(ppdu_end_user->info6);
+
+ rx_user_status->ul_ofdma_user_v0_word1 = __le32_to_cpu(ppdu_end_user->rsvd2[10]);
+}
+
+static inline void
+ath11k_hal_rx_populate_byte_count(void *rx_tlv, void *ppduinfo,
+ struct hal_rx_user_status *rx_user_status)
+{
+ struct hal_rx_ppdu_end_user_stats *ppdu_end_user =
+ (struct hal_rx_ppdu_end_user_stats *)rx_tlv;
+
+ rx_user_status->mpdu_ok_byte_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_RSVD2_6_MPDU_OK_BYTE_COUNT,
+ __le32_to_cpu(ppdu_end_user->rsvd2[6]));
+ rx_user_status->mpdu_err_byte_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_RSVD2_8_MPDU_ERR_BYTE_COUNT,
+ __le32_to_cpu(ppdu_end_user->rsvd2[8]));
+}
+
+static inline void
+ath11k_hal_rx_populate_mu_user_info(void *rx_tlv, struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct hal_rx_user_status *rx_user_status)
+{
+ rx_user_status->ast_index = ppdu_info->ast_index;
+ rx_user_status->tid = ppdu_info->tid;
+ rx_user_status->tcp_msdu_count =
+ ppdu_info->tcp_msdu_count;
+ rx_user_status->udp_msdu_count =
+ ppdu_info->udp_msdu_count;
+ rx_user_status->other_msdu_count =
+ ppdu_info->other_msdu_count;
+ rx_user_status->frame_control = ppdu_info->frame_control;
+ rx_user_status->frame_control_info_valid =
+ ppdu_info->frame_control_info_valid;
+ rx_user_status->data_sequence_control_info_valid =
+ ppdu_info->data_sequence_control_info_valid;
+ rx_user_status->first_data_seq_ctrl =
+ ppdu_info->first_data_seq_ctrl;
+ rx_user_status->preamble_type = ppdu_info->preamble_type;
+ rx_user_status->ht_flags = ppdu_info->ht_flags;
+ rx_user_status->vht_flags = ppdu_info->vht_flags;
+ rx_user_status->he_flags = ppdu_info->he_flags;
+ rx_user_status->rs_flags = ppdu_info->rs_flags;
+
+ rx_user_status->mpdu_cnt_fcs_ok =
+ ppdu_info->num_mpdu_fcs_ok;
+ rx_user_status->mpdu_cnt_fcs_err =
+ ppdu_info->num_mpdu_fcs_err;
+
+ ath11k_hal_rx_populate_byte_count(rx_tlv, ppdu_info, rx_user_status);
+}
+
static enum hal_rx_mon_status
ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
struct hal_rx_mon_ppdu_info *ppdu_info,
- u32 tlv_tag, u8 *tlv_data)
+ u32 tlv_tag, u8 *tlv_data, u32 userid)
{
- u32 info0, info1;
+ u32 info0, info1, value;
+ u8 he_dcm = 0, he_stbc = 0;
+ u16 he_gi = 0, he_ltf = 0;
switch (tlv_tag) {
case HAL_RX_PPDU_START: {
@@ -828,6 +893,9 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
info0 = __le32_to_cpu(eu_stats->info0);
info1 = __le32_to_cpu(eu_stats->info1);
+ ppdu_info->ast_index =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX,
+ __le32_to_cpu(eu_stats->info2));
ppdu_info->tid =
ffs(FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP,
__le32_to_cpu(eu_stats->info6))) - 1;
@@ -851,6 +919,44 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
ppdu_info->num_mpdu_fcs_err =
FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR,
info0);
+ switch (ppdu_info->preamble_type) {
+ case HAL_RX_PREAMBLE_11N:
+ ppdu_info->ht_flags = 1;
+ break;
+ case HAL_RX_PREAMBLE_11AC:
+ ppdu_info->vht_flags = 1;
+ break;
+ case HAL_RX_PREAMBLE_11AX:
+ ppdu_info->he_flags = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (userid < HAL_MAX_UL_MU_USERS) {
+ struct hal_rx_user_status *rxuser_stats =
+ &ppdu_info->userstats;
+
+ ath11k_hal_rx_handle_ofdma_info(tlv_data, rxuser_stats);
+ ath11k_hal_rx_populate_mu_user_info(tlv_data, ppdu_info,
+ rxuser_stats);
+ }
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[0] =
+ __le32_to_cpu(eu_stats->rsvd1[0]);
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[1] =
+ __le32_to_cpu(eu_stats->rsvd1[1]);
+
+ break;
+ }
+ case HAL_RX_PPDU_END_USER_STATS_EXT: {
+ struct hal_rx_ppdu_end_user_stats_ext *eu_stats =
+ (struct hal_rx_ppdu_end_user_stats_ext *)tlv_data;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[2] = eu_stats->info1;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[3] = eu_stats->info2;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[4] = eu_stats->info3;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[5] = eu_stats->info4;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[6] = eu_stats->info5;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[7] = eu_stats->info6;
break;
}
case HAL_PHYRX_HT_SIG: {
@@ -949,50 +1055,151 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
else
ppdu_info->reception_type =
HAL_RX_RECEPTION_TYPE_MU_MIMO;
+ ppdu_info->vht_flag_values5 = group_id;
+ ppdu_info->vht_flag_values3[0] = (((ppdu_info->mcs) << 4) |
+ ppdu_info->nss);
+ ppdu_info->vht_flag_values2 = ppdu_info->bw;
+ ppdu_info->vht_flag_values4 =
+ FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING, info1);
break;
}
case HAL_PHYRX_HE_SIG_A_SU: {
struct hal_rx_he_sig_a_su_info *he_sig_a =
(struct hal_rx_he_sig_a_su_info *)tlv_data;
- u32 nsts, cp_ltf, dcm;
+ ppdu_info->he_flags = 1;
info0 = __le32_to_cpu(he_sig_a->info0);
info1 = __le32_to_cpu(he_sig_a->info1);
- ppdu_info->mcs =
- FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS,
- info0);
- ppdu_info->bw =
- FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW,
- info0);
- ppdu_info->ldpc = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING, info0);
- ppdu_info->is_stbc = info1 &
- HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC;
- ppdu_info->beamformed = info1 &
- HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF;
- dcm = info0 & HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM;
- cp_ltf = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE,
- info0);
- nsts = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND, info0);
- switch (cp_ltf) {
+ if (value == 0)
+ ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_TRIG;
+ else
+ ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU;
+
+ ppdu_info->he_data1 |=
+ IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN;
+
+ ppdu_info->he_data2 |=
+ IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN;
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR, info0);
+ ppdu_info->he_data3 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE, info0);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG, info0);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_UL_DL, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS, info0);
+ ppdu_info->mcs = value;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, value);
+
+ he_dcm = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM, info0);
+ ppdu_info->dcm = he_dcm;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM, he_dcm);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING, info1);
+ ppdu_info->ldpc = (value == HAL_RX_SU_MU_CODING_LDPC) ? 1 : 0;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA, info1);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG, value);
+ he_stbc = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC, info1);
+ ppdu_info->is_stbc = he_stbc;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_STBC, he_stbc);
+
+ /* data4 */
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE, info0);
+ ppdu_info->he_data4 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE, value);
+
+ /* data5 */
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW, info0);
+ ppdu_info->bw = value;
+ ppdu_info->he_data5 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE, info0);
+ switch (value) {
case 0:
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_1_X;
+ break;
case 1:
- ppdu_info->gi = HAL_RX_GI_0_8_US;
- break;
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_2_X;
+ break;
case 2:
- ppdu_info->gi = HAL_RX_GI_1_6_US;
- break;
+ he_gi = HE_GI_1_6;
+ he_ltf = HE_LTF_2_X;
+ break;
case 3:
- if (dcm && ppdu_info->is_stbc)
- ppdu_info->gi = HAL_RX_GI_0_8_US;
- else
- ppdu_info->gi = HAL_RX_GI_3_2_US;
- break;
+ if (he_dcm && he_stbc) {
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_4_X;
+ } else {
+ he_gi = HE_GI_3_2;
+ he_ltf = HE_LTF_4_X;
+ }
+ break;
}
+ ppdu_info->gi = he_gi;
+ he_gi = (he_gi != 0) ? he_gi - 1 : 0;
+ ppdu_info->he_data5 |= FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_GI, he_gi);
+ ppdu_info->ltf_size = he_ltf;
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE,
+ (he_ltf == HE_LTF_4_X) ? he_ltf - 1 : he_ltf);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR, info1);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF, info1);
+ ppdu_info->beamformed = value;
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_TXBF, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM, info1);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG, value);
+
+ /* data6 */
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
+ value++;
+ ppdu_info->nss = value;
+ ppdu_info->he_data6 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_NSTS, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND, info1);
+ ppdu_info->he_data6 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_DOPPLER, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION, info1);
+ ppdu_info->he_data6 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_TXOP, value);
- ppdu_info->nss = nsts + 1;
- ppdu_info->dcm = dcm;
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
break;
}
@@ -1000,29 +1207,142 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl =
(struct hal_rx_he_sig_a_mu_dl_info *)tlv_data;
- u32 cp_ltf;
-
info0 = __le32_to_cpu(he_sig_a_mu_dl->info0);
info1 = __le32_to_cpu(he_sig_a_mu_dl->info1);
- ppdu_info->bw =
- FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW,
- info0);
- cp_ltf = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE,
- info0);
-
- switch (cp_ltf) {
+ ppdu_info->he_mu_flags = 1;
+
+ ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MU;
+ ppdu_info->he_data1 |=
+ IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN;
+
+ ppdu_info->he_data2 =
+ IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN;
+
+ /*data3*/
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_BSS_COLOR, info0);
+ ppdu_info->he_data3 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_UL_FLAG, info0);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_UL_DL, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_LDPC_EXTRA, info1);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC, info1);
+ he_stbc = value;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_STBC, value);
+
+ /*data4*/
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_SPATIAL_REUSE, info0);
+ ppdu_info->he_data4 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE, value);
+
+ /*data5*/
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW, info0);
+ ppdu_info->bw = value;
+ ppdu_info->he_data5 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE, info0);
+ switch (value) {
case 0:
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_4_X;
+ break;
case 1:
- ppdu_info->gi = HAL_RX_GI_0_8_US;
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_2_X;
break;
case 2:
- ppdu_info->gi = HAL_RX_GI_1_6_US;
+ he_gi = HE_GI_1_6;
+ he_ltf = HE_LTF_2_X;
break;
case 3:
- ppdu_info->gi = HAL_RX_GI_3_2_US;
+ he_gi = HE_GI_3_2;
+ he_ltf = HE_LTF_4_X;
break;
}
+ ppdu_info->gi = he_gi;
+ he_gi = (he_gi != 0) ? he_gi - 1 : 0;
+ ppdu_info->he_data5 |= FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_GI, he_gi);
+ ppdu_info->ltf_size = he_ltf;
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE,
+ (he_ltf == HE_LTF_4_X) ? he_ltf - 1 : he_ltf);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_NUM_LTF_SYMB, info1);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_FACTOR,
+ info1);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_PE_DISAM,
+ info1);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG, value);
+
+ /*data6*/
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DOPPLER_INDICATION,
+ info0);
+ ppdu_info->he_data6 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_DOPPLER, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_TXOP_DURATION, info1);
+ ppdu_info->he_data6 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_TXOP, value);
+
+ /* HE-MU Flags */
+ /* HE-MU-flags1 */
+ ppdu_info->he_flags1 =
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN;
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_MCS_OF_SIGB, info0);
+ ppdu_info->he_flags1 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN,
+ value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DCM_OF_SIGB, info0);
+ ppdu_info->he_flags1 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN,
+ value);
+
+ /* HE-MU-flags2 */
+ ppdu_info->he_flags2 =
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN;
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW, info0);
+ ppdu_info->he_flags2 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW,
+ value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_COMP_MODE_SIGB, info0);
+ ppdu_info->he_flags2 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_NUM_SIGB_SYMB, info0);
+ value = value - 1;
+ ppdu_info->he_flags2 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS,
+ value);
ppdu_info->is_stbc = info1 &
HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC;
@@ -1040,7 +1360,7 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
info0);
ppdu_info->ru_alloc =
ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(ru_tones);
-
+ ppdu_info->he_RU[0] = ru_tones;
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
break;
}
@@ -1050,14 +1370,25 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
info0 = __le32_to_cpu(he_sig_b2_mu->info0);
+ ppdu_info->he_data1 |= IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN;
+
ppdu_info->mcs =
- FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS,
- info0);
+ FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS, info0);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, ppdu_info->mcs);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING, info0);
+ ppdu_info->ldpc = value;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID, info0);
+ ppdu_info->he_data4 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID, value);
+
ppdu_info->nss =
- FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS,
- info0) + 1;
- ppdu_info->ldpc = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING,
- info0);
+ FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS, info0) + 1;
break;
}
case HAL_PHYRX_HE_SIG_B2_OFDMA: {
@@ -1066,17 +1397,40 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
info0 = __le32_to_cpu(he_sig_b2_ofdma->info0);
+ ppdu_info->he_data1 |=
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN;
+
+ /* HE-data2 */
+ ppdu_info->he_data2 |= IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN;
+
ppdu_info->mcs =
FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS,
info0);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, ppdu_info->mcs);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM, info0);
+ he_dcm = value;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING, info0);
+ ppdu_info->ldpc = value;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
+
+ /* HE-data4 */
+ value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID, info0);
+ ppdu_info->he_data4 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID, value);
+
ppdu_info->nss =
FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS,
info0) + 1;
ppdu_info->beamformed =
- info0 &
- HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF;
- ppdu_info->ldpc = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING,
- info0);
+ info0 & HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF;
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
break;
}
@@ -1118,6 +1472,9 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
ppdu_info->rx_duration =
FIELD_GET(HAL_RX_PPDU_END_DURATION,
__le32_to_cpu(ppdu_rx_duration->info0));
+ ppdu_info->tsft = __le32_to_cpu(ppdu_rx_duration->rsvd0[1]);
+ ppdu_info->tsft = (ppdu_info->tsft << 32) |
+ __le32_to_cpu(ppdu_rx_duration->rsvd0[0]);
break;
}
case HAL_DUMMY:
@@ -1141,12 +1498,14 @@ ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE;
u16 tlv_tag;
u16 tlv_len;
+ u32 tlv_userid = 0;
u8 *ptr = skb->data;
do {
tlv = (struct hal_tlv_hdr *)ptr;
tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl);
tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
+ tlv_userid = FIELD_GET(HAL_TLV_USR_ID, tlv->tl);
ptr += sizeof(*tlv);
/* The actual length of PPDU_END is the combined length of many PHY
@@ -1158,7 +1517,7 @@ ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview);
hal_status = ath11k_hal_rx_parse_mon_status_tlv(ab, ppdu_info,
- tlv_tag, ptr);
+ tlv_tag, ptr, tlv_userid);
ptr += tlv_len;
ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN);
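Editor's note: the status TLV walker above extracts the tag, length, and (newly) the user id from the 32-bit TLV header word using the bit layout added in hal_desc.h: tag in bits 9:1, length in bits 25:10, user id in bits 31:26. A standalone sketch of that decode with plain shifts and masks (the header value is made up):

#include <stdio.h>
#include <stdint.h>

/* Field layout from hal_desc.h: tag GENMASK(9, 1), len GENMASK(25, 10),
 * user id GENMASK(31, 26).
 */
#define TLV_TAG(tl)    (((tl) >> 1) & 0x1ff)
#define TLV_LEN(tl)    (((tl) >> 10) & 0xffff)
#define TLV_USR_ID(tl) (((tl) >> 26) & 0x3f)

int main(void)
{
	uint32_t tl = (5u << 26) | (64u << 10) | (0x11u << 1); /* hypothetical header */

	printf("tag=%u len=%u userid=%u\n", TLV_TAG(tl), TLV_LEN(tl), TLV_USR_ID(tl));
	return 0;
}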
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.h b/drivers/net/wireless/ath/ath11k/hal_rx.h
index 571054c6d7f8..f6bae07abfd3 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.h
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.h
@@ -65,10 +65,6 @@ enum hal_rx_reception_type {
HAL_RX_RECEPTION_TYPE_MAX,
};
-#define HAL_TLV_STATUS_PPDU_NOT_DONE 0
-#define HAL_TLV_STATUS_PPDU_DONE 1
-#define HAL_TLV_STATUS_BUF_DONE 2
-#define HAL_TLV_STATUS_PPDU_NON_STD_DONE 3
#define HAL_RX_FCS_LEN 4
enum hal_rx_mon_status {
@@ -77,6 +73,40 @@ enum hal_rx_mon_status {
HAL_RX_MON_STATUS_BUF_DONE,
};
+struct hal_rx_user_status {
+ u32 mcs:4,
+ nss:3,
+ ofdma_info_valid:1,
+ dl_ofdma_ru_start_index:7,
+ dl_ofdma_ru_width:7,
+ dl_ofdma_ru_size:8;
+ u32 ul_ofdma_user_v0_word0;
+ u32 ul_ofdma_user_v0_word1;
+ u32 ast_index;
+ u32 tid;
+ u16 tcp_msdu_count;
+ u16 udp_msdu_count;
+ u16 other_msdu_count;
+ u16 frame_control;
+ u8 frame_control_info_valid;
+ u8 data_sequence_control_info_valid;
+ u16 first_data_seq_ctrl;
+ u32 preamble_type;
+ u16 ht_flags;
+ u16 vht_flags;
+ u16 he_flags;
+ u8 rs_flags;
+ u32 mpdu_cnt_fcs_ok;
+ u32 mpdu_cnt_fcs_err;
+ u32 mpdu_fcs_ok_bitmap[8];
+ u32 mpdu_ok_byte_count;
+ u32 mpdu_err_byte_count;
+};
+
+#define HAL_TLV_STATUS_PPDU_NOT_DONE HAL_RX_MON_STATUS_PPDU_NOT_DONE
+#define HAL_TLV_STATUS_PPDU_DONE HAL_RX_MON_STATUS_PPDU_DONE
+#define HAL_TLV_STATUS_BUF_DONE HAL_RX_MON_STATUS_BUF_DONE
+
struct hal_sw_mon_ring_entries {
dma_addr_t mon_dst_paddr;
dma_addr_t mon_status_paddr;
@@ -107,6 +137,12 @@ struct hal_rx_mon_ppdu_info {
u8 mcs;
u8 nss;
u8 bw;
+ u8 vht_flag_values1;
+ u8 vht_flag_values2;
+ u8 vht_flag_values3[4];
+ u8 vht_flag_values4;
+ u8 vht_flag_values5;
+ u16 vht_flag_values6;
u8 is_stbc;
u8 gi;
u8 ldpc;
@@ -114,10 +150,46 @@ struct hal_rx_mon_ppdu_info {
u8 rssi_comb;
u8 rssi_chain_pri20[HAL_RX_MAX_NSS];
u8 tid;
+ u16 ht_flags;
+ u16 vht_flags;
+ u16 he_flags;
+ u16 he_mu_flags;
u8 dcm;
u8 ru_alloc;
u8 reception_type;
+ u64 tsft;
u64 rx_duration;
+ u16 frame_control;
+ u32 ast_index;
+ u8 rs_fcs_err;
+ u8 rs_flags;
+ u8 cck_flag;
+ u8 ofdm_flag;
+ u8 ulofdma_flag;
+ u8 frame_control_info_valid;
+ u16 he_per_user_1;
+ u16 he_per_user_2;
+ u8 he_per_user_position;
+ u8 he_per_user_known;
+ u16 he_flags1;
+ u16 he_flags2;
+ u8 he_RU[4];
+ u16 he_data1;
+ u16 he_data2;
+ u16 he_data3;
+ u16 he_data4;
+ u16 he_data5;
+ u16 he_data6;
+ u32 ppdu_len;
+ u32 prev_ppdu_id;
+ u32 device_id;
+ u16 first_data_seq_ctrl;
+ u8 monitor_direct_used;
+ u8 data_sequence_control_info_valid;
+ u8 ltf_size;
+ u8 rxpcu_filter_pass;
+ char rssi_chain[8][8];
+ struct hal_rx_user_status userstats;
};
#define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0)
@@ -150,6 +222,9 @@ struct hal_rx_ppdu_start {
#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP GENMASK(15, 0)
#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_EOSP_BITMAP GENMASK(31, 16)
+#define HAL_RX_PPDU_END_USER_STATS_RSVD2_6_MPDU_OK_BYTE_COUNT GENMASK(24, 0)
+#define HAL_RX_PPDU_END_USER_STATS_RSVD2_8_MPDU_ERR_BYTE_COUNT GENMASK(24, 0)
+
struct hal_rx_ppdu_end_user_stats {
__le32 rsvd0[2];
__le32 info0;
@@ -164,6 +239,16 @@ struct hal_rx_ppdu_end_user_stats {
__le32 rsvd2[11];
} __packed;
+struct hal_rx_ppdu_end_user_stats_ext {
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ u32 info3;
+ u32 info4;
+ u32 info5;
+ u32 info6;
+} __packed;
+
#define HAL_RX_HT_SIG_INFO_INFO0_MCS GENMASK(6, 0)
#define HAL_RX_HT_SIG_INFO_INFO0_BW BIT(7)
@@ -212,25 +297,62 @@ enum hal_rx_vht_sig_a_gi_setting {
HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY = 3,
};
+#define HAL_RX_SU_MU_CODING_LDPC 0x01
+
+#define HE_GI_0_8 0
+#define HE_GI_0_4 1
+#define HE_GI_1_6 2
+#define HE_GI_3_2 3
+
+#define HE_LTF_1_X 0
+#define HE_LTF_2_X 1
+#define HE_LTF_4_X 2
+#define HE_LTF_UNKNOWN 3
+
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS GENMASK(6, 3)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM BIT(7)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW GENMASK(20, 19)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE GENMASK(22, 21)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS GENMASK(25, 23)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR GENMASK(13, 8)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE GENMASK(18, 15)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND BIT(0)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE BIT(1)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG BIT(2)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION GENMASK(6, 0)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING BIT(7)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA BIT(8)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC BIT(9)
#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF BIT(10)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR GENMASK(12, 11)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM BIT(13)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND BIT(15)
struct hal_rx_he_sig_a_su_info {
__le32 info0;
__le32 info1;
} __packed;
-#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW GENMASK(17, 15)
-#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE GENMASK(24, 23)
-
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_UL_FLAG BIT(1)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_MCS_OF_SIGB GENMASK(3, 1)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DCM_OF_SIGB BIT(4)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_BSS_COLOR GENMASK(10, 5)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_SPATIAL_REUSE GENMASK(14, 11)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW GENMASK(17, 15)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_NUM_SIGB_SYMB GENMASK(21, 18)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_COMP_MODE_SIGB BIT(22)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE GENMASK(24, 23)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DOPPLER_INDICATION BIT(25)
+
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_TXOP_DURATION GENMASK(6, 0)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_CODING BIT(7)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_NUM_LTF_SYMB GENMASK(10, 8)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_LDPC_EXTRA BIT(11)
#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC BIT(12)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_TXBF BIT(10)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_FACTOR GENMASK(14, 13)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_PE_DISAM BIT(15)
struct hal_rx_he_sig_a_mu_dl_info {
__le32 info0;
@@ -243,6 +365,7 @@ struct hal_rx_he_sig_b1_mu_info {
__le32 info0;
} __packed;
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID GENMASK(10, 0)
#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS GENMASK(18, 15)
#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING BIT(20)
#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS GENMASK(31, 29)
@@ -251,6 +374,7 @@ struct hal_rx_he_sig_b2_mu_info {
__le32 info0;
} __packed;
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID GENMASK(10, 0)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS GENMASK(13, 11)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF BIT(19)
#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS GENMASK(18, 15)
@@ -279,11 +403,14 @@ struct hal_rx_phyrx_rssi_legacy_info {
#define HAL_RX_MPDU_INFO_INFO0_PEERID GENMASK(31, 16)
#define HAL_RX_MPDU_INFO_INFO0_PEERID_WCN6855 GENMASK(15, 0)
+#define HAL_RX_MPDU_INFO_INFO1_MPDU_LEN GENMASK(13, 0)
struct hal_rx_mpdu_info {
__le32 rsvd0;
__le32 info0;
- __le32 rsvd1[21];
+ __le32 rsvd1[11];
+ __le32 info1;
+ __le32 rsvd2[9];
} __packed;
struct hal_rx_mpdu_info_wcn6855 {
diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
index 3b0fdc1a6b3f..d1b0e76d9ec2 100644
--- a/drivers/net/wireless/ath/ath11k/hw.c
+++ b/drivers/net/wireless/ath/ath11k/hw.c
@@ -273,6 +273,12 @@ static u8 ath11k_hw_ipq8074_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
__le32_to_cpu(desc->u.ipq8074.msdu_start.info2));
}
+static bool ath11k_hw_ipq8074_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info2));
+}
+
static bool ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
{
return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
@@ -444,6 +450,12 @@ static u8 ath11k_hw_qcn9074_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
__le32_to_cpu(desc->u.qcn9074.msdu_start.info2));
}
+static bool ath11k_hw_qcn9074_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info2));
+}
+
static bool ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
{
return !!FIELD_GET(RX_MPDU_START_INFO11_MPDU_SEQ_CTRL_VALID,
@@ -801,6 +813,12 @@ static u16 ath11k_hw_wcn6855_mpdu_info_get_peerid(u8 *tlv_data)
return peer_id;
}
+static bool ath11k_hw_wcn6855_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info2));
+}
+
const struct ath11k_hw_ops ipq8074_ops = {
.get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
.wmi_init_config = ath11k_init_wmi_config_ipq8074,
@@ -815,6 +833,7 @@ const struct ath11k_hw_ops ipq8074_ops = {
.rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
@@ -853,6 +872,7 @@ const struct ath11k_hw_ops ipq6018_ops = {
.rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
@@ -891,6 +911,7 @@ const struct ath11k_hw_ops qca6390_ops = {
.rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
@@ -929,6 +950,7 @@ const struct ath11k_hw_ops qcn9074_ops = {
.rx_desc_get_encrypt_type = ath11k_hw_qcn9074_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_qcn9074_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
@@ -967,6 +989,7 @@ const struct ath11k_hw_ops wcn6855_ops = {
.rx_desc_get_encrypt_type = ath11k_hw_wcn6855_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_wcn6855_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_wcn6855_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_wcn6855_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_wcn6855_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_wcn6855_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_wcn6855_rx_desc_get_mpdu_start_seq_no,
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 29934b36c14e..27ca4a9c20fc 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -192,6 +192,8 @@ struct ath11k_hw_params {
bool wakeup_mhi;
bool supports_rssi_stats;
bool fw_wmi_diag_event;
+ bool current_cc_support;
+ bool dbr_debug_support;
};
struct ath11k_hw_ops {
@@ -210,6 +212,7 @@ struct ath11k_hw_ops {
u32 (*rx_desc_get_encrypt_type)(struct hal_rx_desc *desc);
u8 (*rx_desc_get_decap_type)(struct hal_rx_desc *desc);
u8 (*rx_desc_get_mesh_ctl)(struct hal_rx_desc *desc);
+ bool (*rx_desc_get_ldpc_support)(struct hal_rx_desc *desc);
bool (*rx_desc_get_mpdu_seq_ctl_vld)(struct hal_rx_desc *desc);
bool (*rx_desc_get_mpdu_fc_valid)(struct hal_rx_desc *desc);
u16 (*rx_desc_get_mpdu_start_seq_no)(struct hal_rx_desc *desc);
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 07f499d5ec92..d5b83f90d27a 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -2319,6 +2319,9 @@ static void ath11k_peer_assoc_h_he_6ghz(struct ath11k *ar,
if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->he_6ghz_capa.capa)
return;
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ arg->bw_40 = true;
+
if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
arg->bw_80 = true;
@@ -2862,6 +2865,11 @@ static void ath11k_recalculate_mgmt_rate(struct ath11k *ar,
if (ret)
ath11k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret);
+ /* For WCN6855, firmware will clear this param when vdev starts, hence
+ * cache it here so that we can reconfigure it once vdev starts.
+ */
+ ar->hw_rate_code = hw_rate_code;
+
vdev_param = WMI_VDEV_PARAM_BEACON_RATE;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
hw_rate_code);
@@ -4504,24 +4512,30 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
sta->addr, arvif->vdev_id);
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
- ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
+ bool skip_peer_delete = ar->ab->hw_params.vdev_start_delay &&
+ vif->type == NL80211_IFTYPE_STATION;
- if (ar->ab->hw_params.vdev_start_delay &&
- vif->type == NL80211_IFTYPE_STATION)
- goto free;
+ ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
- ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
- if (ret)
- ath11k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- else
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
+ if (!skip_peer_delete) {
+ ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "Failed to delete peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab,
+ ATH11K_DBG_MAC,
+ "Removed peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ }
ath11k_mac_dec_num_stations(arvif, sta);
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (peer && peer->sta == sta) {
+ if (skip_peer_delete && peer) {
+ peer->sta = NULL;
+ } else if (peer && peer->sta == sta) {
ath11k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
vif->addr, arvif->vdev_id);
peer->sta = NULL;
@@ -4531,7 +4545,6 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
}
spin_unlock_bh(&ar->ab->base_lock);
-free:
kfree(arsta->tx_stats);
arsta->tx_stats = NULL;
@@ -5566,7 +5579,7 @@ static int ath11k_mac_mgmt_tx(struct ath11k *ar, struct sk_buff *skb,
skb_queue_tail(q, skb);
atomic_inc(&ar->num_pending_mgmt_tx);
- ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
+ queue_work(ar->ab->workqueue, &ar->wmi_mgmt_tx_work);
return 0;
}
@@ -6341,6 +6354,10 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
}
}
+ ret = ath11k_debugfs_add_interface(arvif);
+ if (ret)
+ goto err_peer_del;
+
mutex_unlock(&ar->conf_mutex);
return 0;
@@ -6375,6 +6392,7 @@ err_vdev_del:
spin_unlock_bh(&ar->data_lock);
err:
+ ath11k_debugfs_remove_interface(arvif);
mutex_unlock(&ar->conf_mutex);
return ret;
@@ -6473,6 +6491,8 @@ err_vdev_del:
/* Recalc txpower for remaining vdev */
ath11k_mac_txpower_recalc(ar);
+ ath11k_debugfs_remove_interface(arvif);
+
/* TODO: recal traffic pause state based on the available vdevs */
mutex_unlock(&ar->conf_mutex);
@@ -6610,12 +6630,13 @@ static void ath11k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
static int
ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
- const struct cfg80211_chan_def *chandef,
+ struct ieee80211_chanctx_conf *ctx,
bool restart)
{
struct ath11k *ar = arvif->ar;
struct ath11k_base *ab = ar->ab;
struct wmi_vdev_start_req_arg arg = {};
+ const struct cfg80211_chan_def *chandef = &ctx->def;
int he_support = arvif->vif->bss_conf.he_support;
int ret = 0;
@@ -6650,8 +6671,7 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
arg.channel.chan_radar =
!!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
- arg.channel.freq2_radar =
- !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+ arg.channel.freq2_radar = ctx->radar_enabled;
arg.channel.passive = arg.channel.chan_radar;
@@ -6761,15 +6781,15 @@ err:
}
static int ath11k_mac_vdev_start(struct ath11k_vif *arvif,
- const struct cfg80211_chan_def *chandef)
+ struct ieee80211_chanctx_conf *ctx)
{
- return ath11k_mac_vdev_start_restart(arvif, chandef, false);
+ return ath11k_mac_vdev_start_restart(arvif, ctx, false);
}
static int ath11k_mac_vdev_restart(struct ath11k_vif *arvif,
- const struct cfg80211_chan_def *chandef)
+ struct ieee80211_chanctx_conf *ctx)
{
- return ath11k_mac_vdev_start_restart(arvif, chandef, true);
+ return ath11k_mac_vdev_start_restart(arvif, ctx, true);
}
struct ath11k_mac_change_chanctx_arg {
@@ -6836,13 +6856,33 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
if (WARN_ON(!arvif->is_started))
continue;
- if (WARN_ON(!arvif->is_up))
- continue;
+ /* change_chanctx can be called even before vdev_up from
+ * ieee80211_start_ap->ieee80211_vif_use_channel->
+ * ieee80211_recalc_radar_chanctx.
+ *
+ * Firmware expects vdev_restart only if the vdev is up.
+ * If the vdev is down then it expects vdev_stop->vdev_start.
+ */
+ if (arvif->is_up) {
+ ret = ath11k_mac_vdev_restart(arvif, vifs[i].new_ctx);
+ if (ret) {
+ ath11k_warn(ab, "failed to restart vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ } else {
+ ret = ath11k_mac_vdev_stop(arvif);
+ if (ret) {
+ ath11k_warn(ab, "failed to stop vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+
+ ret = ath11k_mac_vdev_start(arvif, vifs[i].new_ctx);
+ if (ret)
+ ath11k_warn(ab, "failed to start vdev %d: %d\n",
+ arvif->vdev_id, ret);
- ret = ath11k_mac_vdev_restart(arvif, &vifs[i].new_ctx->def);
- if (ret) {
- ath11k_warn(ab, "failed to restart vdev %d: %d\n",
- arvif->vdev_id, ret);
continue;
}
@@ -6927,7 +6967,8 @@ static void ath11k_mac_op_change_chanctx(struct ieee80211_hw *hw,
if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
goto unlock;
- if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH)
+ if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH ||
+ changed & IEEE80211_CHANCTX_CHANGE_RADAR)
ath11k_mac_update_active_vif_chan(ar, ctx);
/* TODO: Recalc radar detection */
@@ -6947,7 +6988,7 @@ static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
if (WARN_ON(arvif->is_started))
return -EBUSY;
- ret = ath11k_mac_vdev_start(arvif, &arvif->chanctx.def);
+ ret = ath11k_mac_vdev_start(arvif, &arvif->chanctx);
if (ret) {
ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
arvif->vdev_id, vif->addr,
@@ -6955,6 +6996,19 @@ static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
return ret;
}
+ /* Reconfigure hardware rate code since it is cleared by firmware.
+ */
+ if (ar->hw_rate_code > 0) {
+ u32 vdev_param = WMI_VDEV_PARAM_MGMT_RATE;
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
+ ar->hw_rate_code);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret);
+ return ret;
+ }
+ }
+
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath11k_wmi_vdev_up(ar, arvif->vdev_id, 0, ar->mac_addr);
if (ret) {
@@ -7028,7 +7082,7 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out;
}
- ret = ath11k_mac_vdev_start(arvif, &ctx->def);
+ ret = ath11k_mac_vdev_start(arvif, ctx);
if (ret) {
ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
arvif->vdev_id, vif->addr,
@@ -8422,7 +8476,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
ar->hw->queues = ATH11K_HW_MAX_QUEUES;
ar->hw->wiphy->tx_queue_len = ATH11K_QUEUE_LEN;
ar->hw->offchannel_tx_hw_queue = ATH11K_HW_MAX_QUEUES - 1;
- ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
ar->hw->vif_data_size = sizeof(struct ath11k_vif);
ar->hw->sta_data_size = sizeof(struct ath11k_sta);
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index e4250ba8dfee..fc3524e83e52 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -13,6 +13,7 @@
#include "pci.h"
#define MHI_TIMEOUT_DEFAULT_MS 90000
+#define RDDM_DUMP_SIZE 0x420000
static struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
{
@@ -332,6 +333,7 @@ static int ath11k_mhi_read_addr_from_dt(struct mhi_controller *mhi_ctrl)
return -ENOENT;
ret = of_address_to_resource(np, 0, &res);
+ of_node_put(np);
if (ret)
return ret;
@@ -381,6 +383,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
mhi_ctrl->iova_stop = 0xFFFFFFFF;
}
+ mhi_ctrl->rddm_size = RDDM_DUMP_SIZE;
mhi_ctrl->sbl_size = SZ_512K;
mhi_ctrl->seg_len = SZ_512K;
mhi_ctrl->fbc_download = true;
@@ -560,7 +563,7 @@ static int ath11k_mhi_set_state(struct ath11k_pci *ab_pci,
ret = 0;
break;
case ATH11K_MHI_POWER_ON:
- ret = mhi_async_power_up(ab_pci->mhi_ctrl);
+ ret = mhi_sync_power_up(ab_pci->mhi_ctrl);
break;
case ATH11K_MHI_POWER_OFF:
mhi_power_down(ab_pci->mhi_ctrl, true);
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index de71ad594f34..903758751c99 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -1571,6 +1571,11 @@ static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev)
struct ath11k_base *ab = dev_get_drvdata(dev);
int ret;
+ if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci suspend as qmi is not initialised\n");
+ return 0;
+ }
+
ret = ath11k_core_suspend(ab);
if (ret)
ath11k_warn(ab, "failed to suspend core: %d\n", ret);
@@ -1583,6 +1588,11 @@ static __maybe_unused int ath11k_pci_pm_resume(struct device *dev)
struct ath11k_base *ab = dev_get_drvdata(dev);
int ret;
+ if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci resume as qmi is not initialised\n");
+ return 0;
+ }
+
ret = ath11k_core_resume(ab);
if (ret)
ath11k_warn(ab, "failed to resume core: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
index 85471f8b3563..332886bc6b33 100644
--- a/drivers/net/wireless/ath/ath11k/peer.c
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -252,7 +252,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
{
struct ath11k_peer *peer;
struct ath11k_sta *arsta;
- int ret;
+ int ret, fbret;
lockdep_assert_held(&ar->conf_mutex);
@@ -291,22 +291,8 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
param->peer_addr, param->vdev_id);
- reinit_completion(&ar->peer_delete_done);
-
- ret = ath11k_wmi_send_peer_delete_cmd(ar, param->peer_addr,
- param->vdev_id);
- if (ret) {
- ath11k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
- param->vdev_id, param->peer_addr);
- return ret;
- }
-
- ret = ath11k_wait_for_peer_delete_done(ar, param->vdev_id,
- param->peer_addr);
- if (ret)
- return ret;
-
- return -ENOENT;
+ ret = -ENOENT;
+ goto cleanup;
}
peer->pdev_idx = ar->pdev_idx;
@@ -335,4 +321,24 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
spin_unlock_bh(&ar->ab->base_lock);
return 0;
+
+cleanup:
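+	/* the peer exists in firmware but was not found on the host; delete it
+	 * from firmware before returning the original error
+	 */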
+ reinit_completion(&ar->peer_delete_done);
+
+ fbret = ath11k_wmi_send_peer_delete_cmd(ar, param->peer_addr,
+ param->vdev_id);
+ if (fbret) {
+ ath11k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
+ param->vdev_id, param->peer_addr);
+ goto exit;
+ }
+
+ fbret = ath11k_wait_for_peer_delete_done(ar, param->vdev_id,
+ param->peer_addr);
+ if (fbret)
+ ath11k_warn(ar->ab, "failed wait for peer %pM delete done id %d fallback ret %d\n",
+ param->peer_addr, param->vdev_id, fbret);
+
+exit:
+ return ret;
}
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 65d3c6ba35ae..04e966830c18 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -1932,10 +1932,11 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
if (!hremote_node) {
ath11k_dbg(ab, ATH11K_DBG_QMI,
"qmi fail to get hremote_node\n");
- return ret;
+ return -ENODEV;
}
ret = of_address_to_resource(hremote_node, 0, &res);
+ of_node_put(hremote_node);
if (ret) {
ath11k_dbg(ab, ATH11K_DBG_QMI,
"qmi fail to get reg from hremote\n");
@@ -2341,6 +2342,7 @@ static void ath11k_qmi_m3_free(struct ath11k_base *ab)
dma_free_coherent(ab->dev, m3_mem->size,
m3_mem->vaddr, m3_mem->paddr);
m3_mem->vaddr = NULL;
+ m3_mem->size = 0;
}
static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
@@ -2958,7 +2960,11 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work)
clear_bit(ATH11K_FLAG_CRASH_FLUSH,
&ab->dev_flags);
clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
- ath11k_core_qmi_firmware_ready(ab);
+ ret = ath11k_core_qmi_firmware_ready(ab);
+ if (ret) {
+ set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
+ break;
+ }
set_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags);
}
@@ -3024,3 +3030,8 @@ void ath11k_qmi_deinit_service(struct ath11k_base *ab)
}
EXPORT_SYMBOL(ath11k_qmi_deinit_service);
+void ath11k_qmi_free_resource(struct ath11k_base *ab)
+{
+ ath11k_qmi_free_target_mem_chunk(ab);
+ ath11k_qmi_m3_free(ab);
+}
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
index ba2eff4d59cb..61678de56ac7 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.h
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -492,5 +492,6 @@ void ath11k_qmi_event_work(struct work_struct *work);
void ath11k_qmi_msg_recv_work(struct work_struct *work);
void ath11k_qmi_deinit_service(struct ath11k_base *ab);
int ath11k_qmi_init_service(struct ath11k_base *ab);
+void ath11k_qmi_free_resource(struct ath11k_base *ab);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index d6575feca5a2..81e11cde31d7 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -48,6 +48,7 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct wmi_init_country_params init_country_param;
+ struct wmi_set_current_country_params set_current_param = {};
struct ath11k *ar = hw->priv;
int ret;
@@ -76,18 +77,26 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
return;
}
- /* Set the country code to the firmware and wait for
+ /* Set the country code to the firmware; it will then receive
* the WMI_REG_CHAN_LIST_CC EVENT for updating the
* reg info
*/
- init_country_param.flags = ALPHA_IS_SET;
- memcpy(&init_country_param.cc_info.alpha2, request->alpha2, 2);
- init_country_param.cc_info.alpha2[2] = 0;
+ if (ar->ab->hw_params.current_cc_support) {
+ memcpy(&set_current_param.alpha2, request->alpha2, 2);
+ ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed set current country code: %d\n", ret);
+ } else {
+ init_country_param.flags = ALPHA_IS_SET;
+ memcpy(&init_country_param.cc_info.alpha2, request->alpha2, 2);
+ init_country_param.cc_info.alpha2[2] = 0;
- ret = ath11k_wmi_send_init_country_cmd(ar, init_country_param);
- if (ret)
- ath11k_warn(ar->ab,
- "INIT Country code set to fw failed : %d\n", ret);
+ ret = ath11k_wmi_send_init_country_cmd(ar, init_country_param);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "INIT Country code set to fw failed : %d\n", ret);
+ }
ath11k_mac_11d_scan_stop(ar);
ar->regdom_set_by_user = true;
diff --git a/drivers/net/wireless/ath/ath11k/rx_desc.h b/drivers/net/wireless/ath/ath11k/rx_desc.h
index 79c50804d7dc..26ecc1bcd9d5 100644
--- a/drivers/net/wireless/ath/ath11k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath11k/rx_desc.h
@@ -1445,7 +1445,7 @@ struct hal_rx_desc_ipq8074 {
__le32 hdr_status_tag;
__le32 phy_ppdu_id;
u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
- u8 msdu_payload[0];
+ u8 msdu_payload[];
} __packed;
struct hal_rx_desc_qcn9074 {
@@ -1464,7 +1464,7 @@ struct hal_rx_desc_qcn9074 {
__le32 hdr_status_tag;
__le32 phy_ppdu_id;
u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
- u8 msdu_payload[0];
+ u8 msdu_payload[];
} __packed;
struct hal_rx_desc_wcn6855 {
@@ -1483,7 +1483,7 @@ struct hal_rx_desc_wcn6855 {
__le32 hdr_status_tag;
__le32 phy_ppdu_id;
u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
- u8 msdu_payload[0];
+ u8 msdu_payload[];
} __packed;
struct hal_rx_desc {
diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c
index 4100cc1449a2..2b18871d5f7c 100644
--- a/drivers/net/wireless/ath/ath11k/spectral.c
+++ b/drivers/net/wireless/ath/ath11k/spectral.c
@@ -107,7 +107,7 @@ struct spectral_search_fft_report {
__le32 info1;
__le32 info2;
__le32 reserve0;
- u8 bins[0];
+ u8 bins[];
} __packed;
struct ath11k_spectral_search_report {
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 6b68ccf65e39..b4f86c45d81f 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -144,6 +144,8 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
.min_len = sizeof(struct wmi_11d_new_cc_ev) },
[WMI_TAG_PER_CHAIN_RSSI_STATS] = {
.min_len = sizeof(struct wmi_per_chain_rssi_stats) },
+ [WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT] = {
+ .min_len = sizeof(struct wmi_twt_add_dialog_event) },
};
#define PRIMAP(_hw_mode_) \
@@ -3085,11 +3087,12 @@ ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id)
/* TODO add MBSSID support */
cmd->mbss_support = 0;
- ret = ath11k_wmi_cmd_send(wmi, skb,
- WMI_TWT_ENABLE_CMDID);
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ENABLE_CMDID);
if (ret) {
ath11k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID");
dev_kfree_skb(skb);
+ } else {
+ ar->twt_enabled = 1;
}
return ret;
}
@@ -3114,11 +3117,181 @@ ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id)
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->pdev_id = pdev_id;
- ret = ath11k_wmi_cmd_send(wmi, skb,
- WMI_TWT_DISABLE_CMDID);
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DISABLE_CMDID);
if (ret) {
ath11k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID");
dev_kfree_skb(skb);
+ } else {
+ ar->twt_enabled = 0;
+ }
+ return ret;
+}
+
+int ath11k_wmi_send_twt_add_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_add_dialog_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_add_dialog_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_add_dialog_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ADD_DIALOG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = params->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+ cmd->dialog_id = params->dialog_id;
+ cmd->wake_intvl_us = params->wake_intvl_us;
+ cmd->wake_intvl_mantis = params->wake_intvl_mantis;
+ cmd->wake_dura_us = params->wake_dura_us;
+ cmd->sp_offset_us = params->sp_offset_us;
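+	/* the TWT command type occupies the low bits of flags; the
+	 * bcast/trigger/flow-type/protection modifiers use bits 8-11
+	 */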
+ cmd->flags = params->twt_cmd;
+ if (params->flag_bcast)
+ cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_BCAST;
+ if (params->flag_trigger)
+ cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_TRIGGER;
+ if (params->flag_flow_type)
+ cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_FLOW_TYPE;
+ if (params->flag_protection)
+ cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_PROTECTION;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi add twt dialog vdev %u dialog id %u wake interval %u mantissa %u wake duration %u service period offset %u flags 0x%x\n",
+ cmd->vdev_id, cmd->dialog_id, cmd->wake_intvl_us,
+ cmd->wake_intvl_mantis, cmd->wake_dura_us, cmd->sp_offset_us,
+ cmd->flags);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ADD_DIALOG_CMDID);
+
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send wmi command to add twt dialog: %d",
+ ret);
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int ath11k_wmi_send_twt_del_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_del_dialog_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_del_dialog_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_del_dialog_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DEL_DIALOG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = params->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+ cmd->dialog_id = params->dialog_id;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi delete twt dialog vdev %u dialog id %u\n",
+ cmd->vdev_id, cmd->dialog_id);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DEL_DIALOG_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send wmi command to delete twt dialog: %d",
+ ret);
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int ath11k_wmi_send_twt_pause_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_pause_dialog_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_pause_dialog_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_pause_dialog_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_TWT_PAUSE_DIALOG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = params->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+ cmd->dialog_id = params->dialog_id;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi pause twt dialog vdev %u dialog id %u\n",
+ cmd->vdev_id, cmd->dialog_id);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_PAUSE_DIALOG_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send wmi command to pause twt dialog: %d",
+ ret);
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int ath11k_wmi_send_twt_resume_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_resume_dialog_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_resume_dialog_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_resume_dialog_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_TWT_RESUME_DIALOG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = params->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+ cmd->dialog_id = params->dialog_id;
+ cmd->sp_offset_us = params->sp_offset_us;
+ cmd->next_twt_size = params->next_twt_size;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi resume twt dialog vdev %u dialog id %u service period offset %u next twt subfield size %u\n",
+ cmd->vdev_id, cmd->dialog_id, cmd->sp_offset_us,
+ cmd->next_twt_size);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_RESUME_DIALOG_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send wmi command to resume twt dialog: %d",
+ ret);
+ dev_kfree_skb(skb);
}
return ret;
}
@@ -7532,6 +7705,66 @@ ath11k_wmi_diag_event(struct ath11k_base *ab,
trace_ath11k_wmi_diag(ab, skb->data, skb->len);
}
+static const char *ath11k_wmi_twt_add_dialog_event_status(u32 status)
+{
+ switch (status) {
+ case WMI_ADD_TWT_STATUS_OK:
+ return "ok";
+ case WMI_ADD_TWT_STATUS_TWT_NOT_ENABLED:
+ return "twt disabled";
+ case WMI_ADD_TWT_STATUS_USED_DIALOG_ID:
+ return "dialog id in use";
+ case WMI_ADD_TWT_STATUS_INVALID_PARAM:
+ return "invalid parameters";
+ case WMI_ADD_TWT_STATUS_NOT_READY:
+ return "not ready";
+ case WMI_ADD_TWT_STATUS_NO_RESOURCE:
+ return "resource unavailable";
+ case WMI_ADD_TWT_STATUS_NO_ACK:
+ return "no ack";
+ case WMI_ADD_TWT_STATUS_NO_RESPONSE:
+ return "no response";
+ case WMI_ADD_TWT_STATUS_DENIED:
+ return "denied";
+ case WMI_ADD_TWT_STATUS_UNKNOWN_ERROR:
+ fallthrough;
+ default:
+ return "unknown error";
+ }
+}
+
+static void ath11k_wmi_twt_add_dialog_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_twt_add_dialog_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab,
+ "failed to parse wmi twt add dialog status event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch twt add dialog wmi event\n");
+ goto exit;
+ }
+
+ if (ev->status)
+ ath11k_warn(ab,
+ "wmi add twt dialog event vdev %d dialog id %d status %s\n",
+ ev->vdev_id, ev->dialog_id,
+ ath11k_wmi_twt_add_dialog_event_status(ev->status));
+
+exit:
+ kfree(tb);
+}
+
static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -7629,11 +7862,17 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
ath11k_wmi_obss_color_collision_event(ab, skb);
break;
+ case WMI_TWT_ADD_DIALOG_EVENTID:
+ ath11k_wmi_twt_add_dialog_event(ab, skb);
+ break;
/* add Unsupported events here */
case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
case WMI_TWT_ENABLE_EVENTID:
case WMI_TWT_DISABLE_EVENTID:
+ case WMI_TWT_DEL_DIALOG_EVENTID:
+ case WMI_TWT_PAUSE_DIALOG_EVENTID:
+ case WMI_TWT_RESUME_DIALOG_EVENTID:
case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
case WMI_PEER_CREATE_CONF_EVENTID:
ath11k_dbg(ab, ATH11K_DBG_WMI,
@@ -7798,6 +8037,59 @@ int ath11k_wmi_simulate_radar(struct ath11k *ar)
return ath11k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
}
+int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap,
+ struct ath11k_fw_dbglog *dbglog)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_debug_log_config_cmd_fixed_param *cmd;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ int ret, len;
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE + (MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_debug_log_config_cmd_fixed_param *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DEBUG_LOG_CONFIG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->dbg_log_param = dbglog->param;
+
+ tlv = (struct wmi_tlv *)((u8 *)cmd + sizeof(*cmd));
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
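+	/* the module id bitmap is copied into this UINT32 array TLV for the
+	 * bitmap-based params handled below
+	 */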
+
+ switch (dbglog->param) {
+ case WMI_DEBUG_LOG_PARAM_LOG_LEVEL:
+ case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE:
+ case WMI_DEBUG_LOG_PARAM_VDEV_DISABLE:
+ case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP:
+ cmd->value = dbglog->value;
+ break;
+ case WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP:
+ case WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP:
+ cmd->value = dbglog->value;
+ memcpy(tlv->value, module_id_bitmap,
+ MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+ /* clear the cached bitmap so the next user configuration starts fresh */
+ memset(module_id_bitmap, 0,
+ MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+ break;
+ default:
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DBGLOG_CFG_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_DBGLOG_CFG_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
int ath11k_wmi_connect(struct ath11k_base *ab)
{
u32 i;
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index 2f26ec1a8aa3..587f42307250 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -12,6 +12,7 @@
struct ath11k_base;
struct ath11k;
struct ath11k_fw_stats;
+struct ath11k_fw_dbglog;
#define PSOC_HOST_MAX_NUM_SS (8)
@@ -4952,6 +4953,112 @@ struct wmi_twt_disable_params_cmd {
u32 pdev_id;
} __packed;
+enum WMI_HOST_TWT_COMMAND {
+ WMI_HOST_TWT_COMMAND_REQUEST_TWT = 0,
+ WMI_HOST_TWT_COMMAND_SUGGEST_TWT,
+ WMI_HOST_TWT_COMMAND_DEMAND_TWT,
+ WMI_HOST_TWT_COMMAND_TWT_GROUPING,
+ WMI_HOST_TWT_COMMAND_ACCEPT_TWT,
+ WMI_HOST_TWT_COMMAND_ALTERNATE_TWT,
+ WMI_HOST_TWT_COMMAND_DICTATE_TWT,
+ WMI_HOST_TWT_COMMAND_REJECT_TWT,
+};
+
+#define WMI_TWT_ADD_DIALOG_FLAG_BCAST BIT(8)
+#define WMI_TWT_ADD_DIALOG_FLAG_TRIGGER BIT(9)
+#define WMI_TWT_ADD_DIALOG_FLAG_FLOW_TYPE BIT(10)
+#define WMI_TWT_ADD_DIALOG_FLAG_PROTECTION BIT(11)
+
+struct wmi_twt_add_dialog_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 dialog_id;
+ u32 wake_intvl_us;
+ u32 wake_intvl_mantis;
+ u32 wake_dura_us;
+ u32 sp_offset_us;
+ u32 flags;
+} __packed;
+
+struct wmi_twt_add_dialog_params {
+ u32 vdev_id;
+ u8 peer_macaddr[ETH_ALEN];
+ u32 dialog_id;
+ u32 wake_intvl_us;
+ u32 wake_intvl_mantis;
+ u32 wake_dura_us;
+ u32 sp_offset_us;
+ u8 twt_cmd;
+ u8 flag_bcast;
+ u8 flag_trigger;
+ u8 flag_flow_type;
+ u8 flag_protection;
+} __packed;
+
+enum wmi_twt_add_dialog_status {
+ WMI_ADD_TWT_STATUS_OK,
+ WMI_ADD_TWT_STATUS_TWT_NOT_ENABLED,
+ WMI_ADD_TWT_STATUS_USED_DIALOG_ID,
+ WMI_ADD_TWT_STATUS_INVALID_PARAM,
+ WMI_ADD_TWT_STATUS_NOT_READY,
+ WMI_ADD_TWT_STATUS_NO_RESOURCE,
+ WMI_ADD_TWT_STATUS_NO_ACK,
+ WMI_ADD_TWT_STATUS_NO_RESPONSE,
+ WMI_ADD_TWT_STATUS_DENIED,
+ WMI_ADD_TWT_STATUS_UNKNOWN_ERROR,
+};
+
+struct wmi_twt_add_dialog_event {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 dialog_id;
+ u32 status;
+} __packed;
+
+struct wmi_twt_del_dialog_params {
+ u32 vdev_id;
+ u8 peer_macaddr[ETH_ALEN];
+ u32 dialog_id;
+} __packed;
+
+struct wmi_twt_del_dialog_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 dialog_id;
+} __packed;
+
+struct wmi_twt_pause_dialog_params {
+ u32 vdev_id;
+ u8 peer_macaddr[ETH_ALEN];
+ u32 dialog_id;
+} __packed;
+
+struct wmi_twt_pause_dialog_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 dialog_id;
+} __packed;
+
+struct wmi_twt_resume_dialog_params {
+ u32 vdev_id;
+ u8 peer_macaddr[ETH_ALEN];
+ u32 dialog_id;
+ u32 sp_offset_us;
+ u32 next_twt_size;
+} __packed;
+
+struct wmi_twt_resume_dialog_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 dialog_id;
+ u32 sp_offset_us;
+ u32 next_twt_size;
+} __packed;
+
struct wmi_obss_spatial_reuse_params_cmd {
u32 tlv_header;
u32 pdev_id;
@@ -5240,6 +5347,21 @@ struct wmi_rfkill_state_change_ev {
u32 radio_state;
} __packed;
+enum wmi_debug_log_param {
+ WMI_DEBUG_LOG_PARAM_LOG_LEVEL = 0x1,
+ WMI_DEBUG_LOG_PARAM_VDEV_ENABLE,
+ WMI_DEBUG_LOG_PARAM_VDEV_DISABLE,
+ WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP,
+ WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP,
+ WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP,
+};
+
+struct wmi_debug_log_config_cmd_fixed_param {
+ u32 tlv_header;
+ u32 dbg_log_param;
+ u32 value;
+} __packed;
+
#define WMI_MAX_MEM_REQS 32
#define MAX_RADIOS 3
@@ -5546,6 +5668,14 @@ void ath11k_wmi_fw_stats_fill(struct ath11k *ar,
int ath11k_wmi_simulate_radar(struct ath11k *ar);
int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id);
int ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id);
+int ath11k_wmi_send_twt_add_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_add_dialog_params *params);
+int ath11k_wmi_send_twt_del_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_del_dialog_params *params);
+int ath11k_wmi_send_twt_pause_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_pause_dialog_params *params);
+int ath11k_wmi_send_twt_resume_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_resume_dialog_params *params);
int ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
struct ieee80211_he_obss_pd *he_obss_pd);
int ath11k_wmi_pdev_set_srg_bss_color_bitmap(struct ath11k *ar, u32 *bitmap);
@@ -5582,4 +5712,6 @@ int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar);
int ath11k_wmi_wow_enable(struct ath11k *ar);
int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar,
const u8 mac_addr[ETH_ALEN]);
+int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap,
+ struct ath11k_fw_dbglog *dbglog);
#endif
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 234ea939d316..f595204f493d 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1395,10 +1395,6 @@ struct ath5k_hw {
u32 ah_txq_imr_nofrm;
u32 ah_txq_isr_txok_all;
- u32 ah_txq_isr_txurn;
- u32 ah_txq_isr_qcborn;
- u32 ah_txq_isr_qcburn;
- u32 ah_txq_isr_qtrig;
u32 *ah_rf_banks;
size_t ah_rf_banks_size;
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index e6c52f7c26e7..d9e376eb040e 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -650,6 +650,7 @@ ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
*/
*interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr;
+ ah->ah_txq_isr_txok_all = 0;
/* We treat TXOK,TXDESC, TXERR and TXEOL
* the same way (schedule the tx tasklet)
@@ -670,13 +671,6 @@ ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
AR5K_SISR1_QCU_TXEOL);
- /* Currently this is not much useful since we treat
- * all queues the same way if we get a TXURN (update
- * tx trigger level) but we might need it later on*/
- if (pisr & AR5K_ISR_TXURN)
- ah->ah_txq_isr_txurn |= AR5K_REG_MS(sisr2,
- AR5K_SISR2_QCU_TXURN);
-
/* Misc Beacon related interrupts */
/* For AR5211 */
@@ -709,25 +703,16 @@ ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
*interrupt_mask |= AR5K_INT_BNR;
/* A queue got CBR overrun */
- if (unlikely(pisr & (AR5K_ISR_QCBRORN))) {
+ if (unlikely(pisr & (AR5K_ISR_QCBRORN)))
*interrupt_mask |= AR5K_INT_QCBRORN;
- ah->ah_txq_isr_qcborn |= AR5K_REG_MS(sisr3,
- AR5K_SISR3_QCBRORN);
- }
/* A queue got CBR underrun */
- if (unlikely(pisr & (AR5K_ISR_QCBRURN))) {
+ if (unlikely(pisr & (AR5K_ISR_QCBRURN)))
*interrupt_mask |= AR5K_INT_QCBRURN;
- ah->ah_txq_isr_qcburn |= AR5K_REG_MS(sisr3,
- AR5K_SISR3_QCBRURN);
- }
/* A queue got triggered */
- if (unlikely(pisr & (AR5K_ISR_QTRIG))) {
+ if (unlikely(pisr & (AR5K_ISR_QTRIG)))
*interrupt_mask |= AR5K_INT_QTRIG;
- ah->ah_txq_isr_qtrig |= AR5K_REG_MS(sisr4,
- AR5K_SISR4_QTRIG);
- }
data = pisr;
}
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 1fbc2c19848f..d444b3d70ba2 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -746,6 +746,9 @@ ath5k_eeprom_convert_pcal_info_5111(struct ath5k_hw *ah, int mode,
}
}
+ if (idx == AR5K_EEPROM_N_PD_CURVES)
+ goto err_out;
+
ee->ee_pd_gains[mode] = 1;
pd = &chinfo[pier].pd_curves[idx];
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index b22ed499f7ba..a56fab6232a9 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -839,7 +839,7 @@ static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
skb->protocol = eth_type_trans(skb, skb->dev);
- netif_rx_ni(skb);
+ netif_rx(skb);
}
static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index aba70f35e574..65e683effdcb 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -1217,6 +1217,7 @@ static int ath6kl_usb_pm_resume(struct usb_interface *interface)
static const struct usb_device_id ath6kl_usb_ids[] = {
{USB_DEVICE(0x0cf3, 0x9375)},
{USB_DEVICE(0x0cf3, 0x9374)},
+ {USB_DEVICE(0x04da, 0x390d)},
{ /* Terminating entry */ },
};
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index bd1ef6334997..3787b9fb0075 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -1750,7 +1750,6 @@ static int ath6kl_wmi_snr_threshold_event_rx(struct wmi *wmi, u8 *datap,
static int ath6kl_wmi_aplist_event_rx(struct wmi *wmi, u8 *datap, int len)
{
- u16 ap_info_entry_size;
struct wmi_aplist_event *ev = (struct wmi_aplist_event *) datap;
struct wmi_ap_info_v1 *ap_info_v1;
u8 index;
@@ -1759,14 +1758,12 @@ static int ath6kl_wmi_aplist_event_rx(struct wmi *wmi, u8 *datap, int len)
ev->ap_list_ver != APLIST_VER1)
return -EINVAL;
- ap_info_entry_size = sizeof(struct wmi_ap_info_v1);
ap_info_v1 = (struct wmi_ap_info_v1 *) ev->ap_list;
ath6kl_dbg(ATH6KL_DBG_WMI,
"number of APs in aplist event: %d\n", ev->num_ap);
- if (len < (int) (sizeof(struct wmi_aplist_event) +
- (ev->num_ap - 1) * ap_info_entry_size))
+ if (len < struct_size(ev, ap_list, ev->num_ap))
return -EINVAL;
/* AP list version 1 contents */
@@ -1959,21 +1956,15 @@ static int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
{
struct sk_buff *skb;
struct wmi_start_scan_cmd *sc;
- s8 size;
int i, ret;
- size = sizeof(struct wmi_start_scan_cmd);
-
if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
return -EINVAL;
if (num_chan > WMI_MAX_CHANNELS)
return -EINVAL;
- if (num_chan)
- size += sizeof(u16) * (num_chan - 1);
-
- skb = ath6kl_wmi_get_new_buf(size);
+ skb = ath6kl_wmi_get_new_buf(struct_size(sc, ch_list, num_chan));
if (!skb)
return -ENOMEM;
@@ -2008,7 +1999,7 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
struct ieee80211_supported_band *sband;
struct sk_buff *skb;
struct wmi_begin_scan_cmd *sc;
- s8 size, *supp_rates;
+ s8 *supp_rates;
int i, band, ret;
struct ath6kl *ar = wmi->parent_dev;
int num_rates;
@@ -2023,18 +2014,13 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
num_chan, ch_list);
}
- size = sizeof(struct wmi_begin_scan_cmd);
-
if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
return -EINVAL;
if (num_chan > WMI_MAX_CHANNELS)
return -EINVAL;
- if (num_chan)
- size += sizeof(u16) * (num_chan - 1);
-
- skb = ath6kl_wmi_get_new_buf(size);
+ skb = ath6kl_wmi_get_new_buf(struct_size(sc, ch_list, num_chan));
if (!skb)
return -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 784940ba4c90..672014973cee 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -863,7 +863,7 @@ struct wmi_begin_scan_cmd {
u8 num_ch;
/* channels in Mhz */
- __le16 ch_list[1];
+ __le16 ch_list[];
} __packed;
/* wmi_start_scan_cmd is to be deprecated. Use
@@ -889,7 +889,7 @@ struct wmi_start_scan_cmd {
u8 num_ch;
/* channels in Mhz */
- __le16 ch_list[1];
+ __le16 ch_list[];
} __packed;
/*
@@ -1373,7 +1373,7 @@ struct wmi_channel_list_reply {
u8 num_ch;
/* channel in Mhz */
- __le16 ch_list[1];
+ __le16 ch_list[];
} __packed;
/* List of Events (target to host) */
@@ -1545,7 +1545,7 @@ struct wmi_connect_event {
u8 beacon_ie_len;
u8 assoc_req_len;
u8 assoc_resp_len;
- u8 assoc_info[1];
+ u8 assoc_info[];
} __packed;
/* Disconnect Event */
@@ -1596,7 +1596,7 @@ struct wmi_disconnect_event {
u8 disconn_reason;
u8 assoc_resp_len;
- u8 assoc_info[1];
+ u8 assoc_info[];
} __packed;
/*
@@ -1637,7 +1637,7 @@ struct bss_bias {
struct bss_bias_info {
u8 num_bss;
- struct bss_bias bss_bias[0];
+ struct bss_bias bss_bias[];
} __packed;
struct low_rssi_scan_params {
@@ -1720,7 +1720,7 @@ struct wmi_neighbor_info {
struct wmi_neighbor_report_event {
u8 num_neighbors;
- struct wmi_neighbor_info neighbor[0];
+ struct wmi_neighbor_info neighbor[];
} __packed;
/* TKIP MIC Error Event */
@@ -1957,7 +1957,7 @@ union wmi_ap_info {
struct wmi_aplist_event {
u8 ap_list_ver;
u8 num_ap;
- union wmi_ap_info ap_list[1];
+ union wmi_ap_info ap_list[];
} __packed;
/* Developer Commands */
@@ -2051,7 +2051,7 @@ struct wmi_get_keepalive_cmd {
struct wmi_set_appie_cmd {
u8 mgmt_frm_type; /* enum wmi_mgmt_frame_type */
u8 ie_len;
- u8 ie_info[0];
+ u8 ie_info[];
} __packed;
struct wmi_set_ie_cmd {
@@ -2059,7 +2059,7 @@ struct wmi_set_ie_cmd {
u8 ie_field; /* enum wmi_ie_field_type */
u8 ie_len;
u8 reserved;
- u8 ie_info[0];
+ u8 ie_info[];
} __packed;
/* Notify the WSC registration status to the target */
@@ -2127,7 +2127,7 @@ struct wmi_add_wow_pattern_cmd {
u8 filter_list_id;
u8 filter_size;
u8 filter_offset;
- u8 filter[0];
+ u8 filter[];
} __packed;
struct wmi_del_wow_pattern_cmd {
@@ -2360,7 +2360,7 @@ struct wmi_send_action_cmd {
__le32 freq;
__le32 wait;
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
struct wmi_send_mgmt_cmd {
@@ -2369,7 +2369,7 @@ struct wmi_send_mgmt_cmd {
__le32 wait;
__le32 no_cck;
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
struct wmi_tx_status_event {
@@ -2389,7 +2389,7 @@ struct wmi_set_appie_extended_cmd {
u8 role_id;
u8 mgmt_frm_type;
u8 ie_len;
- u8 ie_info[0];
+ u8 ie_info[];
} __packed;
struct wmi_remain_on_chnl_event {
@@ -2406,18 +2406,18 @@ struct wmi_cancel_remain_on_chnl_event {
struct wmi_rx_action_event {
__le32 freq;
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
struct wmi_p2p_capabilities_event {
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
struct wmi_p2p_rx_probe_req_event {
__le32 freq;
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
#define P2P_FLAG_CAPABILITIES_REQ (0x00000001)
@@ -2431,7 +2431,7 @@ struct wmi_get_p2p_info {
struct wmi_p2p_info_event {
__le32 info_req_flags;
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
struct wmi_p2p_capabilities {
@@ -2450,7 +2450,7 @@ struct wmi_p2p_probe_response_cmd {
__le32 freq;
u8 destination_addr[ETH_ALEN];
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
/* Extended WMI (WMIX)
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index ef6f5ea06c1f..3ccf8cfc6b63 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -1071,8 +1071,9 @@ struct ath_softc {
#endif
#ifdef CONFIG_ATH9K_HWRNG
+ struct hwrng rng_ops;
u32 rng_last;
- struct task_struct *rng_task;
+ char rng_name[sizeof("ath9k_65535")];
#endif
};
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index e6b3cd49ea18..efb7889142d4 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -670,8 +670,6 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
int ath9k_hw_eeprom_init(struct ath_hw *ah)
{
- int status;
-
if (AR_SREV_9300_20_OR_LATER(ah))
ah->eep_ops = &eep_ar9300_ops;
else if (AR_SREV_9287(ah)) {
@@ -685,7 +683,5 @@ int ath9k_hw_eeprom_init(struct ath_hw *ah)
if (!ah->eep_ops->fill_eeprom(ah))
return -EIO;
- status = ah->eep_ops->check_eeprom(ah);
-
- return status;
+ return ah->eep_ops->check_eeprom(ah);
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 510e61e97dbc..994ec48b2f66 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -30,6 +30,7 @@ static int htc_issue_send(struct htc_target *target, struct sk_buff* skb,
hdr->endpoint_id = epid;
hdr->flags = flags;
hdr->payload_len = cpu_to_be16(len);
+ memset(hdr->control, 0, sizeof(hdr->control));
status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb);
@@ -272,6 +273,10 @@ int htc_connect_service(struct htc_target *target,
conn_msg->dl_pipeid = endpoint->dl_pipeid;
conn_msg->ul_pipeid = endpoint->ul_pipeid;
+ /* To prevent infoleak */
+ conn_msg->svc_meta_len = 0;
+ conn_msg->pad = 0;
+
ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0);
if (ret)
goto err;
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 39d46c203f6b..039bf0c35fbe 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -43,7 +43,7 @@ static bool ath_mci_add_profile(struct ath_common *common,
struct ath_mci_profile_info *info)
{
struct ath_mci_profile_info *entry;
- u8 voice_priority[] = { 110, 110, 110, 112, 110, 110, 114, 116, 118 };
+ static const u8 voice_priority[] = { 110, 110, 110, 112, 110, 110, 114, 116, 118 };
if ((mci->num_sco == ATH_MCI_MAX_SCO_PROFILE) &&
(info->type == MCI_GPM_COEX_PROFILE_VOICE))
diff --git a/drivers/net/wireless/ath/ath9k/rng.c b/drivers/net/wireless/ath/ath9k/rng.c
index f9d3d6eedd3c..cb5414265a9b 100644
--- a/drivers/net/wireless/ath/ath9k/rng.c
+++ b/drivers/net/wireless/ath/ath9k/rng.c
@@ -21,11 +21,6 @@
#include "hw.h"
#include "ar9003_phy.h"
-#define ATH9K_RNG_BUF_SIZE 320
-#define ATH9K_RNG_ENTROPY(x) (((x) * 8 * 10) >> 5) /* quality: 10/32 */
-
-static DECLARE_WAIT_QUEUE_HEAD(rng_queue);
-
static int ath9k_rng_data_read(struct ath_softc *sc, u32 *buf, u32 buf_size)
{
int i, j;
@@ -71,61 +66,56 @@ static u32 ath9k_rng_delay_get(u32 fail_stats)
return delay;
}
-static int ath9k_rng_kthread(void *data)
+static int ath9k_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
- int bytes_read;
- struct ath_softc *sc = data;
- u32 *rng_buf;
- u32 delay, fail_stats = 0;
-
- rng_buf = kmalloc_array(ATH9K_RNG_BUF_SIZE, sizeof(u32), GFP_KERNEL);
- if (!rng_buf)
- goto out;
-
- while (!kthread_should_stop()) {
- bytes_read = ath9k_rng_data_read(sc, rng_buf,
- ATH9K_RNG_BUF_SIZE);
- if (unlikely(!bytes_read)) {
- delay = ath9k_rng_delay_get(++fail_stats);
- wait_event_interruptible_timeout(rng_queue,
- kthread_should_stop(),
- msecs_to_jiffies(delay));
- continue;
+ struct ath_softc *sc = container_of(rng, struct ath_softc, rng_ops);
+ u32 fail_stats = 0, word;
+ int bytes_read = 0;
+
+ for (;;) {
+ if (max & ~3UL)
+ bytes_read = ath9k_rng_data_read(sc, buf, max >> 2);
+ if ((max & 3UL) && ath9k_rng_data_read(sc, &word, 1)) {
+ memcpy(buf + bytes_read, &word, max & 3UL);
+ bytes_read += max & 3UL;
+ memzero_explicit(&word, sizeof(word));
}
+ if (!wait || !max || likely(bytes_read) || fail_stats > 110)
+ break;
- fail_stats = 0;
-
- /* sleep until entropy bits under write_wakeup_threshold */
- add_hwgenerator_randomness((void *)rng_buf, bytes_read,
- ATH9K_RNG_ENTROPY(bytes_read));
+ msleep_interruptible(ath9k_rng_delay_get(++fail_stats));
}
- kfree(rng_buf);
-out:
- sc->rng_task = NULL;
-
- return 0;
+ if (wait && !bytes_read && max)
+ bytes_read = -EIO;
+ return bytes_read;
}
void ath9k_rng_start(struct ath_softc *sc)
{
+ static atomic_t serial = ATOMIC_INIT(0);
struct ath_hw *ah = sc->sc_ah;
- if (sc->rng_task)
+ if (sc->rng_ops.read)
return;
if (!AR_SREV_9300_20_OR_LATER(ah))
return;
- sc->rng_task = kthread_run(ath9k_rng_kthread, sc, "ath9k-hwrng");
- if (IS_ERR(sc->rng_task))
- sc->rng_task = NULL;
+ snprintf(sc->rng_name, sizeof(sc->rng_name), "ath9k_%u",
+ (atomic_inc_return(&serial) - 1) & U16_MAX);
+ sc->rng_ops.name = sc->rng_name;
+ sc->rng_ops.read = ath9k_rng_read;
+ sc->rng_ops.quality = 320;
+
+ if (devm_hwrng_register(sc->dev, &sc->rng_ops))
+ sc->rng_ops.read = NULL;
}
void ath9k_rng_stop(struct ath_softc *sc)
{
- if (sc->rng_task) {
- kthread_stop(sc->rng_task);
- sc->rng_task = NULL;
+ if (sc->rng_ops.read) {
+ devm_hwrng_unregister(sc->dev, &sc->rng_ops);
+ sc->rng_ops.read = NULL;
}
}
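
The rng.c rewrite above drops the private kthread plus add_hwgenerator_randomness() loop in favour of registering ath9k as a hwrng provider and letting the hwrng core do the polling. A minimal sketch of that interface, with a hypothetical read_hw_entropy() helper standing in for the hardware access:

#include <linux/hw_random.h>

static int demo_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	/* return bytes written to buf or a negative errno; when @wait is
	 * false the callback may return 0 rather than block */
	return read_hw_entropy(buf, max);	/* hypothetical helper */
}

static struct hwrng demo_rng = {
	.name    = "demo_rng",
	.read    = demo_rng_read,
	.quality = 320,	/* estimated entropy, in bits per 1024 bits of output */
};

/* devm_hwrng_register(dev, &demo_rng) ties the registration to the device
 * lifetime; the stop path only needs devm_hwrng_unregister() when the
 * provider must go away before the device does, as in ath9k_rng_stop(). */
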
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 84a8ce0784b1..ba29b4aebe9f 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -458,7 +458,6 @@ struct ar9170 {
# define CARL9170_HWRNG_CACHE_SIZE CARL9170_MAX_CMD_PAYLOAD_LEN
struct {
struct hwrng rng;
- bool initialized;
char name[30 + 1];
u16 cache[CARL9170_HWRNG_CACHE_SIZE / sizeof(u16)];
unsigned int cache_idx;
diff --git a/drivers/net/wireless/ath/carl9170/fwdesc.h b/drivers/net/wireless/ath/carl9170/fwdesc.h
index 503b21abbba5..10acb6ad30d0 100644
--- a/drivers/net/wireless/ath/carl9170/fwdesc.h
+++ b/drivers/net/wireless/ath/carl9170/fwdesc.h
@@ -149,7 +149,7 @@ struct carl9170fw_fix_entry {
struct carl9170fw_fix_desc {
struct carl9170fw_desc_head head;
- struct carl9170fw_fix_entry data[0];
+ struct carl9170fw_fix_entry data[];
} __packed;
#define CARL9170FW_FIX_DESC_SIZE \
(sizeof(struct carl9170fw_fix_desc))
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 49f7ee1c912b..76e84adf57c1 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1412,7 +1412,7 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
return -EOPNOTSUPP;
tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!tid_info)
return -ENOMEM;
@@ -1494,7 +1494,7 @@ static int carl9170_register_wps_button(struct ar9170 *ar)
if (!(ar->features & CARL9170_WPS_BUTTON))
return 0;
- input = input_allocate_device();
+ input = devm_input_allocate_device(&ar->udev->dev);
if (!input)
return -ENOMEM;
@@ -1512,10 +1512,8 @@ static int carl9170_register_wps_button(struct ar9170 *ar)
input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);
err = input_register_device(input);
- if (err) {
- input_free_device(input);
+ if (err)
return err;
- }
ar->wps.pbc = input;
return 0;
@@ -1539,7 +1537,7 @@ static int carl9170_rng_get(struct ar9170 *ar)
BUILD_BUG_ON(RB > CARL9170_MAX_CMD_PAYLOAD_LEN);
- if (!IS_ACCEPTING_CMD(ar) || !ar->rng.initialized)
+ if (!IS_ACCEPTING_CMD(ar))
return -EAGAIN;
count = ARRAY_SIZE(ar->rng.cache);
@@ -1585,14 +1583,6 @@ static int carl9170_rng_read(struct hwrng *rng, u32 *data)
return sizeof(u16);
}
-static void carl9170_unregister_hwrng(struct ar9170 *ar)
-{
- if (ar->rng.initialized) {
- hwrng_unregister(&ar->rng.rng);
- ar->rng.initialized = false;
- }
-}
-
static int carl9170_register_hwrng(struct ar9170 *ar)
{
int err;
@@ -1603,25 +1593,14 @@ static int carl9170_register_hwrng(struct ar9170 *ar)
ar->rng.rng.data_read = carl9170_rng_read;
ar->rng.rng.priv = (unsigned long)ar;
- if (WARN_ON(ar->rng.initialized))
- return -EALREADY;
-
- err = hwrng_register(&ar->rng.rng);
+ err = devm_hwrng_register(&ar->udev->dev, &ar->rng.rng);
if (err) {
dev_err(&ar->udev->dev, "Failed to register the random "
"number generator (%d)\n", err);
return err;
}
- ar->rng.initialized = true;
-
- err = carl9170_rng_get(ar);
- if (err) {
- carl9170_unregister_hwrng(ar);
- return err;
- }
-
- return 0;
+ return carl9170_rng_get(ar);
}
#endif /* CONFIG_CARL9170_HWRNG */
@@ -1914,7 +1893,7 @@ static int carl9170_parse_eeprom(struct ar9170 *ar)
WARN_ON(!(tx_streams >= 1 && tx_streams <=
IEEE80211_HT_MCS_TX_MAX_STREAMS));
- tx_params = (tx_streams - 1) <<
+ tx_params |= (tx_streams - 1) <<
IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
@@ -1937,7 +1916,8 @@ static int carl9170_parse_eeprom(struct ar9170 *ar)
if (!bands)
return -EINVAL;
- ar->survey = kcalloc(chans, sizeof(struct survey_info), GFP_KERNEL);
+ ar->survey = devm_kcalloc(&ar->udev->dev, chans,
+ sizeof(struct survey_info), GFP_KERNEL);
if (!ar->survey)
return -ENOMEM;
ar->num_channels = chans;
@@ -1964,11 +1944,7 @@ int carl9170_register(struct ar9170 *ar)
struct ath_regulatory *regulatory = &ar->common.regulatory;
int err = 0, i;
- if (WARN_ON(ar->mem_bitmap))
- return -EINVAL;
-
- ar->mem_bitmap = bitmap_zalloc(ar->fw.mem_blocks, GFP_KERNEL);
-
+ ar->mem_bitmap = devm_bitmap_zalloc(&ar->udev->dev, ar->fw.mem_blocks, GFP_KERNEL);
if (!ar->mem_bitmap)
return -ENOMEM;
@@ -2057,17 +2033,6 @@ void carl9170_unregister(struct ar9170 *ar)
carl9170_debugfs_unregister(ar);
#endif /* CONFIG_CARL9170_DEBUGFS */
-#ifdef CONFIG_CARL9170_WPC
- if (ar->wps.pbc) {
- input_unregister_device(ar->wps.pbc);
- ar->wps.pbc = NULL;
- }
-#endif /* CONFIG_CARL9170_WPC */
-
-#ifdef CONFIG_CARL9170_HWRNG
- carl9170_unregister_hwrng(ar);
-#endif /* CONFIG_CARL9170_HWRNG */
-
carl9170_cancel_worker(ar);
cancel_work_sync(&ar->restart_work);
@@ -2082,12 +2047,6 @@ void carl9170_free(struct ar9170 *ar)
kfree_skb(ar->rx_failover);
ar->rx_failover = NULL;
- bitmap_free(ar->mem_bitmap);
- ar->mem_bitmap = NULL;
-
- kfree(ar->survey);
- ar->survey = NULL;
-
mutex_destroy(&ar->mutex);
ieee80211_free_hw(ar->hw);
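
Most of the carl9170 churn above is a conversion to device-managed allocations, which is why the matching frees disappear from carl9170_unregister() and carl9170_free(). A minimal sketch of the pattern in a probe-style path, with invented sizes and names:

#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/input.h>
#include <linux/slab.h>

static int demo_probe(struct device *dev)
{
	struct input_dev *input;
	unsigned long *bitmap;
	u32 *survey;

	/* all three allocations are released automatically when the
	 * device is unbound, so no explicit free/unregister is needed */
	input  = devm_input_allocate_device(dev);
	bitmap = devm_bitmap_zalloc(dev, 128, GFP_KERNEL);
	survey = devm_kcalloc(dev, 14, sizeof(*survey), GFP_KERNEL);
	if (!input || !bitmap || !survey)
		return -ENOMEM;

	/* ... set up and register the input device, use bitmap/survey ... */
	return 0;
}
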
diff --git a/drivers/net/wireless/ath/carl9170/wlan.h b/drivers/net/wireless/ath/carl9170/wlan.h
index bb73553fd7c2..0a4e42e806b9 100644
--- a/drivers/net/wireless/ath/carl9170/wlan.h
+++ b/drivers/net/wireless/ath/carl9170/wlan.h
@@ -327,7 +327,7 @@ struct _carl9170_tx_superdesc {
struct _carl9170_tx_superframe {
struct _carl9170_tx_superdesc s;
struct _ar9170_tx_hwdesc f;
- u8 frame_data[0];
+ u8 frame_data[];
} __packed __aligned(4);
#define CARL9170_TX_SUPERDESC_LEN 24
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index 75cb53a3ec15..27f4d74a41c8 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -197,7 +197,7 @@ static void channel_detector_exit(struct dfs_pattern_detector *dpd,
static struct channel_detector *
channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
{
- u32 sz, i;
+ u32 i;
struct channel_detector *cd;
cd = kmalloc(sizeof(*cd), GFP_ATOMIC);
@@ -206,8 +206,8 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
INIT_LIST_HEAD(&cd->head);
cd->freq = freq;
- sz = sizeof(cd->detectors) * dpd->num_radar_types;
- cd->detectors = kzalloc(sz, GFP_ATOMIC);
+ cd->detectors = kmalloc_array(dpd->num_radar_types,
+ sizeof(*cd->detectors), GFP_ATOMIC);
if (cd->detectors == NULL)
goto fail;
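
The hunk above replaces a hand-computed sizeof() * n with kmalloc_array(), which returns NULL if the multiplication would overflow instead of allocating a short buffer. A small sketch with an invented element type:

#include <linux/slab.h>

struct demo_elem {
	u32 a;
	u32 b;
};

static struct demo_elem *demo_alloc_elems(size_t n)
{
	/* kmalloc_array(n, size, flags) checks n * size for overflow;
	 * kcalloc() is the same but also zeroes the memory */
	return kmalloc_array(n, sizeof(struct demo_elem), GFP_ATOMIC);
}
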
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index b2400e2417a5..f15e7bd690b5 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -667,14 +667,14 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
/*
* Some users have reported their EEPROM programmed with
- * 0x8000 or 0x0 set, this is not a supported regulatory
- * domain but since we have more than one user with it we
- * need a solution for them. We default to 0x64, which is
- * the default Atheros world regulatory domain.
+ * 0x8000 set, this is not a supported regulatory domain
+ * but since we have more than one user with it we need
+ * a solution for them. We default to 0x64, which is the
+ * default Atheros world regulatory domain.
*/
static void ath_regd_sanitize(struct ath_regulatory *reg)
{
- if (reg->current_rd != COUNTRY_ERD_FLAG && reg->current_rd != 0)
+ if (reg->current_rd != COUNTRY_ERD_FLAG)
return;
printk(KERN_DEBUG "ath: EEPROM regdomain sanitized\n");
reg->current_rd = 0x64;
diff --git a/drivers/net/wireless/ath/spectral_common.h b/drivers/net/wireless/ath/spectral_common.h
index e14f374f97d4..fe187c1fbeb0 100644
--- a/drivers/net/wireless/ath/spectral_common.h
+++ b/drivers/net/wireless/ath/spectral_common.h
@@ -108,7 +108,7 @@ struct fft_sample_ath10k {
u8 avgpwr_db;
u8 max_exp;
- u8 data[0];
+ u8 data[];
} __packed;
struct fft_sample_ath11k {
@@ -123,7 +123,7 @@ struct fft_sample_ath11k {
__be32 tsf;
__be32 noise;
- u8 data[0];
+ u8 data[];
} __packed;
#endif /* SPECTRAL_COMMON_H */
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 9575d7373bf2..95ea7d040d8c 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -331,6 +331,7 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
INIT_LIST_HEAD(&wcn->vif_list);
spin_lock_init(&wcn->dxe_lock);
+ spin_lock_init(&wcn->survey_lock);
return 0;
@@ -392,11 +393,41 @@ static void wcn36xx_change_opchannel(struct wcn36xx *wcn, int ch)
{
struct ieee80211_vif *vif = NULL;
struct wcn36xx_vif *tmp;
+ struct ieee80211_supported_band *band;
+ struct ieee80211_channel *channel = NULL;
+ unsigned long flags;
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(wcn->hw->wiphy->bands); i++) {
+ band = wcn->hw->wiphy->bands[i];
+ if (!band)
+ break;
+ for (j = 0; j < band->n_channels; j++) {
+ if (HW_VALUE_CHANNEL(band->channels[j].hw_value) == ch) {
+ channel = &band->channels[j];
+ break;
+ }
+ }
+ if (channel)
+ break;
+ }
+
+ if (!channel) {
+ wcn36xx_err("Cannot tune to channel %d\n", ch);
+ return;
+ }
+
+ spin_lock_irqsave(&wcn->survey_lock, flags);
+ wcn->band = band;
+ wcn->channel = channel;
+ spin_unlock_irqrestore(&wcn->survey_lock, flags);
list_for_each_entry(tmp, &wcn->vif_list, list) {
vif = wcn36xx_priv_to_vif(tmp);
wcn36xx_smd_switch_channel(wcn, vif, ch);
}
+
+ return;
}
static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
@@ -1326,6 +1357,49 @@ static void wcn36xx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
}
+static int wcn36xx_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct ieee80211_supported_band *sband;
+ struct wcn36xx_chan_survey *chan_survey;
+ int band_idx;
+ unsigned long flags;
+
+ sband = wcn->hw->wiphy->bands[NL80211_BAND_2GHZ];
+ band_idx = idx;
+ if (band_idx >= sband->n_channels) {
+ band_idx -= sband->n_channels;
+ sband = wcn->hw->wiphy->bands[NL80211_BAND_5GHZ];
+ }
+
+ if (!sband || band_idx >= sband->n_channels)
+ return -ENOENT;
+
+ spin_lock_irqsave(&wcn->survey_lock, flags);
+
+ chan_survey = &wcn->chan_survey[idx];
+ survey->channel = &sband->channels[band_idx];
+ survey->noise = chan_survey->rssi - chan_survey->snr;
+ survey->filled = 0;
+
+ if (chan_survey->rssi > -100 && chan_survey->rssi < 0)
+ survey->filled |= SURVEY_INFO_NOISE_DBM;
+
+ if (survey->channel == wcn->channel)
+ survey->filled |= SURVEY_INFO_IN_USE;
+
+ spin_unlock_irqrestore(&wcn->survey_lock, flags);
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "ch %d rssi %d snr %d noise %d filled %x freq %d\n",
+ HW_VALUE_CHANNEL(survey->channel->hw_value),
+ chan_survey->rssi, chan_survey->snr, survey->noise,
+ survey->filled, survey->channel->center_freq);
+
+ return 0;
+}
+
static const struct ieee80211_ops wcn36xx_ops = {
.start = wcn36xx_start,
.stop = wcn36xx_stop,
@@ -1354,6 +1428,7 @@ static const struct ieee80211_ops wcn36xx_ops = {
.ipv6_addr_change = wcn36xx_ipv6_addr_change,
#endif
.flush = wcn36xx_flush,
+ .get_survey = wcn36xx_get_survey,
CFG80211_TESTMODE_CMD(wcn36xx_tm_cmd)
};
@@ -1446,25 +1521,20 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
{
struct device_node *mmio_node;
struct device_node *iris_node;
- struct resource *res;
int index;
int ret;
/* Set TX IRQ */
- res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "tx");
- if (!res) {
- wcn36xx_err("failed to get tx_irq\n");
- return -ENOENT;
- }
- wcn->tx_irq = res->start;
+ ret = platform_get_irq_byname(pdev, "tx");
+ if (ret < 0)
+ return ret;
+ wcn->tx_irq = ret;
/* Set RX IRQ */
- res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "rx");
- if (!res) {
- wcn36xx_err("failed to get rx_irq\n");
- return -ENOENT;
- }
- wcn->rx_irq = res->start;
+ ret = platform_get_irq_byname(pdev, "rx");
+ if (ret < 0)
+ return ret;
+ wcn->rx_irq = ret;
/* Acquire SMSM tx enable handle */
wcn->tx_enable_state = qcom_smem_state_get(&pdev->dev,
@@ -1513,6 +1583,9 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
if (iris_node) {
if (of_device_is_compatible(iris_node, "qcom,wcn3620"))
wcn->rf_id = RF_IRIS_WCN3620;
+ if (of_device_is_compatible(iris_node, "qcom,wcn3660") ||
+ of_device_is_compatible(iris_node, "qcom,wcn3660b"))
+ wcn->rf_id = RF_IRIS_WCN3660;
if (of_device_is_compatible(iris_node, "qcom,wcn3680"))
wcn->rf_id = RF_IRIS_WCN3680;
of_node_put(iris_node);
@@ -1535,6 +1608,7 @@ static int wcn36xx_probe(struct platform_device *pdev)
void *wcnss;
int ret;
const u8 *addr;
+ int n_channels;
wcn36xx_dbg(WCN36XX_DBG_MAC, "platform probe\n");
@@ -1562,6 +1636,13 @@ static int wcn36xx_probe(struct platform_device *pdev)
goto out_wq;
}
+ n_channels = wcn_band_2ghz.n_channels + wcn_band_5ghz.n_channels;
+ wcn->chan_survey = devm_kmalloc(wcn->dev, n_channels, GFP_KERNEL);
+ if (!wcn->chan_survey) {
+ ret = -ENOMEM;
+ goto out_wq;
+ }
+
ret = dma_set_mask_and_coherent(wcn->dev, DMA_BIT_MASK(32));
if (ret < 0) {
wcn36xx_err("failed to set DMA mask: %d\n", ret);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index caeb68901326..59ad332156ae 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -3347,7 +3347,7 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
case WCN36XX_HAL_PRINT_REG_INFO_IND:
case WCN36XX_HAL_SCAN_OFFLOAD_IND:
- msg_ind = kmalloc(sizeof(*msg_ind) + len, GFP_ATOMIC);
+ msg_ind = kmalloc(struct_size(msg_ind, msg, len), GFP_ATOMIC);
if (!msg_ind) {
wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n",
msg_header->msg_type);
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index dd58dde8c836..df749b114568 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -23,6 +23,11 @@ static inline int get_rssi0(struct wcn36xx_rx_bd *bd)
return 100 - ((bd->phy_stat0 >> 24) & 0xff);
}
+static inline int get_snr(struct wcn36xx_rx_bd *bd)
+{
+ return ((bd->phy_stat1 >> 24) & 0xff);
+}
+
struct wcn36xx_rate {
u16 bitrate;
u16 mcs_or_legacy_index;
@@ -266,6 +271,34 @@ static void __skb_queue_purge_irq(struct sk_buff_head *list)
dev_kfree_skb_irq(skb);
}
+static void wcn36xx_update_survey(struct wcn36xx *wcn, int rssi, int snr,
+ int band, int freq)
+{
+ static struct ieee80211_channel *channel;
+ struct ieee80211_supported_band *sband;
+ int idx;
+ int i;
+
+ idx = 0;
+ if (band == NL80211_BAND_5GHZ)
+ idx = wcn->hw->wiphy->bands[NL80211_BAND_2GHZ]->n_channels;
+
+ sband = wcn->hw->wiphy->bands[band];
+ channel = sband->channels;
+
+ for (i = 0; i < sband->n_channels; i++, channel++) {
+ if (channel->center_freq == freq) {
+ idx += i;
+ break;
+ }
+ }
+
+ spin_lock(&wcn->survey_lock);
+ wcn->chan_survey[idx].rssi = rssi;
+ wcn->chan_survey[idx].snr = snr;
+ spin_unlock(&wcn->survey_lock);
+}
+
int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
{
struct ieee80211_rx_status status;
@@ -343,6 +376,9 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
status.freq = WCN36XX_CENTER_FREQ(wcn);
}
+ wcn36xx_update_survey(wcn, status.signal, get_snr(bd),
+ status.band, status.freq);
+
if (bd->rate_id < ARRAY_SIZE(wcn36xx_rate_table)) {
rate = &wcn36xx_rate_table[bd->rate_id];
status.encoding = rate->encoding;
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index fbd0558c2c19..9aa08b636d08 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -97,6 +97,7 @@ enum wcn36xx_ampdu_state {
#define RF_UNKNOWN 0x0000
#define RF_IRIS_WCN3620 0x3620
+#define RF_IRIS_WCN3660 0x3660
#define RF_IRIS_WCN3680 0x3680
static inline void buff_to_be(u32 *buf, size_t len)
@@ -194,7 +195,14 @@ struct wcn36xx_sta {
enum wcn36xx_ampdu_state ampdu_state[16];
int non_agg_frame_ct;
};
+
struct wcn36xx_dxe_ch;
+
+struct wcn36xx_chan_survey {
+ s8 rssi;
+ u8 snr;
+};
+
struct wcn36xx {
struct ieee80211_hw *hw;
struct device *dev;
@@ -281,6 +289,12 @@ struct wcn36xx {
/* Debug file system entry */
struct wcn36xx_dfs_entry dfs;
#endif /* CONFIG_WCN36XX_DEBUGFS */
+
+ struct ieee80211_supported_band *band;
+ struct ieee80211_channel *channel;
+
+ spinlock_t survey_lock; /* protects chan_survey */
+ struct wcn36xx_chan_survey *chan_survey;
};
static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index cc830c795b33..5704defd7be1 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -958,7 +958,7 @@ void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
if (gro)
napi_gro_receive(&wil->napi_rx, skb);
else
- netif_rx_ni(skb);
+ netif_rx(skb);
}
ndev->stats.rx_packets++;
stats->rx_packets++;
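
The netif_rx_ni() to netif_rx() switches in this series follow the core change that made netif_rx() safe from any context (it handles the bottom-half bookkeeping itself), so the _ni variant is no longer needed. A minimal sketch of an rx delivery path, with invented names:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void demo_deliver(struct net_device *ndev, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, ndev);
	/* callable from process, softirq or hardirq context alike */
	netif_rx(skb);
}
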
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index dd8abbb28849..98b4c189eecc 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -1199,7 +1199,7 @@ static void wmi_evt_eapol_rx(struct wil6210_vif *vif, int id, void *d, int len)
eth->h_proto = cpu_to_be16(ETH_P_PAE);
skb_put_data(skb, evt->eapol, eapol_len);
skb->protocol = eth_type_trans(skb, ndev);
- if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) {
+ if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += sz;
if (stats) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
index 3984fd7d918e..2c95a08a5871 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
@@ -397,9 +397,9 @@ brcmf_proto_bcdc_add_tdls_peer(struct brcmf_pub *drvr, int ifidx,
}
static void brcmf_proto_bcdc_rxreorder(struct brcmf_if *ifp,
- struct sk_buff *skb, bool inirq)
+ struct sk_buff *skb)
{
- brcmf_fws_rxreorder(ifp, skb, inirq);
+ brcmf_fws_rxreorder(ifp, skb);
}
static void
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index ba52318615ae..f0ad1e23f3c8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -16,6 +16,7 @@
#include <brcmu_utils.h>
#include <defs.h>
#include <brcmu_wifi.h>
+#include <brcm_hw_ids.h>
#include "core.h"
#include "debug.h"
#include "tracepoint.h"
@@ -4622,7 +4623,7 @@ exit:
s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif)
{
- s32 pktflags[] = {
+ static const s32 pktflags[] = {
BRCMF_VNDR_IE_PRBREQ_FLAG,
BRCMF_VNDR_IE_PRBRSP_FLAG,
BRCMF_VNDR_IE_BEACON_FLAG
@@ -7476,6 +7477,16 @@ int brcmf_cfg80211_wait_vif_event(struct brcmf_cfg80211_info *cfg,
vif_event_equals(event, action), timeout);
}
+static bool brmcf_use_iso3166_ccode_fallback(struct brcmf_pub *drvr)
+{
+ switch (drvr->bus_if->chip) {
+ case BRCM_CC_4345_CHIP_ID:
+ return true;
+ default:
+ return false;
+ }
+}
+
static s32 brcmf_translate_country_code(struct brcmf_pub *drvr, char alpha2[2],
struct brcmf_fil_country_le *ccreq)
{
@@ -7484,18 +7495,28 @@ static s32 brcmf_translate_country_code(struct brcmf_pub *drvr, char alpha2[2],
s32 found_index;
int i;
- country_codes = drvr->settings->country_codes;
- if (!country_codes) {
- brcmf_dbg(TRACE, "No country codes configured for device\n");
- return -EINVAL;
- }
-
if ((alpha2[0] == ccreq->country_abbrev[0]) &&
(alpha2[1] == ccreq->country_abbrev[1])) {
brcmf_dbg(TRACE, "Country code already set\n");
return -EAGAIN;
}
+ country_codes = drvr->settings->country_codes;
+ if (!country_codes) {
+ if (brmcf_use_iso3166_ccode_fallback(drvr)) {
+ brcmf_dbg(TRACE, "No country codes configured for device, using ISO3166 code and 0 rev\n");
+ memset(ccreq, 0, sizeof(*ccreq));
+ ccreq->country_abbrev[0] = alpha2[0];
+ ccreq->country_abbrev[1] = alpha2[1];
+ ccreq->ccode[0] = alpha2[0];
+ ccreq->ccode[1] = alpha2[1];
+ return 0;
+ }
+
+ brcmf_dbg(TRACE, "No country codes configured for device\n");
+ return -EINVAL;
+ }
+
found_index = -1;
for (i = 0; i < country_codes->table_size; i++) {
cc = &country_codes->table[i];
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index 1ee49f9e325d..4ec7773b6906 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -704,6 +704,7 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
{
switch (ci->pub.chip) {
case BRCM_CC_4345_CHIP_ID:
+ case BRCM_CC_43454_CHIP_ID:
return 0x198000;
case BRCM_CC_4335_CHIP_ID:
case BRCM_CC_4339_CHIP_ID:
@@ -1401,6 +1402,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
case BRCM_CC_4354_CHIP_ID:
case BRCM_CC_4356_CHIP_ID:
case BRCM_CC_4345_CHIP_ID:
+ case BRCM_CC_43454_CHIP_ID:
/* explicitly check SR engine enable bit */
pmu_cc3_mask = BIT(2);
fallthrough;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index fed9cd5f29a2..26fab4bee22c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -400,7 +400,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
}
-void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq)
+void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
{
/* Most of Broadcom's firmwares send 802.11f ADD frame every time a new
* STA connects to the AP interface. This is an obsoleted standard most
@@ -423,15 +423,7 @@ void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq)
ifp->ndev->stats.rx_packets++;
brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
- if (inirq) {
- netif_rx(skb);
- } else {
- /* If the receive is not processed inside an ISR,
- * the softirqd must be woken explicitly to service
- * the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
- */
- netif_rx_ni(skb);
- }
+ netif_rx(skb);
}
void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb)
@@ -480,7 +472,7 @@ void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb)
skb->pkt_type = PACKET_OTHERHOST;
skb->protocol = htons(ETH_P_802_2);
- brcmf_netif_rx(ifp, skb, false);
+ brcmf_netif_rx(ifp, skb);
}
static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
@@ -515,7 +507,7 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event,
return;
if (brcmf_proto_is_reorder_skb(skb)) {
- brcmf_proto_rxreorder(ifp, skb, inirq);
+ brcmf_proto_rxreorder(ifp, skb);
} else {
/* Process special event packets */
if (handle_event) {
@@ -524,7 +516,7 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event,
brcmf_fweh_process_skb(ifp->drvr, skb,
BCMILCP_SUBTYPE_VENDOR_LONG, gfp);
}
- brcmf_netif_rx(ifp, skb, inirq);
+ brcmf_netif_rx(ifp, skb);
}
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index 8212c9de14f1..340346c122d3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -208,7 +208,7 @@ void brcmf_remove_interface(struct brcmf_if *ifp, bool locked);
void brcmf_txflowblock_if(struct brcmf_if *ifp,
enum brcmf_netif_stop_reason reason, bool state);
void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
-void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
+void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb);
void brcmf_net_detach(struct net_device *ndev, bool locked);
int brcmf_net_mon_attach(struct brcmf_if *ifp);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 7c68d9849324..d2ac844e1e9f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -248,7 +248,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
brcmf_feat_firmware_capabilities(ifp);
memset(&gscan_cfg, 0, sizeof(gscan_cfg));
if (drvr->bus_if->chip != BRCM_CC_43430_CHIP_ID &&
- drvr->bus_if->chip != BRCM_CC_4345_CHIP_ID)
+ drvr->bus_if->chip != BRCM_CC_4345_CHIP_ID &&
+ drvr->bus_if->chip != BRCM_CC_43454_CHIP_ID)
brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN,
"pfn_gscan_cfg",
&gscan_cfg, sizeof(gscan_cfg));
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index d99140960a82..dcbe55b56e43 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -207,6 +207,8 @@ static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
size = BRCMF_FW_MAX_NVRAM_SIZE;
else
size = data_len;
+ /* Add space for properties we may add */
+ size += strlen(BRCMF_FW_DEFAULT_BOARDREV) + 1;
/* Alloc for extra 0 byte + roundup by 4 + length field */
size += 1 + 3 + sizeof(u32);
nvp->nvram = kzalloc(size, GFP_KERNEL);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
index d5578ca681bb..72fe8bce6eaf 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
@@ -192,7 +192,7 @@ brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
}
static u32
-brcmf_create_iovar(char *name, const char *data, u32 datalen,
+brcmf_create_iovar(const char *name, const char *data, u32 datalen,
char *buf, u32 buflen)
{
u32 len;
@@ -213,7 +213,7 @@ brcmf_create_iovar(char *name, const char *data, u32 datalen,
s32
-brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
+brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name, const void *data,
u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
@@ -241,7 +241,7 @@ brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
}
s32
-brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
+brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data,
u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
@@ -272,7 +272,7 @@ brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
}
s32
-brcmf_fil_iovar_int_set(struct brcmf_if *ifp, char *name, u32 data)
+brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data)
{
__le32 data_le = cpu_to_le32(data);
@@ -280,7 +280,7 @@ brcmf_fil_iovar_int_set(struct brcmf_if *ifp, char *name, u32 data)
}
s32
-brcmf_fil_iovar_int_get(struct brcmf_if *ifp, char *name, u32 *data)
+brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
{
__le32 data_le = cpu_to_le32(*data);
s32 err;
@@ -292,7 +292,7 @@ brcmf_fil_iovar_int_get(struct brcmf_if *ifp, char *name, u32 *data)
}
static u32
-brcmf_create_bsscfg(s32 bsscfgidx, char *name, char *data, u32 datalen,
+brcmf_create_bsscfg(s32 bsscfgidx, const char *name, char *data, u32 datalen,
char *buf, u32 buflen)
{
const s8 *prefix = "bsscfg:";
@@ -337,7 +337,7 @@ brcmf_create_bsscfg(s32 bsscfgidx, char *name, char *data, u32 datalen,
}
s32
-brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name,
+brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name,
void *data, u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
@@ -366,7 +366,7 @@ brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name,
}
s32
-brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name,
+brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name,
void *data, u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
@@ -396,7 +396,7 @@ brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name,
}
s32
-brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, char *name, u32 data)
+brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data)
{
__le32 data_le = cpu_to_le32(data);
@@ -405,7 +405,7 @@ brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, char *name, u32 data)
}
s32
-brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, char *name, u32 *data)
+brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
{
__le32 data_le = cpu_to_le32(*data);
s32 err;
@@ -417,7 +417,7 @@ brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, char *name, u32 *data)
return err;
}
-static u32 brcmf_create_xtlv(char *name, u16 id, char *data, u32 len,
+static u32 brcmf_create_xtlv(const char *name, u16 id, char *data, u32 len,
char *buf, u32 buflen)
{
u32 iolen;
@@ -438,7 +438,7 @@ static u32 brcmf_create_xtlv(char *name, u16 id, char *data, u32 len,
return iolen;
}
-s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, char *name, u16 id,
+s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
@@ -466,7 +466,7 @@ s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, char *name, u16 id,
return err;
}
-s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, char *name, u16 id,
+s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
@@ -495,7 +495,7 @@ s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, char *name, u16 id,
return err;
}
-s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, char *name, u16 id, u32 data)
+s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id, u32 data)
{
__le32 data_le = cpu_to_le32(data);
@@ -503,7 +503,7 @@ s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, char *name, u16 id, u32 data)
sizeof(data_le));
}
-s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, char *name, u16 id, u32 *data)
+s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id, u32 *data)
{
__le32 data_le = cpu_to_le32(*data);
s32 err;
@@ -514,12 +514,12 @@ s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, char *name, u16 id, u32 *data)
return err;
}
-s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, char *name, u16 id, u8 *data)
+s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id, u8 *data)
{
return brcmf_fil_xtlv_data_get(ifp, name, id, data, sizeof(*data));
}
-s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, char *name, u16 id, u16 *data)
+s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id, u16 *data)
{
__le16 data_le = cpu_to_le16(*data);
s32 err;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
index cb26f8c59c21..bc693157c4b1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
@@ -84,26 +84,26 @@ s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data);
-s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
+s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name, const void *data,
u32 len);
-s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
+s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data,
u32 len);
-s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, char *name, u32 data);
-s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, char *name, u32 *data);
+s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data);
+s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data);
-s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name, void *data,
+s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name, void *data,
u32 len);
-s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name, void *data,
+s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name, void *data,
u32 len);
-s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, char *name, u32 data);
-s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, char *name, u32 *data);
-s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, char *name, u16 id,
+s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data);
+s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data);
+s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len);
-s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, char *name, u16 id,
+s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len);
-s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, char *name, u16 id, u32 data);
-s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, char *name, u16 id, u32 *data);
-s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, char *name, u16 id, u8 *data);
-s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, char *name, u16 id, u16 *data);
+s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id, u32 data);
+s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id, u32 *data);
+s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id, u8 *data);
+s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id, u16 *data);
#endif /* _fwil_h_ */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index e69d1e56996f..c87b829adb0d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -1068,7 +1068,7 @@ struct brcmf_mkeep_alive_pkt_le {
__le32 period_msec;
__le16 len_bytes;
u8 keep_alive_id;
- u8 data[0];
+ u8 data[];
} __packed;
#endif /* FWIL_TYPES_H_ */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 19b0f318f93e..d58525ebe618 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -1664,7 +1664,7 @@ static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
rfi->pend_pkts -= skb_queue_len(skb_list);
}
-void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
+void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
{
struct brcmf_pub *drvr = ifp->drvr;
u8 *reorder_data;
@@ -1682,7 +1682,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
/* validate flags and flow id */
if (flags == 0xFF) {
bphy_err(drvr, "invalid flags...so ignore this packet\n");
- brcmf_netif_rx(ifp, pkt, inirq);
+ brcmf_netif_rx(ifp, pkt);
return;
}
@@ -1694,7 +1694,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
if (rfi == NULL) {
brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
flow_id);
- brcmf_netif_rx(ifp, pkt, inirq);
+ brcmf_netif_rx(ifp, pkt);
return;
}
@@ -1719,7 +1719,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
rfi = kzalloc(buf_size, GFP_ATOMIC);
if (rfi == NULL) {
bphy_err(drvr, "failed to alloc buffer\n");
- brcmf_netif_rx(ifp, pkt, inirq);
+ brcmf_netif_rx(ifp, pkt);
return;
}
@@ -1833,7 +1833,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
netif_rx:
skb_queue_walk_safe(&reorder_list, pkt, pnext) {
__skb_unlink(pkt, &reorder_list);
- brcmf_netif_rx(ifp, pkt, inirq);
+ brcmf_netif_rx(ifp, pkt);
}
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
index 50e424b5880d..b16a9d1c0508 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
@@ -42,6 +42,6 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp);
void brcmf_fws_del_interface(struct brcmf_if *ifp);
void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb);
void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked);
-void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
+void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb);
#endif /* FWSIGNAL_H_ */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index 7c8e08ee8f0f..b2d0f7570aa9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -536,8 +536,7 @@ static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
return -ENODEV;
}
-static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb,
- bool inirq)
+static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
{
}
@@ -1191,7 +1190,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
}
skb->protocol = eth_type_trans(skb, ifp->ndev);
- brcmf_netif_rx(ifp, skb, false);
+ brcmf_netif_rx(ifp, skb);
}
static void brcmf_msgbuf_process_gen_status(struct brcmf_msgbuf *msgbuf,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
index 513c7e6421b2..8623bde5eb70 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
@@ -71,16 +71,18 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
/* Set board-type to the first string of the machine compatible prop */
root = of_find_node_by_path("/");
if (root) {
- int i, len;
+ int i;
char *board_type;
const char *tmp;
of_property_read_string_index(root, "compatible", 0, &tmp);
/* get rid of '/' in the compatible string to be able to find the FW */
- len = strlen(tmp) + 1;
- board_type = devm_kzalloc(dev, len, GFP_KERNEL);
- strscpy(board_type, tmp, len);
+ board_type = devm_kstrdup(dev, tmp, GFP_KERNEL);
+ if (!board_type) {
+ of_node_put(root);
+ return;
+ }
for (i = 0; i < board_type[i]; i++) {
if (board_type[i] == '/')
board_type[i] = '-';
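
The of.c change above collapses the devm_kzalloc()/strscpy() pair into devm_kstrdup() and adds the missing NULL check. A minimal sketch under the same assumptions; strreplace() is used here only as a stand-in for the open-coded '/' to '-' loop kept by the patch:

#include <linux/device.h>
#include <linux/of.h>
#include <linux/string.h>

static char *demo_board_type(struct device *dev, struct device_node *root,
			     const char *compatible)
{
	char *board_type = devm_kstrdup(dev, compatible, GFP_KERNEL);

	if (!board_type) {
		of_node_put(root);	/* drop the reference taken by the caller */
		return NULL;
	}
	strreplace(board_type, '/', '-');	/* make it usable in a firmware path */
	return board_type;
}
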
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 4735063e4c03..479041f070f9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -90,8 +90,8 @@
#define P2PSD_ACTION_CATEGORY 0x04 /* Public action frame */
#define P2PSD_ACTION_ID_GAS_IREQ 0x0a /* GAS Initial Request AF */
#define P2PSD_ACTION_ID_GAS_IRESP 0x0b /* GAS Initial Response AF */
-#define P2PSD_ACTION_ID_GAS_CREQ 0x0c /* GAS Comback Request AF */
-#define P2PSD_ACTION_ID_GAS_CRESP 0x0d /* GAS Comback Response AF */
+#define P2PSD_ACTION_ID_GAS_CREQ 0x0c /* GAS Comeback Request AF */
+#define P2PSD_ACTION_ID_GAS_CRESP 0x0d /* GAS Comeback Response AF */
#define BRCMF_P2P_DISABLE_TIMEOUT msecs_to_jiffies(500)
@@ -158,7 +158,7 @@ struct brcmf_p2p_pub_act_frame {
u8 oui_type;
u8 subtype;
u8 dialog_token;
- u8 elts[1];
+ u8 elts[];
};
/**
@@ -177,7 +177,7 @@ struct brcmf_p2p_action_frame {
u8 type;
u8 subtype;
u8 dialog_token;
- u8 elts[1];
+ u8 elts[];
};
/**
@@ -192,7 +192,7 @@ struct brcmf_p2psd_gas_pub_act_frame {
u8 category;
u8 action;
u8 dialog_token;
- u8 query_data[1];
+ u8 query_data[];
};
/**
@@ -225,7 +225,7 @@ static bool brcmf_p2p_is_pub_action(void *frame, u32 frame_len)
return false;
pact_frm = (struct brcmf_p2p_pub_act_frame *)frame;
- if (frame_len < sizeof(struct brcmf_p2p_pub_act_frame) - 1)
+ if (frame_len < sizeof(*pact_frm))
return false;
if (pact_frm->category == P2P_PUB_AF_CATEGORY &&
@@ -253,7 +253,7 @@ static bool brcmf_p2p_is_p2p_action(void *frame, u32 frame_len)
return false;
act_frm = (struct brcmf_p2p_action_frame *)frame;
- if (frame_len < sizeof(struct brcmf_p2p_action_frame) - 1)
+ if (frame_len < sizeof(*act_frm))
return false;
if (act_frm->category == P2P_AF_CATEGORY &&
@@ -280,7 +280,7 @@ static bool brcmf_p2p_is_gas_action(void *frame, u32 frame_len)
return false;
sd_act_frm = (struct brcmf_p2psd_gas_pub_act_frame *)frame;
- if (frame_len < sizeof(struct brcmf_p2psd_gas_pub_act_frame) - 1)
+ if (frame_len < sizeof(*sd_act_frm))
return false;
if (sd_act_frm->category != P2PSD_ACTION_CATEGORY)
@@ -396,11 +396,11 @@ static void brcmf_p2p_print_actframe(bool tx, void *frame, u32 frame_len)
(tx) ? "TX" : "RX");
break;
case P2PSD_ACTION_ID_GAS_CREQ:
- brcmf_dbg(TRACE, "%s P2P GAS Comback Request\n",
+ brcmf_dbg(TRACE, "%s P2P GAS Comeback Request\n",
(tx) ? "TX" : "RX");
break;
case P2PSD_ACTION_ID_GAS_CRESP:
- brcmf_dbg(TRACE, "%s P2P GAS Comback Response\n",
+ brcmf_dbg(TRACE, "%s P2P GAS Comeback Response\n",
(tx) ? "TX" : "RX");
break;
default:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 8b149996fc00..97f0f13dfe50 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/bcma/bcma.h>
#include <linux/sched.h>
+#include <linux/io.h>
#include <asm/unaligned.h>
#include <soc.h>
@@ -59,6 +60,13 @@ BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie");
BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie");
BRCMF_FW_DEF(4371, "brcmfmac4371-pcie");
+/* firmware config files */
+MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.txt");
+MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.txt");
+
+/* per-board firmware binaries */
+MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.bin");
+
static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602),
BRCMF_FW_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C),
@@ -448,47 +456,6 @@ brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
static void
-brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
- void *srcaddr, u32 len)
-{
- void __iomem *address = devinfo->tcm + mem_offset;
- __le32 *src32;
- __le16 *src16;
- u8 *src8;
-
- if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) {
- if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) {
- src8 = (u8 *)srcaddr;
- while (len) {
- iowrite8(*src8, address);
- address++;
- src8++;
- len--;
- }
- } else {
- len = len / 2;
- src16 = (__le16 *)srcaddr;
- while (len) {
- iowrite16(le16_to_cpu(*src16), address);
- address += 2;
- src16++;
- len--;
- }
- }
- } else {
- len = len / 4;
- src32 = (__le32 *)srcaddr;
- while (len) {
- iowrite32(le32_to_cpu(*src32), address);
- address += 4;
- src32++;
- len--;
- }
- }
-}
-
-
-static void
brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
void *dstaddr, u32 len)
{
@@ -777,6 +744,8 @@ static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo,
return;
console = &devinfo->shared.console;
+ if (!console->base_addr)
+ return;
addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET;
newidx = brcmf_pcie_read_tcm32(devinfo, addr);
while (newidx != console->read_idx) {
@@ -1348,6 +1317,18 @@ static void brcmf_pcie_down(struct device *dev)
{
}
+static int brcmf_pcie_preinit(struct device *dev)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
+
+ brcmf_dbg(PCIE, "Enter\n");
+
+ brcmf_pcie_intr_enable(buspub->devinfo);
+ brcmf_pcie_hostready(buspub->devinfo);
+
+ return 0;
+}
static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
{
@@ -1456,6 +1437,7 @@ static int brcmf_pcie_reset(struct device *dev)
}
static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
+ .preinit = brcmf_pcie_preinit,
.txdata = brcmf_pcie_tx,
.stop = brcmf_pcie_down,
.txctl = brcmf_pcie_tx_ctlpkt,
@@ -1540,6 +1522,7 @@ brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
shared->max_rxbufpost, shared->rx_dataoffset);
brcmf_pcie_bus_console_init(devinfo);
+ brcmf_pcie_bus_console_read(devinfo, false);
return 0;
}
@@ -1563,8 +1546,8 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
return err;
brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
- brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase,
- (void *)fw->data, fw->size);
+ memcpy_toio(devinfo->tcm + devinfo->ci->rambase,
+ (void *)fw->data, fw->size);
resetintr = get_unaligned_le32(fw->data);
release_firmware(fw);
@@ -1578,7 +1561,7 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
address = devinfo->ci->rambase + devinfo->ci->ramsize -
nvram_len;
- brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len);
+ memcpy_toio(devinfo->tcm + address, nvram, nvram_len);
brcmf_fw_nvram_free(nvram);
} else {
brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
@@ -1777,6 +1760,8 @@ static void brcmf_pcie_setup(struct device *dev, int ret,
ret = brcmf_chip_get_raminfo(devinfo->ci);
if (ret) {
brcmf_err(bus, "Failed to get RAM info\n");
+ release_firmware(fw);
+ brcmf_fw_nvram_free(nvram);
goto fail;
}
@@ -1826,9 +1811,6 @@ static void brcmf_pcie_setup(struct device *dev, int ret,
init_waitqueue_head(&devinfo->mbdata_resp_wait);
- brcmf_pcie_intr_enable(devinfo);
- brcmf_pcie_hostready(devinfo);
-
ret = brcmf_attach(&devinfo->pdev->dev);
if (ret)
goto fail;
@@ -1980,6 +1962,7 @@ brcmf_pcie_remove(struct pci_dev *pdev)
return;
devinfo = bus->bus_priv.pcie->devinfo;
+ brcmf_pcie_bus_console_read(devinfo, false);
devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
if (devinfo->ci)
@@ -2106,6 +2089,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_RAW_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4359_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
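
The removed brcmf_pcie_copy_mem_todev() above hand-rolled 8/16/32-bit iowrite loops; the firmware and NVRAM download path now relies on memcpy_toio(), the generic helper for copying a kernel buffer into __iomem space. A minimal sketch with hypothetical parameters:

#include <linux/io.h>
#include <linux/types.h>

static void demo_load_image(void __iomem *tcm, u32 rambase,
			    const void *image, size_t len)
{
	/* memcpy_toio() takes care of the MMIO access-width details */
	memcpy_toio(tcm + rambase, image, len);
}
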
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
index f4a79e217da5..bd08d3aaa8f4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
@@ -32,7 +32,7 @@ struct brcmf_proto {
u8 peer[ETH_ALEN]);
void (*add_tdls_peer)(struct brcmf_pub *drvr, int ifidx,
u8 peer[ETH_ALEN]);
- void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
+ void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb);
void (*add_if)(struct brcmf_if *ifp);
void (*del_if)(struct brcmf_if *ifp);
void (*reset_if)(struct brcmf_if *ifp);
@@ -109,9 +109,9 @@ static inline bool brcmf_proto_is_reorder_skb(struct sk_buff *skb)
}
static inline void
-brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq)
+brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
{
- ifp->drvr->proto->rxreorder(ifp, skb, inirq);
+ ifp->drvr->proto->rxreorder(ifp, skb);
}
static inline void
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 8effeb7a7269..ba3c159111d3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -629,7 +629,6 @@ BRCMF_FW_CLM_DEF(43752, "brcmfmac43752-sdio");
/* firmware config files */
MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-sdio.*.txt");
-MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.txt");
/* per-board firmware binaries */
MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-sdio.*.bin");
@@ -652,6 +651,7 @@ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFC, 43430B0),
BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0x00000200, 43456),
BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFDC0, 43455),
+ BRCMF_FW_ENTRY(BRCM_CC_43454_CHIP_ID, 0x00000040, 43455),
BRCMF_FW_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354),
BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.h
index e1930ce1b642..b2c7ae8966a1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.h
@@ -15,7 +15,7 @@
struct brcmf_xtlv {
u16 id;
u16 len;
- u8 data[0];
+ u8 data[];
};
enum brcmf_xtlv_option {
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index 9d81320164ce..ed0b707f0cdf 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -32,6 +32,7 @@
#define BRCM_CC_4339_CHIP_ID 0x4339
#define BRCM_CC_43430_CHIP_ID 43430
#define BRCM_CC_4345_CHIP_ID 0x4345
+#define BRCM_CC_43454_CHIP_ID 43454
#define BRCM_CC_43465_CHIP_ID 43465
#define BRCM_CC_4350_CHIP_ID 0x4350
#define BRCM_CC_43525_CHIP_ID 43525
@@ -71,6 +72,7 @@
#define BRCM_PCIE_4356_DEVICE_ID 0x43ec
#define BRCM_PCIE_43567_DEVICE_ID 0x43d3
#define BRCM_PCIE_43570_DEVICE_ID 0x43d9
+#define BRCM_PCIE_43570_RAW_DEVICE_ID 0xaa31
#define BRCM_PCIE_4358_DEVICE_ID 0x43e9
#define BRCM_PCIE_4359_DEVICE_ID 0x43ef
#define BRCM_PCIE_43602_DEVICE_ID 0x43ba
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index 452d08545d31..10daef81c355 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -545,7 +545,7 @@ struct ConfigRid {
#define MODE_CFG_MASK cpu_to_le16(0xff)
#define MODE_ETHERNET_HOST cpu_to_le16(0<<8) /* rx payloads converted */
#define MODE_LLC_HOST cpu_to_le16(1<<8) /* rx payloads left as is */
-#define MODE_AIRONET_EXTEND cpu_to_le16(1<<9) /* enable Aironet extenstions */
+#define MODE_AIRONET_EXTEND cpu_to_le16(1<<9) /* enable Aironet extensions */
#define MODE_AP_INTERFACE cpu_to_le16(1<<10) /* enable ap interface extensions */
#define MODE_ANTENNA_ALIGN cpu_to_le16(1<<11) /* enable antenna alignment */
#define MODE_ETHER_LLC cpu_to_le16(1<<12) /* enable ethernet LLC */
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 85e704283755..a647a406b87b 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -139,6 +139,7 @@ config IWLMEI
tristate "Intel Management Engine communication over WLAN"
depends on INTEL_MEI
depends on PM
+ depends on CFG80211
help
Enables the iwlmei kernel module.
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 330ef04ca51a..8ff967edc8f0 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -1,15 +1,16 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
#include "iwl-prph.h"
+#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 69
+#define IWL_22000_UCODE_API_MAX 72
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@@ -39,6 +40,7 @@
#define IWL_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-"
#define IWL_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-"
#define IWL_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-"
+#define IWL_SO_A_MR_A_FW_PRE "iwlwifi-so-a0-mr-a0-"
#define IWL_SNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-"
#define IWL_SNJ_A_GF_A_FW_PRE "iwlwifi-SoSnj-a0-gf-a0-"
#define IWL_SNJ_A_HR_B_FW_PRE "iwlwifi-SoSnj-a0-hr-b0-"
@@ -119,8 +121,6 @@
IWL_BZ_A_FM_A_FW_PRE __stringify(api) ".ucode"
#define IWL_GL_A_FM_A_MODULE_FIRMWARE(api) \
IWL_GL_A_FM_A_FW_PRE __stringify(api) ".ucode"
-#define IWL_BZ_Z_GF_A_MODULE_FIRMWARE(api) \
- IWL_BZ_Z_GF_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BNJ_A_FM_A_MODULE_FIRMWARE(api) \
IWL_BNJ_A_FM_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(api) \
@@ -224,7 +224,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.trans.base_params = &iwl_ax210_base_params, \
.min_txq_size = 128, \
.gp2_reg_addr = 0xd02c68, \
- .min_256_ba_txq_size = 1024, \
+ .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_HE, \
.mon_dram_regs = { \
.write_ptr = { \
.addr = DBGC_CUR_DBGBUF_STATUS, \
@@ -285,7 +285,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.trans.base_params = &iwl_ax210_base_params, \
.min_txq_size = 128, \
.gp2_reg_addr = 0xd02c68, \
- .min_256_ba_txq_size = 1024, \
+ .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \
.mon_dram_regs = { \
.write_ptr = { \
.addr = DBGC_CUR_DBGBUF_STATUS, \
@@ -299,6 +299,12 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.addr = DBGC_CUR_DBGBUF_STATUS, \
.mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \
}, \
+ }, \
+ .mon_dbgi_regs = { \
+ .write_ptr = { \
+ .addr = DBGI_SRAM_FIFO_POINTERS, \
+ .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, \
+ }, \
}
const struct iwl_cfg_trans_params iwl_qnj_trans_cfg = {
@@ -385,6 +391,21 @@ const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
+const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg = {
+ .mq_rx_supported = true,
+ .use_tfh = true,
+ .rf_id = true,
+ .gen2 = true,
+ .device_family = IWL_DEVICE_FAMILY_AX210,
+ .base_params = &iwl_ax210_base_params,
+ .umac_prph_offset = 0x300000,
+ .integrated = true,
+ .low_latency_xtal = true,
+ .xtal_latency = 12000,
+ .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
+ .imr_enabled = true,
+};
+
/*
* If the device doesn't support HE, no need to have that many buffers.
* 22000 devices can split multiple frames into a single RB, so fewer are
@@ -476,6 +497,7 @@ const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
+const char iwl_ax204_name[] = "Intel(R) Wi-Fi 6 AX204 160MHz";
const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz";
const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz";
@@ -816,6 +838,20 @@ const struct iwl_cfg iwl_cfg_ma_a0_mr_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
+const struct iwl_cfg iwl_cfg_ma_a0_ms_a0 = {
+ .fw_name_pre = IWL_MA_A_MR_A_FW_PRE,
+ .uhb_supported = false,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_so_a0_ms_a0 = {
+ .fw_name_pre = IWL_SO_A_MR_A_FW_PRE,
+ .uhb_supported = false,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
const struct iwl_cfg iwl_cfg_ma_a0_fm_a0 = {
.fw_name_pre = IWL_MA_A_FM_A_FW_PRE,
.uhb_supported = true,
@@ -830,6 +866,13 @@ const struct iwl_cfg iwl_cfg_snj_a0_mr_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
+const struct iwl_cfg iwl_cfg_snj_a0_ms_a0 = {
+ .fw_name_pre = IWL_SNJ_A_MR_A_FW_PRE,
+ .uhb_supported = false,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
const struct iwl_cfg iwl_cfg_so_a0_hr_a0 = {
.fw_name_pre = IWL_SO_A_HR_B_FW_PRE,
IWL_DEVICE_AX210,
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index 754876cd27ce..e8bd4f0e3d2d 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -299,7 +299,7 @@ static int iwlagn_mac_start(struct ieee80211_hw *hw)
priv->is_open = 1;
IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
+ return ret;
}
static void iwlagn_mac_stop(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index 90b9becd1673..caf452922dbd 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -48,6 +48,7 @@
#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IWLWIFI);
/* Please keep this array *SORTED* by hex value.
* Access is done through binary search.
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
index db0c41bbeb0e..e9d2717362cf 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
@@ -3,7 +3,7 @@
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
- * Copyright(c) 2018, 2020 Intel Corporation
+ * Copyright(c) 2018, 2020-2021 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -915,7 +915,7 @@ static void iwlagn_rx_noa_notification(struct iwl_priv *priv,
len += 1 + 2;
copylen += 1 + 2;
- new_data = kmalloc(sizeof(*new_data) + len, GFP_ATOMIC);
+ new_data = kmalloc(struct_size(new_data, data, len), GFP_ATOMIC);
if (new_data) {
new_data->length = len;
new_data->data[0] = WLAN_EID_VENDOR_SPECIFIC;
@@ -1015,8 +1015,7 @@ void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi,
/* No handling needed */
IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n",
iwl_get_cmd_string(priv->trans,
- iwl_cmd_id(pkt->hdr.cmd,
- 0, 0)),
+ WIDE_ID(0, pkt->hdr.cmd)),
pkt->hdr.cmd);
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index c17ab53fcd8f..33aae639ad37 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -4,6 +4,7 @@
* Copyright (C) 2019-2022 Intel Corporation
*/
#include <linux/uuid.h>
+#include <linux/dmi.h>
#include "iwl-drv.h"
#include "iwl-debug.h"
#include "acpi.h"
@@ -19,6 +20,30 @@ const guid_t iwl_rfi_guid = GUID_INIT(0x7266172C, 0x220B, 0x4B29,
0xDD, 0x26, 0xB5, 0xFD);
IWL_EXPORT_SYMBOL(iwl_rfi_guid);
+static const struct dmi_system_id dmi_ppag_approved_list[] = {
+ { .ident = "HP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ },
+ },
+ { .ident = "SAMSUNG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
+ },
+ },
+ { .ident = "MSFT",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ },
+ },
+ { .ident = "ASUS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek COMPUTER INC."),
+ },
+ },
+ {}
+};
+
static int iwl_acpi_get_handle(struct device *dev, acpi_string method,
acpi_handle *ret_handle)
{
@@ -242,7 +267,7 @@ found:
IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg_range);
int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
- struct iwl_tas_config_cmd_v3 *cmd)
+ union iwl_tas_config_cmd *cmd, int fw_ver)
{
union acpi_object *wifi_pkg, *data;
int ret, tbl_rev, i, block_list_size, enabled;
@@ -268,10 +293,18 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
(tas_selection & ACPI_WTAS_OVERRIDE_IEC_MSK) >> ACPI_WTAS_OVERRIDE_IEC_POS;
u16 enabled_iec = (tas_selection & ACPI_WTAS_ENABLE_IEC_MSK) >>
ACPI_WTAS_ENABLE_IEC_POS;
+ u8 usa_tas_uhb = (tas_selection & ACPI_WTAS_USA_UHB_MSK) >> ACPI_WTAS_USA_UHB_POS;
+
enabled = tas_selection & ACPI_WTAS_ENABLED_MSK;
- cmd->override_tas_iec = cpu_to_le16(override_iec);
- cmd->enable_tas_iec = cpu_to_le16(enabled_iec);
+ if (fw_ver <= 3) {
+ cmd->v3.override_tas_iec = cpu_to_le16(override_iec);
+ cmd->v3.enable_tas_iec = cpu_to_le16(enabled_iec);
+ } else {
+ cmd->v4.usa_tas_uhb_allowed = usa_tas_uhb;
+ cmd->v4.override_tas_iec = (u8)override_iec;
+ cmd->v4.enable_tas_iec = (u8)enabled_iec;
+ }
} else if (tbl_rev == 0 &&
wifi_pkg->package.elements[1].type == ACPI_TYPE_INTEGER) {
@@ -297,7 +330,7 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
goto out_free;
}
block_list_size = wifi_pkg->package.elements[2].integer.value;
- cmd->block_list_size = cpu_to_le32(block_list_size);
+ cmd->v4.block_list_size = cpu_to_le32(block_list_size);
IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", block_list_size);
if (block_list_size > APCI_WTAS_BLACK_LIST_MAX) {
@@ -319,7 +352,7 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
}
country = wifi_pkg->package.elements[3 + i].integer.value;
- cmd->block_list_array[i] = cpu_to_le32(country);
+ cmd->v4.block_list_array[i] = cpu_to_le32(country);
IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country);
}
@@ -529,8 +562,8 @@ IWL_EXPORT_SYMBOL(iwl_sar_select_profile);
int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *table, *data;
- bool enabled;
int ret, tbl_rev;
+ u32 flags;
u8 num_chains, num_sub_bands;
data = iwl_acpi_get_object(fwrt->dev, ACPI_WRDS_METHOD);
@@ -596,7 +629,8 @@ read_table:
IWL_DEBUG_RADIO(fwrt, "Reading WRDS tbl_rev=%d\n", tbl_rev);
- enabled = !!(wifi_pkg->package.elements[1].integer.value);
+ flags = wifi_pkg->package.elements[1].integer.value;
+ fwrt->reduced_power_flags = flags >> IWL_REDUCE_POWER_FLAGS_POS;
/* position of the actual table */
table = &wifi_pkg->package.elements[2];
@@ -604,7 +638,8 @@ read_table:
/* The profile from WRDS is officially profile 1, but goes
* into sar_profiles[0] (because we don't have a profile 0).
*/
- ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0], enabled,
+ ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0],
+ flags & IWL_SAR_ENABLE_MSK,
num_chains, num_sub_bands);
out_free:
kfree(data);
@@ -962,3 +997,181 @@ __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
return config_bitmap;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_lari_config_bitmap);
+
+int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
+{
+ union acpi_object *wifi_pkg, *data, *flags;
+ int i, j, ret, tbl_rev, num_sub_bands = 0;
+ int idx = 2;
+
+ fwrt->ppag_flags = 0;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_PPAG_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ /* try to read ppag table rev 2 or 1 (both have the same data size) */
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev);
+
+ if (!IS_ERR(wifi_pkg)) {
+ if (tbl_rev == 1 || tbl_rev == 2) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V2;
+ IWL_DEBUG_RADIO(fwrt,
+ "Reading PPAG table v2 (tbl_rev=%d)\n",
+ tbl_rev);
+ goto read_table;
+ } else {
+ ret = -EINVAL;
+ goto out_free;
+ }
+ }
+
+ /* try to read ppag table revision 0 */
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_PPAG_WIFI_DATA_SIZE_V1, &tbl_rev);
+
+ if (!IS_ERR(wifi_pkg)) {
+ if (tbl_rev != 0) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+ num_sub_bands = IWL_NUM_SUB_BANDS_V1;
+ IWL_DEBUG_RADIO(fwrt, "Reading PPAG table v1 (tbl_rev=0)\n");
+ goto read_table;
+ }
+
+read_table:
+ fwrt->ppag_ver = tbl_rev;
+ flags = &wifi_pkg->package.elements[1];
+
+ if (flags->type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ fwrt->ppag_flags = flags->integer.value & ACPI_PPAG_MASK;
+
+ if (!fwrt->ppag_flags) {
+ ret = 0;
+ goto out_free;
+ }
+
+ /*
+ * read, verify gain values and save them into the PPAG table.
+ * first sub-band (j=0) corresponds to Low-Band (2.4GHz), and the
+ * following sub-bands to High-Band (5GHz).
+ */
+ for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
+ for (j = 0; j < num_sub_bands; j++) {
+ union acpi_object *ent;
+
+ ent = &wifi_pkg->package.elements[idx++];
+ if (ent->type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ fwrt->ppag_chains[i].subbands[j] = ent->integer.value;
+
+ if ((j == 0 &&
+ (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_LB ||
+ fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_LB)) ||
+ (j != 0 &&
+ (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_HB ||
+ fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_HB))) {
+ fwrt->ppag_flags = 0;
+ ret = -EINVAL;
+ goto out_free;
+ }
+ }
+ }
+
+
+ ret = 0;
+
+out_free:
+ kfree(data);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_ppag_table);
+
+int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd,
+ int *cmd_size)
+{
+ u8 cmd_ver;
+ int i, j, num_sub_bands;
+ s8 *gain;
+
+ if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG capability not supported by FW, command not sent.\n");
+ return -EINVAL;
+ }
+ if (!fwrt->ppag_flags) {
+ IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n");
+ return -EINVAL;
+ }
+
+ /* The 'flags' field is the same in v1 and in v2 so we can just
+ * use v1 to access it.
+ */
+ cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags);
+ cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
+ WIDE_ID(PHY_OPS_GROUP, PER_PLATFORM_ANT_GAIN_CMD),
+ IWL_FW_CMD_VER_UNKNOWN);
+ if (cmd_ver == 1) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V1;
+ gain = cmd->v1.gain[0];
+ *cmd_size = sizeof(cmd->v1);
+ if (fwrt->ppag_ver == 1 || fwrt->ppag_ver == 2) {
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table rev is %d but FW supports v1, sending truncated table\n",
+ fwrt->ppag_ver);
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
+ }
+ } else if (cmd_ver == 2 || cmd_ver == 3) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V2;
+ gain = cmd->v2.gain[0];
+ *cmd_size = sizeof(cmd->v2);
+ if (fwrt->ppag_ver == 0) {
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table is v1 but FW supports v2, sending padded table\n");
+ } else if (cmd_ver == 2 && fwrt->ppag_ver == 2) {
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table is v3 but FW supports v2, sending partial bitmap.\n");
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
+ }
+ } else {
+ IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
+ for (j = 0; j < num_sub_bands; j++) {
+ gain[i * num_sub_bands + j] =
+ fwrt->ppag_chains[i].subbands[j];
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table: chain[%d] band[%d]: gain = %d\n",
+ i, j, gain[i * num_sub_bands + j]);
+ }
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_read_ppag_table);
+
+bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt)
+{
+
+ if (!dmi_check_system(dmi_ppag_approved_list)) {
+ IWL_DEBUG_RADIO(fwrt,
+ "System vendor '%s' is not in the approved list, disabling PPAG.\n",
+ dmi_get_system_info(DMI_SYS_VENDOR));
+ fwrt->ppag_flags = 0;
+ return false;
+ }
+
+ return true;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_is_ppag_approved);
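
Reviewer note: the new PPAG helpers are meant to be chained by the op-mode — read the ACPI table, gate it on the DMI vendor allow-list, then translate the cached gains into whichever command version the firmware accepts. A minimal sketch of that flow, assuming an mvm-style caller and iwl_mvm_send_cmd_pdu(); the call site itself is not part of this patch:

static int example_send_ppag(struct iwl_mvm *mvm)
{
	struct iwl_fw_runtime *fwrt = &mvm->fwrt;
	union iwl_ppag_table_cmd cmd;
	int ret, cmd_size;

	/* fills fwrt->ppag_chains / ppag_flags / ppag_ver from ACPI */
	if (iwl_acpi_get_ppag_table(fwrt) < 0)
		return 0;		/* no table: nothing to send */

	/* DMI vendor allow-list check added above */
	if (!iwl_acpi_is_ppag_approved(fwrt))
		return 0;

	/* pick the v1/v2 layout based on the FW command version */
	ret = iwl_read_ppag_table(fwrt, &cmd, &cmd_size);
	if (ret < 0)
		return 0;

	return iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
						  PER_PLATFORM_ANT_GAIN_CMD),
				    0, cmd_size, &cmd);
}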
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index 22b3c665f91a..6f361c59106f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#ifndef __iwl_fw_acpi__
#define __iwl_fw_acpi__
@@ -77,6 +77,8 @@
#define ACPI_WTAS_ENABLE_IEC_MSK 0x4
#define ACPI_WTAS_OVERRIDE_IEC_POS 0x1
#define ACPI_WTAS_ENABLE_IEC_POS 0x2
+#define ACPI_WTAS_USA_UHB_MSK BIT(16)
+#define ACPI_WTAS_USA_UHB_POS 16
#define ACPI_PPAG_WIFI_DATA_SIZE_V1 ((IWL_NUM_CHAIN_LIMITS * \
@@ -89,6 +91,11 @@
#define ACPI_PPAG_MAX_LB 24
#define ACPI_PPAG_MIN_HB -16
#define ACPI_PPAG_MAX_HB 40
+#define ACPI_PPAG_MASK 3
+#define IWL_PPAG_ETSI_MASK BIT(0)
+
+#define IWL_SAR_ENABLE_MSK BIT(0)
+#define IWL_REDUCE_POWER_FLAGS_POS 1
/*
* The profile for revision 2 is a superset of revision 1, which is in
@@ -126,7 +133,8 @@ enum iwl_dsm_funcs_rev_0 {
DSM_FUNC_ENABLE_6E = 3,
DSM_FUNC_11AX_ENABLEMENT = 6,
DSM_FUNC_ENABLE_UNII4_CHAN = 7,
- DSM_FUNC_ACTIVATE_CHANNEL = 8
+ DSM_FUNC_ACTIVATE_CHANNEL = 8,
+ DSM_FUNC_FORCE_DISABLE_CHANNELS = 9
};
enum iwl_dsm_values_srd {
@@ -213,10 +221,17 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
u32 n_bands, u32 n_profiles);
int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
- struct iwl_tas_config_cmd_v3 *cmd);
+ union iwl_tas_config_cmd *cmd, int fw_ver);
__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt);
+int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd,
+ int *cmd_size);
+
+bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt);
+
#else /* CONFIG_ACPI */
static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method)
@@ -294,7 +309,7 @@ static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
}
static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
- struct iwl_tas_config_cmd_v3 *cmd)
+ union iwl_tas_config_cmd *cmd, int fw_ver)
{
return -ENOENT;
}
@@ -304,6 +319,22 @@ static inline __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt
return 0;
}
+static inline int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt,
+ union iwl_ppag_table_cmd *cmd, int *cmd_size)
+{
+ return -ENOENT;
+}
+
+static inline bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt)
+{
+ return false;
+}
+
#endif /* CONFIG_ACPI */
static inline union acpi_object *
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 35b8856e511f..c78d2f1c722c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -323,14 +323,6 @@ enum iwl_legacy_cmds {
REPLY_THERMAL_MNG_BACKOFF = 0x7e,
/**
- * @DC2DC_CONFIG_CMD:
- * Set/Get DC2DC frequency tune
- * Command is &struct iwl_dc2dc_config_cmd,
- * response is &struct iwl_dc2dc_config_resp
- */
- DC2DC_CONFIG_CMD = 0x83,
-
- /**
* @NVM_ACCESS_CMD: using &struct iwl_nvm_access_cmd
*/
NVM_ACCESS_CMD = 0x88,
@@ -608,6 +600,11 @@ enum iwl_system_subcmd_ids {
* @SYSTEM_FEATURES_CONTROL_CMD: &struct iwl_system_features_control_cmd
*/
SYSTEM_FEATURES_CONTROL_CMD = 0xd,
+
+ /**
+ * @RFI_DEACTIVATE_NOTIF: &struct iwl_rfi_deactivate_notif
+ */
+ RFI_DEACTIVATE_NOTIF = 0xff,
};
#endif /* __iwl_fw_api_commands_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
index 1ab92f62c414..087354b3c308 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
@@ -114,37 +114,4 @@ enum iwl_dc2dc_config_id {
DCDC_FREQ_TUNE_SET = 0x2,
}; /* MARKER_ID_API_E_VER_1 */
-/**
- * struct iwl_dc2dc_config_cmd - configure dc2dc values
- *
- * (DC2DC_CONFIG_CMD = 0x83)
- *
- * Set/Get & configure dc2dc values.
- * The command always returns the current dc2dc values.
- *
- * @flags: set/get dc2dc
- * @enable_low_power_mode: not used.
- * @dc2dc_freq_tune0: frequency divider - digital domain
- * @dc2dc_freq_tune1: frequency divider - analog domain
- */
-struct iwl_dc2dc_config_cmd {
- __le32 flags;
- __le32 enable_low_power_mode; /* not used */
- __le32 dc2dc_freq_tune0;
- __le32 dc2dc_freq_tune1;
-} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */
-
-/**
- * struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd
- *
- * Current dc2dc values returned by the FW.
- *
- * @dc2dc_freq_tune0: frequency divider - digital domain
- * @dc2dc_freq_tune1: frequency divider - analog domain
- */
-struct iwl_dc2dc_config_resp {
- __le32 dc2dc_freq_tune0;
- __le32 dc2dc_freq_tune1;
-} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */
-
#endif /* __iwl_fw_api_config_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index 89236f42c5a4..43619acc29fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -42,7 +42,7 @@ enum iwl_data_path_subcmd_ids {
RFH_QUEUE_CONFIG_CMD = 0xD,
/**
- * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd
+ * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd_v4
*/
TLC_MNG_CONFIG_CMD = 0xF,
@@ -58,6 +58,20 @@ enum iwl_data_path_subcmd_ids {
CHEST_COLLECTOR_FILTER_CONFIG_CMD = 0x14,
/**
+ * @RX_BAID_ALLOCATION_CONFIG_CMD: Allocate/deallocate a BAID for an RX
+ * blockack session, uses &struct iwl_rx_baid_cfg_cmd for the
+ * command, and &struct iwl_rx_baid_cfg_resp as a response.
+ */
+ RX_BAID_ALLOCATION_CONFIG_CMD = 0x16,
+
+ /**
+ * @SCD_QUEUE_CONFIG_CMD: new scheduler queue allocation/config/removal
+ * command, uses &struct iwl_scd_queue_cfg_cmd and the response
+ * is (same as before) &struct iwl_tx_queue_cfg_rsp.
+ */
+ SCD_QUEUE_CONFIG_CMD = 0x17,
+
+ /**
* @MONITOR_NOTIF: Datapath monitoring notification, using
* &struct iwl_datapath_monitor_notif
*/
@@ -257,4 +271,136 @@ struct iwl_rlc_config_cmd {
u8 reserved[3];
} __packed; /* RLC_CONFIG_CMD_API_S_VER_2 */
+#define IWL_MAX_BAID_OLD 16 /* MAX_IMMEDIATE_BA_API_D_VER_2 */
+#define IWL_MAX_BAID 32 /* MAX_IMMEDIATE_BA_API_D_VER_3 */
+
+/**
+ * enum iwl_rx_baid_action - BAID allocation/config action
+ * @IWL_RX_BAID_ACTION_ADD: add a new BAID session
+ * @IWL_RX_BAID_ACTION_MODIFY: modify the BAID session
+ * @IWL_RX_BAID_ACTION_REMOVE: remove the BAID session
+ */
+enum iwl_rx_baid_action {
+ IWL_RX_BAID_ACTION_ADD,
+ IWL_RX_BAID_ACTION_MODIFY,
+ IWL_RX_BAID_ACTION_REMOVE,
+}; /* RX_BAID_ALLOCATION_ACTION_E_VER_1 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_alloc - BAID allocation data
+ * @sta_id_mask: station ID mask
+ * @tid: the TID for this session
+ * @reserved: reserved
+ * @ssn: the starting sequence number
+ * @win_size: RX BA session window size
+ */
+struct iwl_rx_baid_cfg_cmd_alloc {
+ __le32 sta_id_mask;
+ u8 tid;
+ u8 reserved[3];
+ __le16 ssn;
+ __le16 win_size;
+} __packed; /* RX_BAID_ALLOCATION_ADD_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_modify - BAID modification data
+ * @old_sta_id_mask: old station ID mask
+ * @new_sta_id_mask: new station ID mask
+ * @tid: TID of the BAID
+ */
+struct iwl_rx_baid_cfg_cmd_modify {
+ __le32 old_sta_id_mask;
+ __le32 new_sta_id_mask;
+ __le32 tid;
+} __packed; /* RX_BAID_ALLOCATION_MODIFY_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_remove_v1 - BAID removal data
+ * @baid: the BAID to remove
+ */
+struct iwl_rx_baid_cfg_cmd_remove_v1 {
+ __le32 baid;
+} __packed; /* RX_BAID_ALLOCATION_REMOVE_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_remove - BAID removal data
+ * @sta_id_mask: the station mask of the BAID to remove
+ * @tid: the TID of the BAID to remove
+ */
+struct iwl_rx_baid_cfg_cmd_remove {
+ __le32 sta_id_mask;
+ __le32 tid;
+} __packed; /* RX_BAID_ALLOCATION_REMOVE_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd - BAID allocation/config command
+ * @action: the action, from &enum iwl_rx_baid_action
+ */
+struct iwl_rx_baid_cfg_cmd {
+ __le32 action;
+ union {
+ struct iwl_rx_baid_cfg_cmd_alloc alloc;
+ struct iwl_rx_baid_cfg_cmd_modify modify;
+ struct iwl_rx_baid_cfg_cmd_remove_v1 remove_v1;
+ struct iwl_rx_baid_cfg_cmd_remove remove;
+ }; /* RX_BAID_ALLOCATION_OPERATION_API_U_VER_2 */
+} __packed; /* RX_BAID_ALLOCATION_CONFIG_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_rx_baid_cfg_resp - BAID allocation response
+ * @baid: the allocated BAID
+ */
+struct iwl_rx_baid_cfg_resp {
+ __le32 baid;
+}; /* RX_BAID_ALLOCATION_RESPONSE_API_S_VER_1 */
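
Reviewer note: as a hedged illustration of how the new union is intended to be filled, an ADD request for an example station/TID pair could look as follows; every field value below is a placeholder, not something this header defines:

	struct iwl_rx_baid_cfg_cmd baid_cmd = {
		.action = cpu_to_le32(IWL_RX_BAID_ACTION_ADD),
		.alloc = {
			.sta_id_mask = cpu_to_le32(BIT(3)),	/* example station 3 */
			.tid = 5,				/* example TID */
			.ssn = cpu_to_le16(0),
			.win_size = cpu_to_le16(64),
		},
	};
	/* The firmware answers with struct iwl_rx_baid_cfg_resp carrying the
	 * BAID it allocated for this session.
	 */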
+
+/**
+ * enum iwl_scd_queue_cfg_operation - scheduler queue operation
+ * @IWL_SCD_QUEUE_ADD: allocate a new queue
+ * @IWL_SCD_QUEUE_REMOVE: remove a queue
+ * @IWL_SCD_QUEUE_MODIFY: modify a queue
+ */
+enum iwl_scd_queue_cfg_operation {
+ IWL_SCD_QUEUE_ADD = 0,
+ IWL_SCD_QUEUE_REMOVE = 1,
+ IWL_SCD_QUEUE_MODIFY = 2,
+};
+
+/**
+ * struct iwl_scd_queue_cfg_cmd - scheduler queue allocation command
+ * @operation: the operation, see &enum iwl_scd_queue_cfg_operation
+ * @u.add.sta_mask: station mask
+ * @u.add.tid: TID
+ * @u.add.reserved: reserved
+ * @u.add.flags: flags from &enum iwl_tx_queue_cfg_actions, except
+ * %TX_QUEUE_CFG_ENABLE_QUEUE is not valid
+ * @u.add.cb_size: size code
+ * @u.add.bc_dram_addr: byte-count table IOVA
+ * @u.add.tfdq_dram_addr: TFD queue IOVA
+ * @u.remove.queue: queue ID for removal
+ * @u.modify.sta_mask: new station mask for modify
+ * @u.modify.queue: queue ID to modify
+ */
+struct iwl_scd_queue_cfg_cmd {
+ __le32 operation;
+ union {
+ struct {
+ __le32 sta_mask;
+ u8 tid;
+ u8 reserved[3];
+ __le32 flags;
+ __le32 cb_size;
+ __le64 bc_dram_addr;
+ __le64 tfdq_dram_addr;
+ } __packed add; /* TX_QUEUE_CFG_CMD_ADD_API_S_VER_1 */
+ struct {
+ __le32 queue;
+ } __packed remove; /* TX_QUEUE_CFG_CMD_REMOVE_API_S_VER_1 */
+ struct {
+ __le32 sta_mask;
+ __le32 queue;
+ } __packed modify; /* TX_QUEUE_CFG_CMD_MODIFY_API_S_VER_1 */
+ } __packed u; /* TX_QUEUE_CFG_CMD_OPERATION_API_U_VER_1 */
+} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_3 */
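
Reviewer note: a rough sketch of an ADD operation with the new command; the station mask, cb_size code and DMA addresses below are placeholders rather than values this header defines:

	struct iwl_scd_queue_cfg_cmd scd_cmd = {
		.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD),
		.u.add = {
			.sta_mask = cpu_to_le32(BIT(0)),	/* example station 0 */
			.tid = 0,
			.flags = cpu_to_le32(TX_QUEUE_CFG_TFD_SHORT_FORMAT),
			.cb_size = cpu_to_le32(4),		/* size code, placeholder */
			.bc_dram_addr = cpu_to_le64(0),		/* byte-count table IOVA */
			.tfdq_dram_addr = cpu_to_le64(0),	/* TFD queue IOVA */
		},
	};
	/* The response reuses struct iwl_tx_queue_cfg_rsp, as noted in the
	 * subcommand documentation above.
	 */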
+
#endif /* __iwl_fw_api_datapath_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index 456b7eaac570..52bf96585fc6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#ifndef __iwl_fw_dbg_tlv_h__
#define __iwl_fw_dbg_tlv_h__
@@ -11,7 +11,8 @@
#define IWL_FW_INI_MAX_NAME 32
#define IWL_FW_INI_MAX_CFG_NAME 64
#define IWL_FW_INI_DOMAIN_ALWAYS_ON 0
-#define IWL_FW_INI_REGION_V2_MASK 0x0000FFFF
+#define IWL_FW_INI_REGION_ID_MASK GENMASK(15, 0)
+#define IWL_FW_INI_REGION_DUMP_POLICY_MASK GENMASK(31, 16)
/**
* struct iwl_fw_ini_hcmd
@@ -249,11 +250,10 @@ struct iwl_fw_ini_hcmd_tlv {
} __packed; /* FW_TLV_DEBUG_HCMD_API_S_VER_1 */
/**
-* struct iwl_fw_ini_conf_tlv - preset configuration TLV
+* struct iwl_fw_ini_addr_val - Address and value to set it to
*
* @address: the base address
* @value: value to set at address
-
*/
struct iwl_fw_ini_addr_val {
__le32 address;
@@ -475,6 +475,7 @@ enum iwl_fw_ini_time_point {
* @IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG: override trigger configuration
* @IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA: override trigger data.
* Append otherwise
+ * @IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD: send cmd once dump collected
*/
enum iwl_fw_ini_trigger_apply_policy {
IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT = BIT(0),
@@ -482,6 +483,7 @@ enum iwl_fw_ini_trigger_apply_policy {
IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS = BIT(8),
IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG = BIT(9),
IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA = BIT(10),
+ IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD = BIT(16),
};
/**
@@ -496,4 +498,31 @@ enum iwl_fw_ini_trigger_reset_fw_policy {
IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY,
IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW
};
+
+/**
+ * enum iwl_fw_ini_dump_policy - Determines how to handle dump based on enabled flags
+ *
+ * @IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT: OS has no limit of dump size
+ * @IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB: mini dump, limited to a 600KB region dump
+ * @IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB: mini dump, limited to a 5MB dump
+ */
+enum iwl_fw_ini_dump_policy {
+ IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT = BIT(0),
+ IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB = BIT(1),
+ IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB = BIT(2),
+
+};
+
+/**
+ * enum iwl_fw_ini_dump_type - Determines dump type based on size defined by FW.
+ *
+ * @IWL_FW_INI_DUMP_BRIEF: only dump the most important regions
+ * @IWL_FW_INI_DUMP_MEDIUM: dump more regions than "brief", but not all regions
+ * @IWL_FW_INI_DUMP_VERBOSE: dump all regions
+ */
+enum iwl_fw_ini_dump_type {
+ IWL_FW_INI_DUMP_BRIEF,
+ IWL_FW_INI_DUMP_MEDIUM,
+ IWL_FW_INI_DUMP_VERBOSE,
+};
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index 029ae64bf2b2..6255257ddebe 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -43,6 +43,12 @@ enum iwl_debug_cmds {
*/
BUFFER_ALLOCATION = 0x8,
/**
+ * @FW_DUMP_COMPLETE_CMD:
+ * sends a command to the fw once dump collection is completed; uses
+ * &struct iwl_dbg_dump_complete_cmd
+ */
+ FW_DUMP_COMPLETE_CMD = 0xB,
+ /**
* @MFU_ASSERT_DUMP_NTF:
* &struct iwl_mfu_assert_dump_notif
*/
@@ -404,4 +410,15 @@ struct iwl_dbg_host_event_cfg_cmd {
__le32 enabled_severities;
} __packed; /* DEBUG_HOST_EVENT_CFG_CMD_API_S_VER_1 */
+/**
+ * struct iwl_dbg_dump_complete_cmd - dump complete cmd
+ *
+ * @tp: timepoint whose dump has completed
+ * @tp_data: timepoint data
+ */
+struct iwl_dbg_dump_complete_cmd {
+ __le32 tp;
+ __le32 tp_data;
+} __packed; /* FW_DUMP_COMPLETE_CMD_API_S_VER_1 */
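
Reviewer note: a hedged sketch of how the driver side might issue this once a dump finishes, assuming the usual host-command plumbing; the DEBUG_GROUP routing and the local tp_id/tp_data variables are assumptions, not part of this hunk:

	struct iwl_dbg_dump_complete_cmd hcmd_data = {
		.tp = cpu_to_le32(tp_id),	/* timepoint that was dumped */
		.tp_data = cpu_to_le32(tp_data),
	};
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(DEBUG_GROUP, FW_DUMP_COMPLETE_CMD),
		.data[0] = &hcmd_data,
		.len[0] = sizeof(hcmd_data),
	};

	iwl_trans_send_cmd(fwrt->trans, &hcmd);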
+
#endif /* __iwl_fw_api_debug_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index d088c820b1a9..712532f17630 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -27,6 +27,10 @@ enum iwl_mac_conf_subcmd_ids {
* @SESSION_PROTECTION_CMD: &struct iwl_mvm_session_prot_cmd
*/
SESSION_PROTECTION_CMD = 0x5,
+ /**
+ * @CANCEL_CHANNEL_SWITCH_CMD: &struct iwl_cancel_channel_switch_cmd
+ */
+ CANCEL_CHANNEL_SWITCH_CMD = 0x6,
/**
* @SESSION_PROTECTION_NOTIF: &struct iwl_mvm_session_prot_notif
@@ -42,6 +46,11 @@ enum iwl_mac_conf_subcmd_ids {
* @CHANNEL_SWITCH_START_NOTIF: &struct iwl_channel_switch_start_notif
*/
CHANNEL_SWITCH_START_NOTIF = 0xFF,
+
+ /**
+	 * @CHANNEL_SWITCH_ERROR_NOTIF: &struct iwl_channel_switch_error_notif
+ */
+ CHANNEL_SWITCH_ERROR_NOTIF = 0xF9,
};
#define IWL_P2P_NOA_DESC_COUNT (2)
@@ -110,6 +119,31 @@ struct iwl_channel_switch_start_notif {
__le32 id_and_color;
} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */
+#define CS_ERR_COUNT_ERROR BIT(0)
+#define CS_ERR_LONG_DELAY_AFTER_CS BIT(1)
+#define CS_ERR_LONG_TX_BLOCK BIT(2)
+#define CS_ERR_TX_BLOCK_TIMER_EXPIRED BIT(3)
+
+/**
+ * struct iwl_channel_switch_error_notif - Channel switch error notification
+ *
+ * @mac_id: the mac for which the ucode sends the notification for
+ * @csa_err_mask: mask of channel switch error that can occur
+ */
+struct iwl_channel_switch_error_notif {
+ __le32 mac_id;
+ __le32 csa_err_mask;
+} __packed; /* CHANNEL_SWITCH_ERROR_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_cancel_channel_switch_cmd - Cancel Channel Switch command
+ *
+ * @mac_id: the mac that should cancel the channel switch
+ */
+struct iwl_cancel_channel_switch_cmd {
+ __le32 mac_id;
+} __packed; /* MAC_CANCEL_CHANNEL_SWITCH_S_VER_1 */
+
/**
* struct iwl_chan_switch_te_cmd - Channel Switch Time Event command
*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index 11f0bd283e49..9b7caf968346 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -413,10 +413,11 @@ enum iwl_he_pkt_ext_constellations {
};
#define MAX_HE_SUPP_NSS 2
-#define MAX_HE_CHANNEL_BW_INDX 4
+#define MAX_CHANNEL_BW_INDX_API_D_VER_2 4
+#define MAX_CHANNEL_BW_INDX_API_D_VER_3 5
/**
- * struct iwl_he_pkt_ext - QAM thresholds
+ * struct iwl_he_pkt_ext_v1 - QAM thresholds
* The required PPE is set via HE Capabilities IE, per Nss x BW x MCS
* The IE is organized in the following way:
* Support for Nss x BW (or RU) matrix:
@@ -435,9 +436,34 @@ enum iwl_he_pkt_ext_constellations {
* Nss (0-siso, 1-mimo2) x BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) x
* (0-low_th, 1-high_th)
*/
-struct iwl_he_pkt_ext {
- u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_HE_CHANNEL_BW_INDX][2];
-} __packed; /* PKT_EXT_DOT11AX_API_S */
+struct iwl_he_pkt_ext_v1 {
+ u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_2][2];
+} __packed; /* PKT_EXT_DOT11AX_API_S_VER_1 */
+
+/**
+ * struct iwl_he_pkt_ext_v2 - QAM thresholds
+ * The required PPE is set via HE Capabilities IE, per Nss x BW x MCS
+ * The IE is organized in the following way:
+ * Support for Nss x BW (or RU) matrix:
+ * (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz)
+ * Each entry contains 2 QAM thresholds for 8us and 16us:
+ * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6=RES, 7=NONE
+ * i.e. QAM_th1 < QAM_th2 such if TX uses QAM_tx:
+ * QAM_tx < QAM_th1 --> PPE=0us
+ * QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us
+ * QAM_th2 <= QAM_tx --> PPE=16us
+ * @pkt_ext_qam_th: QAM thresholds
+ * For each Nss/Bw define 2 QAM thresholds (0..5)
+ * For rates below the low_th, no need for PPE
+ * For rates between low_th and high_th, need 8us PPE
+ * For rates equal to or higher than the high_th, need 16us PPE
+ * Nss (0-siso, 1-mimo2) x
+ * BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz, 4-320MHz) x
+ * (0-low_th, 1-high_th)
+ */
+struct iwl_he_pkt_ext_v2 {
+ u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_3][2];
+} __packed; /* PKT_EXT_DOT11AX_API_S_VER_2 */
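
Reviewer note: the v2 layout keeps the v1 low/high threshold scheme and only adds the fifth (320MHz) bandwidth column. A small worked example of the lookup the comment describes — illustrative only, not code from this patch:

static u8 example_ppe_us(const struct iwl_he_pkt_ext_v2 *pe,
			 int nss, int bw_idx, u8 qam_tx)
{
	/* th[0] is the low threshold, th[1] the high one */
	const u8 *th = pe->pkt_ext_qam_th[nss][bw_idx];

	if (qam_tx < th[0])
		return 0;	/* below low_th: no PPE needed */
	if (qam_tx < th[1])
		return 8;	/* between the thresholds: 8us PPE */
	return 16;		/* at or above high_th: 16us PPE */
}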
/**
* enum iwl_he_sta_ctxt_flags - HE STA context flags
@@ -464,6 +490,11 @@ struct iwl_he_pkt_ext {
* @STA_CTXT_HE_RU_2MHZ_BLOCK: indicates that 26-tone RU OFDMA transmission are
* not allowed (as there are OBSS that might classify such transmissions as
* radar pulses).
+ * @STA_CTXT_HE_NDP_FEEDBACK_ENABLED: mark support for NDP feedback and change
+ * of threshold
+ * @STA_CTXT_EHT_PUNCTURE_MASK_VALID: indicates the puncture_mask field is valid
+ * @STA_CTXT_EHT_LONG_PPE_ENABLED: indicates the PPE requirement should be
+ * extended to 20us for BW > 160MHz or for MCS w/ 4096-QAM.
*/
enum iwl_he_sta_ctxt_flags {
STA_CTXT_HE_REF_BSSID_VALID = BIT(4),
@@ -477,6 +508,9 @@ enum iwl_he_sta_ctxt_flags {
STA_CTXT_HE_MU_EDCA_CW = BIT(12),
STA_CTXT_HE_NIC_NOT_ACK_ENABLED = BIT(13),
STA_CTXT_HE_RU_2MHZ_BLOCK = BIT(14),
+ STA_CTXT_HE_NDP_FEEDBACK_ENABLED = BIT(15),
+ STA_CTXT_EHT_PUNCTURE_MASK_VALID = BIT(16),
+ STA_CTXT_EHT_LONG_PPE_ENABLED = BIT(17),
};
/**
@@ -551,7 +585,7 @@ struct iwl_he_sta_context_cmd_v1 {
u8 frag_min_size;
/* The below fields are set via PPE thresholds element */
- struct iwl_he_pkt_ext pkt_ext;
+ struct iwl_he_pkt_ext_v1 pkt_ext;
/* The below fields are set via HE-Operation IE */
u8 bss_color;
@@ -568,7 +602,7 @@ struct iwl_he_sta_context_cmd_v1 {
} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_1 */
/**
- * struct iwl_he_sta_context_cmd - configure FW to work with HE AP
+ * struct iwl_he_sta_context_cmd_v2 - configure FW to work with HE AP
* @sta_id: STA id
* @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
* 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
@@ -599,7 +633,7 @@ struct iwl_he_sta_context_cmd_v1 {
* @bssid_count: actual number of VAPs in the MultiBSS Set
* @reserved4: alignment
*/
-struct iwl_he_sta_context_cmd {
+struct iwl_he_sta_context_cmd_v2 {
u8 sta_id;
u8 tid_limit;
u8 reserved1;
@@ -619,7 +653,7 @@ struct iwl_he_sta_context_cmd {
u8 frag_min_size;
/* The below fields are set via PPE thresholds element */
- struct iwl_he_pkt_ext pkt_ext;
+ struct iwl_he_pkt_ext_v1 pkt_ext;
/* The below fields are set via HE-Operation IE */
u8 bss_color;
@@ -643,6 +677,81 @@ struct iwl_he_sta_context_cmd {
} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_2 */
/**
+ * struct iwl_he_sta_context_cmd_v3 - configure FW to work with HE AP
+ * @sta_id: STA id
+ * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
+ * 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
+ * @reserved1: reserved byte for future use
+ * @reserved2: reserved byte for future use
+ * @flags: see %iwl_11ax_sta_ctxt_flags
+ * @ref_bssid_addr: reference BSSID used by the AP
+ * @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes
+ * @htc_flags: which features are supported in HTC
+ * @frag_flags: frag support in A-MSDU
+ * @frag_level: frag support level
+ * @frag_max_num: max num of "open" MSDUs in the receiver (in power of 2)
+ * @frag_min_size: min frag size (except last frag)
+ * @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa
+ * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame
+ * @htc_trig_based_pkt_ext: default PE in 4us units
+ * @frame_time_rts_th: HE duration RTS threshold, in units of 32us
+ * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1
+ * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1
+ * @puncture_mask: puncture mask for EHT
+ * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues
+ * @max_bssid_indicator: indicator of the max bssid supported on the associated
+ * bss
+ * @bssid_index: index of the associated VAP
+ * @ema_ap: AP supports enhanced Multi BSSID advertisement
+ * @profile_periodicity: number of Beacon periods that are needed to receive the
+ * complete VAPs info
+ * @bssid_count: actual number of VAPs in the MultiBSS Set
+ * @reserved4: alignment
+ */
+struct iwl_he_sta_context_cmd_v3 {
+ u8 sta_id;
+ u8 tid_limit;
+ u8 reserved1;
+ u8 reserved2;
+ __le32 flags;
+
+ /* The below fields are set via Multiple BSSID IE */
+ u8 ref_bssid_addr[6];
+ __le16 reserved0;
+
+ /* The below fields are set via HE-capabilities IE */
+ __le32 htc_flags;
+
+ u8 frag_flags;
+ u8 frag_level;
+ u8 frag_max_num;
+ u8 frag_min_size;
+
+ /* The below fields are set via PPE thresholds element */
+ struct iwl_he_pkt_ext_v2 pkt_ext;
+
+ /* The below fields are set via HE-Operation IE */
+ u8 bss_color;
+ u8 htc_trig_based_pkt_ext;
+ __le16 frame_time_rts_th;
+
+ /* Random access parameter set (i.e. RAPS) */
+ u8 rand_alloc_ecwmin;
+ u8 rand_alloc_ecwmax;
+ __le16 puncture_mask;
+
+ /* The below fields are set via MU EDCA parameter set element */
+ struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
+
+ u8 max_bssid_indicator;
+ u8 bssid_index;
+ u8 ema_ap;
+ u8 profile_periodicity;
+ u8 bssid_count;
+ u8 reserved4[3];
+} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_2 */
+
+/**
* struct iwl_he_monitor_cmd - configure air sniffer for HE
* @bssid: the BSSID to sniff for
* @reserved1: reserved for dword alignment
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 4949fcf85257..91bfde6d5367 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -420,6 +420,30 @@ struct iwl_tas_config_cmd_v3 {
} __packed; /* TAS_CONFIG_CMD_API_S_VER_3 */
/**
+ * struct iwl_tas_config_cmd_v4 - configures the TAS
+ * @block_list_size: size of relevant field in block_list_array
+ * @block_list_array: list of countries where TAS must be disabled
+ * @override_tas_iec: indicates whether to override default value of IEC regulatory
+ * @enable_tas_iec: in case override_tas_iec is set -
+ * indicates whether IEC regulatory is enabled or disabled
+ * @usa_tas_uhb_allowed: if set, allow TAS UHB in the USA
+ * @reserved: reserved
+*/
+struct iwl_tas_config_cmd_v4 {
+ __le32 block_list_size;
+ __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
+ u8 override_tas_iec;
+ u8 enable_tas_iec;
+ u8 usa_tas_uhb_allowed;
+ u8 reserved;
+} __packed; /* TAS_CONFIG_CMD_API_S_VER_4 */
+
+union iwl_tas_config_cmd {
+ struct iwl_tas_config_cmd_v2 v2;
+ struct iwl_tas_config_cmd_v3 v3;
+ struct iwl_tas_config_cmd_v4 v4;
+};
+/**
* enum iwl_lari_configs - bit masks for the various LARI config operations
* @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in ukraine
* @LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK: ETSI 5.8GHz SRD passive scan
@@ -515,6 +539,32 @@ struct iwl_lari_config_change_cmd_v5 {
} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_5 */
/**
+ * struct iwl_lari_config_change_cmd_v6 - change LARI configuration
+ * @config_bitmap: Bitmap of the config commands. Each bit will trigger a
+ * different predefined FW config operation.
+ * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets.
+ * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate the value to use.
+ * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs. There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate allow/disallow unii4 channels.
+ * @chan_state_active_bitmap: Bitmap for overriding channel state to active.
+ * Each bit represents a country or region to activate, according to the BIOS
+ * definitions.
+ * @force_disable_channels_bitmap: Bitmap of disabled bands/channels.
+ * Each bit represents a set of channels in a specific band that should be disabled
+ */
+struct iwl_lari_config_change_cmd_v6 {
+ __le32 config_bitmap;
+ __le32 oem_uhb_allow_bitmap;
+ __le32 oem_11ax_allow_bitmap;
+ __le32 oem_unii4_allow_bitmap;
+ __le32 chan_state_active_bitmap;
+ __le32 force_disable_channels_bitmap;
+} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_6 */
+
+/**
* struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete
* @status: PNVM image loading status
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
index c04f2521fcb3..b1b9c29859c1 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
@@ -166,14 +166,24 @@ struct iwl_dts_measurement_resp {
/**
* struct ct_kill_notif - CT-kill entry notification
+ * This structure represents both versions of this notification.
*
* @temperature: the current temperature in celsius
- * @reserved: reserved
+ * @dts: only in v2: bitmap of the DTS that triggered the CT Kill:
+ * bit 0: ToP master
+ * bit 1: PA chain A master
+ * bit 2: PA chain B master
+ * bit 3: ToP slave
+ * bit 4: PA chain A slave
+ * bit 5: PA chain B slave
+ * bits 6,7: reserved (set to 0)
+ * @scheme: only for v2: scheme that triggered the CT Kill (0-SW, 1-HW)
*/
struct ct_kill_notif {
__le16 temperature;
- __le16 reserved;
-} __packed; /* GRP_PHY_CT_KILL_NTF */
+ u8 dts;
+ u8 scheme;
+} __packed; /* CT_KILL_NOTIFICATION_API_S_VER_1, CT_KILL_NOTIFICATION_API_S_VER_2 */
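
Reviewer note: a hedged sketch of how a handler might decode the v2 fields; the print helper and call site are assumptions, the bit meanings come from the comment above:

static void example_ct_kill_decode(const struct ct_kill_notif *notif)
{
	int temp = le16_to_cpu(notif->temperature);

	pr_info("CT-kill at %d C, dts bitmap 0x%x, %s scheme\n",
		temp, notif->dts, notif->scheme ? "HW" : "SW");
}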
/**
* enum ctdp_cmd_operation - CTDP command operations
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 81318208f2f6..f92cac1da764 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -340,7 +340,7 @@ struct iwl_dev_tx_power_cmd_v5 {
} __packed; /* TX_REDUCED_POWER_API_S_VER_5 */
/**
- * struct iwl_dev_tx_power_cmd_v5 - TX power reduction command version 5
+ * struct iwl_dev_tx_power_cmd_v6 - TX power reduction command version 6
* @per_chain: per chain restrictions
* @enable_ack_reduction: enable or disable close range ack TX power
* reduction.
@@ -361,6 +361,28 @@ struct iwl_dev_tx_power_cmd_v6 {
} __packed; /* TX_REDUCED_POWER_API_S_VER_6 */
/**
+ * struct iwl_dev_tx_power_cmd_v7 - TX power reduction command version 7
+ * @per_chain: per chain restrictions
+ * @enable_ack_reduction: enable or disable close range ack TX power
+ * reduction.
+ * @per_chain_restriction_changed: whether per_chain_restriction has changed
+ * from the last command. Used if set_mode is
+ * IWL_TX_POWER_MODE_SET_SAR_TIMER.
+ * Note: if not changed, the command is used for keep-alive only.
+ * @reserved: reserved (padding)
+ * @timer_period: timer in milliseconds. If it expires, FW will change to the
+ * default BIOS values. Relevant if set_mode is IWL_TX_POWER_MODE_SET_SAR_TIMER
+ * @flags: reduce power flags.
+ */
+struct iwl_dev_tx_power_cmd_v7 {
+ __le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
+ u8 enable_ack_reduction;
+ u8 per_chain_restriction_changed;
+ u8 reserved[2];
+ __le32 timer_period;
+ __le32 flags;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_7 */
+/**
* struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion)
* @common: common part of the command
* @v3: version 3 part of the command
@@ -375,6 +397,7 @@ struct iwl_dev_tx_power_cmd {
struct iwl_dev_tx_power_cmd_v4 v4;
struct iwl_dev_tx_power_cmd_v5 v5;
struct iwl_dev_tx_power_cmd_v6 v6;
+ struct iwl_dev_tx_power_cmd_v7 v7;
};
};
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h
index c678b9aa9b55..1a84a4081e7c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2020 Intel Corporation
+ * Copyright (C) 2020-2021 Intel Corporation
*/
#ifndef __iwl_fw_api_rfi_h__
#define __iwl_fw_api_rfi_h__
@@ -57,4 +57,12 @@ struct iwl_rfi_freq_table_resp_cmd {
__le32 status;
} __packed; /* RFI_CONFIG_CMD_API_S_VER_1 */
+/**
+ * struct iwl_rfi_deactivate_notif - notification that FW disabled RFIm
+ *
+ * @reason: used only for a log message
+ */
+struct iwl_rfi_deactivate_notif {
+ __le32 reason;
+} __packed; /* RFI_DEACTIVATE_NTF_S_VER_1 */
#endif /* __iwl_fw_api_rfi_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index 4a7723eb8c1d..687f804c46b7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_rs_h__
@@ -133,7 +133,7 @@ enum IWL_TLC_MCS_PER_BW {
};
/**
- * struct tlc_config_cmd - TLC configuration
+ * struct iwl_tlc_config_cmd_v3 - TLC configuration
* @sta_id: station id
* @reserved1: reserved
* @max_ch_width: max supported channel width from @enum iwl_tlc_mng_cfg_cw
@@ -168,7 +168,7 @@ struct iwl_tlc_config_cmd_v3 {
} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_3 */
/**
- * struct tlc_config_cmd - TLC configuration
+ * struct iwl_tlc_config_cmd_v4 - TLC configuration
* @sta_id: station id
* @reserved1: reserved
* @max_ch_width: max supported channel width from &enum iwl_tlc_mng_cfg_cw
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index e73cc7380a26..ecc6706f66ed 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_tx_h__
@@ -296,8 +296,7 @@ struct iwl_tx_cmd_gen2 {
* @dram_info: FW internal DRAM storage
* @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
* cleared. Combination of RATE_MCS_*
- * @ttl: time to live - packet lifetime limit. The FW should drop if
- * passed.
+ * @reserved: reserved
* @hdr: 802.11 header
*/
struct iwl_tx_cmd_gen3 {
@@ -306,7 +305,7 @@ struct iwl_tx_cmd_gen3 {
__le32 offload_assist;
struct iwl_dram_sec_info dram_info;
__le32 rate_n_flags;
- __le64 ttl;
+ u8 reserved[8];
struct ieee80211_hdr hdr[];
} __packed; /* TX_CMD_API_S_VER_8,
TX_CMD_API_S_VER_10 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
index 8b3a00df41da..e018946310d1 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2019-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2019-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -76,6 +76,8 @@ enum iwl_tx_queue_cfg_actions {
TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1),
};
+#define IWL_DEFAULT_QUEUE_SIZE_EHT (1024 * 4)
+#define IWL_DEFAULT_QUEUE_SIZE_HE 1024
#define IWL_DEFAULT_QUEUE_SIZE 256
#define IWL_MGMT_QUEUE_SIZE 16
#define IWL_CMD_QUEUE_SIZE 32
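
Reviewer note: these constants are what the 22000 config changes above plug into min_ba_txq_size; for reference, the arithmetic works out as follows (comment only, nothing here is device-specific):

	/* IWL_DEFAULT_QUEUE_SIZE_HE  = 1024 entries per BA queue (HE devices)
	 * IWL_DEFAULT_QUEUE_SIZE_EHT = 4 * 1024 = 4096 entries (EHT devices),
	 * i.e. 4x the HE size and 16x the non-HE IWL_DEFAULT_QUEUE_SIZE of 256.
	 */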
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 7ad9cee925da..abf49022edbe 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -12,7 +12,7 @@
#include "iwl-io.h"
#include "iwl-prph.h"
#include "iwl-csr.h"
-
+#include "iwl-fh.h"
/**
* struct iwl_fw_dump_ptrs - set of pointers needed for the fw-error-dump
*
@@ -303,9 +303,6 @@ static void iwl_fw_dump_txf(struct iwl_fw_runtime *fwrt,
iwl_trans_release_nic_access(fwrt->trans);
}
-#define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */
-#define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */
-
struct iwl_prph_range {
u32 start, end;
};
@@ -1027,7 +1024,7 @@ struct iwl_dump_ini_region_data {
static int
iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1052,7 +1049,7 @@ iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt,
static int
iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1102,7 +1099,7 @@ iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1121,7 +1118,7 @@ static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_trans *trans = fwrt->trans;
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
@@ -1153,7 +1150,7 @@ static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1175,7 +1172,7 @@ static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
}
static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct page *page = fwrt->fw_paging_db[idx].fw_paging_block;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1195,7 +1192,7 @@ static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_error_dump_range *range;
u32 page_size;
@@ -1204,7 +1201,7 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
idx++;
if (!fwrt->trans->trans_cfg->gen2)
- return _iwl_dump_ini_paging_iter(fwrt, range_ptr, idx);
+ return _iwl_dump_ini_paging_iter(fwrt, range_ptr, range_len, idx);
range = range_ptr;
page_size = fwrt->trans->init_dram.paging[idx].size;
@@ -1220,7 +1217,7 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
static int
iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1239,7 +1236,7 @@ iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_mon_smem_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1307,7 +1304,7 @@ static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1442,7 +1439,7 @@ static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1509,7 +1506,7 @@ out:
static int
iwl_dump_ini_err_table_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_region_err_table *err_table = &reg->err_table;
@@ -1528,7 +1525,7 @@ iwl_dump_ini_err_table_iter(struct iwl_fw_runtime *fwrt,
static int
iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_region_special_device_memory *special_mem =
@@ -1549,7 +1546,7 @@ iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime *fwrt,
static int
iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
@@ -1561,8 +1558,6 @@ iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
return -EBUSY;
range->range_data_size = reg->dev_addr.size;
- iwl_write_prph_no_grab(fwrt->trans, DBGI_SRAM_TARGET_ACCESS_CFG,
- DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK);
for (i = 0; i < (le32_to_cpu(reg->dev_addr.size) / 4); i++) {
prph_data = iwl_read_prph_no_grab(fwrt->trans, (i % 2) ?
DBGI_SRAM_TARGET_ACCESS_RDATA_MSB :
@@ -1579,7 +1574,7 @@ iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, int idx)
+ void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_error_dump_range *range = range_ptr;
struct iwl_rx_packet *pkt = reg_data->dump_data->fw_pkt;
@@ -1598,10 +1593,37 @@ static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt,
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
+static int iwl_dump_ini_imr_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, u32 range_len, int idx)
+{
+ /* read the IMR memory and DMA it to SRAM */
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ u64 imr_curr_addr = fwrt->trans->dbg.imr_data.imr_curr_addr;
+ u32 imr_rem_bytes = fwrt->trans->dbg.imr_data.imr2sram_remainbyte;
+ u32 sram_addr = fwrt->trans->dbg.imr_data.sram_addr;
+ u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;
+ u32 size_to_dump = (imr_rem_bytes > sram_size) ? sram_size : imr_rem_bytes;
+
+ range->range_data_size = cpu_to_le32(size_to_dump);
+ if (iwl_trans_write_imr_mem(fwrt->trans, sram_addr,
+ imr_curr_addr, size_to_dump)) {
+ IWL_ERR(fwrt, "WRT_DEBUG: IMR Memory transfer failed\n");
+ return -1;
+ }
+
+ fwrt->trans->dbg.imr_data.imr_curr_addr = imr_curr_addr + size_to_dump;
+ fwrt->trans->dbg.imr_data.imr2sram_remainbyte -= size_to_dump;
+
+ iwl_trans_read_mem_bytes(fwrt->trans, sram_addr, range->data,
+ size_to_dump);
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
static void *
iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *data)
+ void *data, u32 data_len)
{
struct iwl_fw_ini_error_dump *dump = data;
@@ -1677,7 +1699,7 @@ iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt,
static void *
iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *data)
+ void *data, u32 data_len)
{
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
@@ -1688,7 +1710,7 @@ iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
static void *
iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *data)
+ void *data, u32 data_len)
{
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
@@ -1697,9 +1719,20 @@ iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
}
static void *
+iwl_dump_ini_mon_dbgi_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data, u32 data_len)
+{
+ struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
+
+ return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
+ &fwrt->trans->cfg->mon_dbgi_regs);
+}
+
+static void *
iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *data)
+ void *data, u32 data_len)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_err_table_dump *dump = data;
@@ -1713,7 +1746,7 @@ iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt,
static void *
iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *data)
+ void *data, u32 data_len)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_special_device_memory *dump = data;
@@ -1725,6 +1758,18 @@ iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime *fwrt,
return dump->data;
}
+static void *
+iwl_dump_ini_imr_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data, u32 data_len)
+{
+ struct iwl_fw_ini_error_dump *dump = data;
+
+ dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
+
+ return dump->data;
+}
+
static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
@@ -1784,6 +1829,26 @@ static u32 iwl_dump_ini_single_range(struct iwl_fw_runtime *fwrt,
return 1;
}
+static u32 iwl_dump_ini_imr_ranges(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+	/* range is the total number of pages that need to be copied from
+	 * IMR memory to SRAM and later from SRAM to DRAM
+	 */
+ u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable;
+ u32 imr_size = fwrt->trans->dbg.imr_data.imr_size;
+ u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;
+
+ if (imr_enable == 0 || imr_size == 0 || sram_size == 0) {
+ IWL_DEBUG_INFO(fwrt,
+ "WRT: Invalid imr data enable: %d, imr_size: %d, sram_size: %d\n",
+ imr_enable, imr_size, sram_size);
+ return 0;
+ }
+
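+	/* number of SRAM-sized chunks covering the IMR, rounded up */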
+	return (imr_size % sram_size) ? (imr_size / sram_size + 1) :
+		(imr_size / sram_size);
+}
+
static u32 iwl_dump_ini_mem_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
@@ -1861,6 +1926,20 @@ iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime *fwrt,
return size;
}
+static u32 iwl_dump_ini_mon_dbgi_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 size = le32_to_cpu(reg->dev_addr.size);
+ u32 ranges = iwl_dump_ini_mem_ranges(fwrt, reg_data);
+
+ if (!size || !ranges)
+ return 0;
+
+ return sizeof(struct iwl_fw_ini_monitor_dump) + ranges *
+ (size + sizeof(struct iwl_fw_ini_error_dump_range));
+}
+
static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
@@ -1948,6 +2027,33 @@ iwl_dump_ini_fw_pkt_get_size(struct iwl_fw_runtime *fwrt,
return size;
}
+static u32
+iwl_dump_ini_imr_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ u32 size = 0;
+ u32 ranges = 0;
+ u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable;
+ u32 imr_size = fwrt->trans->dbg.imr_data.imr_size;
+ u32 sram_size = fwrt->trans->dbg.imr_data.sram_size;
+
+ if (imr_enable == 0 || imr_size == 0 || sram_size == 0) {
+ IWL_DEBUG_INFO(fwrt,
+ "WRT: Invalid imr data enable: %d, imr_size: %d, sram_size: %d\n",
+ imr_enable, imr_size, sram_size);
+ return size;
+ }
+ size = imr_size;
+ ranges = iwl_dump_ini_imr_ranges(fwrt, reg_data);
+ if (!size && !ranges) {
+ IWL_ERR(fwrt, "WRT: imr_size :=%d, ranges :=%d\n", size, ranges);
+ return 0;
+ }
+ size += sizeof(struct iwl_fw_ini_error_dump) +
+ ranges * sizeof(struct iwl_fw_ini_error_dump_range);
+ return size;
+}
+
/**
* struct iwl_dump_ini_mem_ops - ini memory dump operations
* @get_num_of_ranges: returns the number of memory ranges in the region.
@@ -1964,10 +2070,10 @@ struct iwl_dump_ini_mem_ops {
struct iwl_dump_ini_region_data *reg_data);
void *(*fill_mem_hdr)(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *data);
+ void *data, u32 data_len);
int (*fill_range)(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
- void *range, int idx);
+ void *range, u32 range_len, int idx);
};
/**
@@ -1990,24 +2096,53 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
struct iwl_fw_ini_error_dump_data *tlv;
struct iwl_fw_ini_error_dump_header *header;
u32 type = reg->type;
- u32 id = le32_to_cpu(reg->id);
+ u32 id = le32_get_bits(reg->id, IWL_FW_INI_REGION_ID_MASK);
u32 num_of_ranges, i, size;
- void *range;
-
- /*
- * The higher part of the ID from 2 is irrelevant for
- * us, so mask it out.
- */
- if (le32_to_cpu(reg->hdr.version) >= 2)
- id &= IWL_FW_INI_REGION_V2_MASK;
+ u8 *range;
+ u32 free_size;
+ u64 header_size;
+ u32 dump_policy = IWL_FW_INI_DUMP_VERBOSE;
+
+ IWL_DEBUG_FW(fwrt, "WRT: Collecting region: dump type=%d, id=%d, type=%d\n",
+ dump_policy, id, type);
+
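+	/*
+	 * dump_policy is currently fixed to verbose; skip the region if its
+	 * dump policy bits (TLV version >= 2) exclude the current mode.
+	 */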
+ if (le32_to_cpu(reg->hdr.version) >= 2) {
+ u32 dp = le32_get_bits(reg->id,
+ IWL_FW_INI_REGION_DUMP_POLICY_MASK);
+
+ if (dump_policy == IWL_FW_INI_DUMP_VERBOSE &&
+ !(dp & IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT)) {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: no dump - type %d and policy mismatch=%d\n",
+ dump_policy, dp);
+ return 0;
+ } else if (dump_policy == IWL_FW_INI_DUMP_MEDIUM &&
+ !(dp & IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB)) {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: no dump - type %d and policy mismatch=%d\n",
+ dump_policy, dp);
+ return 0;
+ } else if (dump_policy == IWL_FW_INI_DUMP_BRIEF &&
+ !(dp & IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB)) {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: no dump - type %d and policy mismatch=%d\n",
+ dump_policy, dp);
+ return 0;
+ }
+ }
if (!ops->get_num_of_ranges || !ops->get_size || !ops->fill_mem_hdr ||
- !ops->fill_range)
+ !ops->fill_range) {
+ IWL_DEBUG_FW(fwrt, "WRT: no ops for collecting data\n");
return 0;
+ }
size = ops->get_size(fwrt, reg_data);
- if (!size)
+
+ if (size < sizeof(*header)) {
+ IWL_DEBUG_FW(fwrt, "WRT: size didn't include space for header\n");
return 0;
+ }
entry = vzalloc(sizeof(*entry) + sizeof(*tlv) + size);
if (!entry)
@@ -2022,9 +2157,6 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
tlv->reserved = reg->reserved;
tlv->len = cpu_to_le32(size);
- IWL_DEBUG_FW(fwrt, "WRT: Collecting region: id=%d, type=%d\n", id,
- type);
-
num_of_ranges = ops->get_num_of_ranges(fwrt, reg_data);
header = (void *)tlv->data;
@@ -2033,7 +2165,8 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
header->name_len = cpu_to_le32(IWL_FW_INI_MAX_NAME);
memcpy(header->name, reg->name, IWL_FW_INI_MAX_NAME);
- range = ops->fill_mem_hdr(fwrt, reg_data, header);
+ free_size = size;
+ range = ops->fill_mem_hdr(fwrt, reg_data, header, free_size);
if (!range) {
IWL_ERR(fwrt,
"WRT: Failed to fill region header: id=%d, type=%d\n",
@@ -2041,8 +2174,21 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
goto out_err;
}
+ header_size = range - (u8 *)header;
+
+ if (WARN(header_size > free_size,
+ "header size %llu > free_size %d",
+ header_size, free_size)) {
+ IWL_ERR(fwrt,
+ "WRT: fill_mem_hdr used more than given free_size\n");
+ goto out_err;
+ }
+
+ free_size -= header_size;
+
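+	/* track the remaining space so each fill_range call can be bounds-checked */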
for (i = 0; i < num_of_ranges; i++) {
- int range_size = ops->fill_range(fwrt, reg_data, range, i);
+ int range_size = ops->fill_range(fwrt, reg_data, range,
+ free_size, i);
if (range_size < 0) {
IWL_ERR(fwrt,
@@ -2050,6 +2196,15 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
id, type);
goto out_err;
}
+
+ if (WARN(range_size > free_size, "range_size %d > free_size %d",
+ range_size, free_size)) {
+ IWL_ERR(fwrt,
+				"WRT: fill_range used more than given free_size\n");
+ goto out_err;
+ }
+
+ free_size -= range_size;
range = range + range_size;
}
@@ -2240,7 +2395,12 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
.fill_range = iwl_dump_ini_csr_iter,
},
- [IWL_FW_INI_REGION_DRAM_IMR] = {},
+ [IWL_FW_INI_REGION_DRAM_IMR] = {
+ .get_num_of_ranges = iwl_dump_ini_imr_ranges,
+ .get_size = iwl_dump_ini_imr_get_size,
+ .fill_mem_hdr = iwl_dump_ini_imr_fill_header,
+ .fill_range = iwl_dump_ini_imr_iter,
+ },
[IWL_FW_INI_REGION_PCI_IOSF_CONFIG] = {
.get_num_of_ranges = iwl_dump_ini_mem_ranges,
.get_size = iwl_dump_ini_mem_get_size,
@@ -2255,8 +2415,8 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
},
[IWL_FW_INI_REGION_DBGI_SRAM] = {
.get_num_of_ranges = iwl_dump_ini_mem_ranges,
- .get_size = iwl_dump_ini_mem_get_size,
- .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
+ .get_size = iwl_dump_ini_mon_dbgi_get_size,
+ .fill_mem_hdr = iwl_dump_ini_mon_dbgi_fill_header,
.fill_range = iwl_dump_ini_dbgi_sram_iter,
},
};
@@ -2270,6 +2430,9 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data reg_data = {
.dump_data = dump_data,
};
+ struct iwl_dump_ini_region_data imr_reg_data = {
+ .dump_data = dump_data,
+ };
int i;
u32 size = 0;
u64 regions_mask = le64_to_cpu(trigger->regions_mask) &
@@ -2305,10 +2468,32 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
tp_id);
continue;
}
+ /*
+	 * DRAM_IMR can be collected only at the FW/HW error time point,
+	 * when the fw is not alive. In addition, it must be collected
+	 * last, as it overwrites the SRAM that may still contain debug
+	 * data which also needs to be collected.
+ */
+ if (reg_type == IWL_FW_INI_REGION_DRAM_IMR) {
+ if (tp_id == IWL_FW_INI_TIME_POINT_FW_ASSERT ||
+ tp_id == IWL_FW_INI_TIME_POINT_FW_HW_ERROR)
+ imr_reg_data.reg_tlv = fwrt->trans->dbg.active_regions[i];
+ else
+ IWL_INFO(fwrt,
+ "WRT: trying to collect DRAM_IMR at time point: %d, skipping\n",
+ tp_id);
+ /* continue to next region */
+ continue;
+ }
+
size += iwl_dump_ini_mem(fwrt, list, &reg_data,
&iwl_dump_ini_region_ops[reg_type]);
}
+	/* collect the DRAM_IMR region last */
+ if (imr_reg_data.reg_tlv)
+		size += iwl_dump_ini_mem(fwrt, list, &imr_reg_data,
+ &iwl_dump_ini_region_ops[IWL_FW_INI_REGION_DRAM_IMR]);
if (size)
size += iwl_dump_ini_info(fwrt, trigger, list);
@@ -2444,7 +2629,7 @@ static void iwl_fw_error_dump_data_free(struct iwl_fwrt_dump_data *dump_data)
static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
struct iwl_fwrt_dump_data *dump_data)
{
- struct list_head dump_list = LIST_HEAD_INIT(dump_list);
+ LIST_HEAD(dump_list);
struct scatterlist *sg_dump_data;
u32 file_len = iwl_dump_ini_file_gen(fwrt, dump_data, &dump_list);
@@ -2589,7 +2774,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
delay = le32_to_cpu(trigger->stop_delay) * USEC_PER_MSEC;
}
- desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
+ desc = kzalloc(struct_size(desc, trig_desc.data, len), GFP_ATOMIC);
if (!desc)
return -ENOMEM;
@@ -2685,6 +2870,28 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
}
IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
+void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
+ u32 timepoint,
+ u32 timepoint_data)
+{
+ struct iwl_dbg_dump_complete_cmd hcmd_data;
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DEBUG_GROUP, FW_DUMP_COMPLETE_CMD),
+ .data[0] = &hcmd_data,
+ .len[0] = sizeof(hcmd_data),
+ };
+
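+	/* don't send the command if the firmware is already in error state */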
+ if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
+ return;
+
+ if (fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT)) {
+ hcmd_data.tp = cpu_to_le32(timepoint);
+ hcmd_data.tp_data = cpu_to_le32(timepoint_data);
+ iwl_trans_send_cmd(fwrt->trans, &hcmd);
+ }
+}
+
/* this function assumes dump_start was called beforehand and dump_end will be
* called afterwards
*/
@@ -2693,10 +2900,16 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
struct iwl_fw_dbg_params params = {0};
struct iwl_fwrt_dump_data *dump_data =
&fwrt->dump.wks[wk_idx].dump_data;
-
+	u32 policy;
+	u32 time_point;
+
if (!test_bit(wk_idx, &fwrt->dump.active_wks))
return;
+ if (!dump_data->trig) {
+ IWL_ERR(fwrt, "dump trigger data is not set\n");
+ goto out;
+ }
+
if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) {
IWL_ERR(fwrt, "Device is not enabled - cannot dump error\n");
goto out;
@@ -2719,6 +2932,13 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
+ policy = le32_to_cpu(dump_data->trig->apply_policy);
+ time_point = le32_to_cpu(dump_data->trig->time_point);
+
+ if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) {
+ IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n");
+ iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0);
+ }
if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY)
iwl_force_nmi(fwrt->trans);
@@ -2777,10 +2997,10 @@ int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
"WRT: Collecting data: ini trigger %d fired (delay=%dms).\n",
tp_id, (u32)(delay / USEC_PER_MSEC));
- schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay));
-
if (sync)
iwl_fw_dbg_collect_sync(fwrt, idx);
+ else
+ schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay));
return 0;
}
@@ -2795,9 +3015,8 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
/* assumes the op mode mutex is locked in dump_start since
* iwl_fw_dbg_collect_sync can't run in parallel
*/
- if (fwrt->ops && fwrt->ops->dump_start &&
- fwrt->ops->dump_start(fwrt->ops_ctx))
- return;
+ if (fwrt->ops && fwrt->ops->dump_start)
+ fwrt->ops->dump_start(fwrt->ops_ctx);
iwl_fw_dbg_collect_sync(fwrt, wks->idx);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 8c3c890066b0..be7806407de8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2019, 2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2019, 2021-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -324,4 +324,7 @@ static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt,
}
void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt);
+void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
+ u32 timepoint,
+ u32 timepoint_data);
#endif /* __iwl_fw_dbg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
index a152ce306475..43e997283db0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -150,7 +150,7 @@ static int iwl_dbgfs_enabled_severities_write(struct iwl_fw_runtime *fwrt,
{
struct iwl_dbg_host_event_cfg_cmd event_cfg;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(HOST_EVENT_CFG, DEBUG_GROUP, 0),
+ .id = WIDE_ID(DEBUG_GROUP, HOST_EVENT_CFG),
.flags = CMD_ASYNC,
.data[0] = &event_cfg,
.len[0] = sizeof(event_cfg),
@@ -358,7 +358,7 @@ static int iwl_dbgfs_fw_info_seq_show(struct seq_file *seq, void *v)
ver = &fw->ucode_capa.cmd_versions[state->pos];
- cmd_id = iwl_cmd_id(ver->cmd, ver->group, 0);
+ cmd_id = WIDE_ID(ver->group, ver->cmd);
seq_printf(seq, " 0x%04x:\n", cmd_id);
seq_printf(seq, " name: %s\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index efc6540d31af..5679a78758be 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -119,7 +119,7 @@ enum iwl_ucode_tlv_type {
struct iwl_ucode_tlv {
__le32 type; /* see above */
__le32 length; /* not including type/length fields */
- u8 data[0];
+ u8 data[];
};
#define IWL_TLV_UCODE_MAGIC 0x0a4c5749
@@ -310,7 +310,6 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
* @IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG: Consolidated D3-D0 image
* @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
- * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
* @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
* @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
* @IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD: supports U-APSD on p2p interface when it
@@ -368,6 +367,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* reset flow
* @IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN: Support for passive scan on 6GHz PSC
* channels even when these are not enabled.
+ * @IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT: Support for indicating dump collection
+ * complete to FW.
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -386,7 +387,6 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG = (__force iwl_ucode_tlv_capa_t)17,
IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18,
- IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19,
IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21,
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22,
IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD = (__force iwl_ucode_tlv_capa_t)26,
@@ -419,6 +419,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_BROADCAST_TWT = (__force iwl_ucode_tlv_capa_t)60,
IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO = (__force iwl_ucode_tlv_capa_t)61,
IWL_UCODE_TLV_CAPA_RFIM_SUPPORT = (__force iwl_ucode_tlv_capa_t)62,
+ IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT = (__force iwl_ucode_tlv_capa_t)63,
/* set 2 */
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
@@ -453,6 +454,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT = (__force iwl_ucode_tlv_capa_t)100,
IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT = (__force iwl_ucode_tlv_capa_t)104,
+ IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT = (__force iwl_ucode_tlv_capa_t)105,
#ifdef __CHECKER__
/* sparse says it cannot increment the previous enum member */
@@ -514,34 +516,6 @@ enum iwl_fw_phy_cfg {
FW_PHY_CFG_SHARED_CLK = BIT(31),
};
-#define IWL_UCODE_MAX_CS 1
-
-/**
- * struct iwl_fw_cipher_scheme - a cipher scheme supported by FW.
- * @cipher: a cipher suite selector
- * @flags: cipher scheme flags (currently reserved for a future use)
- * @hdr_len: a size of MPDU security header
- * @pn_len: a size of PN
- * @pn_off: an offset of pn from the beginning of the security header
- * @key_idx_off: an offset of key index byte in the security header
- * @key_idx_mask: a bit mask of key_idx bits
- * @key_idx_shift: bit shift needed to get key_idx
- * @mic_len: mic length in bytes
- * @hw_cipher: a HW cipher index used in host commands
- */
-struct iwl_fw_cipher_scheme {
- __le32 cipher;
- u8 flags;
- u8 hdr_len;
- u8 pn_len;
- u8 pn_off;
- u8 key_idx_off;
- u8 key_idx_mask;
- u8 key_idx_shift;
- u8 mic_len;
- u8 hw_cipher;
-} __packed;
-
enum iwl_fw_dbg_reg_operator {
CSR_ASSIGN,
CSR_SETBIT,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.c b/drivers/net/wireless/intel/iwlwifi/fw/img.c
index 530674a35eeb..b7deca05a953 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.c
@@ -2,13 +2,16 @@
/*
* Copyright(c) 2019 - 2021 Intel Corporation
*/
-
+#include <fw/api/commands.h>
#include "img.h"
-u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def)
+u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u32 cmd_id, u8 def)
{
const struct iwl_fw_cmd_version *entry;
unsigned int i;
+ /* prior to LONG_GROUP, we never used this CMD version API */
+ u8 grp = iwl_cmd_groupid(cmd_id) ?: LONG_GROUP;
+ u8 cmd = iwl_cmd_opcode(cmd_id);
if (!fw->ucode_capa.cmd_versions ||
!fw->ucode_capa.n_cmd_versions)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index fa7b1780064c..f878ac508801 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -134,16 +134,6 @@ struct iwl_fw_paging {
};
/**
- * struct iwl_fw_cscheme_list - a cipher scheme list
- * @size: a number of entries
- * @cs: cipher scheme entries
- */
-struct iwl_fw_cscheme_list {
- u8 size;
- struct iwl_fw_cipher_scheme cs[];
-} __packed;
-
-/**
* enum iwl_fw_type - iwlwifi firmware type
* @IWL_FW_DVM: DVM firmware
* @IWL_FW_MVM: MVM firmware
@@ -197,7 +187,6 @@ struct iwl_dump_exclude {
* @inst_evtlog_size: event log size for runtime ucode.
* @inst_errlog_ptr: error log offfset for runtime ucode.
* @type: firmware type (&enum iwl_fw_type)
- * @cipher_scheme: optional external cipher scheme.
* @human_readable: human readable version
* we get the ALIVE from the uCode
* @phy_integration_ver: PHY integration version string
@@ -228,7 +217,6 @@ struct iwl_fw {
enum iwl_fw_type type;
- struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS];
u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
struct iwl_fw_dbg dbg;
@@ -275,7 +263,7 @@ iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type)
return &fw->img[ucode_type];
}
-u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
+u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u32 cmd_id, u8 def);
u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
const char *iwl_fw_lookup_assert_desc(u32 num);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index 139ece879fab..135bd48bfe9f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -58,7 +58,7 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
{
struct iwl_soc_configuration_cmd cmd = {};
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(SOC_CONFIGURATION_CMD, SYSTEM_GROUP, 0),
+ .id = WIDE_ID(SYSTEM_GROUP, SOC_CONFIGURATION_CMD),
.data[0] = &cmd,
.len[0] = sizeof(cmd),
};
@@ -87,8 +87,7 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
cmd.flags |= le32_encode_bits(fwrt->trans->trans_cfg->ltr_delay,
SOC_FLAGS_LTR_APPLY_DELAY_MASK);
- if (iwl_fw_lookup_cmd_ver(fwrt->fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_REQ_UMAC,
+ if (iwl_fw_lookup_cmd_ver(fwrt->fw, SCAN_REQ_UMAC,
IWL_FW_CMD_VER_UNKNOWN) >= 2 &&
fwrt->trans->trans_cfg->low_latency_xtal)
cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
index 58ca3849d1f3..945bc4160cc9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
@@ -197,7 +197,7 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
}
memcpy(page_address(block->fw_paging_block),
- image->sec[sec_idx].data + offset, len);
+ (const u8 *)image->sec[sec_idx].data + offset, len);
block->fw_offs = image->sec[sec_idx].offset + offset;
dma_sync_single_for_device(fwrt->trans->dev,
block->fw_paging_phys,
@@ -243,7 +243,7 @@ static int iwl_send_paging_cmd(struct iwl_fw_runtime *fwrt,
.block_num = cpu_to_le32(fwrt->num_of_paging_blk),
};
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(FW_PAGING_BLOCK_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+ .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, FW_PAGING_BLOCK_CMD),
.len = { sizeof(paging_cmd), },
.data = { &paging_cmd, },
};
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
index 7d4aa398729a..b6d3ac6ed440 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
@@ -33,7 +33,7 @@ static bool iwl_pnvm_complete_fn(struct iwl_notif_wait_data *notif_wait,
static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
size_t len)
{
- struct iwl_ucode_tlv *tlv;
+ const struct iwl_ucode_tlv *tlv;
u32 sha1 = 0;
u16 mac_type = 0, rf_id = 0;
u8 *pnvm_data = NULL, *tmp;
@@ -47,7 +47,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
u32 tlv_len, tlv_type;
len -= sizeof(*tlv);
- tlv = (void *)data;
+ tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);
@@ -70,7 +70,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
break;
}
- sha1 = le32_to_cpup((__le32 *)data);
+ sha1 = le32_to_cpup((const __le32 *)data);
IWL_DEBUG_FW(trans,
"Got IWL_UCODE_TLV_PNVM_VERSION %0x\n",
@@ -87,8 +87,8 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
if (hw_match)
break;
- mac_type = le16_to_cpup((__le16 *)data);
- rf_id = le16_to_cpup((__le16 *)(data + sizeof(__le16)));
+ mac_type = le16_to_cpup((const __le16 *)data);
+ rf_id = le16_to_cpup((const __le16 *)(data + sizeof(__le16)));
IWL_DEBUG_FW(trans,
"Got IWL_UCODE_TLV_HW_TYPE mac_type 0x%0x rf_id 0x%0x\n",
@@ -99,7 +99,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
hw_match = true;
break;
case IWL_UCODE_TLV_SEC_RT: {
- struct iwl_pnvm_section *section = (void *)data;
+ const struct iwl_pnvm_section *section = (const void *)data;
u32 data_len = tlv_len - sizeof(*section);
IWL_DEBUG_FW(trans,
@@ -107,7 +107,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
tlv_len);
/* TODO: remove, this is a deprecated separator */
- if (le32_to_cpup((__le32 *)data) == 0xddddeeee) {
+ if (le32_to_cpup((const __le32 *)data) == 0xddddeeee) {
IWL_DEBUG_FW(trans, "Ignoring separator.\n");
break;
}
@@ -173,7 +173,7 @@ out:
static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
size_t len)
{
- struct iwl_ucode_tlv *tlv;
+ const struct iwl_ucode_tlv *tlv;
IWL_DEBUG_FW(trans, "Parsing PNVM file\n");
@@ -181,7 +181,7 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
u32 tlv_len, tlv_type;
len -= sizeof(*tlv);
- tlv = (void *)data;
+ tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);
@@ -193,8 +193,8 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
}
if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
- struct iwl_sku_id *sku_id =
- (void *)(data + sizeof(*tlv));
+ const struct iwl_sku_id *sku_id =
+ (const void *)(data + sizeof(*tlv));
IWL_DEBUG_FW(trans,
"Got IWL_UCODE_TLV_PNVM_SKU len %d\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 3cb0ddbe3ab2..d3cb1ae68a96 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#ifndef __iwl_fw_runtime_h__
#define __iwl_fw_runtime_h__
@@ -16,7 +16,7 @@
#include "fw/acpi.h"
struct iwl_fw_runtime_ops {
- int (*dump_start)(void *ctx);
+ void (*dump_start)(void *ctx);
void (*dump_end)(void *ctx);
bool (*fw_running)(void *ctx);
int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd);
@@ -163,6 +163,7 @@ struct iwl_fw_runtime {
u32 ppag_ver;
struct iwl_sar_offset_mapping_cmd sgom_table;
bool sgom_enabled;
+ u8 reduced_power_flags;
#endif
};
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
index f2f1789f470d..3f1272014daf 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -89,7 +89,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
if (fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
- cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
+ cmd.id = WIDE_ID(SYSTEM_GROUP, SHARED_MEM_CFG_CMD);
else
cmd.id = SHARED_MEM_CFG;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index bd82c24811c8..23b1d689ba7b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -69,7 +69,7 @@ out:
static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
const u8 *data, size_t len)
{
- struct iwl_ucode_tlv *tlv;
+ const struct iwl_ucode_tlv *tlv;
u8 *reduce_power_data = NULL, *tmp;
u32 size = 0;
@@ -79,7 +79,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
u32 tlv_len, tlv_type;
len -= sizeof(*tlv);
- tlv = (void *)data;
+ tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);
@@ -154,7 +154,7 @@ out:
static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
const u8 *data, size_t len)
{
- struct iwl_ucode_tlv *tlv;
+ const struct iwl_ucode_tlv *tlv;
void *sec_data;
IWL_DEBUG_FW(trans, "Parsing REDUCE_POWER data\n");
@@ -163,7 +163,7 @@ static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
u32 tlv_len, tlv_type;
len -= sizeof(*tlv);
- tlv = (void *)data;
+ tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);
@@ -175,8 +175,8 @@ static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
}
if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
- struct iwl_sku_id *sku_id =
- (void *)(data + sizeof(*tlv));
+ const struct iwl_sku_id *sku_id =
+ (const void *)(data + sizeof(*tlv));
IWL_DEBUG_FW(trans,
"Got IWL_UCODE_TLV_PNVM_SKU len %d\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index e122b8b4e1fc..f5b556a103e8 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -260,6 +260,7 @@ enum iwl_cfg_trans_ltr_delay {
* @integrated: discrete or integrated
* @low_latency_xtal: use the low latency xtal if supported
* @ltr_delay: LTR delay parameter, &enum iwl_cfg_trans_ltr_delay.
+ * @imr_enabled: use the IMR if supported.
*/
struct iwl_cfg_trans_params {
const struct iwl_base_params *base_params;
@@ -274,7 +275,8 @@ struct iwl_cfg_trans_params {
integrated:1,
low_latency_xtal:1,
bisr_workaround:1,
- ltr_delay:2;
+ ltr_delay:2,
+ imr_enabled:1;
};
/**
@@ -343,8 +345,8 @@ struct iwl_fw_mon_regs {
* @bisr_workaround: BISR hardware workaround (for 22260 series devices)
* @min_txq_size: minimum number of slots required in a TX queue
* @uhb_supported: ultra high band channels supported
- * @min_256_ba_txq_size: minimum number of slots required in a TX queue which
- * supports 256 BA aggregation
+ * @min_ba_txq_size: minimum number of slots required in a TX queue that
+ *	supports BA aggregation, based on hardware support (HE - 256, EHT - 1K).
* @num_rbds: number of receive buffer descriptors to use
* (only used for multi-queue capable devices)
* @mac_addr_csr_base: CSR base register for MAC address access, if not set
@@ -405,9 +407,10 @@ struct iwl_cfg {
u32 d3_debug_data_length;
u32 min_txq_size;
u32 gp2_reg_addr;
- u32 min_256_ba_txq_size;
+ u32 min_ba_txq_size;
const struct iwl_fw_mon_regs mon_dram_regs;
const struct iwl_fw_mon_regs mon_smem_regs;
+ const struct iwl_fw_mon_regs mon_dbgi_regs;
};
#define IWL_CFG_ANY (~0)
@@ -433,6 +436,7 @@ struct iwl_cfg {
#define IWL_CFG_RF_TYPE_HR1 0x10C
#define IWL_CFG_RF_TYPE_GF 0x10D
#define IWL_CFG_RF_TYPE_MR 0x110
+#define IWL_CFG_RF_TYPE_MS 0x111
#define IWL_CFG_RF_TYPE_FM 0x112
#define IWL_CFG_RF_ID_TH 0x1
@@ -489,6 +493,7 @@ extern const struct iwl_cfg_trans_params iwl_ax200_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_snj_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_so_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_bz_trans_cfg;
extern const char iwl9162_name[];
@@ -509,6 +514,7 @@ extern const char iwl9560_killer_1550i_name[];
extern const char iwl9560_killer_1550s_name[];
extern const char iwl_ax200_name[];
extern const char iwl_ax203_name[];
+extern const char iwl_ax204_name[];
extern const char iwl_ax201_name[];
extern const char iwl_ax101_name[];
extern const char iwl_ax200_killer_1650w_name[];
@@ -631,9 +637,12 @@ extern const struct iwl_cfg iwl_cfg_ma_a0_hr_b0;
extern const struct iwl_cfg iwl_cfg_ma_a0_gf_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_gf4_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0;
+extern const struct iwl_cfg iwl_cfg_ma_a0_ms_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_fm_a0;
extern const struct iwl_cfg iwl_cfg_snj_a0_mr_a0;
+extern const struct iwl_cfg iwl_cfg_snj_a0_ms_a0;
extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0;
+extern const struct iwl_cfg iwl_cfg_so_a0_ms_a0;
extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0;
extern const struct iwl_cfg iwl_cfg_bz_a0_hr_b0;
extern const struct iwl_cfg iwl_cfg_bz_a0_gf_a0;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
index 5adf485db38e..b84884034c74 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018, 2020-2021 Intel Corporation
+ * Copyright (C) 2018, 2020-2022 Intel Corporation
*/
#ifndef __iwl_context_info_file_gen3_h__
#define __iwl_context_info_file_gen3_h__
@@ -34,6 +34,7 @@ enum iwl_prph_scratch_mtr_format {
/**
* enum iwl_prph_scratch_flags - PRPH scratch control flags
+ * @IWL_PRPH_SCRATCH_IMR_DEBUG_EN: IMR support for debug
* @IWL_PRPH_SCRATCH_EARLY_DEBUG_EN: enable early debug conf
* @IWL_PRPH_SCRATCH_EDBG_DEST_DRAM: use DRAM, with size allocated
* in hwm config.
@@ -55,6 +56,7 @@ enum iwl_prph_scratch_mtr_format {
* @IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K: 16kB RB size
*/
enum iwl_prph_scratch_flags {
+ IWL_PRPH_SCRATCH_IMR_DEBUG_EN = BIT(1),
IWL_PRPH_SCRATCH_EARLY_DEBUG_EN = BIT(4),
IWL_PRPH_SCRATCH_EDBG_DEST_DRAM = BIT(8),
IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL = BIT(9),
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 8e10ba88afb3..3e1f011e93aa 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -534,6 +534,9 @@ enum {
* 11-8: queue selector
*/
#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
+/* This register is common for Tx and Rx, Rx queues start from 512 */
+#define HBUS_TARG_WRPTR_Q_SHIFT (16)
+#define HBUS_TARG_WRPTR_RX_Q(q) (((q) + 512) << HBUS_TARG_WRPTR_Q_SHIFT)
/**********************************************************
* CSR values
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index c73672d61356..866a33f49915 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#include <linux/firmware.h>
#include "iwl-drv.h"
@@ -74,7 +74,8 @@ static int iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
if (!node)
return -ENOMEM;
- memcpy(&node->tlv, tlv, sizeof(node->tlv) + len);
+ memcpy(&node->tlv, tlv, sizeof(node->tlv));
+ memcpy(node->tlv.data, tlv->data, len);
list_add_tail(&node->list, list);
return 0;
@@ -181,11 +182,11 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);
/*
- * The higher part of the ID in from version 2 is irrelevant for
- * us, so mask it out.
+	 * The higher part of the ID from version 2 is the debug policy.
+	 * The id itself is only the 16 lsb, so mask out the rest.
*/
if (le32_to_cpu(reg->hdr.version) >= 2)
- id &= IWL_FW_INI_REGION_V2_MASK;
+ id &= IWL_FW_INI_REGION_ID_MASK;
if (le32_to_cpu(tlv->length) < sizeof(*reg))
return -EINVAL;
@@ -211,6 +212,14 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
return -EOPNOTSUPP;
}
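+	/*
+	 * Cache the internal buffer address and size; the IMR dump uses
+	 * this SRAM window as its staging area.
+	 */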
+ if (type == IWL_FW_INI_REGION_INTERNAL_BUFFER) {
+ trans->dbg.imr_data.sram_addr =
+ le32_to_cpu(reg->internal_buffer.base_addr);
+ trans->dbg.imr_data.sram_size =
+ le32_to_cpu(reg->internal_buffer.size);
+ }
+
active_reg = &trans->dbg.active_regions[id];
if (*active_reg) {
IWL_WARN(trans, "WRT: Overriding region id %u\n", id);
@@ -271,7 +280,7 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
static int iwl_dbg_tlv_config_set(struct iwl_trans *trans,
const struct iwl_ucode_tlv *tlv)
{
- struct iwl_fw_ini_conf_set_tlv *conf_set = (void *)tlv->data;
+ const struct iwl_fw_ini_conf_set_tlv *conf_set = (const void *)tlv->data;
u32 tp = le32_to_cpu(conf_set->time_point);
u32 type = le32_to_cpu(conf_set->set_type);
@@ -460,7 +469,7 @@ static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data,
while (len >= sizeof(*tlv)) {
len -= sizeof(*tlv);
- tlv = (void *)data;
+ tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
@@ -577,8 +586,7 @@ static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt,
return 0;
num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num);
- if (!fw_has_capa(&fwrt->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP)) {
+ if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
return -EIO;
num_frags = 1;
@@ -762,33 +770,40 @@ static int iwl_dbg_tlv_update_dram(struct iwl_fw_runtime *fwrt,
static void iwl_dbg_tlv_update_drams(struct iwl_fw_runtime *fwrt)
{
- int ret, i, dram_alloc = 0;
- struct iwl_dram_info dram_info;
+ int ret, i;
+ bool dram_alloc = false;
struct iwl_dram_data *frags =
&fwrt->trans->dbg.fw_mon_ini[IWL_FW_INI_ALLOCATION_ID_DBGC1].frags[0];
+ struct iwl_dram_info *dram_info;
+
+ if (!frags || !frags->block)
+ return;
+
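+	/* the iwl_dram_info header lives in the first DBGC1 fragment */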
+ dram_info = frags->block;
if (!fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT))
return;
- dram_info.first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD);
- dram_info.second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD);
+ dram_info->first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD);
+ dram_info->second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD);
for (i = IWL_FW_INI_ALLOCATION_ID_DBGC1;
i <= IWL_FW_INI_ALLOCATION_ID_DBGC3; i++) {
- ret = iwl_dbg_tlv_update_dram(fwrt, i, &dram_info);
+ ret = iwl_dbg_tlv_update_dram(fwrt, i, dram_info);
if (!ret)
- dram_alloc++;
+ dram_alloc = true;
else
IWL_WARN(fwrt,
"WRT: Failed to set DRAM buffer for alloc id %d, ret=%d\n",
i, ret);
}
- if (dram_alloc) {
- memcpy(frags->block, &dram_info, sizeof(dram_info));
- IWL_DEBUG_FW(fwrt, "block data after %016x\n",
- *((int *)fwrt->trans->dbg.fw_mon_ini[1].frags[0].block));
- }
+
+ if (dram_alloc)
+ IWL_DEBUG_FW(fwrt, "block data after %08x\n",
+ dram_info->first_word);
+ else
+ memset(frags->block, 0, sizeof(*dram_info));
}
static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
@@ -811,11 +826,11 @@ static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
}
static void iwl_dbg_tlv_apply_config(struct iwl_fw_runtime *fwrt,
- struct list_head *config_list)
+ struct list_head *conf_list)
{
struct iwl_dbg_tlv_node *node;
- list_for_each_entry(node, config_list, list) {
+ list_for_each_entry(node, conf_list, list) {
struct iwl_fw_ini_conf_set_tlv *config_list = (void *)node->tlv.data;
u32 count, address, value;
u32 len = (le32_to_cpu(node->tlv.length) - sizeof(*config_list)) / 8;
@@ -861,11 +876,18 @@ static void iwl_dbg_tlv_apply_config(struct iwl_fw_runtime *fwrt,
case IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: {
struct iwl_dbgc1_info dram_info = {};
struct iwl_dram_data *frags = &fwrt->trans->dbg.fw_mon_ini[1].frags[0];
- __le64 dram_base_addr = cpu_to_le64(frags->physical);
- __le32 dram_size = cpu_to_le32(frags->size);
- u64 dram_addr = le64_to_cpu(dram_base_addr);
+ __le64 dram_base_addr;
+ __le32 dram_size;
+ u64 dram_addr;
u32 ret;
+ if (!frags)
+ break;
+
+ dram_base_addr = cpu_to_le64(frags->physical);
+ dram_size = cpu_to_le32(frags->size);
+ dram_addr = le64_to_cpu(dram_base_addr);
+
IWL_DEBUG_FW(fwrt, "WRT: dram_base_addr 0x%016llx, dram_size 0x%x\n",
dram_base_addr, dram_size);
IWL_DEBUG_FW(fwrt, "WRT: config_list->addr_offset: %u\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
index 79287708bd6e..128059ca77e6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#ifndef __iwl_dbg_tlv_h__
#define __iwl_dbg_tlv_h__
@@ -10,6 +10,8 @@
#include <fw/file.h>
#include <fw/api/dbg-tlv.h>
+#define IWL_DBG_TLV_MAX_PRESET 15
+
/**
* struct iwl_dbg_tlv_node - debug TLV node
* @list: list of &struct iwl_dbg_tlv_node
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 6651e78b39ec..a2203f661321 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -243,14 +243,14 @@ struct iwl_firmware_pieces {
/* FW debug data parsed for driver usage */
bool dbg_dest_tlv_init;
- u8 *dbg_dest_ver;
+ const u8 *dbg_dest_ver;
union {
- struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
- struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1;
+ const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
+ const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1;
};
- struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
+ const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
- struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
+ const struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
size_t n_mem_tlv;
@@ -324,29 +324,6 @@ static void set_sec_offset(struct iwl_firmware_pieces *pieces,
pieces->img[type].sec[sec].offset = offset;
}
-static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
-{
- int i, j;
- struct iwl_fw_cscheme_list *l = (struct iwl_fw_cscheme_list *)data;
- struct iwl_fw_cipher_scheme *fwcs;
-
- if (len < sizeof(*l) ||
- len < sizeof(l->size) + l->size * sizeof(l->cs[0]))
- return -EINVAL;
-
- for (i = 0, j = 0; i < IWL_UCODE_MAX_CS && i < l->size; i++) {
- fwcs = &l->cs[j];
-
- /* we skip schemes with zero cipher suite selector */
- if (!fwcs->cipher)
- continue;
-
- fw->cs[j++] = *fwcs;
- }
-
- return 0;
-}
-
/*
* Gets uCode section from tlv.
*/
@@ -356,13 +333,13 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
{
struct fw_img_parsing *img;
struct fw_sec *sec;
- struct fw_sec_parsing *sec_parse;
+ const struct fw_sec_parsing *sec_parse;
size_t alloc_size;
if (WARN_ON(!pieces || !data || type >= IWL_UCODE_TYPE_MAX))
return -1;
- sec_parse = (struct fw_sec_parsing *)data;
+ sec_parse = (const struct fw_sec_parsing *)data;
img = &pieces->img[type];
@@ -385,8 +362,8 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data)
{
- struct iwl_tlv_calib_data *def_calib =
- (struct iwl_tlv_calib_data *)data;
+ const struct iwl_tlv_calib_data *def_calib =
+ (const struct iwl_tlv_calib_data *)data;
u32 ucode_type = le32_to_cpu(def_calib->ucode_type);
if (ucode_type >= IWL_UCODE_TYPE_MAX) {
IWL_ERR(drv, "Wrong ucode_type %u for default calibration.\n",
@@ -404,7 +381,7 @@ static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data)
static void iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
struct iwl_ucode_capabilities *capa)
{
- const struct iwl_ucode_api *ucode_api = (void *)data;
+ const struct iwl_ucode_api *ucode_api = (const void *)data;
u32 api_index = le32_to_cpu(ucode_api->api_index);
u32 api_flags = le32_to_cpu(ucode_api->api_flags);
int i;
@@ -425,7 +402,7 @@ static void iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
static void iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
struct iwl_ucode_capabilities *capa)
{
- const struct iwl_ucode_capa *ucode_capa = (void *)data;
+ const struct iwl_ucode_capa *ucode_capa = (const void *)data;
u32 api_index = le32_to_cpu(ucode_capa->api_index);
u32 api_flags = le32_to_cpu(ucode_capa->api_capa);
int i;
@@ -457,7 +434,7 @@ static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
const struct firmware *ucode_raw,
struct iwl_firmware_pieces *pieces)
{
- struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
+ const struct iwl_ucode_header *ucode = (const void *)ucode_raw->data;
u32 api_ver, hdr_size, build;
char buildstr[25];
const u8 *src;
@@ -600,7 +577,7 @@ static void iwl_parse_dbg_tlv_assert_tables(struct iwl_drv *drv,
sizeof(region->special_mem))
return;
- region = (void *)tlv->data;
+ region = (const void *)tlv->data;
addr = le32_to_cpu(region->special_mem.base_addr);
addr += le32_to_cpu(region->special_mem.offset);
addr &= ~FW_ADDR_CACHE_CONTROL;
@@ -655,7 +632,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
struct iwl_ucode_capabilities *capa,
bool *usniffer_images)
{
- struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data;
+ const struct iwl_tlv_ucode_header *ucode = (const void *)ucode_raw->data;
const struct iwl_ucode_tlv *tlv;
size_t len = ucode_raw->size;
const u8 *data;
@@ -704,8 +681,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
while (len >= sizeof(*tlv)) {
len -= sizeof(*tlv);
- tlv = (void *)data;
+ tlv = (const void *)data;
tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);
tlv_data = tlv->data;
@@ -762,7 +739,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
capa->max_probe_length =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_PAN:
if (tlv_len)
@@ -783,7 +760,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
* will not work with the new firmware, or
* it'll not take advantage of new features.
*/
- capa->flags = le32_to_cpup((__le32 *)tlv_data);
+ capa->flags = le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_API_CHANGES_SET:
if (tlv_len != sizeof(struct iwl_ucode_api))
@@ -799,37 +776,37 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
pieces->init_evtlog_ptr =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_INIT_EVTLOG_SIZE:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
pieces->init_evtlog_size =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_INIT_ERRLOG_PTR:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
pieces->init_errlog_ptr =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_RUNT_EVTLOG_PTR:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
pieces->inst_evtlog_ptr =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
pieces->inst_evtlog_size =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_RUNT_ERRLOG_PTR:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
pieces->inst_errlog_ptr =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_ENHANCE_SENS_TBL:
if (tlv_len)
@@ -858,7 +835,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
capa->standard_phy_calibration_size =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_SEC_RT:
iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
@@ -884,7 +861,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
case IWL_UCODE_TLV_PHY_SKU:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
- drv->fw.phy_config = le32_to_cpup((__le32 *)tlv_data);
+ drv->fw.phy_config = le32_to_cpup((const __le32 *)tlv_data);
drv->fw.valid_tx_ant = (drv->fw.phy_config &
FW_PHY_CFG_TX_CHAIN) >>
FW_PHY_CFG_TX_CHAIN_POS;
@@ -911,7 +888,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
num_of_cpus =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
if (num_of_cpus == 2) {
drv->fw.img[IWL_UCODE_REGULAR].is_dual_cpus =
@@ -925,18 +902,14 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
return -EINVAL;
}
break;
- case IWL_UCODE_TLV_CSCHEME:
- if (iwl_store_cscheme(&drv->fw, tlv_data, tlv_len))
- goto invalid_tlv_len;
- break;
case IWL_UCODE_TLV_N_SCAN_CHANNELS:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
capa->n_scan_channels =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_FW_VERSION: {
- __le32 *ptr = (void *)tlv_data;
+ const __le32 *ptr = (const void *)tlv_data;
u32 major, minor;
u8 local_comp;
@@ -960,15 +933,15 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
case IWL_UCODE_TLV_FW_DBG_DEST: {
- struct iwl_fw_dbg_dest_tlv *dest = NULL;
- struct iwl_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
+ const struct iwl_fw_dbg_dest_tlv *dest = NULL;
+ const struct iwl_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
u8 mon_mode;
- pieces->dbg_dest_ver = (u8 *)tlv_data;
+ pieces->dbg_dest_ver = (const u8 *)tlv_data;
if (*pieces->dbg_dest_ver == 1) {
- dest = (void *)tlv_data;
+ dest = (const void *)tlv_data;
} else if (*pieces->dbg_dest_ver == 0) {
- dest_v1 = (void *)tlv_data;
+ dest_v1 = (const void *)tlv_data;
} else {
IWL_ERR(drv,
"The version is %d, and it is invalid\n",
@@ -1009,7 +982,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
case IWL_UCODE_TLV_FW_DBG_CONF: {
- struct iwl_fw_dbg_conf_tlv *conf = (void *)tlv_data;
+ const struct iwl_fw_dbg_conf_tlv *conf =
+ (const void *)tlv_data;
if (!pieces->dbg_dest_tlv_init) {
IWL_ERR(drv,
@@ -1043,8 +1017,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
case IWL_UCODE_TLV_FW_DBG_TRIGGER: {
- struct iwl_fw_dbg_trigger_tlv *trigger =
- (void *)tlv_data;
+ const struct iwl_fw_dbg_trigger_tlv *trigger =
+ (const void *)tlv_data;
u32 trigger_id = le32_to_cpu(trigger->id);
if (trigger_id >= ARRAY_SIZE(drv->fw.dbg.trigger_tlv)) {
@@ -1075,7 +1049,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
}
drv->fw.dbg.dump_mask =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
}
case IWL_UCODE_TLV_SEC_RT_USNIFFER:
@@ -1087,7 +1061,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
case IWL_UCODE_TLV_PAGING:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
- paging_mem_size = le32_to_cpup((__le32 *)tlv_data);
+ paging_mem_size = le32_to_cpup((const __le32 *)tlv_data);
IWL_DEBUG_FW(drv,
"Paging: paging enabled (size = %u bytes)\n",
@@ -1117,8 +1091,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
/* ignored */
break;
case IWL_UCODE_TLV_FW_MEM_SEG: {
- struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
- (void *)tlv_data;
+ const struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
+ (const void *)tlv_data;
size_t size;
struct iwl_fw_dbg_mem_seg_tlv *n;
@@ -1146,10 +1120,10 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
case IWL_UCODE_TLV_FW_RECOVERY_INFO: {
- struct {
+ const struct {
__le32 buf_addr;
__le32 buf_size;
- } *recov_info = (void *)tlv_data;
+ } *recov_info = (const void *)tlv_data;
if (tlv_len != sizeof(*recov_info))
goto invalid_tlv_len;
@@ -1160,10 +1134,10 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
}
break;
case IWL_UCODE_TLV_FW_FSEQ_VERSION: {
- struct {
+ const struct {
u8 version[32];
u8 sha1[20];
- } *fseq_ver = (void *)tlv_data;
+ } *fseq_ver = (const void *)tlv_data;
if (tlv_len != sizeof(*fseq_ver))
goto invalid_tlv_len;
@@ -1174,19 +1148,19 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
case IWL_UCODE_TLV_FW_NUM_STATIONS:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
- if (le32_to_cpup((__le32 *)tlv_data) >
+ if (le32_to_cpup((const __le32 *)tlv_data) >
IWL_MVM_STATION_COUNT_MAX) {
IWL_ERR(drv,
"%d is an invalid number of station\n",
- le32_to_cpup((__le32 *)tlv_data));
+ le32_to_cpup((const __le32 *)tlv_data));
goto tlv_error;
}
capa->num_stations =
- le32_to_cpup((__le32 *)tlv_data);
+ le32_to_cpup((const __le32 *)tlv_data);
break;
case IWL_UCODE_TLV_UMAC_DEBUG_ADDRS: {
- struct iwl_umac_debug_addrs *dbg_ptrs =
- (void *)tlv_data;
+ const struct iwl_umac_debug_addrs *dbg_ptrs =
+ (const void *)tlv_data;
if (tlv_len != sizeof(*dbg_ptrs))
goto invalid_tlv_len;
@@ -1201,8 +1175,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
case IWL_UCODE_TLV_LMAC_DEBUG_ADDRS: {
- struct iwl_lmac_debug_addrs *dbg_ptrs =
- (void *)tlv_data;
+ const struct iwl_lmac_debug_addrs *dbg_ptrs =
+ (const void *)tlv_data;
if (tlv_len != sizeof(*dbg_ptrs))
goto invalid_tlv_len;
@@ -1277,7 +1251,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (len) {
IWL_ERR(drv, "invalid TLV after parsing: %zd\n", len);
- iwl_print_hex_dump(drv, IWL_DL_FW, (u8 *)data, len);
+ iwl_print_hex_dump(drv, IWL_DL_FW, data, len);
return -EINVAL;
}
@@ -1418,7 +1392,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
{
struct iwl_drv *drv = context;
struct iwl_fw *fw = &drv->fw;
- struct iwl_ucode_header *ucode;
+ const struct iwl_ucode_header *ucode;
struct iwlwifi_opmode_table *op;
int err;
struct iwl_firmware_pieces *pieces;
@@ -1456,7 +1430,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
}
/* Data from ucode file: header followed by uCode images */
- ucode = (struct iwl_ucode_header *)ucode_raw->data;
+ ucode = (const struct iwl_ucode_header *)ucode_raw->data;
if (ucode->ver)
err = iwl_parse_v1_v2_firmware(drv, ucode_raw, pieces);
@@ -1645,6 +1619,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
/* We have our copies now, allow OS release its copies */
release_firmware(ucode_raw);
+ iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans);
+
mutex_lock(&iwlwifi_opmode_table_mtx);
switch (fw->type) {
case IWL_FW_DVM:
@@ -1661,8 +1637,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
IWL_INFO(drv, "loaded firmware version %s op_mode %s\n",
drv->fw.fw_version, op->name);
- iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans);
-
/* add this device to the list of devices using this op_mode */
list_add_tail(&drv->list, &op->drv);
@@ -1796,6 +1770,7 @@ void iwl_drv_stop(struct iwl_drv *drv)
kfree(drv);
}
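+/*
+ * enable_ini values 1..IWL_DBG_TLV_MAX_PRESET select a FW debug preset;
+ * ENABLE_INI (16) turns INI debug on without a specific preset.
+ */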
+#define ENABLE_INI (IWL_DBG_TLV_MAX_PRESET + 1)
/* shared module parameters */
struct iwl_mod_params iwlwifi_mod_params = {
@@ -1803,7 +1778,7 @@ struct iwl_mod_params iwlwifi_mod_params = {
.bt_coex_active = true,
.power_level = IWL_POWER_INDEX_1,
.uapsd_disable = IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT,
- .enable_ini = true,
+ .enable_ini = ENABLE_INI,
/* the rest are 0 by default */
};
IWL_EXPORT_SYMBOL(iwlwifi_mod_params);
@@ -1915,10 +1890,42 @@ MODULE_PARM_DESC(nvm_file, "NVM file name");
module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644);
MODULE_PARM_DESC(uapsd_disable,
"disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
-module_param_named(enable_ini, iwlwifi_mod_params.enable_ini,
- bool, S_IRUGO | S_IWUSR);
+
+static int enable_ini_set(const char *arg, const struct kernel_param *kp)
+{
+ int ret = 0;
+ bool res;
+ __u32 new_enable_ini;
+
+ /* in case the argument type is a number */
+ ret = kstrtou32(arg, 0, &new_enable_ini);
+ if (!ret) {
+ if (new_enable_ini > ENABLE_INI) {
+			pr_err("enable_ini cannot be %d, must be in range 0-16\n", new_enable_ini);
+ return -EINVAL;
+ }
+ goto out;
+ }
+
+ /* in case the argument type is boolean */
+ ret = kstrtobool(arg, &res);
+ if (ret)
+ return ret;
+ new_enable_ini = (res ? ENABLE_INI : 0);
+
+out:
+ iwlwifi_mod_params.enable_ini = new_enable_ini;
+ return 0;
+}
+
+static const struct kernel_param_ops enable_ini_ops = {
+ .set = enable_ini_set
+};
+
+module_param_cb(enable_ini, &enable_ini_ops, &iwlwifi_mod_params.enable_ini, 0644);
MODULE_PARM_DESC(enable_ini,
- "Enable debug INI TLV FW debug infrastructure (default: true");
+		 "0:disable, 1-15:FW_DBG_PRESET values, 16:enabled without preset value defined, "
+		 "debug INI TLV FW debug infrastructure (default: 16)");
/*
* set bt_coex_active to true, uCode will do kill/defer
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
index 0fd009e6d685..80073f973334 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
@@ -84,7 +84,7 @@ void iwl_drv_stop(struct iwl_drv *drv);
* everything is built-in, then we can avoid that.
*/
#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
-#define IWL_EXPORT_SYMBOL(sym) EXPORT_SYMBOL_GPL(sym)
+#define IWL_EXPORT_SYMBOL(sym) EXPORT_SYMBOL_NS_GPL(sym, IWLWIFI)
#else
#define IWL_EXPORT_SYMBOL(sym)
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
index b9e86bf972e5..5f386bb1a353 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
@@ -23,26 +23,22 @@
*/
#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
-#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
-#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
-
-
/*
* The device's EEPROM semaphore prevents conflicts between driver and uCode
* when accessing the EEPROM; each access is a series of pulses to/from the
* EEPROM chip, not a single event, so even reads could conflict if they
* weren't arbitrated by the semaphore.
*/
+#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
+#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
-#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
-#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
{
u16 count;
int ret;
- for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
+ for (count = 0; count < IWL_EEPROM_SEM_RETRY_LIMIT; count++) {
/* Request semaphore */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
@@ -51,7 +47,7 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
- EEPROM_SEM_TIMEOUT);
+ IWL_EEPROM_SEM_TIMEOUT);
if (ret >= 0) {
IWL_DEBUG_EEPROM(trans->dev,
"Acquired semaphore after %d tries.\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index e6fd4941a4cb..bedd78a47f67 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fh_h__
@@ -590,11 +590,31 @@ struct iwl_rb_status {
#define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3)
#define TFD_QUEUE_SIZE_BC_DUP (64)
#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
-#define TFD_QUEUE_BC_SIZE_GEN3 1024
+#define TFD_QUEUE_BC_SIZE_GEN3_AX210 1024
+#define TFD_QUEUE_BC_SIZE_GEN3_BZ (1024 * 4)
#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
#define IWL_NUM_OF_TBS 20
#define IWL_TFH_NUM_TBS 25
+/* IMR DMA registers */
+#define IMR_TFH_SRV_DMA_CHNL0_CTRL 0x00a0a51c
+#define IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR 0x00a0a520
+#define IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB 0x00a0a524
+#define IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB 0x00a0a528
+#define IMR_TFH_SRV_DMA_CHNL0_BC 0x00a0a52c
+#define TFH_SRV_DMA_CHNL0_LEFT_BC 0x00a0a530
+
+/* RFH S2D DMA registers */
+#define IMR_RFH_GEN_CFG_SERVICE_DMA_RS_MSK 0x0000000c
+#define IMR_RFH_GEN_CFG_SERVICE_DMA_SNOOP_MSK 0x00000002
+
+/* TFH D2S DMA registers */
+#define IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK 0x80000000
+#define IMR_UREG_CHICK 0x00d05c00
+#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS 0x00800000
+#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK 0x00000030
+#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS 0x80000000
+
static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
{
return (sizeof(addr) > sizeof(u32) ? upper_32_bits(addr) : 0) & 0xF;
@@ -707,14 +727,14 @@ struct iwlagn_scd_bc_tbl {
} __packed;
/**
- * struct iwl_gen3_bc_tbl scheduler byte count table gen3
+ * struct iwl_gen3_bc_tbl_entry - scheduler byte count table entry gen3
* For AX210 and on:
* @tfd_offset: 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
*/
-struct iwl_gen3_bc_tbl {
- __le16 tfd_offset[TFD_QUEUE_BC_SIZE_GEN3];
+struct iwl_gen3_bc_tbl_entry {
+ __le16 tfd_offset;
} __packed;
#endif /* !__iwl_fh_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index 253eac4cbf59..396f2c997da6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -65,14 +65,14 @@ IWL_EXPORT_SYMBOL(iwl_poll_bit);
u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
{
- u32 value = 0x5a5a5a5a;
-
if (iwl_trans_grab_nic_access(trans)) {
- value = iwl_read32(trans, reg);
+ u32 value = iwl_read32(trans, reg);
+
iwl_trans_release_nic_access(trans);
+ return value;
}
- return value;
+ return 0x5a5a5a5a;
}
IWL_EXPORT_SYMBOL(iwl_read_direct32);
@@ -135,13 +135,15 @@ IWL_EXPORT_SYMBOL(iwl_write_prph64_no_grab);
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
{
- u32 val = 0x5a5a5a5a;
-
if (iwl_trans_grab_nic_access(trans)) {
- val = iwl_read_prph_no_grab(trans, ofs);
+ u32 val = iwl_read_prph_no_grab(trans, ofs);
+
iwl_trans_release_nic_access(trans);
+
+ return val;
}
- return val;
+
+ return 0x5a5a5a5a;
}
IWL_EXPORT_SYMBOL(iwl_read_prph);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index 004ebdac4535..d0b4d02bdab9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
*/
#ifndef __iwl_modparams_h__
#define __iwl_modparams_h__
@@ -83,7 +83,8 @@ struct iwl_mod_params {
*/
bool disable_11ax;
bool remove_when_gone;
- bool enable_ini;
+ u32 enable_ini;
+ bool disable_11be;
};
static inline bool iwl_enable_rx_ampdu(void)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 04addf964d83..9040da3dcce3 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -375,10 +375,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
if (v4)
ch_flags =
- __le32_to_cpup((__le32 *)nvm_ch_flags + ch_idx);
+ __le32_to_cpup((const __le32 *)nvm_ch_flags + ch_idx);
else
ch_flags =
- __le16_to_cpup((__le16 *)nvm_ch_flags + ch_idx);
+ __le16_to_cpup((const __le16 *)nvm_ch_flags + ch_idx);
if (band == NL80211_BAND_5GHZ &&
!data->sku_cap_band_52ghz_enable)
@@ -583,9 +583,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ,
.phy_cap_info[3] =
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
.phy_cap_info[4] =
IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
@@ -653,9 +653,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US,
.phy_cap_info[3] =
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
.phy_cap_info[6] =
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
@@ -731,7 +731,7 @@ static void iwl_init_he_6ghz_capa(struct iwl_trans *trans,
IWL_DEBUG_EEPROM(trans->dev, "he_6ghz_capa=0x%x\n", he_6ghz_capa);
/* we know it's writable - we set it before ourselves */
- iftype_data = (void *)sband->iftype_data;
+ iftype_data = (void *)(uintptr_t)sband->iftype_data;
for (i = 0; i < sband->n_iftype_data; i++)
iftype_data[i].he_6ghz_capa.capa = cpu_to_le16(he_6ghz_capa);
}
@@ -783,6 +783,7 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
case IWL_CFG_RF_TYPE_GF:
case IWL_CFG_RF_TYPE_MR:
+ case IWL_CFG_RF_TYPE_MS:
iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |=
IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
if (!is_ap)
@@ -911,7 +912,7 @@ static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
if (cfg->nvm_type != IWL_NVM_EXT)
return le16_to_cpup(nvm_sw + SKU);
- return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000));
+ return le32_to_cpup((const __le32 *)(phy_sku + SKU_FAMILY_8000));
}
static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
@@ -919,8 +920,8 @@ static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
if (cfg->nvm_type != IWL_NVM_EXT)
return le16_to_cpup(nvm_sw + NVM_VERSION);
else
- return le32_to_cpup((__le32 *)(nvm_sw +
- NVM_VERSION_EXT_NVM));
+ return le32_to_cpup((const __le32 *)(nvm_sw +
+ NVM_VERSION_EXT_NVM));
}
static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
@@ -929,7 +930,7 @@ static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
if (cfg->nvm_type != IWL_NVM_EXT)
return le16_to_cpup(nvm_sw + RADIO_CFG);
- return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM));
+ return le32_to_cpup((const __le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM));
}
@@ -940,7 +941,7 @@ static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
if (cfg->nvm_type != IWL_NVM_EXT)
return le16_to_cpup(nvm_sw + N_HW_ADDRS);
- n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));
+ n_hw_addr = le32_to_cpup((const __le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));
return n_hw_addr & N_HW_ADDR_MASK;
}
@@ -1079,7 +1080,9 @@ static int iwl_set_hw_address(struct iwl_trans *trans,
return -EINVAL;
}
- IWL_INFO(trans, "base HW address: %pM\n", data->hw_addr);
+ if (!trans->csme_own)
+ IWL_INFO(trans, "base HW address: %pM, OTP minor version: 0x%x\n",
+ data->hw_addr, iwl_read_prph(trans, REG_OTP_MINOR));
return 0;
}
@@ -1384,8 +1387,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
nvm_chan = iwl_nvm_channels;
}
- if (WARN_ON(num_of_ch > max_num_ch))
+ if (num_of_ch > max_num_ch) {
+ IWL_DEBUG_DEV(dev, IWL_DL_LAR,
+ "Num of channels (%d) is greater than expected. Truncating to %d\n",
+ num_of_ch, max_num_ch);
num_of_ch = max_num_ch;
+ }
if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
return ERR_PTR(-EINVAL);
@@ -1591,7 +1598,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
}
eof = fw_entry->data + fw_entry->size;
- dword_buff = (__le32 *)fw_entry->data;
+ dword_buff = (const __le32 *)fw_entry->data;
/* some NVM file will contain a header.
* The header is identified by 2 dwords header as follow:
@@ -1603,7 +1610,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
if (fw_entry->size > NVM_HEADER_SIZE &&
dword_buff[0] == cpu_to_le32(NVM_HEADER_0) &&
dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) {
- file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE);
+ file_sec = (const void *)(fw_entry->data + NVM_HEADER_SIZE);
IWL_INFO(trans, "NVM Version %08X\n", le32_to_cpu(dword_buff[2]));
IWL_INFO(trans, "NVM Manufacturing date %08X\n",
le32_to_cpu(dword_buff[3]));
@@ -1616,7 +1623,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
goto out;
}
} else {
- file_sec = (void *)fw_entry->data;
+ file_sec = (const void *)fw_entry->data;
}
while (true) {
@@ -1684,7 +1691,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
nvm_sections[section_id].length = section_size;
/* advance to the next section */
- file_sec = (void *)(file_sec->data + section_size);
+ file_sec = (const void *)(file_sec->data + section_size);
}
out:
release_firmware(fw_entry);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
index 5378315d0179..0a93ac769f66 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2020-2021 Intel Corporation
* Copyright (C) 2016 Intel Deutschland GmbH
*/
#include <linux/slab.h>
@@ -13,8 +13,6 @@
#include "iwl-op-mode.h"
#include "iwl-trans.h"
-#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */
-
struct iwl_phy_db_entry {
u16 size;
u8 *data;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 95b3dae7b504..a22788a68168 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@@ -354,10 +354,10 @@
#define WFPM_GP2 0xA030B4
/* DBGI SRAM Register details */
-#define DBGI_SRAM_TARGET_ACCESS_CFG 0x00A2E14C
-#define DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK 0x10000
#define DBGI_SRAM_TARGET_ACCESS_RDATA_LSB 0x00A2E154
#define DBGI_SRAM_TARGET_ACCESS_RDATA_MSB 0x00A2E158
+#define DBGI_SRAM_FIFO_POINTERS 0x00A2E148
+#define DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK 0x00000FFF
enum {
ENABLE_WFPM = BIT(31),
@@ -386,6 +386,11 @@ enum {
#define UREG_LMAC1_CURRENT_PC 0xa05c1c
#define UREG_LMAC2_CURRENT_PC 0xa05c20
+#define WFPM_LMAC1_PD_NOTIFICATION 0xa0338c
+#define WFPM_ARC1_PD_NOTIFICATION 0xa03044
+#define HPM_SECONDARY_DEVICE_STATE 0xa03404
+
+
/* For UMAG_GEN_HW_STATUS reg check */
enum {
UMAG_GEN_HW_IS_FPGA = BIT(1),
@@ -491,4 +496,6 @@ enum {
#define HBUS_TIMEOUT 0xA5A5A5A1
#define WFPM_DPHY_OFF 0xDF10FF
+#define REG_OTP_MINOR 0xA0333C
+
#endif /* __iwl_prph_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index 9236f9106826..b1af9359cea5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -78,8 +78,12 @@ int iwl_trans_init(struct iwl_trans *trans)
if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align))
return -EINVAL;
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- trans->txqs.bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl);
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ trans->txqs.bc_tbl_size =
+ sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_BZ;
+ else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ trans->txqs.bc_tbl_size =
+ sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_AX210;
else
trans->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
/*
@@ -203,10 +207,10 @@ IWL_EXPORT_SYMBOL(iwl_trans_send_cmd);
static int iwl_hcmd_names_cmp(const void *key, const void *elt)
{
const struct iwl_hcmd_names *name = elt;
- u8 cmd1 = *(u8 *)key;
+ const u8 *cmd1 = key;
u8 cmd2 = name->cmd_id;
- return (cmd1 - cmd2);
+ return (*cmd1 - cmd2);
}
const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 1bcaa3598785..d659ccd065f7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -406,6 +406,9 @@ struct iwl_dump_sanitize_ops {
* @cb_data_offs: offset inside skb->cb to store transport data at, must have
* space for at least two pointers
* @fw_reset_handshake: firmware supports reset flow handshake
+ * @queue_alloc_cmd_ver: queue allocation command version, set to 0
+ * for using the older SCD_QUEUE_CFG, set to the version of
+ * SCD_QUEUE_CONFIG_CMD otherwise.
*/
struct iwl_trans_config {
struct iwl_op_mode *op_mode;
@@ -424,6 +427,7 @@ struct iwl_trans_config {
u8 cb_data_offs;
bool fw_reset_handshake;
+ u8 queue_alloc_cmd_ver;
};
struct iwl_trans_dump_data {
@@ -569,10 +573,9 @@ struct iwl_trans_ops {
void (*txq_disable)(struct iwl_trans *trans, int queue,
bool configure_scd);
/* 22000 functions */
- int (*txq_alloc)(struct iwl_trans *trans,
- __le16 flags, u8 sta_id, u8 tid,
- int cmd_id, int size,
- unsigned int queue_wdg_timeout);
+ int (*txq_alloc)(struct iwl_trans *trans, u32 flags,
+ u32 sta_mask, u8 tid,
+ int size, unsigned int queue_wdg_timeout);
void (*txq_free)(struct iwl_trans *trans, int queue);
int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
struct iwl_trans_rxq_dma_data *data);
@@ -615,6 +618,10 @@ struct iwl_trans_ops {
int (*set_reduce_power)(struct iwl_trans *trans,
const void *data, u32 len);
void (*interrupts)(struct iwl_trans *trans, bool enable);
+ int (*imr_dma_data)(struct iwl_trans *trans,
+ u32 dst_addr, u64 src_addr,
+ u32 byte_cnt);
+
};
/**
@@ -722,6 +729,26 @@ struct iwl_self_init_dram {
};
/**
+ * struct iwl_imr_data - imr dram data used during debug process
+ * @imr_enable: imr enable status received from fw
+ * @imr_size: imr dram size received from fw
+ * @sram_addr: sram address from debug tlv
+ * @sram_size: sram size from debug tlv
+ * @imr2sram_remainbyte: size remaining after each dma transfer
+ * @imr_curr_addr: current dst address used during dma transfer
+ * @imr_base_addr: imr address received from fw
+ */
+struct iwl_imr_data {
+ u32 imr_enable;
+ u32 imr_size;
+ u32 sram_addr;
+ u32 sram_size;
+ u32 imr2sram_remainbyte;
+ u64 imr_curr_addr;
+ __le64 imr_base_addr;
+};
+
+/**
* struct iwl_trans_debug - transport debug related data
*
* @n_dest_reg: num of reg_ops in %dbg_dest_tlv
@@ -785,6 +812,7 @@ struct iwl_trans_debug {
u32 ucode_preset;
bool restart_required;
u32 last_tp_resetfw;
+ struct iwl_imr_data imr_data;
};
struct iwl_dma_ptr {
@@ -904,6 +932,7 @@ struct iwl_txq {
* @queue_used - bit mask of used queues
* @queue_stopped - bit mask of stopped queues
* @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
+ * @queue_alloc_cmd_ver: queue allocation command version
*/
struct iwl_trans_txqs {
unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
@@ -929,6 +958,8 @@ struct iwl_trans_txqs {
} tfd;
struct iwl_dma_ptr scd_bc_tbls;
+
+ u8 queue_alloc_cmd_ver;
};
/**
@@ -1220,9 +1251,8 @@ iwl_trans_txq_free(struct iwl_trans *trans, int queue)
static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
- __le16 flags, u8 sta_id, u8 tid,
- int cmd_id, int size,
- unsigned int wdg_timeout)
+ u32 flags, u32 sta_mask, u8 tid,
+ int size, unsigned int wdg_timeout)
{
might_sleep();
@@ -1234,8 +1264,8 @@ iwl_trans_txq_alloc(struct iwl_trans *trans,
return -EIO;
}
- return trans->ops->txq_alloc(trans, flags, sta_id, tid,
- cmd_id, size, wdg_timeout);
+ return trans->ops->txq_alloc(trans, flags, sta_mask, tid,
+ size, wdg_timeout);
}
static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
@@ -1368,6 +1398,15 @@ static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
} while (0)
+static inline int iwl_trans_write_imr_mem(struct iwl_trans *trans,
+ u32 dst_addr, u64 src_addr,
+ u32 byte_cnt)
+{
+ if (trans->ops->imr_dma_data)
+ return trans->ops->imr_dma_data(trans, dst_addr, src_addr, byte_cnt);
+ return 0;
+}
+
static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
u32 value;
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c
index 2f7f0f994ca3..b4f45234cfc8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c
@@ -312,7 +312,7 @@ static ssize_t iwl_mei_write_cyclic_buf(struct mei_cl_device *cldev,
memcpy(q_head + wr, hdr, tx_sz);
} else {
memcpy(q_head + wr, hdr, q_sz - wr);
- memcpy(q_head, (u8 *)hdr + q_sz - wr, tx_sz - (q_sz - wr));
+ memcpy(q_head, (const u8 *)hdr + q_sz - wr, tx_sz - (q_sz - wr));
}
WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz));
@@ -432,7 +432,7 @@ void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx)
u32 q_sz;
u32 rd;
u32 wr;
- void *q_head;
+ u8 *q_head;
if (!iwl_mei_global_cldev)
return;
@@ -2003,7 +2003,11 @@ static void iwl_mei_remove(struct mei_cl_device *cldev)
}
static const struct mei_cl_device_id iwl_mei_tbl[] = {
- { KBUILD_MODNAME, MEI_WLAN_UUID, MEI_CL_VERSION_ANY},
+ {
+ .name = KBUILD_MODNAME,
+ .uuid = MEI_WLAN_UUID,
+ .version = MEI_CL_VERSION_ANY,
+ },
/* required last entry */
{ }
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/net.c b/drivers/net/wireless/intel/iwlwifi/mei/net.c
index 468102a95e1b..3472167c8370 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/net.c
+++ b/drivers/net/wireless/intel/iwlwifi/mei/net.c
@@ -102,8 +102,8 @@ static bool iwl_mei_rx_filter_arp(struct sk_buff *skb,
* src IP address - 4 bytes
* target MAC addess - 6 bytes
*/
- target_ip = (void *)((u8 *)(arp + 1) +
- ETH_ALEN + sizeof(__be32) + ETH_ALEN);
+ target_ip = (const void *)((const u8 *)(arp + 1) +
+ ETH_ALEN + sizeof(__be32) + ETH_ALEN);
/*
* ARP request is forwarded to ME only if IP address match in the
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index b400867e94f0..a995bba0ba81 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -31,7 +31,7 @@ void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
memcpy(mvmvif->rekey_data.kck, data->kck, data->kck_len);
mvmvif->rekey_data.akm = data->akm & 0xFF;
mvmvif->rekey_data.replay_ctr =
- cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr));
+ cpu_to_le64(be64_to_cpup((const __be64 *)data->replay_ctr));
mvmvif->rekey_data.valid = true;
mutex_unlock(&mvm->mutex);
@@ -453,8 +453,7 @@ static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- WOWLAN_TSC_RSC_PARAM,
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_TSC_RSC_PARAM,
IWL_FW_CMD_VER_UNKNOWN);
int ret;
@@ -672,8 +671,7 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
};
int i, err;
- int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- WOWLAN_PATTERNS,
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
IWL_FW_CMD_VER_UNKNOWN);
if (!wowlan->n_patterns)
@@ -921,8 +919,7 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- WOWLAN_CONFIGURATION, 0) < 6) {
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_CONFIGURATION, 0) < 6) {
/* Query the last used seqno and set it */
int ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
@@ -1017,8 +1014,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) {
- int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- WOWLAN_TKIP_PARAM,
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_TKIP_PARAM,
IWL_FW_CMD_VER_UNKNOWN);
struct wowlan_key_tkip_data tkip_data = {};
int size;
@@ -1058,7 +1054,6 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
};
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
- IWL_ALWAYS_LONG_GROUP,
WOWLAN_KEK_KCK_MATERIAL,
IWL_FW_CMD_VER_UNKNOWN);
if (WARN_ON(cmd_ver != 2 && cmd_ver != 3 && cmd_ver != 4 &&
@@ -1089,7 +1084,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2);
/* skip the sta_id at the beginning */
_kek_kck_cmd = (void *)
- ((u8 *)_kek_kck_cmd) + sizeof(kek_kck_cmd.sta_id);
+ ((u8 *)_kek_kck_cmd + sizeof(kek_kck_cmd.sta_id));
}
IWL_DEBUG_WOWLAN(mvm, "setting akm %d\n",
@@ -1489,7 +1484,7 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
int pktsize = status->wake_packet_bufsize;
int pktlen = status->wake_packet_length;
const u8 *pktdata = status->wake_packet;
- struct ieee80211_hdr *hdr = (void *)pktdata;
+ const struct ieee80211_hdr *hdr = (const void *)pktdata;
int truncated = pktlen - pktsize;
/* this would be a firmware bug */
@@ -2074,8 +2069,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
};
int ret, len;
u8 notif_ver;
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- WOWLAN_GET_STATUSES,
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN)
@@ -2182,8 +2176,7 @@ out_free_resp:
static struct iwl_wowlan_status_data *
iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, u8 sta_id)
{
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- OFFLOADS_QUERY_CMD,
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, OFFLOADS_QUERY_CMD,
IWL_FW_CMD_VER_UNKNOWN);
__le32 station_id = cpu_to_le32(sta_id);
u32 cmd_size = cmd_ver != IWL_FW_CMD_VER_UNKNOWN ? sizeof(station_id) : 0;
@@ -2704,7 +2697,9 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
/* start pseudo D3 */
rtnl_lock();
+ wiphy_lock(mvm->hw->wiphy);
err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
+ wiphy_unlock(mvm->hw->wiphy);
rtnl_unlock();
if (err > 0)
err = -EINVAL;
@@ -2760,7 +2755,9 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
rtnl_lock();
+ wiphy_lock(mvm->hw->wiphy);
__iwl_mvm_resume(mvm, true);
+ wiphy_unlock(mvm->hw->wiphy);
rtnl_unlock();
iwl_mvm_resume_tcm(mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 445c94adb076..49898fd99594 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -426,8 +426,7 @@ static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_sta *sta,
return -EINVAL;
/* only change from debug set <-> debug unset */
- if ((amsdu_len && mvmsta->orig_amsdu_len) ||
- (!!amsdu_len && mvmsta->orig_amsdu_len))
+ if (amsdu_len && mvmsta->orig_amsdu_len)
return -EBUSY;
if (amsdu_len) {
@@ -1479,7 +1478,7 @@ iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf,
.mvm = mvm,
};
u16 wait_cmds[] = {
- iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD, DATA_PATH_GROUP, 0),
+ WIDE_ID(DATA_PATH_GROUP, HE_AIR_SNIFFER_CONFIG_CMD),
};
u32 aid;
int ret;
@@ -1514,8 +1513,9 @@ iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf,
wait_cmds, ARRAY_SIZE(wait_cmds),
iwl_mvm_sniffer_apply, &apply);
- ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD,
- DATA_PATH_GROUP, 0), 0,
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(DATA_PATH_GROUP, HE_AIR_SNIFFER_CONFIG_CMD),
+ 0,
sizeof(he_mon_cmd), &he_mon_cmd);
/* no need to really wait, we already did anyway */
@@ -1727,8 +1727,7 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
if (!iwl_mvm_firmware_running(mvm))
return -EIO;
- hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
- DEBUG_GROUP, 0);
+ hcmd.id = WIDE_ID(DEBUG_GROUP, *ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR);
cmd.op = cpu_to_le32(DEBUG_MEM_OP_READ);
/* Take care of alignment of both the position and the length */
@@ -1758,7 +1757,7 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
goto out;
}
- ret = len - copy_to_user(user_buf, (void *)rsp->data + delta, len);
+ ret = len - copy_to_user(user_buf, (u8 *)rsp->data + delta, len);
*ppos += ret;
out:
@@ -1782,8 +1781,7 @@ static ssize_t iwl_dbgfs_mem_write(struct file *file,
if (!iwl_mvm_firmware_running(mvm))
return -EIO;
- hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
- DEBUG_GROUP, 0);
+ hcmd.id = WIDE_ID(DEBUG_GROUP, *ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR);
if (*ppos & 0x3 || count < 4) {
op = DEBUG_MEM_OP_WRITE_BYTES;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 628aee634b2a..430044bc4755 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -346,8 +346,8 @@ iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
*format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
break;
case NL80211_CHAN_WIDTH_160:
- cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
- TOF_RANGE_REQ_CMD,
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver >= 13) {
@@ -548,7 +548,7 @@ static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_tof_range_req_cmd_v5 cmd_v5;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd_v5,
.len[0] = sizeof(cmd_v5),
@@ -574,7 +574,7 @@ static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_tof_range_req_cmd_v7 cmd_v7;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd_v7,
.len[0] = sizeof(cmd_v7),
@@ -604,7 +604,7 @@ static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_tof_range_req_cmd_v8 cmd;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd,
.len[0] = sizeof(cmd),
@@ -630,7 +630,7 @@ static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_tof_range_req_cmd_v9 cmd;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd,
.len[0] = sizeof(cmd),
@@ -728,7 +728,7 @@ static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm,
{
struct iwl_tof_range_req_cmd_v11 cmd;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd,
.len[0] = sizeof(cmd),
@@ -799,7 +799,7 @@ static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm,
{
struct iwl_tof_range_req_cmd_v12 cmd;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd,
.len[0] = sizeof(cmd),
@@ -827,7 +827,7 @@ static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm,
{
struct iwl_tof_range_req_cmd_v13 cmd;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd,
.len[0] = sizeof(cmd),
@@ -877,8 +877,8 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EBUSY;
if (new_api) {
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
- TOF_RANGE_REQ_CMD,
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
IWL_FW_CMD_VER_UNKNOWN);
switch (cmd_ver) {
@@ -927,8 +927,7 @@ void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
iwl_mvm_ftm_reset(mvm);
- if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RANGE_ABORT_CMD,
- LOCATION_GROUP, 0),
+ if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(LOCATION_GROUP, TOF_RANGE_ABORT_CMD),
0, sizeof(cmd), &cmd))
IWL_ERR(mvm, "failed to abort FTM process\n");
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
index bda6da7d988e..9729680476fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
@@ -106,6 +106,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_chan_def *chandef)
{
+ u32 cmd_id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_CONFIG_CMD);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
/*
* The command structure is the same for versions 6, 7 and 8 (only the
@@ -120,8 +121,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
IWL_TOF_RESPONDER_CMD_VALID_STA_ID),
.sta_id = mvmvif->bcast_sta.sta_id,
};
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
- TOF_RESPONDER_CONFIG_CMD, 6);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 6);
int err;
int cmd_size;
@@ -161,9 +161,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
memcpy(cmd.bssid, vif->addr, ETH_ALEN);
- return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RESPONDER_CONFIG_CMD,
- LOCATION_GROUP, 0),
- 0, cmd_size, &cmd);
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd);
}
static int
@@ -177,8 +175,7 @@ iwl_mvm_ftm_responder_dyn_cfg_v2(struct iwl_mvm *mvm,
};
u8 data[IWL_LCI_CIVIC_IE_MAX_SIZE] = {0};
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RESPONDER_DYN_CONFIG_CMD,
- LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
.data[0] = &cmd,
.len[0] = sizeof(cmd),
.data[1] = &data,
@@ -220,8 +217,7 @@ iwl_mvm_ftm_responder_dyn_cfg_v3(struct iwl_mvm *mvm,
{
struct iwl_tof_responder_dyn_config_cmd cmd;
struct iwl_host_cmd hcmd = {
- .id = iwl_cmd_id(TOF_RESPONDER_DYN_CONFIG_CMD,
- LOCATION_GROUP, 0),
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
.data[0] = &cmd,
.len[0] = sizeof(cmd),
/* may not be able to DMA from stack */
@@ -278,8 +274,9 @@ iwl_mvm_ftm_responder_dyn_cfg_cmd(struct iwl_mvm *mvm,
struct ieee80211_ftm_responder_params *params)
{
int ret;
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
- TOF_RESPONDER_DYN_CONFIG_CMD, 2);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
+ 2);
switch (cmd_ver) {
case 2:
@@ -320,8 +317,9 @@ int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
.addr = addr,
.hltk = hltk,
};
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
- TOF_RESPONDER_DYN_CONFIG_CMD, 2);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
+ 2);
lockdep_assert_held(&mvm->mutex);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index ae589b3b8c46..e842816134f1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -25,11 +25,6 @@
#define MVM_UCODE_ALIVE_TIMEOUT (HZ)
#define MVM_UCODE_CALIB_TIMEOUT (2 * HZ)
-#define UCODE_VALID_OK cpu_to_le32(0x1)
-
-#define IWL_PPAG_MASK 3
-#define IWL_PPAG_ETSI_MASK BIT(0)
-
#define IWL_TAS_US_MCC 0x5553
#define IWL_TAS_CANADA_MCC 0x4341
@@ -79,7 +74,7 @@ static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
struct iwl_dqa_enable_cmd dqa_cmd = {
.cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
};
- u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0);
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, DQA_ENABLE_CMD);
int ret;
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
@@ -126,13 +121,54 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
u32 lmac_error_event_table, umac_error_table;
u32 version = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
UCODE_ALIVE_NTFY, 0);
+ u32 i;
- /*
- * For v5 and above, we can check the version, for older
- * versions we need to check the size.
- */
- if (version == 5 || version == 6) {
- /* v5 and v6 are compatible (only IMR addition) */
+ if (version == 6) {
+ struct iwl_alive_ntf_v6 *palive;
+
+ if (pkt_len < sizeof(*palive))
+ return false;
+
+ palive = (void *)pkt->data;
+ mvm->trans->dbg.imr_data.imr_enable =
+ le32_to_cpu(palive->imr.enabled);
+ mvm->trans->dbg.imr_data.imr_size =
+ le32_to_cpu(palive->imr.size);
+ mvm->trans->dbg.imr_data.imr2sram_remainbyte =
+ mvm->trans->dbg.imr_data.imr_size;
+ mvm->trans->dbg.imr_data.imr_base_addr =
+ palive->imr.base_addr;
+ mvm->trans->dbg.imr_data.imr_curr_addr =
+ le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr);
+ IWL_DEBUG_FW(mvm, "IMR Enabled: 0x%x size 0x%x Address 0x%016llx\n",
+ mvm->trans->dbg.imr_data.imr_enable,
+ mvm->trans->dbg.imr_data.imr_size,
+ le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr));
+
+ if (!mvm->trans->dbg.imr_data.imr_enable) {
+ for (i = 0; i < ARRAY_SIZE(mvm->trans->dbg.active_regions); i++) {
+ struct iwl_ucode_tlv *reg_tlv;
+ struct iwl_fw_ini_region_tlv *reg;
+
+ reg_tlv = mvm->trans->dbg.active_regions[i];
+ if (!reg_tlv)
+ continue;
+
+ reg = (void *)reg_tlv->data;
+ /*
+ * We have only one DRAM IMR region, so we
+ * can break as soon as we find the first
+ * one.
+ */
+ if (reg->type == IWL_FW_INI_REGION_DRAM_IMR) {
+ mvm->trans->dbg.unsupported_region_msk |= BIT(i);
+ break;
+ }
+ }
+ }
+ }
+
+ if (version >= 5) {
struct iwl_alive_ntf_v5 *palive;
if (pkt_len < sizeof(*palive))
@@ -249,6 +285,26 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
return false;
}
+static void iwl_mvm_print_pd_notification(struct iwl_mvm *mvm)
+{
+ struct iwl_trans *trans = mvm->trans;
+ enum iwl_device_family device_family = trans->trans_cfg->device_family;
+
+ if (device_family < IWL_DEVICE_FAMILY_8000)
+ return;
+
+ if (device_family <= IWL_DEVICE_FAMILY_9000)
+ IWL_ERR(mvm, "WFPM_ARC1_PD_NOTIFICATION: 0x%x\n",
+ iwl_read_umac_prph(trans, WFPM_ARC1_PD_NOTIFICATION));
+ else
+ IWL_ERR(mvm, "WFPM_LMAC1_PD_NOTIFICATION: 0x%x\n",
+ iwl_read_umac_prph(trans, WFPM_LMAC1_PD_NOTIFICATION));
+
+ IWL_ERR(mvm, "HPM_SECONDARY_DEVICE_STATE: 0x%x\n",
+ iwl_read_umac_prph(trans, HPM_SECONDARY_DEVICE_STATE));
+
+}
+
static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
enum iwl_ucode_type ucode_type)
{
@@ -314,6 +370,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
iwl_read_prph(trans, SB_CPU_2_STATUS));
}
+ iwl_mvm_print_pd_notification(mvm);
+
/* LMAC/UMAC PC info */
if (trans->trans_cfg->device_family >=
IWL_DEVICE_FAMILY_9000) {
@@ -546,8 +604,7 @@ static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
return 0;
}
- cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, REGULATORY_AND_NVM_GROUP,
- SAR_OFFSET_MAPPING_TABLE_CMD,
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver != 2) {
@@ -572,6 +629,7 @@ static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
+ u32 cmd_id = PHY_CONFIGURATION_CMD;
struct iwl_phy_cfg_cmd_v3 phy_cfg_cmd;
enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
struct iwl_phy_specific_cfg phy_filters = {};
@@ -603,8 +661,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
phy_cfg_cmd.calib_control.flow_trigger =
mvm->fw->default_calib[ucode_type].flow_trigger;
- cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- PHY_CONFIGURATION_CMD,
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver == 3) {
iwl_mvm_phy_filter_init(mvm, &phy_filters);
@@ -616,8 +673,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
phy_cfg_cmd.phy_cfg);
cmd_size = (cmd_ver == 3) ? sizeof(struct iwl_phy_cfg_cmd_v3) :
sizeof(struct iwl_phy_cfg_cmd_v1);
- return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
- cmd_size, &phy_cfg_cmd);
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &phy_cfg_cmd);
}
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm)
@@ -737,7 +793,7 @@ out:
mvm->nvm_data->bands[0].n_channels = 1;
mvm->nvm_data->bands[0].n_bitrates = 1;
mvm->nvm_data->bands[0].bitrates =
- (void *)mvm->nvm_data->channels + 1;
+ (void *)((u8 *)mvm->nvm_data->channels + 1);
mvm->nvm_data->bands[0].bitrates->hw_value = 10;
}
@@ -760,6 +816,7 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
#ifdef CONFIG_ACPI
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
+ u32 cmd_id = REDUCE_TX_POWER_CMD;
struct iwl_dev_tx_power_cmd cmd = {
.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
};
@@ -767,11 +824,14 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
int ret;
u16 len = 0;
u32 n_subbands;
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- REDUCE_TX_POWER_CMD,
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
IWL_FW_CMD_VER_UNKNOWN);
-
- if (cmd_ver == 6) {
+ if (cmd_ver == 7) {
+ len = sizeof(cmd.v7);
+ n_subbands = IWL_NUM_SUB_BANDS_V2;
+ per_chain = cmd.v7.per_chain[0][0];
+ cmd.v7.flags = cpu_to_le32(mvm->fwrt.reduced_power_flags);
+ } else if (cmd_ver == 6) {
len = sizeof(cmd.v6);
n_subbands = IWL_NUM_SUB_BANDS_V2;
per_chain = cmd.v6.per_chain[0][0];
@@ -805,7 +865,7 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
iwl_mei_set_power_limit(per_chain);
IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
- return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
}
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
@@ -814,9 +874,12 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
struct iwl_geo_tx_power_profiles_resp *resp;
u16 len;
int ret;
- struct iwl_host_cmd cmd;
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
- PER_CHAIN_LIMIT_OFFSET_CMD,
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD),
+ .flags = CMD_WANT_SKB,
+ .data = { &geo_tx_cmd },
+ };
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
IWL_FW_CMD_VER_UNKNOWN);
/* the ops field is at the same spot for all versions, so set in v1 */
@@ -838,12 +901,7 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
if (!iwl_sar_geo_support(&mvm->fwrt))
return -EOPNOTSUPP;
- cmd = (struct iwl_host_cmd){
- .id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD),
- .len = { len, },
- .flags = CMD_WANT_SKB,
- .data = { &geo_tx_cmd },
- };
+ cmd.len[0] = len;
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret) {
@@ -863,14 +921,14 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
+ u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD);
union iwl_geo_tx_power_profiles_cmd cmd;
u16 len;
u32 n_bands;
u32 n_profiles;
u32 sk = 0;
int ret;
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
- PER_CHAIN_LIMIT_OFFSET_CMD,
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
IWL_FW_CMD_VER_UNKNOWN);
BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, ops) !=
@@ -948,167 +1006,19 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
IWL_UCODE_TLV_API_SAR_TABLE_VER))
cmd.v2.table_revision = cpu_to_le32(sk);
- return iwl_mvm_send_cmd_pdu(mvm,
- WIDE_ID(PHY_OPS_GROUP,
- PER_CHAIN_LIMIT_OFFSET_CMD),
- 0, len, &cmd);
-}
-
-static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
-{
- union acpi_object *wifi_pkg, *data, *flags;
- int i, j, ret, tbl_rev, num_sub_bands;
- int idx = 2;
-
- mvm->fwrt.ppag_flags = 0;
-
- data = iwl_acpi_get_object(mvm->dev, ACPI_PPAG_METHOD);
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- /* try to read ppag table rev 2 or 1 (both have the same data size) */
- wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
- ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev);
- if (!IS_ERR(wifi_pkg)) {
- if (tbl_rev == 1 || tbl_rev == 2) {
- num_sub_bands = IWL_NUM_SUB_BANDS_V2;
- IWL_DEBUG_RADIO(mvm,
- "Reading PPAG table v2 (tbl_rev=%d)\n",
- tbl_rev);
- goto read_table;
- } else {
- ret = -EINVAL;
- goto out_free;
- }
- }
-
- /* try to read ppag table revision 0 */
- wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
- ACPI_PPAG_WIFI_DATA_SIZE_V1, &tbl_rev);
- if (!IS_ERR(wifi_pkg)) {
- if (tbl_rev != 0) {
- ret = -EINVAL;
- goto out_free;
- }
- num_sub_bands = IWL_NUM_SUB_BANDS_V1;
- IWL_DEBUG_RADIO(mvm, "Reading PPAG table v1 (tbl_rev=0)\n");
- goto read_table;
- }
- ret = PTR_ERR(wifi_pkg);
- goto out_free;
-
-read_table:
- mvm->fwrt.ppag_ver = tbl_rev;
- flags = &wifi_pkg->package.elements[1];
-
- if (flags->type != ACPI_TYPE_INTEGER) {
- ret = -EINVAL;
- goto out_free;
- }
-
- mvm->fwrt.ppag_flags = flags->integer.value & IWL_PPAG_MASK;
-
- if (!mvm->fwrt.ppag_flags) {
- ret = 0;
- goto out_free;
- }
-
- /*
- * read, verify gain values and save them into the PPAG table.
- * first sub-band (j=0) corresponds to Low-Band (2.4GHz), and the
- * following sub-bands to High-Band (5GHz).
- */
- for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
- for (j = 0; j < num_sub_bands; j++) {
- union acpi_object *ent;
-
- ent = &wifi_pkg->package.elements[idx++];
- if (ent->type != ACPI_TYPE_INTEGER) {
- ret = -EINVAL;
- goto out_free;
- }
-
- mvm->fwrt.ppag_chains[i].subbands[j] = ent->integer.value;
-
- if ((j == 0 &&
- (mvm->fwrt.ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_LB ||
- mvm->fwrt.ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_LB)) ||
- (j != 0 &&
- (mvm->fwrt.ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_HB ||
- mvm->fwrt.ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_HB))) {
- mvm->fwrt.ppag_flags = 0;
- ret = -EINVAL;
- goto out_free;
- }
- }
- }
-
- ret = 0;
-out_free:
- kfree(data);
- return ret;
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
}
int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
{
union iwl_ppag_table_cmd cmd;
- u8 cmd_ver;
- int i, j, ret, num_sub_bands, cmd_size;
- s8 *gain;
+ int ret, cmd_size;
- if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
- IWL_DEBUG_RADIO(mvm,
- "PPAG capability not supported by FW, command not sent.\n");
- return 0;
- }
- if (!mvm->fwrt.ppag_flags) {
- IWL_DEBUG_RADIO(mvm, "PPAG not enabled, command not sent.\n");
+ ret = iwl_read_ppag_table(&mvm->fwrt, &cmd, &cmd_size);
+ /* Not supporting PPAG table is a valid scenario */
+ if (ret < 0)
return 0;
- }
- /* The 'flags' field is the same in v1 and in v2 so we can just
- * use v1 to access it.
- */
- cmd.v1.flags = cpu_to_le32(mvm->fwrt.ppag_flags);
- cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
- PER_PLATFORM_ANT_GAIN_CMD,
- IWL_FW_CMD_VER_UNKNOWN);
- if (cmd_ver == 1) {
- num_sub_bands = IWL_NUM_SUB_BANDS_V1;
- gain = cmd.v1.gain[0];
- cmd_size = sizeof(cmd.v1);
- if (mvm->fwrt.ppag_ver == 1 || mvm->fwrt.ppag_ver == 2) {
- IWL_DEBUG_RADIO(mvm,
- "PPAG table rev is %d but FW supports v1, sending truncated table\n",
- mvm->fwrt.ppag_ver);
- cmd.v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
- }
- } else if (cmd_ver == 2 || cmd_ver == 3) {
- num_sub_bands = IWL_NUM_SUB_BANDS_V2;
- gain = cmd.v2.gain[0];
- cmd_size = sizeof(cmd.v2);
- if (mvm->fwrt.ppag_ver == 0) {
- IWL_DEBUG_RADIO(mvm,
- "PPAG table is v1 but FW supports v2, sending padded table\n");
- } else if (cmd_ver == 2 && mvm->fwrt.ppag_ver == 2) {
- IWL_DEBUG_RADIO(mvm,
- "PPAG table is v3 but FW supports v2, sending partial bitmap.\n");
- cmd.v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
- }
- } else {
- IWL_DEBUG_RADIO(mvm, "Unsupported PPAG command version\n");
- return 0;
- }
-
- for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
- for (j = 0; j < num_sub_bands; j++) {
- gain[i * num_sub_bands + j] =
- mvm->fwrt.ppag_chains[i].subbands[j];
- IWL_DEBUG_RADIO(mvm,
- "PPAG table: chain[%d] band[%d]: gain = %d\n",
- i, j, gain[i * num_sub_bands + j]);
- }
- }
IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
PER_PLATFORM_ANT_GAIN_CMD),
@@ -1120,40 +1030,11 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
return ret;
}
-static const struct dmi_system_id dmi_ppag_approved_list[] = {
- { .ident = "HP",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- },
- },
- { .ident = "SAMSUNG",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
- },
- },
- { .ident = "MSFT",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
- },
- },
- { .ident = "ASUS",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek COMPUTER INC."),
- },
- },
- {}
-};
-
static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
{
/* no need to read the table, done in INIT stage */
- if (!dmi_check_system(dmi_ppag_approved_list)) {
- IWL_DEBUG_RADIO(mvm,
- "System vendor '%s' is not in the approved list, disabling PPAG.\n",
- dmi_get_system_info(DMI_SYS_VENDOR));
- mvm->fwrt.ppag_flags = 0;
+ if (!(iwl_acpi_is_ppag_approved(&mvm->fwrt)))
return 0;
- }
return iwl_mvm_ppag_send_cmd(mvm);
}
@@ -1205,11 +1086,12 @@ static bool iwl_mvm_add_to_tas_block_list(__le32 *list, __le32 *le_size, unsigne
static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
{
+ u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG);
int ret;
- struct iwl_tas_config_cmd_v3 cmd = {};
- int cmd_size;
+ union iwl_tas_config_cmd cmd = {};
+ int cmd_size, fw_ver;
- BUILD_BUG_ON(ARRAY_SIZE(cmd.block_list_array) <
+ BUILD_BUG_ON(ARRAY_SIZE(cmd.v3.block_list_array) <
APCI_WTAS_BLACK_LIST_MAX);
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
@@ -1217,7 +1099,10 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
return;
}
- ret = iwl_acpi_get_tas(&mvm->fwrt, &cmd);
+ fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ ret = iwl_acpi_get_tas(&mvm->fwrt, &cmd, fw_ver);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"TAS table invalid or unavailable. (%d)\n",
@@ -1232,25 +1117,24 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
IWL_DEBUG_RADIO(mvm,
"System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n",
dmi_get_system_info(DMI_SYS_VENDOR));
- if ((!iwl_mvm_add_to_tas_block_list(cmd.block_list_array,
- &cmd.block_list_size, IWL_TAS_US_MCC)) ||
- (!iwl_mvm_add_to_tas_block_list(cmd.block_list_array,
- &cmd.block_list_size, IWL_TAS_CANADA_MCC))) {
+ if ((!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
+ &cmd.v4.block_list_size,
+ IWL_TAS_US_MCC)) ||
+ (!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
+ &cmd.v4.block_list_size,
+ IWL_TAS_CANADA_MCC))) {
IWL_DEBUG_RADIO(mvm,
"Unable to add US/Canada to TAS block list, disabling TAS\n");
return;
}
}
- cmd_size = iwl_fw_lookup_cmd_ver(mvm->fw, REGULATORY_AND_NVM_GROUP,
- TAS_CONFIG,
- IWL_FW_CMD_VER_UNKNOWN) < 3 ?
+ /* v4 is the same size as v3, so no need to differentiate here */
+ cmd_size = fw_ver < 3 ?
sizeof(struct iwl_tas_config_cmd_v2) :
sizeof(struct iwl_tas_config_cmd_v3);
- ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
- TAS_CONFIG),
- 0, cmd_size, &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd);
if (ret < 0)
IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
}
@@ -1283,7 +1167,7 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
{
int ret;
u32 value;
- struct iwl_lari_config_change_cmd_v5 cmd = {};
+ struct iwl_lari_config_change_cmd_v6 cmd = {};
cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt);
@@ -1310,25 +1194,43 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
if (!ret)
cmd.oem_uhb_allow_bitmap = cpu_to_le32(value);
+ ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
+ DSM_FUNC_FORCE_DISABLE_CHANNELS,
+ &iwl_guid, &value);
+ if (!ret)
+ cmd.force_disable_channels_bitmap = cpu_to_le32(value);
+
if (cmd.config_bitmap ||
cmd.oem_uhb_allow_bitmap ||
cmd.oem_11ax_allow_bitmap ||
cmd.oem_unii4_allow_bitmap ||
- cmd.chan_state_active_bitmap) {
+ cmd.chan_state_active_bitmap ||
+ cmd.force_disable_channels_bitmap) {
size_t cmd_size;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
- REGULATORY_AND_NVM_GROUP,
- LARI_CONFIG_CHANGE, 1);
- if (cmd_ver == 5)
+ WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ LARI_CONFIG_CHANGE),
+ 1);
+ switch (cmd_ver) {
+ case 6:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6);
+ break;
+ case 5:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5);
- else if (cmd_ver == 4)
+ break;
+ case 4:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4);
- else if (cmd_ver == 3)
+ break;
+ case 3:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3);
- else if (cmd_ver == 2)
+ break;
+ case 2:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2);
- else
+ break;
+ default:
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1);
+ break;
+ }
IWL_DEBUG_RADIO(mvm,
"sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
@@ -1340,8 +1242,9 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
le32_to_cpu(cmd.chan_state_active_bitmap),
cmd_ver);
IWL_DEBUG_RADIO(mvm,
- "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x\n",
- le32_to_cpu(cmd.oem_uhb_allow_bitmap));
+ "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n",
+ le32_to_cpu(cmd.oem_uhb_allow_bitmap),
+ le32_to_cpu(cmd.force_disable_channels_bitmap));
ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(REGULATORY_AND_NVM_GROUP,
LARI_CONFIG_CHANGE),
@@ -1358,7 +1261,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
int ret;
/* read PPAG table */
- ret = iwl_mvm_get_ppag_table(mvm);
+ ret = iwl_acpi_get_ppag_table(&mvm->fwrt);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"PPAG BIOS table invalid or unavailable. (%d)\n",
@@ -1641,9 +1544,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
* internal aux station for all aux activities that don't
* requires a dedicated data queue.
*/
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA,
- 0) < 12) {
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
/*
* In old version the aux station uses mac id like other
* station and not lmac id
@@ -1658,8 +1559,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
while (!sband && i < NUM_NL80211_BANDS)
sband = mvm->hw->wiphy->bands[i++];
- if (WARN_ON_ONCE(!sband))
+ if (WARN_ON_ONCE(!sband)) {
+ ret = -ENODEV;
goto error;
+ }
chan = &sband->channels[0];
@@ -1800,9 +1703,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++)
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA,
- 0) < 12) {
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
/*
* Add auxiliary station for scanning.
* Newer versions of this command implies that the fw uses
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index fd7d4abfb454..5aa4520b70ac 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -821,10 +821,7 @@ u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info,
u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, u8 rate_idx)
{
u16 flags = iwl_mvm_mac80211_idx_to_hwrate(fw, rate_idx);
- bool is_new_rate = iwl_fw_lookup_cmd_ver(fw,
- LONG_GROUP,
- BEACON_TEMPLATE_CMD,
- 0) > 10;
+ bool is_new_rate = iwl_fw_lookup_cmd_ver(fw, BEACON_TEMPLATE_CMD, 0) > 10;
if (rate_idx <= IWL_FIRST_CCK_RATE)
flags |= is_new_rate ? IWL_MAC_BEACON_CCK
@@ -960,8 +957,7 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm,
WARN_ON(channel == 0);
if (cfg80211_channel_is_psc(ctx->def.chan) &&
!IWL_MVM_DISABLE_AP_FILS) {
- flags |= iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- BEACON_TEMPLATE_CMD,
+ flags |= iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD,
0) > 10 ?
IWL_MAC_BEACON_FILS :
IWL_MAC_BEACON_FILS_V1;
@@ -1458,8 +1454,9 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
struct sk_buff *skb;
u8 *data;
u32 size = le32_to_cpu(sb->byte_count);
- int ver = iwl_fw_lookup_cmd_ver(mvm->fw, PROT_OFFLOAD_GROUP,
- STORED_BEACON_NTF, 0);
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF),
+ 0);
if (size == 0)
return;
@@ -1602,6 +1599,18 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
RCU_INIT_POINTER(mvm->csa_vif, NULL);
return;
case NL80211_IFTYPE_STATION:
+ /*
+ * if we don't know about an ongoing channel switch,
+ * make sure FW cancels it
+ */
+ if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+ CHANNEL_SWITCH_ERROR_NOTIF,
+ 0) && !vif->csa_active) {
+ IWL_DEBUG_INFO(mvm, "Channel Switch was canceled\n");
+ iwl_mvm_cancel_channel_switch(mvm, vif, mac_id);
+ break;
+ }
+
iwl_mvm_csa_client_absent(mvm, vif);
cancel_delayed_work(&mvmvif->csa_work);
ieee80211_chswitch_done(vif, true);
@@ -1615,6 +1624,31 @@ out_unlock:
rcu_read_unlock();
}
+void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_channel_switch_error_notif *notif = (void *)pkt->data;
+ struct ieee80211_vif *vif;
+ u32 id = le32_to_cpu(notif->mac_id);
+ u32 csa_err_mask = le32_to_cpu(notif->csa_err_mask);
+
+ rcu_read_lock();
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
+ if (!vif) {
+ rcu_read_unlock();
+ return;
+ }
+
+ IWL_DEBUG_INFO(mvm, "FW reports CSA error: mac_id=%u, csa_err_mask=%u\n",
+ id, csa_err_mask);
+ if (csa_err_mask & (CS_ERR_COUNT_ERROR |
+ CS_ERR_LONG_DELAY_AFTER_CS |
+ CS_ERR_TX_BLOCK_TIMER_EXPIRED))
+ ieee80211_channel_switch_disconnect(vif, true);
+ rcu_read_unlock();
+}
+
void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
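
A recurring change in these hunks (and throughout the rest of this series) is that iwl_fw_lookup_cmd_ver() and the command-send paths now take a single wide command id instead of separate group and opcode arguments, built with WIDE_ID(). A minimal standalone sketch of the idea, assuming the usual convention of packing the group into the upper byte; the group/opcode values below are placeholders, not the real iwlwifi enum values:

#include <stdint.h>
#include <stdio.h>

/* Sketch only: pack a command group and opcode into one wide id,
 * as the WIDE_ID() users in the hunks above do.  Values are hypothetical. */
#define WIDE_ID(grp, opcode)	(((grp) << 8) | (opcode))

int main(void)
{
	uint32_t id = WIDE_ID(0x0b /* hypothetical group */, 0x02 /* hypothetical opcode */);

	printf("wide id 0x%04x -> group 0x%02x, opcode 0x%02x\n",
	       (unsigned int)id, (unsigned int)(id >> 8) & 0xff, (unsigned int)(id & 0xff));
	return 0;
}

With one u32 carrying both halves, version lookups and send calls no longer need a separate group parameter, which is why so many call sites shrink in this diff.
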
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 709a3df57b10..784d91281c02 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -374,28 +374,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->n_cipher_suites++;
}
- /* currently FW API supports only one optional cipher scheme */
- if (mvm->fw->cs[0].cipher) {
- const struct iwl_fw_cipher_scheme *fwcs = &mvm->fw->cs[0];
- struct ieee80211_cipher_scheme *cs = &mvm->cs[0];
-
- mvm->hw->n_cipher_schemes = 1;
-
- cs->cipher = le32_to_cpu(fwcs->cipher);
- cs->iftype = BIT(NL80211_IFTYPE_STATION);
- cs->hdr_len = fwcs->hdr_len;
- cs->pn_len = fwcs->pn_len;
- cs->pn_off = fwcs->pn_off;
- cs->key_idx_off = fwcs->key_idx_off;
- cs->key_idx_mask = fwcs->key_idx_mask;
- cs->key_idx_shift = fwcs->key_idx_shift;
- cs->mic_len = fwcs->mic_len;
-
- mvm->hw->cipher_schemes = mvm->cs;
- mvm->ciphers[hw->wiphy->n_cipher_suites] = cs->cipher;
- hw->wiphy->n_cipher_suites++;
- }
-
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_FTM_CALIBRATED)) {
wiphy_ext_feature_set(hw->wiphy,
@@ -553,8 +531,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
- if (iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- WOWLAN_KEK_KCK_MATERIAL,
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_KEK_KCK_MATERIAL,
IWL_FW_CMD_VER_UNKNOWN) == 3)
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;
@@ -567,9 +544,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
if (iwl_mvm_is_oce_supported(mvm)) {
- u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
- IWL_ALWAYS_LONG_GROUP,
- SCAN_REQ_UMAC, 0);
+ u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC, 0);
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP);
@@ -1154,7 +1129,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
/* async_handlers_wk is now blocked */
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, ADD_STA, 0) < 12)
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12)
iwl_mvm_rm_aux_sta(mvm);
iwl_mvm_stop_device(mvm);
@@ -1246,6 +1221,7 @@ static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
s16 tx_power)
{
+ u32 cmd_id = REDUCE_TX_POWER_CMD;
int len;
struct iwl_dev_tx_power_cmd cmd = {
.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
@@ -1253,14 +1229,15 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
.common.pwr_restriction = cpu_to_le16(8 * tx_power),
};
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- REDUCE_TX_POWER_CMD,
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
IWL_FW_CMD_VER_UNKNOWN);
if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
cmd.common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
- if (cmd_ver == 6)
+ if (cmd_ver == 7)
+ len = sizeof(cmd.v7);
+ else if (cmd_ver == 6)
len = sizeof(cmd.v6);
else if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_REDUCE_TX_POWER))
@@ -1274,7 +1251,7 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
/* all structs have the same common part, add it */
len += sizeof(cmd.common);
- return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
}
static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
@@ -1335,6 +1312,15 @@ static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
};
+ /*
+ * In the new flow, since the FW is in charge of the timing,
+ * if the driver has canceled the channel switch it will receive the
+ * CHANNEL_SWITCH_START_NOTIF notification from the FW and can then cancel it
+ */
+ if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+ CHANNEL_SWITCH_ERROR_NOTIF, 0))
+ return;
+
IWL_DEBUG_MAC80211(mvm, "Abort CSA on mac %d\n", mvmvif->id);
mutex_lock(&mvm->mutex);
@@ -1845,11 +1831,108 @@ static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
return res;
}
+static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm,
+ struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nss,
+ u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit)
+{
+ int i;
+
+ /*
+ * FW currently supports only nss == MAX_HE_SUPP_NSS
+ *
+ * If nss > MAX: we can ignore values we don't support
+ * If nss < MAX: we can set zeros in other streams
+ */
+ if (nss > MAX_HE_SUPP_NSS) {
+ IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
+ MAX_HE_SUPP_NSS);
+ nss = MAX_HE_SUPP_NSS;
+ }
+
+ for (i = 0; i < nss; i++) {
+ u8 ru_index_tmp = ru_index_bitmap << 1;
+ u8 low_th = IWL_HE_PKT_EXT_NONE, high_th = IWL_HE_PKT_EXT_NONE;
+ u8 bw;
+
+ for (bw = 0;
+ bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
+ bw++) {
+ ru_index_tmp >>= 1;
+
+ if (!(ru_index_tmp & 1))
+ continue;
+
+ high_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit);
+ ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+ low_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit);
+ ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+
+ pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
+ pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
+ }
+ }
+}
+
+static void iwl_mvm_set_pkt_ext_from_he_ppe(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_he_pkt_ext_v2 *pkt_ext)
+{
+ u8 nss = (sta->he_cap.ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) + 1;
+ u8 *ppe = &sta->he_cap.ppe_thres[0];
+ u8 ru_index_bitmap =
+ u8_get_bits(*ppe,
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ /* Starting after PPE header */
+ u8 ppe_pos_bit = IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE;
+
+ iwl_mvm_parse_ppe(mvm, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit);
+}
+
+static void iwl_mvm_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext,
+ u8 nominal_padding,
+ u32 *flags)
+{
+ int low_th = -1;
+ int high_th = -1;
+ int i;
+
+ switch (nominal_padding) {
+ case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US:
+ low_th = IWL_HE_PKT_EXT_NONE;
+ high_th = IWL_HE_PKT_EXT_NONE;
+ break;
+ case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US:
+ low_th = IWL_HE_PKT_EXT_BPSK;
+ high_th = IWL_HE_PKT_EXT_NONE;
+ break;
+ case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US:
+ low_th = IWL_HE_PKT_EXT_NONE;
+ high_th = IWL_HE_PKT_EXT_BPSK;
+ break;
+ }
+
+ /* Set the PPE thresholds accordingly */
+ if (low_th >= 0 && high_th >= 0) {
+ for (i = 0; i < MAX_HE_SUPP_NSS; i++) {
+ u8 bw;
+
+ for (bw = 0;
+ bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
+ bw++) {
+ pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
+ pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
+ }
+ }
+
+ *flags |= STA_CTXT_HE_PACKET_EXT;
+ }
+}
+
static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, u8 sta_id)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_he_sta_context_cmd sta_ctxt_cmd = {
+ struct iwl_he_sta_context_cmd_v3 sta_ctxt_cmd = {
.sta_id = sta_id,
.tid_limit = IWL_MAX_TID_COUNT,
.bss_color = vif->bss_conf.he_bss_color.color,
@@ -1857,16 +1940,39 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
.frame_time_rts_th =
cpu_to_le16(vif->bss_conf.frame_time_rts_th),
};
- int size = fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_MBSSID_HE) ?
- sizeof(sta_ctxt_cmd) :
- sizeof(struct iwl_he_sta_context_cmd_v1);
+ struct iwl_he_sta_context_cmd_v2 sta_ctxt_cmd_v2 = {};
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, STA_HE_CTXT_CMD);
+ u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 2);
+ int size;
struct ieee80211_sta *sta;
u32 flags;
int i;
const struct ieee80211_sta_he_cap *own_he_cap = NULL;
struct ieee80211_chanctx_conf *chanctx_conf;
const struct ieee80211_supported_band *sband;
+ void *cmd;
+
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_MBSSID_HE))
+ ver = 1;
+
+ switch (ver) {
+ case 1:
+ /* same layout as v2 except some data at the end */
+ cmd = &sta_ctxt_cmd_v2;
+ size = sizeof(struct iwl_he_sta_context_cmd_v1);
+ break;
+ case 2:
+ cmd = &sta_ctxt_cmd_v2;
+ size = sizeof(struct iwl_he_sta_context_cmd_v2);
+ break;
+ case 3:
+ cmd = &sta_ctxt_cmd;
+ size = sizeof(struct iwl_he_sta_context_cmd_v3);
+ break;
+ default:
+ IWL_ERR(mvm, "bad STA_HE_CTXT_CMD version %d\n", ver);
+ return;
+ }
rcu_read_lock();
@@ -1931,97 +2037,25 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
* Initialize the PPE thresholds to "None" (7), as described in Table
* 9-262ac of 80211.ax/D3.0.
*/
- memset(&sta_ctxt_cmd.pkt_ext, 7, sizeof(sta_ctxt_cmd.pkt_ext));
+ memset(&sta_ctxt_cmd.pkt_ext, IWL_HE_PKT_EXT_NONE,
+ sizeof(sta_ctxt_cmd.pkt_ext));
/* If PPE Thresholds exist, parse them into a FW-familiar format. */
if (sta->he_cap.he_cap_elem.phy_cap_info[6] &
- IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
- u8 nss = (sta->he_cap.ppe_thres[0] &
- IEEE80211_PPE_THRES_NSS_MASK) + 1;
- u8 ru_index_bitmap =
- (sta->he_cap.ppe_thres[0] &
- IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
- IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
- u8 *ppe = &sta->he_cap.ppe_thres[0];
- u8 ppe_pos_bit = 7; /* Starting after PPE header */
-
- /*
- * FW currently supports only nss == MAX_HE_SUPP_NSS
- *
- * If nss > MAX: we can ignore values we don't support
- * If nss < MAX: we can set zeros in other streams
- */
- if (nss > MAX_HE_SUPP_NSS) {
- IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
- MAX_HE_SUPP_NSS);
- nss = MAX_HE_SUPP_NSS;
- }
-
- for (i = 0; i < nss; i++) {
- u8 ru_index_tmp = ru_index_bitmap << 1;
- u8 bw;
-
- for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; bw++) {
- ru_index_tmp >>= 1;
- if (!(ru_index_tmp & 1))
- continue;
-
- sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][1] =
- iwl_mvm_he_get_ppe_val(ppe,
- ppe_pos_bit);
- ppe_pos_bit +=
- IEEE80211_PPE_THRES_INFO_PPET_SIZE;
- sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0] =
- iwl_mvm_he_get_ppe_val(ppe,
- ppe_pos_bit);
- ppe_pos_bit +=
- IEEE80211_PPE_THRES_INFO_PPET_SIZE;
- }
- }
-
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ iwl_mvm_set_pkt_ext_from_he_ppe(mvm, sta,
+ &sta_ctxt_cmd.pkt_ext);
flags |= STA_CTXT_HE_PACKET_EXT;
- } else if (u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9],
- IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK)
- != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED) {
- int low_th = -1;
- int high_th = -1;
-
- /* Take the PPE thresholds from the nominal padding info */
- switch (u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9],
- IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK)) {
- case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US:
- low_th = IWL_HE_PKT_EXT_NONE;
- high_th = IWL_HE_PKT_EXT_NONE;
- break;
- case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US:
- low_th = IWL_HE_PKT_EXT_BPSK;
- high_th = IWL_HE_PKT_EXT_NONE;
- break;
- case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US:
- low_th = IWL_HE_PKT_EXT_NONE;
- high_th = IWL_HE_PKT_EXT_BPSK;
- break;
- }
-
- /* Set the PPE thresholds accordingly */
- if (low_th >= 0 && high_th >= 0) {
- struct iwl_he_pkt_ext *pkt_ext =
- (struct iwl_he_pkt_ext *)&sta_ctxt_cmd.pkt_ext;
-
- for (i = 0; i < MAX_HE_SUPP_NSS; i++) {
- u8 bw;
-
- for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX;
- bw++) {
- pkt_ext->pkt_ext_qam_th[i][bw][0] =
- low_th;
- pkt_ext->pkt_ext_qam_th[i][bw][1] =
- high_th;
- }
- }
-
- flags |= STA_CTXT_HE_PACKET_EXT;
- }
+ /* PPE Thresholds don't exist - set the API PPE values
+ * according to the Common Nominal Packet Padding field. */
+ } else {
+ u8 nominal_padding =
+ u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9],
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
+ if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED)
+ iwl_mvm_set_pkt_ext_from_nominal_padding(&sta_ctxt_cmd.pkt_ext,
+ nominal_padding,
+ &flags);
}
if (sta->he_cap.he_cap_elem.mac_cap_info[2] &
@@ -2084,9 +2118,46 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
sta_ctxt_cmd.flags = cpu_to_le32(flags);
- if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD,
- DATA_PATH_GROUP, 0),
- 0, size, &sta_ctxt_cmd))
+ if (ver < 3) {
+ /* fields before pkt_ext */
+ BUILD_BUG_ON(offsetof(typeof(sta_ctxt_cmd), pkt_ext) !=
+ offsetof(typeof(sta_ctxt_cmd_v2), pkt_ext));
+ memcpy(&sta_ctxt_cmd_v2, &sta_ctxt_cmd,
+ offsetof(typeof(sta_ctxt_cmd), pkt_ext));
+
+ /* pkt_ext */
+ for (i = 0;
+ i < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th);
+ i++) {
+ u8 bw;
+
+ for (bw = 0;
+ bw < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i]);
+ bw++) {
+ BUILD_BUG_ON(sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw]) !=
+ sizeof(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw]));
+
+ memcpy(&sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw],
+ &sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw],
+ sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw]));
+ }
+ }
+
+ /* fields after pkt_ext */
+ BUILD_BUG_ON(sizeof(sta_ctxt_cmd) -
+ offsetofend(typeof(sta_ctxt_cmd), pkt_ext) !=
+ sizeof(sta_ctxt_cmd_v2) -
+ offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext));
+ memcpy((u8 *)&sta_ctxt_cmd_v2 +
+ offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext),
+ (u8 *)&sta_ctxt_cmd +
+ offsetofend(typeof(sta_ctxt_cmd), pkt_ext),
+ sizeof(sta_ctxt_cmd) -
+ offsetofend(typeof(sta_ctxt_cmd), pkt_ext));
+ sta_ctxt_cmd_v2.reserved3 = 0;
+ }
+
+ if (iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, size, cmd))
IWL_ERR(mvm, "Failed to config FW to work HE!\n");
}
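
The v2/v3 conversion above copies the common prefix, the pkt_ext table, and the common tail separately because the pkt_ext member changed size between command versions. A small userspace sketch of the same prefix/tail copy trick, using made-up struct layouts rather than the real iwl_he_sta_context_cmd definitions:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical command layouts; only the array in the middle differs. */
struct cmd_v2 { unsigned int sta_id, flags; unsigned char pkt_ext[8];  unsigned int trig_frame_policy; };
struct cmd_v3 { unsigned int sta_id, flags; unsigned char pkt_ext[16]; unsigned int trig_frame_policy; };

int main(void)
{
	struct cmd_v3 v3 = { .sta_id = 7, .flags = 0x10, .trig_frame_policy = 3 };
	struct cmd_v2 v2 = { 0 };

	/* Fields before the differently-sized member share one layout. */
	memcpy(&v2, &v3, offsetof(struct cmd_v3, pkt_ext));
	/* Fields after it are copied separately, from their own offsets. */
	memcpy((unsigned char *)&v2 + offsetof(struct cmd_v2, trig_frame_policy),
	       (unsigned char *)&v3 + offsetof(struct cmd_v3, trig_frame_policy),
	       sizeof(v2.trig_frame_policy));

	printf("v2: sta_id=%u flags=0x%x trig_frame_policy=%u\n",
	       v2.sta_id, v2.flags, v2.trig_frame_policy);
	return 0;
}

The BUILD_BUG_ON checks in the hunk play the role of compile-time guarantees that the prefix offsets and tail sizes really do match between the two versions.
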
@@ -2301,11 +2372,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
/*
* We received a beacon from the associated AP so
* remove the session protection.
- * A firmware with the new API will remove it automatically.
*/
- if (!fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
- iwl_mvm_stop_session_protection(mvm, vif);
+ iwl_mvm_stop_session_protection(mvm, vif);
iwl_mvm_sf_update(mvm, vif, false);
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
@@ -2949,7 +3017,7 @@ static void iwl_mvm_reset_cca_40mhz_workaround(struct iwl_mvm *mvm,
if (he_cap) {
/* we know that ours is writable */
- struct ieee80211_sta_he_cap *he = (void *)he_cap;
+ struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap;
he->he_cap_elem.phy_cap_info[0] |=
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
@@ -3413,12 +3481,7 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
/* support HW crypto on TX */
return 0;
default:
- /* currently FW supports only one optional cipher scheme */
- if (hw->n_cipher_schemes &&
- hw->cipher_schemes->cipher == key->cipher)
- key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
- else
- return -EOPNOTSUPP;
+ return -EOPNOTSUPP;
}
switch (cmd) {
@@ -3801,8 +3864,7 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
/* Use aux roc framework (HS20) */
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA, 0) >= 12) {
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12) {
u32 lmac_id;
lmac_id = iwl_mvm_get_lmac_id(mvm->fw,
@@ -4606,6 +4668,15 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
break;
case NL80211_IFTYPE_STATION:
/*
+ * In the new flow FW is in charge of timing the switch so there
+ * is no need for all of this
+ */
+ if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+ CHANNEL_SWITCH_ERROR_NOTIF,
+ 0))
+ break;
+
+ /*
* We haven't configured the firmware to be associated yet since
* we don't know the dtim period. In this case, the firmware can't
* track the beacons.
@@ -4676,6 +4747,14 @@ static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
.cs_mode = chsw->block_tx,
};
+ /*
+ * In the new flow FW is in charge of timing the switch so there is no
+ * need for all of this
+ */
+ if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+ CHANNEL_SWITCH_ERROR_NOTIF, 0))
+ return;
+
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CS_MODIFY))
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index d78f40730594..c6bc85d4600a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -1064,7 +1064,6 @@ struct iwl_mvm {
u32 ciphers[IWL_MVM_NUM_CIPHERS];
- struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
struct cfg80211_ftm_responder_stats ftm_resp_stats;
struct {
@@ -1086,7 +1085,6 @@ struct iwl_mvm {
} cmd_ver;
struct ieee80211_vif *nan_vif;
-#define IWL_MAX_BAID 32
struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID];
/*
@@ -1106,6 +1104,8 @@ struct iwl_mvm {
unsigned long last_6ghz_passive_scan_jiffies;
unsigned long last_reset_or_resume_time_jiffies;
+
+ bool sta_remove_requires_queue_remove;
};
/* Extract MVM priv from op_mode and _hw */
@@ -1671,6 +1671,8 @@ void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
/* Bindings */
int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -1932,10 +1934,6 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
void iwl_mvm_stop_device(struct iwl_mvm *mvm);
-/* Re-configure the SCD for a queue that has already been configured */
-int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
- int tid, int frame_limit, u16 ssn);
-
/* Thermal management and CT-kill */
void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
@@ -2085,6 +2083,8 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm,
struct iwl_rfi_lut_entry *rfi_table);
struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm);
+void iwl_rfi_deactivate_notif_handler(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band)
{
@@ -2159,8 +2159,7 @@ iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,
static inline int iwl_umac_scan_get_max_profiles(const struct iwl_fw *fw)
{
- u8 ver = iwl_fw_lookup_cmd_ver(fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
+ u8 ver = iwl_fw_lookup_cmd_ver(fw, SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
IWL_FW_CMD_VER_UNKNOWN);
return (ver == IWL_FW_CMD_VER_UNKNOWN || ver < 3) ?
IWL_SCAN_MAX_PROFILES : IWL_SCAN_MAX_PROFILES_V2;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
index 41880517e8bb..c7dabc6b3765 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
@@ -47,8 +47,7 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct iwl_proto_offload_cmd_common *common;
u32 enabled = 0, size;
u32 capa_flags = mvm->fw->ucode_capa.flags;
- int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- PROT_OFFLOAD_CONFIG_CMD, 0);
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 0);
#if IS_ENABLED(CONFIG_IPV6)
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 1f8b97995b94..b2f33ebdf485 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -24,6 +24,7 @@
#include "iwl-prph.h"
#include "rs.h"
#include "fw/api/scan.h"
+#include "fw/api/rfi.h"
#include "time-event.h"
#include "fw-api.h"
#include "fw/acpi.h"
@@ -32,6 +33,7 @@
#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IWLWIFI);
static const struct iwl_op_mode_ops iwl_mvm_ops;
static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
@@ -191,7 +193,7 @@ static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
if (he_cap) {
/* we know that ours is writable */
- struct ieee80211_sta_he_cap *he = (void *)he_cap;
+ struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap;
WARN_ON(!he->has_he);
WARN_ON(!(he->he_cap_elem.phy_cap_info[0] &
@@ -235,7 +237,8 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
*/
mvm->fw_static_smps_request =
req->event == cpu_to_le32(THERMAL_DUAL_CHAIN_REQ_DISABLE);
- ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ ieee80211_iterate_interfaces(mvm->hw,
+ IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER,
iwl_mvm_intf_dual_chain_req, NULL);
}
@@ -382,6 +385,10 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_START_NOTIF,
iwl_mvm_channel_switch_start_notif,
RX_HANDLER_SYNC, struct iwl_channel_switch_start_notif),
+ RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF,
+ iwl_mvm_channel_switch_error_notif,
+ RX_HANDLER_ASYNC_UNLOCKED,
+ struct iwl_channel_switch_error_notif),
RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
struct iwl_datapath_monitor_notif),
@@ -390,6 +397,10 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_rx_thermal_dual_chain_req,
RX_HANDLER_ASYNC_LOCKED,
struct iwl_thermal_dual_chain_request),
+
+ RX_HANDLER_GRP(SYSTEM_GROUP, RFI_DEACTIVATE_NOTIF,
+ iwl_rfi_deactivate_notif_handler, RX_HANDLER_ASYNC_UNLOCKED,
+ struct iwl_rfi_deactivate_notif),
};
#undef RX_HANDLER
#undef RX_HANDLER_GRP
@@ -443,7 +454,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
HCMD_NAME(POWER_TABLE_CMD),
HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF),
- HCMD_NAME(DC2DC_CONFIG_CMD),
HCMD_NAME(NVM_ACCESS_CMD),
HCMD_NAME(BEACON_NOTIFICATION),
HCMD_NAME(BEACON_TEMPLATE_CMD),
@@ -499,6 +509,7 @@ static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
HCMD_NAME(RFI_CONFIG_CMD),
HCMD_NAME(RFI_GET_FREQ_TABLE_CMD),
HCMD_NAME(SYSTEM_FEATURES_CONTROL_CMD),
+ HCMD_NAME(RFI_DEACTIVATE_NOTIF),
};
/* Please keep this array *SORTED* by hex value.
@@ -535,6 +546,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
HCMD_NAME(TLC_MNG_CONFIG_CMD),
HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
+ HCMD_NAME(SCD_QUEUE_CONFIG_CMD),
HCMD_NAME(MONITOR_NOTIF),
HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST),
HCMD_NAME(STA_PM_NOTIF),
@@ -633,13 +645,11 @@ unlock:
mutex_unlock(&mvm->mutex);
}
-static int iwl_mvm_fwrt_dump_start(void *ctx)
+static void iwl_mvm_fwrt_dump_start(void *ctx)
{
struct iwl_mvm *mvm = ctx;
mutex_lock(&mvm->mutex);
-
- return 0;
}
static void iwl_mvm_fwrt_dump_end(void *ctx)
@@ -1076,12 +1086,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (!hw)
return NULL;
- hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
if (cfg->max_tx_agg_size)
hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
else
- hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
op_mode = hw->priv;
@@ -1244,6 +1254,14 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.fw_reset_handshake = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE);
+ trans_cfg.queue_alloc_cmd_ver =
+ iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(DATA_PATH_GROUP,
+ SCD_QUEUE_CONFIG_CMD),
+ 0);
+ mvm->sta_remove_requires_queue_remove =
+ trans_cfg.queue_alloc_cmd_ver > 0;
+
/* Configure transport layer */
iwl_trans_configure(mvm->trans, &trans_cfg);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
index 9af40b0fa37a..a3cefbc43e80 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
*/
@@ -158,8 +158,7 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
/* we only support RLC command version 2 */
- if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
- RLC_CONFIG_CMD, 0) < 2)
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) < 2)
iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd->rxchain_info,
chains_static, chains_dynamic);
}
@@ -172,8 +171,7 @@ static int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm,
.phy_id = cpu_to_le32(ctxt->id),
};
- if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
- RLC_CONFIG_CMD, 0) < 2)
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) < 2)
return 0;
BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_DRIVER_FORCE !=
@@ -209,8 +207,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
u32 action)
{
int ret;
- int ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- PHY_CONTEXT_CMD, 1);
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1);
if (ver == 3 || ver == 4) {
struct iwl_phy_context_cmd cmd = {};
@@ -301,8 +298,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
lockdep_assert_held(&mvm->mutex);
- if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
- RLC_CONFIG_CMD, 0) >= 2 &&
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) >= 2 &&
ctxt->channel == chandef->chan &&
ctxt->width == chandef->width &&
ctxt->center_freq1 == chandef->center_freq1)
@@ -349,18 +345,31 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
* otherwise we might not be able to reuse this phy.
*/
if (ctxt->ref == 0) {
- struct ieee80211_channel *chan;
+ struct ieee80211_channel *chan = NULL;
struct cfg80211_chan_def chandef;
- struct ieee80211_supported_band *sband = NULL;
- enum nl80211_band band = NL80211_BAND_2GHZ;
+ struct ieee80211_supported_band *sband;
+ enum nl80211_band band;
+ int channel;
- while (!sband && band < NUM_NL80211_BANDS)
- sband = mvm->hw->wiphy->bands[band++];
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
+ sband = mvm->hw->wiphy->bands[band];
- if (WARN_ON(!sband))
- return;
+ if (!sband)
+ continue;
+
+ for (channel = 0; channel < sband->n_channels; channel++)
+ if (!(sband->channels[channel].flags &
+ IEEE80211_CHAN_DISABLED)) {
+ chan = &sband->channels[channel];
+ break;
+ }
- chan = &sband->channels[0];
+ if (chan)
+ break;
+ }
+
+ if (WARN_ON(!chan))
+ return;
cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
iwl_mvm_phy_ctxt_changed(mvm, ctxt, &chandef, 1, 1);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
index 3d0166df2002..c862bd243b55 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
index f054ce76bed5..bb77bc9aa821 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
@@ -125,12 +125,19 @@ struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm)
if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != resp_size))
return ERR_PTR(-EIO);
- resp = kzalloc(resp_size, GFP_KERNEL);
+ resp = kmemdup(cmd.resp_pkt->data, resp_size, GFP_KERNEL);
if (!resp)
return ERR_PTR(-ENOMEM);
- memcpy(resp, cmd.resp_pkt->data, resp_size);
-
iwl_free_resp(&cmd);
return resp;
}
+
+void iwl_rfi_deactivate_notif_handler(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rfi_deactivate_notif *notif = (void *)pkt->data;
+
+ IWL_INFO(mvm, "RFIm is deactivated, reason = %d\n", notif->reason);
+}
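
The rfi.c hunk above replaces a zeroing allocation followed by memcpy() with kmemdup(), which allocates and copies in one step. A userspace analogue, purely to illustrate the shape of the simplification (kmemdup itself is a kernel helper, not available here):

#include <stdlib.h>
#include <string.h>

/* Userspace stand-in for kmemdup(): allocate 'len' bytes and copy 'src'. */
static void *memdup(const void *src, size_t len)
{
	void *dst = malloc(len);

	return dst ? memcpy(dst, src, len) : NULL;
}
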
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index 66808c55aa0e..9830d2663689 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#include "rs.h"
#include "fw-api.h"
@@ -97,7 +97,10 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
if (he_cap->has_he &&
(he_cap->he_cap_elem.phy_cap_info[3] &
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK))
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK &&
+ sband->iftype_data &&
+ sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[3] &
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK))
flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK;
return flags;
@@ -420,7 +423,7 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct ieee80211_hw *hw = mvm->hw;
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
- u32 cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0);
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, TLC_MNG_CONFIG_CMD);
struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
u16 max_amsdu_len = rs_fw_get_max_amsdu_len(sta);
struct iwl_tlc_config_cmd_v4 cfg_cmd = {
@@ -449,8 +452,22 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
*/
sta->max_amsdu_len = max_amsdu_len;
- cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
- TLC_MNG_CONFIG_CMD, 0);
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(DATA_PATH_GROUP,
+ TLC_MNG_CONFIG_CMD),
+ 0);
+ IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, sta_id=%d, max_ch_width=%d, mode=%d\n",
+ cfg_cmd.sta_id, cfg_cmd.max_ch_width, cfg_cmd.mode);
+ IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, chains=0x%X, ch_wid_supp=%d, flags=0x%X\n",
+ cfg_cmd.chains, cfg_cmd.sgi_ch_width_supp, cfg_cmd.flags);
+ IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, mpdu_len=%d, no_ht_rate=0x%X, tx_op=%d\n",
+ cfg_cmd.max_mpdu_len, cfg_cmd.non_ht_rates, cfg_cmd.max_tx_op);
+ IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][0]=0x%X, ht_rate[1][0]=0x%X\n",
+ cfg_cmd.ht_rates[0][0], cfg_cmd.ht_rates[1][0]);
+ IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][1]=0x%X, ht_rate[1][1]=0x%X\n",
+ cfg_cmd.ht_rates[0][1], cfg_cmd.ht_rates[1][1]);
+ IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][2]=0x%X, ht_rate[1][2]=0x%X\n",
+ cfg_cmd.ht_rates[0][2], cfg_cmd.ht_rates[1][2]);
if (cmd_ver == 4) {
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC,
sizeof(cfg_cmd), &cfg_cmd);
@@ -474,8 +491,9 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
u16 cmd_size = sizeof(cfg_cmd_v3);
/* In old versions of the API the struct is 4 bytes smaller */
- if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
- TLC_MNG_CONFIG_CMD, 0) < 3)
+ if (iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(DATA_PATH_GROUP,
+ TLC_MNG_CONFIG_CMD), 0) < 3)
cmd_size -= 4;
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, cmd_size,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index f4d02f9fe16d..62114616317c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -454,8 +454,6 @@ static const u16 expected_tpt_mimo2_160MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 971, 0, 1925, 2861, 3779, 5574, 7304, 8147, 8976, 10592, 11640},
};
-#define MCS_INDEX_PER_STREAM (8)
-
static const char *rs_pretty_lq_type(enum iwl_table_type type)
{
static const char * const lq_types[] = {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 64446a11ef98..78198da7e55b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -83,8 +83,8 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
fraglen = len - hdrlen;
if (fraglen) {
- int offset = (void *)hdr + hdrlen -
- rxb_addr(rxb) + rxb_offset(rxb);
+ int offset = (u8 *)hdr + hdrlen -
+ (u8 *)rxb_addr(rxb) + rxb_offset(rxb);
skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
fraglen, rxb->truesize);
@@ -640,7 +640,7 @@ static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
u16 vif_id = mvmvif->id;
- if (WARN_ONCE(vif_id > MAC_INDEX_AUX, "invalid vif id: %d", vif_id))
+ if (WARN_ONCE(vif_id >= MAC_INDEX_AUX, "invalid vif id: %d", vif_id))
return;
if (vif->type != NL80211_IFTYPE_STATION)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 295629c5c035..2c43a9989783 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -209,13 +209,16 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
shdr->type != htons(ETH_P_PAE) &&
shdr->type != htons(ETH_P_TDLS))))
skb->ip_summed = CHECKSUM_NONE;
+ else
+ /* mac80211 assumes full CSUM including SNAP header */
+ skb_postpush_rcsum(skb, shdr, sizeof(*shdr));
}
fraglen = len - headlen;
if (fraglen) {
- int offset = (void *)hdr + headlen + pad_len -
- rxb_addr(rxb) + rxb_offset(rxb);
+ int offset = (u8 *)hdr + headlen + pad_len -
+ (u8 *)rxb_addr(rxb) + rxb_offset(rxb);
skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
fraglen, rxb->truesize);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 5f92a09db374..a4077053e374 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -20,7 +20,6 @@
#define IWL_SCAN_DWELL_FRAGMENTED 44
#define IWL_SCAN_DWELL_EXTENDED 90
#define IWL_SCAN_NUM_OF_FRAGS 3
-#define IWL_SCAN_LAST_2_4_CHN 14
/* adaptive dwell max budget time [TU] for full scan */
#define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
@@ -98,6 +97,7 @@ struct iwl_mvm_scan_params {
u32 n_6ghz_params;
bool scan_6ghz;
bool enable_6ghz_passive;
+ bool respect_p2p_go, respect_p2p_go_hb;
};
static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
@@ -169,17 +169,6 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
}
-static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int *global_cnt = data;
-
- if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
- mvmvif->phy_ctxt->id < NUM_PHY_CTX)
- *global_cnt += 1;
-}
-
static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
{
return mvm->tcm.result.global_load;
@@ -191,26 +180,31 @@ iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band)
return mvm->tcm.result.band_load[band];
}
-struct iwl_is_dcm_with_go_iterator_data {
+struct iwl_mvm_scan_iter_data {
+ u32 global_cnt;
struct ieee80211_vif *current_vif;
bool is_dcm_with_p2p_go;
};
-static void iwl_mvm_is_dcm_with_go_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
+static void iwl_mvm_scan_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
{
- struct iwl_is_dcm_with_go_iterator_data *data = _data;
- struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm_vif *curr_mvmvif =
- iwl_mvm_vif_from_mac80211(data->current_vif);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_scan_iter_data *data = _data;
+ struct iwl_mvm_vif *curr_mvmvif;
- /* exclude the given vif */
- if (vif == data->current_vif)
+ if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
+ mvmvif->phy_ctxt->id < NUM_PHY_CTX)
+ data->global_cnt += 1;
+
+ if (!data->current_vif || vif == data->current_vif)
return;
+ curr_mvmvif = iwl_mvm_vif_from_mac80211(data->current_vif);
+
if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
- other_mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt &&
- other_mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id)
+ mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt &&
+ mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id)
data->is_dcm_with_p2p_go = true;
}
@@ -220,13 +214,18 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
enum iwl_mvm_traffic_load load,
bool low_latency)
{
- int global_cnt = 0;
+ struct iwl_mvm_scan_iter_data data = {
+ .current_vif = vif,
+ .is_dcm_with_p2p_go = false,
+ .global_cnt = 0,
+ };
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_scan_condition_iterator,
- &global_cnt);
- if (!global_cnt)
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_scan_iterator,
+ &data);
+
+ if (!data.global_cnt)
return IWL_SCAN_TYPE_UNASSOC;
if (fw_has_api(&mvm->fw->ucode_capa,
@@ -235,23 +234,14 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
(!vif || vif->type != NL80211_IFTYPE_P2P_DEVICE))
return IWL_SCAN_TYPE_FRAGMENTED;
- /* in case of DCM with GO where BSS DTIM interval < 220msec
+ /*
+ * in case of DCM with GO where BSS DTIM interval < 220msec
* set all scan requests as fast-balance scan
- * */
+ */
if (vif && vif->type == NL80211_IFTYPE_STATION &&
- vif->bss_conf.dtim_period < 220) {
- struct iwl_is_dcm_with_go_iterator_data data = {
- .current_vif = vif,
- .is_dcm_with_p2p_go = false,
- };
-
- ieee80211_iterate_active_interfaces_atomic(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_is_dcm_with_go_iterator,
- &data);
- if (data.is_dcm_with_p2p_go)
- return IWL_SCAN_TYPE_FAST_BALANCE;
- }
+ vif->bss_conf.dtim_period < 220 &&
+ data.is_dcm_with_p2p_go)
+ return IWL_SCAN_TYPE_FAST_BALANCE;
}
if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
@@ -651,9 +641,7 @@ static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
NL80211_BAND_2GHZ,
no_cck);
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA,
- 0) < 12) {
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
@@ -1090,8 +1078,7 @@ static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
/* This function should not be called when using ADD_STA ver >=12 */
- WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA, 0) >= 12);
+ WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12);
cfg->bcast_sta_id = mvm->aux_sta.sta_id;
cfg->channel_flags = channel_flags;
@@ -1142,8 +1129,7 @@ static void iwl_mvm_fill_scan_config_v2(struct iwl_mvm *mvm, void *config,
memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
/* This function should not be called when using ADD_STA ver >=12 */
- WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA, 0) >= 12);
+ WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12);
cfg->bcast_sta_id = mvm->aux_sta.sta_id;
cfg->channel_flags = channel_flags;
@@ -1156,7 +1142,7 @@ static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
void *cfg;
int ret, cmd_size;
struct iwl_host_cmd cmd = {
- .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+ .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_CFG_CMD),
};
enum iwl_mvm_scan_type type;
enum iwl_mvm_scan_type hb_type = IWL_SCAN_TYPE_NOT_SET;
@@ -1247,7 +1233,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
{
struct iwl_scan_config cfg;
struct iwl_host_cmd cmd = {
- .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+ .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_CFG_CMD),
.len[0] = sizeof(cfg),
.data[0] = &cfg,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
@@ -1258,11 +1244,9 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
memset(&cfg, 0, sizeof(cfg));
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA, 0) < 12) {
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) {
cfg.bcast_sta_id = mvm->aux_sta.sta_id;
- } else if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- SCAN_CFG_CMD, 0) < 5) {
+ } else if (iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_CFG_CMD, 0) < 5) {
/*
* Fw doesn't use this sta anymore. Deprecated on SCAN_CFG_CMD
* version 5.
@@ -1662,7 +1646,7 @@ iwl_mvm_umac_scan_cfg_channels_v6(struct iwl_mvm *mvm,
}
}
-static int
+static void
iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct iwl_scan_probe_params_v4 *pp)
@@ -1731,31 +1715,40 @@ iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,
pp->short_ssid_num = idex_s;
pp->bssid_num = idex_b;
- return 0;
}
/* TODO: this function can be merged with iwl_mvm_scan_umac_fill_ch_p_v6 */
-static void
-iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params,
+static u32
+iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
u32 n_channels,
struct iwl_scan_probe_params_v4 *pp,
struct iwl_scan_channel_params_v6 *cp,
enum nl80211_iftype vif_type)
{
- struct iwl_scan_channel_cfg_umac *channel_cfg = cp->channel_config;
int i;
struct cfg80211_scan_6ghz_params *scan_6ghz_params =
params->scan_6ghz_params;
+ u32 ch_cnt;
- for (i = 0; i < params->n_channels; i++) {
+ for (i = 0, ch_cnt = 0; i < params->n_channels; i++) {
struct iwl_scan_channel_cfg_umac *cfg =
- &cp->channel_config[i];
+ &cp->channel_config[ch_cnt];
u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0;
u8 j, k, s_max = 0, b_max = 0, n_used_bssid_entries;
bool force_passive, found = false, allow_passive = true,
unsolicited_probe_on_chan = false, psc_no_listen = false;
+ /*
+ * Avoid performing passive scan on non PSC channels unless the
+ * scan is specifically a passive scan, i.e., no SSIDs
+ * configured in the scan command.
+ */
+ if (!cfg80211_channel_is_psc(params->channels[i]) &&
+ !params->n_6ghz_params && params->n_ssids)
+ continue;
+
cfg->v1.channel_num = params->channels[i]->hw_value;
cfg->v2.band = 2;
cfg->v2.iter_count = 1;
@@ -1875,8 +1868,16 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params,
else
flags |= bssid_bitmap | (s_ssid_bitmap << 16);
- channel_cfg[i].flags |= cpu_to_le32(flags);
+ cfg->flags |= cpu_to_le32(flags);
+ ch_cnt++;
}
+
+ if (params->n_channels > ch_cnt)
+ IWL_DEBUG_SCAN(mvm,
+ "6GHz: reducing number channels: (%u->%u)\n",
+ params->n_channels, ch_cnt);
+
+ return ch_cnt;
}
static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm,
@@ -1893,9 +1894,25 @@ static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm,
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
/* set fragmented ebs for fragmented scan on HB channels */
- if (iwl_mvm_is_scan_fragmented(params->hb_type))
+ if ((!iwl_mvm_is_cdb_supported(mvm) &&
+ iwl_mvm_is_scan_fragmented(params->type)) ||
+ (iwl_mvm_is_cdb_supported(mvm) &&
+ iwl_mvm_is_scan_fragmented(params->hb_type)))
flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;
+ /*
+ * force EBS in case the scan is fragmented and there is a need to take the
+ * P2P GO operation into consideration during the scan operation.
+ */
+ if ((!iwl_mvm_is_cdb_supported(mvm) &&
+ iwl_mvm_is_scan_fragmented(params->type) && params->respect_p2p_go) ||
+ (iwl_mvm_is_cdb_supported(mvm) &&
+ iwl_mvm_is_scan_fragmented(params->hb_type) &&
+ params->respect_p2p_go_hb)) {
+ IWL_DEBUG_SCAN(mvm, "Respect P2P GO. Force EBS\n");
+ flags |= IWL_SCAN_CHANNEL_FLAG_FORCE_EBS;
+ }
+
return flags;
}
@@ -2046,6 +2063,26 @@ static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm,
return flags;
}
+static u8 iwl_mvm_scan_umac_flags2(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif, int type)
+{
+ u8 flags = 0;
+
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ if (params->respect_p2p_go)
+ flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB;
+ if (params->respect_p2p_go_hb)
+ flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB;
+ } else {
+ if (params->respect_p2p_go)
+ flags = IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB |
+ IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB;
+ }
+
+ return flags;
+}
+
static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif)
@@ -2164,7 +2201,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
struct iwl_scan_umac_chan_param *chan_param;
void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm);
- void *sec_part = cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) *
+ void *sec_part = (u8 *)cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels;
struct iwl_scan_req_umac_tail_v2 *tail_v2 =
(struct iwl_scan_req_umac_tail_v2 *)sec_part;
@@ -2248,13 +2285,17 @@ iwl_mvm_scan_umac_fill_general_p_v11(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif,
struct iwl_scan_general_params_v11 *gp,
- u16 gen_flags)
+ u16 gen_flags, u8 gen_flags2)
{
struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
iwl_mvm_scan_umac_dwell_v11(mvm, gp, params);
+ IWL_DEBUG_SCAN(mvm, "Gerenal: flags=0x%x, flags2=0x%x\n",
+ gen_flags, gen_flags2);
+
gp->flags = cpu_to_le16(gen_flags);
+ gp->flags2 = gen_flags2;
if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
gp->num_of_fragments[SCAN_LB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
@@ -2358,7 +2399,7 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
iwl_mvm_scan_umac_fill_general_p_v11(mvm, params, vif,
&scan_p->general_params,
- gen_flags);
+ gen_flags, 0);
ret = iwl_mvm_fill_scan_sched_params(params,
scan_p->periodic_params.schedule,
@@ -2384,6 +2425,7 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
struct iwl_scan_probe_params_v4 *pb = &scan_p->probe_params;
int ret;
u16 gen_flags;
+ u8 gen_flags2;
u32 bitmap_ssid = 0;
mvm->scan_uid_status[uid] = type;
@@ -2392,9 +2434,15 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
cmd->uid = cpu_to_le32(uid);
gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
+
+ if (version >= 15)
+ gen_flags2 = iwl_mvm_scan_umac_flags2(mvm, params, vif, type);
+ else
+ gen_flags2 = 0;
+
iwl_mvm_scan_umac_fill_general_p_v11(mvm, params, vif,
&scan_p->general_params,
- gen_flags);
+ gen_flags, gen_flags2);
ret = iwl_mvm_fill_scan_sched_params(params,
scan_p->periodic_params.schedule,
@@ -2417,14 +2465,16 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS;
- ret = iwl_mvm_umac_scan_fill_6g_chan_list(mvm, params, pb);
- if (ret)
- return ret;
+ iwl_mvm_umac_scan_fill_6g_chan_list(mvm, params, pb);
+
+ cp->count = iwl_mvm_umac_scan_cfg_channels_v6_6g(mvm, params,
+ params->n_channels,
+ pb, cp, vif->type);
+ if (!cp->count) {
+ mvm->scan_uid_status[uid] = 0;
+ return -EINVAL;
+ }
- iwl_mvm_umac_scan_cfg_channels_v6_6g(params,
- params->n_channels,
- pb, cp, vif->type);
- cp->count = params->n_channels;
if (!params->n_ssids ||
(params->n_ssids == 1 && !params->ssids[0].ssid_len))
cp->flags |= IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER;
@@ -2588,10 +2638,9 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
if (uid < 0)
return uid;
- hcmd->id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
+ hcmd->id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_REQ_UMAC);
- scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_REQ_UMAC,
+ scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC,
IWL_FW_CMD_VER_UNKNOWN);
for (i = 0; i < ARRAY_SIZE(iwl_scan_umac_handlers); i++) {
@@ -2611,6 +2660,85 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
return uid;
}
+struct iwl_mvm_scan_respect_p2p_go_iter_data {
+ struct ieee80211_vif *current_vif;
+ bool p2p_go;
+ enum nl80211_band band;
+};
+
+static void iwl_mvm_scan_respect_p2p_go_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_scan_respect_p2p_go_iter_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ /* exclude the given vif */
+ if (vif == data->current_vif)
+ return;
+
+ if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
+ mvmvif->phy_ctxt->id < NUM_PHY_CTX &&
+ (data->band == NUM_NL80211_BANDS ||
+ mvmvif->phy_ctxt->channel->band == data->band))
+ data->p2p_go = true;
+}
+
+static bool _iwl_mvm_get_respect_p2p_go(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool low_latency,
+ enum nl80211_band band)
+{
+ struct iwl_mvm_scan_respect_p2p_go_iter_data data = {
+ .current_vif = vif,
+ .p2p_go = false,
+ .band = band,
+ };
+
+ if (!low_latency)
+ return false;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_scan_respect_p2p_go_iter,
+ &data);
+
+ return data.p2p_go;
+}
+
+static bool iwl_mvm_get_respect_p2p_go_band(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ enum nl80211_band band)
+{
+ bool low_latency = iwl_mvm_low_latency_band(mvm, band);
+
+ return _iwl_mvm_get_respect_p2p_go(mvm, vif, low_latency, band);
+}
+
+static bool iwl_mvm_get_respect_p2p_go(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ bool low_latency = iwl_mvm_low_latency(mvm);
+
+ return _iwl_mvm_get_respect_p2p_go(mvm, vif, low_latency,
+ NUM_NL80211_BANDS);
+}
+
+static void iwl_mvm_fill_respect_p2p_go(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif)
+{
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ params->respect_p2p_go =
+ iwl_mvm_get_respect_p2p_go_band(mvm, vif,
+ NL80211_BAND_2GHZ);
+ params->respect_p2p_go_hb =
+ iwl_mvm_get_respect_p2p_go_band(mvm, vif,
+ NL80211_BAND_5GHZ);
+ } else {
+ params->respect_p2p_go = iwl_mvm_get_respect_p2p_go(mvm, vif);
+ }
+}
+
int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req,
struct ieee80211_scan_ies *ies)
@@ -2662,6 +2790,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
params.scan_6ghz_params = req->scan_6ghz_params;
params.scan_6ghz = req->scan_6ghz;
iwl_mvm_fill_scan_type(mvm, &params, vif);
+ iwl_mvm_fill_respect_p2p_go(mvm, &params, vif);
if (req->duration)
params.iter_notif = true;
@@ -2753,6 +2882,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
params.scan_plans = req->scan_plans;
iwl_mvm_fill_scan_type(mvm, &params, vif);
+ iwl_mvm_fill_respect_p2p_go(mvm, &params, vif);
/* In theory, LMAC scans can handle a 32-bit delay, but since
* waiting for over 18 hours to start the scan is a bit silly
@@ -2922,8 +3052,7 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
ret = iwl_mvm_send_cmd_pdu(mvm,
- iwl_cmd_id(SCAN_ABORT_UMAC,
- IWL_ALWAYS_LONG_GROUP, 0),
+ WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC),
0, sizeof(cmd), &cmd);
if (!ret)
mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
@@ -2978,8 +3107,7 @@ static int iwl_scan_req_umac_get_size(u8 scan_ver)
int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
int base_size, tail_size;
- u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_REQ_UMAC,
+ u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC,
IWL_FW_CMD_VER_UNKNOWN);
base_size = iwl_scan_req_umac_get_size(scan_ver);
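
The 6 GHz channel-config hunk in this file switches from indexing the output array with the input index i to a separate ch_cnt counter, so skipped non-PSC channels no longer leave gaps in the channel configuration. A generic sketch of that filter-and-compact pattern on a plain integer array; the skip condition here is hypothetical, standing in for the PSC check:

#include <stdio.h>

/* Keep only entries that pass the filter, writing them contiguously. */
static unsigned int compact(int *vals, unsigned int n)
{
	unsigned int i, cnt = 0;

	for (i = 0; i < n; i++) {
		if (vals[i] < 32)	/* hypothetical "skip this channel" condition */
			continue;
		vals[cnt++] = vals[i];
	}
	return cnt;
}

int main(void)
{
	int chans[] = { 1, 5, 21, 37, 53, 69, 117 };
	unsigned int kept = compact(chans, sizeof(chans) / sizeof(chans[0]));

	printf("kept %u channels, first is %d\n", kept, chans[0]);
	return 0;
}

Returning the compacted count (cp->count in the hunk) also gives the caller a natural way to reject a request that filtered down to zero channels.
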
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index feab0bfcd7a2..c7f9d3870f21 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2015, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2015, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -87,6 +87,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_320:
case IEEE80211_STA_RX_BW_160:
add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
fallthrough;
@@ -316,7 +317,7 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
}
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- u16 *queueptr, u8 tid, u8 flags)
+ u16 *queueptr, u8 tid)
{
int queue = *queueptr;
struct iwl_scd_txq_cfg_cmd cmd = {
@@ -325,11 +326,28 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
};
int ret;
+ lockdep_assert_held(&mvm->mutex);
+
if (iwl_mvm_has_new_tx_api(mvm)) {
+ if (mvm->sta_remove_requires_queue_remove) {
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP,
+ SCD_QUEUE_CONFIG_CMD);
+ struct iwl_scd_queue_cfg_cmd remove_cmd = {
+ .operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
+ .u.remove.queue = cpu_to_le32(queue),
+ };
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0,
+ sizeof(remove_cmd),
+ &remove_cmd);
+ } else {
+ ret = 0;
+ }
+
iwl_trans_txq_free(mvm->trans, queue);
*queueptr = IWL_MVM_INVALID_QUEUE;
- return 0;
+ return ret;
}
if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
@@ -373,7 +391,7 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
mvm->queue_info[queue].reserved = false;
iwl_trans_txq_disable(mvm->trans, queue, false);
- ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
if (ret)
@@ -512,7 +530,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
iwl_mvm_invalidate_sta_queue(mvm, queue,
disable_agg_tids, false);
- ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid, 0);
+ ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid);
if (ret) {
IWL_ERR(mvm,
"Failed to free inactive queue %d (ret=%d)\n",
@@ -596,6 +614,39 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
return queue;
}
+/* Re-configure the SCD for a queue that has already been configured */
+static int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo,
+ int sta_id, int tid, int frame_limit, u16 ssn)
+{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_ENABLE_QUEUE,
+ .window = frame_limit,
+ .sta_id = sta_id,
+ .ssn = cpu_to_le16(ssn),
+ .tx_fifo = fifo,
+ .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+ queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
+ .tid = tid,
+ };
+ int ret;
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
+ if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
+ "Trying to reconfig unallocated queue %d\n", queue))
+ return -ENXIO;
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+ WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
+ queue, fifo, ret);
+
+ return ret;
+}
+
/*
* If a given queue has a higher AC than the TID stream that is being compared
* to, the queue needs to be redirected to the lower AC. This function does that
@@ -716,21 +767,40 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
u8 sta_id, u8 tid, unsigned int timeout)
{
- int queue, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
- mvm->trans->cfg->min_256_ba_txq_size);
+ int queue, size;
if (tid == IWL_MAX_TID_COUNT) {
tid = IWL_MGMT_TID;
size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
mvm->trans->cfg->min_txq_size);
+ } else {
+ struct ieee80211_sta *sta;
+
+ rcu_read_lock();
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ /* this queue isn't used for traffic (cab_queue) */
+ if (IS_ERR_OR_NULL(sta)) {
+ size = IWL_MGMT_QUEUE_SIZE;
+ } else if (sta->he_cap.has_he) {
+ /* support for 256 ba size */
+ size = IWL_DEFAULT_QUEUE_SIZE_HE;
+ } else {
+ size = IWL_DEFAULT_QUEUE_SIZE;
+ }
+
+ rcu_read_unlock();
}
- do {
- __le16 enable = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE);
+ /* take the min with bc tbl entries allowed */
+ size = min_t(u32, size, mvm->trans->txqs.bc_tbl_size / sizeof(u16));
- queue = iwl_trans_txq_alloc(mvm->trans, enable,
- sta_id, tid, SCD_QUEUE_CFG,
- size, timeout);
+ /* size needs to be a power of 2 for calculating read/write pointers */
+ size = rounddown_pow_of_two(size);
+
+ do {
+ queue = iwl_trans_txq_alloc(mvm->trans, 0, BIT(sta_id),
+ tid, size, timeout);
if (queue < 0)
IWL_DEBUG_TX_QUEUES(mvm,
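The size selection added above clamps the preferred TXQ depth to what the byte-count table can describe and then rounds it down to a power of two. A minimal standalone sketch of that computation, using the same kernel helpers min_t() and rounddown_pow_of_two(); the helper name and its separation into a function are illustrative, not part of the patch:

	/* illustrative sketch of the TVQM sizing logic, not driver code */
	static u32 pick_tvqm_queue_size(u32 preferred, u32 bc_tbl_size)
	{
		/* never ask for more entries than the byte-count table holds */
		u32 size = min_t(u32, preferred, bc_tbl_size / sizeof(u16));

		/* read/write pointer arithmetic assumes a power-of-two ring */
		return rounddown_pow_of_two(size);
	}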
@@ -1019,12 +1089,12 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
* Remove the ones that did.
*/
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
- u16 tid_bitmap;
+ u16 q_tid_bitmap;
mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
- tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+ q_tid_bitmap = mvm->queue_info[queue].tid_bitmap;
/*
* We need to take into account a situation in which a TXQ was
@@ -1037,7 +1107,7 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
* Mark this queue in the right bitmap, we'll send the command
* to the firmware later.
*/
- if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
+ if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
set_bit(queue, changetid_queues);
IWL_DEBUG_TX_QUEUES(mvm,
@@ -1337,7 +1407,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
out_err:
queue_tmp = queue;
- iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid, 0);
+ iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid);
return ret;
}
@@ -1516,8 +1586,7 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
memset(&cmd, 0, sizeof(cmd));
cmd.sta_id = sta->sta_id;
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, ADD_STA,
- 0) >= 12 &&
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12 &&
sta->type == IWL_STA_AUX_ACTIVITY)
cmd.mac_id_n_color = cpu_to_le32(mac_id);
else
@@ -1784,8 +1853,7 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
continue;
- iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i,
- 0);
+ iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i);
mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
}
@@ -1993,7 +2061,7 @@ static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
if (ret) {
if (!iwl_mvm_has_new_tx_api(mvm))
iwl_mvm_disable_txq(mvm, NULL, queue,
- IWL_MAX_TID_COUNT, 0);
+ IWL_MAX_TID_COUNT);
return ret;
}
@@ -2065,7 +2133,7 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
return -EINVAL;
- iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
+ iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT);
ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
if (ret)
IWL_WARN(mvm, "Failed sending remove station\n");
@@ -2082,7 +2150,7 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
return -EINVAL;
- iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
+ iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT);
ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
if (ret)
IWL_WARN(mvm, "Failed sending remove station\n");
@@ -2199,7 +2267,7 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
}
queue = *queueptr;
- iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT, 0);
+ iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT);
if (iwl_mvm_has_new_tx_api(mvm))
return;
@@ -2434,7 +2502,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true);
- iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0, 0);
+ iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0);
ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
if (ret)
@@ -2443,8 +2511,6 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return ret;
}
-#define IWL_MAX_RX_BA_SESSIONS 16
-
static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
struct iwl_mvm_delba_data notif = {
@@ -2526,18 +2592,126 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
}
}
+static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta,
+ bool start, int tid, u16 ssn,
+ u16 buf_size)
+{
+ struct iwl_mvm_add_sta_cmd cmd = {
+ .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
+ .sta_id = mvm_sta->sta_id,
+ .add_modify = STA_MODE_MODIFY,
+ };
+ u32 status;
+ int ret;
+
+ if (start) {
+ cmd.add_immediate_ba_tid = tid;
+ cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
+ cmd.rx_ba_window = cpu_to_le16(buf_size);
+ cmd.modify_mask = STA_MODIFY_ADD_BA_TID;
+ } else {
+ cmd.remove_immediate_ba_tid = tid;
+ cmd.modify_mask = STA_MODIFY_REMOVE_BA_TID;
+ }
+
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+ iwl_mvm_add_sta_cmd_size(mvm),
+ &cmd, &status);
+ if (ret)
+ return ret;
+
+ switch (status & IWL_ADD_STA_STATUS_MASK) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
+ start ? "start" : "stopp");
+ if (WARN_ON(start && iwl_mvm_has_new_rx_api(mvm) &&
+ !(status & IWL_ADD_STA_BAID_VALID_MASK)))
+ return -EINVAL;
+ return u32_get_bits(status, IWL_ADD_STA_BAID_MASK);
+ case ADD_STA_IMMEDIATE_BA_FAILURE:
+ IWL_WARN(mvm, "RX BA Session refused by fw\n");
+ return -ENOSPC;
+ default:
+ IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
+ start ? "start" : "stopp", status);
+ return -EIO;
+ }
+}
+
+static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta,
+ bool start, int tid, u16 ssn,
+ u16 buf_size, int baid)
+{
+ struct iwl_rx_baid_cfg_cmd cmd = {
+ .action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
+ cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
+ };
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
+ int ret;
+
+ BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
+
+ if (start) {
+ cmd.alloc.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id));
+ cmd.alloc.tid = tid;
+ cmd.alloc.ssn = cpu_to_le16(ssn);
+ cmd.alloc.win_size = cpu_to_le16(buf_size);
+ baid = -EIO;
+ } else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) {
+ cmd.remove_v1.baid = cpu_to_le32(baid);
+ BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
+ } else {
+ cmd.remove.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id));
+ cmd.remove.tid = cpu_to_le32(tid);
+ }
+
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd),
+ &cmd, &baid);
+ if (ret)
+ return ret;
+
+ if (!start) {
+ /* ignore firmware baid on remove */
+ baid = 0;
+ }
+
+ IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
+ start ? "start" : "stopp");
+
+ if (baid < 0 || baid >= ARRAY_SIZE(mvm->baid_map))
+ return -EINVAL;
+
+ return baid;
+}
+
+static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta,
+ bool start, int tid, u16 ssn, u16 buf_size,
+ int baid)
+{
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT))
+ return iwl_mvm_fw_baid_op_cmd(mvm, mvm_sta, start,
+ tid, ssn, buf_size, baid);
+
+ return iwl_mvm_fw_baid_op_sta(mvm, mvm_sta, start,
+ tid, ssn, buf_size);
+}
+
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
{
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_mvm_add_sta_cmd cmd = {};
struct iwl_mvm_baid_data *baid_data = NULL;
- int ret;
- u32 status;
+ int ret, baid;
+ u32 max_ba_id_sessions = iwl_mvm_has_new_tx_api(mvm) ? IWL_MAX_BAID :
+ IWL_MAX_BAID_OLD;
lockdep_assert_held(&mvm->mutex);
- if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
+ if (start && mvm->rx_ba_sessions >= max_ba_id_sessions) {
IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
return -ENOSPC;
}
@@ -2583,59 +2757,29 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
reorder_buf_size / sizeof(baid_data->entries[0]);
}
- cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
- cmd.sta_id = mvm_sta->sta_id;
- cmd.add_modify = STA_MODE_MODIFY;
- if (start) {
- cmd.add_immediate_ba_tid = (u8) tid;
- cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
- cmd.rx_ba_window = cpu_to_le16(buf_size);
+ if (iwl_mvm_has_new_rx_api(mvm) && !start) {
+ baid = mvm_sta->tid_to_baid[tid];
} else {
- cmd.remove_immediate_ba_tid = (u8) tid;
+ /* we don't really need it in this case */
+ baid = -1;
}
- cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
- STA_MODIFY_REMOVE_BA_TID;
- status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
- iwl_mvm_add_sta_cmd_size(mvm),
- &cmd, &status);
- if (ret)
- goto out_free;
+ /* Don't send command to remove (start=0) BAID during restart */
+ if (start || !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ baid = iwl_mvm_fw_baid_op(mvm, mvm_sta, start, tid, ssn, buf_size,
+ baid);
- switch (status & IWL_ADD_STA_STATUS_MASK) {
- case ADD_STA_SUCCESS:
- IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
- start ? "start" : "stopp");
- break;
- case ADD_STA_IMMEDIATE_BA_FAILURE:
- IWL_WARN(mvm, "RX BA Session refused by fw\n");
- ret = -ENOSPC;
- break;
- default:
- ret = -EIO;
- IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
- start ? "start" : "stopp", status);
- break;
- }
-
- if (ret)
+ if (baid < 0) {
+ ret = baid;
goto out_free;
+ }
if (start) {
- u8 baid;
-
mvm->rx_ba_sessions++;
if (!iwl_mvm_has_new_rx_api(mvm))
return 0;
- if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
- ret = -EINVAL;
- goto out_free;
- }
- baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
- IWL_ADD_STA_BAID_SHIFT);
baid_data->baid = baid;
baid_data->timeout = timeout;
baid_data->last_rx = jiffies;
@@ -2663,7 +2807,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
rcu_assign_pointer(mvm->baid_map[baid], baid_data);
} else {
- u8 baid = mvm_sta->tid_to_baid[tid];
+ baid = mvm_sta->tid_to_baid[tid];
if (mvm->rx_ba_sessions > 0)
/* check that restart flow didn't zero the counter */
@@ -3238,8 +3382,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
int i, size;
bool new_api = fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
- int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA_KEY,
+ int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY,
new_api ? 2 : 1);
if (sta_id == IWL_MVM_INVALID_STA)
@@ -3939,7 +4082,7 @@ void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
- if (!WARN_ON(!mvmsta))
+ if (mvmsta)
iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
rcu_read_unlock();
@@ -3998,3 +4141,21 @@ out:
iwl_mvm_dealloc_int_sta(mvm, sta);
return ret;
}
+
+void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 mac_id)
+{
+ struct iwl_cancel_channel_switch_cmd cancel_channel_switch_cmd = {
+ .mac_id = cpu_to_le32(mac_id),
+ };
+ int ret;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP, CANCEL_CHANNEL_SWITCH_CMD),
+ CMD_ASYNC,
+ sizeof(cancel_channel_switch_cmd),
+ &cancel_channel_switch_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to cancel the channel switch\n");
+}
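iwl_mvm_cancel_channel_switch() above is fire-and-forget (CMD_ASYNC) and only needs the firmware MAC id. A hypothetical call site from the channel-switch abort path, shown for illustration only (the actual hook-up is outside this hunk):

	/* hypothetical caller; mvmvif->id is the firmware MAC id */
	static void example_abort_channel_switch(struct iwl_mvm *mvm,
						 struct ieee80211_vif *vif)
	{
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		iwl_mvm_cancel_channel_switch(mvm, vif, mvmvif->id);
	}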
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index e34b82b2a288..f1a4fc3e4038 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -548,4 +548,7 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
u8 *key, u32 key_len);
+void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 mac_id);
#endif /* __sta_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index ab06dcda1462..6edf2b79db43 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -97,8 +97,7 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
/* In newer version of this command an aux station is added only
* in cases of dedicated tx queue and need to be removed in end
* of use */
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
- ADD_STA, 0) >= 12)
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12)
iwl_mvm_rm_aux_sta(mvm);
}
@@ -658,8 +657,8 @@ static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
};
int ret;
- ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
- MAC_CONF_GROUP, 0),
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
0, sizeof(cmd), &cmd);
if (ret)
IWL_ERR(mvm,
@@ -923,8 +922,8 @@ iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
}
cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id);
- return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
- MAC_CONF_GROUP, 0),
+ return iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
0, sizeof(cmd), &cmd);
}
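The conversions in this and the surrounding hunks replace the old three-argument iwl_cmd_id() helper with WIDE_ID(); both pack the command group into the upper byte of the command id, so the change is mechanical. For orientation, the packing is roughly:

	/* roughly how a wide command id is formed (group in the upper byte);
	 * example name only, the driver's own macro is WIDE_ID() */
	#define EXAMPLE_WIDE_ID(grp, opcode)	(((grp) << 8) | (opcode))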
@@ -1162,8 +1161,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
- const u16 notif[] = { iwl_cmd_id(SESSION_PROTECTION_NOTIF,
- MAC_CONF_GROUP, 0) };
+ const u16 notif[] = { WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF) };
struct iwl_notification_wait wait_notif;
struct iwl_mvm_session_prot_cmd cmd = {
.id_and_color =
@@ -1201,8 +1199,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
if (!wait_for_notif) {
if (iwl_mvm_send_cmd_pdu(mvm,
- iwl_cmd_id(SESSION_PROTECTION_CMD,
- MAC_CONF_GROUP, 0),
+ WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
0, sizeof(cmd), &cmd)) {
IWL_ERR(mvm,
"Couldn't send the SESSION_PROTECTION_CMD\n");
@@ -1219,8 +1216,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
iwl_mvm_session_prot_notif, NULL);
if (iwl_mvm_send_cmd_pdu(mvm,
- iwl_cmd_id(SESSION_PROTECTION_CMD,
- MAC_CONF_GROUP, 0),
+ WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
0, sizeof(cmd), &cmd)) {
IWL_ERR(mvm,
"Couldn't send the SESSION_PROTECTION_CMD\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 398390c59344..69cf3a372759 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2019-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2019-2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
@@ -160,6 +160,11 @@ void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
notif = (struct ct_kill_notif *)pkt->data;
IWL_DEBUG_TEMP(mvm, "CT Kill notification temperature = %d\n",
notif->temperature);
+ if (iwl_fw_lookup_notif_ver(mvm->fw, PHY_OPS_GROUP,
+ CT_KILL_NOTIFICATION, 0) > 1)
+ IWL_DEBUG_TEMP(mvm,
+ "CT kill notification DTS bitmap = 0x%x, Scheme = %d\n",
+ notif->dts, notif->scheme);
iwl_mvm_enter_ctkill(mvm);
}
@@ -240,8 +245,8 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp)
* a response. For older versions we send the command and wait for a
* notification (no command TLV for previous versions).
*/
- cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
- CMD_DTS_MEASUREMENT_TRIGGER_WIDE,
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(PHY_OPS_GROUP, CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver == 1)
return iwl_mvm_send_temp_cmd(mvm, true, temp);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 9213f8518f10..7763037b93ed 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -318,15 +318,14 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
/* info->control is only relevant for non HW rate control */
if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) {
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
/* HT rate doesn't make sense for a non data frame */
WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS &&
!ieee80211_is_data(fc),
"Got a HT rate (flags:0x%x/mcs:%d/fc:0x%x/state:%d) for a non data frame\n",
info->control.rates[0].flags,
info->control.rates[0].idx,
- le16_to_cpu(fc), sta ? mvmsta->sta_state : -1);
+ le16_to_cpu(fc),
+ sta ? iwl_mvm_sta_from_mac80211(sta)->sta_state : -1);
rate_idx = info->control.rates[0].idx;
}
@@ -351,7 +350,7 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
is_cck = (rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE);
/* Set CCK or OFDM flag */
- if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 8) {
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) > 8) {
if (!is_cck)
rate_flags |= RATE_MCS_LEGACY_OFDM_MSK;
else
@@ -654,7 +653,8 @@ static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm,
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt;
struct iwl_probe_resp_data *resp_data;
- u8 *ie, *pos;
+ const u8 *ie;
+ u8 *pos;
u8 match[] = {
(WLAN_OUI_WFA >> 16) & 0xff,
(WLAN_OUI_WFA >> 8) & 0xff,
@@ -671,10 +671,10 @@ static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm,
if (!resp_data->notif.noa_active)
goto out;
- ie = (u8 *)cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC,
- mgmt->u.probe_resp.variable,
- skb->len - base_len,
- match, 4, 2);
+ ie = cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC,
+ mgmt->u.probe_resp.variable,
+ skb->len - base_len,
+ match, 4, 2);
if (!ie) {
IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n");
goto out;
@@ -1602,8 +1602,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
seq_ctl = le16_to_cpu(hdr->seq_ctrl);
if (unlikely(!seq_ctl)) {
- struct ieee80211_hdr *hdr = (void *)skb->data;
-
/*
* If it is an NDP, we can't update next_reclaim since
* its sequence control is 0. Note that for that same
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 1f3e90e5dbd4..bc947733d982 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -169,8 +169,7 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx)
{
- if (iwl_fw_lookup_cmd_ver(fw, LONG_GROUP,
- TX_CMD, 0) > 8)
+ if (iwl_fw_lookup_cmd_ver(fw, TX_CMD, 0) > 8)
/* In the new rate legacy rates are indexed:
* 0 - 3 for CCK and 0 - 7 for OFDM.
*/
@@ -241,38 +240,6 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
return last_idx;
}
-int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
- int tid, int frame_limit, u16 ssn)
-{
- struct iwl_scd_txq_cfg_cmd cmd = {
- .scd_queue = queue,
- .action = SCD_CFG_ENABLE_QUEUE,
- .window = frame_limit,
- .sta_id = sta_id,
- .ssn = cpu_to_le16(ssn),
- .tx_fifo = fifo,
- .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
- queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
- .tid = tid,
- };
- int ret;
-
- if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
- return -EINVAL;
-
- if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
- "Trying to reconfig unallocated queue %d\n", queue))
- return -ENXIO;
-
- IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
-
- ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
- WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
- queue, fifo, ret);
-
- return ret;
-}
-
/**
* iwl_mvm_send_lq_cmd() - Send link quality command
* @mvm: Driver data.
@@ -480,8 +447,7 @@ void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
cmd.low_latency_tx = 1;
}
- if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(LOW_LATENCY_CMD,
- MAC_CONF_GROUP, 0),
+ if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, LOW_LATENCY_CMD),
0, sizeof(cmd), &cmd))
IWL_ERR(mvm, "Failed to send low latency command\n");
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 85a6da70ca78..75fd386b048e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#include "iwl-trans.h"
#include "iwl-fh.h"
@@ -125,6 +125,9 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
control_flags |= IWL_PRPH_SCRATCH_MTR_MODE;
control_flags |= IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT;
+ if (trans->trans_cfg->imr_enabled)
+ control_flags |= IWL_PRPH_SCRATCH_IMR_DEBUG_EN;
+
/* initialize RX default queue */
prph_sc_ctrl->rbd_cfg.free_rbd_addr =
cpu_to_le64(trans_pcie->rxq->bd_dma);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 5178e852c5d3..b16d4ae182d1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -491,18 +491,21 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* So devices */
{IWL_PCI_DEVICE(0x2725, PCI_ANY_ID, iwl_so_trans_cfg)},
{IWL_PCI_DEVICE(0x2726, PCI_ANY_ID, iwl_snj_trans_cfg)},
- {IWL_PCI_DEVICE(0x7A70, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
+ {IWL_PCI_DEVICE(0x7A70, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)},
{IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_trans_cfg)},
{IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
+ {IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)},
{IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
+ {IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_so_trans_cfg)},
/* Ma devices */
{IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)},
{IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)},
- {IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_ma_trans_cfg)},
/* Bz devices */
{IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
@@ -668,8 +671,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x2726, 0x1652, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650i_name),
IWL_DEV_INFO(0x2726, 0x1691, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690s_name),
IWL_DEV_INFO(0x2726, 0x1692, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690i_name),
- IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690s_name),
- IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690i_name),
+ IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
+ IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
/* SO with GF2 */
IWL_DEV_INFO(0x2726, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
@@ -682,6 +685,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x7A70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
IWL_DEV_INFO(0x7AF0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
IWL_DEV_INFO(0x7AF0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
+ IWL_DEV_INFO(0x7F70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
+ IWL_DEV_INFO(0x7F70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
/* MA with GF2 */
IWL_DEV_INFO(0x7E40, 0x1671, iwl_cfg_ma_a0_gf_a0, iwl_ax211_killer_1675s_name),
@@ -1301,7 +1306,30 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY,
- iwlax210_2ax_cfg_so_jf_b0, iwl9462_name)
+ iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
+
+/* MsP */
+/* For now we use the same FW as MR, but this will change in the future. */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
+ iwl_cfg_so_a0_ms_a0, iwl_ax204_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
+ iwl_cfg_so_a0_ms_a0, iwl_ax204_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
+ iwl_cfg_ma_a0_ms_a0, iwl_ax204_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
+ iwl_cfg_snj_a0_ms_a0, iwl_ax204_name)
#endif /* CONFIG_IWLMVM */
};
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index a43e56c7689f..f7e4f868363d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2003-2015, 2018-2021 Intel Corporation
+ * Copyright (C) 2003-2015, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -104,6 +104,18 @@ struct iwl_rx_completion_desc {
} __packed;
/**
+ * struct iwl_rx_completion_desc_bz - Bz completion descriptor
+ * @rbid: unique tag of the received buffer
+ * @flags: flags (0: fragmented, all others: reserved)
+ * @reserved: reserved
+ */
+struct iwl_rx_completion_desc_bz {
+ __le16 rbid;
+ u8 flags;
+ u8 reserved[1];
+} __packed;
+
+/**
* struct iwl_rxq - Rx queue
* @id: queue index
* @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
@@ -133,11 +145,7 @@ struct iwl_rxq {
int id;
void *bd;
dma_addr_t bd_dma;
- union {
- void *used_bd;
- __le32 *bd_32;
- struct iwl_rx_completion_desc *cd;
- };
+ void *used_bd;
dma_addr_t used_bd_dma;
u32 read;
u32 write;
@@ -262,6 +270,20 @@ enum iwl_pcie_fw_reset_state {
};
/**
+ * enum iwl_pcie_imr_status - imr dma transfer state
+ * @IMR_D2S_IDLE: default value of the dma transfer
+ * @IMR_D2S_REQUESTED: dma transfer requested
+ * @IMR_D2S_COMPLETED: dma transfer completed
+ * @IMR_D2S_ERROR: dma transfer error
+ */
+enum iwl_pcie_imr_status {
+ IMR_D2S_IDLE,
+ IMR_D2S_REQUESTED,
+ IMR_D2S_COMPLETED,
+ IMR_D2S_ERROR,
+};
+
+/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
* @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
@@ -319,6 +341,8 @@ enum iwl_pcie_fw_reset_state {
* @alloc_page_lock: spinlock for the page allocator
* @alloc_page: allocated page to still use parts of
* @alloc_page_used: how much of the allocated page was already used (bytes)
+ * @imr_status: imr dma state machine
+ * @imr_waitq: imr wait queue for dma completion
* @rf_name: name/version of the CRF, if any
*/
struct iwl_trans_pcie {
@@ -363,7 +387,7 @@ struct iwl_trans_pcie {
/* PCI bus related data */
struct pci_dev *pci_dev;
- void __iomem *hw_base;
+ u8 __iomem *hw_base;
bool ucode_write_complete;
bool sx_complete;
@@ -414,7 +438,8 @@ struct iwl_trans_pcie {
bool fw_reset_handshake;
enum iwl_pcie_fw_reset_state fw_reset_state;
wait_queue_head_t fw_reset_waitq;
-
+ enum iwl_pcie_imr_status imr_status;
+ wait_queue_head_t imr_waitq;
char rf_name[32];
};
@@ -809,4 +834,9 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
+void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
+ u32 dst_addr, u64 src_addr, u32 byte_cnt);
+int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
+ u32 dst_addr, u64 src_addr, u32 byte_cnt);
+
#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 8247014278f3..68a4572cee53 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2003-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2003-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -190,11 +190,14 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
}
rxq->write_actual = round_down(rxq->write, 8);
- if (trans->trans_cfg->mq_rx_supported)
+ if (!trans->trans_cfg->mq_rx_supported)
+ iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
+ else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual |
+ HBUS_TARG_WRPTR_RX_Q(rxq->id));
+ else
iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
rxq->write_actual);
- else
- iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}
static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
@@ -652,23 +655,30 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data)
iwl_pcie_rx_allocator(trans_pcie->trans);
}
-static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
+static int iwl_pcie_free_bd_size(struct iwl_trans *trans)
{
- struct iwl_rx_transfer_desc *rx_td;
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return sizeof(struct iwl_rx_transfer_desc);
- if (use_rx_td)
- return sizeof(*rx_td);
- else
- return trans->trans_cfg->mq_rx_supported ? sizeof(__le64) :
- sizeof(__le32);
+ return trans->trans_cfg->mq_rx_supported ?
+ sizeof(__le64) : sizeof(__le32);
+}
+
+static int iwl_pcie_used_bd_size(struct iwl_trans *trans)
+{
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ return sizeof(struct iwl_rx_completion_desc_bz);
+
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return sizeof(struct iwl_rx_completion_desc);
+
+ return sizeof(__le32);
}
static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
- bool use_rx_td = (trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_AX210);
- int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+ int free_size = iwl_pcie_free_bd_size(trans);
if (rxq->bd)
dma_free_coherent(trans->dev,
@@ -682,8 +692,8 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
if (rxq->used_bd)
dma_free_coherent(trans->dev,
- (use_rx_td ? sizeof(*rxq->cd) :
- sizeof(__le32)) * rxq->queue_size,
+ iwl_pcie_used_bd_size(trans) *
+ rxq->queue_size,
rxq->used_bd, rxq->used_bd_dma);
rxq->used_bd_dma = 0;
rxq->used_bd = NULL;
@@ -707,7 +717,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
else
rxq->queue_size = RX_QUEUE_SIZE;
- free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+ free_size = iwl_pcie_free_bd_size(trans);
/*
* Allocate the circular buffer of Read Buffer Descriptors
@@ -720,14 +730,15 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
if (trans->trans_cfg->mq_rx_supported) {
rxq->used_bd = dma_alloc_coherent(dev,
- (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
+ iwl_pcie_used_bd_size(trans) *
+ rxq->queue_size,
&rxq->used_bd_dma,
GFP_KERNEL);
if (!rxq->used_bd)
goto err;
}
- rxq->rb_stts = trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
+ rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
rxq->rb_stts_dma =
trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;
@@ -1307,9 +1318,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
"Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
rxq->id, offset,
iwl_get_cmd_string(trans,
- iwl_cmd_id(pkt->hdr.cmd,
- pkt->hdr.group_id,
- 0)),
+ WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)),
pkt->hdr.group_id, pkt->hdr.cmd,
le16_to_cpu(pkt->hdr.sequence));
@@ -1319,7 +1328,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
/* check that what the device tells us made sense */
- if (offset > max_len)
+ if (len < sizeof(*pkt) || offset > max_len)
break;
trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
@@ -1419,6 +1428,7 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
u16 vid;
BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
+ BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4);
if (!trans->trans_cfg->mq_rx_supported) {
rxb = rxq->queue[i];
@@ -1426,11 +1436,20 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
return rxb;
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- vid = le16_to_cpu(rxq->cd[i].rbid);
- *join = rxq->cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ struct iwl_rx_completion_desc_bz *cd = rxq->used_bd;
+
+ vid = le16_to_cpu(cd[i].rbid);
+ *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
+ } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ struct iwl_rx_completion_desc *cd = rxq->used_bd;
+
+ vid = le16_to_cpu(cd[i].rbid);
+ *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
} else {
- vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; /* 12-bit VID */
+ __le32 *cd = rxq->used_bd;
+
+ vid = le32_to_cpu(cd[i]) & 0x0FFF; /* 12-bit VID */
}
if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
@@ -1608,10 +1627,13 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
if (WARN_ON(entry->entry >= trans->num_rx_queues))
return IRQ_NONE;
- if (WARN_ONCE(!rxq,
- "[%d] Got MSI-X interrupt before we have Rx queues",
- entry->entry))
+ if (!rxq) {
+ if (net_ratelimit())
+ IWL_ERR(trans,
+ "[%d] Got MSI-X interrupt before we have Rx queues\n",
+ entry->entry);
return IRQ_NONE;
+ }
lock_map_acquire(&trans->sync_cmd_lockdep_map);
IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry);
@@ -1954,7 +1976,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
CSR_INT, CSR_INT_BIT_RX_PERIODIC);
}
/* Sending RX interrupt require many steps to be done in the
- * the device:
+ * device:
* 1- write interrupt to current index in ICT table.
* 2- dma RX frame.
* 3- update RX shared data to indicate last write index.
@@ -1998,6 +2020,11 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
/* Wake up uCode load routine, now that load is complete */
trans_pcie->ucode_write_complete = true;
wake_up(&trans_pcie->ucode_write_waitq);
+ /* Wake up IMR write routine, now that write to SRAM is complete */
+ if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+ trans_pcie->imr_status = IMR_D2S_COMPLETED;
+ wake_up(&trans_pcie->ucode_write_waitq);
+ }
}
if (inta & ~handled) {
@@ -2211,7 +2238,17 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
}
/* This "Tx" DMA channel is used only for loading uCode */
- if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
+ if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM &&
+ trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+ IWL_DEBUG_ISR(trans, "IMR Complete interrupt\n");
+ isr_stats->tx++;
+
+ /* Wake up IMR routine once write to SRAM is complete */
+ if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+ trans_pcie->imr_status = IMR_D2S_COMPLETED;
+ wake_up(&trans_pcie->ucode_write_waitq);
+ }
+ } else if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
isr_stats->tx++;
/*
@@ -2220,6 +2257,12 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
*/
trans_pcie->ucode_write_complete = true;
wake_up(&trans_pcie->ucode_write_waitq);
+
+ /* Wake up IMR routine once write to SRAM is complete */
+ if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+ trans_pcie->imr_status = IMR_D2S_COMPLETED;
+ wake_up(&trans_pcie->ucode_write_waitq);
+ }
}
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
@@ -2234,7 +2277,10 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
inta_fh);
isr_stats->sw++;
/* during FW reset flow report errors from there */
- if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
+ if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+ trans_pcie->imr_status = IMR_D2S_ERROR;
+ wake_up(&trans_pcie->imr_waitq);
+ } else if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
trans_pcie->fw_reset_state = FW_RESET_ERROR;
wake_up(&trans_pcie->fw_reset_waitq);
} else {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index ef14584fc0a1..8be3c3c8c68b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2007-2015, 2018-2020 Intel Corporation
+ * Copyright (C) 2007-2015, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -745,7 +745,7 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
iwl_set_bits_prph(trans, LMPM_CHICK,
LMPM_CHICK_EXTENDED_ADDR_SPACE);
- memcpy(v_addr, (u8 *)section->data + offset, copy_size);
+ memcpy(v_addr, (const u8 *)section->data + offset, copy_size);
ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
copy_size);
@@ -1112,7 +1112,7 @@ static const struct iwl_causes_list causes_list_pre_bz[] = {
};
static const struct iwl_causes_list causes_list_bz[] = {
- {MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ, CSR_MSIX_HW_INT_MASK_AD, 0x29},
+ {MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ, CSR_MSIX_HW_INT_MASK_AD, 0x15},
};
static void iwl_pcie_map_list(struct iwl_trans *trans,
@@ -1948,6 +1948,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
trans->txqs.page_offs = trans_cfg->cb_data_offs;
trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
+ trans->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver;
if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
trans_pcie->n_no_reclaim_cmds = 0;
@@ -2863,7 +2864,7 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
{
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- void *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
+ u8 *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
struct cont_rec *data = &trans_pcie->fw_mon_data;
u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
ssize_t size, bytes_copied = 0;
@@ -3468,7 +3469,8 @@ static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
.d3_suspend = iwl_trans_pcie_d3_suspend, \
.d3_resume = iwl_trans_pcie_d3_resume, \
.interrupts = iwl_trans_pci_interrupts, \
- .sync_nmi = iwl_trans_pcie_sync_nmi \
+ .sync_nmi = iwl_trans_pcie_sync_nmi, \
+ .imr_dma_data = iwl_trans_pcie_copy_imr \
static const struct iwl_trans_ops trans_ops_pcie = {
IWL_TRANS_COMMON_OPS,
@@ -3553,6 +3555,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
mutex_init(&trans_pcie->mutex);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
init_waitqueue_head(&trans_pcie->fw_reset_waitq);
+ init_waitqueue_head(&trans_pcie->imr_waitq);
trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
WQ_HIGHPRI | WQ_UNBOUND, 1);
@@ -3681,3 +3684,41 @@ out_free_trans:
iwl_trans_free(trans);
return ERR_PTR(ret);
}
+
+void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
+ u32 dst_addr, u64 src_addr, u32 byte_cnt)
+{
+ iwl_write_prph(trans, IMR_UREG_CHICK,
+ iwl_read_prph(trans, IMR_UREG_CHICK) |
+ IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK);
+ iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR, dst_addr);
+ iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB,
+ (u32)(src_addr & 0xFFFFFFFF));
+ iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB,
+ iwl_get_dma_hi_addr(src_addr));
+ iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_BC, byte_cnt);
+ iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_CTRL,
+ IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS |
+ IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS |
+ IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK);
+}
+
+int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
+ u32 dst_addr, u64 src_addr, u32 byte_cnt)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret = -1;
+
+ trans_pcie->imr_status = IMR_D2S_REQUESTED;
+ iwl_trans_pcie_copy_imr_fh(trans, dst_addr, src_addr, byte_cnt);
+ ret = wait_event_timeout(trans_pcie->imr_waitq,
+ trans_pcie->imr_status !=
+ IMR_D2S_REQUESTED, 5 * HZ);
+ if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR) {
+ IWL_ERR(trans, "Failed to copy IMR Memory chunk!\n");
+ iwl_trans_pcie_dump_regs(trans);
+ return -ETIMEDOUT;
+ }
+ trans_pcie->imr_status = IMR_D2S_IDLE;
+ return 0;
+}
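iwl_trans_pcie_copy_imr() follows the usual request/complete handshake: arm imr_status, kick the DMA, then sleep on imr_waitq until the interrupt handler flips the state or the 5 second timeout hits (wait_event_timeout() returns 0 on timeout). A reduced, driver-neutral sketch of that pattern, for illustration only:

	/* illustration of the handshake used above; names are not driver code */
	enum copy_state { COPY_IDLE, COPY_REQUESTED, COPY_DONE, COPY_ERROR };

	static int wait_for_copy(wait_queue_head_t *wq, enum copy_state *state)
	{
		/* wait_event_timeout() returns 0 on timeout, >0 otherwise */
		long ret = wait_event_timeout(*wq, *state != COPY_REQUESTED, 5 * HZ);

		if (!ret || *state == COPY_ERROR)
			return -ETIMEDOUT;

		*state = COPY_IDLE;
		return 0;
	}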
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 34bde8c87324..c72a84d8bb4f 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -213,7 +213,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
/* map the remaining (adjusted) nocopy/dup fragments */
for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
- const void *data = cmddata[i];
+ void *data = (void *)(uintptr_t)cmddata[i];
if (!cmdlen[i])
continue;
@@ -222,7 +222,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
continue;
if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
data = dup_buf;
- phys_addr = dma_map_single(trans->dev, (void *)data,
+ phys_addr = dma_map_single(trans->dev, data,
cmdlen[i], DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
idx = -ENOMEM;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 4f6c187eed69..3546c5269c3b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -154,7 +154,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
void *tfd;
u32 num_tbs;
- tfd = txq->tfds + trans->txqs.tfd.size * txq->write_ptr;
+ tfd = (u8 *)txq->tfds + trans->txqs.tfd.size * txq->write_ptr;
if (reset)
memset(tfd, 0, trans->txqs.tfd.size);
@@ -540,7 +540,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
trans->cfg->min_txq_size);
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
- trans->cfg->min_256_ba_txq_size);
+ trans->cfg->min_ba_txq_size);
trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
cmd_queue);
@@ -594,7 +594,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
trans->cfg->min_txq_size);
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
- trans->cfg->min_256_ba_txq_size);
+ trans->cfg->min_ba_txq_size);
ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
cmd_queue);
if (ret) {
@@ -877,7 +877,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
if (configure_scd) {
iwl_scd_txq_set_inactive(trans, txq_id);
- iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
+ iwl_trans_write_mem(trans, stts_addr, (const void *)zero_val,
ARRAY_SIZE(zero_val));
}
@@ -1114,7 +1114,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
/* map the remaining (adjusted) nocopy/dup fragments */
for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
- const void *data = cmddata[i];
+ void *data = (void *)(uintptr_t)cmddata[i];
if (!cmdlen[i])
continue;
@@ -1123,7 +1123,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
continue;
if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
data = dup_buf;
- phys_addr = dma_map_single(trans->dev, (void *)data,
+ phys_addr = dma_map_single(trans->dev, data,
cmdlen[i], DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
@@ -1201,7 +1201,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
cmd = txq->entries[cmd_index].cmd;
meta = &txq->entries[cmd_index].meta;
group_id = cmd->hdr.group_id;
- cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);
+ cmd_id = WIDE_ID(group_id, cmd->hdr.cmd);
iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
index 0730657d54bf..726185d6fab8 100644
--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
@@ -1,13 +1,15 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2020-2021 Intel Corporation
+ * Copyright (C) 2020-2022 Intel Corporation
*/
#include <net/tso.h>
#include <linux/tcp.h>
#include "iwl-debug.h"
#include "iwl-io.h"
+#include "fw/api/commands.h"
#include "fw/api/tx.h"
+#include "fw/api/datapath.h"
#include "queue/tx.h"
#include "iwl-fh.h"
#include "iwl-scd.h"
@@ -41,13 +43,13 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
+ struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
/* Starting from AX210, the HW expects bytes */
WARN_ON(trans->txqs.bc_table_dword);
WARN_ON(len > 0x3FFF);
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
- scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
+ scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
} else {
struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
@@ -189,7 +191,7 @@ static struct page *get_workaround_page(struct iwl_trans *trans,
return NULL;
/* set the chaining pointer to the previous page if there */
- *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
+ *(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
*page_ptr = ret;
return ret;
@@ -314,7 +316,7 @@ alloc:
return NULL;
p->pos = page_address(p->page);
/* set the chaining pointer to NULL */
- *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
+ *(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
out:
*page_ptr = p->page;
get_page(p->page);
@@ -963,7 +965,7 @@ void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
while (next) {
struct page *tmp = next;
- next = *(void **)(page_address(next) + PAGE_SIZE -
+ next = *(void **)((u8 *)page_address(next) + PAGE_SIZE -
sizeof(void *));
__free_page(tmp);
}
@@ -1083,9 +1085,8 @@ error:
return -ENOMEM;
}
-static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
- struct iwl_txq **intxq, int size,
- unsigned int timeout)
+static struct iwl_txq *
+iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
{
size_t bc_tbl_size, bc_tbl_entries;
struct iwl_txq *txq;
@@ -1097,18 +1098,18 @@ static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
bc_tbl_entries = bc_tbl_size / sizeof(u16);
if (WARN_ON(size > bc_tbl_entries))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
txq = kzalloc(sizeof(*txq), GFP_KERNEL);
if (!txq)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
&txq->bc_tbl.dma);
if (!txq->bc_tbl.addr) {
IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
kfree(txq);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
ret = iwl_txq_alloc(trans, txq, size, false);
@@ -1124,12 +1125,11 @@ static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
txq->wd_timeout = msecs_to_jiffies(timeout);
- *intxq = txq;
- return 0;
+ return txq;
error:
iwl_txq_gen2_free_memory(trans, txq);
- return ret;
+ return ERR_PTR(ret);
}
static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
@@ -1186,30 +1186,61 @@ error_free_resp:
return ret;
}
-int iwl_txq_dyn_alloc(struct iwl_trans *trans, __le16 flags, u8 sta_id, u8 tid,
- int cmd_id, int size, unsigned int timeout)
+int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
+ u8 tid, int size, unsigned int timeout)
{
- struct iwl_txq *txq = NULL;
- struct iwl_tx_queue_cfg_cmd cmd = {
- .flags = flags,
- .sta_id = sta_id,
- .tid = tid,
- };
+ struct iwl_txq *txq;
+ union {
+ struct iwl_tx_queue_cfg_cmd old;
+ struct iwl_scd_queue_cfg_cmd new;
+ } cmd;
struct iwl_host_cmd hcmd = {
- .id = cmd_id,
- .len = { sizeof(cmd) },
- .data = { &cmd, },
.flags = CMD_WANT_SKB,
};
int ret;
- ret = iwl_txq_dyn_alloc_dma(trans, &txq, size, timeout);
- if (ret)
- return ret;
+ if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
+ trans->hw_rev_step == SILICON_A_STEP)
+ size = 4096;
+
+ txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
+ if (IS_ERR(txq))
+ return PTR_ERR(txq);
- cmd.tfdq_addr = cpu_to_le64(txq->dma_addr);
- cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
- cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+ if (trans->txqs.queue_alloc_cmd_ver == 0) {
+ memset(&cmd.old, 0, sizeof(cmd.old));
+ cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
+ cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
+ cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+ cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE);
+ cmd.old.tid = tid;
+
+ if (hweight32(sta_mask) != 1) {
+ ret = -EINVAL;
+ goto error;
+ }
+ cmd.old.sta_id = ffs(sta_mask) - 1;
+
+ hcmd.id = SCD_QUEUE_CFG;
+ hcmd.len[0] = sizeof(cmd.old);
+ hcmd.data[0] = &cmd.old;
+ } else if (trans->txqs.queue_alloc_cmd_ver == 3) {
+ memset(&cmd.new, 0, sizeof(cmd.new));
+ cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
+ cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
+ cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma);
+ cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+ cmd.new.u.add.flags = cpu_to_le32(flags);
+ cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask);
+ cmd.new.u.add.tid = tid;
+
+ hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
+ hcmd.len[0] = sizeof(cmd.new);
+ hcmd.data[0] = &cmd.new;
+ } else {
+ ret = -EOPNOTSUPP;
+ goto error;
+ }
ret = iwl_trans_send_cmd(trans, &hcmd);
if (ret)
@@ -1307,10 +1338,10 @@ static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
dma_addr_t hi_len;
if (trans->trans_cfg->use_tfh) {
- struct iwl_tfh_tfd *tfd = _tfd;
- struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+ struct iwl_tfh_tfd *tfh_tfd = _tfd;
+ struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
- return (dma_addr_t)(le64_to_cpu(tb->addr));
+ return (dma_addr_t)(le64_to_cpu(tfh_tb->addr));
}
tfd = _tfd;
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
index 20efc62acf13..eca53bfd326d 100644
--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2020-2021 Intel Corporation
+ * Copyright (C) 2020-2022 Intel Corporation
*/
#ifndef __iwl_trans_queue_tx_h__
#define __iwl_trans_queue_tx_h__
@@ -41,7 +41,7 @@ static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
if (trans->trans_cfg->use_tfh)
idx = iwl_txq_get_cmd_index(txq, idx);
- return txq->tfds + trans->txqs.tfd.size * idx;
+ return (u8 *)txq->tfds + trans->txqs.tfd.size * idx;
}
int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
@@ -112,10 +112,9 @@ void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
struct iwl_cmd_meta *meta,
struct iwl_tfh_tfd *tfd);
-int iwl_txq_dyn_alloc(struct iwl_trans *trans,
- __le16 flags, u8 sta_id, u8 tid,
- int cmd_id, int size,
- unsigned int timeout);
+int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags,
+ u32 sta_mask, u8 tid,
+ int size, unsigned int timeout);
int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd *dev_cmd, int txq_id);
@@ -137,9 +136,9 @@ static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans,
struct iwl_tfd *tfd;
if (trans->trans_cfg->use_tfh) {
- struct iwl_tfh_tfd *tfd = _tfd;
+ struct iwl_tfh_tfd *tfh_tfd = _tfd;
- return le16_to_cpu(tfd->num_tbs) & 0x1f;
+ return le16_to_cpu(tfh_tfd->num_tbs) & 0x1f;
}
tfd = (struct iwl_tfd *)_tfd;
@@ -153,10 +152,10 @@ static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
struct iwl_tfd_tb *tb;
if (trans->trans_cfg->use_tfh) {
- struct iwl_tfh_tfd *tfd = _tfd;
- struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+ struct iwl_tfh_tfd *tfh_tfd = _tfd;
+ struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
- return le16_to_cpu(tb->tb_len);
+ return le16_to_cpu(tfh_tb->tb_len);
}
tfd = (struct iwl_tfd *)_tfd;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index fc5725f6daee..28bfa7b7b73c 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -4,7 +4,7 @@
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2020 Intel Corporation
+ * Copyright (C) 2018 - 2022 Intel Corporation
*/
/*
@@ -173,9 +173,23 @@ static const struct ieee80211_regdomain hwsim_world_regdom_custom_02 = {
}
};
+static const struct ieee80211_regdomain hwsim_world_regdom_custom_03 = {
+ .n_reg_rules = 6,
+ .alpha2 = "99",
+ .reg_rules = {
+ REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0),
+ REG_RULE(2484 - 10, 2484 + 10, 40, 0, 20, 0),
+ REG_RULE(5150 - 10, 5240 + 10, 40, 0, 30, 0),
+ REG_RULE(5745 - 10, 5825 + 10, 40, 0, 30, 0),
+ REG_RULE(5855 - 10, 5925 + 10, 40, 0, 33, 0),
+ REG_RULE(5955 - 10, 7125 + 10, 320, 0, 33, 0),
+ }
+};
+
static const struct ieee80211_regdomain *hwsim_world_regdom_custom[] = {
&hwsim_world_regdom_custom_01,
&hwsim_world_regdom_custom_02,
+ &hwsim_world_regdom_custom_03,
};
struct hwsim_vif_priv {
@@ -475,16 +489,16 @@ static const struct ieee80211_sta_s1g_cap hwsim_s1g_cap = {
0 },
};
-static void hwsim_init_s1g_channels(struct ieee80211_channel *channels)
+static void hwsim_init_s1g_channels(struct ieee80211_channel *chans)
{
int ch, freq;
for (ch = 0; ch < NUM_S1G_CHANS_US; ch++) {
freq = 902000 + (ch + 1) * 500;
- channels[ch].band = NL80211_BAND_S1GHZ;
- channels[ch].center_freq = KHZ_TO_MHZ(freq);
- channels[ch].freq_offset = freq % 1000;
- channels[ch].hw_value = ch + 1;
+ chans[ch].band = NL80211_BAND_S1GHZ;
+ chans[ch].center_freq = KHZ_TO_MHZ(freq);
+ chans[ch].freq_offset = freq % 1000;
+ chans[ch].hw_value = ch + 1;
}
}
@@ -503,6 +517,8 @@ static const struct ieee80211_rate hwsim_rates[] = {
{ .bitrate = 540 }
};
+#define DEFAULT_RX_RSSI -50
+
static const u32 hwsim_ciphers[] = {
WLAN_CIPHER_SUITE_WEP40,
WLAN_CIPHER_SUITE_WEP104,
@@ -652,6 +668,7 @@ struct mac80211_hwsim_data {
ARRAY_SIZE(hwsim_channels_6ghz)];
struct ieee80211_channel *channel;
+ enum nl80211_chan_width bw;
u64 beacon_int /* beacon interval in us */;
unsigned int rx_filter;
bool started, idle, scanning;
@@ -690,6 +707,9 @@ struct mac80211_hwsim_data {
u64 rx_bytes;
u64 tx_dropped;
u64 tx_failed;
+
+ /* RSSI in rx status of the receiver */
+ int rx_rssi;
};
static const struct rhashtable_params hwsim_rht_params = {
@@ -803,6 +823,40 @@ extern int hwsim_tx_virtio(struct mac80211_hwsim_data *data,
#define hwsim_virtio_enabled false
#endif
+static int hwsim_get_chanwidth(enum nl80211_chan_width bw)
+{
+ switch (bw) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ return 20;
+ case NL80211_CHAN_WIDTH_40:
+ return 40;
+ case NL80211_CHAN_WIDTH_80:
+ return 80;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ return 160;
+ case NL80211_CHAN_WIDTH_320:
+ return 320;
+ case NL80211_CHAN_WIDTH_5:
+ return 5;
+ case NL80211_CHAN_WIDTH_10:
+ return 10;
+ case NL80211_CHAN_WIDTH_1:
+ return 1;
+ case NL80211_CHAN_WIDTH_2:
+ return 2;
+ case NL80211_CHAN_WIDTH_4:
+ return 4;
+ case NL80211_CHAN_WIDTH_8:
+ return 8;
+ case NL80211_CHAN_WIDTH_16:
+ return 16;
+ }
+
+ return INT_MAX;
+}
+
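hwsim_get_chanwidth() normalizes the nl80211 width enum to MHz (INT_MAX for anything unhandled), so the bandwidth sanity checks added later in the TX path reduce to a plain integer comparison, roughly as below (illustrative; rate_bw/confbw stand in for the locals used there):

	/* illustrative check mirroring the one added in mac80211_hwsim_tx() */
	if (hwsim_get_chanwidth(rate_bw) > hwsim_get_chanwidth(confbw))
		return;	/* frame claims more bandwidth than the configured channel */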
static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
struct sk_buff *skb,
struct ieee80211_channel *chan);
@@ -964,6 +1018,29 @@ DEFINE_DEBUGFS_ATTRIBUTE(hwsim_fops_group,
hwsim_fops_group_read, hwsim_fops_group_write,
"%llx\n");
+static int hwsim_fops_rx_rssi_read(void *dat, u64 *val)
+{
+ struct mac80211_hwsim_data *data = dat;
+ *val = data->rx_rssi;
+ return 0;
+}
+
+static int hwsim_fops_rx_rssi_write(void *dat, u64 val)
+{
+ struct mac80211_hwsim_data *data = dat;
+ int rssi = (int)val;
+
+ if (rssi >= 0 || rssi < -100)
+ return -EINVAL;
+
+ data->rx_rssi = rssi;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(hwsim_fops_rx_rssi,
+ hwsim_fops_rx_rssi_read, hwsim_fops_rx_rssi_write,
+ "%lld\n");
+
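DEFINE_DEBUGFS_ATTRIBUTE() above only builds the file operations; the file itself still has to be created during the radio's debugfs setup, which is not shown in this hunk. The hook-up would typically look like the following (assumed placement, mirroring how the existing ps/group attributes are registered):

	/* assumed registration in the per-radio debugfs init, not shown here */
	debugfs_create_file("rx_rssi", 0666, data->debugfs, data,
			    &hwsim_fops_rx_rssi);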
static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -1482,8 +1559,8 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
rx_status.bw = RATE_INFO_BW_20;
if (info->control.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI;
- /* TODO: simulate real signal strength (and optional packet loss) */
- rx_status.signal = -50;
+ /* TODO: simulate optional packet loss */
+ rx_status.signal = data->rx_rssi;
if (info->control.vif)
rx_status.signal += info->control.vif->bss_conf.txpower;
@@ -1595,7 +1672,8 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_channel *channel;
bool ack;
- u32 _portid;
+ enum nl80211_chan_width confbw = NL80211_CHAN_WIDTH_20_NOHT;
+ u32 _portid, i;
if (WARN_ON(skb->len < 10)) {
/* Should not happen; just a sanity check for addr1 use */
@@ -1605,14 +1683,17 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
if (!data->use_chanctx) {
channel = data->channel;
+ confbw = data->bw;
} else if (txi->hw_queue == 4) {
channel = data->tmp_chan;
} else {
chanctx_conf = rcu_dereference(txi->control.vif->chanctx_conf);
- if (chanctx_conf)
+ if (chanctx_conf) {
channel = chanctx_conf->def.chan;
- else
+ confbw = chanctx_conf->def.width;
+ } else {
channel = NULL;
+ }
}
if (WARN(!channel, "TX w/o channel - queue = %d\n", txi->hw_queue)) {
@@ -1636,6 +1717,25 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
txi->control.rates,
ARRAY_SIZE(txi->control.rates));
+ for (i = 0; i < ARRAY_SIZE(txi->control.rates); i++) {
+ u16 rflags = txi->control.rates[i].flags;
+ /* initialize to data->bw for 5/10 MHz handling */
+ enum nl80211_chan_width bw = data->bw;
+
+ if (txi->control.rates[i].idx == -1)
+ break;
+
+ if (rflags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = NL80211_CHAN_WIDTH_40;
+ else if (rflags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ bw = NL80211_CHAN_WIDTH_80;
+ else if (rflags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+ bw = NL80211_CHAN_WIDTH_160;
+
+ if (WARN_ON(hwsim_get_chanwidth(bw) > hwsim_get_chanwidth(confbw)))
+ return;
+ }
+
if (skb->len >= 24 + 8 &&
ieee80211_is_probe_resp(hdr->frame_control)) {
/* fake header transmission time */
@@ -1935,6 +2035,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
}
data->channel = conf->chandef.chan;
+ data->bw = conf->chandef.width;
for (idx = 0; idx < ARRAY_SIZE(data->survey_data); idx++) {
if (data->survey_data[idx].channel &&
@@ -1946,6 +2047,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
}
} else {
data->channel = conf->chandef.chan;
+ data->bw = conf->chandef.width;
}
mutex_unlock(&data->mutex);
@@ -2077,12 +2179,49 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
wiphy_dbg(hw->wiphy, " TX Power: %d dBm\n", info->txpower);
}
+static void
+mac80211_hwsim_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 changed)
+{
+ struct mac80211_hwsim_data *data = hw->priv;
+ u32 bw = U32_MAX;
+ enum nl80211_chan_width confbw = NL80211_CHAN_WIDTH_20_NOHT;
+
+ switch (sta->bandwidth) {
+#define C(_bw) case IEEE80211_STA_RX_BW_##_bw: bw = _bw; break
+ C(20);
+ C(40);
+ C(80);
+ C(160);
+ C(320);
+#undef C
+ }
+
+ if (!data->use_chanctx) {
+ confbw = data->bw;
+ } else {
+ struct ieee80211_chanctx_conf *chanctx_conf =
+ rcu_dereference(vif->chanctx_conf);
+
+ if (!WARN_ON(!chanctx_conf))
+ confbw = chanctx_conf->def.width;
+ }
+
+ WARN(bw > hwsim_get_chanwidth(confbw),
+ "intf %pM: bad STA %pM bandwidth %d MHz (%d) > channel config %d MHz (%d)\n",
+ vif->addr, sta->addr, bw, sta->bandwidth,
+ hwsim_get_chanwidth(data->bw), data->bw);
+}
+
static int mac80211_hwsim_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
hwsim_check_magic(vif);
hwsim_set_sta_magic(sta);
+ mac80211_hwsim_sta_rc_update(hw, vif, sta, 0);
return 0;
}
@@ -2658,6 +2797,7 @@ static int mac80211_hwsim_tx_last_beacon(struct ieee80211_hw *hw)
.sta_add = mac80211_hwsim_sta_add, \
.sta_remove = mac80211_hwsim_sta_remove, \
.sta_notify = mac80211_hwsim_sta_notify, \
+ .sta_rc_update = mac80211_hwsim_sta_rc_update, \
.set_tim = mac80211_hwsim_set_tim, \
.conf_tx = mac80211_hwsim_conf_tx, \
.get_survey = mac80211_hwsim_get_survey, \
@@ -2809,7 +2949,7 @@ out_err:
nlmsg_free(mcast_skb);
}
-static const struct ieee80211_sband_iftype_data he_capa_2ghz[] = {
+static const struct ieee80211_sband_iftype_data sband_capa_2ghz[] = {
{
.types_mask = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP),
@@ -2855,6 +2995,66 @@ static const struct ieee80211_sband_iftype_data he_capa_2ghz[] = {
.tx_mcs_80p80 = cpu_to_le16(0xffff),
},
},
+ .eht_cap = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_EHT_MAC_CAP0_NSEP_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
+ .phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE,
+ .phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK,
+ .phy_cap_info[4] =
+ IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP |
+ IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+ IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI |
+ IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK,
+ .phy_cap_info[5] =
+ IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT |
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK |
+ IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK,
+ .phy_cap_info[6] =
+ IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK |
+ IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK,
+ .phy_cap_info[7] =
+ IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW,
+ },
+
+ /* For all MCS and bandwidth, set 8 NSS for both Tx and
+ * Rx
+ */
+ .eht_mcs_nss_supp = {
+ /*
+ * Since B0, B1, B2 and B3 are not set in
+ * the supported channel width set field in the
+ * HE PHY capabilities information field, the
+ * device is a 20MHz-only device on the 2.4GHz band.
+ */
+ .only_20mhz = {
+ .rx_tx_mcs7_max_nss = 0x88,
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ },
+ /* PPE threshold information is not supported */
+ },
},
#ifdef CONFIG_MAC80211_MESH
{
@@ -2897,7 +3097,7 @@ static const struct ieee80211_sband_iftype_data he_capa_2ghz[] = {
#endif
};
-static const struct ieee80211_sband_iftype_data he_capa_5ghz[] = {
+static const struct ieee80211_sband_iftype_data sband_capa_5ghz[] = {
{
/* TODO: should we support other types, e.g., P2P?*/
.types_mask = BIT(NL80211_IFTYPE_STATION) |
@@ -2948,6 +3148,81 @@ static const struct ieee80211_sband_iftype_data he_capa_5ghz[] = {
.tx_mcs_80p80 = cpu_to_le16(0xfffa),
},
},
+ .eht_cap = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_EHT_MAC_CAP0_NSEP_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
+ .phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE |
+ IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK,
+ .phy_cap_info[1] =
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK,
+ .phy_cap_info[2] =
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK,
+ .phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK,
+ .phy_cap_info[4] =
+ IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP |
+ IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+ IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI |
+ IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK,
+ .phy_cap_info[5] =
+ IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT |
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK |
+ IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK,
+ .phy_cap_info[6] =
+ IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK |
+ IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK,
+ .phy_cap_info[7] =
+ IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ,
+ },
+
+ /* For all MCS and bandwidth, set 8 NSS for both Tx and
+ * Rx
+ */
+ .eht_mcs_nss_supp = {
+ /*
+ * As B1 and B2 are set in the supported
+ * channel width set field in the HE PHY
+ * capabilities information field, include all
+ * the following MCS/NSS.
+ */
+ .bw._80 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ .bw._160 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ },
+ /* PPE threshold information is not supported */
+ },
},
#ifdef CONFIG_MAC80211_MESH
{
@@ -2995,7 +3270,7 @@ static const struct ieee80211_sband_iftype_data he_capa_5ghz[] = {
#endif
};
-static const struct ieee80211_sband_iftype_data he_capa_6ghz[] = {
+static const struct ieee80211_sband_iftype_data sband_capa_6ghz[] = {
{
/* TODO: should we support other types, e.g., P2P?*/
.types_mask = BIT(NL80211_IFTYPE_STATION) |
@@ -3055,6 +3330,93 @@ static const struct ieee80211_sband_iftype_data he_capa_6ghz[] = {
.tx_mcs_80p80 = cpu_to_le16(0xfffa),
},
},
+ .eht_cap = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_EHT_MAC_CAP0_NSEP_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
+ .phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ |
+ IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE |
+ IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK,
+ .phy_cap_info[1] =
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK,
+ .phy_cap_info[2] =
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK,
+ .phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK,
+ .phy_cap_info[4] =
+ IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP |
+ IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+ IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI |
+ IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK,
+ .phy_cap_info[5] =
+ IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT |
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK |
+ IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK,
+ .phy_cap_info[6] =
+ IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK |
+ IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK |
+ IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP,
+ .phy_cap_info[7] =
+ IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ,
+ },
+
+ /* For all MCS and bandwidth, set 8 NSS for both Tx and
+ * Rx
+ */
+ .eht_mcs_nss_supp = {
+ /*
+ * As B1 and B2 are set in the supported
+ * channel width set field in the HE PHY
+ * capabilities information field and 320MHz in
+ * 6GHz is supported, include all the following
+ * MCS/NSS.
+ */
+ .bw._80 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ .bw._160 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ .bw._320 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ },
+ /* PPE threshold information is not supported */
+ },
},
#ifdef CONFIG_MAC80211_MESH
{
@@ -3111,22 +3473,22 @@ static const struct ieee80211_sband_iftype_data he_capa_6ghz[] = {
#endif
};
-static void mac80211_hwsim_he_capab(struct ieee80211_supported_band *sband)
+static void mac80211_hwsim_sband_capab(struct ieee80211_supported_band *sband)
{
u16 n_iftype_data;
if (sband->band == NL80211_BAND_2GHZ) {
- n_iftype_data = ARRAY_SIZE(he_capa_2ghz);
+ n_iftype_data = ARRAY_SIZE(sband_capa_2ghz);
sband->iftype_data =
- (struct ieee80211_sband_iftype_data *)he_capa_2ghz;
+ (struct ieee80211_sband_iftype_data *)sband_capa_2ghz;
} else if (sband->band == NL80211_BAND_5GHZ) {
- n_iftype_data = ARRAY_SIZE(he_capa_5ghz);
+ n_iftype_data = ARRAY_SIZE(sband_capa_5ghz);
sband->iftype_data =
- (struct ieee80211_sband_iftype_data *)he_capa_5ghz;
+ (struct ieee80211_sband_iftype_data *)sband_capa_5ghz;
} else if (sband->band == NL80211_BAND_6GHZ) {
- n_iftype_data = ARRAY_SIZE(he_capa_6ghz);
+ n_iftype_data = ARRAY_SIZE(sband_capa_6ghz);
sband->iftype_data =
- (struct ieee80211_sband_iftype_data *)he_capa_6ghz;
+ (struct ieee80211_sband_iftype_data *)sband_capa_6ghz;
} else {
return;
}
@@ -3318,6 +3680,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
hw->wiphy->n_cipher_suites = param->n_ciphers;
}
+ data->rx_rssi = DEFAULT_RX_RSSI;
+
INIT_DELAYED_WORK(&data->roc_start, hw_roc_start);
INIT_DELAYED_WORK(&data->roc_done, hw_roc_done);
INIT_DELAYED_WORK(&data->hw_scan, hw_scan_work);
@@ -3449,7 +3813,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
}
- mac80211_hwsim_he_capab(sband);
+ mac80211_hwsim_sband_capab(sband);
hw->wiphy->bands[band] = sband;
}
@@ -3509,6 +3873,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
debugfs_create_file("ps", 0666, data->debugfs, data, &hwsim_fops_ps);
debugfs_create_file("group", 0666, data->debugfs, data,
&hwsim_fops_group);
+ debugfs_create_file("rx_rssi", 0666, data->debugfs, data,
+ &hwsim_fops_rx_rssi);
if (!data->use_chanctx)
debugfs_create_file("dfs_simulate_radar", 0222,
data->debugfs,
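
The hwsim hunks above cache the configured channel width in data->bw and reject TX rate entries that request a wider bandwidth than the channel provides, using hwsim_get_chanwidth() as a MHz lookup. A minimal standalone sketch of that comparison follows; the simplified width enum and values are assumptions for illustration, not the nl80211 definitions.

#include <stdio.h>

/* Sketch only: a simplified stand-in for hwsim_get_chanwidth(), mapping a
 * made-up width enum to MHz so the "frame wider than the channel" check
 * can be shown in isolation.
 */
enum width { W_20, W_40, W_80, W_160, W_320 };

static int width_mhz(enum width w)
{
	switch (w) {
	case W_20:  return 20;
	case W_40:  return 40;
	case W_80:  return 80;
	case W_160: return 160;
	case W_320: return 320;
	}
	return 0;
}

int main(void)
{
	enum width conf = W_80;		/* configured channel width */
	enum width rate = W_160;	/* width requested by one TX rate entry */

	/* mirrors the idea behind the WARN_ON() added in mac80211_hwsim_tx():
	 * a frame must not be wider than the channel it is sent on
	 */
	if (width_mhz(rate) > width_mhz(conf))
		printf("reject: %d MHz frame on a %d MHz channel\n",
		       width_mhz(rate), width_mhz(conf));
	return 0;
}
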
diff --git a/drivers/net/wireless/marvell/libertas/rx.c b/drivers/net/wireless/marvell/libertas/rx.c
index 9f24b0760e1f..c34d30f7cbe0 100644
--- a/drivers/net/wireless/marvell/libertas/rx.c
+++ b/drivers/net/wireless/marvell/libertas/rx.c
@@ -147,7 +147,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
dev->stats.rx_packets++;
skb->protocol = eth_type_trans(skb, dev);
- netif_rx_any_context(skb);
+ netif_rx(skb);
ret = 0;
done:
@@ -262,7 +262,7 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
dev->stats.rx_packets++;
skb->protocol = eth_type_trans(skb, priv->dev);
- netif_rx_any_context(skb);
+ netif_rx(skb);
ret = 0;
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
index 18e89777b784..630e1679c3f9 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
@@ -389,7 +389,7 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
{
const u8 *vendor_ie;
const u8 *wmm_ie;
- u8 wmm_oui[] = {0x00, 0x50, 0xf2, 0x02};
+ static const u8 wmm_oui[] = {0x00, 0x50, 0xf2, 0x02};
vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WMM,
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index 245ff644f81e..4e49ed21c5ce 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -350,7 +350,7 @@ int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
/* Forward multicast/broadcast packet to upper layer*/
- netif_rx_any_context(skb);
+ netif_rx(skb);
return 0;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index d583fa600a29..d5edb1e89f5b 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -488,7 +488,7 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
(skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
- netif_rx_any_context(skb);
+ netif_rx(skb);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 3a9af8931c35..02daeefb0761 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -93,7 +93,7 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
int i;
- if (!q)
+ if (!q || !q->ndesc)
return;
/* clear descriptors */
@@ -233,7 +233,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
struct mt76_queue_entry entry;
int last;
- if (!q)
+ if (!q || !q->ndesc)
return;
spin_lock_bh(&q->cleanup_lock);
@@ -448,6 +448,9 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
int len = SKB_WITH_OVERHEAD(q->buf_size);
int offset = q->buf_offset;
+ if (!q->ndesc)
+ return 0;
+
spin_lock_bh(&q->lock);
while (q->queued < q->ndesc - 1) {
@@ -465,6 +468,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
qbuf.addr = addr + offset;
qbuf.len = len - offset;
+ qbuf.skip_unmap = false;
mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
frames++;
}
@@ -484,6 +488,9 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
void *buf;
bool more;
+ if (!q->ndesc)
+ return;
+
spin_lock_bh(&q->lock);
do {
buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
@@ -508,6 +515,9 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
struct mt76_queue *q = &dev->q_rx[qid];
int i;
+ if (!q->ndesc)
+ return;
+
for (i = 0; i < q->ndesc; i++)
q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 8bb1c7ab5b50..5b53d008eb66 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -180,7 +180,7 @@ static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
{ .start_freq = 5725, .end_freq = 5950, },
};
-const struct cfg80211_sar_capa mt76_sar_capa = {
+static const struct cfg80211_sar_capa mt76_sar_capa = {
.type = NL80211_SAR_TYPE_POWER,
.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
.freq_ranges = &mt76_sar_freq_ranges[0],
@@ -823,6 +823,10 @@ void mt76_set_channel(struct mt76_phy *phy)
wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
mt76_update_survey(phy);
+ if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
+ phy->chandef.width != chandef->width)
+ phy->dfs_state = MT_DFS_STATE_UNKNOWN;
+
phy->chandef = *chandef;
phy->chan_state = mt76_channel_state(phy, chandef->chan);
@@ -928,6 +932,36 @@ void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
}
EXPORT_SYMBOL(mt76_wcid_key_setup);
+static int
+mt76_rx_signal(struct mt76_rx_status *status)
+{
+ s8 *chain_signal = status->chain_signal;
+ int signal = -128;
+ u8 chains;
+
+ for (chains = status->chains; chains; chains >>= 1, chain_signal++) {
+ int cur, diff;
+
+ cur = *chain_signal;
+ if (!(chains & BIT(0)) ||
+ cur > 0)
+ continue;
+
+ if (cur > signal)
+ swap(cur, signal);
+
+ diff = signal - cur;
+ if (diff == 0)
+ signal += 3;
+ else if (diff <= 2)
+ signal += 2;
+ else if (diff <= 6)
+ signal += 1;
+ }
+
+ return signal;
+}
+
static void
mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_hw **hw,
@@ -956,6 +990,9 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
status->ampdu_reference = mstat.ampdu_ref;
status->device_timestamp = mstat.timestamp;
status->mactime = mstat.timestamp;
+ status->signal = mt76_rx_signal(&mstat);
+ if (status->signal <= -128)
+ status->flag |= RX_FLAG_NO_SIGNAL_VAL;
if (ieee80211_is_beacon(hdr->frame_control) ||
ieee80211_is_probe_resp(hdr->frame_control))
@@ -1604,3 +1641,27 @@ void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
wi->worker_stat_count = ei - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
+
+enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
+{
+ struct ieee80211_hw *hw = phy->hw;
+ struct mt76_dev *dev = phy->dev;
+
+ if (dev->region == NL80211_DFS_UNSET ||
+ test_bit(MT76_SCANNING, &phy->state))
+ return MT_DFS_STATE_DISABLED;
+
+ if (!hw->conf.radar_enabled) {
+ if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
+ (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
+ return MT_DFS_STATE_ACTIVE;
+
+ return MT_DFS_STATE_DISABLED;
+ }
+
+ if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
+ return MT_DFS_STATE_CAC;
+
+ return MT_DFS_STATE_ACTIVE;
+}
+EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
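
mt76_rx_signal() above derives the reported RX signal from the per-chain RSSI values: it keeps the strongest chain and adds up to 3 dB of combining gain when the remaining chains are within a few dB of it. The following is a standalone sketch of that heuristic, with plain arrays in place of mt76_rx_status; the sample values are made up.

#include <stdio.h>

/* Standalone re-implementation of the combining heuristic: keep the
 * strongest chain in 'signal' and add a small bonus when the other chains
 * are close to it. This is a sketch, not the driver code.
 */
static int combine_signal(const int *chain_signal, int n_chains)
{
	int signal = -128;
	int i;

	for (i = 0; i < n_chains; i++) {
		int cur = chain_signal[i], diff;

		if (cur > 0)		/* invalid / unused chain */
			continue;

		if (cur > signal) {	/* keep the strongest value in 'signal' */
			int tmp = cur;
			cur = signal;
			signal = tmp;
		}

		diff = signal - cur;
		if (diff == 0)
			signal += 3;	/* equal chains: ~3 dB combining gain */
		else if (diff <= 2)
			signal += 2;
		else if (diff <= 6)
			signal += 1;
	}

	return signal;
}

int main(void)
{
	int chains[] = { -55, -57 };

	/* -55 and -57 dBm differ by 2 dB -> combined estimate -53 dBm */
	printf("combined: %d dBm\n", combine_signal(chains, 2));
	return 0;
}
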
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 404c3d1a70d6..882fb5d2517f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -19,7 +19,7 @@
#define MT_MCU_RING_SIZE 32
#define MT_RX_BUF_SIZE 2048
-#define MT_SKB_HEAD_LEN 128
+#define MT_SKB_HEAD_LEN 256
#define MT_MAX_NON_AQL_PKT 16
#define MT_TXQ_FREE_THR 32
@@ -85,6 +85,7 @@ enum mt76_rxq_id {
MT_RXQ_MCU_WA,
MT_RXQ_EXT,
MT_RXQ_EXT_WA,
+ MT_RXQ_MAIN_WA,
__MT_RXQ_MAX
};
@@ -104,6 +105,13 @@ enum mt76_cipher_type {
MT_CIPHER_GCMP_256,
};
+enum mt76_dfs_state {
+ MT_DFS_STATE_UNKNOWN,
+ MT_DFS_STATE_DISABLED,
+ MT_DFS_STATE_CAC,
+ MT_DFS_STATE_ACTIVE,
+};
+
struct mt76_queue_buf {
dma_addr_t addr;
u16 len;
@@ -224,7 +232,7 @@ enum mt76_wcid_flags {
MT_WCID_FLAG_HDR_TRANS,
};
-#define MT76_N_WCIDS 288
+#define MT76_N_WCIDS 544
/* stored in ieee80211_tx_info::hw_queue */
#define MT_TX_HW_QUEUE_EXT_PHY BIT(3)
@@ -496,7 +504,7 @@ struct mt76_usb {
} mcu;
};
-#define MT76S_XMIT_BUF_SZ (16 * PAGE_SIZE)
+#define MT76S_XMIT_BUF_SZ 0x3fe00
#define MT76S_NUM_TX_ENTRIES 256
#define MT76S_NUM_RX_ENTRIES 512
struct mt76_sdio {
@@ -506,7 +514,8 @@ struct mt76_sdio {
struct work_struct stat_work;
- u8 *xmit_buf[IEEE80211_NUM_ACS + 2];
+ u8 *xmit_buf;
+ u32 xmit_buf_sz;
struct sdio_func *func;
void *intr_data;
@@ -621,6 +630,7 @@ struct mt76_vif {
u8 band_idx;
u8 wmm_idx;
u8 scan_seq_num;
+ u8 cipher;
};
struct mt76_phy {
@@ -636,6 +646,7 @@ struct mt76_phy {
struct ieee80211_channel *main_chan;
struct mt76_channel_state *chan_state;
+ enum mt76_dfs_state dfs_state;
ktime_t survey_time;
struct mt76_hw_cap cap;
@@ -897,8 +908,8 @@ static inline u16 mt76_rev(struct mt76_dev *dev)
#define mt76_queue_reset(dev, ...) (dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__)
#define mt76_for_each_q_rx(dev, i) \
- for (i = 0; i < ARRAY_SIZE((dev)->q_rx) && \
- (dev)->q_rx[i].ndesc; i++)
+ for (i = 0; i < ARRAY_SIZE((dev)->q_rx); i++) \
+ if ((dev)->q_rx[i].ndesc)
struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
const struct ieee80211_ops *ops,
@@ -1181,6 +1192,7 @@ void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
+enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy);
int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len);
int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
@@ -1262,13 +1274,21 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
struct mt76_sta_stats *stats);
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
+int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
+ u16 val, u16 offset, void *buf, size_t len);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
u8 req_type, u16 val, u16 offset,
void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
const u16 offset, const u32 val);
-int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
- bool ext);
+void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
+ void *data, int len);
+u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr);
+void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
+ u32 addr, u32 val);
+int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
+ struct mt76_bus_ops *ops);
+int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
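
The mt76_for_each_q_rx() change above matters because RX rings can now be allocated sparsely (e.g. the new MT_RXQ_MAIN_WA slot): the old loop condition stopped at the first queue with ndesc == 0, while the rewritten macro walks the whole table and merely skips empty slots. A small userspace sketch of the difference, using a plain int array as the queue table; the hole at index 2 is an assumption for illustration.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Mock of the queue table: ndesc == 0 means "queue not allocated". */
static int ndesc[] = { 512, 128, 0, 128 };

/* Old style: the loop condition stops at the first empty slot. */
#define for_each_q_rx_old(i) \
	for (i = 0; i < ARRAY_SIZE(ndesc) && ndesc[i]; i++)

/* New style: iterate the whole table and skip empty slots. */
#define for_each_q_rx_new(i) \
	for (i = 0; i < ARRAY_SIZE(ndesc); i++) \
		if (ndesc[i])

int main(void)
{
	unsigned int i;

	printf("old:");
	for_each_q_rx_old(i)
		printf(" %u", i);	/* prints 0 1 - index 3 is missed */

	printf("\nnew:");
	for_each_q_rx_new(i)
		printf(" %u", i);	/* prints 0 1 3 */
	printf("\n");
	return 0;
}
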
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index 415ea17b9be6..37b092e3ea51 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -76,7 +76,7 @@ void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
__le32 *end = (__le32 *)&skb->data[skb->len];
enum rx_pkt_type type;
- type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
if (q == MT_RXQ_MCU) {
if (type == PKT_TYPE_RX_EVENT)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index a272d64808c3..17713c821d80 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -643,11 +643,6 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
status->chain_signal[1] = FIELD_GET(MT_RXV4_IB_RSSI1, rxdg3) +
dev->rssi_offset[1];
- status->signal = status->chain_signal[0];
- if (status->chains & BIT(1))
- status->signal = max(status->signal,
- status->chain_signal[1]);
-
if (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0) == 1)
status->bw = RATE_INFO_BW_40;
@@ -1135,7 +1130,7 @@ mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
}
rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
- rs_idx = !((u32)(FIELD_GET(MT_TXS1_F0_TIMESTAMP, le32_to_cpu(txs_data[1])) -
+ rs_idx = !((u32)(le32_get_bits(txs_data[1], MT_TXS1_F0_TIMESTAMP) -
rate_set_tsf) < 1000000);
rs_idx ^= rate_set_tsf & BIT(0);
rs = &sta->rateset[rs_idx];
@@ -1249,14 +1244,11 @@ void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data)
struct mt7603_sta *msta = NULL;
struct mt76_wcid *wcid;
__le32 *txs_data = data;
- u32 txs;
u8 wcidx;
u8 pid;
- txs = le32_to_cpu(txs_data[4]);
- pid = FIELD_GET(MT_TXS4_PID, txs);
- txs = le32_to_cpu(txs_data[3]);
- wcidx = FIELD_GET(MT_TXS3_WCID, txs);
+ pid = le32_get_bits(txs_data[4], MT_TXS4_PID);
+ wcidx = le32_get_bits(txs_data[3], MT_TXS3_WCID);
if (pid == MT_PACKET_ID_NO_ACK)
return;
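
Several hunks above replace FIELD_GET(MASK, le32_to_cpu(word)) with le32_get_bits(word, MASK); both forms load the little-endian descriptor word into CPU order and then mask-and-shift the field down to bit 0. A userspace sketch of that pattern, with a made-up descriptor layout:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field layout for illustration only (bits 28..24). */
#define PKT_TYPE_MASK 0x1f000000u

/* Load a little-endian 32-bit word into CPU order. */
static uint32_t le32_load(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/* What FIELD_GET() does: mask, then shift down by the mask's lowest set bit. */
static uint32_t get_bits(uint32_t word, uint32_t mask)
{
	return (word & mask) / (mask & -mask);
}

int main(void)
{
	uint8_t rxd[4] = { 0x00, 0x00, 0x00, 0x06 };	/* LE word 0x06000000 */
	uint32_t word = le32_load(rxd);

	printf("pkt type = %u\n", get_bits(word, PKT_TYPE_MASK));	/* 6 */
	return 0;
}
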
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 2b546bc05d82..83c5eec5b163 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -641,6 +641,9 @@ mt7603_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
int i;
+ if (!sta_rates)
+ return;
+
spin_lock_bh(&dev->mt76.lock);
for (i = 0; i < ARRAY_SIZE(msta->rates); i++) {
msta->rates[i].idx = sta_rates->rate[i].idx;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
index b53528014fbc..c26b45a09923 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
@@ -105,10 +105,10 @@ mt7615_pm_set(void *data, u64 val)
if (!mt7615_firmware_offload(dev) || mt76_is_usb(&dev->mt76))
return -EOPNOTSUPP;
- if (val == pm->enable)
- return 0;
+ mutex_lock(&dev->mt76.mutex);
- mt7615_mutex_acquire(dev);
+ if (val == pm->enable)
+ goto out;
if (dev->phy.n_beacon_vif) {
ret = -EBUSY;
@@ -119,9 +119,16 @@ mt7615_pm_set(void *data, u64 val)
pm->stats.last_wake_event = jiffies;
pm->stats.last_doze_event = jiffies;
}
+ /* make sure the chip is awake here and ps_work is scheduled
+ * just at the end of this routine.
+ */
+ pm->enable = false;
+ mt76_connac_pm_wake(&dev->mphy, pm);
+
pm->enable = val;
+ mt76_connac_power_save_sched(&dev->mphy, pm);
out:
- mt7615_mutex_release(dev);
+ mutex_unlock(&dev->mt76.mutex);
return ret;
}
@@ -436,11 +443,16 @@ mt7615_ext_mac_addr_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct mt7615_dev *dev = file->private_data;
- char buf[32 * ((ETH_ALEN * 3) + 4) + 1];
+ u32 len = 32 * ((ETH_ALEN * 3) + 4) + 1;
u8 addr[ETH_ALEN];
+ char *buf;
int ofs = 0;
int i;
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
for (i = 0; i < 32; i++) {
if (!(dev->muar_mask & BIT(i)))
continue;
@@ -451,10 +463,13 @@ mt7615_ext_mac_addr_read(struct file *file, char __user *userbuf,
put_unaligned_le32(mt76_rr(dev, MT_WF_RMAC_MAR0), addr);
put_unaligned_le16((mt76_rr(dev, MT_WF_RMAC_MAR1) &
MT_WF_RMAC_MAR1_ADDR), addr + 4);
- ofs += snprintf(buf + ofs, sizeof(buf) - ofs, "%d=%pM\n", i, addr);
+ ofs += snprintf(buf + ofs, len - ofs, "%d=%pM\n", i, addr);
}
- return simple_read_from_buffer(userbuf, count, ppos, buf, ofs);
+ ofs = simple_read_from_buffer(userbuf, count, ppos, buf, ofs);
+
+ kfree(buf);
+ return ofs;
}
static ssize_t
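
mt7615_ext_mac_addr_read() above switches from a large on-stack buffer to a kzalloc()'d one, formatting into the heap buffer and freeing it after the copy-out. A small userspace sketch of the same pattern; copy_out() stands in for simple_read_from_buffer() and is an assumption for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy at most 'count' bytes of the formatted text out of the scratch buffer. */
static long copy_out(char *dst, size_t count, const char *src, size_t avail)
{
	size_t n = count < avail ? count : avail;

	memcpy(dst, src, n);
	return (long)n;
}

int main(void)
{
	size_t len = 32 * 22 + 1;	/* worst-case formatted size */
	char out[128];
	char *buf;
	long ret;
	int ofs = 0;

	buf = calloc(1, len);		/* userspace stand-in for kzalloc() */
	if (!buf)
		return 1;

	ofs += snprintf(buf + ofs, len - ofs, "%d=%s\n", 0, "00:11:22:33:44:55");
	ofs += snprintf(buf + ofs, len - ofs, "%d=%s\n", 3, "66:77:88:99:aa:bb");

	ret = copy_out(out, sizeof(out) - 1, buf, ofs);
	out[ret] = '\0';
	fputs(out, stdout);

	free(buf);
	return 0;
}
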
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index a753c7476d31..a06dcbb8c673 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -552,7 +552,6 @@ void mt7615_init_device(struct mt7615_dev *dev)
dev->pm.stats.last_wake_event = jiffies;
dev->pm.stats.last_doze_event = jiffies;
mt7615_cap_dbdc_disable(dev);
- dev->phy.dfs_state = -1;
#ifdef CONFIG_NL80211_TESTMODE
dev->mt76.test_ops = &mt7615_testmode_ops;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index ec25e5a95d44..bd687f7de628 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -253,15 +253,15 @@ static void mt7615_mac_fill_tm_rx(struct mt7615_phy *phy, __le32 *rxv)
static int mt7615_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
struct mt7615_sta *msta = (struct mt7615_sta *)status->wcid;
+ __le32 *rxd = (__le32 *)skb->data;
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
struct ieee80211_hdr hdr;
- struct ethhdr eth_hdr;
- __le32 *rxd = (__le32 *)skb->data;
- __le32 qos_ctrl, ht_ctrl;
+ u16 frame_control;
- if (FIELD_GET(MT_RXD1_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[1])) !=
+ if (le32_get_bits(rxd[1], MT_RXD1_NORMAL_ADDR_TYPE) !=
MT_RXD1_NORMAL_U2M)
return -EINVAL;
@@ -275,47 +275,53 @@ static int mt7615_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
/* store the info from RXD and ethhdr to avoid being overridden */
- memcpy(&eth_hdr, skb->data + hdr_gap, sizeof(eth_hdr));
- hdr.frame_control = FIELD_GET(MT_RXD4_FRAME_CONTROL, rxd[4]);
- hdr.seq_ctrl = FIELD_GET(MT_RXD6_SEQ_CTRL, rxd[6]);
- qos_ctrl = FIELD_GET(MT_RXD6_QOS_CTL, rxd[6]);
- ht_ctrl = FIELD_GET(MT_RXD7_HT_CONTROL, rxd[7]);
-
+ frame_control = le32_get_bits(rxd[4], MT_RXD4_FRAME_CONTROL);
+ hdr.frame_control = cpu_to_le16(frame_control);
+ hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[6], MT_RXD6_SEQ_CTRL));
hdr.duration_id = 0;
+
ether_addr_copy(hdr.addr1, vif->addr);
ether_addr_copy(hdr.addr2, sta->addr);
- switch (le16_to_cpu(hdr.frame_control) &
- (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
+ switch (frame_control & (IEEE80211_FCTL_TODS |
+ IEEE80211_FCTL_FROMDS)) {
case 0:
ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
break;
case IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_source);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_source);
break;
case IEEE80211_FCTL_TODS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
break;
case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
- ether_addr_copy(hdr.addr4, eth_hdr.h_source);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
+ ether_addr_copy(hdr.addr4, eth_hdr->h_source);
break;
default:
break;
}
skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
- if (eth_hdr.h_proto == htons(ETH_P_AARP) ||
- eth_hdr.h_proto == htons(ETH_P_IPX))
+ if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
+ eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
- else if (eth_hdr.h_proto >= htons(ETH_P_802_3_MIN))
+ else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
else
skb_pull(skb, 2);
if (ieee80211_has_order(hdr.frame_control))
- memcpy(skb_push(skb, 2), &ht_ctrl, 2);
- if (ieee80211_is_data_qos(hdr.frame_control))
- memcpy(skb_push(skb, 2), &qos_ctrl, 2);
+ memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[7],
+ IEEE80211_HT_CTL_LEN);
+
+ if (ieee80211_is_data_qos(hdr.frame_control)) {
+ __le16 qos_ctrl;
+
+ qos_ctrl = cpu_to_le16(le32_get_bits(rxd[6], MT_RXD6_QOS_CTL));
+ memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
+ IEEE80211_QOS_CTL_LEN);
+ }
+
if (ieee80211_has_a4(hdr.frame_control))
memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
else
@@ -570,15 +576,6 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
status->chain_signal[1] = to_rssi(MT_RXV4_RCPI1, rxdg3);
status->chain_signal[2] = to_rssi(MT_RXV4_RCPI2, rxdg3);
status->chain_signal[3] = to_rssi(MT_RXV4_RCPI3, rxdg3);
- status->signal = status->chain_signal[0];
-
- for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
- if (!(status->chains & BIT(i)))
- continue;
-
- status->signal = max(status->signal,
- status->chain_signal[i]);
- }
mt7615_mac_fill_tm_rx(mphy->priv, rxd);
@@ -1430,7 +1427,7 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
}
rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
- rs_idx = !((u32)(FIELD_GET(MT_TXS4_F0_TIMESTAMP, le32_to_cpu(txs_data[4])) -
+ rs_idx = !((u32)(le32_get_bits(txs_data[4], MT_TXS4_F0_TIMESTAMP) -
rate_set_tsf) < 1000000);
rs_idx ^= rate_set_tsf & BIT(0);
rs = &sta->rateset[rs_idx];
@@ -1561,14 +1558,11 @@ static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
struct mt76_wcid *wcid;
struct mt76_phy *mphy = &dev->mt76.phy;
__le32 *txs_data = data;
- u32 txs;
u8 wcidx;
u8 pid;
- txs = le32_to_cpu(txs_data[0]);
- pid = FIELD_GET(MT_TXS0_PID, txs);
- txs = le32_to_cpu(txs_data[2]);
- wcidx = FIELD_GET(MT_TXS2_WCID, txs);
+ pid = le32_get_bits(txs_data[0], MT_TXS0_PID);
+ wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
if (pid == MT_PACKET_ID_NO_ACK)
return;
@@ -1642,9 +1636,10 @@ mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
mt7615_txwi_free(dev, txwi);
}
-static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
+static void mt7615_mac_tx_free(struct mt7615_dev *dev, void *data, int len)
{
- struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
+ struct mt7615_tx_free *free = (struct mt7615_tx_free *)data;
+ void *end = data + len;
u8 i, count;
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
@@ -1655,21 +1650,25 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
}
- count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
+ count = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_ID_CNT);
if (is_mt7615(&dev->mt76)) {
__le16 *token = &free->token[0];
+ if (WARN_ON_ONCE((void *)&token[count] > end))
+ return;
+
for (i = 0; i < count; i++)
mt7615_mac_tx_free_token(dev, le16_to_cpu(token[i]));
} else {
__le32 *token = (__le32 *)&free->token[0];
+ if (WARN_ON_ONCE((void *)&token[count] > end))
+ return;
+
for (i = 0; i < count; i++)
mt7615_mac_tx_free_token(dev, le32_to_cpu(token[i]));
}
- dev_kfree_skb(skb);
-
rcu_read_lock();
mt7615_mac_sta_poll(dev);
rcu_read_unlock();
@@ -1677,6 +1676,29 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
mt76_worker_schedule(&dev->mt76.tx_worker);
}
+bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ __le32 *rxd = (__le32 *)data;
+ __le32 *end = (__le32 *)&rxd[len / 4];
+ enum rx_pkt_type type;
+
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+
+ switch (type) {
+ case PKT_TYPE_TXRX_NOTIFY:
+ mt7615_mac_tx_free(dev, data, len);
+ return false;
+ case PKT_TYPE_TXS:
+ for (rxd++; rxd + 7 <= end; rxd += 7)
+ mt7615_mac_add_txs(dev, rxd);
+ return false;
+ default:
+ return true;
+ }
+}
+EXPORT_SYMBOL_GPL(mt7615_rx_check);
+
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
{
@@ -1686,8 +1708,8 @@ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
enum rx_pkt_type type;
u16 flag;
- type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
- flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0]));
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+ flag = le32_get_bits(rxd[0], MT_RXD0_PKT_FLAG);
if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
type = PKT_TYPE_NORMAL_MCU;
@@ -1698,7 +1720,8 @@ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
dev_kfree_skb(skb);
break;
case PKT_TYPE_TXRX_NOTIFY:
- mt7615_mac_tx_free(dev, skb);
+ mt7615_mac_tx_free(dev, skb->data, skb->len);
+ dev_kfree_skb(skb);
break;
case PKT_TYPE_RX_EVENT:
mt7615_mcu_rx_event(dev, skb);
@@ -1835,7 +1858,7 @@ mt7615_mac_adjust_sensitivity(struct mt7615_phy *phy,
struct mt7615_dev *dev = phy->dev;
int false_cca = ofdm ? phy->false_cca_ofdm : phy->false_cca_cck;
bool ext_phy = phy != &dev->phy;
- u16 def_th = ofdm ? -98 : -110;
+ s16 def_th = ofdm ? -98 : -110;
bool update = false;
s8 *sensitivity;
int signal;
@@ -2068,6 +2091,7 @@ void mt7615_pm_wake_work(struct work_struct *work)
int i;
if (mt76_is_sdio(mdev)) {
+ mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
mt76_worker_schedule(&mdev->sdio.txrx_worker);
} else {
mt76_for_each_q_rx(mdev, i)
@@ -2103,6 +2127,14 @@ void mt7615_pm_power_save_work(struct work_struct *work)
test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state))
goto out;
+ if (mutex_is_locked(&dev->mt76.mutex))
+ /* if the mt76 mutex is held, we should not put the device
+ * to sleep since we are currently accessing the device
+ * register map. We need to wait for the next power_save
+ * trigger.
+ */
+ goto out;
+
if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
delta = dev->pm.last_activity + delta - jiffies;
goto out;
@@ -2160,21 +2192,24 @@ static void mt7615_dfs_stop_radar_detector(struct mt7615_phy *phy)
struct mt7615_dev *dev = phy->dev;
if (phy->rdd_state & BIT(0))
- mt7615_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0);
+ mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0,
+ MT_RX_SEL0, 0);
if (phy->rdd_state & BIT(1))
- mt7615_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0);
+ mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1,
+ MT_RX_SEL0, 0);
}
static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain)
{
int err;
- err = mt7615_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
+ MT_RX_SEL0, 0);
if (err < 0)
return err;
- return mt7615_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
- MT_RX_SEL0, 1);
+ return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
+ MT_RX_SEL0, 1);
}
static int mt7615_dfs_start_radar_detector(struct mt7615_phy *phy)
@@ -2185,7 +2220,8 @@ static int mt7615_dfs_start_radar_detector(struct mt7615_phy *phy)
int err;
/* start CAC */
- err = mt7615_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, ext_phy,
+ MT_RX_SEL0, 0);
if (err < 0)
return err;
@@ -2246,50 +2282,60 @@ mt7615_dfs_init_radar_specs(struct mt7615_phy *phy)
int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy)
{
- struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
struct mt7615_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
+ enum mt76_dfs_state dfs_state, prev_state;
int err;
if (is_mt7663(&dev->mt76))
return 0;
- if (dev->mt76.region == NL80211_DFS_UNSET) {
- phy->dfs_state = -1;
- if (phy->rdd_state)
- goto stop;
+ prev_state = phy->mt76->dfs_state;
+ dfs_state = mt76_phy_dfs_state(phy->mt76);
+ if (prev_state == dfs_state)
return 0;
- }
- if (test_bit(MT76_SCANNING, &phy->mt76->state))
- return 0;
+ if (prev_state == MT_DFS_STATE_UNKNOWN)
+ mt7615_dfs_stop_radar_detector(phy);
- if (phy->dfs_state == chandef->chan->dfs_state)
- return 0;
-
- err = mt7615_dfs_init_radar_specs(phy);
- if (err < 0) {
- phy->dfs_state = -1;
+ if (dfs_state == MT_DFS_STATE_DISABLED)
goto stop;
- }
- phy->dfs_state = chandef->chan->dfs_state;
+ if (prev_state <= MT_DFS_STATE_DISABLED) {
+ err = mt7615_dfs_init_radar_specs(phy);
+ if (err < 0)
+ return err;
+
+ err = mt7615_dfs_start_radar_detector(phy);
+ if (err < 0)
+ return err;
+
+ phy->mt76->dfs_state = MT_DFS_STATE_CAC;
+ }
- if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
- if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
- return mt7615_dfs_start_radar_detector(phy);
+ if (dfs_state == MT_DFS_STATE_CAC)
+ return 0;
- return mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy,
- MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
+ ext_phy, MT_RX_SEL0, 0);
+ if (err < 0) {
+ phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
+ return err;
}
+ phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
+ return 0;
+
stop:
- err = mt7615_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy, MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START, ext_phy,
+ MT_RX_SEL0, 0);
if (err < 0)
return err;
mt7615_dfs_stop_radar_detector(phy);
+ phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;
+
return 0;
}
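
The DFS rework above replaces the per-driver chandef checks with a cached enum mt76_dfs_state: mt76_phy_dfs_state() computes the desired state and mt7615_dfs_init_radar_detector() only reprograms the radar detector when that state changes (start the detector and enter CAC when coming from disabled/unknown, finish CAC when moving to active, stop on disabled). A simplified standalone sketch of those transitions follows; it compresses the kernel flow and drops the firmware commands.

#include <stdio.h>

enum dfs_state { DFS_UNKNOWN, DFS_DISABLED, DFS_CAC, DFS_ACTIVE };

static const char *name[] = { "unknown", "disabled", "cac", "active" };

/* Return the new cached state for a computed target state 'next'. */
static enum dfs_state update(enum dfs_state prev, enum dfs_state next)
{
	if (prev == next)
		return prev;		/* nothing to reprogram */

	if (next == DFS_DISABLED)
		return DFS_DISABLED;	/* stop the radar detector */

	if (prev <= DFS_DISABLED)
		prev = DFS_CAC;		/* start the detector, begin CAC */

	if (next == DFS_CAC)
		return prev;		/* stay in CAC until it completes */

	return DFS_ACTIVE;		/* CAC done, channel usable */
}

int main(void)
{
	enum dfs_state s = DFS_UNKNOWN;

	s = update(s, DFS_CAC);		/* radar channel selected */
	printf("-> %s\n", name[s]);
	s = update(s, DFS_ACTIVE);	/* CAC period completed */
	printf("-> %s\n", name[s]);
	return 0;
}
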
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 82d625a16a62..d79cbdbd5a05 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -291,7 +291,8 @@ static void mt7615_init_dfs_state(struct mt7615_phy *phy)
if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
return;
- if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
+ if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
+ !(mphy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
return;
if (mphy->chandef.chan->center_freq == chandef->chan->center_freq &&
@@ -365,6 +366,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv :
&mvif->sta;
@@ -403,6 +405,11 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mt7615_mutex_acquire(dev);
+ if (cmd == SET_KEY && !sta && !mvif->mt76.cipher) {
+ mvif->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher);
+ mt7615_mcu_add_bss_info(phy, vif, NULL, true);
+ }
+
if (cmd == SET_KEY)
*wcid_keyidx = idx;
else if (idx == *wcid_keyidx)
@@ -424,6 +431,29 @@ out:
return err;
}
+static int mt7615_set_sar_specs(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar)
+{
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ int err;
+
+ if (!cfg80211_chandef_valid(&phy->mt76->chandef))
+ return -EINVAL;
+
+ err = mt76_init_sar_power(hw, sar);
+ if (err)
+ return err;
+
+ if (mt7615_firmware_offload(phy->dev))
+ return mt76_connac_mcu_set_rate_txpower(phy->mt76);
+
+ ieee80211_stop_queues(hw);
+ err = mt7615_set_channel(phy);
+ ieee80211_wake_queues(hw);
+
+ return err;
+}
+
static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
@@ -683,6 +713,9 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
int i;
+ if (!sta_rates)
+ return;
+
spin_lock_bh(&dev->mt76.lock);
for (i = 0; i < ARRAY_SIZE(msta->rates); i++) {
msta->rates[i].idx = sta_rates->rate[i].idx;
@@ -1323,6 +1356,7 @@ const struct ieee80211_ops mt7615_ops = {
.set_wakeup = mt7615_set_wakeup,
.set_rekey_data = mt7615_set_rekey_data,
#endif /* CONFIG_PM */
+ .set_sar_specs = mt7615_set_sar_specs,
};
EXPORT_SYMBOL_GPL(mt7615_ops);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index 759dcf0e6783..97e2a85cb728 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -71,19 +71,6 @@ struct mt7663_fw_buf {
#define IMG_CRC_LEN 4
-#define FW_FEATURE_SET_ENCRYPT BIT(0)
-#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
-
-#define DL_MODE_ENCRYPT BIT(0)
-#define DL_MODE_KEY_IDX GENMASK(2, 1)
-#define DL_MODE_RESET_SEC_IV BIT(3)
-#define DL_MODE_WORKING_PDA_CR4 BIT(4)
-#define DL_MODE_VALID_RAM_ENTRY BIT(5)
-#define DL_MODE_NEED_RSP BIT(31)
-
-#define FW_START_OVERRIDE BIT(0)
-#define FW_START_WORKING_PDA_CR4 BIT(2)
-
void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
int cmd, int *wait_seq)
{
@@ -756,145 +743,7 @@ out:
static int
mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int band, int state)
{
-#define ENTER_PM_STATE 1
-#define EXIT_PM_STATE 2
- struct {
- u8 pm_number;
- u8 pm_state;
- u8 bssid[ETH_ALEN];
- u8 dtim_period;
- u8 wlan_idx;
- __le16 bcn_interval;
- __le32 aid;
- __le32 rx_filter;
- u8 band_idx;
- u8 rsv[3];
- __le32 feature;
- u8 omac_idx;
- u8 wmm_idx;
- u8 bcn_loss_cnt;
- u8 bcn_sp_duration;
- } __packed req = {
- .pm_number = 5,
- .pm_state = state ? ENTER_PM_STATE : EXIT_PM_STATE,
- .band_idx = band,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(PM_STATE_CTRL),
- &req, sizeof(req), true);
-}
-
-static int
-mt7615_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, struct mt7615_phy *phy,
- bool enable)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- u32 type = vif->p2p ? NETWORK_P2P : NETWORK_INFRA;
- struct bss_info_basic *bss;
- u8 wlan_idx = mvif->sta.wcid.idx;
- struct tlv *tlv;
-
- tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BASIC, sizeof(*bss));
-
- switch (vif->type) {
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_MONITOR:
- break;
- case NL80211_IFTYPE_STATION:
- /* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
- if (enable && sta) {
- struct mt7615_sta *msta;
-
- msta = (struct mt7615_sta *)sta->drv_priv;
- wlan_idx = msta->wcid.idx;
- }
- break;
- case NL80211_IFTYPE_ADHOC:
- type = NETWORK_IBSS;
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- bss = (struct bss_info_basic *)tlv;
- bss->network_type = cpu_to_le32(type);
- bss->bmc_wcid_lo = wlan_idx;
- bss->wmm_idx = mvif->mt76.wmm_idx;
- bss->active = enable;
-
- if (vif->type != NL80211_IFTYPE_MONITOR) {
- memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
- bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
- bss->dtim_period = vif->bss_conf.dtim_period;
- } else {
- memcpy(bss->bssid, phy->mt76->macaddr, ETH_ALEN);
- }
-
- return 0;
-}
-
-static void
-mt7615_mcu_bss_omac_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- u8 omac_idx = mvif->mt76.omac_idx;
- struct bss_info_omac *omac;
- struct tlv *tlv;
- u32 type = 0;
-
- tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_OMAC, sizeof(*omac));
-
- switch (vif->type) {
- case NL80211_IFTYPE_MONITOR:
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_AP:
- if (vif->p2p)
- type = CONNECTION_P2P_GO;
- else
- type = CONNECTION_INFRA_AP;
- break;
- case NL80211_IFTYPE_STATION:
- if (vif->p2p)
- type = CONNECTION_P2P_GC;
- else
- type = CONNECTION_INFRA_STA;
- break;
- case NL80211_IFTYPE_ADHOC:
- type = CONNECTION_IBSS_ADHOC;
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- omac = (struct bss_info_omac *)tlv;
- omac->conn_type = cpu_to_le32(type);
- omac->omac_idx = mvif->mt76.omac_idx;
- omac->band_idx = mvif->mt76.band_idx;
- omac->hw_bss_idx = omac_idx > EXT_BSSID_START ? HW_BSSID_0 : omac_idx;
-}
-
-/* SIFS 20us + 512 byte beacon tranmitted by 1Mbps (3906us) */
-#define BCN_TX_ESTIMATE_TIME (4096 + 20)
-static void
-mt7615_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt7615_vif *mvif)
-{
- struct bss_info_ext_bss *ext;
- int ext_bss_idx, tsf_offset;
- struct tlv *tlv;
-
- ext_bss_idx = mvif->mt76.omac_idx - EXT_BSSID_START;
- if (ext_bss_idx < 0)
- return;
-
- tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_EXT_BSS, sizeof(*ext));
-
- ext = (struct bss_info_ext_bss *)tlv;
- tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
- ext->mbss_tsf_offset = cpu_to_le32(tsf_offset);
+ return mt76_connac_mcu_set_pm(&dev->mt76, band, state);
}
static int
@@ -913,13 +762,14 @@ mt7615_mcu_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
return PTR_ERR(skb);
if (enable)
- mt7615_mcu_bss_omac_tlv(skb, vif);
+ mt76_connac_mcu_bss_omac_tlv(skb, vif);
- mt7615_mcu_bss_basic_tlv(skb, vif, sta, phy, enable);
+ mt76_connac_mcu_bss_basic_tlv(skb, vif, sta, phy->mt76,
+ mvif->sta.wcid.idx, enable);
if (enable && mvif->mt76.omac_idx >= EXT_BSSID_START &&
mvif->mt76.omac_idx < REPEATER_BSSID_START)
- mt7615_mcu_bss_ext_tlv(skb, mvif);
+ mt76_connac_mcu_bss_ext_tlv(skb, &mvif->mt76);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD(BSS_INFO_UPDATE), true);
@@ -1030,7 +880,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
NULL, wtbl_hdr);
if (sta)
mt76_connac_mcu_wtbl_ht_tlv(&dev->mt76, wskb, sta,
- NULL, wtbl_hdr);
+ NULL, wtbl_hdr, true, true);
mt76_connac_mcu_wtbl_hdr_trans_tlv(wskb, vif, &msta->wcid,
NULL, wtbl_hdr);
}
@@ -1057,19 +907,7 @@ mt7615_mcu_wtbl_update_hdr_trans(struct mt7615_dev *dev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
- struct wtbl_req_hdr *wtbl_hdr;
- struct sk_buff *skb = NULL;
-
- wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
- WTBL_SET, NULL, &skb);
- if (IS_ERR(wtbl_hdr))
- return PTR_ERR(wtbl_hdr);
-
- mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, &msta->wcid, NULL,
- wtbl_hdr);
- return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD(WTBL_UPDATE), true);
+ return mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
}
static const struct mt7615_mcu_ops wtbl_update_ops = {
@@ -1303,7 +1141,8 @@ mt7615_mcu_uni_tx_ba(struct mt7615_dev *dev,
struct mt7615_sta *sta = (struct mt7615_sta *)params->sta->drv_priv;
return mt76_connac_mcu_sta_ba(&dev->mt76, &sta->vif->mt76, params,
- enable, true);
+ MCU_UNI_CMD(STA_REC_UPDATE), enable,
+ true);
}
static int
@@ -1451,20 +1290,6 @@ release_fw:
return ret;
}
-static u32 mt7615_mcu_gen_dl_mode(u8 feature_set, bool is_cr4)
-{
- u32 ret = 0;
-
- ret |= (feature_set & FW_FEATURE_SET_ENCRYPT) ?
- (DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV) : 0;
- ret |= FIELD_PREP(DL_MODE_KEY_IDX,
- FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
- ret |= DL_MODE_NEED_RSP;
- ret |= is_cr4 ? DL_MODE_WORKING_PDA_CR4 : 0;
-
- return ret;
-}
-
static int
mt7615_mcu_send_ram_firmware(struct mt7615_dev *dev,
const struct mt7615_fw_trailer *hdr,
@@ -1475,7 +1300,8 @@ mt7615_mcu_send_ram_firmware(struct mt7615_dev *dev,
u32 len, addr, mode;
for (i = 0; i < n_region; i++) {
- mode = mt7615_mcu_gen_dl_mode(hdr[i].feature_set, is_cr4);
+ mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76,
+ hdr[i].feature_set, is_cr4);
len = le32_to_cpu(hdr[i].len) + IMG_CRC_LEN;
addr = le32_to_cpu(hdr[i].addr);
@@ -1723,7 +1549,8 @@ static int mt7663_load_n9(struct mt7615_dev *dev, const char *name)
dev_info(dev->mt76.dev, "Parsing tailer Region: %d\n", i);
buf = (const struct mt7663_fw_buf *)(base_addr - shift);
- mode = mt7615_mcu_gen_dl_mode(buf->feature_set, false);
+ mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76,
+ buf->feature_set, false);
addr = le32_to_cpu(buf->img_dest_addr);
len = le32_to_cpu(buf->img_size);
@@ -2064,27 +1891,6 @@ int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev)
&req, sizeof(req), true);
}
-int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
- enum mt7615_rdd_cmd cmd, u8 index,
- u8 rx_sel, u8 val)
-{
- struct {
- u8 ctrl;
- u8 rdd_idx;
- u8 rdd_rx_sel;
- u8 val;
- u8 rsv[4];
- } req = {
- .ctrl = cmd,
- .rdd_idx = index,
- .rdd_rx_sel = rx_sel,
- .val = val,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_CTRL),
- &req, sizeof(req), true);
-}
-
int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val)
{
struct {
@@ -2214,7 +2020,7 @@ static void mt7615_mcu_set_txpower_sku(struct mt7615_phy *phy, u8 *sku)
struct mt76_power_limits limits;
s8 *limits_array = (s8 *)&limits;
int n_chains = hweight8(mphy->antenna_mask);
- int tx_power;
+ int tx_power = hw->conf.power_level * 2;
int i;
static const u8 sku_mapping[] = {
#define SKU_FIELD(_type, _field) \
@@ -2271,9 +2077,8 @@ static void mt7615_mcu_set_txpower_sku(struct mt7615_phy *phy, u8 *sku)
#undef SKU_FIELD
};
- tx_power = hw->conf.power_level * 2 -
- mt76_tx_power_nss_delta(n_chains);
-
+ tx_power = mt76_get_sar_power(mphy, mphy->chandef.chan, tx_power);
+ tx_power -= mt76_tx_power_nss_delta(n_chains);
tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan,
&limits, tx_power);
mphy->txpower_cur = tx_power;
@@ -2346,10 +2151,13 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
.center_chan2 = ieee80211_frequency_to_channel(freq2),
};
- if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
+ dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
+ req.switch_reason = CH_SWITCH_NORMAL;
+ else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
- else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
- chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
+ else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
+ NL80211_IFTYPE_AP))
req.switch_reason = CH_SWITCH_DFS;
else
req.switch_reason = CH_SWITCH_NORMAL;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
index 33f72f3657d0..ce45c3bfc443 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -194,6 +194,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
.token_size = MT7615_TOKEN_SIZE,
.tx_prepare_skb = mt7615_tx_prepare_skb,
.tx_complete_skb = mt7615_tx_complete_skb,
+ .rx_check = mt7615_rx_check,
.rx_skb = mt7615_queue_rx_skb,
.rx_poll_complete = mt7615_rx_poll_complete,
.sta_ps = mt7615_sta_ps,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index 6ff6d5800918..2e91f6a27d0f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -403,30 +403,9 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd);
int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
const struct ieee80211_tx_queue_params *params);
void mt7615_mcu_rx_event(struct mt7615_dev *dev, struct sk_buff *skb);
-int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
- enum mt7615_rdd_cmd cmd, u8 index,
- u8 rx_sel, u8 val);
int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev);
int mt7615_mcu_fw_log_2_host(struct mt7615_dev *dev, u8 ctrl);
-static inline bool is_mt7622(struct mt76_dev *dev)
-{
- if (!IS_ENABLED(CONFIG_MT7622_WMAC))
- return false;
-
- return mt76_chip(dev) == 0x7622;
-}
-
-static inline bool is_mt7615(struct mt76_dev *dev)
-{
- return mt76_chip(dev) == 0x7615 || mt76_chip(dev) == 0x7611;
-}
-
-static inline bool is_mt7611(struct mt76_dev *dev)
-{
- return mt76_chip(dev) == 0x7611;
-}
-
static inline void mt7615_irq_enable(struct mt7615_dev *dev, u32 mask)
{
mt76_set_irq_mask(&dev->mt76, 0, 0, mask);
@@ -530,6 +509,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
void mt7615_tx_worker(struct mt76_worker *w);
void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7615_tx_token_put(struct mt7615_dev *dev);
+bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len);
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
@@ -579,6 +559,7 @@ void mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
struct mt76_queue_entry *e);
int mt7663_usb_sdio_register_device(struct mt7615_dev *dev);
int mt7663u_mcu_init(struct mt7615_dev *dev);
+int mt7663u_mcu_power_on(struct mt7615_dev *dev);
/* sdio */
int mt7663s_mcu_init(struct mt7615_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
index 31c4a76b7f91..49ab3a1f3b9b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
@@ -56,7 +56,10 @@ static int mt7663s_parse_intr(struct mt76_dev *dev, struct mt76s_intr *intr)
struct mt7663s_intr *irq_data = sdio->intr_data;
int i, err;
+ sdio_claim_host(sdio->func);
err = sdio_readsb(sdio->func, irq_data, MCR_WHISR, sizeof(*irq_data));
+ sdio_release_host(sdio->func);
+
if (err)
return err;
@@ -98,7 +101,7 @@ static int mt7663s_probe(struct sdio_func *func,
struct ieee80211_ops *ops;
struct mt7615_dev *dev;
struct mt76_dev *mdev;
- int i, ret;
+ int ret;
ops = devm_kmemdup(&func->dev, &mt7615_ops, sizeof(mt7615_ops),
GFP_KERNEL);
@@ -137,16 +140,6 @@ static int mt7663s_probe(struct sdio_func *func,
goto error;
}
- for (i = 0; i < ARRAY_SIZE(mdev->sdio.xmit_buf); i++) {
- mdev->sdio.xmit_buf[i] = devm_kmalloc(mdev->dev,
- MT76S_XMIT_BUF_SZ,
- GFP_KERNEL);
- if (!mdev->sdio.xmit_buf[i]) {
- ret = -ENOMEM;
- goto error;
- }
- }
-
ret = mt76s_alloc_rx_queue(mdev, MT_RXQ_MAIN);
if (ret)
goto error;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
index 0396ad532ba6..967641aebf5f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
@@ -17,9 +17,68 @@
static const struct usb_device_id mt7615_device_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7663, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x043e, 0x310c, 0xff, 0xff, 0xff) },
{ },
};
+static u32 mt7663u_rr(struct mt76_dev *dev, u32 addr)
+{
+ u32 ret;
+
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ret = ___mt76u_rr(dev, MT_VEND_READ_EXT,
+ USB_DIR_IN | USB_TYPE_VENDOR, addr);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return ret;
+}
+
+static void mt7663u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ___mt76u_wr(dev, MT_VEND_WRITE_EXT,
+ USB_DIR_OUT | USB_TYPE_VENDOR, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+
+static u32 mt7663u_rmw(struct mt76_dev *dev, u32 addr,
+ u32 mask, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ val |= ___mt76u_rr(dev, MT_VEND_READ_EXT,
+ USB_DIR_IN | USB_TYPE_VENDOR, addr) & ~mask;
+ ___mt76u_wr(dev, MT_VEND_WRITE_EXT,
+ USB_DIR_OUT | USB_TYPE_VENDOR, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return val;
+}
+
+static void mt7663u_copy(struct mt76_dev *dev, u32 offset,
+ const void *data, int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+ int ret, i = 0, batch_len;
+ const u8 *val = data;
+
+ len = round_up(len, 4);
+
+ mutex_lock(&usb->usb_ctrl_mtx);
+ while (i < len) {
+ batch_len = min_t(int, usb->data_len, len - i);
+ memcpy(usb->data, val + i, batch_len);
+ ret = __mt76u_vendor_request(dev, MT_VEND_WRITE_EXT,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ (offset + i) >> 16, offset + i,
+ usb->data, batch_len);
+ if (ret < 0)
+ break;
+
+ i += batch_len;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+}
+
static void mt7663u_stop(struct ieee80211_hw *hw)
{
struct mt7615_phy *phy = mt7615_hw_phy(hw);
@@ -65,6 +124,14 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
.sta_remove = mt7615_mac_sta_remove,
.update_survey = mt7615_update_channel,
};
+ static struct mt76_bus_ops bus_ops = {
+ .rr = mt7663u_rr,
+ .wr = mt7663u_wr,
+ .rmw = mt7663u_rmw,
+ .read_copy = mt76u_read_copy,
+ .write_copy = mt7663u_copy,
+ .type = MT76_BUS_USB,
+ };
struct usb_device *udev = interface_to_usbdev(usb_intf);
struct ieee80211_ops *ops;
struct mt7615_dev *dev;
@@ -91,7 +158,7 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
INIT_WORK(&dev->mcu_work, mt7663u_init_work);
dev->reg_map = mt7663_usb_sdio_reg_map;
dev->ops = ops;
- ret = mt76u_init(mdev, usb_intf, true);
+ ret = __mt76u_init(mdev, usb_intf, &bus_ops);
if (ret < 0)
goto error;
@@ -99,27 +166,15 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
(mt76_rr(dev, MT_HW_REV) & 0xff);
dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
- if (mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON,
- FW_STATE_PWR_ON << 1, 500)) {
- dev_dbg(dev->mt76.dev, "Usb device already powered on\n");
- set_bit(MT76_STATE_POWER_OFF, &dev->mphy.state);
- goto alloc_queues;
- }
-
- ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON,
- USB_DIR_OUT | USB_TYPE_VENDOR,
- 0x0, 0x1, NULL, 0);
- if (ret)
- goto error;
-
if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON,
FW_STATE_PWR_ON << 1, 500)) {
- dev_err(dev->mt76.dev, "Timeout for power on\n");
- ret = -EIO;
- goto error;
+ ret = mt7663u_mcu_power_on(dev);
+ if (ret)
+ goto error;
+ } else {
+ set_bit(MT76_STATE_POWER_OFF, &dev->mphy.state);
}
-alloc_queues:
ret = mt76u_alloc_mcu_queue(&dev->mt76);
if (ret)
goto error;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
index 0ebb4c3c336a..98bf2f6ae936 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
@@ -42,6 +42,26 @@ out:
return ret;
}
+int mt7663u_mcu_power_on(struct mt7615_dev *dev)
+{
+ int ret;
+
+ ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0x0, 0x1, NULL, 0);
+ if (ret)
+ return ret;
+
+ if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
+ MT_TOP_MISC2_FW_PWR_ON,
+ FW_STATE_PWR_ON << 1, 500)) {
+ dev_err(dev->mt76.dev, "Timeout for power on\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
int mt7663u_mcu_init(struct mt7615_dev *dev)
{
static const struct mt76_mcu_ops mt7663u_mcu_ops = {
@@ -57,23 +77,17 @@ int mt7663u_mcu_init(struct mt7615_dev *dev)
mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
if (test_and_clear_bit(MT76_STATE_POWER_OFF, &dev->mphy.state)) {
- mt7615_mcu_restart(&dev->mt76);
+ ret = mt7615_mcu_restart(&dev->mt76);
+ if (ret)
+ return ret;
+
if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
MT_TOP_MISC2_FW_PWR_ON, 0, 500))
return -EIO;
- ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON,
- USB_DIR_OUT | USB_TYPE_VENDOR,
- 0x0, 0x1, NULL, 0);
+ ret = mt7663u_mcu_power_on(dev);
if (ret)
return ret;
-
- if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
- MT_TOP_MISC2_FW_PWR_ON,
- FW_STATE_PWR_ON << 1, 500)) {
- dev_err(dev->mt76.dev, "Timeout for power on\n");
- return -EIO;
- }
}
ret = __mt7663_load_firmware(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
index e7f01c2978a2..400ba514460e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
@@ -45,9 +45,11 @@ enum {
};
struct mt76_connac_pm {
- bool enable;
- bool ds_enable;
- bool suspended;
+ bool enable:1;
+ bool enable_user:1;
+ bool ds_enable:1;
+ bool ds_enable_user:1;
+ bool suspended:1;
spinlock_t txq_lock;
struct {
@@ -83,6 +85,11 @@ struct mt76_connac_coredump {
unsigned long last_activity;
};
+struct mt76_connac_sta_key_conf {
+ s8 keyidx;
+ u8 key[16];
+};
+
extern const struct wiphy_wowlan_support mt76_connac_wowlan_support;
static inline bool is_mt7922(struct mt76_dev *dev)
@@ -100,6 +107,69 @@ static inline bool is_mt7663(struct mt76_dev *dev)
return mt76_chip(dev) == 0x7663;
}
+static inline bool is_mt7915(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7915;
+}
+
+static inline bool is_mt7916(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7906;
+}
+
+static inline bool is_mt7986(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7986;
+}
+
+static inline bool is_mt7622(struct mt76_dev *dev)
+{
+ if (!IS_ENABLED(CONFIG_MT7622_WMAC))
+ return false;
+
+ return mt76_chip(dev) == 0x7622;
+}
+
+static inline bool is_mt7615(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7615 || mt76_chip(dev) == 0x7611;
+}
+
+static inline bool is_mt7611(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7611;
+}
+
+static inline bool is_connac_v1(struct mt76_dev *dev)
+{
+ return is_mt7615(dev) || is_mt7663(dev) || is_mt7622(dev);
+}
+
+static inline u8 mt76_connac_chan_bw(struct cfg80211_chan_def *chandef)
+{
+ static const u8 width_to_bw[] = {
+ [NL80211_CHAN_WIDTH_40] = CMD_CBW_40MHZ,
+ [NL80211_CHAN_WIDTH_80] = CMD_CBW_80MHZ,
+ [NL80211_CHAN_WIDTH_80P80] = CMD_CBW_8080MHZ,
+ [NL80211_CHAN_WIDTH_160] = CMD_CBW_160MHZ,
+ [NL80211_CHAN_WIDTH_5] = CMD_CBW_5MHZ,
+ [NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
+ [NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
+ [NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
+ };
+
+ if (chandef->width >= ARRAY_SIZE(width_to_bw))
+ return 0;
+
+ return width_to_bw[chandef->width];
+}
+
+static inline u8 mt76_connac_lmac_mapping(u8 ac)
+{
+ /* LMAC uses the reverse order of mac80211 AC indexes */
+ return 3 - ac;
+}
+
int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm);
void mt76_connac_power_save_sched(struct mt76_phy *phy,
struct mt76_connac_pm *pm);
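
A minimal usage sketch for the two small inline helpers introduced above; the caller and variable names are illustrative and not taken from the patch:

	/* illustrative caller -- not part of the patch */
	static void example_fill_txq_and_bw(struct mt76_phy *mphy, u8 *bw, u8 *txq)
	{
		/* firmware channel-bandwidth code (CMD_CBW_*) for the current chandef */
		*bw = mt76_connac_chan_bw(&mphy->chandef);

		/* mac80211 AC_VO (index 0) lands on LMAC queue 3, AC_BK (3) on queue 0 */
		*txq = mt76_connac_lmac_mapping(IEEE80211_AC_VO);
	}
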
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index f79e3d5084f3..7cb17bf40e35 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -62,8 +62,8 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len,
};
int cmd;
- if (is_mt7921(dev) &&
- (req.addr == cpu_to_le32(MCU_PATCH_ADDRESS) || addr == 0x900000))
+ if ((!is_connac_v1(dev) && addr == MCU_PATCH_ADDRESS) ||
+ (is_mt7921(dev) && addr == 0x900000))
cmd = MCU_CMD(PATCH_START_REQ);
else
cmd = MCU_CMD(TARGET_ADDRESS_LEN_REQ);
@@ -266,8 +266,8 @@ mt76_connac_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_nested_tlv);
struct sk_buff *
-mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
- struct mt76_wcid *wcid)
+__mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
+ struct mt76_wcid *wcid, int len)
{
struct sta_req_hdr hdr = {
.bss_idx = mvif->idx,
@@ -278,7 +278,7 @@ mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo,
&hdr.wlan_idx_hi);
- skb = mt76_mcu_msg_alloc(dev, NULL, MT76_CONNAC_STA_UPDATE_MAX_SIZE);
+ skb = mt76_mcu_msg_alloc(dev, NULL, len);
if (!skb)
return ERR_PTR(-ENOMEM);
@@ -286,7 +286,7 @@ mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
return skb;
}
-EXPORT_SYMBOL_GPL(mt76_connac_mcu_alloc_sta_req);
+EXPORT_SYMBOL_GPL(__mt76_connac_mcu_alloc_sta_req);
struct wtbl_req_hdr *
mt76_connac_mcu_alloc_wtbl_req(struct mt76_dev *dev, struct mt76_wcid *wcid,
@@ -310,12 +310,54 @@ mt76_connac_mcu_alloc_wtbl_req(struct mt76_dev *dev, struct mt76_wcid *wcid,
}
if (sta_hdr)
- sta_hdr->len = cpu_to_le16(sizeof(hdr));
+ le16_add_cpu(&sta_hdr->len, sizeof(hdr));
return skb_put_data(nskb, &hdr, sizeof(hdr));
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_alloc_wtbl_req);
+void mt76_connac_mcu_bss_omac_tlv(struct sk_buff *skb,
+ struct ieee80211_vif *vif)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ u8 omac_idx = mvif->omac_idx;
+ struct bss_info_omac *omac;
+ struct tlv *tlv;
+ u32 type = 0;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ if (vif->p2p)
+ type = CONNECTION_P2P_GO;
+ else
+ type = CONNECTION_INFRA_AP;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (vif->p2p)
+ type = CONNECTION_P2P_GC;
+ else
+ type = CONNECTION_INFRA_STA;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ type = CONNECTION_IBSS_ADHOC;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_OMAC, sizeof(*omac));
+
+ omac = (struct bss_info_omac *)tlv;
+ omac->conn_type = cpu_to_le32(type);
+ omac->omac_idx = mvif->omac_idx;
+ omac->band_idx = mvif->band_idx;
+ omac->hw_bss_idx = omac_idx > EXT_BSSID_START ? HW_BSSID_0 : omac_idx;
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_omac_tlv);
+
void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -376,9 +418,8 @@ void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_basic_tlv);
-static void
-mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+void mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
struct sta_rec_uapsd *uapsd;
struct tlv *tlv;
@@ -407,6 +448,7 @@ mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
}
uapsd->max_sp = sta->max_sp;
}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_uapsd);
void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
@@ -420,13 +462,17 @@ void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
sizeof(*htr),
wtbl_tlv, sta_wtbl);
htr = (struct wtbl_hdr_trans *)tlv;
- htr->no_rx_trans = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags);
+ htr->no_rx_trans = true;
if (vif->type == NL80211_IFTYPE_STATION)
htr->to_ds = true;
else
htr->from_ds = true;
+ if (!wcid)
+ return;
+
+ htr->no_rx_trans = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags);
if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) {
htr->to_ds = true;
htr->from_ds = true;
@@ -461,6 +507,25 @@ int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_update_hdr_trans);
+int mt76_connac_mcu_wtbl_update_hdr_trans(struct mt76_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct sk_buff *skb = NULL;
+
+ wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, wcid, WTBL_SET, NULL,
+ &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, wcid, NULL, wtbl_hdr);
+
+ return mt76_mcu_skb_send_msg(dev, skb, MCU_EXT_CMD(WTBL_UPDATE), true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_update_hdr_trans);
+
void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
struct sk_buff *skb,
struct ieee80211_vif *vif,
@@ -488,8 +553,7 @@ void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
generic->muar_idx = mvif->omac_idx;
generic->qos = sta->wme;
} else {
- if (is_mt7921(dev) &&
- vif->type == NL80211_IFTYPE_STATION)
+ if (!is_connac_v1(dev) && vif->type == NL80211_IFTYPE_STATION)
memcpy(generic->peer_addr, vif->bss_conf.bssid,
ETH_ALEN);
else
@@ -506,7 +570,7 @@ void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
rx->rca2 = 1;
rx->rv = 1;
- if (is_mt7921(dev))
+ if (!is_connac_v1(dev))
return;
tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_SPE, sizeof(*spe),
@@ -819,9 +883,9 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_tlv);
-static void
-mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
- void *sta_wtbl, void *wtbl_tlv)
+void mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ void *sta_wtbl, void *wtbl_tlv)
{
struct wtbl_smps *smps;
struct tlv *tlv;
@@ -829,30 +893,39 @@ mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_SMPS, sizeof(*smps),
wtbl_tlv, sta_wtbl);
smps = (struct wtbl_smps *)tlv;
-
- if (sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
- smps->smps = true;
+ smps->smps = (sta->smps_mode == IEEE80211_SMPS_DYNAMIC);
}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_smps_tlv);
void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_sta *sta, void *sta_wtbl,
- void *wtbl_tlv)
+ void *wtbl_tlv, bool ht_ldpc, bool vht_ldpc)
{
struct wtbl_ht *ht = NULL;
struct tlv *tlv;
u32 flags = 0;
- if (sta->ht_cap.ht_supported) {
+ if (sta->ht_cap.ht_supported || sta->he_6ghz_capa.capa) {
tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht),
wtbl_tlv, sta_wtbl);
ht = (struct wtbl_ht *)tlv;
- ht->ldpc = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
- ht->af = sta->ht_cap.ampdu_factor;
- ht->mm = sta->ht_cap.ampdu_density;
+ ht->ldpc = ht_ldpc &&
+ !!(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
+
+ if (sta->ht_cap.ht_supported) {
+ ht->af = sta->ht_cap.ampdu_factor;
+ ht->mm = sta->ht_cap.ampdu_density;
+ } else {
+ ht->af = le16_get_bits(sta->he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
+ ht->mm = le16_get_bits(sta->he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
+ }
+
ht->ht = true;
}
- if (sta->vht_cap.vht_supported) {
+ if (sta->vht_cap.vht_supported || sta->he_6ghz_capa.capa) {
struct wtbl_vht *vht;
u8 af;
@@ -860,7 +933,8 @@ void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
sizeof(*vht), wtbl_tlv,
sta_wtbl);
vht = (struct wtbl_vht *)tlv;
- vht->ldpc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
+ vht->ldpc = vht_ldpc &&
+ !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
vht->vht = true;
af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
@@ -871,7 +945,7 @@ void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
mt76_connac_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_tlv);
- if (!is_mt7921(dev) && sta->ht_cap.ht_supported) {
+ if (is_connac_v1(dev) && sta->ht_cap.ht_supported) {
/* sgi */
u32 msk = MT_WTBL_W5_SHORT_GI_20 | MT_WTBL_W5_SHORT_GI_40 |
MT_WTBL_W5_SHORT_GI_80 | MT_WTBL_W5_SHORT_GI_160;
@@ -939,7 +1013,8 @@ int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
sta_wtbl, wtbl_hdr);
if (info->sta)
mt76_connac_mcu_wtbl_ht_tlv(dev, skb, info->sta,
- sta_wtbl, wtbl_hdr);
+ sta_wtbl, wtbl_hdr,
+ true, true);
}
return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
@@ -973,13 +1048,13 @@ void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb,
ba->rst_ba_sb = 1;
}
- if (is_mt7921(dev)) {
+ if (!is_connac_v1(dev)) {
ba->ba_winsize = enable ? cpu_to_le16(params->buf_size) : 0;
return;
}
if (enable && tx) {
- u8 ba_range[] = { 4, 8, 12, 24, 36, 48, 54, 64 };
+ static const u8 ba_range[] = { 4, 8, 12, 24, 36, 48, 54, 64 };
int i;
for (i = 7; i > 0; i--) {
@@ -1106,7 +1181,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba_tlv);
int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
struct ieee80211_ampdu_params *params,
- bool enable, bool tx)
+ int cmd, bool enable, bool tx)
{
struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv;
struct wtbl_req_hdr *wtbl_hdr;
@@ -1129,8 +1204,7 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
mt76_connac_mcu_wtbl_ba_tlv(dev, skb, params, enable, tx, sta_wtbl,
wtbl_hdr);
- ret = mt76_mcu_skb_send_msg(dev, skb,
- MCU_UNI_CMD(STA_REC_UPDATE), true);
+ ret = mt76_mcu_skb_send_msg(dev, skb, cmd, true);
if (ret)
return ret;
@@ -1140,15 +1214,12 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
mt76_connac_mcu_sta_ba_tlv(skb, params, enable, tx);
- return mt76_mcu_skb_send_msg(dev, skb,
- MCU_UNI_CMD(STA_REC_UPDATE), true);
+ return mt76_mcu_skb_send_msg(dev, skb, cmd, true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba);
-static u8
-mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
- enum nl80211_band band,
- struct ieee80211_sta *sta)
+u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ enum nl80211_band band, struct ieee80211_sta *sta)
{
struct mt76_dev *dev = phy->dev;
const struct ieee80211_sta_he_cap *he_cap;
@@ -1156,7 +1227,7 @@ mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta_ht_cap *ht_cap;
u8 mode = 0;
- if (!is_mt7921(dev))
+ if (is_connac_v1(dev))
return 0x38;
if (sta) {
@@ -1180,7 +1251,7 @@ mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
if (he_cap && he_cap->has_he)
mode |= PHY_MODE_AX_24G;
- } else if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) {
+ } else if (band == NL80211_BAND_5GHZ) {
mode |= PHY_MODE_A;
if (ht_cap->ht_supported)
@@ -1189,14 +1260,18 @@ mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
if (vht_cap->vht_supported)
mode |= PHY_MODE_AC;
- if (he_cap && he_cap->has_he && band == NL80211_BAND_5GHZ)
+ if (he_cap && he_cap->has_he)
mode |= PHY_MODE_AX_5G;
+ } else if (band == NL80211_BAND_6GHZ) {
+ mode |= PHY_MODE_A | PHY_MODE_AN |
+ PHY_MODE_AC | PHY_MODE_AX_5G;
}
return mode;
}
+EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode);
-static const struct ieee80211_sta_he_cap *
+const struct ieee80211_sta_he_cap *
mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
{
enum nl80211_band band = phy->chandef.chan->band;
@@ -1206,6 +1281,7 @@ mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
return ieee80211_get_he_iftype_cap(sband, vif->type);
}
+EXPORT_SYMBOL_GPL(mt76_connac_get_he_phy_cap);
#define DEFAULT_HE_PE_DURATION 4
#define DEFAULT_HE_DURATION_RTS_THRES 1023
@@ -1438,7 +1514,6 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
int ext_channels_num = max_t(int, sreq->n_channels - 32, 0);
struct ieee80211_channel **scan_list = sreq->channels;
struct mt76_dev *mdev = phy->dev;
- bool ext_phy = phy == mdev->phy2;
struct mt76_connac_mcu_scan_channel *chan;
struct mt76_connac_hw_scan_req *req;
struct sk_buff *skb;
@@ -1452,7 +1527,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
req = (struct mt76_connac_hw_scan_req *)skb_put(skb, sizeof(*req));
- req->seq_num = mvif->scan_seq_num | ext_phy << 7;
+ req->seq_num = mvif->scan_seq_num | mvif->band_idx << 7;
req->bss_idx = mvif->idx;
req->scan_type = sreq->n_ssids ? 1 : 0;
req->probe_req_num = sreq->n_ssids ? 2 : 0;
@@ -1560,7 +1635,6 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
struct mt76_connac_mcu_scan_channel *chan;
struct mt76_connac_sched_scan_req *req;
struct mt76_dev *mdev = phy->dev;
- bool ext_phy = phy == mdev->phy2;
struct cfg80211_match_set *match;
struct cfg80211_ssid *ssid;
struct sk_buff *skb;
@@ -1574,7 +1648,7 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
req = (struct mt76_connac_sched_scan_req *)skb_put(skb, sizeof(*req));
req->version = 1;
- req->seq_num = mvif->scan_seq_num | ext_phy << 7;
+ req->seq_num = mvif->scan_seq_num | mvif->band_idx << 7;
if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
u8 *addr = is_mt7663(phy->dev) ? req->mt7663.random_mac
@@ -2482,5 +2556,257 @@ void mt76_connac_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val)
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_reg_wr);
+static int
+mt76_connac_mcu_sta_key_tlv(struct mt76_connac_sta_key_conf *sta_key_conf,
+ struct sk_buff *skb,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd)
+{
+ struct sta_rec_sec *sec;
+ u32 len = sizeof(*sec);
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
+ sec = (struct sta_rec_sec *)tlv;
+ sec->add = cmd;
+
+ if (cmd == SET_KEY) {
+ struct sec_key *sec_key;
+ u8 cipher;
+
+ cipher = mt76_connac_mcu_get_cipher(key->cipher);
+ if (cipher == MCU_CIPHER_NONE)
+ return -EOPNOTSUPP;
+
+ sec_key = &sec->key[0];
+ sec_key->cipher_len = sizeof(*sec_key);
+
+ if (cipher == MCU_CIPHER_BIP_CMAC_128) {
+ sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
+ sec_key->key_id = sta_key_conf->keyidx;
+ sec_key->key_len = 16;
+ memcpy(sec_key->key, sta_key_conf->key, 16);
+
+ sec_key = &sec->key[1];
+ sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
+ sec_key->cipher_len = sizeof(*sec_key);
+ sec_key->key_len = 16;
+ memcpy(sec_key->key, key->key, 16);
+ sec->n_cipher = 2;
+ } else {
+ sec_key->cipher_id = cipher;
+ sec_key->key_id = key->keyidx;
+ sec_key->key_len = key->keylen;
+ memcpy(sec_key->key, key->key, key->keylen);
+
+ if (cipher == MCU_CIPHER_TKIP) {
+ /* Rx/Tx MIC keys are swapped */
+ memcpy(sec_key->key + 16, key->key + 24, 8);
+ memcpy(sec_key->key + 24, key->key + 16, 8);
+ }
+
+ /* store key_conf for BIP batch update */
+ if (cipher == MCU_CIPHER_AES_CCMP) {
+ memcpy(sta_key_conf->key, key->key, key->keylen);
+ sta_key_conf->keyidx = key->keyidx;
+ }
+
+ len -= sizeof(*sec_key);
+ sec->n_cipher = 1;
+ }
+ } else {
+ len -= sizeof(sec->key);
+ sec->n_cipher = 0;
+ }
+ sec->len = cpu_to_le16(len);
+
+ return 0;
+}
+
+int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct mt76_connac_sta_key_conf *sta_key_conf,
+ struct ieee80211_key_conf *key, int mcu_cmd,
+ struct mt76_wcid *wcid, enum set_key_cmd cmd)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ ret = mt76_connac_mcu_sta_key_tlv(sta_key_conf, skb, key, cmd);
+ if (ret) {
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_key);
+
+/* SIFS 20us + 512 byte beacon transmitted by 1Mbps (4096us) */
+#define BCN_TX_ESTIMATE_TIME (4096 + 20)
+void mt76_connac_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt76_vif *mvif)
+{
+ struct bss_info_ext_bss *ext;
+ int ext_bss_idx, tsf_offset;
+ struct tlv *tlv;
+
+ ext_bss_idx = mvif->omac_idx - EXT_BSSID_START;
+ if (ext_bss_idx < 0)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_EXT_BSS, sizeof(*ext));
+
+ ext = (struct bss_info_ext_bss *)tlv;
+ tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
+ ext->mbss_tsf_offset = cpu_to_le32(tsf_offset);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_ext_tlv);
+
+int mt76_connac_mcu_bss_basic_tlv(struct sk_buff *skb,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct mt76_phy *phy, u16 wlan_idx,
+ bool enable)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ u32 type = vif->p2p ? NETWORK_P2P : NETWORK_INFRA;
+ struct bss_info_basic *bss;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BASIC, sizeof(*bss));
+ bss = (struct bss_info_basic *)tlv;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_MONITOR:
+ break;
+ case NL80211_IFTYPE_AP:
+ if (ieee80211_hw_check(phy->hw, SUPPORTS_MULTI_BSSID)) {
+ u8 bssid_id = vif->bss_conf.bssid_indicator;
+ struct wiphy *wiphy = phy->hw->wiphy;
+
+ if (bssid_id > ilog2(wiphy->mbssid_max_interfaces))
+ return -EINVAL;
+
+ bss->non_tx_bssid = vif->bss_conf.bssid_index;
+ bss->max_bssid = bssid_id;
+ }
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (enable) {
+ rcu_read_lock();
+ if (!sta)
+ sta = ieee80211_find_sta(vif,
+ vif->bss_conf.bssid);
+ /* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
+ if (sta) {
+ struct mt76_wcid *wcid;
+
+ wcid = (struct mt76_wcid *)sta->drv_priv;
+ wlan_idx = wcid->idx;
+ }
+ rcu_read_unlock();
+ }
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ type = NETWORK_IBSS;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ bss->network_type = cpu_to_le32(type);
+ bss->bmc_wcid_lo = to_wcid_lo(wlan_idx);
+ bss->bmc_wcid_hi = to_wcid_hi(wlan_idx);
+ bss->wmm_idx = mvif->wmm_idx;
+ bss->active = enable;
+ bss->cipher = mvif->cipher;
+
+ if (vif->type != NL80211_IFTYPE_MONITOR) {
+ struct cfg80211_chan_def *chandef = &phy->chandef;
+
+ memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
+ bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
+ bss->dtim_period = vif->bss_conf.dtim_period;
+ bss->phy_mode = mt76_connac_get_phy_mode(phy, vif,
+ chandef->chan->band, NULL);
+ } else {
+ memcpy(bss->bssid, phy->macaddr, ETH_ALEN);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_basic_tlv);
+
+#define ENTER_PM_STATE 1
+#define EXIT_PM_STATE 2
+int mt76_connac_mcu_set_pm(struct mt76_dev *dev, int band, int enter)
+{
+ struct {
+ u8 pm_number;
+ u8 pm_state;
+ u8 bssid[ETH_ALEN];
+ u8 dtim_period;
+ u8 wlan_idx_lo;
+ __le16 bcn_interval;
+ __le32 aid;
+ __le32 rx_filter;
+ u8 band_idx;
+ u8 wlan_idx_hi;
+ u8 rsv[2];
+ __le32 feature;
+ u8 omac_idx;
+ u8 wmm_idx;
+ u8 bcn_loss_cnt;
+ u8 bcn_sp_duration;
+ } __packed req = {
+ .pm_number = 5,
+ .pm_state = enter ? ENTER_PM_STATE : EXIT_PM_STATE,
+ .band_idx = band,
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_EXT_CMD(PM_STATE_CTRL), &req,
+ sizeof(req), true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_pm);
+
+int mt76_connac_mcu_restart(struct mt76_dev *dev)
+{
+ struct {
+ u8 power_mode;
+ u8 rsv[3];
+ } req = {
+ .power_mode = 1,
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_CMD(NIC_POWER_CTRL), &req,
+ sizeof(req), false);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_restart);
+
+int mt76_connac_mcu_rdd_cmd(struct mt76_dev *dev, int cmd, u8 index,
+ u8 rx_sel, u8 val)
+{
+ struct {
+ u8 ctrl;
+ u8 rdd_idx;
+ u8 rdd_rx_sel;
+ u8 val;
+ u8 rsv[4];
+ } __packed req = {
+ .ctrl = cmd,
+ .rdd_idx = index,
+ .rdd_rx_sel = rx_sel,
+ .val = val,
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_EXT_CMD(SET_RDD_CTRL), &req,
+ sizeof(req), true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_rdd_cmd);
+
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");
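
The new mt76_connac_mcu_add_key() helper above centralises the STA_REC key TLV handling. A hedged sketch of how a chip driver's set_key path might call it; the per-station bip cache and the MCU command used here are assumptions for illustration only:

	/* illustrative only -- not part of the patch */
	static int example_install_key(struct mt76_dev *mdev, struct ieee80211_vif *vif,
				       struct mt76_wcid *wcid,
				       struct mt76_connac_sta_key_conf *bip,
				       struct ieee80211_key_conf *key,
				       enum set_key_cmd cmd)
	{
		/* bip caches the pairwise CCMP key so that a later BIP key can be
		 * sent together with it, as done in mt76_connac_mcu_sta_key_tlv()
		 */
		return mt76_connac_mcu_add_key(mdev, vif, bip, key,
					       MCU_UNI_CMD(STA_REC_UPDATE), wcid, cmd);
	}
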
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index 5baf8370b7bd..c3c93338d56a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -6,6 +6,26 @@
#include "mt76_connac.h"
+#define FW_FEATURE_SET_ENCRYPT BIT(0)
+#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
+#define FW_FEATURE_ENCRY_MODE BIT(4)
+#define FW_FEATURE_OVERRIDE_ADDR BIT(5)
+
+#define DL_MODE_ENCRYPT BIT(0)
+#define DL_MODE_KEY_IDX GENMASK(2, 1)
+#define DL_MODE_RESET_SEC_IV BIT(3)
+#define DL_MODE_WORKING_PDA_CR4 BIT(4)
+#define DL_MODE_VALID_RAM_ENTRY BIT(5)
+#define DL_CONFIG_ENCRY_MODE_SEL BIT(6)
+#define DL_MODE_NEED_RSP BIT(31)
+
+#define FW_START_OVERRIDE BIT(0)
+#define FW_START_WORKING_PDA_CR4 BIT(2)
+
+#define PATCH_SEC_NOT_SUPPORT GENMASK(31, 0)
+#define PATCH_SEC_TYPE_MASK GENMASK(15, 0)
+#define PATCH_SEC_TYPE_INFO 0x2
+
struct tlv {
__le16 tag;
__le16 len;
@@ -570,6 +590,7 @@ struct wtbl_raw {
sizeof(struct sta_rec_muru) + \
sizeof(struct sta_rec_bfee) + \
sizeof(struct sta_rec_ra) + \
+ sizeof(struct sta_rec_sec) + \
sizeof(struct sta_rec_ra_fixed) + \
sizeof(struct sta_rec_he_6g_capa) + \
sizeof(struct tlv) + \
@@ -956,6 +977,7 @@ enum {
MCU_EXT_CMD_SCS_CTRL = 0x82,
MCU_EXT_CMD_TWT_AGRT_UPDATE = 0x94,
MCU_EXT_CMD_FW_DBG_CTRL = 0x95,
+ MCU_EXT_CMD_OFFCH_SCAN_CTRL = 0x9a,
MCU_EXT_CMD_SET_RDD_TH = 0x9d,
MCU_EXT_CMD_MURU_CTRL = 0x9f,
MCU_EXT_CMD_SET_SPR = 0xa8,
@@ -971,6 +993,7 @@ enum {
MCU_UNI_CMD_SUSPEND = 0x05,
MCU_UNI_CMD_OFFLOAD = 0x06,
MCU_UNI_CMD_HIF_CTRL = 0x07,
+ MCU_UNI_CMD_SNIFFER = 0x24,
};
enum {
@@ -996,7 +1019,8 @@ enum {
MCU_CE_CMD_SET_BSS_CONNECTED = 0x16,
MCU_CE_CMD_SET_BSS_ABORT = 0x17,
MCU_CE_CMD_CANCEL_HW_SCAN = 0x1b,
- MCU_CE_CMD_SET_ROC = 0x1d,
+ MCU_CE_CMD_SET_ROC = 0x1c,
+ MCU_CE_CMD_SET_EDCA_PARMS = 0x1d,
MCU_CE_CMD_SET_P2P_OPPPS = 0x33,
MCU_CE_CMD_SET_RATE_TX_POWER = 0x5d,
MCU_CE_CMD_SCHED_SCAN_ENABLE = 0x61,
@@ -1427,6 +1451,51 @@ struct mt76_connac_config {
u8 data[320];
} __packed;
+static inline enum mcu_cipher_type
+mt76_connac_mcu_get_cipher(int cipher)
+{
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MCU_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MCU_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return MCU_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ return MCU_CIPHER_BIP_CMAC_128;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MCU_CIPHER_AES_CCMP;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return MCU_CIPHER_CCMP_256;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return MCU_CIPHER_GCMP;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return MCU_CIPHER_GCMP_256;
+ case WLAN_CIPHER_SUITE_SMS4:
+ return MCU_CIPHER_WAPI;
+ default:
+ return MCU_CIPHER_NONE;
+ }
+}
+
+static inline u32
+mt76_connac_mcu_gen_dl_mode(struct mt76_dev *dev, u8 feature_set, bool is_wa)
+{
+ u32 ret = 0;
+
+ ret |= feature_set & FW_FEATURE_SET_ENCRYPT ?
+ DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV : 0;
+ if (is_mt7921(dev))
+ ret |= feature_set & FW_FEATURE_ENCRY_MODE ?
+ DL_CONFIG_ENCRY_MODE_SEL : 0;
+ ret |= FIELD_PREP(DL_MODE_KEY_IDX,
+ FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
+ ret |= DL_MODE_NEED_RSP;
+ ret |= is_wa ? DL_MODE_WORKING_PDA_CR4 : 0;
+
+ return ret;
+}
+
#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
@@ -1436,7 +1505,7 @@ mt76_connac_mcu_get_wlan_idx(struct mt76_dev *dev, struct mt76_wcid *wcid,
{
*wlan_idx_hi = 0;
- if (is_mt7921(dev)) {
+ if (!is_connac_v1(dev)) {
*wlan_idx_lo = wcid ? to_wcid_lo(wcid->idx) : 0;
*wlan_idx_hi = wcid ? to_wcid_hi(wcid->idx) : 0;
} else {
@@ -1445,8 +1514,16 @@ mt76_connac_mcu_get_wlan_idx(struct mt76_dev *dev, struct mt76_wcid *wcid,
}
struct sk_buff *
+__mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
+ struct mt76_wcid *wcid, int len);
+static inline struct sk_buff *
mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
- struct mt76_wcid *wcid);
+ struct mt76_wcid *wcid)
+{
+ return __mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid,
+ MT76_CONNAC_STA_UPDATE_MAX_SIZE);
+}
+
struct wtbl_req_hdr *
mt76_connac_mcu_alloc_wtbl_req(struct mt76_dev *dev, struct mt76_wcid *wcid,
int cmd, void *sta_wtbl, struct sk_buff **skb);
@@ -1476,13 +1553,16 @@ void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev,
struct ieee80211_vif *vif,
struct mt76_wcid *wcid, int cmd);
+int mt76_connac_mcu_wtbl_update_hdr_trans(struct mt76_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
struct ieee80211_sta *sta,
struct ieee80211_vif *vif,
u8 rcpi, u8 state);
void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_sta *sta, void *sta_wtbl,
- void *wtbl_tlv);
+ void *wtbl_tlv, bool ht_ldpc, bool vht_ldpc);
void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_ampdu_params *params,
bool enable, bool tx, void *sta_wtbl,
@@ -1496,7 +1576,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
bool enable);
int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
struct ieee80211_ampdu_params *params,
- bool enable, bool tx);
+ int cmd, bool enable, bool tx);
int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
struct ieee80211_vif *vif,
struct mt76_wcid *wcid,
@@ -1546,4 +1626,32 @@ int mt76_connac_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
u32 mt76_connac_mcu_reg_rr(struct mt76_dev *dev, u32 offset);
void mt76_connac_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val);
+
+const struct ieee80211_sta_he_cap *
+mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif);
+u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ enum nl80211_band band, struct ieee80211_sta *sta);
+
+int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct mt76_connac_sta_key_conf *sta_key_conf,
+ struct ieee80211_key_conf *key, int mcu_cmd,
+ struct mt76_wcid *wcid, enum set_key_cmd cmd);
+
+void mt76_connac_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt76_vif *mvif);
+void mt76_connac_mcu_bss_omac_tlv(struct sk_buff *skb,
+ struct ieee80211_vif *vif);
+int mt76_connac_mcu_bss_basic_tlv(struct sk_buff *skb,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct mt76_phy *phy, u16 wlan_idx,
+ bool enable);
+void mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ void *sta_wtbl, void *wtbl_tlv);
+int mt76_connac_mcu_set_pm(struct mt76_dev *dev, int band, int enter);
+int mt76_connac_mcu_restart(struct mt76_dev *dev);
+int mt76_connac_mcu_rdd_cmd(struct mt76_dev *dev, int cmd, u8 index,
+ u8 rx_sel, u8 val);
#endif /* __MT76_CONNAC_MCU_H */
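
As a worked example of the mt76_connac_mcu_gen_dl_mode() helper declared above, with values following directly from the bit definitions at the top of this header (the device is assumed not to be an mt7921):

	/* feature_set advertising encryption with key index 2, non-WA target */
	u8 feature_set = FW_FEATURE_SET_ENCRYPT |
			 FIELD_PREP(FW_FEATURE_SET_KEY_IDX, 2);
	u32 mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76, feature_set, false);

	/* mode == DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV |
	 *	   FIELD_PREP(DL_MODE_KEY_IDX, 2) | DL_MODE_NEED_RSP
	 *	 == BIT(0) | BIT(3) | BIT(2) | BIT(31) == 0x8000000d
	 */
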
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 436daf6d6d86..0422c332354a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -245,7 +245,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
usb_set_intfdata(usb_intf, dev);
mt76x02u_init_mcu(mdev);
- ret = mt76u_init(mdev, usb_intf, false);
+ ret = mt76u_init(mdev, usb_intf);
if (ret)
goto err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 44d1a92d9a90..f76fd22ee035 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -103,7 +103,8 @@ struct mt76x02_dev {
u8 tbtt_count;
u32 tx_hang_reset;
- u8 tx_hang_check;
+ u8 tx_hang_check[4];
+ u8 beacon_hang_check;
u8 mcu_timeout;
struct mt76x02_calibration cal;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
index a601350531cd..024a5c0a5a57 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
@@ -823,10 +823,7 @@ EXPORT_SYMBOL_GPL(mt76x02_phy_dfs_adjust_agc);
void mt76x02_dfs_init_params(struct mt76x02_dev *dev)
{
- struct cfg80211_chan_def *chandef = &dev->mphy.chandef;
-
- if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
- dev->mt76.region != NL80211_DFS_UNSET) {
+ if (mt76_phy_dfs_state(&dev->mphy) > MT_DFS_STATE_DISABLED) {
mt76x02_dfs_init_sw_detector(dev);
mt76x02_dfs_set_bbp_params(dev);
/* enable debug mode */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index a404fd7ea968..2afad8c76ca6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -860,9 +860,7 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
status->chain_signal[1] = mt76x02_mac_get_rssi(dev,
rxwi->rssi[1],
1);
- signal = max_t(s8, signal, status->chain_signal[1]);
}
- status->signal = signal;
status->freq = dev->mphy.chandef.chan->center_freq;
status->band = dev->mphy.chandef.chan->band;
@@ -1040,12 +1038,26 @@ EXPORT_SYMBOL_GPL(mt76x02_update_channel);
static void mt76x02_check_mac_err(struct mt76x02_dev *dev)
{
- u32 val = mt76_rr(dev, 0x10f4);
+ if (dev->mt76.beacon_mask) {
+ if (mt76_rr(dev, MT_TX_STA_0) & MT_TX_STA_0_BEACONS) {
+ dev->beacon_hang_check = 0;
+ return;
+ }
- if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
- return;
+ if (++dev->beacon_hang_check < 10)
+ return;
+
+ dev->beacon_hang_check = 0;
+ } else {
+ u32 val = mt76_rr(dev, 0x10f4);
+ if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
+ return;
+ }
+
+ dev_err(dev->mt76.dev, "MAC error detected\n");
- dev_err(dev->mt76.dev, "mac specific condition occurred\n");
+ mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
+ mt76x02_wait_for_txrx_idle(&dev->mt76);
mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
udelay(10);
@@ -1178,8 +1190,7 @@ void mt76x02_mac_work(struct work_struct *work)
dev->mt76.aggr_stats[idx++] += val >> 16;
}
- if (!dev->mt76.beacon_mask)
- mt76x02_check_mac_err(dev);
+ mt76x02_check_mac_err(dev);
if (dev->ed_monitor)
mt76x02_edcca_check(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index ec0de691129a..8bcd8afa0d3a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -348,18 +348,20 @@ static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
for (i = 0; i < 4; i++) {
q = dev->mphy.q_tx[i];
- if (!q->queued)
- continue;
-
prev_dma_idx = dev->mt76.tx_dma_idx[i];
dma_idx = readl(&q->regs->dma_idx);
dev->mt76.tx_dma_idx[i] = dma_idx;
- if (prev_dma_idx == dma_idx)
- break;
+ if (!q->queued || prev_dma_idx != dma_idx) {
+ dev->tx_hang_check[i] = 0;
+ continue;
+ }
+
+ if (++dev->tx_hang_check[i] >= MT_TX_HANG_TH)
+ return true;
}
- return i < 4;
+ return false;
}
static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -530,23 +532,13 @@ static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
if (test_bit(MT76_RESTART, &dev->mphy.state))
return;
- if (mt76x02_tx_hang(dev)) {
- if (++dev->tx_hang_check >= MT_TX_HANG_TH)
- goto restart;
- } else {
- dev->tx_hang_check = 0;
- }
-
- if (dev->mcu_timeout)
- goto restart;
-
- return;
+ if (!mt76x02_tx_hang(dev) && !dev->mcu_timeout)
+ return;
-restart:
mt76x02_watchdog_reset(dev);
dev->tx_hang_reset++;
- dev->tx_hang_check = 0;
+ memset(dev->tx_hang_check, 0, sizeof(dev->tx_hang_check));
memset(dev->mt76.tx_dma_idx, 0xff,
sizeof(dev->mt76.tx_dma_idx));
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
index fa7872ac22bf..fe0c5e3298bc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
@@ -571,6 +571,8 @@
#define MT_RX_STAT_2_OVERFLOW_ERRORS GENMASK(31, 16)
#define MT_TX_STA_0 0x170c
+#define MT_TX_STA_0_BEACONS GENMASK(31, 16)
+
#define MT_TX_STA_1 0x1710
#define MT_TX_STA_2 0x1714
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index 2575369e44e2..55068f3252ef 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -57,7 +57,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
usb_set_intfdata(intf, dev);
mt76x02u_init_mcu(mdev);
- err = mt76u_init(mdev, intf, false);
+ err = mt76u_init(mdev, intf);
if (err < 0)
goto err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig
index d98225da694c..f21282cea845 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig
@@ -1,9 +1,10 @@
# SPDX-License-Identifier: ISC
config MT7915E
tristate "MediaTek MT7915E (PCIe) support"
- select MT76_CORE
+ select MT76_CONNAC_LIB
depends on MAC80211
depends on PCI
+ select RELAY
help
This adds support for MT7915-based wireless PCIe devices,
which support concurrent dual-band operation at both 5GHz
@@ -11,3 +12,13 @@ config MT7915E
OFDMA, spatial reuse and dual carrier modulation.
To compile this driver as a module, choose M here.
+
+config MT7986_WMAC
+ bool "MT7986 (SoC) WMAC support"
+ depends on MT7915E
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select REGMAP
+ help
+ This adds support for the built-in WMAC on the MT7986 SoC,
+ which has the same feature set as MT7915 but additionally
+ enables Wi-Fi 6E support.
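
A minimal .config fragment enabling the new option on an ARCH_MEDIATEK (or COMPILE_TEST) kernel; illustrative only, remaining dependencies are left to the defconfig:

	# example .config fragment -- not part of the patch
	CONFIG_MAC80211=y
	CONFIG_PCI=y
	CONFIG_MT7915E=m
	CONFIG_MT7986_WMAC=y
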
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/Makefile b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile
index 80e49244348e..b794ceb79c37 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile
@@ -6,3 +6,4 @@ mt7915e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o \
debugfs.o mmio.o
mt7915e-$(CONFIG_NL80211_TESTMODE) += testmode.o
+mt7915e-$(CONFIG_MT7986_WMAC) += soc.o
\ No newline at end of file
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
index e96d1c31dd36..4e1ecaec8f4f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -1,9 +1,13 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */
+#include <linux/relay.h>
#include "mt7915.h"
#include "eeprom.h"
#include "mcu.h"
+#include "mac.h"
+
+#define FW_BIN_LOG_MAGIC 0x44e98caf
/** global debugfs **/
@@ -75,7 +79,11 @@ mt7915_radar_trigger(void *data, u64 val)
{
struct mt7915_dev *dev = data;
- return mt7915_mcu_rdd_cmd(dev, RDD_RADAR_EMULATE, 1, 0, 0);
+ if (val > MT_RX_SEL2)
+ return -EINVAL;
+
+ return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_RADAR_EMULATE,
+ val, 0, 0);
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_trigger, NULL,
@@ -301,6 +309,53 @@ exit:
DEFINE_SHOW_ATTRIBUTE(mt7915_muru_stats);
static int
+mt7915_rdd_monitor(struct seq_file *s, void *data)
+{
+ struct mt7915_dev *dev = dev_get_drvdata(s->private);
+ struct cfg80211_chan_def *chandef = &dev->rdd2_chandef;
+ const char *bw;
+ int ret = 0;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (!cfg80211_chandef_valid(chandef)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!dev->rdd2_phy) {
+ seq_puts(s, "not running\n");
+ goto out;
+ }
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_40:
+ bw = "40";
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ bw = "80";
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ bw = "160";
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ bw = "80P80";
+ break;
+ default:
+ bw = "20";
+ break;
+ }
+
+ seq_printf(s, "channel %d (%d MHz) width %s MHz center1: %d MHz\n",
+ chandef->chan->hw_value, chandef->chan->center_freq,
+ bw, chandef->center_freq1);
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static int
mt7915_fw_debug_wm_set(void *data, u64 val)
{
struct mt7915_dev *dev = data;
@@ -311,16 +366,31 @@ mt7915_fw_debug_wm_set(void *data, u64 val)
DEBUG_SPL,
DEBUG_RPT_RX,
} debug;
+ bool tx, rx, en;
int ret;
dev->fw_debug_wm = val ? MCU_FW_LOG_TO_HOST : 0;
- ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WM, dev->fw_debug_wm);
+ if (dev->fw_debug_bin)
+ val = 16;
+ else
+ val = dev->fw_debug_wm;
+
+ tx = dev->fw_debug_wm || (dev->fw_debug_bin & BIT(1));
+ rx = dev->fw_debug_wm || (dev->fw_debug_bin & BIT(2));
+ en = dev->fw_debug_wm || (dev->fw_debug_bin & BIT(0));
+
+ ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WM, val);
if (ret)
return ret;
for (debug = DEBUG_TXCMD; debug <= DEBUG_RPT_RX; debug++) {
- ret = mt7915_mcu_fw_dbg_ctrl(dev, debug, !!dev->fw_debug_wm);
+ if (debug == DEBUG_RPT_RX)
+ val = en && rx;
+ else
+ val = en && tx;
+
+ ret = mt7915_mcu_fw_dbg_ctrl(dev, debug, val);
if (ret)
return ret;
}
@@ -376,6 +446,65 @@ mt7915_fw_debug_wa_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug_wa, mt7915_fw_debug_wa_get,
mt7915_fw_debug_wa_set, "%lld\n");
+static struct dentry *
+create_buf_file_cb(const char *filename, struct dentry *parent, umode_t mode,
+ struct rchan_buf *buf, int *is_global)
+{
+ struct dentry *f;
+
+ f = debugfs_create_file("fwlog_data", mode, parent, buf,
+ &relay_file_operations);
+ if (IS_ERR(f))
+ return NULL;
+
+ *is_global = 1;
+
+ return f;
+}
+
+static int
+remove_buf_file_cb(struct dentry *f)
+{
+ debugfs_remove(f);
+
+ return 0;
+}
+
+static int
+mt7915_fw_debug_bin_set(void *data, u64 val)
+{
+ static struct rchan_callbacks relay_cb = {
+ .create_buf_file = create_buf_file_cb,
+ .remove_buf_file = remove_buf_file_cb,
+ };
+ struct mt7915_dev *dev = data;
+
+ if (!dev->relay_fwlog)
+ dev->relay_fwlog = relay_open("fwlog_data", dev->debugfs_dir,
+ 1500, 512, &relay_cb, NULL);
+ if (!dev->relay_fwlog)
+ return -ENOMEM;
+
+ dev->fw_debug_bin = val;
+
+ relay_reset(dev->relay_fwlog);
+
+ return mt7915_fw_debug_wm_set(dev, dev->fw_debug_wm);
+}
+
+static int
+mt7915_fw_debug_bin_get(void *data, u64 *val)
+{
+ struct mt7915_dev *dev = data;
+
+ *val = dev->fw_debug_bin;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug_bin, mt7915_fw_debug_bin_get,
+ mt7915_fw_debug_bin_set, "%lld\n");
+
static int
mt7915_fw_util_wm_show(struct seq_file *file, void *data)
{
@@ -419,12 +548,12 @@ mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy,
/* Tx ampdu stat */
for (i = 0; i < ARRAY_SIZE(range); i++)
- range[i] = mt76_rr(dev, MT_MIB_ARNG(ext_phy, i));
+ range[i] = mt76_rr(dev, MT_MIB_ARNG(phy->band_idx, i));
for (i = 0; i < ARRAY_SIZE(bound); i++)
bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1;
- seq_printf(file, "\nPhy %d\n", ext_phy);
+ seq_printf(file, "\nPhy %d, Phy band %d\n", ext_phy, phy->band_idx);
seq_printf(file, "Length: %8d | ", bound[0]);
for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
@@ -432,7 +561,7 @@ mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy,
bound[i] + 1, bound[i + 1]);
seq_puts(file, "\nCount: ");
- n = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
+ n = phy->band_idx ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
for (i = 0; i < ARRAY_SIZE(bound); i++)
seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i + n]);
seq_puts(file, "\n");
@@ -521,14 +650,14 @@ mt7915_tx_stats_show(struct seq_file *file, void *data)
DEFINE_SHOW_ATTRIBUTE(mt7915_tx_stats);
static void
-mt7915_hw_queue_read(struct seq_file *s, u32 base, u32 size,
+mt7915_hw_queue_read(struct seq_file *s, u32 size,
const struct hw_queue_map *map)
{
struct mt7915_phy *phy = s->private;
struct mt7915_dev *dev = phy->dev;
u32 i, val;
- val = mt76_rr(dev, base + MT_FL_Q_EMPTY);
+ val = mt76_rr(dev, MT_FL_Q_EMPTY);
for (i = 0; i < size; i++) {
u32 ctrl, head, tail, queued;
@@ -536,13 +665,13 @@ mt7915_hw_queue_read(struct seq_file *s, u32 base, u32 size,
continue;
ctrl = BIT(31) | (map[i].pid << 10) | (map[i].qid << 24);
- mt76_wr(dev, base + MT_FL_Q0_CTRL, ctrl);
+ mt76_wr(dev, MT_FL_Q0_CTRL, ctrl);
- head = mt76_get_field(dev, base + MT_FL_Q2_CTRL,
+ head = mt76_get_field(dev, MT_FL_Q2_CTRL,
GENMASK(11, 0));
- tail = mt76_get_field(dev, base + MT_FL_Q2_CTRL,
+ tail = mt76_get_field(dev, MT_FL_Q2_CTRL,
GENMASK(27, 16));
- queued = mt76_get_field(dev, base + MT_FL_Q3_CTRL,
+ queued = mt76_get_field(dev, MT_FL_Q3_CTRL,
GENMASK(11, 0));
seq_printf(s, "\t%s: ", map[i].name);
@@ -570,8 +699,8 @@ mt7915_sta_hw_queue_read(void *data, struct ieee80211_sta *sta)
if (val & BIT(offs))
continue;
- mt76_wr(dev, MT_PLE_BASE + MT_FL_Q0_CTRL, ctrl | msta->wcid.idx);
- qlen = mt76_get_field(dev, MT_PLE_BASE + MT_FL_Q3_CTRL,
+ mt76_wr(dev, MT_FL_Q0_CTRL, ctrl | msta->wcid.idx);
+ qlen = mt76_get_field(dev, MT_FL_Q3_CTRL,
GENMASK(11, 0));
seq_printf(s, "\tSTA %pM wcid %d: AC%d%d queued:%d\n",
sta->addr, msta->wcid.idx,
@@ -633,7 +762,7 @@ mt7915_hw_queues_show(struct seq_file *file, void *data)
val, head, tail);
seq_puts(file, "PLE non-empty queue info:\n");
- mt7915_hw_queue_read(file, MT_PLE_BASE, ARRAY_SIZE(ple_queue_map),
+ mt7915_hw_queue_read(file, ARRAY_SIZE(ple_queue_map),
&ple_queue_map[0]);
/* iterate per-sta ple queue */
@@ -641,7 +770,7 @@ mt7915_hw_queues_show(struct seq_file *file, void *data)
mt7915_sta_hw_queue_read, file);
/* pse queue */
seq_puts(file, "PSE non-empty queue info:\n");
- mt7915_hw_queue_read(file, MT_PSE_BASE, ARRAY_SIZE(pse_queue_map),
+ mt7915_hw_queue_read(file, ARRAY_SIZE(pse_queue_map),
&pse_queue_map[0]);
return 0;
@@ -757,6 +886,7 @@ int mt7915_init_debugfs(struct mt7915_phy *phy)
debugfs_create_file("tx_stats", 0400, dir, phy, &mt7915_tx_stats_fops);
debugfs_create_file("fw_debug_wm", 0600, dir, dev, &fops_fw_debug_wm);
debugfs_create_file("fw_debug_wa", 0600, dir, dev, &fops_fw_debug_wa);
+ debugfs_create_file("fw_debug_bin", 0600, dir, dev, &fops_fw_debug_bin);
debugfs_create_file("fw_util_wm", 0400, dir, dev,
&mt7915_fw_util_wm_fops);
debugfs_create_file("fw_util_wa", 0400, dir, dev,
@@ -768,16 +898,77 @@ int mt7915_init_debugfs(struct mt7915_phy *phy)
debugfs_create_devm_seqfile(dev->mt76.dev, "twt_stats", dir,
mt7915_twt_stats);
debugfs_create_file("ser_trigger", 0200, dir, dev, &fops_ser_trigger);
- if (!dev->dbdc_support || ext_phy) {
+ if (!dev->dbdc_support || phy->band_idx) {
debugfs_create_u32("dfs_hw_pattern", 0400, dir,
&dev->hw_pattern);
debugfs_create_file("radar_trigger", 0200, dir, dev,
&fops_radar_trigger);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "rdd_monitor", dir,
+ mt7915_rdd_monitor);
}
+ if (!ext_phy)
+ dev->debugfs_dir = dir;
+
return 0;
}
+static void
+mt7915_debugfs_write_fwlog(struct mt7915_dev *dev, const void *hdr, int hdrlen,
+ const void *data, int len)
+{
+ static DEFINE_SPINLOCK(lock);
+ unsigned long flags;
+ void *dest;
+
+ spin_lock_irqsave(&lock, flags);
+ dest = relay_reserve(dev->relay_fwlog, hdrlen + len + 4);
+ if (dest) {
+ *(u32 *)dest = hdrlen + len;
+ dest += 4;
+
+ if (hdrlen) {
+ memcpy(dest, hdr, hdrlen);
+ dest += hdrlen;
+ }
+
+ memcpy(dest, data, len);
+ relay_flush(dev->relay_fwlog);
+ }
+ spin_unlock_irqrestore(&lock, flags);
+}
+
+void mt7915_debugfs_rx_fw_monitor(struct mt7915_dev *dev, const void *data, int len)
+{
+ struct {
+ __le32 magic;
+ __le32 timestamp;
+ __le16 msg_type;
+ __le16 len;
+ } hdr = {
+ .magic = cpu_to_le32(FW_BIN_LOG_MAGIC),
+ .msg_type = cpu_to_le16(PKT_TYPE_RX_FW_MONITOR),
+ };
+
+ if (!dev->relay_fwlog)
+ return;
+
+ hdr.timestamp = cpu_to_le32(mt76_rr(dev, MT_LPON_FRCR(0)));
+ hdr.len = *(__le16 *)data;
+ mt7915_debugfs_write_fwlog(dev, &hdr, sizeof(hdr), data, len);
+}
+
+bool mt7915_debugfs_rx_log(struct mt7915_dev *dev, const void *data, int len)
+{
+ if (get_unaligned_le32(data) != FW_BIN_LOG_MAGIC)
+ return false;
+
+ if (dev->relay_fwlog)
+ mt7915_debugfs_write_fwlog(dev, NULL, 0, data, len);
+
+ return true;
+}
+
#ifdef CONFIG_MAC80211_DEBUGFS
/** per-station debugfs **/
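
The fwlog_data relay file introduced above carries length-prefixed records: a native-endian u32 total length followed by the optional header and the firmware payload, as written by mt7915_debugfs_write_fwlog(). A minimal, hedged userspace reader sketch (file path handling and buffer size are illustrative):

	/* illustrative reader for the debugfs fwlog_data file -- not part of the patch */
	#include <stdint.h>
	#include <stdio.h>

	static int read_fwlog_record(FILE *f)
	{
		uint32_t len;
		uint8_t buf[4096];

		if (fread(&len, sizeof(len), 1, f) != 1)
			return -1;			/* EOF or short read */
		if (len > sizeof(buf) || fread(buf, 1, len, f) != len)
			return -1;			/* oversized or truncated record */

		printf("fwlog record: %u bytes\n", (unsigned int)len);
		return 0;
	}
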
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
index 9182568f95c7..49b4d8ade16b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
@@ -5,11 +5,11 @@
#include "../dma.h"
#include "mac.h"
-int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc)
+int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base)
{
int i, err;
- err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE);
+ err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, ring_base);
if (err < 0)
return err;
@@ -40,140 +40,392 @@ static int mt7915_poll_tx(struct napi_struct *napi, int budget)
return 0;
}
+static void mt7915_dma_config(struct mt7915_dev *dev)
+{
+#define Q_CONFIG(q, wfdma, int, id) do { \
+ if (wfdma) \
+ dev->wfdma_mask |= (1 << (q)); \
+ dev->q_int_mask[(q)] = int; \
+ dev->q_id[(q)] = id; \
+ } while (0)
+
+#define MCUQ_CONFIG(q, wfdma, int, id) Q_CONFIG(q, (wfdma), (int), (id))
+#define RXQ_CONFIG(q, wfdma, int, id) Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
+#define TXQ_CONFIG(q, wfdma, int, id) Q_CONFIG(__TXQ(q), (wfdma), (int), (id))
+
+ if (is_mt7915(&dev->mt76)) {
+ RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0, MT7915_RXQ_BAND0);
+ RXQ_CONFIG(MT_RXQ_MCU, WFDMA1, MT_INT_RX_DONE_WM, MT7915_RXQ_MCU_WM);
+ RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA1, MT_INT_RX_DONE_WA, MT7915_RXQ_MCU_WA);
+ RXQ_CONFIG(MT_RXQ_EXT, WFDMA0, MT_INT_RX_DONE_BAND1, MT7915_RXQ_BAND1);
+ RXQ_CONFIG(MT_RXQ_EXT_WA, WFDMA1, MT_INT_RX_DONE_WA_EXT, MT7915_RXQ_MCU_WA_EXT);
+ RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA1, MT_INT_RX_DONE_WA_MAIN, MT7915_RXQ_MCU_WA);
+ TXQ_CONFIG(0, WFDMA1, MT_INT_TX_DONE_BAND0, MT7915_TXQ_BAND0);
+ TXQ_CONFIG(1, WFDMA1, MT_INT_TX_DONE_BAND1, MT7915_TXQ_BAND1);
+ MCUQ_CONFIG(MT_MCUQ_WM, WFDMA1, MT_INT_TX_DONE_MCU_WM, MT7915_TXQ_MCU_WM);
+ MCUQ_CONFIG(MT_MCUQ_WA, WFDMA1, MT_INT_TX_DONE_MCU_WA, MT7915_TXQ_MCU_WA);
+ MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA1, MT_INT_TX_DONE_FWDL, MT7915_TXQ_FWDL);
+ } else {
+ RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0_MT7916, MT7916_RXQ_BAND0);
+ RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7916_RXQ_MCU_WM);
+ RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7916_RXQ_MCU_WA);
+ RXQ_CONFIG(MT_RXQ_EXT, WFDMA0, MT_INT_RX_DONE_BAND1_MT7916, MT7916_RXQ_BAND1);
+ RXQ_CONFIG(MT_RXQ_EXT_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT_MT7916, MT7916_RXQ_MCU_WA_EXT);
+ RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN_MT7916, MT7916_RXQ_MCU_WA_MAIN);
+ TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7915_TXQ_BAND0);
+ TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7915_TXQ_BAND1);
+ MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7915_TXQ_MCU_WM);
+ MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA_MT7916, MT7915_TXQ_MCU_WA);
+ MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL, MT7915_TXQ_FWDL);
+ }
+}
+
static void __mt7915_dma_prefetch(struct mt7915_dev *dev, u32 ofs)
{
-#define PREFETCH(base, depth) ((base) << 16 | (depth))
-
- mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL + ofs, PREFETCH(0x0, 0x4));
- mt76_wr(dev, MT_WFDMA0_RX_RING1_EXT_CTRL + ofs, PREFETCH(0x40, 0x4));
- mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL + ofs, PREFETCH(0x80, 0x0));
-
- mt76_wr(dev, MT_WFDMA1_TX_RING0_EXT_CTRL + ofs, PREFETCH(0x80, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING1_EXT_CTRL + ofs, PREFETCH(0xc0, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING2_EXT_CTRL + ofs, PREFETCH(0x100, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING3_EXT_CTRL + ofs, PREFETCH(0x140, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING4_EXT_CTRL + ofs, PREFETCH(0x180, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING5_EXT_CTRL + ofs, PREFETCH(0x1c0, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING6_EXT_CTRL + ofs, PREFETCH(0x200, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING7_EXT_CTRL + ofs, PREFETCH(0x240, 0x4));
-
- mt76_wr(dev, MT_WFDMA1_TX_RING16_EXT_CTRL + ofs, PREFETCH(0x280, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING17_EXT_CTRL + ofs, PREFETCH(0x2c0, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING18_EXT_CTRL + ofs, PREFETCH(0x300, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING19_EXT_CTRL + ofs, PREFETCH(0x340, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING20_EXT_CTRL + ofs, PREFETCH(0x380, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING21_EXT_CTRL + ofs, PREFETCH(0x3c0, 0x0));
-
- mt76_wr(dev, MT_WFDMA1_RX_RING0_EXT_CTRL + ofs, PREFETCH(0x3c0, 0x4));
- mt76_wr(dev, MT_WFDMA1_RX_RING1_EXT_CTRL + ofs, PREFETCH(0x400, 0x4));
- mt76_wr(dev, MT_WFDMA1_RX_RING2_EXT_CTRL + ofs, PREFETCH(0x440, 0x4));
- mt76_wr(dev, MT_WFDMA1_RX_RING3_EXT_CTRL + ofs, PREFETCH(0x480, 0x0));
+#define PREFETCH(_base, _depth) ((_base) << 16 | (_depth))
+ u32 base = 0;
+
+ /* prefetch SRAM wrapping boundary for tx/rx ring. */
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x0, 0x4));
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x40, 0x4));
+ mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x80, 0x4));
+ mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0xc0, 0x4));
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x100, 0x4));
+
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(0x140, 0x4));
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(0x180, 0x4));
+ if (!is_mt7915(&dev->mt76)) {
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(0x1c0, 0x4));
+ base = 0x40;
+ }
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_EXT_WA) + ofs, PREFETCH(0x1c0 + base, 0x4));
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x200 + base, 0x4));
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_EXT) + ofs, PREFETCH(0x240 + base, 0x4));
+
+ /* for mt7915, the ring next to the last used ring
+ * must also be initialized.
+ */
+ if (is_mt7915(&dev->mt76)) {
+ ofs += 0x4;
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x140, 0x0));
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_EXT_WA) + ofs, PREFETCH(0x200 + base, 0x0));
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_EXT) + ofs, PREFETCH(0x280 + base, 0x0));
+ }
}
void mt7915_dma_prefetch(struct mt7915_dev *dev)
{
__mt7915_dma_prefetch(dev, 0);
if (dev->hif2)
- __mt7915_dma_prefetch(dev, MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE);
+ __mt7915_dma_prefetch(dev, MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0));
}
-int mt7915_dma_init(struct mt7915_dev *dev)
+static void mt7915_dma_disable(struct mt7915_dev *dev, bool rst)
{
+ struct mt76_dev *mdev = &dev->mt76;
u32 hif1_ofs = 0;
- int ret;
-
- mt76_dma_attach(&dev->mt76);
if (dev->hif2)
- hif1_ofs = MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE;
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+
+ /* reset */
+ if (rst) {
+ mt76_clear(dev, MT_WFDMA0_RST,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA0_RST,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ if (is_mt7915(mdev)) {
+ mt76_clear(dev, MT_WFDMA1_RST,
+ MT_WFDMA1_RST_DMASHDL_ALL_RST |
+ MT_WFDMA1_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA1_RST,
+ MT_WFDMA1_RST_DMASHDL_ALL_RST |
+ MT_WFDMA1_RST_LOGIC_RST);
+ }
+
+ if (dev->hif2) {
+ mt76_clear(dev, MT_WFDMA0_RST + hif1_ofs,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA0_RST + hif1_ofs,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ if (is_mt7915(mdev)) {
+ mt76_clear(dev, MT_WFDMA1_RST + hif1_ofs,
+ MT_WFDMA1_RST_DMASHDL_ALL_RST |
+ MT_WFDMA1_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA1_RST + hif1_ofs,
+ MT_WFDMA1_RST_DMASHDL_ALL_RST |
+ MT_WFDMA1_RST_LOGIC_RST);
+ }
+ }
+ }
+
+ /* disable */
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (is_mt7915(mdev))
+ mt76_clear(dev, MT_WFDMA1_GLO_CFG,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (dev->hif2) {
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (is_mt7915(mdev))
+ mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2);
+ }
+}
- /* configure global setting */
- mt76_set(dev, MT_WFDMA1_GLO_CFG,
- MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
- MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
+static int mt7915_dma_enable(struct mt7915_dev *dev)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ u32 hif1_ofs = 0;
+ u32 irq_mask;
+
+ if (dev->hif2)
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
/* reset dma idx */
mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
- mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR, ~0);
+ if (is_mt7915(mdev))
+ mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR, ~0);
+ if (dev->hif2) {
+ mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);
+ if (is_mt7915(mdev))
+ mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR + hif1_ofs, ~0);
+ }
- /* configure delay interrupt */
+ /* disable delay interrupt */
mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
- mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0, 0);
+ if (is_mt7915(mdev)) {
+ mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0, 0);
+ } else {
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1, 0);
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2, 0);
+ }
if (dev->hif2) {
- mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
+ if (is_mt7915(mdev)) {
+ mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0 +
+ hif1_ofs, 0);
+ } else {
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1 +
+ hif1_ofs, 0);
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2 +
+ hif1_ofs, 0);
+ }
+ }
+
+ /* configure prefetch settings */
+ mt7915_dma_prefetch(dev);
+
+ /* hif: wait for WFDMA to become idle */
+ mt76_set(dev, MT_WFDMA0_BUSY_ENA,
+ MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA0_BUSY_ENA_RX_FIFO);
+
+ if (is_mt7915(mdev))
+ mt76_set(dev, MT_WFDMA1_BUSY_ENA,
+ MT_WFDMA1_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA1_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA1_BUSY_ENA_RX_FIFO);
+
+ if (dev->hif2) {
+ mt76_set(dev, MT_WFDMA0_BUSY_ENA + hif1_ofs,
+ MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);
+
+ if (is_mt7915(mdev))
+ mt76_set(dev, MT_WFDMA1_BUSY_ENA + hif1_ofs,
+ MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO);
+ }
+
+ mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
+ MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);
+
+ /* set WFDMA Tx/Rx */
+ mt76_set(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (is_mt7915(mdev))
+ mt76_set(dev, MT_WFDMA1_GLO_CFG,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN |
MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
- mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);
- mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR + hif1_ofs, ~0);
+ if (dev->hif2) {
+ mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (is_mt7915(mdev))
+ mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
- mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
- mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
+ mt76_set(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_PDMA_BAND);
}
- /* configure perfetch settings */
- mt7915_dma_prefetch(dev);
+ /* enable interrupts for TX/RX rings */
+ irq_mask = MT_INT_RX_DONE_MCU |
+ MT_INT_TX_DONE_MCU |
+ MT_INT_MCU_CMD;
+
+ if (!dev->phy.band_idx)
+ irq_mask |= MT_INT_BAND0_RX_DONE;
+
+ if (dev->dbdc_support || dev->phy.band_idx)
+ irq_mask |= MT_INT_BAND1_RX_DONE;
+
+ mt7915_irq_enable(dev, irq_mask);
+
+ return 0;
+}
+
+int mt7915_dma_init(struct mt7915_dev *dev)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ u32 hif1_ofs = 0;
+ int ret;
+
+ mt7915_dma_config(dev);
+
+ mt76_dma_attach(&dev->mt76);
+
+ if (dev->hif2)
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+
+ mt7915_dma_disable(dev, true);
/* init tx queue */
- ret = mt7915_init_tx_queues(&dev->phy, MT7915_TXQ_BAND0,
- MT7915_TX_RING_SIZE);
+ ret = mt7915_init_tx_queues(&dev->phy,
+ MT_TXQ_ID(dev->phy.band_idx),
+ MT7915_TX_RING_SIZE,
+ MT_TXQ_RING_BASE(0));
if (ret)
return ret;
/* command to WM */
- ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7915_TXQ_MCU_WM,
- MT7915_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
+ MT_MCUQ_ID(MT_MCUQ_WM),
+ MT7915_TX_MCU_RING_SIZE,
+ MT_MCUQ_RING_BASE(MT_MCUQ_WM));
if (ret)
return ret;
/* command to WA */
- ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA, MT7915_TXQ_MCU_WA,
- MT7915_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
+ MT_MCUQ_ID(MT_MCUQ_WA),
+ MT7915_TX_MCU_RING_SIZE,
+ MT_MCUQ_RING_BASE(MT_MCUQ_WA));
if (ret)
return ret;
/* firmware download */
- ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL, MT7915_TXQ_FWDL,
- MT7915_TX_FWDL_RING_SIZE, MT_TX_RING_BASE);
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
+ MT_MCUQ_ID(MT_MCUQ_FWDL),
+ MT7915_TX_FWDL_RING_SIZE,
+ MT_MCUQ_RING_BASE(MT_MCUQ_FWDL));
if (ret)
return ret;
/* event from WM */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
- MT7915_RXQ_MCU_WM, MT7915_RX_MCU_RING_SIZE,
- MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
+ MT_RXQ_ID(MT_RXQ_MCU),
+ MT7915_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MCU));
if (ret)
return ret;
/* event from WA */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
- MT7915_RXQ_MCU_WA, MT7915_RX_MCU_RING_SIZE,
- MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
+ MT_RXQ_ID(MT_RXQ_MCU_WA),
+ MT7915_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
if (ret)
return ret;
- /* rx data queue */
- ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
- MT7915_RXQ_BAND0, MT7915_RX_RING_SIZE,
- MT_RX_BUF_SIZE, MT_RX_DATA_RING_BASE);
- if (ret)
- return ret;
+ /* rx data queue for band0 */
+ if (!dev->phy.band_idx) {
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
+ MT_RXQ_ID(MT_RXQ_MAIN),
+ MT7915_RX_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MAIN));
+ if (ret)
+ return ret;
+ }
- if (dev->dbdc_support) {
+ /* tx free notify event from WA for band0 */
+ if (!is_mt7915(mdev)) {
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
+ MT_RXQ_ID(MT_RXQ_MAIN_WA),
+ MT7915_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
+ if (ret)
+ return ret;
+ }
+
+ if (dev->dbdc_support || dev->phy.band_idx) {
+ /* rx data queue for band1 */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT],
- MT7915_RXQ_BAND1, MT7915_RX_RING_SIZE,
+ MT_RXQ_ID(MT_RXQ_EXT),
+ MT7915_RX_RING_SIZE,
MT_RX_BUF_SIZE,
- MT_RX_DATA_RING_BASE + hif1_ofs);
+ MT_RXQ_RING_BASE(MT_RXQ_EXT) + hif1_ofs);
if (ret)
return ret;
- /* event from WA */
+ /* tx free notify event from WA for band1 */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT_WA],
- MT7915_RXQ_MCU_WA_EXT,
+ MT_RXQ_ID(MT_RXQ_EXT_WA),
MT7915_RX_MCU_RING_SIZE,
MT_RX_BUF_SIZE,
- MT_RX_EVENT_RING_BASE + hif1_ofs);
+ MT_RXQ_RING_BASE(MT_RXQ_EXT_WA) + hif1_ofs);
if (ret)
return ret;
}
@@ -186,80 +438,14 @@ int mt7915_dma_init(struct mt7915_dev *dev)
mt7915_poll_tx, NAPI_POLL_WEIGHT);
napi_enable(&dev->mt76.tx_napi);
- /* hif wait WFDMA idle */
- mt76_set(dev, MT_WFDMA0_BUSY_ENA,
- MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
- MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
- MT_WFDMA0_BUSY_ENA_RX_FIFO);
-
- mt76_set(dev, MT_WFDMA1_BUSY_ENA,
- MT_WFDMA1_BUSY_ENA_TX_FIFO0 |
- MT_WFDMA1_BUSY_ENA_TX_FIFO1 |
- MT_WFDMA1_BUSY_ENA_RX_FIFO);
-
- mt76_set(dev, MT_WFDMA0_PCIE1_BUSY_ENA,
- MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
- MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
- MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);
-
- mt76_set(dev, MT_WFDMA1_PCIE1_BUSY_ENA,
- MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 |
- MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 |
- MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO);
-
- mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
- MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);
-
- /* set WFDMA Tx/Rx */
- mt76_set(dev, MT_WFDMA0_GLO_CFG,
- MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
- mt76_set(dev, MT_WFDMA1_GLO_CFG,
- MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
-
- if (dev->hif2) {
- mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
- (MT_WFDMA0_GLO_CFG_TX_DMA_EN |
- MT_WFDMA0_GLO_CFG_RX_DMA_EN));
- mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
- (MT_WFDMA1_GLO_CFG_TX_DMA_EN |
- MT_WFDMA1_GLO_CFG_RX_DMA_EN));
- mt76_set(dev, MT_WFDMA_HOST_CONFIG,
- MT_WFDMA_HOST_CONFIG_PDMA_BAND);
- }
-
- /* enable interrupts for TX/RX rings */
- mt7915_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_MCU |
- MT_INT_MCU_CMD);
+ mt7915_dma_enable(dev);
return 0;
}
void mt7915_dma_cleanup(struct mt7915_dev *dev)
{
- /* disable */
- mt76_clear(dev, MT_WFDMA0_GLO_CFG,
- MT_WFDMA0_GLO_CFG_TX_DMA_EN |
- MT_WFDMA0_GLO_CFG_RX_DMA_EN);
- mt76_clear(dev, MT_WFDMA1_GLO_CFG,
- MT_WFDMA1_GLO_CFG_TX_DMA_EN |
- MT_WFDMA1_GLO_CFG_RX_DMA_EN);
-
- /* reset */
- mt76_clear(dev, MT_WFDMA1_RST,
- MT_WFDMA1_RST_DMASHDL_ALL_RST |
- MT_WFDMA1_RST_LOGIC_RST);
-
- mt76_set(dev, MT_WFDMA1_RST,
- MT_WFDMA1_RST_DMASHDL_ALL_RST |
- MT_WFDMA1_RST_LOGIC_RST);
-
- mt76_clear(dev, MT_WFDMA0_RST,
- MT_WFDMA0_RST_DMASHDL_ALL_RST |
- MT_WFDMA0_RST_LOGIC_RST);
-
- mt76_set(dev, MT_WFDMA0_RST,
- MT_WFDMA0_RST_DMASHDL_ALL_RST |
- MT_WFDMA0_RST_LOGIC_RST);
+ mt7915_dma_disable(dev, true);
mt76_dma_cleanup(&dev->mt76);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
index edd74d0de157..5b133bcdab17 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
@@ -10,6 +10,7 @@ static int mt7915_eeprom_load_precal(struct mt7915_dev *dev)
struct mt76_dev *mdev = &dev->mt76;
u8 *eeprom = mdev->eeprom.data;
u32 val = eeprom[MT_EE_DO_PRE_CAL];
+ u32 offs;
if (!dev->flash_mode)
return 0;
@@ -22,7 +23,9 @@ static int mt7915_eeprom_load_precal(struct mt7915_dev *dev)
if (!dev->cal)
return -ENOMEM;
- return mt76_get_of_eeprom(mdev, dev->cal, MT_EE_PRECAL, val);
+ offs = is_mt7915(&dev->mt76) ? MT_EE_PRECAL : MT_EE_PRECAL_V2;
+
+ return mt76_get_of_eeprom(mdev, dev->cal, offs, val);
}
static int mt7915_check_eeprom(struct mt7915_dev *dev)
@@ -32,24 +35,49 @@ static int mt7915_check_eeprom(struct mt7915_dev *dev)
switch (val) {
case 0x7915:
+ case 0x7916:
+ case 0x7986:
return 0;
default:
return -EINVAL;
}
}
+static char *mt7915_eeprom_name(struct mt7915_dev *dev)
+{
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7915:
+ return dev->dbdc_support ?
+ MT7915_EEPROM_DEFAULT_DBDC : MT7915_EEPROM_DEFAULT;
+ case 0x7986:
+ switch (mt7915_check_adie(dev, true)) {
+ case MT7976_ONE_ADIE_DBDC:
+ return MT7986_EEPROM_MT7976_DEFAULT_DBDC;
+ case MT7975_ONE_ADIE:
+ return MT7986_EEPROM_MT7975_DEFAULT;
+ case MT7976_ONE_ADIE:
+ return MT7986_EEPROM_MT7976_DEFAULT;
+ case MT7975_DUAL_ADIE:
+ return MT7986_EEPROM_MT7975_DUAL_DEFAULT;
+ case MT7976_DUAL_ADIE:
+ return MT7986_EEPROM_MT7976_DUAL_DEFAULT;
+ default:
+ break;
+ }
+ return NULL;
+ default:
+ return MT7916_EEPROM_DEFAULT;
+ }
+}
+
static int
mt7915_eeprom_load_default(struct mt7915_dev *dev)
{
- char *default_bin = MT7915_EEPROM_DEFAULT;
u8 *eeprom = dev->mt76.eeprom.data;
const struct firmware *fw = NULL;
int ret;
- if (dev->dbdc_support)
- default_bin = MT7915_EEPROM_DEFAULT_DBDC;
-
- ret = request_firmware(&fw, default_bin, dev->mt76.dev);
+ ret = request_firmware(&fw, mt7915_eeprom_name(dev), dev->mt76.dev);
if (ret)
return ret;
@@ -59,7 +87,7 @@ mt7915_eeprom_load_default(struct mt7915_dev *dev)
goto out;
}
- memcpy(eeprom, fw->data, MT7915_EEPROM_SIZE);
+ memcpy(eeprom, fw->data, mt7915_eeprom_size(dev));
dev->flash_mode = true;
out:
@@ -71,8 +99,9 @@ out:
static int mt7915_eeprom_load(struct mt7915_dev *dev)
{
int ret;
+ u16 eeprom_size = mt7915_eeprom_size(dev);
- ret = mt76_eeprom_init(&dev->mt76, MT7915_EEPROM_SIZE);
+ ret = mt76_eeprom_init(&dev->mt76, eeprom_size);
if (ret < 0)
return ret;
@@ -88,7 +117,7 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
return -EINVAL;
/* read eeprom data from efuse */
- block_num = DIV_ROUND_UP(MT7915_EEPROM_SIZE,
+ block_num = DIV_ROUND_UP(eeprom_size,
MT7915_EEPROM_BLOCK_SIZE);
for (i = 0; i < block_num; i++)
mt7915_mcu_get_eeprom(dev,
@@ -98,17 +127,32 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
return mt7915_check_eeprom(dev);
}
-void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
+static void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
u8 *eeprom = dev->mt76.eeprom.data;
u32 val;
- val = eeprom[MT_EE_WIFI_CONF + ext_phy];
+ val = eeprom[MT_EE_WIFI_CONF + phy->band_idx];
val = FIELD_GET(MT_EE_WIFI_CONF0_BAND_SEL, val);
- if (val == MT_EE_BAND_SEL_DEFAULT && dev->dbdc_support)
- val = ext_phy ? MT_EE_BAND_SEL_5GHZ : MT_EE_BAND_SEL_2GHZ;
+
+ if (!is_mt7915(&dev->mt76)) {
+ switch (val) {
+ case MT_EE_V2_BAND_SEL_5GHZ:
+ phy->mt76->cap.has_5ghz = true;
+ return;
+ case MT_EE_V2_BAND_SEL_6GHZ:
+ phy->mt76->cap.has_6ghz = true;
+ return;
+ case MT_EE_V2_BAND_SEL_5GHZ_6GHZ:
+ phy->mt76->cap.has_5ghz = true;
+ phy->mt76->cap.has_6ghz = true;
+ return;
+ default:
+ phy->mt76->cap.has_2ghz = true;
+ return;
+ }
+ }
switch (val) {
case MT_EE_BAND_SEL_5GHZ:
@@ -124,32 +168,65 @@ void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
}
}
-static void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev)
+void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev,
+ struct mt7915_phy *phy)
{
- u8 nss, nss_band, *eeprom = dev->mt76.eeprom.data;
+ u8 nss, nss_band, nss_band_max, *eeprom = dev->mt76.eeprom.data;
+ struct mt76_phy *mphy = phy->mt76;
+ bool ext_phy = phy != &dev->phy;
+
+ mt7915_eeprom_parse_band_config(phy);
- mt7915_eeprom_parse_band_config(&dev->phy);
+ /* read tx/rx mask from eeprom */
+ if (is_mt7915(&dev->mt76)) {
+ nss = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
+ eeprom[MT_EE_WIFI_CONF]);
+ } else {
+ nss = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
+ eeprom[MT_EE_WIFI_CONF + phy->band_idx]);
+ }
- /* read tx mask from eeprom */
- nss = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH, eeprom[MT_EE_WIFI_CONF]);
if (!nss || nss > 4)
nss = 4;
+ /* read tx/rx stream */
nss_band = nss;
if (dev->dbdc_support) {
- nss_band = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B0,
- eeprom[MT_EE_WIFI_CONF + 3]);
- if (!nss_band || nss_band > 2)
- nss_band = 2;
+ if (is_mt7915(&dev->mt76)) {
+ nss_band = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B0,
+ eeprom[MT_EE_WIFI_CONF + 3]);
+ if (phy->band_idx)
+ nss_band = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B1,
+ eeprom[MT_EE_WIFI_CONF + 3]);
+ } else {
+ nss_band = FIELD_GET(MT_EE_WIFI_CONF_STREAM_NUM,
+ eeprom[MT_EE_WIFI_CONF + 2 + phy->band_idx]);
+ }
+
+ nss_band_max = is_mt7986(&dev->mt76) ?
+ MT_EE_NSS_MAX_DBDC_MA7986 : MT_EE_NSS_MAX_DBDC_MA7915;
+ } else {
+ nss_band_max = is_mt7986(&dev->mt76) ?
+ MT_EE_NSS_MAX_MA7986 : MT_EE_NSS_MAX_MA7915;
+ }
- if (nss_band >= nss)
- nss = 4;
+ if (!nss_band || nss_band > nss_band_max)
+ nss_band = nss_band_max;
+
+ if (nss_band > nss) {
+ dev_warn(dev->mt76.dev,
+ "nss mismatch, nss(%d) nss_band(%d) band(%d) ext_phy(%d)\n",
+ nss, nss_band, phy->band_idx, ext_phy);
+ nss = nss_band;
}
- dev->chainmask = BIT(nss) - 1;
- dev->mphy.antenna_mask = BIT(nss_band) - 1;
- dev->mphy.chainmask = dev->mphy.antenna_mask;
+ mphy->chainmask = BIT(nss) - 1;
+ if (ext_phy)
+ mphy->chainmask <<= dev->chainshift;
+ mphy->antenna_mask = BIT(nss_band) - 1;
+ dev->chainmask |= mphy->chainmask;
+ dev->chainshift = hweight8(dev->mphy.chainmask);
}
int mt7915_eeprom_init(struct mt7915_dev *dev)
@@ -171,7 +248,7 @@ int mt7915_eeprom_init(struct mt7915_dev *dev)
if (ret)
return ret;
- mt7915_eeprom_parse_hw_cap(dev);
+ mt7915_eeprom_parse_hw_cap(dev, &dev->phy);
memcpy(dev->mphy.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
ETH_ALEN);
@@ -186,27 +263,43 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
{
u8 *eeprom = dev->mt76.eeprom.data;
int index, target_power;
- bool tssi_on;
+ bool tssi_on, is_7976;
if (chain_idx > 3)
return -EINVAL;
tssi_on = mt7915_tssi_enabled(dev, chan->band);
+ is_7976 = mt7915_check_adie(dev, false) || is_mt7916(&dev->mt76);
if (chan->band == NL80211_BAND_2GHZ) {
- index = MT_EE_TX0_POWER_2G + chain_idx * 3;
- target_power = eeprom[index];
-
- if (!tssi_on)
- target_power += eeprom[index + 1];
+ if (is_7976) {
+ index = MT_EE_TX0_POWER_2G_V2 + chain_idx;
+ target_power = eeprom[index];
+ } else {
+ index = MT_EE_TX0_POWER_2G + chain_idx * 3;
+ target_power = eeprom[index];
+
+ if (!tssi_on)
+ target_power += eeprom[index + 1];
+ }
+ } else if (chan->band == NL80211_BAND_5GHZ) {
+ int group = mt7915_get_channel_group_5g(chan->hw_value, is_7976);
+
+ if (is_7976) {
+ index = MT_EE_TX0_POWER_5G_V2 + chain_idx * 5;
+ target_power = eeprom[index + group];
+ } else {
+ index = MT_EE_TX0_POWER_5G + chain_idx * 12;
+ target_power = eeprom[index + group];
+
+ if (!tssi_on)
+ target_power += eeprom[index + 8];
+ }
} else {
- int group = mt7915_get_channel_group(chan->hw_value);
-
- index = MT_EE_TX0_POWER_5G + chain_idx * 12;
- target_power = eeprom[index + group];
+ int group = mt7915_get_channel_group_6g(chan->hw_value);
- if (!tssi_on)
- target_power += eeprom[index + 8];
+ index = MT_EE_TX0_POWER_6G_V2 + chain_idx * 8;
+ target_power = is_7976 ? eeprom[index + group] : 0;
}
return target_power;
@@ -215,15 +308,20 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band)
{
u8 *eeprom = dev->mt76.eeprom.data;
- u32 val;
+ u32 val, offs;
s8 delta;
+ bool is_7976 = mt7915_check_adie(dev, false) || is_mt7916(&dev->mt76);
if (band == NL80211_BAND_2GHZ)
- val = eeprom[MT_EE_RATE_DELTA_2G];
+ offs = is_7976 ? MT_EE_RATE_DELTA_2G_V2 : MT_EE_RATE_DELTA_2G;
+ else if (band == NL80211_BAND_5GHZ)
+ offs = is_7976 ? MT_EE_RATE_DELTA_5G_V2 : MT_EE_RATE_DELTA_5G;
else
- val = eeprom[MT_EE_RATE_DELTA_5G];
+ offs = is_7976 ? MT_EE_RATE_DELTA_6G_V2 : 0;
+
+ val = eeprom[offs];
- if (!(val & MT_EE_RATE_DELTA_EN))
+ if (!offs || !(val & MT_EE_RATE_DELTA_EN))
return 0;
delta = FIELD_GET(MT_EE_RATE_DELTA_MASK, val);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
index a43389a41800..7578ac6d0be6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
@@ -23,11 +23,19 @@ enum mt7915_eeprom_field {
MT_EE_RATE_DELTA_5G = 0x29d,
MT_EE_TX0_POWER_2G = 0x2fc,
MT_EE_TX0_POWER_5G = 0x34b,
+ MT_EE_RATE_DELTA_2G_V2 = 0x7d3,
+ MT_EE_RATE_DELTA_5G_V2 = 0x81e,
+ MT_EE_RATE_DELTA_6G_V2 = 0x884, /* 6g fields only appear in eeprom v2 */
+ MT_EE_TX0_POWER_2G_V2 = 0x441,
+ MT_EE_TX0_POWER_5G_V2 = 0x445,
+ MT_EE_TX0_POWER_6G_V2 = 0x465,
MT_EE_ADIE_FT_VERSION = 0x9a0,
__MT_EE_MAX = 0xe00,
+ __MT_EE_MAX_V2 = 0x1000,
/* 0xe10 ~ 0x5780 used to save group cal data */
- MT_EE_PRECAL = 0xe10
+ MT_EE_PRECAL = 0xe10,
+ MT_EE_PRECAL_V2 = 0x1010
};
#define MT_EE_WIFI_CAL_GROUP BIT(0)
@@ -39,6 +47,7 @@ enum mt7915_eeprom_field {
#define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0)
#define MT_EE_WIFI_CONF0_BAND_SEL GENMASK(7, 6)
#define MT_EE_WIFI_CONF1_BAND_SEL GENMASK(7, 6)
+#define MT_EE_WIFI_CONF_STREAM_NUM GENMASK(7, 5)
#define MT_EE_WIFI_CONF3_TX_PATH_B0 GENMASK(1, 0)
#define MT_EE_WIFI_CONF3_TX_PATH_B1 GENMASK(5, 4)
#define MT_EE_WIFI_CONF7_TSSI0_2G BIT(0)
@@ -49,6 +58,19 @@ enum mt7915_eeprom_field {
#define MT_EE_RATE_DELTA_SIGN BIT(6)
#define MT_EE_RATE_DELTA_EN BIT(7)
+#define MT_EE_NSS_MAX_MA7915 4
+#define MT_EE_NSS_MAX_DBDC_MA7915 2
+#define MT_EE_NSS_MAX_MA7986 4
+#define MT_EE_NSS_MAX_DBDC_MA7986 4
+
+enum mt7915_adie_sku {
+ MT7976_ONE_ADIE_DBDC = 0x7,
+ MT7975_ONE_ADIE = 0x8,
+ MT7976_ONE_ADIE = 0xa,
+ MT7975_DUAL_ADIE = 0xd,
+ MT7976_DUAL_ADIE = 0xf,
+};
+
enum mt7915_eeprom_band {
MT_EE_BAND_SEL_DEFAULT,
MT_EE_BAND_SEL_5GHZ,
@@ -56,6 +78,13 @@ enum mt7915_eeprom_band {
MT_EE_BAND_SEL_DUAL,
};
+enum {
+ MT_EE_V2_BAND_SEL_2GHZ,
+ MT_EE_V2_BAND_SEL_5GHZ,
+ MT_EE_V2_BAND_SEL_6GHZ,
+ MT_EE_V2_BAND_SEL_5GHZ_6GHZ,
+};
+
enum mt7915_sku_rate_group {
SKU_CCK,
SKU_OFDM,
@@ -76,8 +105,20 @@ enum mt7915_sku_rate_group {
};
static inline int
-mt7915_get_channel_group(int channel)
+mt7915_get_channel_group_5g(int channel, bool is_7976)
{
+ if (is_7976) {
+ if (channel <= 64)
+ return 0;
+ if (channel <= 96)
+ return 1;
+ if (channel <= 128)
+ return 2;
+ if (channel <= 144)
+ return 3;
+ return 4;
+ }
+
if (channel >= 184 && channel <= 196)
return 0;
if (channel <= 48)
@@ -95,6 +136,15 @@ mt7915_get_channel_group(int channel)
return 7;
}
+static inline int
+mt7915_get_channel_group_6g(int channel)
+{
+ if (channel <= 29)
+ return 0;
+
+ return DIV_ROUND_UP(channel - 29, 32);
+}
+
static inline bool
mt7915_tssi_enabled(struct mt7915_dev *dev, enum nl80211_band band)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index d054cdecd5f7..6d29366c5139 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -50,15 +50,22 @@ static ssize_t mt7915_thermal_temp_show(struct device *dev,
int i = to_sensor_dev_attr(attr)->index;
int temperature;
- if (i)
- return sprintf(buf, "%u\n", phy->throttle_temp[i - 1] * 1000);
-
- temperature = mt7915_mcu_get_temperature(phy);
- if (temperature < 0)
- return temperature;
-
- /* display in millidegree celcius */
- return sprintf(buf, "%u\n", temperature * 1000);
+ switch (i) {
+ case 0:
+ temperature = mt7915_mcu_get_temperature(phy);
+ if (temperature < 0)
+ return temperature;
+ /* display in millidegree celsius */
+ return sprintf(buf, "%u\n", temperature * 1000);
+ case 1:
+ case 2:
+ return sprintf(buf, "%u\n",
+ phy->throttle_temp[i - 1] * 1000);
+ case 3:
+ return sprintf(buf, "%hhu\n", phy->throttle_state);
+ default:
+ return -EINVAL;
+ }
}
static ssize_t mt7915_thermal_temp_store(struct device *dev,
@@ -84,11 +91,13 @@ static ssize_t mt7915_thermal_temp_store(struct device *dev,
static SENSOR_DEVICE_ATTR_RO(temp1_input, mt7915_thermal_temp, 0);
static SENSOR_DEVICE_ATTR_RW(temp1_crit, mt7915_thermal_temp, 1);
static SENSOR_DEVICE_ATTR_RW(temp1_max, mt7915_thermal_temp, 2);
+static SENSOR_DEVICE_ATTR_RO(throttle1, mt7915_thermal_temp, 3);
static struct attribute *mt7915_hwmon_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_crit.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_throttle1.dev_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(mt7915_hwmon);
@@ -97,7 +106,7 @@ static int
mt7915_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
- *state = MT7915_THERMAL_THROTTLE_MAX;
+ *state = MT7915_CDEV_THROTTLE_MAX;
return 0;
}
@@ -108,7 +117,7 @@ mt7915_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
{
struct mt7915_phy *phy = cdev->devdata;
- *state = phy->throttle_state;
+ *state = phy->cdev_state;
return 0;
}
@@ -118,22 +127,27 @@ mt7915_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
struct mt7915_phy *phy = cdev->devdata;
+ u8 throttling = MT7915_THERMAL_THROTTLE_MAX - state;
int ret;
- if (state > MT7915_THERMAL_THROTTLE_MAX)
+ if (state > MT7915_CDEV_THROTTLE_MAX)
return -EINVAL;
if (phy->throttle_temp[0] > phy->throttle_temp[1])
return 0;
- if (state == phy->throttle_state)
+ if (state == phy->cdev_state)
return 0;
- ret = mt7915_mcu_set_thermal_throttling(phy, state);
+ /*
+ * cooling_device convention: 0 = no cooling, more = more cooling
+ * mcu convention: 1 = max cooling, more = less cooling
+ */
+ ret = mt7915_mcu_set_thermal_throttling(phy, throttling);
if (ret)
return ret;
- phy->throttle_state = state;
+ phy->cdev_state = state;
return 0;
}
@@ -186,7 +200,8 @@ static int mt7915_thermal_init(struct mt7915_phy *phy)
phy->throttle_temp[0] = 110;
phy->throttle_temp[1] = 120;
- return 0;
+ return mt7915_mcu_set_thermal_throttling(phy,
+ MT7915_THERMAL_THROTTLE_MAX);
}
static void mt7915_led_set_config(struct led_classdev *led_cdev,
@@ -288,17 +303,18 @@ mt7915_regd_notifier(struct wiphy *wiphy,
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt76_phy *mphy = hw->priv;
struct mt7915_phy *phy = mphy->priv;
- struct cfg80211_chan_def *chandef = &mphy->chandef;
memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2));
dev->mt76.region = request->dfs_region;
+ if (dev->mt76.region == NL80211_DFS_UNSET)
+ mt7915_mcu_rdd_background_enable(phy, NULL);
+
mt7915_init_txpower(dev, &mphy->sband_2g.sband);
mt7915_init_txpower(dev, &mphy->sband_5g.sband);
+ mt7915_init_txpower(dev, &mphy->sband_6g.sband);
- if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
- return;
-
+ mphy->dfs_state = MT_DFS_STATE_UNKNOWN;
mt7915_dfs_init_radar_detector(phy);
}
@@ -306,11 +322,13 @@ static void
mt7915_init_wiphy(struct ieee80211_hw *hw)
{
struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt76_dev *mdev = &phy->dev->mt76;
struct wiphy *wiphy = hw->wiphy;
+ struct mt7915_dev *dev = phy->dev;
hw->queues = 4;
- hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
- hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
+ hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
hw->netdev_features = NETIF_F_RXCSUM;
hw->radiotap_timestamp.units_pos =
@@ -325,6 +343,7 @@ mt7915_init_wiphy(struct ieee80211_hw *hw)
wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
wiphy->reg_notifier = mt7915_regd_notifier;
wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ wiphy->mbssid_max_interfaces = 16;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BSS_COLOR);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
@@ -333,9 +352,16 @@ mt7915_init_wiphy(struct ieee80211_hw *hw)
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_VHT);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HE);
+ if (!mdev->dev->of_node ||
+ !of_property_read_bool(mdev->dev->of_node,
+ "mediatek,disable-radar-background"))
+ wiphy_ext_feature_set(wiphy,
+ NL80211_EXT_FEATURE_RADAR_BACKGROUND);
+
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD);
ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
+ ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
hw->max_tx_fragments = 4;
@@ -349,14 +375,34 @@ mt7915_init_wiphy(struct ieee80211_hw *hw)
phy->mt76->sband_5g.sband.ht_cap.cap |=
IEEE80211_HT_CAP_LDPC_CODING |
IEEE80211_HT_CAP_MAX_AMSDU;
- phy->mt76->sband_5g.sband.vht_cap.cap |=
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
- IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+
+ if (is_mt7915(&dev->mt76)) {
+ phy->mt76->sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+
+ if (!dev->dbdc_support)
+ phy->mt76->sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
+ } else {
+ phy->mt76->sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+
+ /* mt7916 dbdc with 2g 2x2 bw40 and 5g 2x2 bw160c */
+ phy->mt76->sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
+ }
}
mt76_set_stream_caps(phy->mt76, true);
mt7915_set_stream_vht_txbf_caps(phy);
mt7915_set_stream_he_caps(phy);
+
+ wiphy->available_antennas_rx = phy->mt76->antenna_mask;
+ wiphy->available_antennas_tx = phy->mt76->antenna_mask;
}
static void
@@ -387,19 +433,27 @@ mt7915_mac_init_band(struct mt7915_dev *dev, u8 band)
mt76_rmw(dev, MT_MDP_BNRCFR1(band), mask, set);
mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_MAX_RX_LEN, 0x680);
- /* disable rx rate report by default due to hw issues */
+
+ /* mt7915: disable rx rate report by default due to hw issues */
mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN);
}
static void mt7915_mac_init(struct mt7915_dev *dev)
{
int i;
+ u32 rx_len = is_mt7915(&dev->mt76) ? 0x400 : 0x680;
+
+ /* config pse qid6 wfdma port selection */
+ if (!is_mt7915(&dev->mt76) && dev->hif2)
+ mt76_rmw(dev, MT_WF_PP_TOP_RXQ_WFDMA_CF_5, 0,
+ MT_WF_PP_TOP_RXQ_QID6_WFDMA_HIF_SEL_MASK);
+
+ mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, rx_len);
- mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, 0x400);
/* enable hardware de-agg */
mt76_set(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN);
- for (i = 0; i < MT7915_WTBL_SIZE; i++)
+ for (i = 0; i < mt7915_wtbl_size(dev); i++)
mt7915_mac_wtbl_update(dev, i,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
for (i = 0; i < 2; i++)
@@ -449,20 +503,32 @@ static int mt7915_register_ext_phy(struct mt7915_dev *dev)
phy = mphy->priv;
phy->dev = dev;
phy->mt76 = mphy;
- mphy->chainmask = dev->chainmask & ~dev->mphy.chainmask;
- mphy->antenna_mask = BIT(hweight8(mphy->chainmask)) - 1;
+
+ /* Bind main phy to band0 and ext_phy to band1 for dbdc case */
+ phy->band_idx = 1;
INIT_DELAYED_WORK(&mphy->mac_work, mt7915_mac_work);
- mt7915_eeprom_parse_band_config(phy);
- mt7915_init_wiphy(mphy->hw);
+ mt7915_eeprom_parse_hw_cap(dev, phy);
memcpy(mphy->macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR2,
ETH_ALEN);
+ /* Make the secondary PHY MAC address local without overlapping with
+ * the usual MAC address allocation scheme on multiple virtual interfaces
+ */
+ if (!is_valid_ether_addr(mphy->macaddr)) {
+ memcpy(mphy->macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
+ ETH_ALEN);
+ mphy->macaddr[0] |= 2;
+ mphy->macaddr[0] ^= BIT(7);
+ }
mt76_eeprom_override(mphy);
- ret = mt7915_init_tx_queues(phy, MT7915_TXQ_BAND1,
- MT7915_TX_RING_SIZE);
+ /* init wiphy according to mphy and phy */
+ mt7915_init_wiphy(mphy->hw);
+ ret = mt7915_init_tx_queues(phy, MT_TXQ_ID(phy->band_idx),
+ MT7915_TX_RING_SIZE,
+ MT_TXQ_RING_BASE(1));
if (ret)
goto error;
@@ -495,46 +561,88 @@ static void mt7915_init_work(struct work_struct *work)
mt7915_mac_init(dev);
mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband);
mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband);
+ mt7915_init_txpower(dev, &dev->mphy.sband_6g.sband);
mt7915_txbf_init(dev);
}
static void mt7915_wfsys_reset(struct mt7915_dev *dev)
{
- u32 val = MT_TOP_PWR_KEY | MT_TOP_PWR_SW_PWR_ON | MT_TOP_PWR_PWR_ON;
-
#define MT_MCU_DUMMY_RANDOM GENMASK(15, 0)
#define MT_MCU_DUMMY_DEFAULT GENMASK(31, 16)
- mt76_wr(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_RANDOM);
+ if (is_mt7915(&dev->mt76)) {
+ u32 val = MT_TOP_PWR_KEY | MT_TOP_PWR_SW_PWR_ON | MT_TOP_PWR_PWR_ON;
- /* change to software control */
- val |= MT_TOP_PWR_SW_RST;
- mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+ mt76_wr(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_RANDOM);
- /* reset wfsys */
- val &= ~MT_TOP_PWR_SW_RST;
- mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+ /* change to software control */
+ val |= MT_TOP_PWR_SW_RST;
+ mt76_wr(dev, MT_TOP_PWR_CTRL, val);
- /* release wfsys then mcu re-excutes romcode */
- val |= MT_TOP_PWR_SW_RST;
- mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+ /* reset wfsys */
+ val &= ~MT_TOP_PWR_SW_RST;
+ mt76_wr(dev, MT_TOP_PWR_CTRL, val);
- /* switch to hw control */
- val &= ~MT_TOP_PWR_SW_RST;
- val |= MT_TOP_PWR_HW_CTRL;
- mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+ /* release wfsys then mcu re-executes romcode */
+ val |= MT_TOP_PWR_SW_RST;
+ mt76_wr(dev, MT_TOP_PWR_CTRL, val);
- /* check whether mcu resets to default */
- if (!mt76_poll_msec(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_DEFAULT,
- MT_MCU_DUMMY_DEFAULT, 1000)) {
- dev_err(dev->mt76.dev, "wifi subsystem reset failure\n");
- return;
+ /* switch to hw control */
+ val &= ~MT_TOP_PWR_SW_RST;
+ val |= MT_TOP_PWR_HW_CTRL;
+ mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+
+ /* check whether mcu resets to default */
+ if (!mt76_poll_msec(dev, MT_MCU_WFDMA0_DUMMY_CR,
+ MT_MCU_DUMMY_DEFAULT, MT_MCU_DUMMY_DEFAULT,
+ 1000)) {
+ dev_err(dev->mt76.dev, "wifi subsystem reset failure\n");
+ return;
+ }
+
+ /* wfsys reset won't clear host registers */
+ mt76_clear(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE);
+
+ msleep(100);
+ } else if (is_mt7986(&dev->mt76)) {
+ mt7986_wmac_disable(dev);
+ msleep(20);
+
+ mt7986_wmac_enable(dev);
+ msleep(20);
+ } else {
+ mt76_set(dev, MT_WF_SUBSYS_RST, 0x1);
+ msleep(20);
+
+ mt76_clear(dev, MT_WF_SUBSYS_RST, 0x1);
+ msleep(20);
}
+}
- /* wfsys reset won't clear host registers */
- mt76_clear(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE);
+static bool mt7915_band_config(struct mt7915_dev *dev)
+{
+ bool ret = true;
+
+ dev->phy.band_idx = 0;
+
+ if (is_mt7986(&dev->mt76)) {
+ u32 sku = mt7915_check_adie(dev, true);
- msleep(100);
+ /*
+ * for mt7986, dbdc support is determined by the number
+ * of adie chips and the main phy is bound to band1 when
+ * dbdc is disabled.
+ */
+ if (sku == MT7975_ONE_ADIE || sku == MT7976_ONE_ADIE) {
+ dev->phy.band_idx = 1;
+ ret = false;
+ }
+ } else {
+ ret = is_mt7915(&dev->mt76) ?
+ !!(mt76_rr(dev, MT_HW_BOUND) & BIT(5)) : true;
+ }
+
+ return ret;
}
static int mt7915_init_hardware(struct mt7915_dev *dev)
@@ -544,7 +652,8 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
INIT_WORK(&dev->init_work, mt7915_init_work);
- dev->dbdc_support = !!(mt76_rr(dev, MT_HW_BOUND) & BIT(5));
+
+ dev->dbdc_support = mt7915_band_config(dev);
/* If MCU was already running, it is likely in a bad state */
if (mt76_get_field(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE) >
@@ -557,12 +666,6 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
- /*
- * force firmware operation mode into normal state,
- * which should be set before firmware download stage.
- */
- mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
-
ret = mt7915_mcu_init(dev);
if (ret) {
/* Reset and try again */
@@ -577,7 +680,6 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
if (ret < 0)
return ret;
-
if (dev->flash_mode) {
ret = mt7915_mcu_apply_group_cal(dev);
if (ret)
@@ -626,11 +728,18 @@ void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy)
}
static void
-mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
+mt7915_set_stream_he_txbf_caps(struct mt7915_dev *dev,
+ struct ieee80211_sta_he_cap *he_cap,
int vif, int nss)
{
struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
- u8 c;
+ u8 c, nss_160;
+
+ /* Can do 1/2 of NSS streams in 160 MHz mode for mt7915 */
+ if (is_mt7915(&dev->mt76) && !dev->dbdc_support)
+ nss_160 = nss / 2;
+ else
+ nss_160 = nss;
#ifdef CONFIG_MAC80211_MESH
if (vif == NL80211_IFTYPE_MESH_POINT)
@@ -684,13 +793,21 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
/* num_snd_dim
* for mt7915, max supported nss is 2 for bw > 80MHz
*/
- c = (nss - 1) |
- IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2;
+ c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
+ nss - 1) |
+ FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK,
+ nss_160 - 1);
elem->phy_cap_info[5] |= c;
c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB;
elem->phy_cap_info[6] |= c;
+
+ if (!is_mt7915(&dev->mt76)) {
+ c = IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ |
+ IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ;
+ elem->phy_cap_info[7] |= c;
+ }
}
static void
@@ -718,9 +835,17 @@ static int
mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
struct ieee80211_sband_iftype_data *data)
{
+ struct mt7915_dev *dev = phy->dev;
int i, idx = 0, nss = hweight8(phy->mt76->chainmask);
u16 mcs_map = 0;
u16 mcs_map_160 = 0;
+ u8 nss_160;
+
+ /* Can do 1/2 of NSS streams in 160 MHz mode for mt7915 */
+ if (is_mt7915(&dev->mt76) && !dev->dbdc_support)
+ nss_160 = nss / 2;
+ else
+ nss_160 = nss;
for (i = 0; i < 8; i++) {
if (i < nss)
@@ -728,8 +853,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
else
mcs_map |= (IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2));
- /* Can do 1/2 of NSS streams in 160Mhz mode. */
- if (i < nss / 2)
+ if (i < nss_160)
mcs_map_160 |= (IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2));
else
mcs_map_160 |= (IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2));
@@ -767,7 +891,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
if (band == NL80211_BAND_2GHZ)
he_cap_elem->phy_cap_info[0] =
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
- else if (band == NL80211_BAND_5GHZ)
+ else
he_cap_elem->phy_cap_info[0] =
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
@@ -806,7 +930,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
if (band == NL80211_BAND_2GHZ)
he_cap_elem->phy_cap_info[0] |=
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G;
- else if (band == NL80211_BAND_5GHZ)
+ else
he_cap_elem->phy_cap_info[0] |=
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G;
@@ -845,7 +969,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
he_mcs->rx_mcs_80p80 = cpu_to_le16(mcs_map_160);
he_mcs->tx_mcs_80p80 = cpu_to_le16(mcs_map_160);
- mt7915_set_stream_he_txbf_caps(he_cap, i, nss);
+ mt7915_set_stream_he_txbf_caps(dev, he_cap, i, nss);
memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
if (he_cap_elem->phy_cap_info[6] &
@@ -856,6 +980,21 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
}
+
+ if (band == NL80211_BAND_6GHZ) {
+ u16 cap = IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS |
+ IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS;
+
+ cap |= u16_encode_bits(IEEE80211_HT_MPDU_DENSITY_8,
+ IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
+ u16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
+ u16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
+ IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
+
+ data[idx].he_6ghz_capa.capa = cpu_to_le16(cap);
+ }
+
idx++;
}
@@ -885,6 +1024,15 @@ void mt7915_set_stream_he_caps(struct mt7915_phy *phy)
band->iftype_data = data;
band->n_iftype_data = n;
}
+
+ if (phy->mt76->cap.has_6ghz) {
+ data = phy->iftype[NL80211_BAND_6GHZ];
+ n = mt7915_init_he_caps(phy, NL80211_BAND_6GHZ, data);
+
+ band = &phy->mt76->sband_6g.sband;
+ band->iftype_data = data;
+ band->n_iftype_data = n;
+ }
}
static void mt7915_unregister_ext_phy(struct mt7915_dev *dev)
@@ -924,15 +1072,6 @@ int mt7915_register_device(struct mt7915_dev *dev)
mt7915_init_wiphy(hw);
- if (!dev->dbdc_support)
- dev->mphy.sband_5g.sband.vht_cap.cap |=
- IEEE80211_VHT_CAP_SHORT_GI_160 |
- IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
-
- dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask;
- dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask;
- dev->phy.dfs_state = -1;
-
#ifdef CONFIG_NL80211_TESTMODE
dev->mt76.test_ops = &mt7915_testmode_ops;
#endif
@@ -971,5 +1110,8 @@ void mt7915_unregister_device(struct mt7915_dev *dev)
mt7915_dma_cleanup(dev);
tasklet_disable(&dev->irq_tasklet);
+ if (is_mt7986(&dev->mt76))
+ mt7986_wmac_disable(dev);
+
mt76_free_device(&dev->mt76);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index 48f115502282..e9e7efbf350d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -165,7 +165,7 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
sta = container_of((void *)msta, struct ieee80211_sta,
drv_priv);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- u8 q = mt7915_lmac_mapping(dev, i);
+ u8 q = mt76_connac_lmac_mapping(i);
u32 tx_cur = tx_time[q];
u32 rx_cur = rx_time[q];
u8 tid = ac_to_tid[i];
@@ -226,8 +226,8 @@ mt7915_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
u32 ru_h, ru_l;
u8 ru, offs = 0;
- ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv[0]));
- ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv[1]));
+ ru_l = le32_get_bits(rxv[0], MT_PRXV_HE_RU_ALLOC_L);
+ ru_h = le32_get_bits(rxv[1], MT_PRXV_HE_RU_ALLOC_H);
ru = (u8)(ru_l | ru_h << 4);
status->bw = RATE_INFO_BW_HE_RU;
@@ -349,14 +349,16 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u32 mode)
case MT_PHY_TYPE_HE_SU:
he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
HE_BITS(DATA1_UL_DL_KNOWN) |
- HE_BITS(DATA1_BEAM_CHANGE_KNOWN);
+ HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
+ HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
break;
case MT_PHY_TYPE_HE_EXT_SU:
he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
- HE_BITS(DATA1_UL_DL_KNOWN);
+ HE_BITS(DATA1_UL_DL_KNOWN) |
+ HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
break;
@@ -376,7 +378,8 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u32 mode)
HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
HE_BITS(DATA1_SPTL_REUSE4_KNOWN);
- he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
+ he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
+ HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);
@@ -391,15 +394,15 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u32 mode)
static int mt7915_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
struct mt7915_sta *msta = (struct mt7915_sta *)status->wcid;
+ __le32 *rxd = (__le32 *)skb->data;
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
struct ieee80211_hdr hdr;
- struct ethhdr eth_hdr;
- __le32 *rxd = (__le32 *)skb->data;
- __le32 qos_ctrl, ht_ctrl;
+ u16 frame_control;
- if (FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[3])) !=
+ if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
MT_RXD3_NORMAL_U2M)
return -EINVAL;
@@ -413,47 +416,52 @@ static int mt7915_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
/* store the info from RXD and ethhdr to avoid being overridden */
- memcpy(&eth_hdr, skb->data + hdr_gap, sizeof(eth_hdr));
- hdr.frame_control = FIELD_GET(MT_RXD6_FRAME_CONTROL, rxd[6]);
- hdr.seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, rxd[8]);
- qos_ctrl = FIELD_GET(MT_RXD8_QOS_CTL, rxd[8]);
- ht_ctrl = FIELD_GET(MT_RXD9_HT_CONTROL, rxd[9]);
-
+ frame_control = le32_get_bits(rxd[6], MT_RXD6_FRAME_CONTROL);
+ hdr.frame_control = cpu_to_le16(frame_control);
+ hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[8], MT_RXD8_SEQ_CTRL));
hdr.duration_id = 0;
+
ether_addr_copy(hdr.addr1, vif->addr);
ether_addr_copy(hdr.addr2, sta->addr);
- switch (le16_to_cpu(hdr.frame_control) &
- (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
+ switch (frame_control & (IEEE80211_FCTL_TODS |
+ IEEE80211_FCTL_FROMDS)) {
case 0:
ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
break;
case IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_source);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_source);
break;
case IEEE80211_FCTL_TODS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
break;
case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
- ether_addr_copy(hdr.addr4, eth_hdr.h_source);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
+ ether_addr_copy(hdr.addr4, eth_hdr->h_source);
break;
default:
break;
}
skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
- if (eth_hdr.h_proto == htons(ETH_P_AARP) ||
- eth_hdr.h_proto == htons(ETH_P_IPX))
+ if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
+ eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
- else if (eth_hdr.h_proto >= htons(ETH_P_802_3_MIN))
+ else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
else
skb_pull(skb, 2);
if (ieee80211_has_order(hdr.frame_control))
- memcpy(skb_push(skb, 2), &ht_ctrl, 2);
- if (ieee80211_is_data_qos(hdr.frame_control))
- memcpy(skb_push(skb, 2), &qos_ctrl, 2);
+ memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[9],
+ IEEE80211_HT_CTL_LEN);
+ if (ieee80211_is_data_qos(hdr.frame_control)) {
+ __le16 qos_ctrl;
+
+ qos_ctrl = cpu_to_le16(le32_get_bits(rxd[8], MT_RXD8_QOS_CTL));
+ memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
+ IEEE80211_QOS_CTL_LEN);
+ }
+
if (ieee80211_has_a4(hdr.frame_control))
memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
else
@@ -463,6 +471,108 @@ static int mt7915_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
}
static int
+mt7915_mac_fill_rx_rate(struct mt7915_dev *dev,
+ struct mt76_rx_status *status,
+ struct ieee80211_supported_band *sband,
+ __le32 *rxv)
+{
+ u32 v0, v2;
+ u8 stbc, gi, bw, dcm, mode, nss;
+ int i, idx;
+ bool cck = false;
+
+ v0 = le32_to_cpu(rxv[0]);
+ v2 = le32_to_cpu(rxv[2]);
+
+ idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
+ nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;
+
+ if (!is_mt7915(&dev->mt76)) {
+ stbc = FIELD_GET(MT_PRXV_HT_STBC, v0);
+ gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v0);
+ mode = FIELD_GET(MT_PRXV_TX_MODE, v0);
+ dcm = FIELD_GET(MT_PRXV_DCM, v0);
+ bw = FIELD_GET(MT_PRXV_FRAME_MODE, v0);
+ } else {
+ stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
+ gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
+ mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
+ dcm = !!(idx & GENMASK(3, 0) & MT_PRXV_TX_DCM);
+ bw = FIELD_GET(MT_CRXV_FRAME_MODE, v2);
+ }
+
+ switch (mode) {
+ case MT_PHY_TYPE_CCK:
+ cck = true;
+ fallthrough;
+ case MT_PHY_TYPE_OFDM:
+ i = mt76_get_rate(&dev->mt76, sband, i, cck);
+ break;
+ case MT_PHY_TYPE_HT_GF:
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ if (gi)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (i > 31)
+ return -EINVAL;
+ break;
+ case MT_PHY_TYPE_VHT:
+ status->nss = nss;
+ status->encoding = RX_ENC_VHT;
+ if (gi)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (i > 9)
+ return -EINVAL;
+ break;
+ case MT_PHY_TYPE_HE_MU:
+ case MT_PHY_TYPE_HE_SU:
+ case MT_PHY_TYPE_HE_EXT_SU:
+ case MT_PHY_TYPE_HE_TB:
+ status->nss = nss;
+ status->encoding = RX_ENC_HE;
+ i &= GENMASK(3, 0);
+
+ if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
+ status->he_gi = gi;
+
+ status->he_dcm = dcm;
+ break;
+ default:
+ return -EINVAL;
+ }
+ status->rate_idx = i;
+
+ switch (bw) {
+ case IEEE80211_STA_RX_BW_20:
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ if (mode & MT_PHY_TYPE_HE_EXT_SU &&
+ (idx & MT_PRXV_TX_ER_SU_106T)) {
+ status->bw = RATE_INFO_BW_HE_RU;
+ status->he_ru =
+ NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ } else {
+ status->bw = RATE_INFO_BW_40;
+ }
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ status->bw = RATE_INFO_BW_80;
+ break;
+ case IEEE80211_STA_RX_BW_160:
+ status->bw = RATE_INFO_BW_160;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
+ if (mode < MT_PHY_TYPE_HE_SU && gi)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ return 0;
+}
+
+static int
mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
@@ -485,11 +595,11 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
u16 seq_ctrl = 0;
u8 qos_ctl = 0;
__le16 fc = 0;
- int i, idx;
+ int idx;
memset(status, 0, sizeof(*status));
- if (rxd1 & MT_RXD1_NORMAL_BAND_IDX) {
+ if ((rxd1 & MT_RXD1_NORMAL_BAND_IDX) && !phy->band_idx) {
mphy = dev->mt76.phy2;
if (!mphy)
return -EINVAL;
@@ -530,6 +640,8 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
status->band = mphy->chandef.chan->band;
if (status->band == NL80211_BAND_5GHZ)
sband = &mphy->sband_5g.sband;
+ else if (status->band == NL80211_BAND_6GHZ)
+ sband = &mphy->sband_6g.sband;
else
sband = &mphy->sband_2g.sband;
@@ -626,7 +738,8 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
/* RXD Group 3 - P-RXV */
if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
- u32 v0, v1, v2;
+ u32 v0, v1;
+ int ret;
rxv = rxd;
rxd += 2;
@@ -635,7 +748,6 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
v0 = le32_to_cpu(rxv[0]);
v1 = le32_to_cpu(rxv[1]);
- v2 = le32_to_cpu(rxv[2]);
if (v0 & MT_PRXV_HT_AD_CODE)
status->enc_flags |= RX_ENC_FLAG_LDPC;
@@ -645,94 +757,18 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
- status->signal = status->chain_signal[0];
-
- for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
- if (!(status->chains & BIT(i)))
- continue;
-
- status->signal = max(status->signal,
- status->chain_signal[i]);
- }
/* RXD Group 5 - C-RXV */
if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
- u8 stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
- u8 gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
- bool cck = false;
-
rxd += 18;
if ((u8 *)rxd - skb->data >= skb->len)
return -EINVAL;
+ }
- idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
- mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
-
- switch (mode) {
- case MT_PHY_TYPE_CCK:
- cck = true;
- fallthrough;
- case MT_PHY_TYPE_OFDM:
- i = mt76_get_rate(&dev->mt76, sband, i, cck);
- break;
- case MT_PHY_TYPE_HT_GF:
- case MT_PHY_TYPE_HT:
- status->encoding = RX_ENC_HT;
- if (i > 31)
- return -EINVAL;
- break;
- case MT_PHY_TYPE_VHT:
- status->nss =
- FIELD_GET(MT_PRXV_NSTS, v0) + 1;
- status->encoding = RX_ENC_VHT;
- if (i > 9)
- return -EINVAL;
- break;
- case MT_PHY_TYPE_HE_MU:
- case MT_PHY_TYPE_HE_SU:
- case MT_PHY_TYPE_HE_EXT_SU:
- case MT_PHY_TYPE_HE_TB:
- status->nss =
- FIELD_GET(MT_PRXV_NSTS, v0) + 1;
- status->encoding = RX_ENC_HE;
- i &= GENMASK(3, 0);
-
- if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
- status->he_gi = gi;
-
- status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
- break;
- default:
- return -EINVAL;
- }
- status->rate_idx = i;
-
- switch (FIELD_GET(MT_CRXV_FRAME_MODE, v2)) {
- case IEEE80211_STA_RX_BW_20:
- break;
- case IEEE80211_STA_RX_BW_40:
- if (mode & MT_PHY_TYPE_HE_EXT_SU &&
- (idx & MT_PRXV_TX_ER_SU_106T)) {
- status->bw = RATE_INFO_BW_HE_RU;
- status->he_ru =
- NL80211_RATE_INFO_HE_RU_ALLOC_106;
- } else {
- status->bw = RATE_INFO_BW_40;
- }
- break;
- case IEEE80211_STA_RX_BW_80:
- status->bw = RATE_INFO_BW_80;
- break;
- case IEEE80211_STA_RX_BW_160:
- status->bw = RATE_INFO_BW_160;
- break;
- default:
- return -EINVAL;
- }
-
- status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
- if (mode < MT_PHY_TYPE_HE_SU && gi)
- status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (!is_mt7915(&dev->mt76) || (rxd1 & MT_RXD1_NORMAL_GROUP_5)) {
+ ret = mt7915_mac_fill_rx_rate(dev, status, sband, rxv);
+ if (ret < 0)
+ return ret;
}
}
@@ -801,6 +837,10 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
if (!status->wcid || !ieee80211_is_data_qos(fc))
return 0;
+ /* drop null data frames */
+ if (fc & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))
+ return -EINVAL;
+
status->aggr = unicast &&
!ieee80211_is_qos_nullfunc(fc);
status->qos_ctl = qos_ctl;
@@ -818,13 +858,13 @@ mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
__le32 *rxv_hdr = rxd + 2;
__le32 *rxv = rxd + 4;
u32 rcpi, ib_rssi, wb_rssi, v20, v21;
- bool ext_phy;
+ u8 band_idx;
s32 foe;
u8 snr;
int i;
- ext_phy = FIELD_GET(MT_RXV_HDR_BAND_IDX, le32_to_cpu(rxv_hdr[1]));
- if (ext_phy)
+ band_idx = le32_get_bits(rxv_hdr[1], MT_RXV_HDR_BAND_IDX);
+ if (band_idx && !phy->band_idx)
phy = mt7915_ext_phy(dev);
rcpi = le32_to_cpu(rxv[6]);
@@ -1065,6 +1105,7 @@ mt7915_mac_write_txwi_80211(struct mt7915_dev *dev, __le32 *txwi,
if (ieee80211_is_beacon(fc)) {
txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
+ txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX, 0x18));
}
if (info->flags & IEEE80211_TX_CTL_INJECTED) {
@@ -1080,6 +1121,7 @@ mt7915_mac_write_txwi_80211(struct mt7915_dev *dev, __le32 *txwi,
val = MT_TXD3_SN_VALID |
FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
txwi[3] |= cpu_to_le32(val);
+ txwi[7] &= ~cpu_to_le32(MT_TXD7_HW_AMSDU);
}
val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
@@ -1140,7 +1182,7 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
struct ieee80211_vif *vif = info->control.vif;
struct mt76_phy *mphy = &dev->mphy;
bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
- u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
+ u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0, band_idx = 0;
bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
bool mcast = false;
u16 tx_count = 15;
@@ -1151,6 +1193,7 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
omac_idx = mvif->mt76.omac_idx;
wmm_idx = mvif->mt76.wmm_idx;
+ band_idx = mvif->mt76.band_idx;
}
if (ext_phy && dev->mt76.phy2)
@@ -1165,7 +1208,7 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
} else {
p_fmt = MT_TX_TYPE_CT;
q_idx = wmm_idx * MT7915_MAX_WMM_SETS +
- mt7915_lmac_mapping(dev, skb_get_queue_mapping(skb));
+ mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
}
val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
@@ -1177,7 +1220,7 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
- if (ext_phy && q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)
+ if (ext_phy || band_idx)
val |= MT_TXD1_TGID;
txwi[1] = cpu_to_le32(val);
@@ -1311,10 +1354,10 @@ mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
u16 fc, tid;
u32 val;
- if (!sta || !sta->ht_cap.ht_supported)
+ if (!sta || !(sta->ht_cap.ht_supported || sta->he_cap.has_he))
return;
- tid = FIELD_GET(MT_TXD1_TID, le32_to_cpu(txwi[1]));
+ tid = le32_get_bits(txwi[1], MT_TXD1_TID);
if (tid >= 6) /* skip VO queue */
return;
@@ -1362,7 +1405,7 @@ mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
mt7915_tx_check_aggr(sta, txwi);
} else {
- wcid_idx = FIELD_GET(MT_TXD1_WLAN_IDX, le32_to_cpu(txwi[1]));
+ wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
}
__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
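Several hunks above replace the open-coded FIELD_GET(MASK, le32_to_cpu(word)) pattern with le32_get_bits(word, MASK). A minimal user-space sketch of the equivalence, assuming a little-endian host and a made-up EXAMPLE_FIELD mask (the driver's real masks such as MT_TXD1_TID or MT_RXD0_PKT_TYPE are used the same way):

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(m, v)	(((v) & (m)) / ((m) & -(m)))

/* little-endian host assumed, so the byte swap is a no-op here */
static uint32_t le32_to_cpu(uint32_t x) { return x; }

static uint32_t le32_get_bits(uint32_t le_word, uint32_t mask)
{
	return FIELD_GET(mask, le32_to_cpu(le_word));
}

#define EXAMPLE_FIELD	GENMASK(9, 4)	/* hypothetical descriptor field */

int main(void)
{
	uint32_t word = 0x1a0;	/* bits 9..4 hold 0x1a */

	printf("old style: %u\n", FIELD_GET(EXAMPLE_FIELD, le32_to_cpu(word)));
	printf("new style: %u\n", le32_get_bits(word, EXAMPLE_FIELD));
	return 0;
}

Both lines print the same value; the helper just folds the byte swap and field extraction into one call.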
@@ -1383,8 +1426,10 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
LIST_HEAD(free_list);
struct sk_buff *skb, *tmp;
void *end = data + len;
- u8 i, count;
- bool wake = false;
+ bool v3, wake = false;
+ u16 total, count = 0;
+ u32 txd = le32_to_cpu(free->txd);
+ __le32 *cur_info;
/* clean DMA queues and unmap buffers first */
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
@@ -1394,17 +1439,14 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false);
}
- /*
- * TODO: MT_TX_FREE_LATENCY is msdu time from the TXD is queued into PLE,
- * to the time ack is received or dropped by hw (air + hw queue time).
- * Should avoid accessing WTBL to get Tx airtime, and use it instead.
- */
- count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
- if (WARN_ON_ONCE((void *)&free->info[count] > end))
+ total = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
+ v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4);
+ if (WARN_ON_ONCE((void *)&free->info[total >> v3] > end))
return;
- for (i = 0; i < count; i++) {
- u32 msdu, info = le32_to_cpu(free->info[i]);
+ for (cur_info = &free->info[0]; count < total; cur_info++) {
+ u32 msdu, info = le32_to_cpu(*cur_info);
+ u8 i;
/*
* 1'b1: new wcid pair.
@@ -1415,7 +1457,6 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
struct mt76_wcid *wcid;
u16 idx;
- count++;
idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
wcid = rcu_dereference(dev->mt76.wcid[idx]);
sta = wcid_to_sta(wcid);
@@ -1430,12 +1471,24 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
continue;
}
- msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
- txwi = mt76_token_release(mdev, msdu, &wake);
- if (!txwi)
+ if (v3 && (info & MT_TX_FREE_MPDU_HEADER))
continue;
- mt7915_txwi_free(dev, txwi, sta, &free_list);
+ for (i = 0; i < 1 + v3; i++) {
+ if (v3) {
+ msdu = (info >> (15 * i)) & MT_TX_FREE_MSDU_ID_V3;
+ if (msdu == MT_TX_FREE_MSDU_ID_V3)
+ continue;
+ } else {
+ msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
+ }
+ count++;
+ txwi = mt76_token_release(mdev, msdu, &wake);
+ if (!txwi)
+ continue;
+
+ mt7915_txwi_free(dev, txwi, sta, &free_list);
+ }
}
mt7915_mac_sta_poll(dev);
@@ -1504,6 +1557,8 @@ mt7915_mac_add_txs_skb(struct mt7915_dev *dev, struct mt76_wcid *wcid, int pid,
if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
sband = &mphy->sband_5g.sband;
+ else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
+ sband = &mphy->sband_6g.sband;
else
sband = &mphy->sband_2g.sband;
@@ -1512,7 +1567,6 @@ mt7915_mac_add_txs_skb(struct mt7915_dev *dev, struct mt76_wcid *wcid, int pid,
break;
case MT_PHY_TYPE_HT:
case MT_PHY_TYPE_HT_GF:
- rate.mcs += (rate.nss - 1) * 8;
if (rate.mcs > 31)
goto out;
@@ -1578,23 +1632,18 @@ static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
struct mt76_wcid *wcid;
__le32 *txs_data = data;
u16 wcidx;
- u32 txs;
u8 pid;
- txs = le32_to_cpu(txs_data[0]);
- if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1)
+ if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
return;
- txs = le32_to_cpu(txs_data[2]);
- wcidx = FIELD_GET(MT_TXS2_WCID, txs);
-
- txs = le32_to_cpu(txs_data[3]);
- pid = FIELD_GET(MT_TXS3_PID, txs);
+ wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
+ pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
if (pid < MT_PACKET_ID_FIRST)
return;
- if (wcidx >= MT7915_WTBL_SIZE)
+ if (wcidx >= mt7915_wtbl_size(dev))
return;
rcu_read_lock();
@@ -1626,7 +1675,8 @@ bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
__le32 *end = (__le32 *)&rxd[len / 4];
enum rx_pkt_type type;
- type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+
switch (type) {
case PKT_TYPE_TXRX_NOTIFY:
mt7915_mac_tx_free(dev, data, len);
@@ -1635,6 +1685,9 @@ bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
for (rxd += 2; rxd + 8 <= end; rxd += 8)
mt7915_mac_add_txs(dev, rxd);
return false;
+ case PKT_TYPE_RX_FW_MONITOR:
+ mt7915_debugfs_rx_fw_monitor(dev, data, len);
+ return false;
default:
return true;
}
@@ -1648,7 +1701,7 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
__le32 *end = (__le32 *)&skb->data[skb->len];
enum rx_pkt_type type;
- type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
switch (type) {
case PKT_TYPE_TXRX_NOTIFY:
@@ -1666,6 +1719,10 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
mt7915_mac_add_txs(dev, rxd);
dev_kfree_skb(skb);
break;
+ case PKT_TYPE_RX_FW_MONITOR:
+ mt7915_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
+ dev_kfree_skb(skb);
+ break;
case PKT_TYPE_NORMAL:
if (!mt7915_mac_fill_rx(dev, skb)) {
mt76_rx(&dev->mt76, q, skb);
@@ -1702,8 +1759,7 @@ void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
- u32 reg = MT_WF_PHY_RX_CTRL1(ext_phy);
+ u32 reg = MT_WF_PHY_RX_CTRL1(phy->band_idx);
mt76_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN);
mt76_set(dev, reg, BIT(11) | BIT(9));
@@ -1712,25 +1768,22 @@ void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
void mt7915_mac_reset_counters(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
int i;
for (i = 0; i < 4; i++) {
- mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
- mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
+ mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
+ mt76_rr(dev, MT_TX_AGG_CNT2(phy->band_idx, i));
}
- if (ext_phy) {
- dev->mt76.phy2->survey_time = ktime_get_boottime();
+ i = 0;
+ phy->mt76->survey_time = ktime_get_boottime();
+ if (phy->band_idx)
i = ARRAY_SIZE(dev->mt76.aggr_stats) / 2;
- } else {
- dev->mt76.phy.survey_time = ktime_get_boottime();
- i = 0;
- }
+
memset(&dev->mt76.aggr_stats[i], 0, sizeof(dev->mt76.aggr_stats) / 2);
/* reset airtime counters */
- mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(ext_phy),
+ mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(phy->band_idx),
MT_WF_RMAC_MIB_RXTIME_CLR);
mt7915_mcu_get_chan_mib_info(phy, true);
@@ -1740,29 +1793,23 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy)
{
s16 coverage_class = phy->coverage_class;
struct mt7915_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
+ struct mt7915_phy *ext_phy = mt7915_ext_phy(dev);
u32 val, reg_offset;
u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
int offset;
- bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ;
+ bool a_band = !(phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ);
if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
return;
- if (ext_phy) {
+ if (ext_phy)
coverage_class = max_t(s16, dev->phy.coverage_class,
- coverage_class);
- } else {
- struct mt7915_phy *phy_ext = mt7915_ext_phy(dev);
+ ext_phy->coverage_class);
- if (phy_ext)
- coverage_class = max_t(s16, phy_ext->coverage_class,
- coverage_class);
- }
- mt76_set(dev, MT_ARB_SCR(ext_phy),
+ mt76_set(dev, MT_ARB_SCR(phy->band_idx),
MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
udelay(1);
@@ -1770,35 +1817,40 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy)
reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
- mt76_wr(dev, MT_TMAC_CDTR(ext_phy), cck + reg_offset);
- mt76_wr(dev, MT_TMAC_ODTR(ext_phy), ofdm + reg_offset);
- mt76_wr(dev, MT_TMAC_ICR0(ext_phy),
- FIELD_PREP(MT_IFS_EIFS_OFDM, is_5ghz ? 84 : 78) |
+ mt76_wr(dev, MT_TMAC_CDTR(phy->band_idx), cck + reg_offset);
+ mt76_wr(dev, MT_TMAC_ODTR(phy->band_idx), ofdm + reg_offset);
+ mt76_wr(dev, MT_TMAC_ICR0(phy->band_idx),
+ FIELD_PREP(MT_IFS_EIFS_OFDM, a_band ? 84 : 78) |
FIELD_PREP(MT_IFS_RIFS, 2) |
FIELD_PREP(MT_IFS_SIFS, 10) |
FIELD_PREP(MT_IFS_SLOT, phy->slottime));
- mt76_wr(dev, MT_TMAC_ICR1(ext_phy),
+ mt76_wr(dev, MT_TMAC_ICR1(phy->band_idx),
FIELD_PREP(MT_IFS_EIFS_CCK, 314));
- if (phy->slottime < 20 || is_5ghz)
+ if (phy->slottime < 20 || a_band)
val = MT7915_CFEND_RATE_DEFAULT;
else
val = MT7915_CFEND_RATE_11B;
- mt76_rmw_field(dev, MT_AGG_ACR0(ext_phy), MT_AGG_ACR_CFEND_RATE, val);
- mt76_clear(dev, MT_ARB_SCR(ext_phy),
+ mt76_rmw_field(dev, MT_AGG_ACR0(phy->band_idx), MT_AGG_ACR_CFEND_RATE, val);
+ mt76_clear(dev, MT_ARB_SCR(phy->band_idx),
MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}
void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy)
{
- mt76_set(dev, MT_WF_PHY_RXTD12(ext_phy),
+ u32 reg;
+
+ reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RXTD12(ext_phy) :
+ MT_WF_PHY_RXTD12_MT7916(ext_phy);
+ mt76_set(dev, reg,
MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY |
MT_WF_PHY_RXTD12_IRPI_SW_CLR);
- mt76_set(dev, MT_WF_PHY_RX_CTRL1(ext_phy),
- FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5));
+ reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RX_CTRL1(ext_phy) :
+ MT_WF_PHY_RX_CTRL1_MT7916(ext_phy);
+ mt76_set(dev, reg, FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5));
}
static u8
@@ -1810,7 +1862,9 @@ mt7915_phy_get_nf(struct mt7915_phy *phy, int idx)
int nss, i;
for (nss = 0; nss < hweight8(phy->mt76->chainmask); nss++) {
- u32 reg = MT_WF_IRPI(nss + (idx << dev->dbdc_support));
+ u32 reg = is_mt7915(&dev->mt76) ?
+ MT_WF_IRPI_NSS(0, nss + (idx << dev->dbdc_support)) :
+ MT_WF_IRPI_NSS_MT7916(idx, nss);
for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
val = mt76_rr(dev, reg);
@@ -1829,12 +1883,11 @@ void mt7915_update_channel(struct mt76_phy *mphy)
{
struct mt7915_phy *phy = (struct mt7915_phy *)mphy->priv;
struct mt76_channel_state *state = mphy->chan_state;
- bool ext_phy = phy != &phy->dev->phy;
int nf;
mt7915_mcu_get_chan_mib_info(phy, false);
- nf = mt7915_phy_get_nf(phy, ext_phy);
+ nf = mt7915_phy_get_nf(phy, phy->band_idx);
if (!phy->noise)
phy->noise = nf << 4;
else if (nf)
@@ -1891,20 +1944,26 @@ static void
mt7915_dma_reset(struct mt7915_dev *dev)
{
struct mt76_phy *mphy_ext = dev->mt76.phy2;
- u32 hif1_ofs = MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE;
+ u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
int i;
mt76_clear(dev, MT_WFDMA0_GLO_CFG,
- MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
- mt76_clear(dev, MT_WFDMA1_GLO_CFG,
- MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+ if (is_mt7915(&dev->mt76))
+ mt76_clear(dev, MT_WFDMA1_GLO_CFG,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN);
if (dev->hif2) {
mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
- (MT_WFDMA0_GLO_CFG_TX_DMA_EN |
- MT_WFDMA0_GLO_CFG_RX_DMA_EN));
- mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
- (MT_WFDMA1_GLO_CFG_TX_DMA_EN |
- MT_WFDMA1_GLO_CFG_RX_DMA_EN));
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+ if (is_mt7915(&dev->mt76))
+ mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN);
}
usleep_range(1000, 2000);
@@ -1928,19 +1987,23 @@ mt7915_dma_reset(struct mt7915_dev *dev)
mt76_set(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
- mt76_set(dev, MT_WFDMA1_GLO_CFG,
- MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN |
- MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
- MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
- if (dev->hif2) {
- mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
- (MT_WFDMA0_GLO_CFG_TX_DMA_EN |
- MT_WFDMA0_GLO_CFG_RX_DMA_EN));
- mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
- (MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ if (is_mt7915(&dev->mt76))
+ mt76_set(dev, MT_WFDMA1_GLO_CFG,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
MT_WFDMA1_GLO_CFG_RX_DMA_EN |
MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
- MT_WFDMA1_GLO_CFG_OMIT_RX_INFO));
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
+ if (dev->hif2) {
+ mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+ if (is_mt7915(&dev->mt76))
+ mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
}
}
@@ -2050,106 +2113,96 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
struct mib_stats *mib = &phy->mib;
- bool ext_phy = phy != &dev->phy;
int i, aggr0, aggr1, cnt;
+ u32 val;
- mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
- MT_MIB_SDR3_FCS_ERR_MASK);
+ cnt = mt76_rr(dev, MT_MIB_SDR3(phy->band_idx));
+ mib->fcs_err_cnt += is_mt7915(&dev->mt76) ? FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR4(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR4(phy->band_idx));
mib->rx_fifo_full_cnt += FIELD_GET(MT_MIB_SDR4_RX_FIFO_FULL_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR5(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR5(phy->band_idx));
mib->rx_mpdu_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_SDR6(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR6(phy->band_idx));
mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR7(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR7(phy->band_idx));
mib->rx_vector_mismatch_cnt += FIELD_GET(MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR8(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR8(phy->band_idx));
mib->rx_delimiter_fail_cnt += FIELD_GET(MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR11(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR11(phy->band_idx));
mib->rx_len_mismatch_cnt += FIELD_GET(MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR12(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR12(phy->band_idx));
mib->tx_ampdu_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_SDR13(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR13(phy->band_idx));
mib->tx_stop_q_empty_cnt += FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR14(ext_phy));
- mib->tx_mpdu_attempts_cnt += FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt);
+ cnt = mt76_rr(dev, MT_MIB_SDR14(phy->band_idx));
+ mib->tx_mpdu_attempts_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR15(ext_phy));
- mib->tx_mpdu_success_cnt += FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt);
+ cnt = mt76_rr(dev, MT_MIB_SDR15(phy->band_idx));
+ mib->tx_mpdu_success_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR22(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR22(phy->band_idx));
mib->rx_ampdu_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_SDR23(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR23(phy->band_idx));
mib->rx_ampdu_bytes_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_SDR24(ext_phy));
- mib->rx_ampdu_valid_subframe_cnt += FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt);
+ cnt = mt76_rr(dev, MT_MIB_SDR24(phy->band_idx));
+ mib->rx_ampdu_valid_subframe_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR25(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR25(phy->band_idx));
mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_SDR27(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR27(phy->band_idx));
mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR28(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR28(phy->band_idx));
mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR29(ext_phy));
- mib->rx_pfdrop_cnt += FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt);
+ cnt = mt76_rr(dev, MT_MIB_SDR29(phy->band_idx));
+ mib->rx_pfdrop_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR30(ext_phy));
- mib->rx_vec_queue_overflow_drop_cnt +=
- FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt);
+ cnt = mt76_rr(dev, MT_MIB_SDRVEC(phy->band_idx));
+ mib->rx_vec_queue_overflow_drop_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR31(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDR31(phy->band_idx));
mib->rx_ba_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_SDR32(ext_phy));
- mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT_MASK, cnt);
-
- cnt = mt76_rr(dev, MT_MIB_SDR33(ext_phy));
- mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT_MASK, cnt);
-
- cnt = mt76_rr(dev, MT_MIB_SDR34(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDRMUBF(phy->band_idx));
mib->tx_bf_cnt += FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt);
- cnt = mt76_rr(dev, MT_MIB_DR8(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_DR8(phy->band_idx));
mib->tx_mu_mpdu_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_DR9(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_DR9(phy->band_idx));
mib->tx_mu_acked_mpdu_cnt += cnt;
- cnt = mt76_rr(dev, MT_MIB_DR11(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_DR11(phy->band_idx));
mib->tx_su_acked_mpdu_cnt += cnt;
- cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(ext_phy));
- mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt);
- mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt);
-
- cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(ext_phy));
- mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, cnt);
- mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, cnt);
- mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, cnt);
- mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, cnt);
-
- cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(ext_phy));
- mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt);
- mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt);
- mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt);
-
- cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(ext_phy));
- mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_ETBF_TX_FB_CPL, cnt);
- mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_ETBF_TX_FB_TRI, cnt);
+ cnt = mt76_rr(dev, MT_ETBF_PAR_RPT0(phy->band_idx));
+ mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_PAR_RPT0_FB_BW, cnt);
+ mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NC, cnt);
+ mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NR, cnt);
for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
@@ -2157,27 +2210,97 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy)
mib->tx_amsdu_cnt += cnt;
}
- aggr0 = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
- for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
- u32 val;
+ aggr0 = phy->band_idx ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
+ if (is_mt7915(&dev->mt76)) {
+ for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
+ val = mt76_rr(dev, MT_MIB_MB_SDR1(phy->band_idx, (i << 4)));
+ mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
+ mib->ack_fail_cnt +=
+ FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
+
+ val = mt76_rr(dev, MT_MIB_MB_SDR0(phy->band_idx, (i << 4)));
+ mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
+ mib->rts_retries_cnt +=
+ FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
- val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
- mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
- mib->ack_fail_cnt +=
- FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
+ val = mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
+ dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
+ dev->mt76.aggr_stats[aggr0++] += val >> 16;
- val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
- mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
- mib->rts_retries_cnt +=
- FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
+ val = mt76_rr(dev, MT_TX_AGG_CNT2(phy->band_idx, i));
+ dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
+ dev->mt76.aggr_stats[aggr1++] += val >> 16;
+ }
+
+ cnt = mt76_rr(dev, MT_MIB_SDR32(phy->band_idx));
+ mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR33(phy->band_idx));
+ mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT, cnt);
- val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
- dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
- dev->mt76.aggr_stats[aggr0++] += val >> 16;
+ cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(phy->band_idx));
+ mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt);
+ mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt);
- val = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
- dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
- dev->mt76.aggr_stats[aggr1++] += val >> 16;
+ cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(phy->band_idx));
+ mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_ETBF_TX_FB_CPL, cnt);
+ mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_ETBF_TX_FB_TRI, cnt);
+
+ cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(phy->band_idx));
+ mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, cnt);
+ mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, cnt);
+ mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, cnt);
+ mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, cnt);
+ } else {
+ for (i = 0; i < 2; i++) {
+ /* rts count */
+ val = mt76_rr(dev, MT_MIB_MB_SDR0(phy->band_idx, (i << 2)));
+ mib->rts_cnt += FIELD_GET(GENMASK(15, 0), val);
+ mib->rts_cnt += FIELD_GET(GENMASK(31, 16), val);
+
+ /* rts retry count */
+ val = mt76_rr(dev, MT_MIB_MB_SDR1(phy->band_idx, (i << 2)));
+ mib->rts_retries_cnt += FIELD_GET(GENMASK(15, 0), val);
+ mib->rts_retries_cnt += FIELD_GET(GENMASK(31, 16), val);
+
+ /* ba miss count */
+ val = mt76_rr(dev, MT_MIB_MB_SDR2(phy->band_idx, (i << 2)));
+ mib->ba_miss_cnt += FIELD_GET(GENMASK(15, 0), val);
+ mib->ba_miss_cnt += FIELD_GET(GENMASK(31, 16), val);
+
+ /* ack fail count */
+ val = mt76_rr(dev, MT_MIB_MB_BFTF(phy->band_idx, (i << 2)));
+ mib->ack_fail_cnt += FIELD_GET(GENMASK(15, 0), val);
+ mib->ack_fail_cnt += FIELD_GET(GENMASK(31, 16), val);
+ }
+
+ for (i = 0; i < 8; i++) {
+ val = mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
+ dev->mt76.aggr_stats[aggr0++] += FIELD_GET(GENMASK(15, 0), val);
+ dev->mt76.aggr_stats[aggr0++] += FIELD_GET(GENMASK(31, 16), val);
+ }
+
+ cnt = mt76_rr(dev, MT_MIB_SDR32(phy->band_idx));
+ mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
+ mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
+ mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
+ mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_BFCR7(phy->band_idx));
+ mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_MIB_BFCR7_BFEE_TX_FB_CPL, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_BFCR2(phy->band_idx));
+ mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_MIB_BFCR2_BFEE_TX_FB_TRIG, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_BFCR0(phy->band_idx));
+ mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
+ mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
+ mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);
+ mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_BFCR1(phy->band_idx));
+ mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
+ mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
}
}
@@ -2248,39 +2371,59 @@ static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy)
struct mt7915_dev *dev = phy->dev;
if (phy->rdd_state & BIT(0))
- mt7915_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0);
+ mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0,
+ MT_RX_SEL0, 0);
if (phy->rdd_state & BIT(1))
- mt7915_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0);
+ mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1,
+ MT_RX_SEL0, 0);
}
static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
{
- int err;
+ int err, region;
- err = mt7915_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0);
+ switch (dev->mt76.region) {
+ case NL80211_DFS_ETSI:
+ region = 0;
+ break;
+ case NL80211_DFS_JP:
+ region = 2;
+ break;
+ case NL80211_DFS_FCC:
+ default:
+ region = 1;
+ break;
+ }
+
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
+ MT_RX_SEL0, region);
if (err < 0)
return err;
- return mt7915_mcu_rdd_cmd(dev, RDD_DET_MODE, chain, MT_RX_SEL0, 1);
+ return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
+ MT_RX_SEL0, 1);
}
static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy)
{
struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
struct mt7915_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
int err;
/* start CAC */
- err = mt7915_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, phy->band_idx,
+ MT_RX_SEL0, 0);
if (err < 0)
return err;
- err = mt7915_dfs_start_rdd(dev, ext_phy);
+ err = mt7915_dfs_start_rdd(dev, phy->band_idx);
if (err < 0)
return err;
- phy->rdd_state |= BIT(ext_phy);
+ phy->rdd_state |= BIT(phy->band_idx);
+
+ if (!is_mt7915(&dev->mt76))
+ return 0;
if (chandef->width == NL80211_CHAN_WIDTH_160 ||
chandef->width == NL80211_CHAN_WIDTH_80P80) {
@@ -2330,48 +2473,56 @@ mt7915_dfs_init_radar_specs(struct mt7915_phy *phy)
int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
{
- struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
struct mt7915_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
+ enum mt76_dfs_state dfs_state, prev_state;
int err;
- if (dev->mt76.region == NL80211_DFS_UNSET) {
- phy->dfs_state = -1;
- if (phy->rdd_state)
- goto stop;
+ prev_state = phy->mt76->dfs_state;
+ dfs_state = mt76_phy_dfs_state(phy->mt76);
+ if (prev_state == dfs_state)
return 0;
- }
- if (test_bit(MT76_SCANNING, &phy->mt76->state))
- return 0;
-
- if (phy->dfs_state == chandef->chan->dfs_state)
- return 0;
+ if (prev_state == MT_DFS_STATE_UNKNOWN)
+ mt7915_dfs_stop_radar_detector(phy);
- err = mt7915_dfs_init_radar_specs(phy);
- if (err < 0) {
- phy->dfs_state = -1;
+ if (dfs_state == MT_DFS_STATE_DISABLED)
goto stop;
- }
- phy->dfs_state = chandef->chan->dfs_state;
+ if (prev_state <= MT_DFS_STATE_DISABLED) {
+ err = mt7915_dfs_init_radar_specs(phy);
+ if (err < 0)
+ return err;
+
+ err = mt7915_dfs_start_radar_detector(phy);
+ if (err < 0)
+ return err;
- if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
- if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
- return mt7915_dfs_start_radar_detector(phy);
+ phy->mt76->dfs_state = MT_DFS_STATE_CAC;
+ }
+
+ if (dfs_state == MT_DFS_STATE_CAC)
+ return 0;
- return mt7915_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy,
- MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
+ phy->band_idx, MT_RX_SEL0, 0);
+ if (err < 0) {
+ phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
+ return err;
}
+ phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
+ return 0;
+
stop:
- err = mt7915_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy,
- MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START,
+ phy->band_idx, MT_RX_SEL0, 0);
if (err < 0)
return err;
mt7915_dfs_stop_radar_detector(phy);
+ phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;
+
return 0;
}
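The reworked mt7915_dfs_init_radar_detector() above drives a small state machine (unknown, disabled, CAC, active) instead of comparing cached chandef state. A stand-alone sketch of that flow, with the MCU RDD commands stubbed out as prints; the state names mirror MT_DFS_STATE_*, but the helper mt76_phy_dfs_state() that picks the target state is not reproduced:

#include <stdio.h>

enum dfs_state {
	DFS_STATE_UNKNOWN,
	DFS_STATE_DISABLED,
	DFS_STATE_CAC,
	DFS_STATE_ACTIVE,
};

static enum dfs_state cur_state = DFS_STATE_UNKNOWN;

static void start_radar_detector(void) { printf("RDD_CAC_START + RDD_START\n"); }
static void stop_radar_detector(void)  { printf("RDD_NORMAL_START + RDD_STOP\n"); }
static void cac_end(void)              { printf("RDD_CAC_END\n"); }

static int dfs_update(enum dfs_state next)
{
	if (cur_state == next)
		return 0;

	if (cur_state == DFS_STATE_UNKNOWN)
		stop_radar_detector();	/* hw state unknown: force a stop first */

	if (next == DFS_STATE_DISABLED) {
		stop_radar_detector();
		cur_state = DFS_STATE_DISABLED;
		return 0;
	}

	if (cur_state <= DFS_STATE_DISABLED) {
		start_radar_detector();	/* begin CAC on the new channel */
		cur_state = DFS_STATE_CAC;
	}

	if (next == DFS_STATE_CAC)
		return 0;

	cac_end();			/* channel is available, go active */
	cur_state = DFS_STATE_ACTIVE;
	return 0;
}

int main(void)
{
	dfs_update(DFS_STATE_CAC);	/* tuned to a radar channel, CAC running */
	dfs_update(DFS_STATE_ACTIVE);	/* CAC finished */
	dfs_update(DFS_STATE_DISABLED);	/* left the radar channel */
	return 0;
}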
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
index 7a2c740d1464..5add1dd36dbe 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
@@ -23,6 +23,7 @@ enum rx_pkt_type {
PKT_TYPE_RETRIEVE,
PKT_TYPE_TXRX_NOTIFY,
PKT_TYPE_RX_EVENT,
+ PKT_TYPE_RX_FW_MONITOR = 0x0c,
};
/* RXD DW1 */
@@ -125,6 +126,12 @@ enum rx_pkt_type {
#define MT_PRXV_RCPI2 GENMASK(23, 16)
#define MT_PRXV_RCPI1 GENMASK(15, 8)
#define MT_PRXV_RCPI0 GENMASK(7, 0)
+#define MT_PRXV_HT_SHORT_GI GENMASK(16, 15)
+#define MT_PRXV_HT_STBC GENMASK(23, 22)
+#define MT_PRXV_TX_MODE GENMASK(27, 24)
+#define MT_PRXV_FRAME_MODE GENMASK(14, 12)
+#define MT_PRXV_DCM BIT(17)
+#define MT_PRXV_NUM_RX GENMASK(20, 18)
/* C-RXV */
#define MT_CRXV_HT_STBC GENMASK(1, 0)
@@ -298,18 +305,20 @@ struct mt7915_txp {
struct mt7915_tx_free {
__le16 rx_byte_cnt;
__le16 ctrl;
- u8 txd_cnt;
- u8 rsv[3];
+ __le32 txd;
__le32 info[];
} __packed __aligned(4);
+#define MT_TX_FREE_VER GENMASK(18, 16)
#define MT_TX_FREE_MSDU_CNT GENMASK(9, 0)
#define MT_TX_FREE_WLAN_ID GENMASK(23, 14)
#define MT_TX_FREE_LATENCY GENMASK(12, 0)
/* 0: success, others: dropped */
-#define MT_TX_FREE_STATUS GENMASK(14, 13)
#define MT_TX_FREE_MSDU_ID GENMASK(30, 16)
#define MT_TX_FREE_PAIR BIT(31)
+#define MT_TX_FREE_MPDU_HEADER BIT(30)
+#define MT_TX_FREE_MSDU_ID_V3 GENMASK(14, 0)
+
/* will support this field in further revision */
#define MT_TX_FREE_RATE GENMASK(13, 0)
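The new MT_TX_FREE_VER and MT_TX_FREE_MSDU_ID_V3 fields support a packed "tx done" format in which each info word can carry two 15-bit MSDU tokens, with 0x7fff marking an empty slot. A user-space sketch of the walk mt7915_mac_tx_free() now performs, with the wcid handling and token bookkeeping stubbed out:

#include <stdint.h>
#include <stdio.h>

#define MSDU_ID_V3_MASK	0x7fff		/* mirrors GENMASK(14, 0) */

static void release_token(unsigned int msdu)
{
	printf("release msdu token %u\n", msdu);
}

static void walk_tx_free(const uint32_t *info, unsigned int total, int v3)
{
	unsigned int count = 0;
	const uint32_t *cur;

	for (cur = info; count < total; cur++) {
		unsigned int i;

		for (i = 0; i < 1u + v3; i++) {
			unsigned int msdu;

			if (v3) {
				msdu = (*cur >> (15 * i)) & MSDU_ID_V3_MASK;
				if (msdu == MSDU_ID_V3_MASK)
					continue;	/* unused slot */
			} else {
				/* old format: one id in bits 30..16 */
				msdu = (*cur >> 16) & 0x7fff;
			}
			count++;
			release_token(msdu);
		}
	}
}

int main(void)
{
	/* two ids per word in the v3 format; 0x7fff pads the odd entry */
	uint32_t info[] = { (7u << 15) | 5u, (0x7fffu << 15) | 9u };

	walk_tx_free(info, 3, 1);
	return 0;
}

With total = 3 this releases tokens 5, 7 and 9 and stops, matching how the driver counts released MSDUs rather than info words.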
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index 8ac6f59af174..c3f44d801e7f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -34,7 +34,7 @@ static int mt7915_start(struct ieee80211_hw *hw)
running = mt7915_dev_running(dev);
if (!running) {
- ret = mt7915_mcu_set_pm(dev, 0, 0);
+ ret = mt76_connac_mcu_set_pm(&dev->mt76, 0, 0);
if (ret)
goto out;
@@ -49,8 +49,8 @@ static int mt7915_start(struct ieee80211_hw *hw)
mt7915_mac_enable_nf(dev, 0);
}
- if (phy != &dev->phy) {
- ret = mt7915_mcu_set_pm(dev, 1, 0);
+ if (phy != &dev->phy || phy->band_idx) {
+ ret = mt76_connac_mcu_set_pm(&dev->mt76, 1, 0);
if (ret)
goto out;
@@ -65,7 +65,8 @@ static int mt7915_start(struct ieee80211_hw *hw)
mt7915_mac_enable_nf(dev, 1);
}
- ret = mt7915_mcu_set_rts_thresh(phy, 0x92b);
+ ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b,
+ phy != &dev->phy);
if (ret)
goto out;
@@ -106,12 +107,12 @@ static void mt7915_stop(struct ieee80211_hw *hw)
clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
if (phy != &dev->phy) {
- mt7915_mcu_set_pm(dev, 1, 1);
+ mt76_connac_mcu_set_pm(&dev->mt76, 1, 1);
mt7915_mcu_set_mac(dev, 1, false, false);
}
if (!mt7915_dev_running(dev)) {
- mt7915_mcu_set_pm(dev, 0, 1);
+ mt76_connac_mcu_set_pm(&dev->mt76, 0, 1);
mt7915_mcu_set_mac(dev, 0, false, false);
}
@@ -216,7 +217,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
}
mvif->mt76.omac_idx = idx;
mvif->phy = phy;
- mvif->mt76.band_idx = ext_phy;
+ mvif->mt76.band_idx = phy->band_idx;
mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
if (ext_phy)
@@ -234,7 +235,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
INIT_LIST_HEAD(&mvif->sta.rc_list);
INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
- mvif->sta.wcid.ext_phy = mvif->mt76.band_idx;
+ mvif->sta.wcid.ext_phy = ext_phy;
mvif->sta.wcid.hw_key_idx = -1;
mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
mt76_packet_id_init(&mvif->sta.wcid);
@@ -256,6 +257,9 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
mt7915_init_bitrate_mask(vif);
memset(&mvif->cap, -1, sizeof(mvif->cap));
+ mt7915_mcu_add_bss_info(phy, vif, true);
+ mt7915_mcu_add_sta(dev, vif, NULL, true);
+
out:
mutex_unlock(&dev->mt76.mutex);
@@ -298,25 +302,6 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw,
mt76_packet_id_flush(&dev->mt76, &msta->wcid);
}
-static void mt7915_init_dfs_state(struct mt7915_phy *phy)
-{
- struct mt76_phy *mphy = phy->mt76;
- struct ieee80211_hw *hw = mphy->hw;
- struct cfg80211_chan_def *chandef = &hw->conf.chandef;
-
- if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
- return;
-
- if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
- return;
-
- if (mphy->chandef.chan->center_freq == chandef->chan->center_freq &&
- mphy->chandef.width == chandef->width)
- return;
-
- phy->dfs_state = -1;
-}
-
int mt7915_set_channel(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
@@ -327,7 +312,6 @@ int mt7915_set_channel(struct mt7915_phy *phy)
mutex_lock(&dev->mt76.mutex);
set_bit(MT76_RESET, &phy->mt76->state);
- mt7915_init_dfs_state(phy);
mt76_set_channel(phy->mt76);
if (dev->flash_mode) {
@@ -366,6 +350,7 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct mt7915_sta *msta = sta ? (struct mt7915_sta *)sta->drv_priv :
&mvif->sta;
@@ -405,6 +390,11 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mutex_lock(&dev->mt76.mutex);
+ if (cmd == SET_KEY && !sta && !mvif->mt76.cipher) {
+ mvif->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher);
+ mt7915_mcu_add_bss_info(phy, vif, true);
+ }
+
if (cmd == SET_KEY)
*wcid_keyidx = idx;
else if (idx == *wcid_keyidx)
@@ -415,8 +405,9 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mt76_wcid_key_setup(&dev->mt76, wcid,
cmd == SET_KEY ? key : NULL);
- err = mt7915_mcu_add_key(dev, vif, msta, key, cmd);
-
+ err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip,
+ key, MCU_EXT_CMD(STA_REC_UPDATE),
+ &msta->wcid, cmd);
out:
mutex_unlock(&dev->mt76.mutex);
@@ -498,11 +489,10 @@ static int
mt7915_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
- struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
/* no need to update right away, we'll get BSS_CHANGED_QOS */
- queue = mt7915_lmac_mapping(dev, queue);
+ queue = mt76_connac_lmac_mapping(queue);
mvif->queue_params[queue] = *params;
return 0;
@@ -664,6 +654,7 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ bool ext_phy = mvif->phy != &dev->phy;
int ret, idx;
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA);
@@ -675,7 +666,7 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
msta->vif = mvif;
msta->wcid.sta = 1;
msta->wcid.idx = idx;
- msta->wcid.ext_phy = mvif->mt76.band_idx;
+ msta->wcid.ext_phy = ext_phy;
msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
msta->jiffies = jiffies;
@@ -746,7 +737,7 @@ static int mt7915_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
int ret;
mutex_lock(&dev->mt76.mutex);
- ret = mt7915_mcu_set_rts_thresh(phy, val);
+ ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, val, phy != &dev->phy);
mutex_unlock(&dev->mt76.mutex);
return ret;
@@ -861,8 +852,12 @@ u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif)
n = mvif->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
: mvif->mt76.omac_idx;
/* TSF software read */
- mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
- MT_LPON_TCR_SW_READ);
+ if (is_mt7915(&dev->mt76))
+ mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_READ);
+ else
+ mt76_rmw(dev, MT_LPON_TCR_MT7916(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_READ);
tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(band));
tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(band));
@@ -904,8 +899,12 @@ mt7915_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]);
/* TSF software overwrite */
- mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
- MT_LPON_TCR_SW_WRITE);
+ if (is_mt7915(&dev->mt76))
+ mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_WRITE);
+ else
+ mt76_rmw(dev, MT_LPON_TCR_MT7916(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_WRITE);
mutex_unlock(&dev->mt76.mutex);
}
@@ -931,8 +930,12 @@ mt7915_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]);
/* TSF software adjust*/
- mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
- MT_LPON_TCR_SW_ADJUST);
+ if (is_mt7915(&dev->mt76))
+ mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_ADJUST);
+ else
+ mt76_rmw(dev, MT_LPON_TCR_MT7916(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_ADJUST);
mutex_unlock(&dev->mt76.mutex);
}
@@ -967,12 +970,9 @@ mt7915_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
phy->mt76->antenna_mask = tx_ant;
- if (ext_phy) {
- if (dev->chainmask == 0xf)
- tx_ant <<= 2;
- else
- tx_ant <<= 1;
- }
+ if (ext_phy)
+ tx_ant <<= dev->chainshift;
+
phy->mt76->chainmask = tx_ant;
mt76_set_stream_caps(phy->mt76, true);
@@ -994,7 +994,8 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
struct rate_info *txrate = &msta->wcid.rate;
struct rate_info rxrate = {};
- if (!mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
+ if (is_mt7915(&phy->dev->mt76) &&
+ !mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
sinfo->rxrate = rxrate;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
}
@@ -1079,7 +1080,7 @@ static void mt7915_sta_set_4addr(struct ieee80211_hw *hw,
else
clear_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags);
- mt7915_mcu_sta_update_hdr_trans(dev, vif, sta);
+ mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
}
static void mt7915_sta_set_decap_offload(struct ieee80211_hw *hw,
@@ -1095,7 +1096,7 @@ static void mt7915_sta_set_decap_offload(struct ieee80211_hw *hw,
else
clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
- mt7915_mcu_sta_update_hdr_trans(dev, vif, sta);
+ mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
}
static const char mt7915_gstrings_stats[][ETH_GSTRING_LEN] = {
@@ -1238,7 +1239,6 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
};
struct mib_stats *mib = &phy->mib;
/* See mt7915_ampdu_stat_read_phy, etc */
- bool ext_phy = phy != &dev->phy;
int i, n, ei = 0;
mutex_lock(&dev->mt76.mutex);
@@ -1255,7 +1255,7 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
data[ei++] = mib->tx_pkt_ibf_cnt;
/* Tx ampdu stat */
- n = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
+ n = phy->band_idx ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
for (i = 0; i < 15 /*ARRAY_SIZE(bound)*/; i++)
data[ei++] = dev->mt76.aggr_stats[i + n];
@@ -1332,6 +1332,55 @@ mt7915_twt_teardown_request(struct ieee80211_hw *hw,
mutex_unlock(&dev->mt76.mutex);
}
+static int
+mt7915_set_radar_background(struct ieee80211_hw *hw,
+ struct cfg80211_chan_def *chandef)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_dev *dev = phy->dev;
+ int ret = -EINVAL;
+ bool running;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (dev->mt76.region == NL80211_DFS_UNSET)
+ goto out;
+
+ if (dev->rdd2_phy && dev->rdd2_phy != phy) {
+ /* rdd2 is already locked */
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* rdd2 already configured on a radar channel */
+ running = dev->rdd2_phy &&
+ cfg80211_chandef_valid(&dev->rdd2_chandef) &&
+ !!(dev->rdd2_chandef.chan->flags & IEEE80211_CHAN_RADAR);
+
+ if (!chandef || running ||
+ !(chandef->chan->flags & IEEE80211_CHAN_RADAR)) {
+ ret = mt7915_mcu_rdd_background_enable(phy, NULL);
+ if (ret)
+ goto out;
+
+ if (!running)
+ goto update_phy;
+ }
+
+ ret = mt7915_mcu_rdd_background_enable(phy, chandef);
+ if (ret)
+ goto out;
+
+update_phy:
+ dev->rdd2_phy = chandef ? phy : NULL;
+ if (chandef)
+ dev->rdd2_chandef = *chandef;
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
const struct ieee80211_ops mt7915_ops = {
.tx = mt7915_tx,
.start = mt7915_start,
@@ -1378,4 +1427,5 @@ const struct ieee80211_ops mt7915_ops = {
#ifdef CONFIG_MAC80211_DEBUGFS
.sta_add_debugfs = mt7915_sta_add_debugfs,
#endif
+ .set_radar_background = mt7915_set_radar_background,
};
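Throughout mac.c and main.c the two-valued ext_phy flag is replaced by phy->band_idx when forming per-band register addresses (MT_TMAC_CDTR, MT_ARB_SCR, MT_LPON_TCR, ...), which also covers chips whose second band is not an "external" phy. A toy sketch of that macro pattern, with invented base addresses rather than the real mt7915/mt7916 register map:

#include <stdint.h>
#include <stdio.h>

#define NUM_BANDS 2

/* hypothetical per-band MAC block bases, for illustration only */
static const uint32_t band_base[NUM_BANDS] = { 0x21000, 0x22000 };

#define MT_WF_TMAC(band, ofs)	(band_base[(band)] + (ofs))
#define MT_TMAC_CDTR(band)	MT_WF_TMAC(band, 0x090)
#define MT_TMAC_ODTR(band)	MT_WF_TMAC(band, 0x094)

int main(void)
{
	unsigned int band_idx;

	for (band_idx = 0; band_idx < NUM_BANDS; band_idx++)
		printf("band %u: CDTR @ 0x%05x, ODTR @ 0x%05x\n",
		       band_idx,
		       (unsigned int)MT_TMAC_CDTR(band_idx),
		       (unsigned int)MT_TMAC_ODTR(band_idx));
	return 0;
}

Indexing by band_idx keeps the call sites identical for single-band, DBDC and multi-chip layouts; only the base table differs per generation.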
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 0911b6f973b5..e7a6f80e7755 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -64,136 +64,31 @@ struct mt7915_fw_region {
u8 reserved1[15];
} __packed;
-#define MCU_PATCH_ADDRESS 0x200000
-
-#define FW_FEATURE_SET_ENCRYPT BIT(0)
-#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
-#define FW_FEATURE_OVERRIDE_ADDR BIT(5)
-
-#define DL_MODE_ENCRYPT BIT(0)
-#define DL_MODE_KEY_IDX GENMASK(2, 1)
-#define DL_MODE_RESET_SEC_IV BIT(3)
-#define DL_MODE_WORKING_PDA_CR4 BIT(4)
-#define DL_MODE_NEED_RSP BIT(31)
-
-#define FW_START_OVERRIDE BIT(0)
-#define FW_START_WORKING_PDA_CR4 BIT(2)
-
-#define PATCH_SEC_TYPE_MASK GENMASK(15, 0)
-#define PATCH_SEC_TYPE_INFO 0x2
+#define fw_name(_dev, name, ...) ({ \
+ char *_fw; \
+ switch (mt76_chip(&(_dev)->mt76)) { \
+ case 0x7915: \
+ _fw = MT7915_##name; \
+ break; \
+ case 0x7986: \
+ _fw = MT7986_##name##__VA_ARGS__; \
+ break; \
+ default: \
+ _fw = MT7916_##name; \
+ break; \
+ } \
+ _fw; \
+})
+
+#define fw_name_var(_dev, name) (mt7915_check_adie(dev, false) ? \
+ fw_name(_dev, name) : \
+ fw_name(_dev, name, _MT7975))
-#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
-#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
+#define MCU_PATCH_ADDRESS 0x200000
#define HE_PHY(p, c) u8_get_bits(c, IEEE80211_HE_PHY_##p)
#define HE_MAC(m, c) u8_get_bits(c, IEEE80211_HE_MAC_##m)
-static enum mcu_cipher_type
-mt7915_mcu_get_cipher(int cipher)
-{
- switch (cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- return MCU_CIPHER_WEP40;
- case WLAN_CIPHER_SUITE_WEP104:
- return MCU_CIPHER_WEP104;
- case WLAN_CIPHER_SUITE_TKIP:
- return MCU_CIPHER_TKIP;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- return MCU_CIPHER_BIP_CMAC_128;
- case WLAN_CIPHER_SUITE_CCMP:
- return MCU_CIPHER_AES_CCMP;
- case WLAN_CIPHER_SUITE_CCMP_256:
- return MCU_CIPHER_CCMP_256;
- case WLAN_CIPHER_SUITE_GCMP:
- return MCU_CIPHER_GCMP;
- case WLAN_CIPHER_SUITE_GCMP_256:
- return MCU_CIPHER_GCMP_256;
- case WLAN_CIPHER_SUITE_SMS4:
- return MCU_CIPHER_WAPI;
- default:
- return MCU_CIPHER_NONE;
- }
-}
-
-static u8 mt7915_mcu_chan_bw(struct cfg80211_chan_def *chandef)
-{
- static const u8 width_to_bw[] = {
- [NL80211_CHAN_WIDTH_40] = CMD_CBW_40MHZ,
- [NL80211_CHAN_WIDTH_80] = CMD_CBW_80MHZ,
- [NL80211_CHAN_WIDTH_80P80] = CMD_CBW_8080MHZ,
- [NL80211_CHAN_WIDTH_160] = CMD_CBW_160MHZ,
- [NL80211_CHAN_WIDTH_5] = CMD_CBW_5MHZ,
- [NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
- [NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
- [NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
- };
-
- if (chandef->width >= ARRAY_SIZE(width_to_bw))
- return 0;
-
- return width_to_bw[chandef->width];
-}
-
-static const struct ieee80211_sta_he_cap *
-mt7915_get_he_phy_cap(struct mt7915_phy *phy, struct ieee80211_vif *vif)
-{
- struct ieee80211_supported_band *sband;
- enum nl80211_band band;
-
- band = phy->mt76->chandef.chan->band;
- sband = phy->mt76->hw->wiphy->bands[band];
-
- return ieee80211_get_he_iftype_cap(sband, vif->type);
-}
-
-static u8
-mt7915_get_phy_mode(struct ieee80211_vif *vif, struct ieee80211_sta *sta)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- enum nl80211_band band = mvif->phy->mt76->chandef.chan->band;
- struct ieee80211_sta_ht_cap *ht_cap;
- struct ieee80211_sta_vht_cap *vht_cap;
- const struct ieee80211_sta_he_cap *he_cap;
- u8 mode = 0;
-
- if (sta) {
- ht_cap = &sta->ht_cap;
- vht_cap = &sta->vht_cap;
- he_cap = &sta->he_cap;
- } else {
- struct ieee80211_supported_band *sband;
-
- sband = mvif->phy->mt76->hw->wiphy->bands[band];
-
- ht_cap = &sband->ht_cap;
- vht_cap = &sband->vht_cap;
- he_cap = ieee80211_get_he_iftype_cap(sband, vif->type);
- }
-
- if (band == NL80211_BAND_2GHZ) {
- mode |= PHY_MODE_B | PHY_MODE_G;
-
- if (ht_cap->ht_supported)
- mode |= PHY_MODE_GN;
-
- if (he_cap && he_cap->has_he)
- mode |= PHY_MODE_AX_24G;
- } else if (band == NL80211_BAND_5GHZ) {
- mode |= PHY_MODE_A;
-
- if (ht_cap->ht_supported)
- mode |= PHY_MODE_AN;
-
- if (vht_cap->vht_supported)
- mode |= PHY_MODE_AC;
-
- if (he_cap && he_cap->has_he)
- mode |= PHY_MODE_AX_5G;
- }
-
- return mode;
-}
-
static u8
mt7915_mcu_get_sta_nss(u16 mcs_map)
{
@@ -211,24 +106,13 @@ mt7915_mcu_get_sta_nss(u16 mcs_map)
static void
mt7915_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs,
- const u16 *mask)
+ u16 mcs_map)
{
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- struct cfg80211_chan_def *chandef = &msta->vif->phy->mt76->chandef;
+ struct mt7915_dev *dev = msta->vif->phy->dev;
+ enum nl80211_band band = msta->vif->phy->mt76->chandef.chan->band;
+ const u16 *mask = msta->vif->bitrate_mask.control[band].he_mcs;
int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss;
- u16 mcs_map;
-
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_80P80:
- mcs_map = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80p80);
- break;
- case NL80211_CHAN_WIDTH_160:
- mcs_map = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_160);
- break;
- default:
- mcs_map = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80);
- break;
- }
for (nss = 0; nss < max_nss; nss++) {
int mcs;
@@ -266,8 +150,9 @@ mt7915_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs,
mcs_map &= ~(0x3 << (nss * 2));
mcs_map |= mcs << (nss * 2);
- /* only support 2ss on 160MHz */
- if (nss > 1 && (sta->bandwidth == IEEE80211_STA_RX_BW_160))
+ /* only support 2ss on 160MHz for mt7915 */
+ if (is_mt7915(&dev->mt76) && nss > 1 &&
+ sta->bandwidth == IEEE80211_STA_RX_BW_160)
break;
}
@@ -278,6 +163,8 @@ static void
mt7915_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs,
const u16 *mask)
{
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_dev *dev = msta->vif->phy->dev;
u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map);
int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss;
u16 mcs;
@@ -299,8 +186,9 @@ mt7915_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs,
vht_mcs[nss] = cpu_to_le16(mcs & mask[nss]);
- /* only support 2ss on 160MHz */
- if (nss > 1 && (sta->bandwidth == IEEE80211_STA_RX_BW_160))
+ /* only support 2ss on 160MHz for mt7915 */
+ if (is_mt7915(&dev->mt76) && nss > 1 &&
+ sta->bandwidth == IEEE80211_STA_RX_BW_160)
break;
}
}
@@ -446,7 +334,7 @@ mt7915_mcu_rx_csa_notify(struct mt7915_dev *dev, struct sk_buff *skb)
c = (struct mt7915_mcu_csa_notify *)skb->data;
- if (c->band_idx && dev->mt76.phy2)
+ if ((c->band_idx && !dev->phy.band_idx) && dev->mt76.phy2)
mphy = dev->mt76.phy2;
ieee80211_iterate_active_interfaces_atomic(mphy->hw,
@@ -465,7 +353,7 @@ mt7915_mcu_rx_thermal_notify(struct mt7915_dev *dev, struct sk_buff *skb)
if (t->ctrl.ctrl_id != THERMAL_PROTECT_ENABLE)
return;
- if (t->ctrl.band_idx && dev->mt76.phy2)
+ if ((t->ctrl.band_idx && !dev->phy.band_idx) && dev->mt76.phy2)
mphy = dev->mt76.phy2;
phy = (struct mt7915_phy *)mphy->priv;
@@ -480,10 +368,15 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
r = (struct mt7915_mcu_rdd_report *)skb->data;
- if (r->band_idx && dev->mt76.phy2)
+ if ((r->band_idx && !dev->phy.band_idx) && dev->mt76.phy2)
mphy = dev->mt76.phy2;
- ieee80211_radar_detected(mphy->hw);
+ if (r->band_idx == MT_RX_SEL2)
+ cfg80211_background_radar_event(mphy->hw->wiphy,
+ &dev->rdd2_chandef,
+ GFP_ATOMIC);
+ else
+ ieee80211_radar_detected(mphy->hw);
dev->hw_pattern++;
}
@@ -493,9 +386,13 @@ mt7915_mcu_rx_log_message(struct mt7915_dev *dev, struct sk_buff *skb)
struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
const char *data = (char *)&rxd[1];
const char *type;
+ int len = skb->len - sizeof(*rxd);
switch (rxd->s2d_index) {
case 0:
+ if (mt7915_debugfs_rx_log(dev, data, len))
+ return;
+
type = "WM";
break;
case 2:
@@ -506,8 +403,7 @@ mt7915_mcu_rx_log_message(struct mt7915_dev *dev, struct sk_buff *skb)
break;
}
- wiphy_info(mt76_hw(dev)->wiphy, "%s: %.*s", type,
- (int)(skb->len - sizeof(*rxd)), data);
+ wiphy_info(mt76_hw(dev)->wiphy, "%s: %.*s", type, len, data);
}
static void
@@ -520,6 +416,22 @@ mt7915_mcu_cca_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
}
static void
+mt7915_mcu_rx_bcc_notify(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7915_mcu_bcc_notify *b;
+
+ b = (struct mt7915_mcu_bcc_notify *)skb->data;
+
+ if ((b->band_idx && !dev->phy.band_idx) && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
+ ieee80211_iterate_active_interfaces_atomic(mphy->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7915_mcu_cca_finish, mphy->hw);
+}
+
+static void
mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
{
struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
@@ -538,9 +450,7 @@ mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
mt7915_mcu_rx_log_message(dev, skb);
break;
case MCU_EXT_EVENT_BCC_NOTIFY:
- ieee80211_iterate_active_interfaces_atomic(dev->mt76.hw,
- IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7915_mcu_cca_finish, dev);
+ mt7915_mcu_rx_bcc_notify(dev, skb);
break;
default:
break;
@@ -577,88 +487,6 @@ void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb)
mt76_mcu_rx_event(&dev->mt76, skb);
}
-static struct sk_buff *
-mt7915_mcu_alloc_sta_req(struct mt7915_dev *dev, struct mt7915_vif *mvif,
- struct mt7915_sta *msta, int len)
-{
- struct sta_req_hdr hdr = {
- .bss_idx = mvif->mt76.idx,
- .wlan_idx_lo = msta ? to_wcid_lo(msta->wcid.idx) : 0,
- .wlan_idx_hi = msta ? to_wcid_hi(msta->wcid.idx) : 0,
- .muar_idx = msta && msta->wcid.sta ? mvif->mt76.omac_idx : 0xe,
- .is_tlv_append = 1,
- };
- struct sk_buff *skb;
-
- skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, len);
- if (!skb)
- return ERR_PTR(-ENOMEM);
-
- skb_put_data(skb, &hdr, sizeof(hdr));
-
- return skb;
-}
-
-static struct wtbl_req_hdr *
-mt7915_mcu_alloc_wtbl_req(struct mt7915_dev *dev, struct mt7915_sta *msta,
- int cmd, void *sta_wtbl, struct sk_buff **skb)
-{
- struct tlv *sta_hdr = sta_wtbl;
- struct wtbl_req_hdr hdr = {
- .wlan_idx_lo = to_wcid_lo(msta->wcid.idx),
- .wlan_idx_hi = to_wcid_hi(msta->wcid.idx),
- .operation = cmd,
- };
- struct sk_buff *nskb = *skb;
-
- if (!nskb) {
- nskb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
- MT76_CONNAC_WTBL_UPDATE_MAX_SIZE);
- if (!nskb)
- return ERR_PTR(-ENOMEM);
-
- *skb = nskb;
- }
-
- if (sta_hdr)
- le16_add_cpu(&sta_hdr->len, sizeof(hdr));
-
- return skb_put_data(nskb, &hdr, sizeof(hdr));
-}
-
-static struct tlv *
-mt7915_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
- void *sta_ntlv, void *sta_wtbl)
-{
- struct sta_ntlv_hdr *ntlv_hdr = sta_ntlv;
- struct tlv *sta_hdr = sta_wtbl;
- struct tlv *ptlv, tlv = {
- .tag = cpu_to_le16(tag),
- .len = cpu_to_le16(len),
- };
- u16 ntlv;
-
- ptlv = skb_put(skb, len);
- memcpy(ptlv, &tlv, sizeof(tlv));
-
- ntlv = le16_to_cpu(ntlv_hdr->tlv_num);
- ntlv_hdr->tlv_num = cpu_to_le16(ntlv + 1);
-
- if (sta_hdr) {
- u16 size = le16_to_cpu(sta_hdr->len);
-
- sta_hdr->len = cpu_to_le16(size + len);
- }
-
- return ptlv;
-}
-
-static struct tlv *
-mt7915_mcu_add_tlv(struct sk_buff *skb, int tag, int len)
-{
- return mt7915_mcu_add_nested_tlv(skb, tag, len, skb->data, NULL);
-}
-
static struct tlv *
mt7915_mcu_add_nested_subtlv(struct sk_buff *skb, int sub_tag, int sub_len,
__le16 *sub_ntlv, __le16 *len)
@@ -678,105 +506,6 @@ mt7915_mcu_add_nested_subtlv(struct sk_buff *skb, int sub_tag, int sub_len,
}
/** bss info **/
-static int
-mt7915_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct mt7915_phy *phy, bool enable)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct bss_info_basic *bss;
- u16 wlan_idx = mvif->sta.wcid.idx;
- u32 type = NETWORK_INFRA;
- struct tlv *tlv;
-
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_BASIC, sizeof(*bss));
-
- switch (vif->type) {
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_MONITOR:
- break;
- case NL80211_IFTYPE_STATION:
- /* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
- if (enable) {
- struct ieee80211_sta *sta;
- struct mt7915_sta *msta;
-
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
- if (!sta) {
- rcu_read_unlock();
- return -EINVAL;
- }
-
- msta = (struct mt7915_sta *)sta->drv_priv;
- wlan_idx = msta->wcid.idx;
- rcu_read_unlock();
- }
- break;
- case NL80211_IFTYPE_ADHOC:
- type = NETWORK_IBSS;
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- bss = (struct bss_info_basic *)tlv;
- bss->network_type = cpu_to_le32(type);
- bss->bmc_wcid_lo = to_wcid_lo(wlan_idx);
- bss->bmc_wcid_hi = to_wcid_hi(wlan_idx);
- bss->wmm_idx = mvif->mt76.wmm_idx;
- bss->active = enable;
-
- if (vif->type != NL80211_IFTYPE_MONITOR) {
- memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
- bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
- bss->dtim_period = vif->bss_conf.dtim_period;
- bss->phy_mode = mt7915_get_phy_mode(vif, NULL);
- } else {
- memcpy(bss->bssid, phy->mt76->macaddr, ETH_ALEN);
- }
-
- return 0;
-}
-
-static void
-mt7915_mcu_bss_omac_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct bss_info_omac *omac;
- struct tlv *tlv;
- u32 type = 0;
- u8 idx;
-
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_OMAC, sizeof(*omac));
-
- switch (vif->type) {
- case NL80211_IFTYPE_MONITOR:
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_AP:
- type = CONNECTION_INFRA_AP;
- break;
- case NL80211_IFTYPE_STATION:
- type = CONNECTION_INFRA_STA;
- break;
- case NL80211_IFTYPE_ADHOC:
- type = CONNECTION_IBSS_ADHOC;
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- omac = (struct bss_info_omac *)tlv;
- idx = mvif->mt76.omac_idx > EXT_BSSID_START ? HW_BSSID_0
- : mvif->mt76.omac_idx;
- omac->conn_type = cpu_to_le32(type);
- omac->omac_idx = mvif->mt76.omac_idx;
- omac->band_idx = mvif->mt76.band_idx;
- omac->hw_bss_idx = idx;
-}
-
struct mt7915_he_obss_narrow_bw_ru_data {
bool tolerated;
};
@@ -829,12 +558,12 @@ mt7915_mcu_bss_rfch_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
struct tlv *tlv;
int freq1 = chandef->center_freq1;
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_RF_CH, sizeof(*ch));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_RF_CH, sizeof(*ch));
ch = (struct bss_info_rf_ch *)tlv;
ch->pri_ch = chandef->chan->hw_value;
ch->center_ch0 = ieee80211_frequency_to_channel(freq1);
- ch->bw = mt7915_mcu_chan_bw(chandef);
+ ch->bw = mt76_connac_chan_bw(chandef);
if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
int freq2 = chandef->center_freq2;
@@ -843,12 +572,7 @@ mt7915_mcu_bss_rfch_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
}
if (vif->bss_conf.he_support && vif->type == NL80211_IFTYPE_STATION) {
- struct mt7915_dev *dev = phy->dev;
- struct mt76_phy *mphy = &dev->mt76.phy;
- bool ext_phy = phy != &dev->phy;
-
- if (ext_phy && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
+ struct mt76_phy *mphy = phy->mt76;
ch->he_ru26_block =
mt7915_check_he_obss_narrow_bw_ru(mphy->hw, vif);
@@ -866,7 +590,7 @@ mt7915_mcu_bss_ra_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
struct bss_info_ra *ra;
struct tlv *tlv;
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_RA, sizeof(*ra));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_RA, sizeof(*ra));
ra = (struct bss_info_ra *)tlv;
ra->op_mode = vif->type == NL80211_IFTYPE_AP;
@@ -894,9 +618,9 @@ mt7915_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
struct bss_info_he *he;
struct tlv *tlv;
- cap = mt7915_get_he_phy_cap(phy, vif);
+ cap = mt76_connac_get_he_phy_cap(phy->mt76, vif);
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_HE_BASIC, sizeof(*he));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_HE_BASIC, sizeof(*he));
he = (struct bss_info_he *)tlv;
he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext;
@@ -920,7 +644,7 @@ mt7915_mcu_bss_hw_amsdu_tlv(struct sk_buff *skb)
struct bss_info_hw_amsdu *amsdu;
struct tlv *tlv;
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_HW_AMSDU, sizeof(*amsdu));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_HW_AMSDU, sizeof(*amsdu));
amsdu = (struct bss_info_hw_amsdu *)tlv;
amsdu->cmp_bitmap_0 = cpu_to_le32(TXD_CMP_MAP1);
@@ -930,26 +654,6 @@ mt7915_mcu_bss_hw_amsdu_tlv(struct sk_buff *skb)
}
static void
-mt7915_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt7915_vif *mvif)
-{
-/* SIFS 20us + 512 byte beacon tranmitted by 1Mbps (3906us) */
-#define BCN_TX_ESTIMATE_TIME (4096 + 20)
- struct bss_info_ext_bss *ext;
- int ext_bss_idx, tsf_offset;
- struct tlv *tlv;
-
- ext_bss_idx = mvif->mt76.omac_idx - EXT_BSSID_START;
- if (ext_bss_idx < 0)
- return;
-
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_EXT_BSS, sizeof(*ext));
-
- ext = (struct bss_info_ext_bss *)tlv;
- tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
- ext->mbss_tsf_offset = cpu_to_le32(tsf_offset);
-}
-
-static void
mt7915_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt7915_phy *phy)
{
struct bss_info_bmc_rate *bmc;
@@ -957,7 +661,7 @@ mt7915_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt7915_phy *phy)
enum nl80211_band band = chandef->chan->band;
struct tlv *tlv;
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_BMC_RATE, sizeof(*bmc));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BMC_RATE, sizeof(*bmc));
bmc = (struct bss_info_bmc_rate *)tlv;
if (band == NL80211_BAND_2GHZ) {
@@ -1010,6 +714,7 @@ int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
struct ieee80211_vif *vif, int enable)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_dev *dev = phy->dev;
struct sk_buff *skb;
if (mvif->mt76.omac_idx >= REPEATER_BSSID_START) {
@@ -1017,16 +722,17 @@ int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
mt7915_mcu_muar_config(phy, vif, true, enable);
}
- skb = mt7915_mcu_alloc_sta_req(phy->dev, mvif, NULL,
- MT7915_BSS_UPDATE_MAX_SIZE);
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, NULL,
+ MT7915_BSS_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
/* bss_omac must be first */
if (enable)
- mt7915_mcu_bss_omac_tlv(skb, vif);
+ mt76_connac_mcu_bss_omac_tlv(skb, vif);
- mt7915_mcu_bss_basic_tlv(skb, vif, phy, enable);
+ mt76_connac_mcu_bss_basic_tlv(skb, vif, NULL, phy->mt76,
+ mvif->sta.wcid.idx, enable);
if (vif->type == NL80211_IFTYPE_MONITOR)
goto out;
@@ -1042,309 +748,48 @@ int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
if (mvif->mt76.omac_idx >= EXT_BSSID_START &&
mvif->mt76.omac_idx < REPEATER_BSSID_START)
- mt7915_mcu_bss_ext_tlv(skb, mvif);
+ mt76_connac_mcu_bss_ext_tlv(skb, &mvif->mt76);
}
out:
- return mt76_mcu_skb_send_msg(&phy->dev->mt76, skb,
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD(BSS_INFO_UPDATE), true);
}
/** starec & wtbl **/
-static int
-mt7915_mcu_sta_key_tlv(struct mt7915_sta *msta, struct sk_buff *skb,
- struct ieee80211_key_conf *key, enum set_key_cmd cmd)
-{
- struct mt7915_sta_key_conf *bip = &msta->bip;
- struct sta_rec_sec *sec;
- struct tlv *tlv;
- u32 len = sizeof(*sec);
-
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
-
- sec = (struct sta_rec_sec *)tlv;
- sec->add = cmd;
-
- if (cmd == SET_KEY) {
- struct sec_key *sec_key;
- u8 cipher;
-
- cipher = mt7915_mcu_get_cipher(key->cipher);
- if (cipher == MCU_CIPHER_NONE)
- return -EOPNOTSUPP;
-
- sec_key = &sec->key[0];
- sec_key->cipher_len = sizeof(*sec_key);
-
- if (cipher == MCU_CIPHER_BIP_CMAC_128) {
- sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
- sec_key->key_id = bip->keyidx;
- sec_key->key_len = 16;
- memcpy(sec_key->key, bip->key, 16);
-
- sec_key = &sec->key[1];
- sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
- sec_key->cipher_len = sizeof(*sec_key);
- sec_key->key_len = 16;
- memcpy(sec_key->key, key->key, 16);
-
- sec->n_cipher = 2;
- } else {
- sec_key->cipher_id = cipher;
- sec_key->key_id = key->keyidx;
- sec_key->key_len = key->keylen;
- memcpy(sec_key->key, key->key, key->keylen);
-
- if (cipher == MCU_CIPHER_TKIP) {
- /* Rx/Tx MIC keys are swapped */
- memcpy(sec_key->key + 16, key->key + 24, 8);
- memcpy(sec_key->key + 24, key->key + 16, 8);
- }
-
- /* store key_conf for BIP batch update */
- if (cipher == MCU_CIPHER_AES_CCMP) {
- memcpy(bip->key, key->key, key->keylen);
- bip->keyidx = key->keyidx;
- }
-
- len -= sizeof(*sec_key);
- sec->n_cipher = 1;
- }
- } else {
- len -= sizeof(sec->key);
- sec->n_cipher = 0;
- }
- sec->len = cpu_to_le16(len);
-
- return 0;
-}
-
-int mt7915_mcu_add_key(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct mt7915_sta *msta, struct ieee80211_key_conf *key,
- enum set_key_cmd cmd)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct sk_buff *skb;
- int len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_sec);
- int ret;
-
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- ret = mt7915_mcu_sta_key_tlv(msta, skb, key, cmd);
- if (ret)
- return ret;
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD(STA_REC_UPDATE), true);
-}
-
-static void
-mt7915_mcu_sta_ba_tlv(struct sk_buff *skb,
- struct ieee80211_ampdu_params *params,
- bool enable, bool tx)
-{
- struct sta_rec_ba *ba;
- struct tlv *tlv;
-
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_BA, sizeof(*ba));
-
- ba = (struct sta_rec_ba *)tlv;
- ba->ba_type = tx ? MT_BA_TYPE_ORIGINATOR : MT_BA_TYPE_RECIPIENT;
- ba->winsize = cpu_to_le16(params->buf_size);
- ba->ssn = cpu_to_le16(params->ssn);
- ba->ba_en = enable << params->tid;
- ba->amsdu = params->amsdu;
- ba->tid = params->tid;
-}
-
-static void
-mt7915_mcu_wtbl_ba_tlv(struct sk_buff *skb,
- struct ieee80211_ampdu_params *params,
- bool enable, bool tx, void *sta_wtbl,
- void *wtbl_tlv)
-{
- struct wtbl_ba *ba;
- struct tlv *tlv;
-
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_BA, sizeof(*ba),
- wtbl_tlv, sta_wtbl);
-
- ba = (struct wtbl_ba *)tlv;
- ba->tid = params->tid;
-
- if (tx) {
- ba->ba_type = MT_BA_TYPE_ORIGINATOR;
- ba->sn = enable ? cpu_to_le16(params->ssn) : 0;
- ba->ba_en = enable;
- } else {
- memcpy(ba->peer_addr, params->sta->addr, ETH_ALEN);
- ba->ba_type = MT_BA_TYPE_RECIPIENT;
- ba->rst_ba_tid = params->tid;
- ba->rst_ba_sel = RST_BA_MAC_TID_MATCH;
- ba->rst_ba_sb = 1;
- }
-
- if (enable)
- ba->ba_winsize = cpu_to_le16(params->buf_size);
-}
-
-static int
-mt7915_mcu_sta_ba(struct mt7915_dev *dev,
- struct ieee80211_ampdu_params *params,
- bool enable, bool tx)
+int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
{
struct mt7915_sta *msta = (struct mt7915_sta *)params->sta->drv_priv;
struct mt7915_vif *mvif = msta->vif;
- struct wtbl_req_hdr *wtbl_hdr;
- struct tlv *sta_wtbl;
- struct sk_buff *skb;
- int ret;
- if (enable && tx && !params->amsdu)
+ if (enable && !params->amsdu)
msta->wcid.amsdu = false;
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
- MT76_CONNAC_STA_UPDATE_MAX_SIZE);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
-
- wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
- &skb);
- if (IS_ERR(wtbl_hdr))
- return PTR_ERR(wtbl_hdr);
-
- mt7915_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr);
-
- ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD(STA_REC_UPDATE), true);
- if (ret)
- return ret;
-
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
- MT76_CONNAC_STA_UPDATE_MAX_SIZE);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- mt7915_mcu_sta_ba_tlv(skb, params, enable, tx);
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD(STA_REC_UPDATE), true);
-}
-
-int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
- struct ieee80211_ampdu_params *params,
- bool enable)
-{
- return mt7915_mcu_sta_ba(dev, params, enable, true);
+ return mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
+ MCU_EXT_CMD(STA_REC_UPDATE),
+ enable, true);
}
int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
struct ieee80211_ampdu_params *params,
bool enable)
{
- return mt7915_mcu_sta_ba(dev, params, enable, false);
-}
-
-static void
-mt7915_mcu_wtbl_generic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, void *sta_wtbl,
- void *wtbl_tlv)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct wtbl_generic *generic;
- struct wtbl_rx *rx;
- struct tlv *tlv;
-
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_GENERIC, sizeof(*generic),
- wtbl_tlv, sta_wtbl);
-
- generic = (struct wtbl_generic *)tlv;
-
- if (sta) {
- memcpy(generic->peer_addr, sta->addr, ETH_ALEN);
- generic->partial_aid = cpu_to_le16(sta->aid);
- generic->muar_idx = mvif->mt76.omac_idx;
- generic->qos = sta->wme;
- } else {
- /* use BSSID in station mode */
- if (vif->type == NL80211_IFTYPE_STATION)
- memcpy(generic->peer_addr, vif->bss_conf.bssid,
- ETH_ALEN);
- else
- eth_broadcast_addr(generic->peer_addr);
-
- generic->muar_idx = 0xe;
- }
-
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_RX, sizeof(*rx),
- wtbl_tlv, sta_wtbl);
-
- rx = (struct wtbl_rx *)tlv;
- rx->rca1 = sta ? vif->type != NL80211_IFTYPE_AP : 1;
- rx->rca2 = 1;
- rx->rv = 1;
-}
-
-static void
-mt7915_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable)
-{
-#define EXTRA_INFO_VER BIT(0)
-#define EXTRA_INFO_NEW BIT(1)
- struct sta_rec_basic *basic;
- struct tlv *tlv;
-
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_BASIC, sizeof(*basic));
-
- basic = (struct sta_rec_basic *)tlv;
- basic->extra_info = cpu_to_le16(EXTRA_INFO_VER);
-
- if (enable) {
- basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
- basic->conn_state = CONN_STATE_PORT_SECURE;
- } else {
- basic->conn_state = CONN_STATE_DISCONNECT;
- }
-
- if (!sta) {
- basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
- eth_broadcast_addr(basic->peer_addr);
- return;
- }
-
- switch (vif->type) {
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_AP:
- basic->conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
- break;
- case NL80211_IFTYPE_STATION:
- basic->conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
- break;
- case NL80211_IFTYPE_ADHOC:
- basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
- break;
- default:
- WARN_ON(1);
- break;
- }
+ struct mt7915_sta *msta = (struct mt7915_sta *)params->sta->drv_priv;
+ struct mt7915_vif *mvif = msta->vif;
- memcpy(basic->peer_addr, sta->addr, ETH_ALEN);
- basic->aid = cpu_to_le16(sta->aid);
- basic->qos = sta->wme;
+ return mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
+ MCU_EXT_CMD(STA_REC_UPDATE),
+ enable, false);
}
static void
mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
struct ieee80211_vif *vif)
{
- struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct ieee80211_he_cap_elem *elem = &sta->he_cap.he_cap_elem;
- enum nl80211_band band = msta->vif->phy->mt76->chandef.chan->band;
- const u16 *mcs_mask = msta->vif->bitrate_mask.control[band].he_mcs;
+ struct ieee80211_he_mcs_nss_supp mcs_map;
struct sta_rec_he *he;
struct tlv *tlv;
u32 cap = 0;
@@ -1352,7 +797,7 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
if (!sta->he_cap.has_he)
return;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_HE, sizeof(*he));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE, sizeof(*he));
he = (struct sta_rec_he *)tlv;
@@ -1376,8 +821,9 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G))
cap |= STA_REC_HE_CAP_BW20_RU242_SUPPORT;
- if (mvif->cap.ldpc && (elem->phy_cap_info[1] &
- IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
+ if (mvif->cap.he_ldpc &&
+ (elem->phy_cap_info[1] &
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
cap |= STA_REC_HE_CAP_LDPC;
if (elem->phy_cap_info[1] &
@@ -1434,22 +880,23 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
he->he_cap = cpu_to_le32(cap);
+ mcs_map = sta->he_cap.he_mcs_nss_supp;
switch (sta->bandwidth) {
case IEEE80211_STA_RX_BW_160:
if (elem->phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
mt7915_mcu_set_sta_he_mcs(sta,
&he->max_nss_mcs[CMD_HE_MCS_BW8080],
- mcs_mask);
+ le16_to_cpu(mcs_map.rx_mcs_80p80));
mt7915_mcu_set_sta_he_mcs(sta,
&he->max_nss_mcs[CMD_HE_MCS_BW160],
- mcs_mask);
+ le16_to_cpu(mcs_map.rx_mcs_160));
fallthrough;
default:
mt7915_mcu_set_sta_he_mcs(sta,
&he->max_nss_mcs[CMD_HE_MCS_BW80],
- mcs_mask);
+ le16_to_cpu(mcs_map.rx_mcs_80));
break;
}
@@ -1480,38 +927,6 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
}
static void
-mt7915_mcu_sta_uapsd_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
- struct ieee80211_vif *vif)
-{
- struct sta_rec_uapsd *uapsd;
- struct tlv *tlv;
-
- if (vif->type != NL80211_IFTYPE_AP || !sta->wme)
- return;
-
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_APPS, sizeof(*uapsd));
- uapsd = (struct sta_rec_uapsd *)tlv;
-
- if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) {
- uapsd->dac_map |= BIT(3);
- uapsd->tac_map |= BIT(3);
- }
- if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) {
- uapsd->dac_map |= BIT(2);
- uapsd->tac_map |= BIT(2);
- }
- if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) {
- uapsd->dac_map |= BIT(1);
- uapsd->tac_map |= BIT(1);
- }
- if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) {
- uapsd->dac_map |= BIT(0);
- uapsd->tac_map |= BIT(0);
- }
- uapsd->max_sp = sta->max_sp;
-}
-
-static void
mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
struct ieee80211_vif *vif)
{
@@ -1524,19 +939,19 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
vif->type != NL80211_IFTYPE_AP)
return;
- if (!sta->vht_cap.vht_supported)
- return;
-
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_MURU, sizeof(*muru));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_MURU, sizeof(*muru));
muru = (struct sta_rec_muru *)tlv;
muru->cfg.mimo_dl_en = mvif->cap.he_mu_ebfer ||
mvif->cap.vht_mu_ebfer ||
mvif->cap.vht_mu_ebfee;
+ muru->cfg.mimo_ul_en = true;
+ muru->cfg.ofdma_dl_en = true;
- muru->mimo_dl.vht_mu_bfee =
- !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+ if (sta->vht_cap.vht_supported)
+ muru->mimo_dl.vht_mu_bfee =
+ !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
if (!sta->he_cap.has_he)
return;
@@ -1544,13 +959,11 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
muru->mimo_dl.partial_bw_dl_mimo =
HE_PHY(CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO, elem->phy_cap_info[6]);
- muru->cfg.mimo_ul_en = true;
muru->mimo_ul.full_ul_mimo =
HE_PHY(CAP2_UL_MU_FULL_MU_MIMO, elem->phy_cap_info[2]);
muru->mimo_ul.partial_ul_mimo =
HE_PHY(CAP2_UL_MU_PARTIAL_MU_MIMO, elem->phy_cap_info[2]);
- muru->cfg.ofdma_dl_en = true;
muru->ofdma_dl.punc_pream_rx =
HE_PHY(CAP1_PREAMBLE_PUNC_RX_MASK, elem->phy_cap_info[1]);
muru->ofdma_dl.he_20m_in_40m_2g =
@@ -1574,7 +987,10 @@ mt7915_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
struct sta_rec_ht *ht;
struct tlv *tlv;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
+ if (!sta->ht_cap.ht_supported)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
ht = (struct sta_rec_ht *)tlv;
ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
@@ -1589,7 +1005,7 @@ mt7915_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
if (!sta->vht_cap.vht_supported)
return;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
vht = (struct sta_rec_vht *)tlv;
vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
@@ -1598,8 +1014,8 @@ mt7915_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
}
static void
-mt7915_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+mt7915_mcu_sta_amsdu_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
struct sta_rec_amsdu *amsdu;
@@ -1612,96 +1028,27 @@ mt7915_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
if (!sta->max_amsdu_len)
return;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
amsdu = (struct sta_rec_amsdu *)tlv;
amsdu->max_amsdu_num = 8;
amsdu->amsdu_en = true;
- amsdu->max_mpdu_size = sta->max_amsdu_len >=
- IEEE80211_MAX_MPDU_LEN_VHT_7991;
msta->wcid.amsdu = true;
-}
-
-static void
-mt7915_mcu_wtbl_smps_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
- void *sta_wtbl, void *wtbl_tlv)
-{
- struct wtbl_smps *smps;
- struct tlv *tlv;
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_SMPS, sizeof(*smps),
- wtbl_tlv, sta_wtbl);
- smps = (struct wtbl_smps *)tlv;
- smps->smps = (sta->smps_mode == IEEE80211_SMPS_DYNAMIC);
-}
-
-static void
-mt7915_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, void *sta_wtbl,
- void *wtbl_tlv)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct wtbl_ht *ht = NULL;
- struct tlv *tlv;
-
- /* wtbl ht */
- if (sta->ht_cap.ht_supported) {
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht),
- wtbl_tlv, sta_wtbl);
- ht = (struct wtbl_ht *)tlv;
- ht->ldpc = mvif->cap.ldpc &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
- ht->af = sta->ht_cap.ampdu_factor;
- ht->mm = sta->ht_cap.ampdu_density;
- ht->ht = true;
- }
-
- /* wtbl vht */
- if (sta->vht_cap.vht_supported) {
- struct wtbl_vht *vht;
- u8 af;
-
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_VHT, sizeof(*vht),
- wtbl_tlv, sta_wtbl);
- vht = (struct wtbl_vht *)tlv;
- vht->ldpc = mvif->cap.ldpc &&
- (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
- vht->vht = true;
-
- af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
- sta->vht_cap.cap);
- if (ht)
- ht->af = max_t(u8, ht->af, af);
- }
-
- mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_tlv);
-}
-
-static void
-mt7915_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- void *sta_wtbl, void *wtbl_tlv)
-{
- struct mt7915_sta *msta;
- struct wtbl_hdr_trans *htr = NULL;
- struct tlv *tlv;
-
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_HDR_TRANS, sizeof(*htr),
- wtbl_tlv, sta_wtbl);
- htr = (struct wtbl_hdr_trans *)tlv;
- htr->no_rx_trans = true;
- if (vif->type == NL80211_IFTYPE_STATION)
- htr->to_ds = true;
- else
- htr->from_ds = true;
-
- if (!sta)
+ switch (sta->max_amsdu_len) {
+ case IEEE80211_MAX_MPDU_LEN_VHT_11454:
+ if (!is_mt7915(&dev->mt76)) {
+ amsdu->max_mpdu_size =
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
+ return;
+ }
+ fallthrough;
+ case IEEE80211_MAX_MPDU_LEN_HT_7935:
+ case IEEE80211_MAX_MPDU_LEN_VHT_7991:
+ amsdu->max_mpdu_size = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
+ return;
+ default:
+ amsdu->max_mpdu_size = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
return;
-
- msta = (struct mt7915_sta *)sta->drv_priv;
- htr->no_rx_trans = !test_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
- if (test_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags)) {
- htr->to_ds = true;
- htr->from_ds = true;
}
}
@@ -1712,48 +1059,30 @@ mt7915_mcu_sta_wtbl_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct mt7915_sta *msta;
struct wtbl_req_hdr *wtbl_hdr;
+ struct mt76_wcid *wcid;
struct tlv *tlv;
msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta;
+ wcid = sta ? &msta->wcid : NULL;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
- wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET,
- tlv, &skb);
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+ wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
+ WTBL_RESET_AND_SET, tlv,
+ &skb);
if (IS_ERR(wtbl_hdr))
return PTR_ERR(wtbl_hdr);
- mt7915_mcu_wtbl_generic_tlv(skb, vif, sta, tlv, wtbl_hdr);
- mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, tlv, wtbl_hdr);
-
+ mt76_connac_mcu_wtbl_generic_tlv(&dev->mt76, skb, vif, sta, tlv,
+ wtbl_hdr);
+ mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, wcid, tlv, wtbl_hdr);
if (sta)
- mt7915_mcu_wtbl_ht_tlv(skb, vif, sta, tlv, wtbl_hdr);
+ mt76_connac_mcu_wtbl_ht_tlv(&dev->mt76, skb, sta, tlv,
+ wtbl_hdr, mvif->cap.ht_ldpc,
+ mvif->cap.vht_ldpc);
return 0;
}
-int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- struct wtbl_req_hdr *wtbl_hdr;
- struct sk_buff *skb;
-
- skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
- MT76_CONNAC_WTBL_UPDATE_MAX_SIZE);
- if (!skb)
- return -ENOMEM;
-
- wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, NULL, &skb);
- if (IS_ERR(wtbl_hdr))
- return PTR_ERR(wtbl_hdr);
-
- mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, NULL, wtbl_hdr);
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD(WTBL_UPDATE),
- true);
-}
-
static inline bool
mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool bfee)
@@ -1870,7 +1199,8 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
{
struct ieee80211_sta_he_cap *pc = &sta->he_cap;
struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem;
- const struct ieee80211_sta_he_cap *vc = mt7915_get_he_phy_cap(phy, vif);
+ const struct ieee80211_sta_he_cap *vc =
+ mt76_connac_get_he_phy_cap(phy->mt76, vif);
const struct ieee80211_he_cap_elem *ve = &vc->he_cap_elem;
u16 mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_80);
u8 nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
@@ -1928,8 +1258,7 @@ mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct mt7915_phy *phy =
- mvif->mt76.band_idx ? mt7915_ext_phy(dev) : &dev->phy;
+ struct mt7915_phy *phy = mvif->phy;
int tx_ant = hweight8(phy->mt76->chainmask) - 1;
struct sta_rec_bf *bf;
struct tlv *tlv;
@@ -1941,11 +1270,14 @@ mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
};
bool ebf;
+ if (!(sta->ht_cap.ht_supported || sta->he_cap.has_he))
+ return;
+
ebf = mt7915_is_ebf_supported(phy, vif, sta, false);
if (!ebf && !dev->ibf)
return;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf));
bf = (struct sta_rec_bf *)tlv;
/* he: eBF only, in accordance with spec
@@ -1995,17 +1327,19 @@ mt7915_mcu_sta_bfee_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct mt7915_phy *phy =
- mvif->mt76.band_idx ? mt7915_ext_phy(dev) : &dev->phy;
+ struct mt7915_phy *phy = mvif->phy;
int tx_ant = hweight8(phy->mt76->chainmask) - 1;
struct sta_rec_bfee *bfee;
struct tlv *tlv;
u8 nrow = 0;
+ if (!(sta->vht_cap.vht_supported || sta->he_cap.has_he))
+ return;
+
if (!mt7915_is_ebf_supported(phy, vif, sta, true))
return;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_BFEE, sizeof(*bfee));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BFEE, sizeof(*bfee));
bfee = (struct sta_rec_bfee *)tlv;
if (sta->he_cap.has_he) {
@@ -2050,13 +1384,13 @@ int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
struct sta_rec_ra_fixed *ra;
struct sk_buff *skb;
struct tlv *tlv;
- int len = sizeof(struct sta_req_hdr) + sizeof(*ra);
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_RA_UPDATE, sizeof(*ra));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA_UPDATE, sizeof(*ra));
ra = (struct sta_rec_ra_fixed *)tlv;
switch (field) {
@@ -2091,19 +1425,19 @@ int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct sk_buff *skb;
int ret;
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
- MT76_CONNAC_STA_UPDATE_MAX_SIZE);
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
- sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
-
- wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
- &skb);
+ sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
+ sizeof(struct tlv));
+ wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
+ WTBL_SET, sta_wtbl, &skb);
if (IS_ERR(wtbl_hdr))
return PTR_ERR(wtbl_hdr);
- mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr);
+ mt76_connac_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr);
ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD(STA_REC_UPDATE), true);
@@ -2134,9 +1468,12 @@ mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev,
phy.sgi |= gi << (i << (_he)); \
phy.he_ltf |= mask->control[band].he_ltf << (i << (_he));\
} \
- for (i = 0; i < ARRAY_SIZE(mask->control[band]._mcs); i++) \
- nrates += hweight16(mask->control[band]._mcs[i]); \
- phy.mcs = ffs(mask->control[band]._mcs[0]) - 1; \
+ for (i = 0; i < ARRAY_SIZE(mask->control[band]._mcs); i++) { \
+ if (!mask->control[band]._mcs[i]) \
+ continue; \
+ nrates += hweight16(mask->control[band]._mcs[i]); \
+ phy.mcs = ffs(mask->control[band]._mcs[i]) - 1; \
+ } \
} while (0)
if (sta->he_cap.has_he) {
@@ -2204,7 +1541,8 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef;
+ struct mt76_phy *mphy = mvif->phy->mt76;
+ struct cfg80211_chan_def *chandef = &mphy->chandef;
struct cfg80211_bitrate_mask *mask = &mvif->bitrate_mask;
enum nl80211_band band = chandef->chan->band;
struct sta_rec_ra *ra;
@@ -2212,15 +1550,16 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
u32 supp_rate = sta->supp_rates[band];
u32 cap = sta->wme ? STA_CAP_WMM : 0;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra));
ra = (struct sta_rec_ra *)tlv;
ra->valid = true;
ra->auto_rate = true;
- ra->phy_mode = mt7915_get_phy_mode(vif, sta);
+ ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, sta);
ra->channel = chandef->chan->hw_value;
ra->bw = sta->bandwidth;
ra->phy.bw = sta->bandwidth;
+ ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->smps_mode);
if (supp_rate) {
supp_rate &= mask->control[band].legacy;
@@ -2254,7 +1593,7 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
cap |= STA_CAP_TX_STBC;
if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
cap |= STA_CAP_RX_STBC;
- if (mvif->cap.ldpc &&
+ if (mvif->cap.ht_ldpc &&
(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
cap |= STA_CAP_LDPC;
@@ -2280,7 +1619,7 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
cap |= STA_CAP_VHT_TX_STBC;
if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1)
cap |= STA_CAP_VHT_RX_STBC;
- if (mvif->cap.ldpc &&
+ if (mvif->cap.vht_ldpc &&
(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC))
cap |= STA_CAP_VHT_LDPC;
@@ -2291,6 +1630,10 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
if (sta->he_cap.has_he) {
ra->supp_mode |= MODE_HE;
cap |= STA_CAP_HE;
+
+ if (sta->he_6ghz_capa.capa)
+ ra->af = le16_get_bits(sta->he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
}
ra->sta_cap = cpu_to_le32(cap);
@@ -2304,8 +1647,8 @@ int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct sk_buff *skb;
int ret;
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
- MT76_CONNAC_STA_UPDATE_MAX_SIZE);
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -2313,7 +1656,7 @@ int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
* once dev->rc_work changes the settings driver should also
* update sta_rec_he here.
*/
- if (sta->he_cap.has_he && changed)
+ if (changed)
mt7915_mcu_sta_he_tlv(skb, sta, vif);
/* sta_rec_ra accommodates BW, NSS and only MCS range format
@@ -2371,18 +1714,18 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta;
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
- MT76_CONNAC_STA_UPDATE_MAX_SIZE);
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
/* starec basic */
- mt7915_mcu_sta_basic_tlv(skb, vif, sta, enable);
+ mt76_connac_mcu_sta_basic_tlv(skb, vif, sta, enable, true);
if (!enable)
goto out;
/* tag order is in accordance with firmware dependency. */
- if (sta && sta->ht_cap.ht_supported) {
+ if (sta) {
/* starec bfer */
mt7915_mcu_sta_bfer_tlv(dev, skb, vif, sta);
/* starec ht */
@@ -2390,16 +1733,18 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
/* starec vht */
mt7915_mcu_sta_vht_tlv(skb, sta);
/* starec uapsd */
- mt7915_mcu_sta_uapsd_tlv(skb, sta, vif);
+ mt76_connac_mcu_sta_uapsd(skb, vif, sta);
}
ret = mt7915_mcu_sta_wtbl_tlv(dev, skb, vif, sta);
- if (ret)
+ if (ret) {
+ dev_kfree_skb(skb);
return ret;
+ }
- if (sta && sta->ht_cap.ht_supported) {
+ if (sta) {
/* starec amsdu */
- mt7915_mcu_sta_amsdu_tlv(skb, vif, sta);
+ mt7915_mcu_sta_amsdu_tlv(dev, skb, vif, sta);
/* starec he */
mt7915_mcu_sta_he_tlv(skb, sta, vif);
/* starec muru */
@@ -2409,8 +1754,10 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
}
ret = mt7915_mcu_add_group(dev, vif, sta);
- if (ret)
+ if (ret) {
+ dev_kfree_skb(skb);
return ret;
+ }
out:
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD(STA_REC_UPDATE), true);
@@ -2479,6 +1826,55 @@ mt7915_mcu_beacon_cntdwn(struct ieee80211_vif *vif, struct sk_buff *rskb,
}
static void
+mt7915_mcu_beacon_mbss(struct sk_buff *rskb, struct sk_buff *skb,
+ struct ieee80211_vif *vif, struct bss_info_bcn *bcn,
+ struct ieee80211_mutable_offsets *offs)
+{
+ struct bss_info_bcn_mbss *mbss;
+ const struct element *elem;
+ struct tlv *tlv;
+
+ if (!vif->bss_conf.bssid_indicator)
+ return;
+
+ tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_MBSSID,
+ sizeof(*mbss), &bcn->sub_ntlv,
+ &bcn->len);
+
+ mbss = (struct bss_info_bcn_mbss *)tlv;
+ mbss->offset[0] = cpu_to_le16(offs->tim_offset);
+ mbss->bitmap = cpu_to_le32(1);
+
+ for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID,
+ &skb->data[offs->mbssid_off],
+ skb->len - offs->mbssid_off) {
+ const struct element *sub_elem;
+
+ if (elem->datalen < 2)
+ continue;
+
+ for_each_element(sub_elem, elem->data + 1, elem->datalen - 1) {
+ const u8 *data;
+
+ if (sub_elem->id || sub_elem->datalen < 4)
+ continue; /* not a valid BSS profile */
+
+ /* Find WLAN_EID_MULTI_BSSID_IDX
+ * in the merged nontransmitted profile
+ */
+ data = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX,
+ sub_elem->data,
+ sub_elem->datalen);
+ if (!data || data[1] < 1 || !data[2])
+ continue;
+
+ mbss->offset[data[2]] = cpu_to_le16(data - skb->data);
+ mbss->bitmap |= cpu_to_le32(BIT(data[2]));
+ }
+ }
+}
+
+static void
mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct sk_buff *rskb, struct sk_buff *skb,
struct bss_info_bcn *bcn,
@@ -2540,8 +1936,8 @@ mt7915_mcu_beacon_check_caps(struct mt7915_phy *phy, struct ieee80211_vif *vif,
len);
if (ie && ie[1] >= sizeof(*ht)) {
ht = (void *)(ie + 2);
- vc->ldpc |= !!(le16_to_cpu(ht->cap_info) &
- IEEE80211_HT_CAP_LDPC_CODING);
+ vc->ht_ldpc = !!(le16_to_cpu(ht->cap_info) &
+ IEEE80211_HT_CAP_LDPC_CODING);
}
ie = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, mgmt->u.beacon.variable,
@@ -2552,7 +1948,7 @@ mt7915_mcu_beacon_check_caps(struct mt7915_phy *phy, struct ieee80211_vif *vif,
vht = (void *)(ie + 2);
bc = le32_to_cpu(vht->vht_cap_info);
- vc->ldpc |= !!(bc & IEEE80211_VHT_CAP_RXLDPC);
+ vc->vht_ldpc = !!(bc & IEEE80211_VHT_CAP_RXLDPC);
vc->vht_su_ebfer =
(bc & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) &&
(pc & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
@@ -2571,11 +1967,13 @@ mt7915_mcu_beacon_check_caps(struct mt7915_phy *phy, struct ieee80211_vif *vif,
mgmt->u.beacon.variable, len);
if (ie && ie[1] >= sizeof(*he) + 1) {
const struct ieee80211_sta_he_cap *pc =
- mt7915_get_he_phy_cap(phy, vif);
+ mt76_connac_get_he_phy_cap(phy->mt76, vif);
const struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem;
he = (void *)(ie + 3);
+ vc->he_ldpc =
+ HE_PHY(CAP1_LDPC_CODING_IN_PAYLOAD, pe->phy_cap_info[1]);
vc->he_su_ebfer =
HE_PHY(CAP3_SU_BEAMFORMER, he->phy_cap_info[3]) &&
HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]);
@@ -2601,12 +1999,17 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
struct tlv *tlv;
struct bss_info_bcn *bcn;
int len = MT7915_BEACON_UPDATE_SIZE + MAX_BEACON_SIZE;
+ bool ext_phy = phy != &dev->phy;
+
+ if (vif->bss_conf.nontransmitted)
+ return 0;
- rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
+ rskb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ NULL, len);
if (IS_ERR(rskb))
return PTR_ERR(rskb);
- tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
+ tlv = mt76_connac_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
bcn = (struct bss_info_bcn *)tlv;
bcn->enable = en;
@@ -2623,15 +2026,15 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
return -EINVAL;
}
- if (mvif->mt76.band_idx) {
+ if (ext_phy) {
info = IEEE80211_SKB_CB(skb);
info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
}
mt7915_mcu_beacon_check_caps(phy, vif, skb);
- /* TODO: subtag - 11v MBSSID */
mt7915_mcu_beacon_cntdwn(vif, rskb, skb, bcn, &offs);
+ mt7915_mcu_beacon_mbss(rskb, skb, vif, bcn, &offs);
mt7915_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs);
dev_kfree_skb(skb);
@@ -2640,91 +2043,20 @@ out:
MCU_EXT_CMD(BSS_INFO_UPDATE), true);
}
-static int mt7915_mcu_start_firmware(struct mt7915_dev *dev, u32 addr,
- u32 option)
-{
- struct {
- __le32 option;
- __le32 addr;
- } req = {
- .option = cpu_to_le32(option),
- .addr = cpu_to_le32(addr),
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD(FW_START_REQ), &req,
- sizeof(req), true);
-}
-
-static int mt7915_mcu_restart(struct mt76_dev *dev)
-{
- struct {
- u8 power_mode;
- u8 rsv[3];
- } req = {
- .power_mode = 1,
- };
-
- return mt76_mcu_send_msg(dev, MCU_CMD(NIC_POWER_CTRL), &req,
- sizeof(req), false);
-}
-
-static int mt7915_mcu_patch_sem_ctrl(struct mt7915_dev *dev, bool get)
+static int mt7915_driver_own(struct mt7915_dev *dev, u8 band)
{
- struct {
- __le32 op;
- } req = {
- .op = cpu_to_le32(get ? PATCH_SEM_GET : PATCH_SEM_RELEASE),
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD(PATCH_SEM_CONTROL), &req,
- sizeof(req), true);
-}
-
-static int mt7915_mcu_start_patch(struct mt7915_dev *dev)
-{
- struct {
- u8 check_crc;
- u8 reserved[3];
- } req = {
- .check_crc = 0,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD(PATCH_FINISH_REQ), &req,
- sizeof(req), true);
-}
-
-static int mt7915_driver_own(struct mt7915_dev *dev)
-{
- mt76_wr(dev, MT_TOP_LPCR_HOST_BAND0, MT_TOP_LPCR_HOST_DRV_OWN);
- if (!mt76_poll_msec(dev, MT_TOP_LPCR_HOST_BAND0,
- MT_TOP_LPCR_HOST_FW_OWN, 0, 500)) {
+ mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(band), MT_TOP_LPCR_HOST_DRV_OWN);
+ if (!mt76_poll_msec(dev, MT_TOP_LPCR_HOST_BAND(band),
+ MT_TOP_LPCR_HOST_FW_OWN_STAT, 0, 500)) {
dev_err(dev->mt76.dev, "Timeout for driver own\n");
return -EIO;
}
- return 0;
-}
+ /* clear irq when driver own succeeds */
+ mt76_wr(dev, MT_TOP_LPCR_HOST_BAND_IRQ_STAT(band),
+ MT_TOP_LPCR_HOST_BAND_STAT);
-static int mt7915_mcu_init_download(struct mt7915_dev *dev, u32 addr,
- u32 len, u32 mode)
-{
- struct {
- __le32 addr;
- __le32 len;
- __le32 mode;
- } req = {
- .addr = cpu_to_le32(addr),
- .len = cpu_to_le32(len),
- .mode = cpu_to_le32(mode),
- };
- int attr;
-
- if (req.addr == cpu_to_le32(MCU_PATCH_ADDRESS))
- attr = MCU_CMD(PATCH_START_REQ);
- else
- attr = MCU_CMD(TARGET_ADDRESS_LEN_REQ);
-
- return mt76_mcu_send_msg(&dev->mt76, attr, &req, sizeof(req), true);
+ return 0;
}
static int mt7915_load_patch(struct mt7915_dev *dev)
@@ -2733,7 +2065,7 @@ static int mt7915_load_patch(struct mt7915_dev *dev)
const struct firmware *fw = NULL;
int i, ret, sem;
- sem = mt7915_mcu_patch_sem_ctrl(dev, 1);
+ sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, 1);
switch (sem) {
case PATCH_IS_DL:
return 0;
@@ -2744,7 +2076,8 @@ static int mt7915_load_patch(struct mt7915_dev *dev)
return -EAGAIN;
}
- ret = request_firmware(&fw, MT7915_ROM_PATCH, dev->mt76.dev);
+ ret = request_firmware(&fw, fw_name_var(dev, ROM_PATCH),
+ dev->mt76.dev);
if (ret)
goto out;
@@ -2776,8 +2109,8 @@ static int mt7915_load_patch(struct mt7915_dev *dev)
len = be32_to_cpu(sec->info.len);
dl = fw->data + be32_to_cpu(sec->offs);
- ret = mt7915_mcu_init_download(dev, addr, len,
- DL_MODE_NEED_RSP);
+ ret = mt76_connac_mcu_init_download(&dev->mt76, addr, len,
+ DL_MODE_NEED_RSP);
if (ret) {
dev_err(dev->mt76.dev, "Download request failed\n");
goto out;
@@ -2791,12 +2124,12 @@ static int mt7915_load_patch(struct mt7915_dev *dev)
}
}
- ret = mt7915_mcu_start_patch(dev);
+ ret = mt76_connac_mcu_start_patch(&dev->mt76);
if (ret)
dev_err(dev->mt76.dev, "Failed to start patch\n");
out:
- sem = mt7915_mcu_patch_sem_ctrl(dev, 0);
+ sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, 0);
switch (sem) {
case PATCH_REL_SEM_SUCCESS:
break;
@@ -2810,20 +2143,6 @@ out:
return ret;
}
-static u32 mt7915_mcu_gen_dl_mode(u8 feature_set, bool is_wa)
-{
- u32 ret = 0;
-
- ret |= (feature_set & FW_FEATURE_SET_ENCRYPT) ?
- (DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV) : 0;
- ret |= FIELD_PREP(DL_MODE_KEY_IDX,
- FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
- ret |= DL_MODE_NEED_RSP;
- ret |= is_wa ? DL_MODE_WORKING_PDA_CR4 : 0;
-
- return ret;
-}
-
static int
mt7915_mcu_send_ram_firmware(struct mt7915_dev *dev,
const struct mt7915_fw_trailer *hdr,
@@ -2839,14 +2158,16 @@ mt7915_mcu_send_ram_firmware(struct mt7915_dev *dev,
region = (const struct mt7915_fw_region *)((const u8 *)hdr -
(hdr->n_region - i) * sizeof(*region));
- mode = mt7915_mcu_gen_dl_mode(region->feature_set, is_wa);
+ mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76,
+ region->feature_set, is_wa);
len = le32_to_cpu(region->len);
addr = le32_to_cpu(region->addr);
if (region->feature_set & FW_FEATURE_OVERRIDE_ADDR)
override = addr;
- err = mt7915_mcu_init_download(dev, addr, len, mode);
+ err = mt76_connac_mcu_init_download(&dev->mt76, addr, len,
+ mode);
if (err) {
dev_err(dev->mt76.dev, "Download request failed\n");
return err;
@@ -2868,7 +2189,7 @@ mt7915_mcu_send_ram_firmware(struct mt7915_dev *dev,
if (is_wa)
option |= FW_START_WORKING_PDA_CR4;
- return mt7915_mcu_start_firmware(dev, override, option);
+ return mt76_connac_mcu_start_firmware(&dev->mt76, override, option);
}
static int mt7915_load_ram(struct mt7915_dev *dev)
@@ -2877,7 +2198,8 @@ static int mt7915_load_ram(struct mt7915_dev *dev)
const struct firmware *fw;
int ret;
- ret = request_firmware(&fw, MT7915_FIRMWARE_WM, dev->mt76.dev);
+ ret = request_firmware(&fw, fw_name_var(dev, FIRMWARE_WM),
+ dev->mt76.dev);
if (ret)
return ret;
@@ -2901,7 +2223,8 @@ static int mt7915_load_ram(struct mt7915_dev *dev)
release_firmware(fw);
- ret = request_firmware(&fw, MT7915_FIRMWARE_WA, dev->mt76.dev);
+ ret = request_firmware(&fw, fw_name(dev, FIRMWARE_WA),
+ dev->mt76.dev);
if (ret)
return ret;
@@ -2933,10 +2256,36 @@ out:
return ret;
}
+static int
+mt7915_firmware_state(struct mt7915_dev *dev, bool wa)
+{
+ u32 state = FIELD_PREP(MT_TOP_MISC_FW_STATE,
+ wa ? FW_STATE_RDY : FW_STATE_FW_DOWNLOAD);
+
+ if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE,
+ state, 1000)) {
+ dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
+ return -EIO;
+ }
+ return 0;
+}
+
static int mt7915_load_firmware(struct mt7915_dev *dev)
{
int ret;
+ /* make sure fw is in download state */
+ if (mt7915_firmware_state(dev, false)) {
+ /* restart firmware once */
+ __mt76_mcu_restart(&dev->mt76);
+ ret = mt7915_firmware_state(dev, false);
+ if (ret) {
+ dev_err(dev->mt76.dev,
+ "Firmware is not ready for download\n");
+ return ret;
+ }
+ }
+
ret = mt7915_load_patch(dev);
if (ret)
return ret;
@@ -2945,12 +2294,9 @@ static int mt7915_load_firmware(struct mt7915_dev *dev)
if (ret)
return ret;
- if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE,
- FIELD_PREP(MT_TOP_MISC_FW_STATE,
- FW_STATE_RDY), 1000)) {
- dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
- return -EIO;
- }
+ ret = mt7915_firmware_state(dev, true);
+ if (ret)
+ return ret;
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false);
@@ -3021,7 +2367,7 @@ int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy, void *ms)
u8 band_idx;
} req = {
.cmd = cpu_to_le32(MURU_GET_TXC_TX_STATS),
- .band_idx = phy != &dev->phy,
+ .band_idx = phy->band_idx,
};
ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL),
@@ -3110,15 +2456,29 @@ int mt7915_mcu_init(struct mt7915_dev *dev)
.headroom = sizeof(struct mt7915_mcu_txd),
.mcu_skb_send_msg = mt7915_mcu_send_message,
.mcu_parse_response = mt7915_mcu_parse_response,
- .mcu_restart = mt7915_mcu_restart,
+ .mcu_restart = mt76_connac_mcu_restart,
};
int ret;
dev->mt76.mcu_ops = &mt7915_mcu_ops;
- ret = mt7915_driver_own(dev);
+ /* force firmware operation mode into normal state,
+ * which should be set before the firmware download stage.
+ */
+ if (is_mt7915(&dev->mt76))
+ mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
+ else
+ mt76_wr(dev, MT_SWDEF_MODE_MT7916, MT_SWDEF_NORMAL_MODE);
+
+ ret = mt7915_driver_own(dev, 0);
if (ret)
return ret;
+ /* set driver own for band1 when two hifs exist */
+ if (dev->hif2) {
+ ret = mt7915_driver_own(dev, 1);
+ if (ret)
+ return ret;
+ }
ret = mt7915_load_firmware(dev);
if (ret)
@@ -3153,14 +2513,15 @@ int mt7915_mcu_init(struct mt7915_dev *dev)
void mt7915_mcu_exit(struct mt7915_dev *dev)
{
__mt76_mcu_restart(&dev->mt76);
- if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE,
- FIELD_PREP(MT_TOP_MISC_FW_STATE,
- FW_STATE_FW_DOWNLOAD), 1000)) {
+ if (mt7915_firmware_state(dev, false)) {
dev_err(dev->mt76.dev, "Failed to exit mcu\n");
return;
}
- mt76_wr(dev, MT_TOP_LPCR_HOST_BAND0, MT_TOP_LPCR_HOST_FW_OWN);
+ mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(0), MT_TOP_LPCR_HOST_FW_OWN);
+ if (dev->hif2)
+ mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(1),
+ MT_TOP_LPCR_HOST_FW_OWN);
skb_queue_purge(&dev->mt76.mcu.res_q);
}
@@ -3238,26 +2599,6 @@ int mt7915_mcu_set_scs(struct mt7915_dev *dev, u8 band, bool enable)
sizeof(req), false);
}
-int mt7915_mcu_set_rts_thresh(struct mt7915_phy *phy, u32 val)
-{
- struct mt7915_dev *dev = phy->dev;
- struct {
- u8 prot_idx;
- u8 band;
- u8 rsv[2];
- __le32 len_thresh;
- __le32 pkt_thresh;
- } __packed req = {
- .prot_idx = 1,
- .band = phy != &dev->phy,
- .len_thresh = cpu_to_le32(val),
- .pkt_thresh = cpu_to_le32(0x2),
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(PROTECT_CTRL), &req,
- sizeof(req), true);
-}
-
int mt7915_mcu_update_edca(struct mt7915_dev *dev, void *param)
{
struct mt7915_mcu_tx *req = (struct mt7915_mcu_tx *)param;
@@ -3303,58 +2644,6 @@ int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif)
return mt7915_mcu_update_edca(dev, &req);
}
-int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter)
-{
-#define ENTER_PM_STATE 1
-#define EXIT_PM_STATE 2
- struct {
- u8 pm_number;
- u8 pm_state;
- u8 bssid[ETH_ALEN];
- u8 dtim_period;
- u8 wlan_idx_lo;
- __le16 bcn_interval;
- __le32 aid;
- __le32 rx_filter;
- u8 band_idx;
- u8 wlan_idx_hi;
- u8 rsv[2];
- __le32 feature;
- u8 omac_idx;
- u8 wmm_idx;
- u8 bcn_loss_cnt;
- u8 bcn_sp_duration;
- } __packed req = {
- .pm_number = 5,
- .pm_state = (enter) ? ENTER_PM_STATE : EXIT_PM_STATE,
- .band_idx = band,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(PM_STATE_CTRL), &req,
- sizeof(req), true);
-}
-
-int mt7915_mcu_rdd_cmd(struct mt7915_dev *dev,
- enum mt7915_rdd_cmd cmd, u8 index,
- u8 rx_sel, u8 val)
-{
- struct {
- u8 ctrl;
- u8 rdd_idx;
- u8 rdd_rx_sel;
- u8 val;
- u8 rsv[4];
- } __packed req = {
- .ctrl = cmd,
- .rdd_idx = index,
- .rdd_rx_sel = rx_sel,
- .val = val,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_CTRL), &req,
- sizeof(req), true);
-}
-
int mt7915_mcu_set_fcc5_lpn(struct mt7915_dev *dev, int val)
{
struct {
@@ -3453,12 +2742,109 @@ int mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int index,
sizeof(req), true);
}
+static int
+mt7915_mcu_background_chain_ctrl(struct mt7915_phy *phy,
+ struct cfg80211_chan_def *chandef,
+ int cmd)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt76_phy *mphy = phy->mt76;
+ struct ieee80211_channel *chan = mphy->chandef.chan;
+ int freq = mphy->chandef.center_freq1;
+ struct mt7915_mcu_background_chain_ctrl req = {
+ .monitor_scan_type = 2, /* simple rx */
+ };
+
+ if (!chandef && cmd != CH_SWITCH_BACKGROUND_SCAN_STOP)
+ return -EINVAL;
+
+ if (!cfg80211_chandef_valid(&mphy->chandef))
+ return -EINVAL;
+
+ switch (cmd) {
+ case CH_SWITCH_BACKGROUND_SCAN_START: {
+ req.chan = chan->hw_value;
+ req.central_chan = ieee80211_frequency_to_channel(freq);
+ req.bw = mt76_connac_chan_bw(&mphy->chandef);
+ req.monitor_chan = chandef->chan->hw_value;
+ req.monitor_central_chan =
+ ieee80211_frequency_to_channel(chandef->center_freq1);
+ req.monitor_bw = mt76_connac_chan_bw(chandef);
+ req.band_idx = phy != &dev->phy;
+ req.scan_mode = 1;
+ break;
+ }
+ case CH_SWITCH_BACKGROUND_SCAN_RUNNING:
+ req.monitor_chan = chandef->chan->hw_value;
+ req.monitor_central_chan =
+ ieee80211_frequency_to_channel(chandef->center_freq1);
+ req.band_idx = phy != &dev->phy;
+ req.scan_mode = 2;
+ break;
+ case CH_SWITCH_BACKGROUND_SCAN_STOP:
+ req.chan = chan->hw_value;
+ req.central_chan = ieee80211_frequency_to_channel(freq);
+ req.bw = mt76_connac_chan_bw(&mphy->chandef);
+ req.tx_stream = hweight8(mphy->antenna_mask);
+ req.rx_stream = mphy->antenna_mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+ req.band = chandef ? chandef->chan->band == NL80211_BAND_5GHZ : 1;
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(OFFCH_SCAN_CTRL),
+ &req, sizeof(req), false);
+}
+
+int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy,
+ struct cfg80211_chan_def *chandef)
+{
+ struct mt7915_dev *dev = phy->dev;
+ int err, region;
+
+ if (!chandef) { /* disable offchain */
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, MT_RX_SEL2,
+ 0, 0);
+ if (err)
+ return err;
+
+ return mt7915_mcu_background_chain_ctrl(phy, NULL,
+ CH_SWITCH_BACKGROUND_SCAN_STOP);
+ }
+
+ err = mt7915_mcu_background_chain_ctrl(phy, chandef,
+ CH_SWITCH_BACKGROUND_SCAN_START);
+ if (err)
+ return err;
+
+ switch (dev->mt76.region) {
+ case NL80211_DFS_ETSI:
+ region = 0;
+ break;
+ case NL80211_DFS_JP:
+ region = 2;
+ break;
+ case NL80211_DFS_FCC:
+ default:
+ region = 1;
+ break;
+ }
+
+ return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, MT_RX_SEL2,
+ 0, region);
+}
+
int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
{
+ static const u8 ch_band[] = {
+ [NL80211_BAND_2GHZ] = 0,
+ [NL80211_BAND_5GHZ] = 1,
+ [NL80211_BAND_6GHZ] = 2,
+ };
struct mt7915_dev *dev = phy->dev;
struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
int freq1 = chandef->center_freq1;
- bool ext_phy = phy != &dev->phy;
struct {
u8 control_ch;
u8 center_ch;
@@ -3479,11 +2865,11 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
} __packed req = {
.control_ch = chandef->chan->hw_value,
.center_ch = ieee80211_frequency_to_channel(freq1),
- .bw = mt7915_mcu_chan_bw(chandef),
+ .bw = mt76_connac_chan_bw(chandef),
.tx_streams_num = hweight8(phy->mt76->antenna_mask),
.rx_streams = phy->mt76->antenna_mask,
- .band_idx = ext_phy,
- .channel_band = chandef->chan->band,
+ .band_idx = phy->band_idx,
+ .channel_band = ch_band[chandef->chan->band],
};
#ifdef CONFIG_NL80211_TESTMODE
@@ -3494,17 +2880,18 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
req.tx_streams_num = fls(phy->mt76->test.tx_antenna_mask);
req.rx_streams = phy->mt76->test.tx_antenna_mask;
- if (ext_phy) {
- req.tx_streams_num = 2;
- req.rx_streams >>= 2;
- }
+ if (phy != &dev->phy)
+ req.rx_streams >>= dev->chainshift;
}
#endif
- if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
+ dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
+ req.switch_reason = CH_SWITCH_NORMAL;
+ else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
- else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
- chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
+ else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
+ NL80211_IFTYPE_AP))
req.switch_reason = CH_SWITCH_DFS;
else
req.switch_reason = CH_SWITCH_NORMAL;
@@ -3527,7 +2914,8 @@ static int mt7915_mcu_set_eeprom_flash(struct mt7915_dev *dev)
#define PAGE_IDX_MASK GENMASK(4, 2)
#define PER_PAGE_SIZE 0x400
struct mt7915_mcu_eeprom req = { .buffer_mode = EE_MODE_BUFFER };
- u8 total = DIV_ROUND_UP(MT7915_EEPROM_SIZE, PER_PAGE_SIZE);
+ u16 eeprom_size = mt7915_eeprom_size(dev);
+ u8 total = DIV_ROUND_UP(eeprom_size, PER_PAGE_SIZE);
u8 *eep = (u8 *)dev->mt76.eeprom.data;
int eep_len;
int i;
@@ -3536,8 +2924,8 @@ static int mt7915_mcu_set_eeprom_flash(struct mt7915_dev *dev)
struct sk_buff *skb;
int ret;
- if (i == total - 1 && !!(MT7915_EEPROM_SIZE % PER_PAGE_SIZE))
- eep_len = MT7915_EEPROM_SIZE % PER_PAGE_SIZE;
+ if (i == total - 1 && !!(eeprom_size % PER_PAGE_SIZE))
+ eep_len = eeprom_size % PER_PAGE_SIZE;
else
eep_len = PER_PAGE_SIZE;
@@ -3770,19 +3158,26 @@ int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy)
int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch)
{
/* strict order */
- static const enum mt7915_chan_mib_offs offs[] = {
- MIB_BUSY_TIME, MIB_TX_TIME, MIB_RX_TIME, MIB_OBSS_AIRTIME
+ static const u32 offs[] = {
+ MIB_BUSY_TIME, MIB_TX_TIME, MIB_RX_TIME, MIB_OBSS_AIRTIME,
+ MIB_BUSY_TIME_V2, MIB_TX_TIME_V2, MIB_RX_TIME_V2,
+ MIB_OBSS_AIRTIME_V2
};
struct mt76_channel_state *state = phy->mt76->chan_state;
struct mt76_channel_state *state_ts = &phy->state_ts;
struct mt7915_dev *dev = phy->dev;
struct mt7915_mcu_mib *res, req[4];
struct sk_buff *skb;
- int i, ret;
+ int i, ret, start = 0, ofs = 20;
+
+ if (!is_mt7915(&dev->mt76)) {
+ start = 4;
+ ofs = 0;
+ }
for (i = 0; i < 4; i++) {
req[i].band = cpu_to_le32(phy != &dev->phy);
- req[i].offs = cpu_to_le32(offs[i]);
+ req[i].offs = cpu_to_le32(offs[i + start]);
}
ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(GET_MIB_INFO),
@@ -3790,7 +3185,7 @@ int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch)
if (ret)
return ret;
- res = (struct mt7915_mcu_mib *)(skb->data + 20);
+ res = (struct mt7915_mcu_mib *)(skb->data + ofs);
if (chan_switch)
goto out;
@@ -3842,7 +3237,7 @@ int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state)
u8 rsv[2];
} __packed req = {
.ctrl = {
- .band_idx = phy != &dev->phy,
+ .band_idx = phy->band_idx,
},
};
int level;
@@ -4137,6 +3532,8 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
case MT_PHY_TYPE_OFDM:
if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
sband = &mphy->sband_5g.sband;
+ else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
+ sband = &mphy->sband_6g.sband;
else
sband = &mphy->sband_2g.sband;
@@ -4210,11 +3607,13 @@ int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vi
struct sk_buff *skb;
struct tlv *tlv;
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ NULL, len);
if (IS_ERR(skb))
return PTR_ERR(skb);
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_BSS_COLOR, sizeof(*bss_color));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BSS_COLOR,
+ sizeof(*bss_color));
bss_color = (struct bss_info_color *)tlv;
bss_color->disable = !he_bss_color->enabled;
bss_color->color = he_bss_color->color;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
index 92268e696931..960072a44222 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -79,6 +79,15 @@ struct mt7915_mcu_csa_notify {
u8 rsv;
} __packed;
+struct mt7915_mcu_bcc_notify {
+ struct mt7915_mcu_rxd rxd;
+
+ u8 band_idx;
+ u8 omac_idx;
+ u8 cca_count;
+ u8 rsv;
+} __packed;
+
struct mt7915_mcu_rdd_report {
struct mt7915_mcu_rxd rxd;
@@ -131,6 +140,29 @@ struct mt7915_mcu_rdd_report {
} hw_pulse[32];
} __packed;
+struct mt7915_mcu_background_chain_ctrl {
+ u8 chan; /* primary channel */
+ u8 central_chan; /* central channel */
+ u8 bw;
+ u8 tx_stream;
+ u8 rx_stream;
+
+ u8 monitor_chan; /* monitor channel */
+ u8 monitor_central_chan;/* monitor central channel */
+ u8 monitor_bw;
+ u8 monitor_tx_stream;
+ u8 monitor_rx_stream;
+
+ u8 scan_mode; /* 0: ScanStop
+ * 1: ScanStart
+ * 2: ScanRunning
+ */
+ u8 band_idx; /* DBDC */
+ u8 monitor_scan_type;
+ u8 band; /* 0: 2.4GHz, 1: 5GHz */
+ u8 rsv[2];
+} __packed;
+
struct mt7915_mcu_eeprom {
u8 buffer_mode;
u8 format;
@@ -161,10 +193,16 @@ struct mt7915_mcu_mib {
} __packed;
enum mt7915_chan_mib_offs {
+ /* mt7915 */
MIB_BUSY_TIME = 14,
MIB_TX_TIME = 81,
MIB_RX_TIME,
- MIB_OBSS_AIRTIME = 86
+ MIB_OBSS_AIRTIME = 86,
+ /* mt7916 */
+ MIB_BUSY_TIME_V2 = 0,
+ MIB_TX_TIME_V2 = 6,
+ MIB_RX_TIME_V2 = 8,
+ MIB_OBSS_AIRTIME_V2 = 490
};
struct edca {
@@ -266,29 +304,6 @@ enum mcu_mmps_mode {
MCU_MMPS_DISABLE,
};
-#define STA_TYPE_STA BIT(0)
-#define STA_TYPE_AP BIT(1)
-#define STA_TYPE_ADHOC BIT(2)
-#define STA_TYPE_WDS BIT(4)
-#define STA_TYPE_BC BIT(5)
-
-#define NETWORK_INFRA BIT(16)
-#define NETWORK_P2P BIT(17)
-#define NETWORK_IBSS BIT(18)
-#define NETWORK_WDS BIT(21)
-
-#define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA)
-#define CONNECTION_INFRA_AP (STA_TYPE_AP | NETWORK_INFRA)
-#define CONNECTION_P2P_GC (STA_TYPE_STA | NETWORK_P2P)
-#define CONNECTION_P2P_GO (STA_TYPE_AP | NETWORK_P2P)
-#define CONNECTION_IBSS_ADHOC (STA_TYPE_ADHOC | NETWORK_IBSS)
-#define CONNECTION_WDS (STA_TYPE_WDS | NETWORK_WDS)
-#define CONNECTION_INFRA_BC (STA_TYPE_BC | NETWORK_INFRA)
-
-#define CONN_STATE_DISCONNECT 0
-#define CONN_STATE_CONNECT 1
-#define CONN_STATE_PORT_SECURE 2
-
enum {
SCS_SEND_DATA,
SCS_SET_MANUAL_PD_TH,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index 1f6ba306c850..5062e0d8cae4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -1,102 +1,441 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+
#include "mt7915.h"
+#include "mac.h"
+#include "../trace.h"
+
+static const u32 mt7915_reg[] = {
+ [INT_SOURCE_CSR] = 0xd7010,
+ [INT_MASK_CSR] = 0xd7014,
+ [INT1_SOURCE_CSR] = 0xd7088,
+ [INT1_MASK_CSR] = 0xd708c,
+ [INT_MCU_CMD_SOURCE] = 0xd51f0,
+ [INT_MCU_CMD_EVENT] = 0x3108,
+ [WFDMA0_ADDR] = 0xd4000,
+ [WFDMA0_PCIE1_ADDR] = 0xd8000,
+ [WFDMA_EXT_CSR_ADDR] = 0xd7000,
+ [CBTOP1_PHY_END] = 0x77ffffff,
+ [INFRA_MCU_ADDR_END] = 0x7c3fffff,
+};
+
+static const u32 mt7916_reg[] = {
+ [INT_SOURCE_CSR] = 0xd4200,
+ [INT_MASK_CSR] = 0xd4204,
+ [INT1_SOURCE_CSR] = 0xd8200,
+ [INT1_MASK_CSR] = 0xd8204,
+ [INT_MCU_CMD_SOURCE] = 0xd41f0,
+ [INT_MCU_CMD_EVENT] = 0x2108,
+ [WFDMA0_ADDR] = 0xd4000,
+ [WFDMA0_PCIE1_ADDR] = 0xd8000,
+ [WFDMA_EXT_CSR_ADDR] = 0xd7000,
+ [CBTOP1_PHY_END] = 0x7fffffff,
+ [INFRA_MCU_ADDR_END] = 0x7c085fff,
+};
+
+static const u32 mt7986_reg[] = {
+ [INT_SOURCE_CSR] = 0x24200,
+ [INT_MASK_CSR] = 0x24204,
+ [INT1_SOURCE_CSR] = 0x28200,
+ [INT1_MASK_CSR] = 0x28204,
+ [INT_MCU_CMD_SOURCE] = 0x241f0,
+ [INT_MCU_CMD_EVENT] = 0x54000108,
+ [WFDMA0_ADDR] = 0x24000,
+ [WFDMA0_PCIE1_ADDR] = 0x28000,
+ [WFDMA_EXT_CSR_ADDR] = 0x27000,
+ [CBTOP1_PHY_END] = 0x7fffffff,
+ [INFRA_MCU_ADDR_END] = 0x7c085fff,
+};
+
+static const u32 mt7915_offs[] = {
+ [TMAC_CDTR] = 0x090,
+ [TMAC_ODTR] = 0x094,
+ [TMAC_ATCR] = 0x098,
+ [TMAC_TRCR0] = 0x09c,
+ [TMAC_ICR0] = 0x0a4,
+ [TMAC_ICR1] = 0x0b4,
+ [TMAC_CTCR0] = 0x0f4,
+ [TMAC_TFCR0] = 0x1e0,
+ [MDP_BNRCFR0] = 0x070,
+ [MDP_BNRCFR1] = 0x074,
+ [ARB_DRNGR0] = 0x194,
+ [ARB_SCR] = 0x080,
+ [RMAC_MIB_AIRTIME14] = 0x3b8,
+ [AGG_AWSCR0] = 0x05c,
+ [AGG_PCR0] = 0x06c,
+ [AGG_ACR0] = 0x084,
+ [AGG_MRCR] = 0x098,
+ [AGG_ATCR1] = 0x0f0,
+ [AGG_ATCR3] = 0x0f4,
+ [LPON_UTTR0] = 0x080,
+ [LPON_UTTR1] = 0x084,
+ [LPON_FRCR] = 0x314,
+ [MIB_SDR3] = 0x014,
+ [MIB_SDR4] = 0x018,
+ [MIB_SDR5] = 0x01c,
+ [MIB_SDR7] = 0x024,
+ [MIB_SDR8] = 0x028,
+ [MIB_SDR9] = 0x02c,
+ [MIB_SDR10] = 0x030,
+ [MIB_SDR11] = 0x034,
+ [MIB_SDR12] = 0x038,
+ [MIB_SDR13] = 0x03c,
+ [MIB_SDR14] = 0x040,
+ [MIB_SDR15] = 0x044,
+ [MIB_SDR16] = 0x048,
+ [MIB_SDR17] = 0x04c,
+ [MIB_SDR18] = 0x050,
+ [MIB_SDR19] = 0x054,
+ [MIB_SDR20] = 0x058,
+ [MIB_SDR21] = 0x05c,
+ [MIB_SDR22] = 0x060,
+ [MIB_SDR23] = 0x064,
+ [MIB_SDR24] = 0x068,
+ [MIB_SDR25] = 0x06c,
+ [MIB_SDR27] = 0x074,
+ [MIB_SDR28] = 0x078,
+ [MIB_SDR29] = 0x07c,
+ [MIB_SDRVEC] = 0x080,
+ [MIB_SDR31] = 0x084,
+ [MIB_SDR32] = 0x088,
+ [MIB_SDRMUBF] = 0x090,
+ [MIB_DR8] = 0x0c0,
+ [MIB_DR9] = 0x0c4,
+ [MIB_DR11] = 0x0cc,
+ [MIB_MB_SDR0] = 0x100,
+ [MIB_MB_SDR1] = 0x104,
+ [TX_AGG_CNT] = 0x0a8,
+ [TX_AGG_CNT2] = 0x164,
+ [MIB_ARNG] = 0x4b8,
+ [WTBLON_TOP_WDUCR] = 0x0,
+ [WTBL_UPDATE] = 0x030,
+ [PLE_FL_Q_EMPTY] = 0x0b0,
+ [PLE_FL_Q_CTRL] = 0x1b0,
+ [PLE_AC_QEMPTY] = 0x500,
+ [PLE_FREEPG_CNT] = 0x100,
+ [PLE_FREEPG_HEAD_TAIL] = 0x104,
+ [PLE_PG_HIF_GROUP] = 0x110,
+ [PLE_HIF_PG_INFO] = 0x114,
+ [AC_OFFSET] = 0x040,
+ [ETBF_PAR_RPT0] = 0x068,
+};
+
+static const u32 mt7916_offs[] = {
+ [TMAC_CDTR] = 0x0c8,
+ [TMAC_ODTR] = 0x0cc,
+ [TMAC_ATCR] = 0x00c,
+ [TMAC_TRCR0] = 0x010,
+ [TMAC_ICR0] = 0x014,
+ [TMAC_ICR1] = 0x018,
+ [TMAC_CTCR0] = 0x114,
+ [TMAC_TFCR0] = 0x0e4,
+ [MDP_BNRCFR0] = 0x090,
+ [MDP_BNRCFR1] = 0x094,
+ [ARB_DRNGR0] = 0x1e0,
+ [ARB_SCR] = 0x000,
+ [RMAC_MIB_AIRTIME14] = 0x0398,
+ [AGG_AWSCR0] = 0x030,
+ [AGG_PCR0] = 0x040,
+ [AGG_ACR0] = 0x054,
+ [AGG_MRCR] = 0x068,
+ [AGG_ATCR1] = 0x1a8,
+ [AGG_ATCR3] = 0x080,
+ [LPON_UTTR0] = 0x360,
+ [LPON_UTTR1] = 0x364,
+ [LPON_FRCR] = 0x37c,
+ [MIB_SDR3] = 0x698,
+ [MIB_SDR4] = 0x788,
+ [MIB_SDR5] = 0x780,
+ [MIB_SDR7] = 0x5a8,
+ [MIB_SDR8] = 0x78c,
+ [MIB_SDR9] = 0x024,
+ [MIB_SDR10] = 0x76c,
+ [MIB_SDR11] = 0x790,
+ [MIB_SDR12] = 0x558,
+ [MIB_SDR13] = 0x560,
+ [MIB_SDR14] = 0x564,
+ [MIB_SDR15] = 0x568,
+ [MIB_SDR16] = 0x7fc,
+ [MIB_SDR17] = 0x800,
+ [MIB_SDR18] = 0x030,
+ [MIB_SDR19] = 0x5ac,
+ [MIB_SDR20] = 0x5b0,
+ [MIB_SDR21] = 0x5b4,
+ [MIB_SDR22] = 0x770,
+ [MIB_SDR23] = 0x774,
+ [MIB_SDR24] = 0x778,
+ [MIB_SDR25] = 0x77c,
+ [MIB_SDR27] = 0x080,
+ [MIB_SDR28] = 0x084,
+ [MIB_SDR29] = 0x650,
+ [MIB_SDRVEC] = 0x5a8,
+ [MIB_SDR31] = 0x55c,
+ [MIB_SDR32] = 0x7a8,
+ [MIB_SDRMUBF] = 0x7ac,
+ [MIB_DR8] = 0x56c,
+ [MIB_DR9] = 0x570,
+ [MIB_DR11] = 0x574,
+ [MIB_MB_SDR0] = 0x688,
+ [MIB_MB_SDR1] = 0x690,
+ [TX_AGG_CNT] = 0x7dc,
+ [TX_AGG_CNT2] = 0x7ec,
+ [MIB_ARNG] = 0x0b0,
+ [WTBLON_TOP_WDUCR] = 0x200,
+ [WTBL_UPDATE] = 0x230,
+ [PLE_FL_Q_EMPTY] = 0x360,
+ [PLE_FL_Q_CTRL] = 0x3e0,
+ [PLE_AC_QEMPTY] = 0x600,
+ [PLE_FREEPG_CNT] = 0x380,
+ [PLE_FREEPG_HEAD_TAIL] = 0x384,
+ [PLE_PG_HIF_GROUP] = 0x00c,
+ [PLE_HIF_PG_INFO] = 0x388,
+ [AC_OFFSET] = 0x080,
+ [ETBF_PAR_RPT0] = 0x100,
+};
+
+static const struct __map mt7915_reg_map[] = {
+ { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure regs) */
+ { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
+ { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
+ { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
+ { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
+ { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
+ { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
+ { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
+ { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
+ { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ { 0x0, 0x0, 0x0 }, /* imply end of search */
+};
+
+static const struct __map mt7916_reg_map[] = {
+ { 0x54000000, 0x02000, 0x1000 }, /* WFDMA_0 (PCIE0 MCU DMA0) */
+ { 0x55000000, 0x03000, 0x1000 }, /* WFDMA_1 (PCIE0 MCU DMA1) */
+ { 0x56000000, 0x04000, 0x1000 }, /* WFDMA_2 (Reserved) */
+ { 0x57000000, 0x05000, 0x1000 }, /* WFDMA_3 (MCU wrap CR) */
+ { 0x58000000, 0x06000, 0x1000 }, /* WFDMA_4 (PCIE1 MCU DMA0) */
+ { 0x59000000, 0x07000, 0x1000 }, /* WFDMA_5 (PCIE1 MCU DMA1) */
+ { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820ca000, 0x26000, 0x2000 }, /* WF_LMAC_TOP BN0 (WF_MUCOP) */
+ { 0x820d0000, 0x30000, 0x10000}, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x00400000, 0x80000, 0x10000}, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x90000, 0x10000}, /* WF_MCU_SYSRAM (configure cr) */
+ { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ { 0x820c4000, 0xa8000, 0x1000 }, /* WF_LMAC_TOP (WF_UWTBL ) */
+ { 0x820b0000, 0xae000, 0x1000 }, /* [APB2] WFSYS_ON */
+ { 0x80020000, 0xb0000, 0x10000}, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0xc0000, 0x10000}, /* WF_TOP_MISC_ON */
+ { 0x0, 0x0, 0x0 }, /* imply end of search */
+};
+
+static const struct __map mt7986_reg_map[] = {
+ { 0x54000000, 0x402000, 0x1000 }, /* WFDMA_0 (PCIE0 MCU DMA0) */
+ { 0x55000000, 0x403000, 0x1000 }, /* WFDMA_1 (PCIE0 MCU DMA1) */
+ { 0x56000000, 0x404000, 0x1000 }, /* WFDMA_2 (Reserved) */
+ { 0x57000000, 0x405000, 0x1000 }, /* WFDMA_3 (MCU wrap CR) */
+ { 0x58000000, 0x406000, 0x1000 }, /* WFDMA_4 (PCIE1 MCU DMA0) */
+ { 0x59000000, 0x407000, 0x1000 }, /* WFDMA_5 (PCIE1 MCU DMA1) */
+ { 0x820c0000, 0x408000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x40c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x40e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820e0000, 0x420000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x420400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x420800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x420c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x421000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x421400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820ce000, 0x421c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820e7000, 0x421e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820cf000, 0x422000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820e9000, 0x423400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x424000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x424200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x424600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x424800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820ca000, 0x426000, 0x2000 }, /* WF_LMAC_TOP BN0 (WF_MUCOP) */
+ { 0x820d0000, 0x430000, 0x10000}, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x00400000, 0x480000, 0x10000}, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x490000, 0x10000}, /* WF_MCU_SYSRAM */
+ { 0x820f0000, 0x4a0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0x4a0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0x4a0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0x4a0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0x4a1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0x4a1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0x4a1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0x4a3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0x4a4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0x4a4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0x4a4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0x4a4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ { 0x820c4000, 0x4a8000, 0x1000 }, /* WF_LMAC_TOP (WF_UWTBL ) */
+ { 0x820b0000, 0x4ae000, 0x1000 }, /* [APB2] WFSYS_ON */
+ { 0x80020000, 0x4b0000, 0x10000}, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0x4c0000, 0x10000}, /* WF_TOP_MISC_ON */
+ { 0x89000000, 0x4d0000, 0x1000 }, /* WF_MCU_CFG_ON */
+ { 0x89010000, 0x4d1000, 0x1000 }, /* WF_MCU_CIRQ */
+ { 0x89020000, 0x4d2000, 0x1000 }, /* WF_MCU_GPT */
+ { 0x89030000, 0x4d3000, 0x1000 }, /* WF_MCU_WDT */
+ { 0x80010000, 0x4d4000, 0x1000 }, /* WF_AXIDMA */
+ { 0x0, 0x0, 0x0 }, /* imply end of search */
+};
static u32 mt7915_reg_map_l1(struct mt7915_dev *dev, u32 addr)
{
u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);
+ u32 l1_remap;
+
+ if (is_mt7986(&dev->mt76))
+ return MT_CONN_INFRA_OFFSET(addr);
- mt76_rmw_field(dev, MT_HIF_REMAP_L1, MT_HIF_REMAP_L1_MASK, base);
+ l1_remap = is_mt7915(&dev->mt76) ?
+ MT_HIF_REMAP_L1 : MT_HIF_REMAP_L1_MT7916;
+
+ dev->bus_ops->rmw(&dev->mt76, l1_remap,
+ MT_HIF_REMAP_L1_MASK,
+ FIELD_PREP(MT_HIF_REMAP_L1_MASK, base));
/* use read to push write */
- mt76_rr(dev, MT_HIF_REMAP_L1);
+ dev->bus_ops->rr(&dev->mt76, l1_remap);
return MT_HIF_REMAP_BASE_L1 + offset;
}
static u32 mt7915_reg_map_l2(struct mt7915_dev *dev, u32 addr)
{
- u32 offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr);
- u32 base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr);
+ u32 offset, base;
- mt76_rmw_field(dev, MT_HIF_REMAP_L2, MT_HIF_REMAP_L2_MASK, base);
- /* use read to push write */
- mt76_rr(dev, MT_HIF_REMAP_L2);
+ if (is_mt7915(&dev->mt76)) {
+ offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr);
+ base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr);
+
+ dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2,
+ MT_HIF_REMAP_L2_MASK,
+ FIELD_PREP(MT_HIF_REMAP_L2_MASK, base));
- return MT_HIF_REMAP_BASE_L2 + offset;
+ /* use read to push write */
+ dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2);
+ } else {
+ u32 ofs = is_mt7986(&dev->mt76) ? 0x400000 : 0;
+
+ offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET_MT7916, addr);
+ base = FIELD_GET(MT_HIF_REMAP_L2_BASE_MT7916, addr);
+
+ dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2_MT7916 + ofs,
+ MT_HIF_REMAP_L2_MASK_MT7916,
+ FIELD_PREP(MT_HIF_REMAP_L2_MASK_MT7916, base));
+
+ /* use read to push write */
+ dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2_MT7916 + ofs);
+
+ offset += (MT_HIF_REMAP_BASE_L2_MT7916 + ofs);
+ }
+
+ return offset;
}
static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
{
- static const struct {
- u32 phys;
- u32 mapped;
- u32 size;
- } fixed_map[] = {
- { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
- { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure regs) */
- { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
- { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
- { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
- { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
- { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
- { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
- { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
- { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
- { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
- { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
- { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
- { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
- { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
- { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
- { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
- { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
- { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
- { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
- { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
- { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
- { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
- { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
- { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
- { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
- { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
- { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
- { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
- { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
- { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
- { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
- { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
- { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
- { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
- { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
- { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
- { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
- { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
- { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
- { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
- };
int i;
if (addr < 0x100000)
return addr;
- for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
+ if (!dev->reg.map) {
+ dev_err(dev->mt76.dev, "err: reg_map is null\n");
+ return addr;
+ }
+
+ for (i = 0; i < dev->reg.map_size; i++) {
u32 ofs;
- if (addr < fixed_map[i].phys)
+ if (addr < dev->reg.map[i].phys)
continue;
- ofs = addr - fixed_map[i].phys;
- if (ofs > fixed_map[i].size)
+ ofs = addr - dev->reg.map[i].phys;
+ if (ofs > dev->reg.map[i].size)
continue;
- return fixed_map[i].mapped + ofs;
+ return dev->reg.map[i].maps + ofs;
}
- if ((addr >= 0x18000000 && addr < 0x18c00000) ||
- (addr >= 0x70000000 && addr < 0x78000000))
+ if ((addr >= MT_INFRA_BASE && addr < MT_WFSYS0_PHY_START) ||
+ (addr >= MT_WFSYS0_PHY_START && addr < MT_WFSYS1_PHY_START) ||
+ (addr >= MT_WFSYS1_PHY_START && addr <= MT_WFSYS1_PHY_END))
+ return mt7915_reg_map_l1(dev, addr);
+
+ if (dev_is_pci(dev->mt76.dev) &&
+ ((addr >= MT_CBTOP1_PHY_START && addr <= MT_CBTOP1_PHY_END) ||
+ (addr >= MT_CBTOP2_PHY_START && addr <= MT_CBTOP2_PHY_END)))
+ return mt7915_reg_map_l1(dev, addr);
+
+ /* CONN_INFRA: convert to physical addr and use layer 1 remap */
+ if (addr >= MT_INFRA_MCU_START && addr <= MT_INFRA_MCU_END) {
+ addr = addr - MT_INFRA_MCU_START + MT_INFRA_BASE;
return mt7915_reg_map_l1(dev, addr);
+ }
return mt7915_reg_map_l2(dev, addr);
}
@@ -125,7 +464,9 @@ static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
return dev->bus_ops->rmw(mdev, addr, mask, val);
}
-int mt7915_mmio_init(struct mt76_dev *mdev, void __iomem *mem_base, int irq)
+static int mt7915_mmio_init(struct mt76_dev *mdev,
+ void __iomem *mem_base,
+ u32 device_id)
{
struct mt76_bus_ops *bus_ops;
struct mt7915_dev *dev;
@@ -133,6 +474,29 @@ int mt7915_mmio_init(struct mt76_dev *mdev, void __iomem *mem_base, int irq)
dev = container_of(mdev, struct mt7915_dev, mt76);
mt76_mmio_init(&dev->mt76, mem_base);
+ switch (device_id) {
+ case 0x7915:
+ dev->reg.reg_rev = mt7915_reg;
+ dev->reg.offs_rev = mt7915_offs;
+ dev->reg.map = mt7915_reg_map;
+ dev->reg.map_size = ARRAY_SIZE(mt7915_reg_map);
+ break;
+ case 0x7906:
+ dev->reg.reg_rev = mt7916_reg;
+ dev->reg.offs_rev = mt7916_offs;
+ dev->reg.map = mt7916_reg_map;
+ dev->reg.map_size = ARRAY_SIZE(mt7916_reg_map);
+ break;
+ case 0x7986:
+ dev->reg.reg_rev = mt7986_reg;
+ dev->reg.offs_rev = mt7916_offs;
+ dev->reg.map = mt7986_reg_map;
+ dev->reg.map_size = ARRAY_SIZE(mt7986_reg_map);
+ break;
+ default:
+ return -EINVAL;
+ }
+
dev->bus_ops = dev->mt76.bus;
bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
GFP_KERNEL);
@@ -144,11 +508,210 @@ int mt7915_mmio_init(struct mt76_dev *mdev, void __iomem *mem_base, int irq)
bus_ops->rmw = mt7915_rmw;
dev->mt76.bus = bus_ops;
- mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+ mdev->rev = (device_id << 16) |
(mt76_rr(dev, MT_HW_REV) & 0xff);
dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+ return 0;
+}
+
+void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev,
+ bool write_reg,
+ u32 clear, u32 set)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdev->mmio.irq_lock, flags);
+
+ mdev->mmio.irqmask &= ~clear;
+ mdev->mmio.irqmask |= set;
+
+ if (write_reg) {
+ mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
+ mt76_wr(dev, MT_INT1_MASK_CSR, mdev->mmio.irqmask);
+ }
+
+ spin_unlock_irqrestore(&mdev->mmio.irq_lock, flags);
+}
+
+static void mt7915_rx_poll_complete(struct mt76_dev *mdev,
+ enum mt76_rxq_id q)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+
+ mt7915_irq_enable(dev, MT_INT_RX(q));
+}
+
+/* TODO: support 2/4/6/8 MSI-X vectors */
+static void mt7915_irq_tasklet(struct tasklet_struct *t)
+{
+ struct mt7915_dev *dev = from_tasklet(dev, t, irq_tasklet);
+ u32 intr, intr1, mask;
+
mt76_wr(dev, MT_INT_MASK_CSR, 0);
+ if (dev->hif2)
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+
+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ intr &= dev->mt76.mmio.irqmask;
+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+ if (dev->hif2) {
+ intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR);
+ intr1 &= dev->mt76.mmio.irqmask;
+ mt76_wr(dev, MT_INT1_SOURCE_CSR, intr1);
+
+ intr |= intr1;
+ }
+
+ trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
+
+ mask = intr & MT_INT_RX_DONE_ALL;
+ if (intr & MT_INT_TX_DONE_MCU)
+ mask |= MT_INT_TX_DONE_MCU;
+
+ mt7915_irq_disable(dev, mask);
+
+ if (intr & MT_INT_TX_DONE_MCU)
+ napi_schedule(&dev->mt76.tx_napi);
+
+ if (intr & MT_INT_RX(MT_RXQ_MAIN))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]);
+
+ if (intr & MT_INT_RX(MT_RXQ_EXT))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_EXT]);
+
+ if (intr & MT_INT_RX(MT_RXQ_MCU))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_MCU]);
+
+ if (intr & MT_INT_RX(MT_RXQ_MCU_WA))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_MCU_WA]);
+
+ if (!is_mt7915(&dev->mt76) &&
+ (intr & MT_INT_RX(MT_RXQ_MAIN_WA)))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN_WA]);
+
+ if (intr & MT_INT_RX(MT_RXQ_EXT_WA))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_EXT_WA]);
+
+ if (intr & MT_INT_MCU_CMD) {
+ u32 val = mt76_rr(dev, MT_MCU_CMD);
+
+ mt76_wr(dev, MT_MCU_CMD, val);
+ if (val & MT_MCU_CMD_ERROR_MASK) {
+ dev->reset_state = val;
+ ieee80211_queue_work(mt76_hw(dev), &dev->reset_work);
+ wake_up(&dev->reset_wait);
+ }
+ }
+}
+
+irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
+{
+ struct mt7915_dev *dev = dev_instance;
+
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+ if (dev->hif2)
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+ return IRQ_NONE;
+
+ tasklet_schedule(&dev->irq_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
+ void __iomem *mem_base, u32 device_id)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ /* txwi_size = txd size + txp size */
+ .txwi_size = MT_TXD_SIZE + sizeof(struct mt7915_txp),
+ .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
+ .survey_flags = SURVEY_INFO_TIME_TX |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_BSS_RX,
+ .token_size = MT7915_TOKEN_SIZE,
+ .tx_prepare_skb = mt7915_tx_prepare_skb,
+ .tx_complete_skb = mt7915_tx_complete_skb,
+ .rx_skb = mt7915_queue_rx_skb,
+ .rx_check = mt7915_rx_check,
+ .rx_poll_complete = mt7915_rx_poll_complete,
+ .sta_ps = mt7915_sta_ps,
+ .sta_add = mt7915_mac_sta_add,
+ .sta_remove = mt7915_mac_sta_remove,
+ .update_survey = mt7915_update_channel,
+ };
+ struct ieee80211_ops *ops;
+ struct mt7915_dev *dev;
+ struct mt76_dev *mdev;
+ int ret;
+
+ ops = devm_kmemdup(pdev, &mt7915_ops, sizeof(mt7915_ops), GFP_KERNEL);
+ if (!ops)
+ return ERR_PTR(-ENOMEM);
+
+ mdev = mt76_alloc_device(pdev, sizeof(*dev), ops, &drv_ops);
+ if (!mdev)
+ return ERR_PTR(-ENOMEM);
+
+ dev = container_of(mdev, struct mt7915_dev, mt76);
+
+ ret = mt7915_mmio_init(mdev, mem_base, device_id);
+ if (ret)
+ goto error;
+
+ tasklet_setup(&dev->irq_tasklet, mt7915_irq_tasklet);
+
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
+ return dev;
+
+error:
+ mt76_free_device(&dev->mt76);
+
+ return ERR_PTR(ret);
+}
+
+static int __init mt7915_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&mt7915_hif_driver);
+ if (ret)
+ return ret;
+
+ ret = pci_register_driver(&mt7915_pci_driver);
+ if (ret)
+ goto error_pci;
+
+ if (IS_ENABLED(CONFIG_MT7986_WMAC)) {
+ ret = platform_driver_register(&mt7986_wmac_driver);
+ if (ret)
+ goto error_wmac;
+ }
return 0;
+
+error_wmac:
+ pci_unregister_driver(&mt7915_pci_driver);
+error_pci:
+ pci_unregister_driver(&mt7915_hif_driver);
+
+ return ret;
+}
+
+static void __exit mt7915_exit(void)
+{
+ if (IS_ENABLED(CONFIG_MT7986_WMAC))
+ platform_driver_unregister(&mt7986_wmac_driver);
+
+ pci_unregister_driver(&mt7915_pci_driver);
+ pci_unregister_driver(&mt7915_hif_driver);
}
+
+module_init(mt7915_init);
+module_exit(mt7915_exit);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index 42d887383e8d..6efa0a2e2345 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -6,13 +6,14 @@
#include <linux/interrupt.h>
#include <linux/ktime.h>
-#include "../mt76.h"
+#include "../mt76_connac.h"
#include "regs.h"
#define MT7915_MAX_INTERFACES 19
#define MT7915_MAX_WMM_SETS 4
#define MT7915_WTBL_SIZE 288
-#define MT7915_WTBL_RESERVED (MT7915_WTBL_SIZE - 1)
+#define MT7916_WTBL_SIZE 544
+#define MT7915_WTBL_RESERVED (mt7915_wtbl_size(dev) - 1)
#define MT7915_WTBL_STA (MT7915_WTBL_RESERVED - \
MT7915_MAX_INTERFACES)
@@ -30,10 +31,28 @@
#define MT7915_FIRMWARE_WM "mediatek/mt7915_wm.bin"
#define MT7915_ROM_PATCH "mediatek/mt7915_rom_patch.bin"
+#define MT7916_FIRMWARE_WA "mediatek/mt7916_wa.bin"
+#define MT7916_FIRMWARE_WM "mediatek/mt7916_wm.bin"
+#define MT7916_ROM_PATCH "mediatek/mt7916_rom_patch.bin"
+
+#define MT7986_FIRMWARE_WA "mediatek/mt7986_wa.bin"
+#define MT7986_FIRMWARE_WM "mediatek/mt7986_wm.bin"
+#define MT7986_FIRMWARE_WM_MT7975 "mediatek/mt7986_wm_mt7975.bin"
+#define MT7986_ROM_PATCH "mediatek/mt7986_rom_patch.bin"
+#define MT7986_ROM_PATCH_MT7975 "mediatek/mt7986_rom_patch_mt7975.bin"
+
#define MT7915_EEPROM_DEFAULT "mediatek/mt7915_eeprom.bin"
#define MT7915_EEPROM_DEFAULT_DBDC "mediatek/mt7915_eeprom_dbdc.bin"
+#define MT7916_EEPROM_DEFAULT "mediatek/mt7916_eeprom.bin"
+#define MT7986_EEPROM_MT7975_DEFAULT "mediatek/mt7986_eeprom_mt7975.bin"
+#define MT7986_EEPROM_MT7975_DUAL_DEFAULT "mediatek/mt7986_eeprom_mt7975_dual.bin"
+#define MT7986_EEPROM_MT7976_DEFAULT "mediatek/mt7986_eeprom_mt7976.bin"
+#define MT7986_EEPROM_MT7976_DEFAULT_DBDC "mediatek/mt7986_eeprom_mt7976_dbdc.bin"
+#define MT7986_EEPROM_MT7976_DUAL_DEFAULT "mediatek/mt7986_eeprom_mt7976_dual.bin"
#define MT7915_EEPROM_SIZE 3584
+#define MT7916_EEPROM_SIZE 4096
+
#define MT7915_EEPROM_BLOCK_SIZE 16
#define MT7915_TOKEN_SIZE 8192
@@ -41,11 +60,13 @@
#define MT7915_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
#define MT7915_THERMAL_THROTTLE_MAX 100
+#define MT7915_CDEV_THROTTLE_MAX 99
#define MT7915_SKU_RATE_NUM 161
#define MT7915_MAX_TWT_AGRT 16
#define MT7915_MAX_STA_TWT_AGRT 8
+#define MT7915_MAX_QUEUE (__MT_RXQ_MAX + __MT_MCUQ_MAX + 2)
struct mt7915_vif;
struct mt7915_sta;
@@ -68,9 +89,13 @@ enum mt7915_rxq_id {
MT7915_RXQ_MCU_WA_EXT,
};
-struct mt7915_sta_key_conf {
- s8 keyidx;
- u8 key[16];
+enum mt7916_rxq_id {
+ MT7916_RXQ_MCU_WM = 0,
+ MT7916_RXQ_MCU_WA,
+ MT7916_RXQ_MCU_WA_MAIN,
+ MT7916_RXQ_MCU_WA_EXT,
+ MT7916_RXQ_BAND0,
+ MT7916_RXQ_BAND1,
};
struct mt7915_twt_flow {
@@ -104,7 +129,7 @@ struct mt7915_sta {
struct mt76_sta_stats stats;
- struct mt7915_sta_key_conf bip;
+ struct mt76_connac_sta_key_conf bip;
struct {
u8 flowid_mask;
@@ -113,7 +138,9 @@ struct mt7915_sta {
};
struct mt7915_vif_cap {
- bool ldpc:1;
+ bool ht_ldpc:1;
+ bool vht_ldpc:1;
+ bool he_ldpc:1;
bool vht_su_ebfer:1;
bool vht_su_ebfee:1;
bool vht_mu_ebfer:1;
@@ -200,16 +227,18 @@ struct mt7915_phy {
struct mt76_phy *mt76;
struct mt7915_dev *dev;
- struct ieee80211_sband_iftype_data iftype[2][NUM_NL80211_IFTYPES];
+ struct ieee80211_sband_iftype_data iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES];
struct ieee80211_vif *monitor_vif;
struct thermal_cooling_device *cdev;
+ u8 cdev_state;
u8 throttle_state;
u32 throttle_temp[2]; /* 0: critical high, 1: maximum */
u32 rxfilter;
u64 omac_mask;
+ u8 band_idx;
u16 noise;
@@ -217,7 +246,6 @@ struct mt7915_phy {
u8 slottime;
u8 rdd_state;
- int dfs_state;
u32 rx_ampdu_ts;
u32 ampdu_ref;
@@ -247,12 +275,21 @@ struct mt7915_dev {
};
struct mt7915_hif *hif2;
+ struct mt7915_reg_desc reg;
+ u8 q_id[MT7915_MAX_QUEUE];
+ u32 q_int_mask[MT7915_MAX_QUEUE];
+ u32 wfdma_mask;
const struct mt76_bus_ops *bus_ops;
struct tasklet_struct irq_tasklet;
struct mt7915_phy phy;
+ /* monitor rx chain configured channel */
+ struct cfg80211_chan_def rdd2_chandef;
+ struct mt7915_phy *rdd2_phy;
+
u16 chainmask;
+ u16 chainshift;
u32 hif_idx;
struct work_struct init_work;
@@ -274,6 +311,10 @@ struct mt7915_dev {
bool ibf;
u8 fw_debug_wm;
u8 fw_debug_wa;
+ u8 fw_debug_bin;
+
+ struct dentry *debugfs_dir;
+ struct rchan *relay_fwlog;
void *cal;
@@ -281,6 +322,17 @@ struct mt7915_dev {
u8 table_mask;
u8 n_agrt;
} twt;
+
+ struct reset_control *rstc;
+ void __iomem *dcm;
+ void __iomem *sku;
+};
+
+enum {
+ WFDMA0 = 0x0,
+ WFDMA1,
+ WFDMA_EXT,
+ __MT_WFDMA_MAX,
};
enum {
@@ -300,6 +352,7 @@ enum {
enum {
MT_RX_SEL0,
MT_RX_SEL1,
+ MT_RX_SEL2, /* monitor chain */
};
enum mt7915_rdd_cmd {
@@ -345,21 +398,44 @@ mt7915_ext_phy(struct mt7915_dev *dev)
return phy->priv;
}
-static inline u8 mt7915_lmac_mapping(struct mt7915_dev *dev, u8 ac)
+static inline u32 mt7915_check_adie(struct mt7915_dev *dev, bool sku)
{
- /* LMAC uses the reverse order of mac80211 AC indexes */
- return 3 - ac;
+ u32 mask = sku ? MT_CONNINFRA_SKU_MASK : MT_ADIE_TYPE_MASK;
+
+ if (!is_mt7986(&dev->mt76))
+ return 0;
+
+ return mt76_rr(dev, MT_CONNINFRA_SKU_DEC_ADDR) & mask;
}
extern const struct ieee80211_ops mt7915_ops;
extern const struct mt76_testmode_ops mt7915_testmode_ops;
+extern struct pci_driver mt7915_pci_driver;
+extern struct pci_driver mt7915_hif_driver;
+extern struct platform_driver mt7986_wmac_driver;
+
+#ifdef CONFIG_MT7986_WMAC
+int mt7986_wmac_enable(struct mt7915_dev *dev);
+void mt7986_wmac_disable(struct mt7915_dev *dev);
+#else
+static inline int mt7986_wmac_enable(struct mt7915_dev *dev)
+{
+ return 0;
+}
-u32 mt7915_reg_map(struct mt7915_dev *dev, u32 addr);
+static inline void mt7986_wmac_disable(struct mt7915_dev *dev)
+{
+}
+#endif
+struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
+ void __iomem *mem_base, u32 device_id);
+irqreturn_t mt7915_irq_handler(int irq, void *dev_instance);
u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif);
int mt7915_register_device(struct mt7915_dev *dev);
void mt7915_unregister_device(struct mt7915_dev *dev);
int mt7915_eeprom_init(struct mt7915_dev *dev);
-void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy);
+void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev,
+ struct mt7915_phy *phy);
int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
struct ieee80211_channel *chan,
u8 chain_idx);
@@ -378,18 +454,12 @@ int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
struct ieee80211_vif *vif, int enable);
int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable);
-int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
struct ieee80211_ampdu_params *params,
bool add);
int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
struct ieee80211_ampdu_params *params,
bool add);
-int mt7915_mcu_add_key(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct mt7915_sta *msta, struct ieee80211_key_conf *key,
- enum set_key_cmd cmd);
int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct cfg80211_he_bss_color *he_bss_color);
int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -417,8 +487,6 @@ int mt7915_mcu_set_test_param(struct mt7915_dev *dev, u8 param, bool test_mode,
u8 en);
int mt7915_mcu_set_scs(struct mt7915_dev *dev, u8 band, bool enable);
int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band);
-int mt7915_mcu_set_rts_thresh(struct mt7915_phy *phy, u32 val);
-int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter);
int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable);
int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy);
int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len);
@@ -436,17 +504,22 @@ int mt7915_mcu_get_temperature(struct mt7915_phy *phy);
int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state);
int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct rate_info *rate);
-int mt7915_mcu_rdd_cmd(struct mt7915_dev *dev, enum mt7915_rdd_cmd cmd,
- u8 index, u8 rx_sel, u8 val);
+int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy,
+ struct cfg80211_chan_def *chandef);
int mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3);
int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 type, u8 ctrl);
int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level);
void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb);
void mt7915_mcu_exit(struct mt7915_dev *dev);
-static inline bool is_mt7915(struct mt76_dev *dev)
+static inline u16 mt7915_wtbl_size(struct mt7915_dev *dev)
+{
+ return is_mt7915(&dev->mt76) ? MT7915_WTBL_SIZE : MT7916_WTBL_SIZE;
+}
+
+static inline u16 mt7915_eeprom_size(struct mt7915_dev *dev)
{
- return mt76_chip(dev) == 0x7915;
+ return is_mt7915(&dev->mt76) ? MT7915_EEPROM_SIZE : MT7916_EEPROM_SIZE;
}
void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev, bool write_reg,
@@ -487,7 +560,6 @@ void mt7915_mac_work(struct work_struct *work);
void mt7915_mac_reset_work(struct work_struct *work);
void mt7915_mac_sta_rc_work(struct work_struct *work);
void mt7915_mac_update_stats(struct mt7915_phy *phy);
-int mt7915_mmio_init(struct mt76_dev *mdev, void __iomem *mem_base, int irq);
void mt7915_mac_twt_teardown_flow(struct mt7915_dev *dev,
struct mt7915_sta *msta,
u8 flowid);
@@ -500,7 +572,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt76_tx_info *tx_info);
void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7915_tx_token_put(struct mt7915_dev *dev);
-int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc);
+int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base);
void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len);
@@ -514,6 +586,8 @@ void mt7915_update_channel(struct mt76_phy *mphy);
int mt7915_mcu_muru_debug_set(struct mt7915_dev *dev, bool enable);
int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy, void *ms);
int mt7915_init_debugfs(struct mt7915_phy *phy);
+void mt7915_debugfs_rx_fw_monitor(struct mt7915_dev *dev, const void *data, int len);
+bool mt7915_debugfs_rx_log(struct mt7915_dev *dev, const void *data, int len);
#ifdef CONFIG_MAC80211_DEBUGFS
void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
index 8130ea43971f..6f819c41a4c4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
@@ -18,35 +18,17 @@ static u32 hif_idx;
static const struct pci_device_id mt7915_pci_device_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7915) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7906) },
{ },
};
static const struct pci_device_id mt7915_hif_device_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7916) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x790a) },
{ },
};
-void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev, bool write_reg,
- u32 clear, u32 set)
-{
- struct mt76_dev *mdev = &dev->mt76;
- unsigned long flags;
-
- spin_lock_irqsave(&mdev->mmio.irq_lock, flags);
-
- mdev->mmio.irqmask &= ~clear;
- mdev->mmio.irqmask |= set;
-
- if (write_reg) {
- mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
- mt76_wr(dev, MT_INT1_MASK_CSR, mdev->mmio.irqmask);
- }
-
- spin_unlock_irqrestore(&mdev->mmio.irq_lock, flags);
-}
-
-static struct mt7915_hif *
-mt7915_pci_get_hif2(struct mt7915_dev *dev)
+static struct mt7915_hif *mt7915_pci_get_hif2(u32 idx)
{
struct mt7915_hif *hif;
u32 val;
@@ -56,7 +38,7 @@ mt7915_pci_get_hif2(struct mt7915_dev *dev)
list_for_each_entry(hif, &hif_list, list) {
val = readl(hif->regs + MT_PCIE_RECOG_ID);
val &= MT_PCIE_RECOG_ID_MASK;
- if (val != dev->hif_idx)
+ if (val != idx)
continue;
get_device(hif->dev);
@@ -78,123 +60,17 @@ static void mt7915_put_hif2(struct mt7915_hif *hif)
put_device(hif->dev);
}
-static void
-mt7915_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
-{
- struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
- static const u32 rx_irq_mask[] = {
- [MT_RXQ_MAIN] = MT_INT_RX_DONE_DATA0,
- [MT_RXQ_EXT] = MT_INT_RX_DONE_DATA1,
- [MT_RXQ_MCU] = MT_INT_RX_DONE_WM,
- [MT_RXQ_MCU_WA] = MT_INT_RX_DONE_WA,
- [MT_RXQ_EXT_WA] = MT_INT_RX_DONE_WA_EXT,
- };
-
- mt7915_irq_enable(dev, rx_irq_mask[q]);
-}
-
-/* TODO: support 2/4/6/8 MSI-X vectors */
-static void mt7915_irq_tasklet(struct tasklet_struct *t)
+static struct mt7915_hif *mt7915_pci_init_hif2(struct pci_dev *pdev)
{
- struct mt7915_dev *dev = from_tasklet(dev, t, irq_tasklet);
- u32 intr, intr1, mask;
-
- mt76_wr(dev, MT_INT_MASK_CSR, 0);
- if (dev->hif2)
- mt76_wr(dev, MT_INT1_MASK_CSR, 0);
-
- intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
- intr &= dev->mt76.mmio.irqmask;
- mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
-
- if (dev->hif2) {
- intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR);
- intr1 &= dev->mt76.mmio.irqmask;
- mt76_wr(dev, MT_INT1_SOURCE_CSR, intr1);
-
- intr |= intr1;
- }
-
- trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
-
- mask = intr & MT_INT_RX_DONE_ALL;
- if (intr & MT_INT_TX_DONE_MCU)
- mask |= MT_INT_TX_DONE_MCU;
+ hif_idx++;
+ if (!pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7916, NULL) &&
+ !pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x790a, NULL))
+ return NULL;
- mt7915_irq_disable(dev, mask);
+ writel(hif_idx | MT_PCIE_RECOG_ID_SEM,
+ pcim_iomap_table(pdev)[0] + MT_PCIE_RECOG_ID);
- if (intr & MT_INT_TX_DONE_MCU)
- napi_schedule(&dev->mt76.tx_napi);
-
- if (intr & MT_INT_RX_DONE_DATA0)
- napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]);
-
- if (intr & MT_INT_RX_DONE_DATA1)
- napi_schedule(&dev->mt76.napi[MT_RXQ_EXT]);
-
- if (intr & MT_INT_RX_DONE_WM)
- napi_schedule(&dev->mt76.napi[MT_RXQ_MCU]);
-
- if (intr & MT_INT_RX_DONE_WA)
- napi_schedule(&dev->mt76.napi[MT_RXQ_MCU_WA]);
-
- if (intr & MT_INT_RX_DONE_WA_EXT)
- napi_schedule(&dev->mt76.napi[MT_RXQ_EXT_WA]);
-
- if (intr & MT_INT_MCU_CMD) {
- u32 val = mt76_rr(dev, MT_MCU_CMD);
-
- mt76_wr(dev, MT_MCU_CMD, val);
- if (val & MT_MCU_CMD_ERROR_MASK) {
- dev->reset_state = val;
- ieee80211_queue_work(mt76_hw(dev), &dev->reset_work);
- wake_up(&dev->reset_wait);
- }
- }
-}
-
-static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
-{
- struct mt7915_dev *dev = dev_instance;
-
- mt76_wr(dev, MT_INT_MASK_CSR, 0);
- if (dev->hif2)
- mt76_wr(dev, MT_INT1_MASK_CSR, 0);
-
- if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
- return IRQ_NONE;
-
- tasklet_schedule(&dev->irq_tasklet);
-
- return IRQ_HANDLED;
-}
-
-static void mt7915_pci_init_hif2(struct mt7915_dev *dev)
-{
- struct mt7915_hif *hif;
-
- dev->hif_idx = ++hif_idx;
- if (!pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7916, NULL))
- return;
-
- mt76_wr(dev, MT_PCIE_RECOG_ID, dev->hif_idx | MT_PCIE_RECOG_ID_SEM);
-
- hif = mt7915_pci_get_hif2(dev);
- if (!hif)
- return;
-
- dev->hif2 = hif;
-
- mt76_wr(dev, MT_INT1_MASK_CSR, 0);
-
- if (devm_request_irq(dev->mt76.dev, hif->irq, mt7915_irq_handler,
- IRQF_SHARED, KBUILD_MODNAME "-hif", dev)) {
- mt7915_put_hif2(hif);
- hif = NULL;
- }
-
- /* master switch of PCIe tnterrupt enable */
- mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
+ return mt7915_pci_get_hif2(hif_idx);
}
static int mt7915_pci_hif2_probe(struct pci_dev *pdev)
@@ -219,26 +95,10 @@ static int mt7915_pci_hif2_probe(struct pci_dev *pdev)
static int mt7915_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- static const struct mt76_driver_ops drv_ops = {
- /* txwi_size = txd size + txp size */
- .txwi_size = MT_TXD_SIZE + sizeof(struct mt7915_txp),
- .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
- .survey_flags = SURVEY_INFO_TIME_TX |
- SURVEY_INFO_TIME_RX |
- SURVEY_INFO_TIME_BSS_RX,
- .token_size = MT7915_TOKEN_SIZE,
- .tx_prepare_skb = mt7915_tx_prepare_skb,
- .tx_complete_skb = mt7915_tx_complete_skb,
- .rx_skb = mt7915_queue_rx_skb,
- .rx_check = mt7915_rx_check,
- .rx_poll_complete = mt7915_rx_poll_complete,
- .sta_ps = mt7915_sta_ps,
- .sta_add = mt7915_mac_sta_add,
- .sta_remove = mt7915_mac_sta_remove,
- .update_survey = mt7915_update_channel,
- };
struct mt7915_dev *dev;
struct mt76_dev *mdev;
+ struct mt7915_hif *hif2;
+ int irq;
int ret;
ret = pcim_enable_device(pdev);
@@ -257,48 +117,65 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
mt76_pci_disable_aspm(pdev);
- if (id->device == 0x7916)
+ if (id->device == 0x7916 || id->device == 0x790a)
return mt7915_pci_hif2_probe(pdev);
- mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7915_ops,
- &drv_ops);
- if (!mdev)
- return -ENOMEM;
+ dev = mt7915_mmio_probe(&pdev->dev, pcim_iomap_table(pdev)[0],
+ id->device);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
- dev = container_of(mdev, struct mt7915_dev, mt76);
+ mdev = &dev->mt76;
+ hif2 = mt7915_pci_init_hif2(pdev);
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
if (ret < 0)
- goto free;
+ goto free_device;
- ret = mt7915_mmio_init(mdev, pcim_iomap_table(pdev)[0], pdev->irq);
+ irq = pdev->irq;
+ ret = devm_request_irq(mdev->dev, irq, mt7915_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
- goto error;
-
- tasklet_setup(&dev->irq_tasklet, mt7915_irq_tasklet);
+ goto free_irq_vector;
mt76_wr(dev, MT_INT_MASK_CSR, 0);
 /* master switch of PCIe interrupt enable */
mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
- ret = devm_request_irq(mdev->dev, pdev->irq, mt7915_irq_handler,
- IRQF_SHARED, KBUILD_MODNAME, dev);
- if (ret)
- goto error;
+ if (hif2) {
+ dev->hif2 = hif2;
- mt7915_pci_init_hif2(dev);
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+ /* master switch of PCIe interrupt enable */
+ if (is_mt7915(mdev))
+ mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
+ else
+ mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE_MT7916, 0xff);
+
+ ret = devm_request_irq(mdev->dev, dev->hif2->irq,
+ mt7915_irq_handler, IRQF_SHARED,
+ KBUILD_MODNAME "-hif", dev);
+ if (ret)
+ goto free_hif2;
+ }
ret = mt7915_register_device(dev);
if (ret)
- goto free_irq;
+ goto free_hif2_irq;
return 0;
-free_irq:
- devm_free_irq(mdev->dev, pdev->irq, dev);
-error:
+
+free_hif2_irq:
+ if (dev->hif2)
+ devm_free_irq(mdev->dev, dev->hif2->irq, dev);
+free_hif2:
+ if (dev->hif2)
+ put_device(dev->hif2->dev);
+ devm_free_irq(mdev->dev, irq, dev);
+free_irq_vector:
pci_free_irq_vectors(pdev);
-free:
+free_device:
mt76_free_device(&dev->mt76);
return ret;
@@ -322,47 +199,25 @@ static void mt7915_pci_remove(struct pci_dev *pdev)
mt7915_unregister_device(dev);
}
-static struct pci_driver mt7915_hif_driver = {
+struct pci_driver mt7915_hif_driver = {
.name = KBUILD_MODNAME "_hif",
.id_table = mt7915_hif_device_table,
.probe = mt7915_pci_probe,
.remove = mt7915_hif_remove,
};
-static struct pci_driver mt7915_pci_driver = {
+struct pci_driver mt7915_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = mt7915_pci_device_table,
.probe = mt7915_pci_probe,
.remove = mt7915_pci_remove,
};
-static int __init mt7915_init(void)
-{
- int ret;
-
- ret = pci_register_driver(&mt7915_hif_driver);
- if (ret)
- return ret;
-
- ret = pci_register_driver(&mt7915_pci_driver);
- if (ret)
- pci_unregister_driver(&mt7915_hif_driver);
-
- return ret;
-}
-
-static void __exit mt7915_exit(void)
-{
- pci_unregister_driver(&mt7915_pci_driver);
- pci_unregister_driver(&mt7915_hif_driver);
-}
-
-module_init(mt7915_init);
-module_exit(mt7915_exit);
-
MODULE_DEVICE_TABLE(pci, mt7915_pci_device_table);
MODULE_DEVICE_TABLE(pci, mt7915_hif_device_table);
MODULE_FIRMWARE(MT7915_FIRMWARE_WA);
MODULE_FIRMWARE(MT7915_FIRMWARE_WM);
MODULE_FIRMWARE(MT7915_ROM_PATCH);
-MODULE_LICENSE("Dual BSD/GPL");
+MODULE_FIRMWARE(MT7916_FIRMWARE_WA);
+MODULE_FIRMWARE(MT7916_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7916_ROM_PATCH);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
index 59693535b098..e5f93c40591c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
@@ -4,41 +4,152 @@
#ifndef __MT7915_REGS_H
#define __MT7915_REGS_H
+struct __map {
+ u32 phys;
+ u32 maps;
+ u32 size;
+};
+
+/* used to differentiate between generations */
+struct mt7915_reg_desc {
+ const u32 *reg_rev;
+ const u32 *offs_rev;
+ const struct __map *map;
+ u32 map_size;
+};
+
+enum reg_rev {
+ INT_SOURCE_CSR,
+ INT_MASK_CSR,
+ INT1_SOURCE_CSR,
+ INT1_MASK_CSR,
+ INT_MCU_CMD_SOURCE,
+ INT_MCU_CMD_EVENT,
+ WFDMA0_ADDR,
+ WFDMA0_PCIE1_ADDR,
+ WFDMA_EXT_CSR_ADDR,
+ CBTOP1_PHY_END,
+ INFRA_MCU_ADDR_END,
+ __MT_REG_MAX,
+};
+
+enum offs_rev {
+ TMAC_CDTR,
+ TMAC_ODTR,
+ TMAC_ATCR,
+ TMAC_TRCR0,
+ TMAC_ICR0,
+ TMAC_ICR1,
+ TMAC_CTCR0,
+ TMAC_TFCR0,
+ MDP_BNRCFR0,
+ MDP_BNRCFR1,
+ ARB_DRNGR0,
+ ARB_SCR,
+ RMAC_MIB_AIRTIME14,
+ AGG_AWSCR0,
+ AGG_PCR0,
+ AGG_ACR0,
+ AGG_MRCR,
+ AGG_ATCR1,
+ AGG_ATCR3,
+ LPON_UTTR0,
+ LPON_UTTR1,
+ LPON_FRCR,
+ MIB_SDR3,
+ MIB_SDR4,
+ MIB_SDR5,
+ MIB_SDR7,
+ MIB_SDR8,
+ MIB_SDR9,
+ MIB_SDR10,
+ MIB_SDR11,
+ MIB_SDR12,
+ MIB_SDR13,
+ MIB_SDR14,
+ MIB_SDR15,
+ MIB_SDR16,
+ MIB_SDR17,
+ MIB_SDR18,
+ MIB_SDR19,
+ MIB_SDR20,
+ MIB_SDR21,
+ MIB_SDR22,
+ MIB_SDR23,
+ MIB_SDR24,
+ MIB_SDR25,
+ MIB_SDR27,
+ MIB_SDR28,
+ MIB_SDR29,
+ MIB_SDRVEC,
+ MIB_SDR31,
+ MIB_SDR32,
+ MIB_SDRMUBF,
+ MIB_DR8,
+ MIB_DR9,
+ MIB_DR11,
+ MIB_MB_SDR0,
+ MIB_MB_SDR1,
+ TX_AGG_CNT,
+ TX_AGG_CNT2,
+ MIB_ARNG,
+ WTBLON_TOP_WDUCR,
+ WTBL_UPDATE,
+ PLE_FL_Q_EMPTY,
+ PLE_FL_Q_CTRL,
+ PLE_AC_QEMPTY,
+ PLE_FREEPG_CNT,
+ PLE_FREEPG_HEAD_TAIL,
+ PLE_PG_HIF_GROUP,
+ PLE_HIF_PG_INFO,
+ AC_OFFSET,
+ ETBF_PAR_RPT0,
+ __MT_OFFS_MAX,
+};
+
+#define __REG(id) (dev->reg.reg_rev[(id)])
+#define __OFFS(id) (dev->reg.offs_rev[(id)])
+
/* MCU WFDMA0 */
#define MT_MCU_WFDMA0_BASE 0x2000
#define MT_MCU_WFDMA0(ofs) (MT_MCU_WFDMA0_BASE + (ofs))
+
#define MT_MCU_WFDMA0_DUMMY_CR MT_MCU_WFDMA0(0x120)
/* MCU WFDMA1 */
#define MT_MCU_WFDMA1_BASE 0x3000
#define MT_MCU_WFDMA1(ofs) (MT_MCU_WFDMA1_BASE + (ofs))
-#define MT_MCU_INT_EVENT MT_MCU_WFDMA1(0x108)
+#define MT_MCU_INT_EVENT __REG(INT_MCU_CMD_EVENT)
#define MT_MCU_INT_EVENT_DMA_STOPPED BIT(0)
#define MT_MCU_INT_EVENT_DMA_INIT BIT(1)
#define MT_MCU_INT_EVENT_SER_TRIGGER BIT(2)
#define MT_MCU_INT_EVENT_RESET_DONE BIT(3)
-#define MT_PLE_BASE 0x8000
+/* PLE */
+#define MT_PLE_BASE 0x820c0000
#define MT_PLE(ofs) (MT_PLE_BASE + (ofs))
-#define MT_FL_Q_EMPTY 0x0b0
-#define MT_FL_Q0_CTRL 0x1b0
-#define MT_FL_Q2_CTRL 0x1b8
-#define MT_FL_Q3_CTRL 0x1bc
-
-#define MT_PLE_FREEPG_CNT MT_PLE(0x100)
-#define MT_PLE_FREEPG_HEAD_TAIL MT_PLE(0x104)
-#define MT_PLE_PG_HIF_GROUP MT_PLE(0x110)
-#define MT_PLE_HIF_PG_INFO MT_PLE(0x114)
-#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x500 + 0x40 * (ac) + \
- ((n) << 2))
+#define MT_FL_Q_EMPTY MT_PLE(__OFFS(PLE_FL_Q_EMPTY))
+#define MT_FL_Q0_CTRL MT_PLE(__OFFS(PLE_FL_Q_CTRL))
+#define MT_FL_Q2_CTRL MT_PLE(__OFFS(PLE_FL_Q_CTRL) + 0x8)
+#define MT_FL_Q3_CTRL MT_PLE(__OFFS(PLE_FL_Q_CTRL) + 0xc)
+
+#define MT_PLE_FREEPG_CNT MT_PLE(__OFFS(PLE_FREEPG_CNT))
+#define MT_PLE_FREEPG_HEAD_TAIL MT_PLE(__OFFS(PLE_FREEPG_HEAD_TAIL))
+#define MT_PLE_PG_HIF_GROUP MT_PLE(__OFFS(PLE_PG_HIF_GROUP))
+#define MT_PLE_HIF_PG_INFO MT_PLE(__OFFS(PLE_HIF_PG_INFO))
+
+#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(__OFFS(PLE_AC_QEMPTY) + \
+ __OFFS(AC_OFFSET) * \
+ (ac) + ((n) << 2))
#define MT_PLE_AMSDU_PACK_MSDU_CNT(n) MT_PLE(0x10e0 + ((n) << 2))
-#define MT_PSE_BASE 0xc000
+#define MT_PSE_BASE 0x820c8000
#define MT_PSE(ofs) (MT_PSE_BASE + (ofs))
-#define MT_MDP_BASE 0xf000
+/* WF MDP TOP */
+#define MT_MDP_BASE 0x820cd000
#define MT_MDP(ofs) (MT_MDP_BASE + (ofs))
#define MT_MDP_DCR0 MT_MDP(0x000)
@@ -47,73 +158,76 @@
#define MT_MDP_DCR1 MT_MDP(0x004)
#define MT_MDP_DCR1_MAX_RX_LEN GENMASK(15, 3)
-#define MT_MDP_BNRCFR0(_band) MT_MDP(0x070 + ((_band) << 8))
+#define MT_MDP_BNRCFR0(_band) MT_MDP(__OFFS(MDP_BNRCFR0) + \
+ ((_band) << 8))
#define MT_MDP_RCFR0_MCU_RX_MGMT GENMASK(5, 4)
#define MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR GENMASK(7, 6)
#define MT_MDP_RCFR0_MCU_RX_CTL_BAR GENMASK(9, 8)
-#define MT_MDP_BNRCFR1(_band) MT_MDP(0x074 + ((_band) << 8))
+#define MT_MDP_BNRCFR1(_band) MT_MDP(__OFFS(MDP_BNRCFR1) + \
+ ((_band) << 8))
#define MT_MDP_RCFR1_MCU_RX_BYPASS GENMASK(23, 22)
#define MT_MDP_RCFR1_RX_DROPPED_UCAST GENMASK(28, 27)
#define MT_MDP_RCFR1_RX_DROPPED_MCAST GENMASK(30, 29)
#define MT_MDP_TO_HIF 0
#define MT_MDP_TO_WM 1
-/* TMAC: band 0(0x21000), band 1(0xa1000) */
-#define MT_WF_TMAC_BASE(_band) ((_band) ? 0xa1000 : 0x21000)
+/* TMAC: band 0(0x820e4000), band 1(0x820f4000) */
+#define MT_WF_TMAC_BASE(_band) ((_band) ? 0x820f4000 : 0x820e4000)
#define MT_WF_TMAC(_band, ofs) (MT_WF_TMAC_BASE(_band) + (ofs))
#define MT_TMAC_TCR0(_band) MT_WF_TMAC(_band, 0)
#define MT_TMAC_TCR0_TX_BLINK GENMASK(7, 6)
#define MT_TMAC_TCR0_TBTT_STOP_CTRL BIT(25)
-#define MT_TMAC_CDTR(_band) MT_WF_TMAC(_band, 0x090)
-#define MT_TMAC_ODTR(_band) MT_WF_TMAC(_band, 0x094)
+#define MT_TMAC_CDTR(_band) MT_WF_TMAC(_band, __OFFS(TMAC_CDTR))
+ #define MT_TMAC_ODTR(_band) MT_WF_TMAC(_band, __OFFS(TMAC_ODTR))
#define MT_TIMEOUT_VAL_PLCP GENMASK(15, 0)
#define MT_TIMEOUT_VAL_CCA GENMASK(31, 16)
-#define MT_TMAC_ATCR(_band) MT_WF_TMAC(_band, 0x098)
+#define MT_TMAC_ATCR(_band) MT_WF_TMAC(_band, __OFFS(TMAC_ATCR))
#define MT_TMAC_ATCR_TXV_TOUT GENMASK(7, 0)
-#define MT_TMAC_TRCR0(_band) MT_WF_TMAC(_band, 0x09c)
+#define MT_TMAC_TRCR0(_band) MT_WF_TMAC(_band, __OFFS(TMAC_TRCR0))
#define MT_TMAC_TRCR0_TR2T_CHK GENMASK(8, 0)
#define MT_TMAC_TRCR0_I2T_CHK GENMASK(24, 16)
-#define MT_TMAC_ICR0(_band) MT_WF_TMAC(_band, 0x0a4)
-#define MT_IFS_EIFS_OFDM GENMASK(8, 0)
+#define MT_TMAC_ICR0(_band) MT_WF_TMAC(_band, __OFFS(TMAC_ICR0))
+#define MT_IFS_EIFS_OFDM GENMASK(8, 0)
#define MT_IFS_RIFS GENMASK(14, 10)
#define MT_IFS_SIFS GENMASK(22, 16)
#define MT_IFS_SLOT GENMASK(30, 24)
-#define MT_TMAC_ICR1(_band) MT_WF_TMAC(_band, 0x0b4)
+#define MT_TMAC_ICR1(_band) MT_WF_TMAC(_band, __OFFS(TMAC_ICR1))
#define MT_IFS_EIFS_CCK GENMASK(8, 0)
-#define MT_TMAC_CTCR0(_band) MT_WF_TMAC(_band, 0x0f4)
+#define MT_TMAC_CTCR0(_band) MT_WF_TMAC(_band, __OFFS(TMAC_CTCR0))
#define MT_TMAC_CTCR0_INS_DDLMT_REFTIME GENMASK(5, 0)
#define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17)
#define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN BIT(18)
-#define MT_TMAC_TFCR0(_band) MT_WF_TMAC(_band, 0x1e0)
+#define MT_TMAC_TFCR0(_band) MT_WF_TMAC(_band, __OFFS(TMAC_TFCR0))
-#define MT_WF_DMA_BASE(_band) ((_band) ? 0xa1e00 : 0x21e00)
+/* WF DMA TOP: band 0(0x820e7000),band 1(0x820f7000) */
+#define MT_WF_DMA_BASE(_band) ((_band) ? 0x820f7000 : 0x820e7000)
#define MT_WF_DMA(_band, ofs) (MT_WF_DMA_BASE(_band) + (ofs))
#define MT_DMA_DCR0(_band) MT_WF_DMA(_band, 0x000)
#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 3)
#define MT_DMA_DCR0_RXD_G5_EN BIT(23)
-/* ETBF: band 0(0x24000), band 1(0xa4000) */
-#define MT_WF_ETBF_BASE(_band) ((_band) ? 0xa4000 : 0x24000)
+/* ETBF: band 0(0x820ea000), band 1(0x820fa000) */
+#define MT_WF_ETBF_BASE(_band) ((_band) ? 0x820fa000 : 0x820ea000)
#define MT_WF_ETBF(_band, ofs) (MT_WF_ETBF_BASE(_band) + (ofs))
#define MT_ETBF_TX_NDP_BFRP(_band) MT_WF_ETBF(_band, 0x040)
#define MT_ETBF_TX_FB_CPL GENMASK(31, 16)
#define MT_ETBF_TX_FB_TRI GENMASK(15, 0)
-#define MT_ETBF_RX_FB_CONT(_band) MT_WF_ETBF(_band, 0x068)
-#define MT_ETBF_RX_FB_BW GENMASK(7, 6)
-#define MT_ETBF_RX_FB_NC GENMASK(5, 3)
-#define MT_ETBF_RX_FB_NR GENMASK(2, 0)
+#define MT_ETBF_PAR_RPT0(_band) MT_WF_ETBF(_band, __OFFS(ETBF_PAR_RPT0))
+#define MT_ETBF_PAR_RPT0_FB_BW GENMASK(7, 6)
+#define MT_ETBF_PAR_RPT0_FB_NC GENMASK(5, 3)
+#define MT_ETBF_PAR_RPT0_FB_NR GENMASK(2, 0)
#define MT_ETBF_TX_APP_CNT(_band) MT_WF_ETBF(_band, 0x0f0)
#define MT_ETBF_TX_IBF_CNT GENMASK(31, 16)
@@ -125,174 +239,209 @@
#define MT_ETBF_RX_FB_VHT GENMASK(15, 8)
#define MT_ETBF_RX_FB_HT GENMASK(7, 0)
-/* LPON: band 0(0x24200), band 1(0xa4200) */
-#define MT_WF_LPON_BASE(_band) ((_band) ? 0xa4200 : 0x24200)
+/* LPON: band 0(0x820eb000), band 1(0x820fb000) */
+#define MT_WF_LPON_BASE(_band) ((_band) ? 0x820fb000 : 0x820eb000)
#define MT_WF_LPON(_band, ofs) (MT_WF_LPON_BASE(_band) + (ofs))
-#define MT_LPON_UTTR0(_band) MT_WF_LPON(_band, 0x080)
-#define MT_LPON_UTTR1(_band) MT_WF_LPON(_band, 0x084)
+#define MT_LPON_UTTR0(_band) MT_WF_LPON(_band, __OFFS(LPON_UTTR0))
+#define MT_LPON_UTTR1(_band) MT_WF_LPON(_band, __OFFS(LPON_UTTR1))
+#define MT_LPON_FRCR(_band) MT_WF_LPON(_band, __OFFS(LPON_FRCR))
-#define MT_LPON_TCR(_band, n) MT_WF_LPON(_band, 0x0a8 + (n) * 4)
+#define MT_LPON_TCR(_band, n) MT_WF_LPON(_band, 0x0a8 + \
+ (((n) * 4) << 1))
+#define MT_LPON_TCR_MT7916(_band, n) MT_WF_LPON(_band, 0x0a8 + \
+ (((n) * 4) << 4))
#define MT_LPON_TCR_SW_MODE GENMASK(1, 0)
#define MT_LPON_TCR_SW_WRITE BIT(0)
#define MT_LPON_TCR_SW_ADJUST BIT(1)
#define MT_LPON_TCR_SW_READ GENMASK(1, 0)
-/* MIB: band 0(0x24800), band 1(0xa4800) */
+/* MIB: band 0(0x820ed000), band 1(0x820fd000) */
/* These counters are (mostly?) clear-on-read. So, some should not
* be read at all in case firmware is already reading them. These
* are commented with 'DNR' below. The DNR stats will be read by querying
* the firmware API for the appropriate message. For counters the driver
* does read, the driver should accumulate the counters.
*/
-#define MT_WF_MIB_BASE(_band) ((_band) ? 0xa4800 : 0x24800)
+#define MT_WF_MIB_BASE(_band) ((_band) ? 0x820fd000 : 0x820ed000)
#define MT_WF_MIB(_band, ofs) (MT_WF_MIB_BASE(_band) + (ofs))
#define MT_MIB_SDR0(_band) MT_WF_MIB(_band, 0x010)
#define MT_MIB_SDR0_BERACON_TX_CNT_MASK GENMASK(15, 0)
-#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x014)
+#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR3))
#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(15, 0)
+#define MT_MIB_SDR3_FCS_ERR_MASK_MT7916 GENMASK(31, 16)
-#define MT_MIB_SDR4(_band) MT_WF_MIB(_band, 0x018)
+#define MT_MIB_SDR4(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR4))
#define MT_MIB_SDR4_RX_FIFO_FULL_MASK GENMASK(15, 0)
/* rx mpdu counter, full 32 bits */
-#define MT_MIB_SDR5(_band) MT_WF_MIB(_band, 0x01c)
+#define MT_MIB_SDR5(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR5))
#define MT_MIB_SDR6(_band) MT_WF_MIB(_band, 0x020)
#define MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK GENMASK(15, 0)
-#define MT_MIB_SDR7(_band) MT_WF_MIB(_band, 0x024)
+#define MT_MIB_SDR7(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR7))
#define MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK GENMASK(15, 0)
-#define MT_MIB_SDR8(_band) MT_WF_MIB(_band, 0x028)
+#define MT_MIB_SDR8(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR8))
#define MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK GENMASK(15, 0)
/* aka CCA_NAV_TX_TIME */
-#define MT_MIB_SDR9_DNR(_band) MT_WF_MIB(_band, 0x02c)
-#define MT_MIB_SDR9_CCA_BUSY_TIME_MASK GENMASK(23, 0)
+#define MT_MIB_SDR9_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR9))
+#define MT_MIB_SDR9_CCA_BUSY_TIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR10_DNR(_band) MT_WF_MIB(_band, 0x030)
-#define MT_MIB_SDR10_MRDY_COUNT_MASK GENMASK(25, 0)
+#define MT_MIB_SDR10_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR10))
+#define MT_MIB_SDR10_MRDY_COUNT_MASK GENMASK(25, 0)
+#define MT_MIB_SDR10_MRDY_COUNT_MASK_MT7916 GENMASK(31, 0)
-#define MT_MIB_SDR11(_band) MT_WF_MIB(_band, 0x034)
+#define MT_MIB_SDR11(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR11))
#define MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK GENMASK(15, 0)
/* tx ampdu cnt, full 32 bits */
-#define MT_MIB_SDR12(_band) MT_WF_MIB(_band, 0x038)
+#define MT_MIB_SDR12(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR12))
-#define MT_MIB_SDR13(_band) MT_WF_MIB(_band, 0x03c)
+#define MT_MIB_SDR13(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR13))
#define MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK GENMASK(15, 0)
/* counts all mpdus in ampdu, regardless of success */
-#define MT_MIB_SDR14(_band) MT_WF_MIB(_band, 0x040)
+#define MT_MIB_SDR14(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR14))
#define MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK GENMASK(23, 0)
+#define MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916 GENMASK(31, 0)
/* counts all successfully tx'd mpdus in ampdu */
-#define MT_MIB_SDR15(_band) MT_WF_MIB(_band, 0x044)
+#define MT_MIB_SDR15(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR15))
#define MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK GENMASK(23, 0)
+#define MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916 GENMASK(31, 0)
/* in units of 'us' */
-#define MT_MIB_SDR16_DNR(_band) MT_WF_MIB(_band, 0x048)
+#define MT_MIB_SDR16_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR16))
#define MT_MIB_SDR16_PRIMARY_CCA_BUSY_TIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR17_DNR(_band) MT_WF_MIB(_band, 0x04c)
+#define MT_MIB_SDR17_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR17))
#define MT_MIB_SDR17_SECONDARY_CCA_BUSY_TIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR18(_band) MT_WF_MIB(_band, 0x050)
+#define MT_MIB_SDR18(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR18))
#define MT_MIB_SDR18_PRIMARY_ENERGY_DETECT_TIME_MASK GENMASK(23, 0)
/* units are us */
-#define MT_MIB_SDR19_DNR(_band) MT_WF_MIB(_band, 0x054)
+#define MT_MIB_SDR19_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR19))
#define MT_MIB_SDR19_CCK_MDRDY_TIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR20_DNR(_band) MT_WF_MIB(_band, 0x058)
+#define MT_MIB_SDR20_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR20))
#define MT_MIB_SDR20_OFDM_VHT_MDRDY_TIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR21_DNR(_band) MT_WF_MIB(_band, 0x05c)
+#define MT_MIB_SDR21_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR21))
#define MT_MIB_SDR20_GREEN_MDRDY_TIME_MASK GENMASK(23, 0)
/* rx ampdu count, 32-bit */
-#define MT_MIB_SDR22(_band) MT_WF_MIB(_band, 0x060)
+#define MT_MIB_SDR22(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR22))
/* rx ampdu bytes count, 32-bit */
-#define MT_MIB_SDR23(_band) MT_WF_MIB(_band, 0x064)
+#define MT_MIB_SDR23(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR23))
/* rx ampdu valid subframe count */
-#define MT_MIB_SDR24(_band) MT_WF_MIB(_band, 0x068)
+#define MT_MIB_SDR24(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR24))
#define MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK GENMASK(23, 0)
+#define MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916 GENMASK(31, 0)
/* rx ampdu valid subframe bytes count, 32bits */
-#define MT_MIB_SDR25(_band) MT_WF_MIB(_band, 0x06c)
+#define MT_MIB_SDR25(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR25))
/* remaining windows protected stats */
-#define MT_MIB_SDR27(_band) MT_WF_MIB(_band, 0x074)
+#define MT_MIB_SDR27(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR27))
#define MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK GENMASK(15, 0)
-#define MT_MIB_SDR28(_band) MT_WF_MIB(_band, 0x078)
+#define MT_MIB_SDR28(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR28))
#define MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK GENMASK(15, 0)
-#define MT_MIB_SDR29(_band) MT_WF_MIB(_band, 0x07c)
-#define MT_MIB_SDR29_RX_PFDROP_CNT_MASK GENMASK(7, 0)
+#define MT_MIB_SDR29(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR29))
+#define MT_MIB_SDR29_RX_PFDROP_CNT_MASK GENMASK(7, 0)
+#define MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916 GENMASK(15, 0)
-#define MT_MIB_SDR30(_band) MT_WF_MIB(_band, 0x080)
+#define MT_MIB_SDRVEC(_band) MT_WF_MIB(_band, __OFFS(MIB_SDRVEC))
#define MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK GENMASK(15, 0)
+#define MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916 GENMASK(31, 16)
/* rx blockack count, 32 bits */
-#define MT_MIB_SDR31(_band) MT_WF_MIB(_band, 0x084)
+#define MT_MIB_SDR31(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR31))
-#define MT_MIB_SDR32(_band) MT_WF_MIB(_band, 0x088)
-#define MT_MIB_SDR32_TX_PKT_EBF_CNT_MASK GENMASK(15, 0)
+#define MT_MIB_SDR32(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR32))
+#define MT_MIB_SDR32_TX_PKT_EBF_CNT GENMASK(15, 0)
+#define MT_MIB_SDR32_TX_PKT_IBF_CNT GENMASK(31, 16)
-#define MT_MIB_SDR33(_band) MT_WF_MIB(_band, 0x08c)
-#define MT_MIB_SDR33_TX_PKT_IBF_CNT_MASK GENMASK(15, 0)
+#define MT_MIB_SDR33(_band) MT_WF_MIB(_band, 0x088)
+#define MT_MIB_SDR33_TX_PKT_IBF_CNT GENMASK(15, 0)
-#define MT_MIB_SDR34(_band) MT_WF_MIB(_band, 0x090)
+#define MT_MIB_SDRMUBF(_band) MT_WF_MIB(_band, __OFFS(MIB_SDRMUBF))
#define MT_MIB_MU_BF_TX_CNT GENMASK(15, 0)
/* 36, 37 both DNR */
-#define MT_MIB_DR8(_band) MT_WF_MIB(_band, 0x0c0)
-#define MT_MIB_DR9(_band) MT_WF_MIB(_band, 0x0c4)
-#define MT_MIB_DR11(_band) MT_WF_MIB(_band, 0x0cc)
+#define MT_MIB_DR8(_band) MT_WF_MIB(_band, __OFFS(MIB_DR8))
+#define MT_MIB_DR9(_band) MT_WF_MIB(_band, __OFFS(MIB_DR9))
+#define MT_MIB_DR11(_band) MT_WF_MIB(_band, __OFFS(MIB_DR11))
-#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(_band, 0x100 + ((n) << 4))
+#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(_band, __OFFS(MIB_MB_SDR0) + (n))
#define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16)
#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0)
-#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(_band, 0x104 + ((n) << 4))
+#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(_band, __OFFS(MIB_MB_SDR1) + (n))
#define MT_MIB_BA_MISS_COUNT_MASK GENMASK(15, 0)
#define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(31, 16)
-#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, 0x0a8 + ((n) << 2))
-#define MT_TX_AGG_CNT2(_band, n) MT_WF_MIB(_band, 0x164 + ((n) << 2))
-#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, 0x4b8 + ((n) << 2))
+#define MT_MIB_MB_SDR2(_band, n) MT_WF_MIB(_band, 0x518 + (n))
+#define MT_MIB_MB_BFTF(_band, n) MT_WF_MIB(_band, 0x510 + (n))
+
+#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, __OFFS(TX_AGG_CNT) + \
+ ((n) << 2))
+#define MT_TX_AGG_CNT2(_band, n) MT_WF_MIB(_band, __OFFS(TX_AGG_CNT2) + \
+ ((n) << 2))
+#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, __OFFS(MIB_ARNG) + \
+ ((n) << 2))
#define MT_MIB_ARNCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(7, 0))
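+/* For reference: MT_MIB_ARNCR_RANGE(val, 2) expands to ((val) >> 16) & 0xff,
+ * i.e. the third 8-bit aggregation-range bucket packed into the register
+ * value.
+ */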
-#define MT_WTBLON_TOP_BASE 0x34000
+#define MT_MIB_BFCR0(_band) MT_WF_MIB(_band, 0x7b0)
+#define MT_MIB_BFCR0_RX_FB_HT GENMASK(15, 0)
+#define MT_MIB_BFCR0_RX_FB_VHT GENMASK(31, 16)
+
+#define MT_MIB_BFCR1(_band) MT_WF_MIB(_band, 0x7b4)
+#define MT_MIB_BFCR1_RX_FB_HE GENMASK(15, 0)
+
+#define MT_MIB_BFCR2(_band) MT_WF_MIB(_band, 0x7b8)
+#define MT_MIB_BFCR2_BFEE_TX_FB_TRIG GENMASK(15, 0)
+
+#define MT_MIB_BFCR7(_band) MT_WF_MIB(_band, 0x7cc)
+#define MT_MIB_BFCR7_BFEE_TX_FB_CPL GENMASK(15, 0)
+
+/* WTBLON TOP */
+#define MT_WTBLON_TOP_BASE 0x820d4000
#define MT_WTBLON_TOP(ofs) (MT_WTBLON_TOP_BASE + (ofs))
-#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(0x0)
+#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(__OFFS(WTBLON_TOP_WDUCR))
#define MT_WTBLON_TOP_WDUCR_GROUP GENMASK(2, 0)
-#define MT_WTBL_UPDATE MT_WTBLON_TOP(0x030)
+#define MT_WTBL_UPDATE MT_WTBLON_TOP(__OFFS(WTBL_UPDATE))
#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(9, 0)
#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12)
#define MT_WTBL_UPDATE_BUSY BIT(31)
-#define MT_WTBL_BASE 0x38000
+/* WTBL */
+#define MT_WTBL_BASE 0x820d8000
#define MT_WTBL_LMAC_ID GENMASK(14, 8)
#define MT_WTBL_LMAC_DW GENMASK(7, 2)
#define MT_WTBL_LMAC_OFFS(_id, _dw) (MT_WTBL_BASE | \
- FIELD_PREP(MT_WTBL_LMAC_ID, _id) | \
- FIELD_PREP(MT_WTBL_LMAC_DW, _dw))
+ FIELD_PREP(MT_WTBL_LMAC_ID, _id) | \
+ FIELD_PREP(MT_WTBL_LMAC_DW, _dw))
-/* AGG: band 0(0x20800), band 1(0xa0800) */
-#define MT_WF_AGG_BASE(_band) ((_band) ? 0xa0800 : 0x20800)
+/* AGG: band 0(0x820e2000), band 1(0x820f2000) */
+#define MT_WF_AGG_BASE(_band) ((_band) ? 0x820f2000 : 0x820e2000)
#define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs))
-#define MT_AGG_AWSCR0(_band, _n) MT_WF_AGG(_band, 0x05c + (_n) * 4)
-#define MT_AGG_PCR0(_band, _n) MT_WF_AGG(_band, 0x06c + (_n) * 4)
+#define MT_AGG_AWSCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_AWSCR0) + \
+ (_n) * 4))
+#define MT_AGG_PCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_PCR0) + \
+ (_n) * 4))
#define MT_AGG_PCR0_MM_PROT BIT(0)
#define MT_AGG_PCR0_GF_PROT BIT(1)
#define MT_AGG_PCR0_BW20_PROT BIT(2)
@@ -305,31 +454,32 @@
#define MT_AGG_PCR1_RTS0_NUM_THRES GENMASK(31, 23)
#define MT_AGG_PCR1_RTS0_LEN_THRES GENMASK(19, 0)
-#define MT_AGG_ACR0(_band) MT_WF_AGG(_band, 0x084)
+#define MT_AGG_ACR0(_band) MT_WF_AGG(_band, __OFFS(AGG_ACR0))
#define MT_AGG_ACR_CFEND_RATE GENMASK(13, 0)
#define MT_AGG_ACR_BAR_RATE GENMASK(29, 16)
-#define MT_AGG_MRCR(_band) MT_WF_AGG(_band, 0x098)
-#define MT_AGG_MRCR_BAR_CNT_LIMIT GENMASK(15, 12)
-#define MT_AGG_MRCR_LAST_RTS_CTS_RN BIT(6)
-#define MT_AGG_MRCR_RTS_FAIL_LIMIT GENMASK(11, 7)
+#define MT_AGG_MRCR(_band) MT_WF_AGG(_band, __OFFS(AGG_MRCR))
+#define MT_AGG_MRCR_BAR_CNT_LIMIT GENMASK(15, 12)
+#define MT_AGG_MRCR_LAST_RTS_CTS_RN BIT(6)
+#define MT_AGG_MRCR_RTS_FAIL_LIMIT GENMASK(11, 7)
#define MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT GENMASK(28, 24)
-#define MT_AGG_ATCR1(_band) MT_WF_AGG(_band, 0x0f0)
-#define MT_AGG_ATCR3(_band) MT_WF_AGG(_band, 0x0f4)
+#define MT_AGG_ATCR1(_band) MT_WF_AGG(_band, __OFFS(AGG_ATCR1))
+#define MT_AGG_ATCR3(_band) MT_WF_AGG(_band, __OFFS(AGG_ATCR3))
-/* ARB: band 0(0x20c00), band 1(0xa0c00) */
-#define MT_WF_ARB_BASE(_band) ((_band) ? 0xa0c00 : 0x20c00)
+/* ARB: band 0(0x820e3000), band 1(0x820f3000) */
+#define MT_WF_ARB_BASE(_band) ((_band) ? 0x820f3000 : 0x820e3000)
#define MT_WF_ARB(_band, ofs) (MT_WF_ARB_BASE(_band) + (ofs))
-#define MT_ARB_SCR(_band) MT_WF_ARB(_band, 0x080)
+#define MT_ARB_SCR(_band) MT_WF_ARB(_band, __OFFS(ARB_SCR))
#define MT_ARB_SCR_TX_DISABLE BIT(8)
#define MT_ARB_SCR_RX_DISABLE BIT(9)
-#define MT_ARB_DRNGR0(_band, _n) MT_WF_ARB(_band, 0x194 + (_n) * 4)
+#define MT_ARB_DRNGR0(_band, _n) MT_WF_ARB(_band, (__OFFS(ARB_DRNGR0) + \
+ (_n) * 4))
-/* RMAC: band 0(0x21400), band 1(0xa1400) */
-#define MT_WF_RMAC_BASE(_band) ((_band) ? 0xa1400 : 0x21400)
+/* RMAC: band 0(0x820e5000), band 1(0x820f5000) */
+#define MT_WF_RMAC_BASE(_band) ((_band) ? 0x820f5000 : 0x820e5000)
#define MT_WF_RMAC(_band, ofs) (MT_WF_RMAC_BASE(_band) + (ofs))
#define MT_WF_RFCR(_band) MT_WF_RMAC(_band, 0x000)
@@ -366,7 +516,7 @@
#define MT_WF_RMAC_MIB_RXTIME_CLR BIT(31)
/* WFDMA0 */
-#define MT_WFDMA0_BASE 0xd4000
+#define MT_WFDMA0_BASE __REG(WFDMA0_ADDR)
#define MT_WFDMA0(ofs) (MT_WFDMA0_BASE + (ofs))
#define MT_WFDMA0_RST MT_WFDMA0(0x100)
@@ -381,15 +531,14 @@
#define MT_WFDMA0_GLO_CFG MT_WFDMA0(0x208)
#define MT_WFDMA0_GLO_CFG_TX_DMA_EN BIT(0)
#define MT_WFDMA0_GLO_CFG_RX_DMA_EN BIT(2)
+#define MT_WFDMA0_GLO_CFG_OMIT_TX_INFO BIT(28)
+#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO BIT(27)
+#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
#define MT_WFDMA0_RST_DTX_PTR MT_WFDMA0(0x20c)
#define MT_WFDMA0_PRI_DLY_INT_CFG0 MT_WFDMA0(0x2f0)
-
-#define MT_RX_DATA_RING_BASE MT_WFDMA0(0x500)
-
-#define MT_WFDMA0_RX_RING0_EXT_CTRL MT_WFDMA0(0x680)
-#define MT_WFDMA0_RX_RING1_EXT_CTRL MT_WFDMA0(0x684)
-#define MT_WFDMA0_RX_RING2_EXT_CTRL MT_WFDMA0(0x688)
+#define MT_WFDMA0_PRI_DLY_INT_CFG1 MT_WFDMA0(0x2f4)
+#define MT_WFDMA0_PRI_DLY_INT_CFG2 MT_WFDMA0(0x2f8)
/* WFDMA1 */
#define MT_WFDMA1_BASE 0xd5000
@@ -404,129 +553,371 @@
#define MT_WFDMA1_BUSY_ENA_TX_FIFO1 BIT(1)
#define MT_WFDMA1_BUSY_ENA_RX_FIFO BIT(2)
-#define MT_MCU_CMD MT_WFDMA1(0x1f0)
-#define MT_MCU_CMD_STOP_DMA_FW_RELOAD BIT(1)
-#define MT_MCU_CMD_STOP_DMA BIT(2)
-#define MT_MCU_CMD_RESET_DONE BIT(3)
-#define MT_MCU_CMD_RECOVERY_DONE BIT(4)
-#define MT_MCU_CMD_NORMAL_STATE BIT(5)
-#define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1)
-
#define MT_WFDMA1_GLO_CFG MT_WFDMA1(0x208)
#define MT_WFDMA1_GLO_CFG_TX_DMA_EN BIT(0)
#define MT_WFDMA1_GLO_CFG_RX_DMA_EN BIT(2)
#define MT_WFDMA1_GLO_CFG_OMIT_TX_INFO BIT(28)
#define MT_WFDMA1_GLO_CFG_OMIT_RX_INFO BIT(27)
+#define MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
#define MT_WFDMA1_RST_DTX_PTR MT_WFDMA1(0x20c)
#define MT_WFDMA1_PRI_DLY_INT_CFG0 MT_WFDMA1(0x2f0)
-#define MT_TX_RING_BASE MT_WFDMA1(0x300)
-#define MT_RX_EVENT_RING_BASE MT_WFDMA1(0x500)
-
-#define MT_WFDMA1_TX_RING0_EXT_CTRL MT_WFDMA1(0x600)
-#define MT_WFDMA1_TX_RING1_EXT_CTRL MT_WFDMA1(0x604)
-#define MT_WFDMA1_TX_RING2_EXT_CTRL MT_WFDMA1(0x608)
-#define MT_WFDMA1_TX_RING3_EXT_CTRL MT_WFDMA1(0x60c)
-#define MT_WFDMA1_TX_RING4_EXT_CTRL MT_WFDMA1(0x610)
-#define MT_WFDMA1_TX_RING5_EXT_CTRL MT_WFDMA1(0x614)
-#define MT_WFDMA1_TX_RING6_EXT_CTRL MT_WFDMA1(0x618)
-#define MT_WFDMA1_TX_RING7_EXT_CTRL MT_WFDMA1(0x61c)
-
-#define MT_WFDMA1_TX_RING16_EXT_CTRL MT_WFDMA1(0x640)
-#define MT_WFDMA1_TX_RING17_EXT_CTRL MT_WFDMA1(0x644)
-#define MT_WFDMA1_TX_RING18_EXT_CTRL MT_WFDMA1(0x648)
-#define MT_WFDMA1_TX_RING19_EXT_CTRL MT_WFDMA1(0x64c)
-#define MT_WFDMA1_TX_RING20_EXT_CTRL MT_WFDMA1(0x650)
-#define MT_WFDMA1_TX_RING21_EXT_CTRL MT_WFDMA1(0x654)
-#define MT_WFDMA1_TX_RING22_EXT_CTRL MT_WFDMA1(0x658)
-#define MT_WFDMA1_TX_RING23_EXT_CTRL MT_WFDMA1(0x65c)
-
-#define MT_WFDMA1_RX_RING0_EXT_CTRL MT_WFDMA1(0x680)
-#define MT_WFDMA1_RX_RING1_EXT_CTRL MT_WFDMA1(0x684)
-#define MT_WFDMA1_RX_RING2_EXT_CTRL MT_WFDMA1(0x688)
-#define MT_WFDMA1_RX_RING3_EXT_CTRL MT_WFDMA1(0x68c)
-
/* WFDMA CSR */
-#define MT_WFDMA_EXT_CSR_BASE 0xd7000
+#define MT_WFDMA_EXT_CSR_BASE __REG(WFDMA_EXT_CSR_ADDR)
#define MT_WFDMA_EXT_CSR(ofs) (MT_WFDMA_EXT_CSR_BASE + (ofs))
-#define MT_INT_SOURCE_CSR MT_WFDMA_EXT_CSR(0x10)
-#define MT_INT_MASK_CSR MT_WFDMA_EXT_CSR(0x14)
-#define MT_INT_RX_DONE_DATA0 BIT(16)
-#define MT_INT_RX_DONE_DATA1 BIT(17)
-#define MT_INT_RX_DONE_WM BIT(0)
-#define MT_INT_RX_DONE_WA BIT(1)
-#define MT_INT_RX_DONE_WA_EXT BIT(2)
-#define MT_INT_RX_DONE_ALL (GENMASK(2, 0) | GENMASK(17, 16))
-#define MT_INT_TX_DONE_MCU_WA BIT(15)
-#define MT_INT_TX_DONE_FWDL BIT(26)
-#define MT_INT_TX_DONE_MCU_WM BIT(27)
-#define MT_INT_TX_DONE_BAND0 BIT(30)
-#define MT_INT_TX_DONE_BAND1 BIT(31)
-
-#define MT_INT_BAND1_MASK (MT_INT_RX_DONE_WA_EXT | \
- MT_INT_TX_DONE_BAND1)
-
-#define MT_INT_MCU_CMD BIT(29)
-
-#define MT_INT_TX_DONE_MCU (MT_INT_TX_DONE_MCU_WA | \
- MT_INT_TX_DONE_MCU_WM | \
- MT_INT_TX_DONE_FWDL)
-
#define MT_WFDMA_HOST_CONFIG MT_WFDMA_EXT_CSR(0x30)
#define MT_WFDMA_HOST_CONFIG_PDMA_BAND BIT(0)
#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR(0x44)
#define MT_WFDMA_EXT_CSR_HIF_MISC_BUSY BIT(0)
-#define MT_INT1_SOURCE_CSR MT_WFDMA_EXT_CSR(0x88)
-#define MT_INT1_MASK_CSR MT_WFDMA_EXT_CSR(0x8c)
-
-#define MT_PCIE_RECOG_ID MT_WFDMA_EXT_CSR(0x90)
+#define MT_PCIE_RECOG_ID 0xd7090
#define MT_PCIE_RECOG_ID_MASK GENMASK(30, 0)
#define MT_PCIE_RECOG_ID_SEM BIT(31)
/* WFDMA0 PCIE1 */
-#define MT_WFDMA0_PCIE1_BASE 0xd8000
-#define MT_WFDMA0_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
+#define MT_WFDMA0_PCIE1_BASE __REG(WFDMA0_PCIE1_ADDR)
+#define MT_WFDMA0_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
-#define MT_WFDMA0_PCIE1_BUSY_ENA MT_WFDMA0_PCIE1(0x13c)
+#define MT_WFDMA0_PCIE1_BUSY_ENA MT_WFDMA0_PCIE1(0x13c)
#define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 BIT(0)
#define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 BIT(1)
#define MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO BIT(2)
/* WFDMA1 PCIE1 */
-#define MT_WFDMA1_PCIE1_BASE 0xd9000
-#define MT_WFDMA1_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
+#define MT_WFDMA1_PCIE1_BASE 0xd9000
+#define MT_WFDMA1_PCIE1(ofs) (MT_WFDMA1_PCIE1_BASE + (ofs))
-#define MT_WFDMA1_PCIE1_BUSY_ENA MT_WFDMA1_PCIE1(0x13c)
+#define MT_WFDMA1_PCIE1_BUSY_ENA MT_WFDMA1_PCIE1(0x13c)
#define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 BIT(0)
#define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 BIT(1)
#define MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO BIT(2)
-#define MT_TOP_RGU_BASE 0xf0000
-#define MT_TOP_PWR_CTRL (MT_TOP_RGU_BASE + (0x0))
-#define MT_TOP_PWR_KEY (0x5746 << 16)
-#define MT_TOP_PWR_SW_RST BIT(0)
-#define MT_TOP_PWR_SW_PWR_ON GENMASK(3, 2)
-#define MT_TOP_PWR_HW_CTRL BIT(4)
-#define MT_TOP_PWR_PWR_ON BIT(7)
+/* WFDMA COMMON */
+#define __RXQ(q) ((q) + __MT_MCUQ_MAX)
+#define __TXQ(q) (__RXQ(q) + __MT_RXQ_MAX)
+
+#define MT_Q_ID(q) (dev->q_id[(q)])
+#define MT_Q_BASE(q) ((dev->wfdma_mask >> (q)) & 0x1 ? \
+ MT_WFDMA1_BASE : MT_WFDMA0_BASE)
+
+#define MT_MCUQ_ID(q) MT_Q_ID(q)
+#define MT_TXQ_ID(q) MT_Q_ID(__TXQ(q))
+#define MT_RXQ_ID(q) MT_Q_ID(__RXQ(q))
+
+#define MT_MCUQ_RING_BASE(q) (MT_Q_BASE(q) + 0x300)
+#define MT_TXQ_RING_BASE(q) (MT_Q_BASE(__TXQ(q)) + 0x300)
+#define MT_RXQ_RING_BASE(q) (MT_Q_BASE(__RXQ(q)) + 0x500)
+
+#define MT_MCUQ_EXT_CTRL(q) (MT_Q_BASE(q) + 0x600 + \
+ MT_MCUQ_ID(q) * 0x4)
+#define MT_RXQ_EXT_CTRL(q) (MT_Q_BASE(__RXQ(q)) + 0x680 + \
+ MT_RXQ_ID(q) * 0x4)
+#define MT_TXQ_EXT_CTRL(q) (MT_Q_BASE(__TXQ(q)) + 0x600 + \
+ MT_TXQ_ID(q) * 0x4)
+
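+/* Illustrative expansion (queue numbers are assumptions, not taken from
+ * this patch): for a TX queue whose __TXQ() index is flagged in
+ * dev->wfdma_mask and whose dev->q_id[] entry is 16, the macros above
+ * resolve to
+ *
+ *	MT_TXQ_RING_BASE(q) -> MT_WFDMA1_BASE + 0x300
+ *	MT_TXQ_EXT_CTRL(q)  -> MT_WFDMA1_BASE + 0x600 + 16 * 0x4
+ *
+ * so the same DMA code can drive rings on either WFDMA engine.
+ */
+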
+#define MT_INT_SOURCE_CSR __REG(INT_SOURCE_CSR)
+#define MT_INT_MASK_CSR __REG(INT_MASK_CSR)
+
+#define MT_INT1_SOURCE_CSR __REG(INT1_SOURCE_CSR)
+#define MT_INT1_MASK_CSR __REG(INT1_MASK_CSR)
+
+#define MT_INT_RX_DONE_BAND0 BIT(16)
+#define MT_INT_RX_DONE_BAND1 BIT(17)
+#define MT_INT_RX_DONE_WM BIT(0)
+#define MT_INT_RX_DONE_WA BIT(1)
+#define MT_INT_RX_DONE_WA_MAIN BIT(1)
+#define MT_INT_RX_DONE_WA_EXT BIT(2)
+#define MT_INT_MCU_CMD BIT(29)
+#define MT_INT_RX_DONE_BAND0_MT7916 BIT(22)
+#define MT_INT_RX_DONE_BAND1_MT7916 BIT(23)
+#define MT_INT_RX_DONE_WA_MAIN_MT7916 BIT(2)
+#define MT_INT_RX_DONE_WA_EXT_MT7916 BIT(3)
+
+#define MT_INT_RX(q) (dev->q_int_mask[__RXQ(q)])
+#define MT_INT_TX_MCU(q) (dev->q_int_mask[(q)])
+
+#define MT_INT_RX_DONE_MCU (MT_INT_RX(MT_RXQ_MCU) | \
+ MT_INT_RX(MT_RXQ_MCU_WA))
+
+#define MT_INT_BAND0_RX_DONE (MT_INT_RX(MT_RXQ_MAIN) | \
+ MT_INT_RX(MT_RXQ_MAIN_WA))
+
+#define MT_INT_BAND1_RX_DONE (MT_INT_RX(MT_RXQ_EXT) | \
+ MT_INT_RX(MT_RXQ_EXT_WA) | \
+ MT_INT_RX(MT_RXQ_MAIN_WA))
+
+#define MT_INT_RX_DONE_ALL (MT_INT_RX_DONE_MCU | \
+ MT_INT_BAND0_RX_DONE | \
+ MT_INT_BAND1_RX_DONE)
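+
+/* The dev->q_int_mask[] indirection keeps the composites above chip-agnostic:
+ * MT_INT_RX(MT_RXQ_MAIN) is expected to hold MT_INT_RX_DONE_BAND0 (BIT(16))
+ * on MT7915 but MT_INT_RX_DONE_BAND0_MT7916 (BIT(22)) on MT7916; how the
+ * table gets filled is assumed here, not shown in this hunk.
+ */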
-#define MT_INFRA_CFG_BASE 0xf1000
-#define MT_INFRA(ofs) (MT_INFRA_CFG_BASE + (ofs))
+#define MT_INT_TX_DONE_FWDL BIT(26)
+#define MT_INT_TX_DONE_MCU_WM BIT(27)
+#define MT_INT_TX_DONE_MCU_WA BIT(15)
+#define MT_INT_TX_DONE_BAND0 BIT(30)
+#define MT_INT_TX_DONE_BAND1 BIT(31)
+#define MT_INT_TX_DONE_MCU_WA_MT7916 BIT(25)
+
+#define MT_INT_TX_DONE_MCU (MT_INT_TX_MCU(MT_MCUQ_WA) | \
+ MT_INT_TX_MCU(MT_MCUQ_WM) | \
+ MT_INT_TX_MCU(MT_MCUQ_FWDL))
+
+#define MT_MCU_CMD __REG(INT_MCU_CMD_SOURCE)
+#define MT_MCU_CMD_STOP_DMA_FW_RELOAD BIT(1)
+#define MT_MCU_CMD_STOP_DMA BIT(2)
+#define MT_MCU_CMD_RESET_DONE BIT(3)
+#define MT_MCU_CMD_RECOVERY_DONE BIT(4)
+#define MT_MCU_CMD_NORMAL_STATE BIT(5)
+#define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1)
-#define MT_HIF_REMAP_L1 MT_INFRA(0x1ac)
+/* TOP RGU */
+#define MT_TOP_RGU_BASE 0x18000000
+#define MT_TOP_PWR_CTRL (MT_TOP_RGU_BASE + (0x0))
+#define MT_TOP_PWR_KEY (0x5746 << 16)
+#define MT_TOP_PWR_SW_RST BIT(0)
+#define MT_TOP_PWR_SW_PWR_ON GENMASK(3, 2)
+#define MT_TOP_PWR_HW_CTRL BIT(4)
+#define MT_TOP_PWR_PWR_ON BIT(7)
+
+#define MT_TOP_RGU_SYSRAM_PDN (MT_TOP_RGU_BASE + 0x050)
+#define MT_TOP_RGU_SYSRAM_SLP (MT_TOP_RGU_BASE + 0x054)
+#define MT_TOP_WFSYS_PWR (MT_TOP_RGU_BASE + 0x010)
+#define MT_TOP_PWR_EN_MASK BIT(7)
+#define MT_TOP_PWR_ACK_MASK BIT(6)
+#define MT_TOP_PWR_KEY_MASK GENMASK(31, 16)
+
+#define MT7986_TOP_WM_RESET (MT_TOP_RGU_BASE + 0x120)
+#define MT7986_TOP_WM_RESET_MASK BIT(0)
+
+/* l1/l2 remap */
+#define MT_HIF_REMAP_L1 0xf11ac
+#define MT_HIF_REMAP_L1_MT7916 0xfe260
#define MT_HIF_REMAP_L1_MASK GENMASK(15, 0)
#define MT_HIF_REMAP_L1_OFFSET GENMASK(15, 0)
#define MT_HIF_REMAP_L1_BASE GENMASK(31, 16)
#define MT_HIF_REMAP_BASE_L1 0xe0000
-#define MT_HIF_REMAP_L2 MT_INFRA(0x1b0)
+#define MT_HIF_REMAP_L2 0xf11b0
#define MT_HIF_REMAP_L2_MASK GENMASK(19, 0)
#define MT_HIF_REMAP_L2_OFFSET GENMASK(11, 0)
#define MT_HIF_REMAP_L2_BASE GENMASK(31, 12)
-#define MT_HIF_REMAP_BASE_L2 0x00000
+#define MT_HIF_REMAP_L2_MT7916 0x1b8
+#define MT_HIF_REMAP_L2_MASK_MT7916 GENMASK(31, 16)
+#define MT_HIF_REMAP_L2_OFFSET_MT7916 GENMASK(15, 0)
+#define MT_HIF_REMAP_L2_BASE_MT7916 GENMASK(31, 16)
+#define MT_HIF_REMAP_BASE_L2_MT7916 0x40000
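+
+/* Minimal sketch of how the L1 remap window is typically used (the helper
+ * name is illustrative, not defined in this patch): latch the upper address
+ * bits in the remap register, then access the target through the fixed
+ * MT_HIF_REMAP_BASE_L1 window.
+ *
+ *	static u32 map_l1(struct mt7915_dev *dev, u32 addr)
+ *	{
+ *		u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
+ *		u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);
+ *
+ *		mt76_rmw_field(dev, MT_HIF_REMAP_L1,
+ *			       MT_HIF_REMAP_L1_MASK, base);
+ *
+ *		return MT_HIF_REMAP_BASE_L1 + offset;
+ *	}
+ */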
+
+#define MT_INFRA_BASE 0x18000000
+#define MT_WFSYS0_PHY_START 0x18400000
+#define MT_WFSYS1_PHY_START 0x18800000
+#define MT_WFSYS1_PHY_END 0x18bfffff
+#define MT_CBTOP1_PHY_START 0x70000000
+#define MT_CBTOP1_PHY_END __REG(CBTOP1_PHY_END)
+#define MT_CBTOP2_PHY_START 0xf0000000
+#define MT_CBTOP2_PHY_END 0xffffffff
+#define MT_INFRA_MCU_START 0x7c000000
+#define MT_INFRA_MCU_END __REG(INFRA_MCU_ADDR_END)
+#define MT_CONN_INFRA_OFFSET(p) ((p) - MT_INFRA_BASE)
+
+/* CONN INFRA CFG */
+#define MT_CONN_INFRA_BASE 0x18001000
+#define MT_CONN_INFRA(ofs) (MT_CONN_INFRA_BASE + (ofs))
+
+#define MT_CONN_INFRA_EFUSE MT_CONN_INFRA(0x020)
+
+#define MT_CONN_INFRA_ADIE_RESET MT_CONN_INFRA(0x030)
+#define MT_CONN_INFRA_ADIE1_RESET_MASK BIT(0)
+#define MT_CONN_INFRA_ADIE2_RESET_MASK BIT(2)
+
+#define MT_CONN_INFRA_OSC_RC_EN MT_CONN_INFRA(0x380)
+
+#define MT_CONN_INFRA_OSC_CTRL MT_CONN_INFRA(0x300)
+#define MT_CONN_INFRA_OSC_RC_EN_MASK BIT(7)
+#define MT_CONN_INFRA_OSC_STB_TIME_MASK GENMASK(23, 0)
+
+#define MT_CONN_INFRA_HW_CTRL MT_CONN_INFRA(0x200)
+#define MT_CONN_INFRA_HW_CTRL_MASK BIT(0)
+
+#define MT_CONN_INFRA_WF_SLP_PROT MT_CONN_INFRA(0x540)
+#define MT_CONN_INFRA_WF_SLP_PROT_MASK BIT(0)
+
+#define MT_CONN_INFRA_WF_SLP_PROT_RDY MT_CONN_INFRA(0x544)
+#define MT_CONN_INFRA_CONN_WF_MASK (BIT(29) | BIT(31))
+#define MT_CONN_INFRA_CONN (BIT(25) | BIT(29) | BIT(31))
+
+#define MT_CONN_INFRA_EMI_REQ MT_CONN_INFRA(0x414)
+#define MT_CONN_INFRA_EMI_REQ_MASK BIT(0)
+#define MT_CONN_INFRA_INFRA_REQ_MASK BIT(5)
+
+/* AFE */
+#define MT_AFE_CTRL_BASE(_band) (0x18003000 + ((_band) << 19))
+#define MT_AFE_CTRL(_band, ofs) (MT_AFE_CTRL_BASE(_band) + (ofs))
+
+#define MT_AFE_DIG_EN_01(_band) MT_AFE_CTRL(_band, 0x00)
+#define MT_AFE_DIG_EN_02(_band) MT_AFE_CTRL(_band, 0x04)
+#define MT_AFE_DIG_EN_03(_band) MT_AFE_CTRL(_band, 0x08)
+#define MT_AFE_DIG_TOP_01(_band) MT_AFE_CTRL(_band, 0x0c)
+
+#define MT_AFE_PLL_STB_TIME(_band) MT_AFE_CTRL(_band, 0xf4)
+#define MT_AFE_PLL_STB_TIME_MASK (GENMASK(30, 16) | GENMASK(14, 0))
+#define MT_AFE_PLL_STB_TIME_VAL (FIELD_PREP(GENMASK(30, 16), 0x4bc) | \
+ FIELD_PREP(GENMASK(14, 0), 0x7e4))
+#define MT_AFE_BPLL_CFG_MASK GENMASK(7, 6)
+#define MT_AFE_WPLL_CFG_MASK GENMASK(1, 0)
+#define MT_AFE_MCU_WPLL_CFG_MASK GENMASK(3, 2)
+#define MT_AFE_MCU_BPLL_CFG_MASK GENMASK(17, 16)
+#define MT_AFE_PLL_CFG_MASK (MT_AFE_BPLL_CFG_MASK | \
+ MT_AFE_WPLL_CFG_MASK | \
+ MT_AFE_MCU_WPLL_CFG_MASK | \
+ MT_AFE_MCU_BPLL_CFG_MASK)
+#define MT_AFE_PLL_CFG_VAL (FIELD_PREP(MT_AFE_BPLL_CFG_MASK, 0x1) | \
+ FIELD_PREP(MT_AFE_WPLL_CFG_MASK, 0x2) | \
+ FIELD_PREP(MT_AFE_MCU_WPLL_CFG_MASK, 0x1) | \
+ FIELD_PREP(MT_AFE_MCU_BPLL_CFG_MASK, 0x2))
+
+#define MT_AFE_DIG_TOP_01_MASK GENMASK(18, 15)
+#define MT_AFE_DIG_TOP_01_VAL FIELD_PREP(MT_AFE_DIG_TOP_01_MASK, 0x9)
+
+#define MT_AFE_RG_WBG_EN_RCK_MASK BIT(0)
+#define MT_AFE_RG_WBG_EN_BPLL_UP_MASK BIT(21)
+#define MT_AFE_RG_WBG_EN_WPLL_UP_MASK BIT(20)
+#define MT_AFE_RG_WBG_EN_PLL_UP_MASK (MT_AFE_RG_WBG_EN_BPLL_UP_MASK | \
+ MT_AFE_RG_WBG_EN_WPLL_UP_MASK)
+#define MT_AFE_RG_WBG_EN_TXCAL_MASK GENMASK(21, 17)
+
+#define MT_ADIE_SLP_CTRL_BASE(_band) (0x18005000 + ((_band) << 19))
+#define MT_ADIE_SLP_CTRL(_band, ofs) (MT_ADIE_SLP_CTRL_BASE(_band) + (ofs))
+
+#define MT_ADIE_SLP_CTRL_CK0(_band) MT_ADIE_SLP_CTRL(_band, 0x120)
+
+/* ADIE */
+#define MT_ADIE_CHIP_ID 0x02c
+#define MT_ADIE_CHIP_ID_MASK GENMASK(31, 16)
+#define MT_ADIE_IDX0 GENMASK(15, 0)
+#define MT_ADIE_IDX1 GENMASK(31, 16)
+
+#define MT_ADIE_RG_TOP_THADC_BG 0x034
+#define MT_ADIE_VRPI_SEL_CR_MASK GENMASK(15, 12)
+#define MT_ADIE_VRPI_SEL_EFUSE_MASK GENMASK(6, 3)
+
+#define MT_ADIE_RG_TOP_THADC 0x038
+#define MT_ADIE_PGA_GAIN_MASK GENMASK(25, 23)
+#define MT_ADIE_PGA_GAIN_EFUSE_MASK GENMASK(2, 0)
+#define MT_ADIE_LDO_CTRL_MASK GENMASK(27, 26)
+#define MT_ADIE_LDO_CTRL_EFUSE_MASK GENMASK(6, 5)
+
+#define MT_AFE_RG_ENCAL_WBTAC_IF_SW 0x070
+#define MT_ADIE_EFUSE_RDATA0 0x130
+
+#define MT_ADIE_EFUSE2_CTRL 0x148
+#define MT_ADIE_EFUSE_CTRL_MASK BIT(1)
+
+#define MT_ADIE_EFUSE_CFG 0x144
+#define MT_ADIE_EFUSE_MODE_MASK GENMASK(7, 6)
+#define MT_ADIE_EFUSE_ADDR_MASK GENMASK(25, 16)
+#define MT_ADIE_EFUSE_VALID_MASK BIT(29)
+#define MT_ADIE_EFUSE_KICK_MASK BIT(30)
+
+#define MT_ADIE_THADC_ANALOG 0x3a6
+
+#define MT_ADIE_THADC_SLOP 0x3a7
+#define MT_ADIE_ANA_EN_MASK BIT(7)
+
+#define MT_ADIE_7975_XTAL_CAL 0x3a1
+#define MT_ADIE_TRIM_MASK GENMASK(6, 0)
+#define MT_ADIE_EFUSE_TRIM_MASK GENMASK(5, 0)
+#define MT_ADIE_XO_TRIM_EN_MASK BIT(7)
+#define MT_ADIE_XTAL_DECREASE_MASK BIT(6)
+
+#define MT_ADIE_7975_XO_TRIM2 0x3a2
+#define MT_ADIE_7975_XO_TRIM3 0x3a3
+#define MT_ADIE_7975_XO_TRIM4 0x3a4
+#define MT_ADIE_7975_XTAL_EN 0x3a5
+
+#define MT_ADIE_XO_TRIM_FLOW 0x3ac
+#define MT_ADIE_XTAL_AXM_80M_OSC 0x390
+#define MT_ADIE_XTAL_AXM_40M_OSC 0x391
+#define MT_ADIE_XTAL_TRIM1_80M_OSC 0x398
+#define MT_ADIE_XTAL_TRIM1_40M_OSC 0x399
+#define MT_ADIE_WRI_CK_SEL 0x4ac
+#define MT_ADIE_RG_STRAP_PIN_IN 0x4fc
+#define MT_ADIE_XTAL_C1 0x654
+#define MT_ADIE_XTAL_C2 0x658
+#define MT_ADIE_RG_XO_01 0x65c
+#define MT_ADIE_RG_XO_03 0x664
+
+#define MT_ADIE_CLK_EN 0xa00
+
+#define MT_ADIE_7975_XTAL 0xa18
+#define MT_ADIE_7975_XTAL_EN_MASK BIT(29)
+
+#define MT_ADIE_7975_COCLK 0xa1c
+#define MT_ADIE_7975_XO_2 0xa84
+#define MT_ADIE_7975_XO_2_FIX_EN BIT(31)
+
+#define MT_ADIE_7975_XO_CTRL2 0xa94
+#define MT_ADIE_7975_XO_CTRL2_C1_MASK GENMASK(26, 20)
+#define MT_ADIE_7975_XO_CTRL2_C2_MASK GENMASK(18, 12)
+#define MT_ADIE_7975_XO_CTRL2_MASK (MT_ADIE_7975_XO_CTRL2_C1_MASK | \
+ MT_ADIE_7975_XO_CTRL2_C2_MASK)
+
+#define MT_ADIE_7975_XO_CTRL6 0xaa4
+#define MT_ADIE_7975_XO_CTRL6_MASK BIT(16)
+
+/* TOP SPI */
+#define MT_TOP_SPI_ADIE_BASE(_band) (0x18004000 + ((_band) << 19))
+#define MT_TOP_SPI_ADIE(_band, ofs) (MT_TOP_SPI_ADIE_BASE(_band) + (ofs))
+
+#define MT_TOP_SPI_BUSY_CR(_band) MT_TOP_SPI_ADIE(_band, 0)
+#define MT_TOP_SPI_POLLING_BIT BIT(5)
+
+#define MT_TOP_SPI_ADDR_CR(_band) MT_TOP_SPI_ADIE(_band, 0x50)
+#define MT_TOP_SPI_READ_ADDR_FORMAT (BIT(12) | BIT(13) | BIT(15))
+#define MT_TOP_SPI_WRITE_ADDR_FORMAT (BIT(13) | BIT(15))
+
+#define MT_TOP_SPI_WRITE_DATA_CR(_band) MT_TOP_SPI_ADIE(_band, 0x54)
+#define MT_TOP_SPI_READ_DATA_CR(_band) MT_TOP_SPI_ADIE(_band, 0x58)
+
+/* CONN INFRA CKGEN */
+#define MT_INFRA_CKGEN_BASE 0x18009000
+#define MT_INFRA_CKGEN(ofs) (MT_INFRA_CKGEN_BASE + (ofs))
+
+#define MT_INFRA_CKGEN_BUS MT_INFRA_CKGEN(0xa00)
+#define MT_INFRA_CKGEN_BUS_CLK_SEL_MASK BIT(23)
+#define MT_INFRA_CKGEN_BUS_RDY_SEL_MASK BIT(29)
+
+#define MT_INFRA_CKGEN_BUS_WPLL_DIV_1 MT_INFRA_CKGEN(0x008)
+#define MT_INFRA_CKGEN_BUS_WPLL_DIV_2 MT_INFRA_CKGEN(0x00c)
+
+#define MT_INFRA_CKGEN_RFSPI_WPLL_DIV MT_INFRA_CKGEN(0x040)
+#define MT_INFRA_CKGEN_DIV_SEL_MASK GENMASK(7, 2)
+#define MT_INFRA_CKGEN_DIV_EN_MASK BIT(0)
+
+/* CONN INFRA BUS */
+#define MT_INFRA_BUS_BASE 0x1800e000
+#define MT_INFRA_BUS(ofs) (MT_INFRA_BUS_BASE + (ofs))
+
+#define MT_INFRA_BUS_OFF_TIMEOUT MT_INFRA_BUS(0x300)
+#define MT_INFRA_BUS_TIMEOUT_LIMIT_MASK GENMASK(14, 7)
+#define MT_INFRA_BUS_TIMEOUT_EN_MASK GENMASK(3, 0)
+
+#define MT_INFRA_BUS_ON_TIMEOUT MT_INFRA_BUS(0x31c)
+#define MT_INFRA_BUS_EMI_START MT_INFRA_BUS(0x360)
+#define MT_INFRA_BUS_EMI_END MT_INFRA_BUS(0x364)
+
+/* CONN_INFRA_SKU */
+#define MT_CONNINFRA_SKU_DEC_ADDR 0x18050000
+#define MT_CONNINFRA_SKU_MASK GENMASK(15, 0)
+#define MT_ADIE_TYPE_MASK BIT(1)
+
+/* FW MODE SYNC */
+#define MT_SWDEF_MODE 0x41f23c
+#define MT_SWDEF_MODE_MT7916 0x41143c
+#define MT_SWDEF_NORMAL_MODE 0
+#define MT_SWDEF_ICAP_MODE 1
+#define MT_SWDEF_SPECTRUM_MODE 2
#define MT_DIC_CMD_REG_BASE 0x41f000
#define MT_DIC_CMD_REG(ofs) (MT_DIC_CMD_REG_BASE + (ofs))
@@ -540,13 +931,7 @@
#define MT_CPU_UTIL_PEAK_IDLE_CNT MT_CPU_UTIL(0x0c)
#define MT_CPU_UTIL_CTRL MT_CPU_UTIL(0x1c)
-#define MT_SWDEF_BASE 0x41f200
-#define MT_SWDEF(ofs) (MT_SWDEF_BASE + (ofs))
-#define MT_SWDEF_MODE MT_SWDEF(0x3c)
-#define MT_SWDEF_NORMAL_MODE 0
-#define MT_SWDEF_ICAP_MODE 1
-#define MT_SWDEF_SPECTRUM_MODE 2
-
+/* LED */
#define MT_LED_TOP_BASE 0x18013000
#define MT_LED_PHYS(_n) (MT_LED_TOP_BASE + (_n))
@@ -561,46 +946,124 @@
#define MT_LED_EN(_n) MT_LED_PHYS(0x40 + ((_n) * 4))
+#define MT_LED_GPIO_MUX2 0x70005058 /* GPIO 18 */
+#define MT_LED_GPIO_MUX3 0x7000505C /* GPIO 26 */
+#define MT_LED_GPIO_SEL_MASK GENMASK(11, 8)
+
+/* MT TOP */
#define MT_TOP_BASE 0x18060000
#define MT_TOP(ofs) (MT_TOP_BASE + (ofs))
-#define MT_TOP_LPCR_HOST_BAND0 MT_TOP(0x10)
+#define MT_TOP_LPCR_HOST_BAND(_band) MT_TOP(0x10 + ((_band) * 0x10))
#define MT_TOP_LPCR_HOST_FW_OWN BIT(0)
#define MT_TOP_LPCR_HOST_DRV_OWN BIT(1)
+#define MT_TOP_LPCR_HOST_FW_OWN_STAT BIT(2)
+
+#define MT_TOP_LPCR_HOST_BAND_IRQ_STAT(_band) MT_TOP(0x14 + ((_band) * 0x10))
+#define MT_TOP_LPCR_HOST_BAND_STAT BIT(0)
#define MT_TOP_MISC MT_TOP(0xf0)
#define MT_TOP_MISC_FW_STATE GENMASK(2, 0)
-#define MT_LED_GPIO_MUX2 0x70005058 /* GPIO 18 */
-#define MT_LED_GPIO_MUX3 0x7000505C /* GPIO 26 */
-#define MT_LED_GPIO_SEL_MASK GENMASK(11, 8)
-
#define MT_HW_BOUND 0x70010020
-#define MT_HW_CHIPID 0x70010200
#define MT_HW_REV 0x70010204
+#define MT_WF_SUBSYS_RST 0x70002600
+
+#define MT_TOP_WFSYS_WAKEUP MT_TOP(0x1a4)
+#define MT_TOP_WFSYS_WAKEUP_MASK BIT(0)
+
+#define MT_TOP_MCU_EMI_BASE MT_TOP(0x1c4)
+#define MT_TOP_MCU_EMI_BASE_MASK GENMASK(19, 0)
+
+#define MT_TOP_CONN_INFRA_WAKEUP MT_TOP(0x1a0)
+#define MT_TOP_CONN_INFRA_WAKEUP_MASK BIT(0)
+
+#define MT_TOP_WFSYS_RESET_STATUS MT_TOP(0x2cc)
+#define MT_TOP_WFSYS_RESET_STATUS_MASK BIT(30)
+
+/* SEMA */
+#define MT_SEMA_BASE 0x18070000
+#define MT_SEMA(ofs) (MT_SEMA_BASE + (ofs))
+
+#define MT_SEMA_RFSPI_STATUS (MT_SEMA(0x2000) + (11 * 4))
+#define MT_SEMA_RFSPI_RELEASE (MT_SEMA(0x2200) + (11 * 4))
+#define MT_SEMA_RFSPI_STATUS_MASK BIT(1)
+
+/* MCU BUS */
+#define MT_MCU_BUS_BASE 0x18400000
+#define MT_MCU_BUS(ofs) (MT_MCU_BUS_BASE + (ofs))
+
+#define MT_MCU_BUS_TIMEOUT MT_MCU_BUS(0xf0440)
+#define MT_MCU_BUS_TIMEOUT_SET_MASK GENMASK(7, 0)
+#define MT_MCU_BUS_TIMEOUT_CG_EN_MASK BIT(28)
+#define MT_MCU_BUS_TIMEOUT_EN_MASK BIT(31)
-#define MT_PCIE1_MAC_BASE 0x74020000
-#define MT_PCIE1_MAC(ofs) (MT_PCIE1_MAC_BASE + (ofs))
-#define MT_PCIE1_MAC_INT_ENABLE MT_PCIE1_MAC(0x188)
+#define MT_MCU_BUS_REMAP MT_MCU_BUS(0x120)
+/* TOP CFG */
+#define MT_TOP_CFG_BASE 0x184b0000
+#define MT_TOP_CFG(ofs) (MT_TOP_CFG_BASE + (ofs))
+
+#define MT_TOP_CFG_IP_VERSION_ADDR MT_TOP_CFG(0x010)
+
+/* TOP CFG ON */
+#define MT_TOP_CFG_ON_BASE 0x184c1000
+#define MT_TOP_CFG_ON(ofs) (MT_TOP_CFG_ON_BASE + (ofs))
+
+#define MT_TOP_CFG_ON_ROM_IDX MT_TOP_CFG_ON(0x604)
+
+/* SLP CTRL */
+#define MT_SLP_BASE 0x184c3000
+#define MT_SLP(ofs) (MT_SLP_BASE + (ofs))
+
+#define MT_SLP_STATUS MT_SLP(0x00c)
+#define MT_SLP_WFDMA2CONN_MASK (BIT(21) | BIT(23))
+#define MT_SLP_CTRL_EN_MASK BIT(0)
+#define MT_SLP_CTRL_BSY_MASK BIT(1)
+
+/* MCU BUS DBG */
+#define MT_MCU_BUS_DBG_BASE 0x18500000
+#define MT_MCU_BUS_DBG(ofs) (MT_MCU_BUS_DBG_BASE + (ofs))
+
+#define MT_MCU_BUS_DBG_TIMEOUT MT_MCU_BUS_DBG(0x0)
+#define MT_MCU_BUS_DBG_TIMEOUT_SET_MASK GENMASK(31, 16)
+#define MT_MCU_BUS_DBG_TIMEOUT_CK_EN_MASK BIT(3)
+#define MT_MCU_BUS_DBG_TIMEOUT_EN_MASK BIT(2)
+
+/* PCIE MAC */
#define MT_PCIE_MAC_BASE 0x74030000
#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs))
#define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188)
-#define MT_WF_IRPI_BASE 0x83006000
-#define MT_WF_IRPI(ofs) (MT_WF_IRPI_BASE + ((ofs) << 16))
+#define MT_PCIE1_MAC_INT_ENABLE 0x74020188
+#define MT_PCIE1_MAC_INT_ENABLE_MT7916 0x74090188
+
+/* PP TOP */
+#define MT_WF_PP_TOP_BASE 0x820cc000
+#define MT_WF_PP_TOP(ofs) (MT_WF_PP_TOP_BASE + (ofs))
+
+#define MT_WF_PP_TOP_RXQ_WFDMA_CF_5 MT_WF_PP_TOP(0x0e8)
+#define MT_WF_PP_TOP_RXQ_QID6_WFDMA_HIF_SEL_MASK BIT(6)
+
+#define MT_WF_IRPI_BASE 0x83000000
+#define MT_WF_IRPI(ofs) (MT_WF_IRPI_BASE + (ofs))
+
+#define MT_WF_IRPI_NSS(phy, nss) MT_WF_IRPI(0x6000 + ((phy) << 20) + ((nss) << 16))
+#define MT_WF_IRPI_NSS_MT7916(phy, nss) MT_WF_IRPI(0x1000 + ((phy) << 20) + ((nss) << 16))
-/* PHY: band 0(0x83080000), band 1(0x83090000) */
+/* PHY */
#define MT_WF_PHY_BASE 0x83080000
#define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs))
#define MT_WF_PHY_RX_CTRL1(_phy) MT_WF_PHY(0x2004 + ((_phy) << 16))
+#define MT_WF_PHY_RX_CTRL1_MT7916(_phy) MT_WF_PHY(0x2004 + ((_phy) << 20))
#define MT_WF_PHY_RX_CTRL1_IPI_EN GENMASK(2, 0)
#define MT_WF_PHY_RX_CTRL1_STSCNT_EN GENMASK(11, 9)
#define MT_WF_PHY_RXTD12(_phy) MT_WF_PHY(0x8230 + ((_phy) << 16))
+#define MT_WF_PHY_RXTD12_MT7916(_phy) MT_WF_PHY(0x8230 + ((_phy) << 20))
#define MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY BIT(18)
-#define MT_WF_PHY_RXTD12_IRPI_SW_CLR BIT(29)
+#define MT_WF_PHY_RXTD12_IRPI_SW_CLR BIT(29)
#define MT_MCU_WM_CIRQ_BASE 0x89010000
#define MT_MCU_WM_CIRQ(ofs) (MT_MCU_WM_CIRQ_BASE + (ofs))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
new file mode 100644
index 000000000000..3028c02cb840
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
@@ -0,0 +1,1212 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2022 MediaTek Inc. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/of_gpio.h>
+#include <linux/iopoll.h>
+#include <linux/reset.h>
+#include <linux/of_net.h>
+
+#include "mt7915.h"
+
+/* INFRACFG */
+#define MT_INFRACFG_CONN2AP_SLPPROT 0x0d0
+#define MT_INFRACFG_AP2CONN_SLPPROT 0x0d4
+
+#define MT_INFRACFG_RX_EN_MASK BIT(16)
+#define MT_INFRACFG_TX_RDY_MASK BIT(4)
+#define MT_INFRACFG_TX_EN_MASK BIT(0)
+
+/* TOP POS */
+#define MT_TOP_POS_FAST_CTRL 0x114
+#define MT_TOP_POS_FAST_EN_MASK BIT(3)
+
+#define MT_TOP_POS_SKU 0x21c
+#define MT_TOP_POS_SKU_MASK GENMASK(31, 28)
+#define MT_TOP_POS_SKU_ADIE_DBDC_MASK BIT(2)
+
+enum {
+ ADIE_SB,
+ ADIE_DBDC
+};
+
+static int
+mt76_wmac_spi_read(struct mt7915_dev *dev, u8 adie, u32 addr, u32 *val)
+{
+ int ret;
+ u32 cur;
+
+ ret = read_poll_timeout(mt76_rr, cur, !(cur & MT_TOP_SPI_POLLING_BIT),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_TOP_SPI_BUSY_CR(adie));
+ if (ret)
+ return ret;
+
+ mt76_wr(dev, MT_TOP_SPI_ADDR_CR(adie),
+ MT_TOP_SPI_READ_ADDR_FORMAT | addr);
+ mt76_wr(dev, MT_TOP_SPI_WRITE_DATA_CR(adie), 0);
+
+ ret = read_poll_timeout(mt76_rr, cur, !(cur & MT_TOP_SPI_POLLING_BIT),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_TOP_SPI_BUSY_CR(adie));
+ if (ret)
+ return ret;
+
+ *val = mt76_rr(dev, MT_TOP_SPI_READ_DATA_CR(adie));
+
+ return 0;
+}
+
+static int
+mt76_wmac_spi_write(struct mt7915_dev *dev, u8 adie, u32 addr, u32 val)
+{
+ int ret;
+ u32 cur;
+
+ ret = read_poll_timeout(mt76_rr, cur, !(cur & MT_TOP_SPI_POLLING_BIT),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_TOP_SPI_BUSY_CR(adie));
+ if (ret)
+ return ret;
+
+ mt76_wr(dev, MT_TOP_SPI_ADDR_CR(adie),
+ MT_TOP_SPI_WRITE_ADDR_FORMAT | addr);
+ mt76_wr(dev, MT_TOP_SPI_WRITE_DATA_CR(adie), val);
+
+ return read_poll_timeout(mt76_rr, cur, !(cur & MT_TOP_SPI_POLLING_BIT),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_TOP_SPI_BUSY_CR(adie));
+}
+
+static int
+mt76_wmac_spi_rmw(struct mt7915_dev *dev, u8 adie,
+ u32 addr, u32 mask, u32 val)
+{
+ int ret;
+ u32 cur;
+
+ ret = mt76_wmac_spi_read(dev, adie, addr, &cur);
+ if (ret)
+ return ret;
+
+ cur &= ~mask;
+ cur |= val;
+
+ return mt76_wmac_spi_write(dev, adie, addr, cur);
+}
+
+static int
+mt7986_wmac_adie_efuse_read(struct mt7915_dev *dev, u8 adie,
+ u32 addr, u32 *data)
+{
+ int ret, temp;
+ u32 val, mask;
+
+ ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_EFUSE_CFG,
+ MT_ADIE_EFUSE_CTRL_MASK);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_EFUSE2_CTRL, BIT(30), 0x0);
+ if (ret)
+ return ret;
+
+ mask = (MT_ADIE_EFUSE_MODE_MASK | MT_ADIE_EFUSE_ADDR_MASK |
+ MT_ADIE_EFUSE_KICK_MASK);
+ val = FIELD_PREP(MT_ADIE_EFUSE_MODE_MASK, 0) |
+ FIELD_PREP(MT_ADIE_EFUSE_ADDR_MASK, addr) |
+ FIELD_PREP(MT_ADIE_EFUSE_KICK_MASK, 1);
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_EFUSE2_CTRL, mask, val);
+ if (ret)
+ return ret;
+
+ ret = read_poll_timeout(mt76_wmac_spi_read, temp,
+ !temp && !FIELD_GET(MT_ADIE_EFUSE_KICK_MASK, val),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, adie, MT_ADIE_EFUSE2_CTRL, &val);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_read(dev, adie, MT_ADIE_EFUSE2_CTRL, &val);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(MT_ADIE_EFUSE_VALID_MASK, val) == 1)
+ ret = mt76_wmac_spi_read(dev, adie, MT_ADIE_EFUSE_RDATA0,
+ data);
+
+ return ret;
+}
+
+static inline void mt76_wmac_spi_lock(struct mt7915_dev *dev)
+{
+ u32 cur;
+
+ read_poll_timeout(mt76_rr, cur,
+ FIELD_GET(MT_SEMA_RFSPI_STATUS_MASK, cur),
+ 1000, 1000 * MSEC_PER_SEC, false, dev,
+ MT_SEMA_RFSPI_STATUS);
+}
+
+static inline void mt76_wmac_spi_unlock(struct mt7915_dev *dev)
+{
+ mt76_wr(dev, MT_SEMA_RFSPI_RELEASE, 1);
+}
+
+static u32 mt76_wmac_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
+{
+ val |= readl(base + offset) & ~mask;
+ writel(val, base + offset);
+
+ return val;
+}
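+
+/* Note: unlike the mt76_rmw() accessor used elsewhere in this file, this
+ * helper operates on a directly ioremapped region, e.g. (taken from the
+ * consys reset path below):
+ *
+ *	mt76_wmac_rmw(dev->sku, MT_TOP_POS_FAST_CTRL,
+ *		      MT_TOP_POS_FAST_EN_MASK,
+ *		      FIELD_PREP(MT_TOP_POS_FAST_EN_MASK, 0x1));
+ */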
+
+static u8 mt7986_wmac_check_adie_type(struct mt7915_dev *dev)
+{
+ u32 val;
+
+ val = readl(dev->sku + MT_TOP_POS_SKU);
+
+ return FIELD_GET(MT_TOP_POS_SKU_ADIE_DBDC_MASK, val);
+}
+
+static int mt7986_wmac_consys_reset(struct mt7915_dev *dev, bool enable)
+{
+ if (!enable)
+ return reset_control_assert(dev->rstc);
+
+ mt76_wmac_rmw(dev->sku, MT_TOP_POS_FAST_CTRL,
+ MT_TOP_POS_FAST_EN_MASK,
+ FIELD_PREP(MT_TOP_POS_FAST_EN_MASK, 0x1));
+
+ return reset_control_deassert(dev->rstc);
+}
+
+static int mt7986_wmac_gpio_setup(struct mt7915_dev *dev)
+{
+ struct pinctrl_state *state;
+ struct pinctrl *pinctrl;
+ int ret;
+ u8 type;
+
+ type = mt7986_wmac_check_adie_type(dev);
+ pinctrl = devm_pinctrl_get(dev->mt76.dev);
+ if (IS_ERR(pinctrl))
+ return PTR_ERR(pinctrl);
+
+ switch (type) {
+ case ADIE_SB:
+ state = pinctrl_lookup_state(pinctrl, "default");
+ if (IS_ERR_OR_NULL(state))
+ return -EINVAL;
+ break;
+ case ADIE_DBDC:
+ state = pinctrl_lookup_state(pinctrl, "dbdc");
+ if (IS_ERR_OR_NULL(state))
+ return -EINVAL;
+ break;
+ }
+
+ ret = pinctrl_select_state(pinctrl, state);
+ if (ret)
+ return ret;
+
+ usleep_range(500, 1000);
+
+ return 0;
+}
+
+static int mt7986_wmac_consys_lockup(struct mt7915_dev *dev, bool enable)
+{
+ int ret;
+ u32 cur;
+
+ mt76_wmac_rmw(dev->dcm, MT_INFRACFG_AP2CONN_SLPPROT,
+ MT_INFRACFG_RX_EN_MASK,
+ FIELD_PREP(MT_INFRACFG_RX_EN_MASK, enable));
+ ret = read_poll_timeout(readl, cur, !(cur & MT_INFRACFG_RX_EN_MASK),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev->dcm + MT_INFRACFG_AP2CONN_SLPPROT);
+ if (ret)
+ return ret;
+
+ mt76_wmac_rmw(dev->dcm, MT_INFRACFG_AP2CONN_SLPPROT,
+ MT_INFRACFG_TX_EN_MASK,
+ FIELD_PREP(MT_INFRACFG_TX_EN_MASK, enable));
+ ret = read_poll_timeout(readl, cur, !(cur & MT_INFRACFG_TX_RDY_MASK),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev->dcm + MT_INFRACFG_AP2CONN_SLPPROT);
+ if (ret)
+ return ret;
+
+ mt76_wmac_rmw(dev->dcm, MT_INFRACFG_CONN2AP_SLPPROT,
+ MT_INFRACFG_RX_EN_MASK,
+ FIELD_PREP(MT_INFRACFG_RX_EN_MASK, enable));
+ mt76_wmac_rmw(dev->dcm, MT_INFRACFG_CONN2AP_SLPPROT,
+ MT_INFRACFG_TX_EN_MASK,
+ FIELD_PREP(MT_INFRACFG_TX_EN_MASK, enable));
+
+ return 0;
+}
+
+static int mt7986_wmac_coninfra_check(struct mt7915_dev *dev)
+{
+ u32 cur;
+
+ return read_poll_timeout(mt76_rr, cur, (cur == 0x02070000),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC,
+ false, dev, MT_CONN_INFRA_BASE);
+}
+
+static int mt7986_wmac_coninfra_setup(struct mt7915_dev *dev)
+{
+ struct device *pdev = dev->mt76.dev;
+ struct reserved_mem *rmem;
+ struct device_node *np;
+ u32 val;
+
+ np = of_parse_phandle(pdev->of_node, "memory-region", 0);
+ if (!np)
+ return -EINVAL;
+
+ rmem = of_reserved_mem_lookup(np);
+ if (!rmem)
+ return -EINVAL;
+
+ val = (rmem->base >> 16) & MT_TOP_MCU_EMI_BASE_MASK;
+
+ /* Set conninfra subsys PLL check */
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS,
+ MT_INFRA_CKGEN_BUS_RDY_SEL_MASK, 0x1);
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS,
+ MT_INFRA_CKGEN_BUS_RDY_SEL_MASK, 0x1);
+
+ mt76_rmw_field(dev, MT_TOP_MCU_EMI_BASE,
+ MT_TOP_MCU_EMI_BASE_MASK, val);
+
+ mt76_wr(dev, MT_INFRA_BUS_EMI_START, rmem->base);
+ mt76_wr(dev, MT_INFRA_BUS_EMI_END, rmem->size);
+
+ mt76_rr(dev, MT_CONN_INFRA_EFUSE);
+
+ /* Set conninfra sysram */
+ mt76_wr(dev, MT_TOP_RGU_SYSRAM_PDN, 0);
+ mt76_wr(dev, MT_TOP_RGU_SYSRAM_SLP, 1);
+
+ return 0;
+}
+
+static int mt7986_wmac_sku_setup(struct mt7915_dev *dev, u32 *adie_type)
+{
+ int ret;
+ u32 adie_main, adie_ext;
+
+ mt76_rmw_field(dev, MT_CONN_INFRA_ADIE_RESET,
+ MT_CONN_INFRA_ADIE1_RESET_MASK, 0x1);
+ mt76_rmw_field(dev, MT_CONN_INFRA_ADIE_RESET,
+ MT_CONN_INFRA_ADIE2_RESET_MASK, 0x1);
+
+ mt76_wmac_spi_lock(dev);
+
+ ret = mt76_wmac_spi_read(dev, 0, MT_ADIE_CHIP_ID, &adie_main);
+ if (ret)
+ goto out;
+
+ ret = mt76_wmac_spi_read(dev, 1, MT_ADIE_CHIP_ID, &adie_ext);
+ if (ret)
+ goto out;
+
+ *adie_type = FIELD_GET(MT_ADIE_CHIP_ID_MASK, adie_main) |
+ (MT_ADIE_CHIP_ID_MASK & adie_ext);
+
+out:
+ mt76_wmac_spi_unlock(dev);
+
+ return 0;
+}
+
+static inline u16 mt7986_adie_idx(u8 adie, u32 adie_type)
+{
+ if (adie == 0)
+ return u32_get_bits(adie_type, MT_ADIE_IDX0);
+ else
+ return u32_get_bits(adie_type, MT_ADIE_IDX1);
+}
+
+static inline bool is_7975(struct mt7915_dev *dev, u8 adie, u32 adie_type)
+{
+ return mt7986_adie_idx(adie, adie_type) == 0x7975;
+}
+
+static inline bool is_7976(struct mt7915_dev *dev, u8 adie, u32 adie_type)
+{
+ return mt7986_adie_idx(adie, adie_type) == 0x7976;
+}
+
+static int mt7986_wmac_adie_thermal_cal(struct mt7915_dev *dev, u8 adie)
+{
+ int ret;
+ u32 data, val;
+
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, MT_ADIE_THADC_ANALOG,
+ &data);
+ if (ret || FIELD_GET(MT_ADIE_ANA_EN_MASK, data)) {
+ val = FIELD_GET(MT_ADIE_VRPI_SEL_EFUSE_MASK, data);
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_RG_TOP_THADC_BG,
+ MT_ADIE_VRPI_SEL_CR_MASK,
+ FIELD_PREP(MT_ADIE_VRPI_SEL_CR_MASK, val));
+ if (ret)
+ return ret;
+
+ val = FIELD_GET(MT_ADIE_PGA_GAIN_EFUSE_MASK, data);
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_RG_TOP_THADC,
+ MT_ADIE_PGA_GAIN_MASK,
+ FIELD_PREP(MT_ADIE_PGA_GAIN_MASK, val));
+ if (ret)
+ return ret;
+ }
+
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, MT_ADIE_THADC_SLOP,
+ &data);
+ if (ret || FIELD_GET(MT_ADIE_ANA_EN_MASK, data)) {
+ val = FIELD_GET(MT_ADIE_LDO_CTRL_EFUSE_MASK, data);
+
+ return mt76_wmac_spi_rmw(dev, adie, MT_ADIE_RG_TOP_THADC,
+ MT_ADIE_LDO_CTRL_MASK,
+ FIELD_PREP(MT_ADIE_LDO_CTRL_MASK, val));
+ }
+
+ return 0;
+}
+
+static int
+mt7986_read_efuse_xo_trim_7976(struct mt7915_dev *dev, u8 adie,
+ bool is_40m, int *result)
+{
+ int ret;
+ u32 data, addr;
+
+ addr = is_40m ? MT_ADIE_XTAL_AXM_40M_OSC : MT_ADIE_XTAL_AXM_80M_OSC;
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, addr, &data);
+ if (ret)
+ return ret;
+
+ if (!FIELD_GET(MT_ADIE_XO_TRIM_EN_MASK, data)) {
+ *result = 64;
+ } else {
+ *result = FIELD_GET(MT_ADIE_TRIM_MASK, data);
+ addr = is_40m ? MT_ADIE_XTAL_TRIM1_40M_OSC :
+ MT_ADIE_XTAL_TRIM1_80M_OSC;
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, addr, &data);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(MT_ADIE_XO_TRIM_EN_MASK, data) &&
+ FIELD_GET(MT_ADIE_XTAL_DECREASE_MASK, data))
+ *result -= FIELD_GET(MT_ADIE_EFUSE_TRIM_MASK, data);
+ else if (FIELD_GET(MT_ADIE_XO_TRIM_EN_MASK, data))
+ *result += FIELD_GET(MT_ADIE_EFUSE_TRIM_MASK, data);
+
+ *result = max(0, min(127, *result));
+ }
+
+ return 0;
+}
+
+static int mt7986_wmac_adie_xtal_trim_7976(struct mt7915_dev *dev, u8 adie)
+{
+ int ret, trim_80m, trim_40m;
+ u32 data, val, mode;
+
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, MT_ADIE_XO_TRIM_FLOW,
+ &data);
+ if (ret || !FIELD_GET(BIT(1), data))
+ return 0;
+
+ ret = mt7986_read_efuse_xo_trim_7976(dev, adie, false, &trim_80m);
+ if (ret)
+ return ret;
+
+ ret = mt7986_read_efuse_xo_trim_7976(dev, adie, true, &trim_40m);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_read(dev, adie, MT_ADIE_RG_STRAP_PIN_IN, &val);
+ if (ret)
+ return ret;
+
+ mode = FIELD_PREP(GENMASK(6, 4), val);
+ if (!mode || mode == 0x2) {
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_XTAL_C1,
+ GENMASK(31, 24),
+ FIELD_PREP(GENMASK(31, 24), trim_80m));
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_XTAL_C2,
+ GENMASK(31, 24),
+ FIELD_PREP(GENMASK(31, 24), trim_80m));
+ } else if (mode == 0x3 || mode == 0x4 || mode == 0x6) {
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_XTAL_C1,
+ GENMASK(23, 16),
+ FIELD_PREP(GENMASK(23, 16), trim_40m));
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_XTAL_C2,
+ GENMASK(23, 16),
+ FIELD_PREP(GENMASK(23, 16), trim_40m));
+ }
+
+ return ret;
+}
+
+static int mt7986_wmac_adie_patch_7976(struct mt7915_dev *dev, u8 adie)
+{
+ int ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_TOP_THADC, 0x4a563b00);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_XO_01, 0x1d59080f);
+ if (ret)
+ return ret;
+
+ return mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_XO_03, 0x34c00fe0);
+}
+
+static int
+mt7986_read_efuse_xo_trim_7975(struct mt7915_dev *dev, u8 adie,
+ u32 addr, u32 *result)
+{
+ int ret;
+ u32 data;
+
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, addr, &data);
+ if (ret)
+ return ret;
+
+ if ((data & MT_ADIE_XO_TRIM_EN_MASK)) {
+ if ((data & MT_ADIE_XTAL_DECREASE_MASK))
+ *result -= (data & MT_ADIE_EFUSE_TRIM_MASK);
+ else
+ *result += (data & MT_ADIE_EFUSE_TRIM_MASK);
+
+ *result = (*result & MT_ADIE_TRIM_MASK);
+ }
+
+ return 0;
+}
+
+static int mt7986_wmac_adie_xtal_trim_7975(struct mt7915_dev *dev, u8 adie)
+{
+ int ret;
+ u32 data, result = 0, value;
+
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, MT_ADIE_7975_XTAL_EN,
+ &data);
+ if (ret || !(data & BIT(1)))
+ return 0;
+
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, MT_ADIE_7975_XTAL_CAL,
+ &data);
+ if (ret)
+ return ret;
+
+ if (data & MT_ADIE_XO_TRIM_EN_MASK)
+ result = (data & MT_ADIE_TRIM_MASK);
+
+ ret = mt7986_read_efuse_xo_trim_7975(dev, adie, MT_ADIE_7975_XO_TRIM2,
+ &result);
+ if (ret)
+ return ret;
+
+ ret = mt7986_read_efuse_xo_trim_7975(dev, adie, MT_ADIE_7975_XO_TRIM3,
+ &result);
+ if (ret)
+ return ret;
+
+ ret = mt7986_read_efuse_xo_trim_7975(dev, adie, MT_ADIE_7975_XO_TRIM4,
+ &result);
+ if (ret)
+ return ret;
+
+ /* Update trim value to C1 and C2 */
+ value = FIELD_GET(MT_ADIE_7975_XO_CTRL2_C1_MASK, result) |
+ FIELD_GET(MT_ADIE_7975_XO_CTRL2_C2_MASK, result);
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_7975_XO_CTRL2,
+ MT_ADIE_7975_XO_CTRL2_MASK, value);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_read(dev, adie, MT_ADIE_7975_XTAL, &value);
+ if (ret)
+ return ret;
+
+ if (value & MT_ADIE_7975_XTAL_EN_MASK) {
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_7975_XO_2,
+ MT_ADIE_7975_XO_2_FIX_EN, 0x0);
+ if (ret)
+ return ret;
+ }
+
+ return mt76_wmac_spi_rmw(dev, adie, MT_ADIE_7975_XO_CTRL6,
+ MT_ADIE_7975_XO_CTRL6_MASK, 0x1);
+}
+
+static int mt7986_wmac_adie_patch_7975(struct mt7915_dev *dev, u8 adie)
+{
+ int ret;
+
+ /* disable CAL LDO and fine tune RFDIG LDO */
+ ret = mt76_wmac_spi_write(dev, adie, 0x348, 0x00000002);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, 0x378, 0x00000002);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, 0x3a8, 0x00000002);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, 0x3d8, 0x00000002);
+ if (ret)
+ return ret;
+
+ /* set CKA driving and filter */
+ ret = mt76_wmac_spi_write(dev, adie, 0xa1c, 0x30000aaa);
+ if (ret)
+ return ret;
+
+ /* set CKB LDO to 1.4V */
+ ret = mt76_wmac_spi_write(dev, adie, 0xa84, 0x8470008a);
+ if (ret)
+ return ret;
+
+ /* turn on SX0 LTBUF */
+ ret = mt76_wmac_spi_write(dev, adie, 0x074, 0x00000002);
+ if (ret)
+ return ret;
+
+ /* CK_BUF_SW_EN = 1 (all bufs in manual mode) */
+ ret = mt76_wmac_spi_write(dev, adie, 0xaa4, 0x01001fc0);
+ if (ret)
+ return ret;
+
+ /* BT mode/WF normal mode 00000005 */
+ ret = mt76_wmac_spi_write(dev, adie, 0x070, 0x00000005);
+ if (ret)
+ return ret;
+
+ /* BG thermal sensor offset update */
+ ret = mt76_wmac_spi_write(dev, adie, 0x344, 0x00000088);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, 0x374, 0x00000088);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, 0x3a4, 0x00000088);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, 0x3d4, 0x00000088);
+ if (ret)
+ return ret;
+
+ /* set WCON VDD IPTAT to "0000" */
+ ret = mt76_wmac_spi_write(dev, adie, 0xa80, 0x44d07000);
+ if (ret)
+ return ret;
+
+ /* change back LTBUF SX3 driving to default value */
+ ret = mt76_wmac_spi_write(dev, adie, 0xa88, 0x3900aaaa);
+ if (ret)
+ return ret;
+
+ /* SM input cap off */
+ ret = mt76_wmac_spi_write(dev, adie, 0x2c4, 0x00000000);
+ if (ret)
+ return ret;
+
+ /* set CKB driving and filter */
+ return mt76_wmac_spi_write(dev, adie, 0x2c8, 0x00000072);
+}
+
+static int mt7986_wmac_adie_cfg(struct mt7915_dev *dev, u8 adie, u32 adie_type)
+{
+ int ret;
+
+ mt76_wmac_spi_lock(dev);
+ ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_CLK_EN, ~0);
+ if (ret)
+ goto out;
+
+ if (is_7975(dev, adie, adie_type)) {
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_7975_COCLK,
+ BIT(1), 0x1);
+ if (ret)
+ goto out;
+
+ ret = mt7986_wmac_adie_thermal_cal(dev, adie);
+ if (ret)
+ goto out;
+
+ ret = mt7986_wmac_adie_xtal_trim_7975(dev, adie);
+ if (ret)
+ goto out;
+
+ ret = mt7986_wmac_adie_patch_7975(dev, adie);
+ } else if (is_7976(dev, adie, adie_type)) {
+ if (mt7986_wmac_check_adie_type(dev) == ADIE_DBDC) {
+ ret = mt76_wmac_spi_write(dev, adie,
+ MT_ADIE_WRI_CK_SEL, 0x1c);
+ if (ret)
+ goto out;
+ }
+
+ ret = mt7986_wmac_adie_thermal_cal(dev, adie);
+ if (ret)
+ goto out;
+
+ ret = mt7986_wmac_adie_xtal_trim_7976(dev, adie);
+ if (ret)
+ goto out;
+
+ ret = mt7986_wmac_adie_patch_7976(dev, adie);
+ }
+out:
+ mt76_wmac_spi_unlock(dev);
+
+ return ret;
+}
+
+static int
+mt7986_wmac_afe_cal(struct mt7915_dev *dev, u8 adie, bool dbdc, u32 adie_type)
+{
+ int ret;
+ u8 idx;
+
+ mt76_wmac_spi_lock(dev);
+ if (is_7975(dev, adie, adie_type))
+ ret = mt76_wmac_spi_write(dev, adie,
+ MT_AFE_RG_ENCAL_WBTAC_IF_SW,
+ 0x80000000);
+ else
+ ret = mt76_wmac_spi_write(dev, adie,
+ MT_AFE_RG_ENCAL_WBTAC_IF_SW,
+ 0x88888005);
+ if (ret)
+ goto out;
+
+ idx = dbdc ? ADIE_DBDC : adie;
+
+ mt76_rmw_field(dev, MT_AFE_DIG_EN_01(idx),
+ MT_AFE_RG_WBG_EN_RCK_MASK, 0x1);
+ usleep_range(60, 100);
+
+ mt76_rmw(dev, MT_AFE_DIG_EN_01(idx),
+ MT_AFE_RG_WBG_EN_RCK_MASK, 0x0);
+
+ mt76_rmw_field(dev, MT_AFE_DIG_EN_03(idx),
+ MT_AFE_RG_WBG_EN_BPLL_UP_MASK, 0x1);
+ usleep_range(30, 100);
+
+ mt76_rmw_field(dev, MT_AFE_DIG_EN_03(idx),
+ MT_AFE_RG_WBG_EN_WPLL_UP_MASK, 0x1);
+ usleep_range(60, 100);
+
+ mt76_rmw_field(dev, MT_AFE_DIG_EN_01(idx),
+ MT_AFE_RG_WBG_EN_TXCAL_MASK, 0x1f);
+ usleep_range(800, 1000);
+
+ mt76_rmw(dev, MT_AFE_DIG_EN_01(idx),
+ MT_AFE_RG_WBG_EN_TXCAL_MASK, 0x0);
+ mt76_rmw(dev, MT_AFE_DIG_EN_03(idx),
+ MT_AFE_RG_WBG_EN_PLL_UP_MASK, 0x0);
+
+ ret = mt76_wmac_spi_write(dev, adie, MT_AFE_RG_ENCAL_WBTAC_IF_SW,
+ 0x5);
+
+out:
+ mt76_wmac_spi_unlock(dev);
+
+ return ret;
+}
+
+static void mt7986_wmac_subsys_pll_initial(struct mt7915_dev *dev, u8 band)
+{
+ mt76_rmw(dev, MT_AFE_PLL_STB_TIME(band),
+ MT_AFE_PLL_STB_TIME_MASK, MT_AFE_PLL_STB_TIME_VAL);
+
+ mt76_rmw(dev, MT_AFE_DIG_EN_02(band),
+ MT_AFE_PLL_CFG_MASK, MT_AFE_PLL_CFG_VAL);
+
+ mt76_rmw(dev, MT_AFE_DIG_TOP_01(band),
+ MT_AFE_DIG_TOP_01_MASK, MT_AFE_DIG_TOP_01_VAL);
+}
+
+static void mt7986_wmac_subsys_setting(struct mt7915_dev *dev)
+{
+ /* Subsys pll init */
+ mt7986_wmac_subsys_pll_initial(dev, 0);
+ mt7986_wmac_subsys_pll_initial(dev, 1);
+
+ /* Set legacy OSC control stable time */
+ mt76_rmw(dev, MT_CONN_INFRA_OSC_RC_EN,
+ MT_CONN_INFRA_OSC_RC_EN_MASK, 0x0);
+ mt76_rmw(dev, MT_CONN_INFRA_OSC_CTRL,
+ MT_CONN_INFRA_OSC_STB_TIME_MASK, 0x80706);
+
+ /* prevent subsys from power on/off in a short time interval */
+ mt76_rmw(dev, MT_TOP_WFSYS_PWR,
+ MT_TOP_PWR_ACK_MASK | MT_TOP_PWR_KEY_MASK,
+ MT_TOP_PWR_KEY);
+}
+
+static int mt7986_wmac_bus_timeout(struct mt7915_dev *dev)
+{
+ mt76_rmw_field(dev, MT_INFRA_BUS_OFF_TIMEOUT,
+ MT_INFRA_BUS_TIMEOUT_LIMIT_MASK, 0x2);
+
+ mt76_rmw_field(dev, MT_INFRA_BUS_OFF_TIMEOUT,
+ MT_INFRA_BUS_TIMEOUT_EN_MASK, 0xf);
+
+ mt76_rmw_field(dev, MT_INFRA_BUS_ON_TIMEOUT,
+ MT_INFRA_BUS_TIMEOUT_LIMIT_MASK, 0xc);
+
+ mt76_rmw_field(dev, MT_INFRA_BUS_ON_TIMEOUT,
+ MT_INFRA_BUS_TIMEOUT_EN_MASK, 0xf);
+
+ return mt7986_wmac_coninfra_check(dev);
+}
+
+static void mt7986_wmac_clock_enable(struct mt7915_dev *dev, u32 adie_type)
+{
+ u32 cur;
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS_WPLL_DIV_1,
+ MT_INFRA_CKGEN_DIV_SEL_MASK, 0x1);
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS_WPLL_DIV_2,
+ MT_INFRA_CKGEN_DIV_SEL_MASK, 0x1);
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS_WPLL_DIV_1,
+ MT_INFRA_CKGEN_DIV_EN_MASK, 0x1);
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS_WPLL_DIV_2,
+ MT_INFRA_CKGEN_DIV_EN_MASK, 0x1);
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_RFSPI_WPLL_DIV,
+ MT_INFRA_CKGEN_DIV_SEL_MASK, 0x8);
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_RFSPI_WPLL_DIV,
+ MT_INFRA_CKGEN_DIV_EN_MASK, 0x1);
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS,
+ MT_INFRA_CKGEN_BUS_CLK_SEL_MASK, 0x0);
+
+ mt76_rmw_field(dev, MT_CONN_INFRA_HW_CTRL,
+ MT_CONN_INFRA_HW_CTRL_MASK, 0x1);
+
+ mt76_rmw(dev, MT_TOP_CONN_INFRA_WAKEUP,
+ MT_TOP_CONN_INFRA_WAKEUP_MASK, 0x1);
+
+ usleep_range(900, 1000);
+
+ mt76_wmac_spi_lock(dev);
+ if (is_7975(dev, 0, adie_type) || is_7976(dev, 0, adie_type)) {
+ mt76_rmw_field(dev, MT_ADIE_SLP_CTRL_CK0(0),
+ MT_SLP_CTRL_EN_MASK, 0x1);
+
+ read_poll_timeout(mt76_rr, cur, !(cur & MT_SLP_CTRL_BSY_MASK),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_ADIE_SLP_CTRL_CK0(0));
+ }
+ if (is_7975(dev, 1, adie_type) || is_7976(dev, 1, adie_type)) {
+ mt76_rmw_field(dev, MT_ADIE_SLP_CTRL_CK0(1),
+ MT_SLP_CTRL_EN_MASK, 0x1);
+
+ read_poll_timeout(mt76_rr, cur, !(cur & MT_SLP_CTRL_BSY_MASK),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_ADIE_SLP_CTRL_CK0(1));
+ }
+ mt76_wmac_spi_unlock(dev);
+
+ mt76_rmw(dev, MT_TOP_CONN_INFRA_WAKEUP,
+ MT_TOP_CONN_INFRA_WAKEUP_MASK, 0x0);
+ usleep_range(900, 1000);
+}
+
+static int mt7986_wmac_top_wfsys_wakeup(struct mt7915_dev *dev, bool enable)
+{
+ mt76_rmw_field(dev, MT_TOP_WFSYS_WAKEUP,
+ MT_TOP_WFSYS_WAKEUP_MASK, enable);
+
+ usleep_range(900, 1000);
+
+ if (!enable)
+ return 0;
+
+ return mt7986_wmac_coninfra_check(dev);
+}
+
+static int mt7986_wmac_wm_enable(struct mt7915_dev *dev, bool enable)
+{
+ u32 cur;
+
+ mt76_rmw_field(dev, MT7986_TOP_WM_RESET,
+ MT7986_TOP_WM_RESET_MASK, enable);
+ if (!enable)
+ return 0;
+
+ return read_poll_timeout(mt76_rr, cur, (cur == 0x1d1e),
+ USEC_PER_MSEC, 5000 * USEC_PER_MSEC, false,
+ dev, MT_TOP_CFG_ON_ROM_IDX);
+}
+
+static int mt7986_wmac_wfsys_poweron(struct mt7915_dev *dev, bool enable)
+{
+ u32 mask = MT_TOP_PWR_EN_MASK | MT_TOP_PWR_KEY_MASK;
+ u32 cur;
+
+ mt76_rmw(dev, MT_TOP_WFSYS_PWR, mask,
+ MT_TOP_PWR_KEY | FIELD_PREP(MT_TOP_PWR_EN_MASK, enable));
+
+ return read_poll_timeout(mt76_rr, cur,
+ (FIELD_GET(MT_TOP_WFSYS_RESET_STATUS_MASK, cur) == enable),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_TOP_WFSYS_RESET_STATUS);
+}
+
+static int mt7986_wmac_wfsys_setting(struct mt7915_dev *dev)
+{
+ int ret;
+ u32 cur;
+
+ /* Turn off wfsys2conn bus sleep protect */
+ mt76_rmw(dev, MT_CONN_INFRA_WF_SLP_PROT,
+ MT_CONN_INFRA_WF_SLP_PROT_MASK, 0x0);
+
+ ret = mt7986_wmac_wfsys_poweron(dev, true);
+ if (ret)
+ return ret;
+
+ /* Check bus sleep protect */
+ ret = read_poll_timeout(mt76_rr, cur,
+ !(cur & MT_CONN_INFRA_CONN_WF_MASK),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_CONN_INFRA_WF_SLP_PROT_RDY);
+ if (ret)
+ return ret;
+
+ ret = read_poll_timeout(mt76_rr, cur, !(cur & MT_SLP_WFDMA2CONN_MASK),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_SLP_STATUS);
+ if (ret)
+ return ret;
+
+ return read_poll_timeout(mt76_rr, cur, (cur == 0x02060000),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_TOP_CFG_IP_VERSION_ADDR);
+}
+
+static void mt7986_wmac_wfsys_set_timeout(struct mt7915_dev *dev)
+{
+ u32 mask = MT_MCU_BUS_TIMEOUT_SET_MASK |
+ MT_MCU_BUS_TIMEOUT_CG_EN_MASK |
+ MT_MCU_BUS_TIMEOUT_EN_MASK;
+ u32 val = FIELD_PREP(MT_MCU_BUS_TIMEOUT_SET_MASK, 1) |
+ FIELD_PREP(MT_MCU_BUS_TIMEOUT_CG_EN_MASK, 1) |
+ FIELD_PREP(MT_MCU_BUS_TIMEOUT_EN_MASK, 1);
+
+ mt76_rmw(dev, MT_MCU_BUS_TIMEOUT, mask, val);
+
+ mt76_wr(dev, MT_MCU_BUS_REMAP, 0x810f0000);
+
+ mask = MT_MCU_BUS_DBG_TIMEOUT_SET_MASK |
+ MT_MCU_BUS_DBG_TIMEOUT_CK_EN_MASK |
+ MT_MCU_BUS_DBG_TIMEOUT_EN_MASK;
+ val = FIELD_PREP(MT_MCU_BUS_DBG_TIMEOUT_SET_MASK, 0x3aa) |
+ FIELD_PREP(MT_MCU_BUS_DBG_TIMEOUT_CK_EN_MASK, 1) |
+ FIELD_PREP(MT_MCU_BUS_DBG_TIMEOUT_EN_MASK, 1);
+
+ mt76_rmw(dev, MT_MCU_BUS_DBG_TIMEOUT, mask, val);
+}
+
+static int mt7986_wmac_sku_update(struct mt7915_dev *dev, u32 adie_type)
+{
+ u32 val;
+
+ if (is_7976(dev, 0, adie_type) && is_7976(dev, 1, adie_type))
+ val = 0xf;
+ else if (is_7975(dev, 0, adie_type) && is_7975(dev, 1, adie_type))
+ val = 0xd;
+ else if (is_7976(dev, 0, adie_type))
+ val = 0x7;
+ else if (is_7975(dev, 1, adie_type))
+ val = 0x8;
+ else if (is_7976(dev, 1, adie_type))
+ val = 0xa;
+ else
+ return -EINVAL;
+
+ mt76_wmac_rmw(dev->sku, MT_TOP_POS_SKU, MT_TOP_POS_SKU_MASK,
+ FIELD_PREP(MT_TOP_POS_SKU_MASK, val));
+
+ mt76_wr(dev, MT_CONNINFRA_SKU_DEC_ADDR, val);
+
+ return 0;
+}
+
+static int
+mt7986_wmac_adie_setup(struct mt7915_dev *dev, u8 adie, u32 adie_type)
+{
+ int ret;
+
+ if (!(is_7975(dev, adie, adie_type) || is_7976(dev, adie, adie_type)))
+ return 0;
+
+ ret = mt7986_wmac_adie_cfg(dev, adie, adie_type);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_afe_cal(dev, adie, false, adie_type);
+ if (ret)
+ return ret;
+
+ if (!adie && (mt7986_wmac_check_adie_type(dev) == ADIE_DBDC))
+ ret = mt7986_wmac_afe_cal(dev, adie, true, adie_type);
+
+ return ret;
+}
+
+static int mt7986_wmac_subsys_powerup(struct mt7915_dev *dev, u32 adie_type)
+{
+ int ret;
+
+ mt7986_wmac_subsys_setting(dev);
+
+ ret = mt7986_wmac_bus_timeout(dev);
+ if (ret)
+ return ret;
+
+ mt7986_wmac_clock_enable(dev, adie_type);
+
+ return 0;
+}
+
+static int mt7986_wmac_wfsys_powerup(struct mt7915_dev *dev)
+{
+ int ret;
+
+ ret = mt7986_wmac_wm_enable(dev, false);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_wfsys_setting(dev);
+ if (ret)
+ return ret;
+
+ mt7986_wmac_wfsys_set_timeout(dev);
+
+ return mt7986_wmac_wm_enable(dev, true);
+}
+
+int mt7986_wmac_enable(struct mt7915_dev *dev)
+{
+ int ret;
+ u32 adie_type;
+
+ ret = mt7986_wmac_consys_reset(dev, true);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_gpio_setup(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_consys_lockup(dev, false);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_coninfra_check(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_coninfra_setup(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_sku_setup(dev, &adie_type);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_adie_setup(dev, 0, adie_type);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_adie_setup(dev, 1, adie_type);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_subsys_powerup(dev, adie_type);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_top_wfsys_wakeup(dev, true);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_wfsys_powerup(dev);
+ if (ret)
+ return ret;
+
+ return mt7986_wmac_sku_update(dev, adie_type);
+}
+
+void mt7986_wmac_disable(struct mt7915_dev *dev)
+{
+ u32 cur;
+
+ mt7986_wmac_top_wfsys_wakeup(dev, true);
+
+ /* Turn on wfsys2conn bus sleep protect */
+ mt76_rmw_field(dev, MT_CONN_INFRA_WF_SLP_PROT,
+ MT_CONN_INFRA_WF_SLP_PROT_MASK, 0x1);
+
+ /* Check wfsys2conn bus sleep protect */
+ read_poll_timeout(mt76_rr, cur, !(cur ^ MT_CONN_INFRA_CONN),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_CONN_INFRA_WF_SLP_PROT_RDY);
+
+ mt7986_wmac_wfsys_poweron(dev, false);
+
+ /* Restore the wpll setting */
+ mt76_rmw_field(dev, MT_AFE_DIG_EN_02(0), MT_AFE_MCU_BPLL_CFG_MASK, 0x2);
+ mt76_rmw_field(dev, MT_AFE_DIG_EN_02(0), MT_AFE_WPLL_CFG_MASK, 0x2);
+
+ /* Reset EMI */
+ mt76_rmw_field(dev, MT_CONN_INFRA_EMI_REQ,
+ MT_CONN_INFRA_EMI_REQ_MASK, 0x1);
+ mt76_rmw_field(dev, MT_CONN_INFRA_EMI_REQ,
+ MT_CONN_INFRA_EMI_REQ_MASK, 0x0);
+ mt76_rmw_field(dev, MT_CONN_INFRA_EMI_REQ,
+ MT_CONN_INFRA_INFRA_REQ_MASK, 0x1);
+ mt76_rmw_field(dev, MT_CONN_INFRA_EMI_REQ,
+ MT_CONN_INFRA_INFRA_REQ_MASK, 0x0);
+
+ mt7986_wmac_top_wfsys_wakeup(dev, false);
+ mt7986_wmac_consys_lockup(dev, true);
+ mt7986_wmac_consys_reset(dev, false);
+}
+
+static int mt7986_wmac_init(struct mt7915_dev *dev)
+{
+ struct device *pdev = dev->mt76.dev;
+ struct platform_device *pfdev = to_platform_device(pdev);
+
+ dev->dcm = devm_platform_ioremap_resource(pfdev, 1);
+ if (IS_ERR(dev->dcm))
+ return PTR_ERR(dev->dcm);
+
+ dev->sku = devm_platform_ioremap_resource(pfdev, 2);
+ if (IS_ERR(dev->sku))
+ return PTR_ERR(dev->sku);
+
+ dev->rstc = devm_reset_control_get(pdev, "consys");
+ if (IS_ERR(dev->rstc))
+ return PTR_ERR(dev->rstc);
+
+ return mt7986_wmac_enable(dev);
+}
+
+static int mt7986_wmac_probe(struct platform_device *pdev)
+{
+ void __iomem *mem_base;
+ struct mt7915_dev *dev;
+ struct mt76_dev *mdev;
+ int irq, ret;
+ u32 chip_id;
+
+ chip_id = (uintptr_t)of_device_get_match_data(&pdev->dev);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ mem_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mem_base)) {
+ dev_err(&pdev->dev, "Failed to get memory resource\n");
+ return PTR_ERR(mem_base);
+ }
+
+ dev = mt7915_mmio_probe(&pdev->dev, mem_base, chip_id);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ mdev = &dev->mt76;
+ ret = devm_request_irq(mdev->dev, irq, mt7915_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto free_device;
+
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
+ ret = mt7986_wmac_init(dev);
+ if (ret)
+ goto free_irq;
+
+ ret = mt7915_register_device(dev);
+ if (ret)
+ goto free_irq;
+
+ return 0;
+
+free_irq:
+ devm_free_irq(mdev->dev, irq, dev);
+
+free_device:
+ mt76_free_device(&dev->mt76);
+
+ return ret;
+}
+
+static int mt7986_wmac_remove(struct platform_device *pdev)
+{
+ struct mt7915_dev *dev = platform_get_drvdata(pdev);
+
+ mt7915_unregister_device(dev);
+
+ return 0;
+}
+
+static const struct of_device_id mt7986_wmac_of_match[] = {
+ { .compatible = "mediatek,mt7986-wmac", .data = (u32 *)0x7986 },
+ {},
+};
+
+struct platform_driver mt7986_wmac_driver = {
+ .driver = {
+ .name = "mt7986-wmac",
+ .of_match_table = mt7986_wmac_of_match,
+ },
+ .probe = mt7986_wmac_probe,
+ .remove = mt7986_wmac_remove,
+};
+
+MODULE_FIRMWARE(MT7986_FIRMWARE_WA);
+MODULE_FIRMWARE(MT7986_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7986_FIRMWARE_WM_MT7975);
+MODULE_FIRMWARE(MT7986_ROM_PATCH);
+MODULE_FIRMWARE(MT7986_ROM_PATCH_MT7975);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
index af80c2cf8c83..20f63644e929 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
@@ -23,30 +23,16 @@ struct reg_band {
u32 band[2];
};
-#define REG_BAND(_reg) \
- { .band[0] = MT_##_reg(0), .band[1] = MT_##_reg(1) }
-#define REG_BAND_IDX(_reg, _idx) \
- { .band[0] = MT_##_reg(0, _idx), .band[1] = MT_##_reg(1, _idx) }
-
-static const struct reg_band reg_backup_list[] = {
- REG_BAND_IDX(AGG_PCR0, 0),
- REG_BAND_IDX(AGG_PCR0, 1),
- REG_BAND_IDX(AGG_AWSCR0, 0),
- REG_BAND_IDX(AGG_AWSCR0, 1),
- REG_BAND_IDX(AGG_AWSCR0, 2),
- REG_BAND_IDX(AGG_AWSCR0, 3),
- REG_BAND(AGG_MRCR),
- REG_BAND(TMAC_TFCR0),
- REG_BAND(TMAC_TCR0),
- REG_BAND(AGG_ATCR1),
- REG_BAND(AGG_ATCR3),
- REG_BAND(TMAC_TRCR0),
- REG_BAND(TMAC_ICR0),
- REG_BAND_IDX(ARB_DRNGR0, 0),
- REG_BAND_IDX(ARB_DRNGR0, 1),
- REG_BAND(WF_RFCR),
- REG_BAND(WF_RFCR1),
-};
+#define REG_BAND(_list, _reg) \
+ { _list.band[0] = MT_##_reg(0); \
+ _list.band[1] = MT_##_reg(1); }
+#define REG_BAND_IDX(_list, _reg, _idx) \
+ { _list.band[0] = MT_##_reg(0, _idx); \
+ _list.band[1] = MT_##_reg(1, _idx); }
+
+#define TM_REG_MAX_ID 17
+static struct reg_band reg_backup_list[TM_REG_MAX_ID];
+
static int
mt7915_tm_set_tx_power(struct mt7915_phy *phy)
@@ -212,7 +198,6 @@ mt7915_tm_set_ipg_params(struct mt7915_phy *phy, u32 ipg, u8 mode)
u8 slot_time = 9, sifs = TM_DEFAULT_SIFS;
u8 aifsn = TM_MIN_AIFSN;
u32 i2t_time, tr2t_time, txv_time;
- bool ext_phy = phy != &dev->phy;
u16 cw = 0;
if (ipg < sig_ext + slot_time + sifs)
@@ -242,29 +227,25 @@ mt7915_tm_set_ipg_params(struct mt7915_phy *phy, u32 ipg, u8 mode)
ipg -= aifsn * slot_time;
- if (ipg > TM_DEFAULT_SIFS) {
- if (ipg < TM_MAX_SIFS)
- sifs = ipg;
- else
- sifs = TM_MAX_SIFS;
- }
+ if (ipg > TM_DEFAULT_SIFS)
+ sifs = min_t(u32, ipg, TM_MAX_SIFS);
}
done:
- txv_time = mt76_get_field(dev, MT_TMAC_ATCR(ext_phy),
+ txv_time = mt76_get_field(dev, MT_TMAC_ATCR(phy->band_idx),
MT_TMAC_ATCR_TXV_TOUT);
txv_time *= 50; /* normal clock time */
i2t_time = (slot_time * 1000 - txv_time - BBP_PROC_TIME) / 50;
tr2t_time = (sifs * 1000 - txv_time - BBP_PROC_TIME) / 50;
- mt76_set(dev, MT_TMAC_TRCR0(ext_phy),
+ mt76_set(dev, MT_TMAC_TRCR0(phy->band_idx),
FIELD_PREP(MT_TMAC_TRCR0_TR2T_CHK, tr2t_time) |
FIELD_PREP(MT_TMAC_TRCR0_I2T_CHK, i2t_time));
mt7915_tm_set_slot_time(phy, slot_time, sifs);
return mt7915_tm_set_wmm_qid(dev,
- mt7915_lmac_mapping(dev, IEEE80211_AC_BE),
+ mt76_connac_lmac_mapping(IEEE80211_AC_BE),
aifsn, cw, cw, 0);
}
@@ -290,6 +271,8 @@ mt7915_tm_set_tx_len(struct mt7915_phy *phy, u32 tx_time)
case MT76_TM_TX_MODE_OFDM:
if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
sband = &mphy->sband_5g.sband;
+ else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
+ sband = &mphy->sband_6g.sband;
else
sband = &mphy->sband_2g.sband;
@@ -351,13 +334,30 @@ mt7915_tm_reg_backup_restore(struct mt7915_phy *phy)
{
int n_regs = ARRAY_SIZE(reg_backup_list);
struct mt7915_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
u32 *b = phy->test.reg_backup;
int i;
+ REG_BAND_IDX(reg_backup_list[0], AGG_PCR0, 0);
+ REG_BAND_IDX(reg_backup_list[1], AGG_PCR0, 1);
+ REG_BAND_IDX(reg_backup_list[2], AGG_AWSCR0, 0);
+ REG_BAND_IDX(reg_backup_list[3], AGG_AWSCR0, 1);
+ REG_BAND_IDX(reg_backup_list[4], AGG_AWSCR0, 2);
+ REG_BAND_IDX(reg_backup_list[5], AGG_AWSCR0, 3);
+ REG_BAND(reg_backup_list[6], AGG_MRCR);
+ REG_BAND(reg_backup_list[7], TMAC_TFCR0);
+ REG_BAND(reg_backup_list[8], TMAC_TCR0);
+ REG_BAND(reg_backup_list[9], AGG_ATCR1);
+ REG_BAND(reg_backup_list[10], AGG_ATCR3);
+ REG_BAND(reg_backup_list[11], TMAC_TRCR0);
+ REG_BAND(reg_backup_list[12], TMAC_ICR0);
+ REG_BAND_IDX(reg_backup_list[13], ARB_DRNGR0, 0);
+ REG_BAND_IDX(reg_backup_list[14], ARB_DRNGR0, 1);
+ REG_BAND(reg_backup_list[15], WF_RFCR);
+ REG_BAND(reg_backup_list[16], WF_RFCR1);
+
if (phy->mt76->test.state == MT76_TM_STATE_OFF) {
for (i = 0; i < n_regs; i++)
- mt76_wr(dev, reg_backup_list[i].band[ext_phy], b[i]);
+ mt76_wr(dev, reg_backup_list[i].band[phy->band_idx], b[i]);
return;
}
@@ -368,33 +368,33 @@ mt7915_tm_reg_backup_restore(struct mt7915_phy *phy)
phy->test.reg_backup = b;
for (i = 0; i < n_regs; i++)
- b[i] = mt76_rr(dev, reg_backup_list[i].band[ext_phy]);
+ b[i] = mt76_rr(dev, reg_backup_list[i].band[phy->band_idx]);
}
- mt76_clear(dev, MT_AGG_PCR0(ext_phy, 0), MT_AGG_PCR0_MM_PROT |
+ mt76_clear(dev, MT_AGG_PCR0(phy->band_idx, 0), MT_AGG_PCR0_MM_PROT |
MT_AGG_PCR0_GF_PROT | MT_AGG_PCR0_ERP_PROT |
MT_AGG_PCR0_VHT_PROT | MT_AGG_PCR0_BW20_PROT |
MT_AGG_PCR0_BW40_PROT | MT_AGG_PCR0_BW80_PROT);
- mt76_set(dev, MT_AGG_PCR0(ext_phy, 0), MT_AGG_PCR0_PTA_WIN_DIS);
+ mt76_set(dev, MT_AGG_PCR0(phy->band_idx, 0), MT_AGG_PCR0_PTA_WIN_DIS);
- mt76_wr(dev, MT_AGG_PCR0(ext_phy, 1), MT_AGG_PCR1_RTS0_NUM_THRES |
+ mt76_wr(dev, MT_AGG_PCR0(phy->band_idx, 1), MT_AGG_PCR1_RTS0_NUM_THRES |
MT_AGG_PCR1_RTS0_LEN_THRES);
- mt76_clear(dev, MT_AGG_MRCR(ext_phy), MT_AGG_MRCR_BAR_CNT_LIMIT |
+ mt76_clear(dev, MT_AGG_MRCR(phy->band_idx), MT_AGG_MRCR_BAR_CNT_LIMIT |
MT_AGG_MRCR_LAST_RTS_CTS_RN | MT_AGG_MRCR_RTS_FAIL_LIMIT |
MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT);
- mt76_rmw(dev, MT_AGG_MRCR(ext_phy), MT_AGG_MRCR_RTS_FAIL_LIMIT |
+ mt76_rmw(dev, MT_AGG_MRCR(phy->band_idx), MT_AGG_MRCR_RTS_FAIL_LIMIT |
MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT,
FIELD_PREP(MT_AGG_MRCR_RTS_FAIL_LIMIT, 1) |
FIELD_PREP(MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT, 1));
- mt76_wr(dev, MT_TMAC_TFCR0(ext_phy), 0);
- mt76_clear(dev, MT_TMAC_TCR0(ext_phy), MT_TMAC_TCR0_TBTT_STOP_CTRL);
+ mt76_wr(dev, MT_TMAC_TFCR0(phy->band_idx), 0);
+ mt76_clear(dev, MT_TMAC_TCR0(phy->band_idx), MT_TMAC_TCR0_TBTT_STOP_CTRL);
/* config rx filter for testmode rx */
- mt76_wr(dev, MT_WF_RFCR(ext_phy), 0xcf70a);
- mt76_wr(dev, MT_WF_RFCR1(ext_phy), 0);
+ mt76_wr(dev, MT_WF_RFCR(phy->band_idx), 0xcf70a);
+ mt76_wr(dev, MT_WF_RFCR1(phy->band_idx), 0);
}
static void
@@ -452,7 +452,7 @@ mt7915_tm_set_tx_frames(struct mt7915_phy *phy, bool en)
u8 tx_ant = td->tx_antenna_mask;
if (phy != &dev->phy)
- tx_ant >>= 2;
+ tx_ant >>= dev->chainshift;
phy->test.spe_idx = spe_idx_map[tx_ant];
}
}
@@ -574,6 +574,8 @@ mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en)
if (chandef->chan->band == NL80211_BAND_5GHZ)
sband = &phy->mt76->sband_5g.sband;
+ else if (chandef->chan->band == NL80211_BAND_6GHZ)
+ sband = &phy->mt76->sband_6g.sband;
else
sband = &phy->mt76->sband_2g.sband;
@@ -720,11 +722,11 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
{
struct mt7915_phy *phy = mphy->priv;
struct mt7915_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
enum mt76_rxq_id q;
void *rx, *rssi;
u16 fcs_err;
int i;
+ u32 cnt;
rx = nla_nest_start(msg, MT76_TM_STATS_ATTR_LAST_RX);
if (!rx)
@@ -768,9 +770,11 @@ mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
nla_nest_end(msg, rx);
- fcs_err = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
- MT_MIB_SDR3_FCS_ERR_MASK);
- q = ext_phy ? MT_RXQ_EXT : MT_RXQ_MAIN;
+ cnt = mt76_rr(dev, MT_MIB_SDR3(phy->band_idx));
+ fcs_err = is_mt7915(&dev->mt76) ? FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
+
+ q = phy->band_idx ? MT_RXQ_EXT : MT_RXQ_MAIN;
mphy->test.rx_stats.packets[q] += fcs_err;
mphy->test.rx_stats.fcs_error[q] += fcs_err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig
index 71154fc2a87c..adff2d7350b5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig
@@ -24,3 +24,14 @@ config MT7921S
This adds support for MT7921S 802.11ax 2x2:2SS wireless devices.
To compile this driver as a module, choose M here.
+
+config MT7921U
+ tristate "MediaTek MT7921U (USB) support"
+ select MT76_USB
+ select MT7921_COMMON
+ depends on MAC80211
+ depends on USB
+ help
+ This adds support for MT7921U 802.11ax 2x2:2SS wireless devices.
+
+ To compile this driver as a module, choose M here.
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/Makefile b/drivers/net/wireless/mediatek/mt76/mt7921/Makefile
index 1187acedfeda..0a146818c623 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/Makefile
@@ -3,6 +3,7 @@
obj-$(CONFIG_MT7921_COMMON) += mt7921-common.o
obj-$(CONFIG_MT7921E) += mt7921e.o
obj-$(CONFIG_MT7921S) += mt7921s.o
+obj-$(CONFIG_MT7921U) += mt7921u.o
CFLAGS_trace.o := -I$(src)
@@ -10,3 +11,4 @@ mt7921-common-y := mac.o mcu.o main.o init.o debugfs.o trace.o
mt7921-common-$(CONFIG_NL80211_TESTMODE) += testmode.o
mt7921e-y := pci.o pci_mac.o pci_mcu.o dma.o
mt7921s-y := sdio.o sdio_mac.o sdio_mcu.o
+mt7921u-y := usb.o usb_mac.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
index 86fd7292b229..bce76417f95d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
@@ -129,23 +129,22 @@ mt7921_queues_acq(struct seq_file *s, void *data)
mt7921_mutex_acquire(dev);
- for (i = 0; i < 16; i++) {
- int j, acs = i / 4, index = i % 4;
+ for (i = 0; i < 4; i++) {
u32 ctrl, val, qlen = 0;
+ int j;
- val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, index));
- ctrl = BIT(31) | BIT(15) | (acs << 8);
+ val = mt76_rr(dev, MT_PLE_AC_QEMPTY(i));
+ ctrl = BIT(31) | BIT(11) | (i << 24);
for (j = 0; j < 32; j++) {
if (val & BIT(j))
continue;
- mt76_wr(dev, MT_PLE_FL_Q0_CTRL,
- ctrl | (j + (index << 5)));
+ mt76_wr(dev, MT_PLE_FL_Q0_CTRL, ctrl | j);
qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL,
GENMASK(11, 0));
}
- seq_printf(s, "AC%d%d: queued=%d\n", acs, index, qlen);
+ seq_printf(s, "AC%d: queued=%d\n", i, qlen);
}
mt7921_mutex_release(dev);
@@ -262,26 +261,21 @@ mt7921_txpwr(struct seq_file *s, void *data)
return 0;
}
-static void
-mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
-{
- struct mt7921_dev *dev = priv;
-
- mt7921_mcu_set_beacon_filter(dev, vif, dev->pm.enable);
-}
-
static int
mt7921_pm_set(void *data, u64 val)
{
struct mt7921_dev *dev = data;
struct mt76_connac_pm *pm = &dev->pm;
+ if (mt76_is_usb(&dev->mt76))
+ return -EOPNOTSUPP;
+
mutex_lock(&dev->mt76.mutex);
- if (val == pm->enable)
+ if (val == pm->enable_user)
goto out;
- if (!pm->enable) {
+ if (!pm->enable_user) {
pm->stats.last_wake_event = jiffies;
pm->stats.last_doze_event = jiffies;
}
@@ -291,13 +285,8 @@ mt7921_pm_set(void *data, u64 val)
pm->enable = false;
mt76_connac_pm_wake(&dev->mphy, pm);
- ieee80211_iterate_active_interfaces(mt76_hw(dev),
- IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7921_pm_interface_iter, dev);
-
- mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
-
- pm->enable = val;
+ pm->enable_user = val;
+ mt7921_set_runtime_pm(dev);
mt76_connac_power_save_sched(&dev->mphy, pm);
out:
mutex_unlock(&dev->mt76.mutex);
@@ -310,7 +299,7 @@ mt7921_pm_get(void *data, u64 *val)
{
struct mt7921_dev *dev = data;
- *val = dev->pm.enable;
+ *val = dev->pm.enable_user;
return 0;
}
@@ -322,13 +311,20 @@ mt7921_deep_sleep_set(void *data, u64 val)
{
struct mt7921_dev *dev = data;
struct mt76_connac_pm *pm = &dev->pm;
+ bool monitor = !!(dev->mphy.hw->conf.flags & IEEE80211_CONF_MONITOR);
bool enable = !!val;
+ if (mt76_is_usb(&dev->mt76))
+ return -EOPNOTSUPP;
+
mt7921_mutex_acquire(dev);
- if (pm->ds_enable != enable) {
- mt76_connac_mcu_set_deep_sleep(&dev->mt76, enable);
- pm->ds_enable = enable;
- }
+ if (pm->ds_enable_user == enable)
+ goto out;
+
+ pm->ds_enable_user = enable;
+ pm->ds_enable = enable && !monitor;
+ mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
+out:
mt7921_mutex_release(dev);
return 0;
@@ -339,7 +335,7 @@ mt7921_deep_sleep_get(void *data, u64 *val)
{
struct mt7921_dev *dev = data;
- *val = dev->pm.ds_enable;
+ *val = dev->pm.ds_enable_user;
return 0;
}
@@ -438,8 +434,13 @@ int mt7921_init_debugfs(struct mt7921_dev *dev)
if (!dir)
return -ENOMEM;
- debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
- mt7921_queues_read);
+ if (mt76_is_mmio(&dev->mt76))
+ debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues",
+ dir, mt7921_queues_read);
+ else
+ debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues",
+ dir, mt76_queues_read);
+
debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir,
mt7921_queues_acq);
debugfs_create_devm_seqfile(dev->mt76.dev, "txpower_sku", dir,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
index cdff1fd52d93..ca7e20fb5fc0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
@@ -5,7 +5,7 @@
#include "../dma.h"
#include "mac.h"
-int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc)
+static int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc)
{
int i, err;
@@ -78,110 +78,6 @@ static void mt7921_dma_prefetch(struct mt7921_dev *dev)
mt76_wr(dev, MT_WFDMA0_TX_RING17_EXT_CTRL, PREFETCH(0x380, 0x4));
}
-static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
-{
- static const struct {
- u32 phys;
- u32 mapped;
- u32 size;
- } fixed_map[] = {
- { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
- { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
- { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
- { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
- { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
- { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
- { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
- { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
- { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
- { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure register) */
- { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
- { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
- { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
- { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
- { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
- { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
- { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
- { 0x7c060000, 0xe0000, 0x10000 }, /* CONN_INFRA, conn_host_csr_top */
- { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
- { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
- { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
- { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
- { 0x820cc000, 0x0e000, 0x1000 }, /* WF_UMAC_TOP (PP) */
- { 0x820cd000, 0x0f000, 0x1000 }, /* WF_MDP_TOP */
- { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
- { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
- { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
- { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
- { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
- { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
- { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
- { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
- { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
- { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
- { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
- { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
- { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
- { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
- { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
- { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
- { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
- { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
- { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
- };
- int i;
-
- if (addr < 0x100000)
- return addr;
-
- for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
- u32 ofs;
-
- if (addr < fixed_map[i].phys)
- continue;
-
- ofs = addr - fixed_map[i].phys;
- if (ofs > fixed_map[i].size)
- continue;
-
- return fixed_map[i].mapped + ofs;
- }
-
- if ((addr >= 0x18000000 && addr < 0x18c00000) ||
- (addr >= 0x70000000 && addr < 0x78000000) ||
- (addr >= 0x7c000000 && addr < 0x7c400000))
- return mt7921_reg_map_l1(dev, addr);
-
- dev_err(dev->mt76.dev, "Access currently unsupported address %08x\n",
- addr);
-
- return 0;
-}
-
-static u32 mt7921_rr(struct mt76_dev *mdev, u32 offset)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- u32 addr = __mt7921_reg_addr(dev, offset);
-
- return dev->bus_ops->rr(mdev, addr);
-}
-
-static void mt7921_wr(struct mt76_dev *mdev, u32 offset, u32 val)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- u32 addr = __mt7921_reg_addr(dev, offset);
-
- dev->bus_ops->wr(mdev, addr, val);
-}
-
-static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- u32 addr = __mt7921_reg_addr(dev, offset);
-
- return dev->bus_ops->rmw(mdev, addr, mask, val);
-}
-
static int mt7921_dma_disable(struct mt7921_dev *dev, bool force)
{
if (force) {
@@ -341,23 +237,8 @@ int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev)
int mt7921_dma_init(struct mt7921_dev *dev)
{
- struct mt76_bus_ops *bus_ops;
int ret;
- dev->phy.dev = dev;
- dev->phy.mt76 = &dev->mt76.phy;
- dev->mt76.phy.priv = &dev->phy;
- dev->bus_ops = dev->mt76.bus;
- bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
- GFP_KERNEL);
- if (!bus_ops)
- return -ENOMEM;
-
- bus_ops->rr = mt7921_rr;
- bus_ops->wr = mt7921_wr;
- bus_ops->rmw = mt7921_rmw;
- dev->mt76.bus = bus_ops;
-
mt76_dma_attach(&dev->mt76);
ret = mt7921_dma_disable(dev, true);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index ad59ef9839dc..91fc41922d95 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -165,7 +165,7 @@ out:
static int mt7921_init_hardware(struct mt7921_dev *dev)
{
- int ret, idx, i;
+ int ret, i;
set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
@@ -182,6 +182,13 @@ static int mt7921_init_hardware(struct mt7921_dev *dev)
return ret;
}
+ return 0;
+}
+
+static int mt7921_init_wcid(struct mt7921_dev *dev)
+{
+ int idx;
+
/* Beacon and mgmt frames should occupy wcid 0 */
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7921_WTBL_STA - 1);
if (idx)
@@ -195,6 +202,38 @@ static int mt7921_init_hardware(struct mt7921_dev *dev)
return 0;
}
+static void mt7921_init_work(struct work_struct *work)
+{
+ struct mt7921_dev *dev = container_of(work, struct mt7921_dev,
+ init_work);
+ int ret;
+
+ ret = mt7921_init_hardware(dev);
+ if (ret)
+ return;
+
+ mt76_set_stream_caps(&dev->mphy, true);
+ mt7921_set_stream_he_caps(&dev->phy);
+
+ ret = mt76_register_device(&dev->mt76, true, mt76_rates,
+ ARRAY_SIZE(mt76_rates));
+ if (ret) {
+ dev_err(dev->mt76.dev, "register device failed\n");
+ return;
+ }
+
+ ret = mt7921_init_debugfs(dev);
+ if (ret) {
+ dev_err(dev->mt76.dev, "register debugfs failed\n");
+ return;
+ }
+
+ /* we support chip reset now */
+ dev->hw_init_done = true;
+
+ mt76_connac_mcu_set_deep_sleep(&dev->mt76, dev->pm.ds_enable);
+}
+
int mt7921_register_device(struct mt7921_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
@@ -222,21 +261,22 @@ int mt7921_register_device(struct mt7921_dev *dev)
spin_lock_init(&dev->sta_poll_lock);
INIT_WORK(&dev->reset_work, mt7921_mac_reset_work);
+ INIT_WORK(&dev->init_work, mt7921_init_work);
dev->pm.idle_timeout = MT7921_PM_TIMEOUT;
dev->pm.stats.last_wake_event = jiffies;
dev->pm.stats.last_doze_event = jiffies;
-
- /* TODO: mt7921s run sleep mode on default */
- if (mt76_is_mmio(&dev->mt76)) {
+ if (!mt76_is_usb(&dev->mt76)) {
+ dev->pm.enable_user = true;
dev->pm.enable = true;
+ dev->pm.ds_enable_user = true;
dev->pm.ds_enable = true;
}
- if (mt76_is_sdio(&dev->mt76))
+ if (!mt76_is_mmio(&dev->mt76))
hw->extra_tx_headroom += MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE;
- ret = mt7921_init_hardware(dev);
+ ret = mt7921_init_wcid(dev);
if (ret)
return ret;
@@ -264,23 +304,7 @@ int mt7921_register_device(struct mt7921_dev *dev)
dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask;
dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask;
- mt76_set_stream_caps(&dev->mphy, true);
- mt7921_set_stream_he_caps(&dev->phy);
-
- ret = mt76_register_device(&dev->mt76, true, mt76_rates,
- ARRAY_SIZE(mt76_rates));
- if (ret)
- return ret;
-
- ret = mt7921_init_debugfs(dev);
- if (ret)
- return ret;
-
- ret = mt76_connac_mcu_set_deep_sleep(&dev->mt76, dev->pm.ds_enable);
- if (ret)
- return ret;
-
- dev->hw_init_done = true;
+ queue_work(system_wq, &dev->init_work);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
index ec10f95a4649..233998ca4857 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -116,7 +116,7 @@ void mt7921_mac_sta_poll(struct mt7921_dev *dev)
sta = container_of((void *)msta, struct ieee80211_sta,
drv_priv);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- u8 q = mt7921_lmac_mapping(dev, i);
+ u8 q = mt76_connac_lmac_mapping(i);
u32 tx_cur = tx_time[q];
u32 rx_cur = rx_time[q];
u8 tid = ac_to_tid[i];
@@ -176,8 +176,8 @@ mt7921_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
u32 ru_h, ru_l;
u8 ru, offs = 0;
- ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv[0]));
- ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv[1]));
+ ru_l = le32_get_bits(rxv[0], MT_PRXV_HE_RU_ALLOC_L);
+ ru_h = le32_get_bits(rxv[1], MT_PRXV_HE_RU_ALLOC_H);
ru = (u8)(ru_l | ru_h << 4);
status->bw = RATE_INFO_BW_HE_RU;
@@ -247,19 +247,19 @@ mt7921_mac_decode_he_mu_radiotap(struct sk_buff *skb, __le32 *rxv)
MU_PREP(FLAGS2_SIG_B_SYMS_USERS,
le32_get_bits(rxv[2], MT_CRXV_HE_NUM_USER));
- he_mu->ru_ch1[0] = FIELD_GET(MT_CRXV_HE_RU0, le32_to_cpu(rxv[3]));
+ he_mu->ru_ch1[0] = le32_get_bits(rxv[3], MT_CRXV_HE_RU0);
if (status->bw >= RATE_INFO_BW_40) {
he_mu->flags1 |= HE_BITS(MU_FLAGS1_CH2_RU_KNOWN);
he_mu->ru_ch2[0] =
- FIELD_GET(MT_CRXV_HE_RU1, le32_to_cpu(rxv[3]));
+ le32_get_bits(rxv[3], MT_CRXV_HE_RU1);
}
if (status->bw >= RATE_INFO_BW_80) {
he_mu->ru_ch1[1] =
- FIELD_GET(MT_CRXV_HE_RU2, le32_to_cpu(rxv[3]));
+ le32_get_bits(rxv[3], MT_CRXV_HE_RU2);
he_mu->ru_ch2[1] =
- FIELD_GET(MT_CRXV_HE_RU3, le32_to_cpu(rxv[3]));
+ le32_get_bits(rxv[3], MT_CRXV_HE_RU3);
}
}
@@ -304,15 +304,16 @@ mt7921_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u32 mode)
case MT_PHY_TYPE_HE_SU:
he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
HE_BITS(DATA1_UL_DL_KNOWN) |
- HE_BITS(DATA1_BEAM_CHANGE_KNOWN);
+ HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
+ HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
- he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
break;
case MT_PHY_TYPE_HE_EXT_SU:
he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
- HE_BITS(DATA1_UL_DL_KNOWN);
+ HE_BITS(DATA1_UL_DL_KNOWN) |
+ HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
break;
@@ -402,15 +403,15 @@ mt7921_mac_assoc_rssi(struct mt7921_dev *dev, struct sk_buff *skb)
static int mt7921_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
struct mt7921_sta *msta = (struct mt7921_sta *)status->wcid;
+ __le32 *rxd = (__le32 *)skb->data;
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
struct ieee80211_hdr hdr;
- struct ethhdr eth_hdr;
- __le32 *rxd = (__le32 *)skb->data;
- __le32 qos_ctrl, ht_ctrl;
+ u16 frame_control;
- if (FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[3])) !=
+ if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
MT_RXD3_NORMAL_U2M)
return -EINVAL;
@@ -424,47 +425,52 @@ static int mt7921_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
/* store the info from RXD and ethhdr to avoid being overridden */
- memcpy(&eth_hdr, skb->data + hdr_gap, sizeof(eth_hdr));
- hdr.frame_control = FIELD_GET(MT_RXD6_FRAME_CONTROL, rxd[6]);
- hdr.seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, rxd[8]);
- qos_ctrl = FIELD_GET(MT_RXD8_QOS_CTL, rxd[8]);
- ht_ctrl = FIELD_GET(MT_RXD9_HT_CONTROL, rxd[9]);
-
+ frame_control = le32_get_bits(rxd[6], MT_RXD6_FRAME_CONTROL);
+ hdr.frame_control = cpu_to_le16(frame_control);
+ hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[8], MT_RXD8_SEQ_CTRL));
hdr.duration_id = 0;
+
ether_addr_copy(hdr.addr1, vif->addr);
ether_addr_copy(hdr.addr2, sta->addr);
- switch (le16_to_cpu(hdr.frame_control) &
- (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
+ switch (frame_control & (IEEE80211_FCTL_TODS |
+ IEEE80211_FCTL_FROMDS)) {
case 0:
ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
break;
case IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_source);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_source);
break;
case IEEE80211_FCTL_TODS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
break;
case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
- ether_addr_copy(hdr.addr4, eth_hdr.h_source);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
+ ether_addr_copy(hdr.addr4, eth_hdr->h_source);
break;
default:
break;
}
skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
- if (eth_hdr.h_proto == htons(ETH_P_AARP) ||
- eth_hdr.h_proto == htons(ETH_P_IPX))
+ if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
+ eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
- else if (eth_hdr.h_proto >= htons(ETH_P_802_3_MIN))
+ else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
else
skb_pull(skb, 2);
if (ieee80211_has_order(hdr.frame_control))
- memcpy(skb_push(skb, 2), &ht_ctrl, 2);
- if (ieee80211_is_data_qos(hdr.frame_control))
- memcpy(skb_push(skb, 2), &qos_ctrl, 2);
+ memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[9],
+ IEEE80211_HT_CTL_LEN);
+ if (ieee80211_is_data_qos(hdr.frame_control)) {
+ __le16 qos_ctrl;
+
+ qos_ctrl = cpu_to_le16(le32_get_bits(rxd[8], MT_RXD8_QOS_CTL));
+ memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
+ IEEE80211_QOS_CTL_LEN);
+ }
+
if (ieee80211_has_a4(hdr.frame_control))
memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
else
@@ -666,9 +672,6 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
status->chain_signal[i]);
}
- if (status->signal == -128)
- status->flag |= RX_FLAG_NO_SIGNAL_VAL;
-
stbc = FIELD_GET(MT_PRXV_STBC, v0);
gi = FIELD_GET(MT_PRXV_SGI, v0);
cck = false;
@@ -912,11 +915,18 @@ mt7921_mac_write_txwi_80211(struct mt7921_dev *dev, __le32 *txwi,
val = MT_TXD3_SN_VALID |
FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
txwi[3] |= cpu_to_le32(val);
+ txwi[7] &= ~cpu_to_le32(MT_TXD7_HW_AMSDU);
}
- val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
- FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
- txwi[7] |= cpu_to_le32(val);
+ if (mt76_is_mmio(&dev->mt76)) {
+ val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
+ txwi[7] |= cpu_to_le32(val);
+ } else {
+ val = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype);
+ txwi[8] |= cpu_to_le32(val);
+ }
}
void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
@@ -950,7 +960,7 @@ void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
} else {
p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
q_idx = wmm_idx * MT7921_MAX_WMM_SETS +
- mt7921_lmac_mapping(dev, skb_get_queue_mapping(skb));
+ mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
}
val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
@@ -1016,7 +1026,7 @@ void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
if (!sta || !(sta->ht_cap.ht_supported || sta->he_cap.has_he))
return;
- tid = FIELD_GET(MT_TXD1_TID, le32_to_cpu(txwi[1]));
+ tid = le32_get_bits(txwi[1], MT_TXD1_TID);
if (tid >= 6) /* skip VO queue */
return;
@@ -1092,7 +1102,6 @@ mt7921_mac_add_txs_skb(struct mt7921_dev *dev, struct mt76_wcid *wcid, int pid,
break;
case MT_PHY_TYPE_HT:
case MT_PHY_TYPE_HT_GF:
- rate.mcs += (rate.nss - 1) * 8;
if (rate.mcs > 31)
goto out;
@@ -1156,18 +1165,13 @@ void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data)
struct mt76_wcid *wcid;
__le32 *txs_data = data;
u16 wcidx;
- u32 txs;
u8 pid;
- txs = le32_to_cpu(txs_data[0]);
- if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1)
+ if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
return;
- txs = le32_to_cpu(txs_data[2]);
- wcidx = FIELD_GET(MT_TXS2_WCID, txs);
-
- txs = le32_to_cpu(txs_data[3]);
- pid = FIELD_GET(MT_TXS3_PID, txs);
+ wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
+ pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
if (pid < MT_PACKET_ID_FIRST)
return;
@@ -1195,6 +1199,7 @@ void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data)
out:
rcu_read_unlock();
}
+EXPORT_SYMBOL_GPL(mt7921_mac_add_txs);
void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
@@ -1205,8 +1210,8 @@ void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
enum rx_pkt_type type;
u16 flag;
- type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
- flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0]));
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+ flag = le32_get_bits(rxd[0], MT_RXD0_PKT_FLAG);
if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
type = PKT_TYPE_NORMAL_MCU;
@@ -1548,7 +1553,16 @@ void mt7921_pm_power_save_work(struct work_struct *work)
delta = dev->pm.idle_timeout;
if (test_bit(MT76_HW_SCANNING, &mphy->state) ||
- test_bit(MT76_HW_SCHED_SCANNING, &mphy->state))
+ test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) ||
+ dev->fw_assert)
+ goto out;
+
+ if (mutex_is_locked(&dev->mt76.mutex))
+ /* if the mt76 mutex is held, we should not put the device
+ * to sleep since we are currently accessing the device
+ * register map; wait for the next power_save trigger.
+ */
goto out;
if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
@@ -1610,3 +1624,94 @@ void mt7921_coredump_work(struct work_struct *work)
mt7921_reset(&dev->mt76);
}
+
+/* usb_sdio */
+static void
+mt7921_usb_sdio_write_txwi(struct mt7921_dev *dev, struct mt76_wcid *wcid,
+ enum mt76_txq_id qid, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key, int pid,
+ struct sk_buff *skb)
+{
+ __le32 *txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE);
+
+ memset(txwi, 0, MT_SDIO_TXD_SIZE);
+ mt7921_mac_write_txwi(dev, txwi, skb, wcid, key, pid, false);
+ skb_push(skb, MT_SDIO_TXD_SIZE);
+}
+
+int mt7921_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ struct sk_buff *skb = tx_info->skb;
+ int err, pad, pktid, type;
+
+ if (unlikely(tx_info->skb->len <= ETH_HLEN))
+ return -EINVAL;
+
+ if (!wcid)
+ wcid = &dev->mt76.global_wcid;
+
+ if (sta) {
+ struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
+
+ if (time_after(jiffies, msta->last_txs + HZ / 4)) {
+ info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+ msta->last_txs = jiffies;
+ }
+ }
+
+ pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
+ mt7921_usb_sdio_write_txwi(dev, wcid, qid, sta, key, pktid, skb);
+
+ type = mt76_is_sdio(mdev) ? MT7921_SDIO_DATA : 0;
+ mt7921_skb_add_usb_sdio_hdr(dev, skb, type);
+ pad = round_up(skb->len, 4) - skb->len;
+ if (mt76_is_usb(mdev))
+ pad += 4;
+
+ err = mt76_skb_adjust_pad(skb, pad);
+ if (err)
+ /* Release pktid in case of error. */
+ idr_remove(&wcid->pktid, pktid);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt7921_usb_sdio_tx_prepare_skb);
+
+void mt7921_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
+ struct mt76_queue_entry *e)
+{
+ __le32 *txwi = (__le32 *)(e->skb->data + MT_SDIO_HDR_SIZE);
+ unsigned int headroom = MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE;
+ struct ieee80211_sta *sta;
+ struct mt76_wcid *wcid;
+ u16 idx;
+
+ idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
+ wcid = rcu_dereference(mdev->wcid[idx]);
+ sta = wcid_to_sta(wcid);
+
+ if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ mt7921_tx_check_aggr(sta, txwi);
+
+ skb_pull(e->skb, headroom);
+ mt76_tx_complete_skb(mdev, e->wcid, e->skb);
+}
+EXPORT_SYMBOL_GPL(mt7921_usb_sdio_tx_complete_skb);
+
+bool mt7921_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+
+ mt7921_mutex_acquire(dev);
+ mt7921_mac_sta_poll(dev);
+ mt7921_mutex_release(dev);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(mt7921_usb_sdio_tx_status_data);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
index 544a1c33126a..79447e2d0143 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
@@ -202,6 +202,7 @@ enum tx_mcu_port_q_idx {
#define MT_SDIO_TXD_SIZE (MT_TXD_SIZE + 8 * 4)
#define MT_SDIO_TAIL_SIZE 8
#define MT_SDIO_HDR_SIZE 4
+#define MT_USB_TAIL_SIZE 4
#define MT_TXD0_Q_IDX GENMASK(31, 25)
#define MT_TXD0_PKT_FMT GENMASK(24, 23)
@@ -284,6 +285,9 @@ enum tx_mcu_port_q_idx {
#define MT_TXD7_HW_AMSDU BIT(10)
#define MT_TXD7_TX_TIME GENMASK(9, 0)
+#define MT_TXD8_L_TYPE GENMASK(5, 4)
+#define MT_TXD8_L_SUB_TYPE GENMASK(3, 0)
+
#define MT_TX_RATE_STBC BIT(13)
#define MT_TX_RATE_NSS GENMASK(12, 10)
#define MT_TX_RATE_MODE GENMASK(9, 6)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 7a8d2596c226..fdaf2451bc1d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -264,7 +264,7 @@ static int mt7921_start(struct ieee80211_hw *hw)
return err;
}
-static void mt7921_stop(struct ieee80211_hw *hw)
+void mt7921_stop(struct ieee80211_hw *hw)
{
struct mt7921_dev *dev = mt7921_hw_dev(hw);
struct mt7921_phy *phy = mt7921_hw_phy(hw);
@@ -273,6 +273,7 @@ static void mt7921_stop(struct ieee80211_hw *hw)
cancel_delayed_work_sync(&dev->pm.ps_work);
cancel_work_sync(&dev->pm.wake_work);
+ cancel_work_sync(&dev->reset_work);
mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);
mt7921_mutex_acquire(dev);
@@ -280,6 +281,7 @@ static void mt7921_stop(struct ieee80211_hw *hw)
mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, false, false);
mt7921_mutex_release(dev);
}
+EXPORT_SYMBOL_GPL(mt7921_stop);
static int mt7921_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
@@ -452,19 +454,64 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mt76_wcid_key_setup(&dev->mt76, wcid,
cmd == SET_KEY ? key : NULL);
- err = mt7921_mcu_add_key(dev, vif, msta, key, cmd);
+ err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip,
+ key, MCU_UNI_CMD(STA_REC_UPDATE),
+ &msta->wcid, cmd);
if (err)
goto out;
if (key->cipher == WLAN_CIPHER_SUITE_WEP104 ||
key->cipher == WLAN_CIPHER_SUITE_WEP40)
- err = mt7921_mcu_add_key(dev, vif, mvif->wep_sta, key, cmd);
+ err = mt76_connac_mcu_add_key(&dev->mt76, vif,
+ &mvif->wep_sta->bip,
+ key, MCU_UNI_CMD(STA_REC_UPDATE),
+ &mvif->wep_sta->wcid, cmd);
out:
mt7921_mutex_release(dev);
return err;
}
+static void
+mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt7921_dev *dev = priv;
+
+ mt7921_mcu_set_beacon_filter(dev, vif, dev->pm.enable);
+}
+
+static void
+mt7921_sniffer_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt7921_dev *dev = priv;
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt76_connac_pm *pm = &dev->pm;
+ bool monitor = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
+
+ mt7921_mcu_set_sniffer(dev, vif, monitor);
+ pm->enable = !monitor;
+ pm->ds_enable = !monitor;
+
+ mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
+
+ if (monitor)
+ mt7921_mcu_set_beacon_filter(dev, vif, false);
+}
+
+void mt7921_set_runtime_pm(struct mt7921_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt76_connac_pm *pm = &dev->pm;
+ bool monitor = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
+
+ pm->enable = pm->enable_user && !monitor;
+ ieee80211_iterate_active_interfaces(hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7921_pm_interface_iter, dev);
+ pm->ds_enable = pm->ds_enable_user && !monitor;
+ mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
+}
+
static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt7921_dev *dev = mt7921_hw_dev(hw);
@@ -488,16 +535,10 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
- bool enabled = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
-
- if (!enabled)
- phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
- else
- phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
-
- mt76_rmw_field(dev, MT_DMA_DCR0(0), MT_DMA_DCR0_RXD_G5_EN,
- enabled);
- mt76_wr(dev, MT_WF_RFCR(0), phy->rxfilter);
+ ieee80211_iterate_active_interfaces(hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7921_sniffer_interface_iter, dev);
+ dev->mt76.rxfilter = mt76_rr(dev, MT_WF_RFCR(0));
}
out:
@@ -510,11 +551,10 @@ static int
mt7921_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
- struct mt7921_dev *dev = mt7921_hw_dev(hw);
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
/* no need to update right away, we'll get BSS_CHANGED_QOS */
- queue = mt7921_lmac_mapping(dev, queue);
+ queue = mt76_connac_lmac_mapping(queue);
mvif->queue_params[queue] = *params;
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index ef1e1ef91611..da2be050ed7c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -67,25 +67,6 @@ struct mt7921_fw_region {
#define MT_STA_BFER BIT(0)
#define MT_STA_BFEE BIT(1)
-#define FW_FEATURE_SET_ENCRYPT BIT(0)
-#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
-#define FW_FEATURE_ENCRY_MODE BIT(4)
-#define FW_FEATURE_OVERRIDE_ADDR BIT(5)
-
-#define DL_MODE_ENCRYPT BIT(0)
-#define DL_MODE_KEY_IDX GENMASK(2, 1)
-#define DL_MODE_RESET_SEC_IV BIT(3)
-#define DL_MODE_WORKING_PDA_CR4 BIT(4)
-#define DL_CONFIG_ENCRY_MODE_SEL BIT(6)
-#define DL_MODE_NEED_RSP BIT(31)
-
-#define FW_START_OVERRIDE BIT(0)
-#define FW_START_WORKING_PDA_CR4 BIT(2)
-
-#define PATCH_SEC_NOT_SUPPORT GENMASK(31, 0)
-#define PATCH_SEC_TYPE_MASK GENMASK(15, 0)
-#define PATCH_SEC_TYPE_INFO 0x2
-
#define PATCH_SEC_ENC_TYPE_MASK GENMASK(31, 24)
#define PATCH_SEC_ENC_TYPE_PLAIN 0x00
#define PATCH_SEC_ENC_TYPE_AES 0x01
@@ -93,52 +74,6 @@ struct mt7921_fw_region {
#define PATCH_SEC_ENC_SCRAMBLE_INFO_MASK GENMASK(15, 0)
#define PATCH_SEC_ENC_AES_KEY_MASK GENMASK(7, 0)
-static enum mcu_cipher_type
-mt7921_mcu_get_cipher(int cipher)
-{
- switch (cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- return MCU_CIPHER_WEP40;
- case WLAN_CIPHER_SUITE_WEP104:
- return MCU_CIPHER_WEP104;
- case WLAN_CIPHER_SUITE_TKIP:
- return MCU_CIPHER_TKIP;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- return MCU_CIPHER_BIP_CMAC_128;
- case WLAN_CIPHER_SUITE_CCMP:
- return MCU_CIPHER_AES_CCMP;
- case WLAN_CIPHER_SUITE_CCMP_256:
- return MCU_CIPHER_CCMP_256;
- case WLAN_CIPHER_SUITE_GCMP:
- return MCU_CIPHER_GCMP;
- case WLAN_CIPHER_SUITE_GCMP_256:
- return MCU_CIPHER_GCMP_256;
- case WLAN_CIPHER_SUITE_SMS4:
- return MCU_CIPHER_WAPI;
- default:
- return MCU_CIPHER_NONE;
- }
-}
-
-static u8 mt7921_mcu_chan_bw(struct cfg80211_chan_def *chandef)
-{
- static const u8 width_to_bw[] = {
- [NL80211_CHAN_WIDTH_40] = CMD_CBW_40MHZ,
- [NL80211_CHAN_WIDTH_80] = CMD_CBW_80MHZ,
- [NL80211_CHAN_WIDTH_80P80] = CMD_CBW_8080MHZ,
- [NL80211_CHAN_WIDTH_160] = CMD_CBW_160MHZ,
- [NL80211_CHAN_WIDTH_5] = CMD_CBW_5MHZ,
- [NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
- [NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
- [NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
- };
-
- if (chandef->width >= ARRAY_SIZE(width_to_bw))
- return 0;
-
- return width_to_bw[chandef->width];
-}
-
static int
mt7921_mcu_parse_eeprom(struct mt76_dev *dev, struct sk_buff *skb)
{
@@ -465,95 +400,6 @@ void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb)
}
/** starec & wtbl **/
-static int
-mt7921_mcu_sta_key_tlv(struct mt7921_sta *msta, struct sk_buff *skb,
- struct ieee80211_key_conf *key, enum set_key_cmd cmd)
-{
- struct mt7921_sta_key_conf *bip = &msta->bip;
- struct sta_rec_sec *sec;
- struct tlv *tlv;
- u32 len = sizeof(*sec);
-
- tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
-
- sec = (struct sta_rec_sec *)tlv;
- sec->add = cmd;
-
- if (cmd == SET_KEY) {
- struct sec_key *sec_key;
- u8 cipher;
-
- cipher = mt7921_mcu_get_cipher(key->cipher);
- if (cipher == MCU_CIPHER_NONE)
- return -EOPNOTSUPP;
-
- sec_key = &sec->key[0];
- sec_key->cipher_len = sizeof(*sec_key);
-
- if (cipher == MCU_CIPHER_BIP_CMAC_128) {
- sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
- sec_key->key_id = bip->keyidx;
- sec_key->key_len = 16;
- memcpy(sec_key->key, bip->key, 16);
-
- sec_key = &sec->key[1];
- sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
- sec_key->cipher_len = sizeof(*sec_key);
- sec_key->key_len = 16;
- memcpy(sec_key->key, key->key, 16);
-
- sec->n_cipher = 2;
- } else {
- sec_key->cipher_id = cipher;
- sec_key->key_id = key->keyidx;
- sec_key->key_len = key->keylen;
- memcpy(sec_key->key, key->key, key->keylen);
-
- if (cipher == MCU_CIPHER_TKIP) {
- /* Rx/Tx MIC keys are swapped */
- memcpy(sec_key->key + 16, key->key + 24, 8);
- memcpy(sec_key->key + 24, key->key + 16, 8);
- }
-
- /* store key_conf for BIP batch update */
- if (cipher == MCU_CIPHER_AES_CCMP) {
- memcpy(bip->key, key->key, key->keylen);
- bip->keyidx = key->keyidx;
- }
-
- len -= sizeof(*sec_key);
- sec->n_cipher = 1;
- }
- } else {
- len -= sizeof(sec->key);
- sec->n_cipher = 0;
- }
- sec->len = cpu_to_le16(len);
-
- return 0;
-}
-
-int mt7921_mcu_add_key(struct mt7921_dev *dev, struct ieee80211_vif *vif,
- struct mt7921_sta *msta, struct ieee80211_key_conf *key,
- enum set_key_cmd cmd)
-{
- struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
- struct sk_buff *skb;
- int ret;
-
- skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
- &msta->wcid);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- ret = mt7921_mcu_sta_key_tlv(msta, skb, key, cmd);
- if (ret)
- return ret;
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_UNI_CMD(STA_REC_UPDATE), true);
-}
-
int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev,
struct ieee80211_ampdu_params *params,
bool enable)
@@ -564,6 +410,7 @@ int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev,
msta->wcid.amsdu = false;
return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params,
+ MCU_UNI_CMD(STA_REC_UPDATE),
enable, true);
}
@@ -574,23 +421,10 @@ int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev,
struct mt7921_sta *msta = (struct mt7921_sta *)params->sta->drv_priv;
return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params,
+ MCU_UNI_CMD(STA_REC_UPDATE),
enable, false);
}
-int mt7921_mcu_restart(struct mt76_dev *dev)
-{
- struct {
- u8 power_mode;
- u8 rsv[3];
- } req = {
- .power_mode = 1,
- };
-
- return mt76_mcu_send_msg(dev, MCU_CMD(NIC_POWER_CTRL), &req,
- sizeof(req), false);
-}
-EXPORT_SYMBOL_GPL(mt7921_mcu_restart);
-
static u32 mt7921_get_data_mode(struct mt7921_dev *dev, u32 info)
{
u32 mode = DL_MODE_NEED_RSP;
@@ -707,12 +541,8 @@ static int mt7921_load_patch(struct mt7921_dev *dev)
if (mt76_is_sdio(&dev->mt76)) {
/* activate again */
ret = __mt7921_mcu_fw_pmctrl(dev);
- if (ret)
- return ret;
-
- ret = __mt7921_mcu_drv_pmctrl(dev);
- if (ret)
- return ret;
+ if (!ret)
+ ret = __mt7921_mcu_drv_pmctrl(dev);
}
out:
@@ -730,22 +560,6 @@ out:
return ret;
}
-static u32 mt7921_mcu_gen_dl_mode(u8 feature_set, bool is_wa)
-{
- u32 ret = 0;
-
- ret |= (feature_set & FW_FEATURE_SET_ENCRYPT) ?
- (DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV) : 0;
- ret |= (feature_set & FW_FEATURE_ENCRY_MODE) ?
- DL_CONFIG_ENCRY_MODE_SEL : 0;
- ret |= FIELD_PREP(DL_MODE_KEY_IDX,
- FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
- ret |= DL_MODE_NEED_RSP;
- ret |= is_wa ? DL_MODE_WORKING_PDA_CR4 : 0;
-
- return ret;
-}
-
static int
mt7921_mcu_send_ram_firmware(struct mt7921_dev *dev,
const struct mt7921_fw_trailer *hdr,
@@ -763,7 +577,8 @@ mt7921_mcu_send_ram_firmware(struct mt7921_dev *dev,
region = (const struct mt7921_fw_region *)((const u8 *)hdr -
(hdr->n_region - i) * sizeof(*region));
- mode = mt7921_mcu_gen_dl_mode(region->feature_set, is_wa);
+ mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76,
+ region->feature_set, is_wa);
len = le32_to_cpu(region->len);
addr = le32_to_cpu(region->addr);
@@ -920,33 +735,26 @@ EXPORT_SYMBOL_GPL(mt7921_mcu_exit);
int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
{
-#define WMM_AIFS_SET BIT(0)
-#define WMM_CW_MIN_SET BIT(1)
-#define WMM_CW_MAX_SET BIT(2)
-#define WMM_TXOP_SET BIT(3)
-#define WMM_PARAM_SET GENMASK(3, 0)
-#define TX_CMD_MODE 1
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct edca {
- u8 queue;
- u8 set;
- u8 aifs;
- u8 cw_min;
+ __le16 cw_min;
__le16 cw_max;
__le16 txop;
- };
+ __le16 aifs;
+ u8 guardtime;
+ u8 acm;
+ } __packed;
struct mt7921_mcu_tx {
- u8 total;
- u8 action;
- u8 valid;
- u8 mode;
-
struct edca edca[IEEE80211_NUM_ACS];
+ u8 bss_idx;
+ u8 qos;
+ u8 wmm_idx;
+ u8 pad;
} __packed req = {
- .valid = true,
- .mode = TX_CMD_MODE,
- .total = IEEE80211_NUM_ACS,
+ .bss_idx = mvif->mt76.idx,
+ .qos = vif->bss_conf.qos,
+ .wmm_idx = mvif->mt76.wmm_idx,
};
- struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct mu_edca {
u8 cw_min;
u8 cw_max;
@@ -970,30 +778,29 @@ int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
.qos = vif->bss_conf.qos,
.wmm_idx = mvif->mt76.wmm_idx,
};
+ static const int to_aci[] = { 1, 0, 2, 3 };
int ac, ret;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
- struct edca *e = &req.edca[ac];
+ struct edca *e = &req.edca[to_aci[ac]];
- e->set = WMM_PARAM_SET;
- e->queue = ac + mvif->mt76.wmm_idx * MT7921_MAX_WMM_SETS;
- e->aifs = q->aifs;
+ e->aifs = cpu_to_le16(q->aifs);
e->txop = cpu_to_le16(q->txop);
if (q->cw_min)
- e->cw_min = fls(q->cw_min);
+ e->cw_min = cpu_to_le16(q->cw_min);
else
- e->cw_min = 5;
+ e->cw_min = cpu_to_le16(5);
if (q->cw_max)
- e->cw_max = cpu_to_le16(fls(q->cw_max));
+ e->cw_max = cpu_to_le16(q->cw_max);
else
e->cw_max = cpu_to_le16(10);
}
- ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EDCA_UPDATE),
- &req, sizeof(req), true);
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_EDCA_PARMS), &req,
+ sizeof(req), false);
if (ret)
return ret;
@@ -1003,7 +810,6 @@ int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
struct ieee80211_he_mu_edca_param_ac_rec *q;
struct mu_edca *e;
- int to_aci[] = {1, 0, 2, 3};
if (!mvif->queue_params[ac].mu_edca)
break;
@@ -1046,7 +852,7 @@ int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd)
} __packed req = {
.control_ch = chandef->chan->hw_value,
.center_ch = ieee80211_frequency_to_channel(freq1),
- .bw = mt7921_mcu_chan_bw(chandef),
+ .bw = mt76_connac_chan_bw(chandef),
.tx_streams_num = hweight8(phy->mt76->antenna_mask),
.rx_streams = phy->mt76->antenna_mask,
.band_idx = phy != &dev->phy,
@@ -1057,10 +863,13 @@ int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd)
else
req.channel_band = chandef->chan->band;
- if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
+ dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
+ req.switch_reason = CH_SWITCH_NORMAL;
+ else if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
- else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
- chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
+ else if (!cfg80211_reg_can_beacon(dev->mt76.hw->wiphy, chandef,
+ NL80211_IFTYPE_AP))
req.switch_reason = CH_SWITCH_DFS;
else
req.switch_reason = CH_SWITCH_NORMAL;
@@ -1093,30 +902,6 @@ int mt7921_mcu_set_eeprom(struct mt7921_dev *dev)
}
EXPORT_SYMBOL_GPL(mt7921_mcu_set_eeprom);
-int mt7921_mcu_get_eeprom(struct mt7921_dev *dev, u32 offset)
-{
- struct mt7921_mcu_eeprom_info req = {
- .addr = cpu_to_le32(round_down(offset, 16)),
- };
- struct mt7921_mcu_eeprom_info *res;
- struct sk_buff *skb;
- int ret;
- u8 *buf;
-
- ret = mt76_mcu_send_and_get_msg(&dev->mt76,
- MCU_EXT_QUERY(EFUSE_ACCESS),
- &req, sizeof(req), true, &skb);
- if (ret)
- return ret;
-
- res = (struct mt7921_mcu_eeprom_info *)skb->data;
- buf = dev->mt76.eeprom.data + le32_to_cpu(res->addr);
- memcpy(buf, res->data, 16);
- dev_kfree_skb(skb);
-
- return 0;
-}
-
int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
@@ -1351,3 +1136,33 @@ int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr)
return 0;
}
+
+int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct {
+ struct {
+ u8 band_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct sniffer_enable_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 enable;
+ u8 pad[3];
+ } __packed enable;
+ } req = {
+ .hdr = {
+ .band_idx = mvif->band_idx,
+ },
+ .enable = {
+ .tag = cpu_to_le16(0),
+ .len = cpu_to_le16(sizeof(struct sniffer_enable_tlv)),
+ .enable = enable,
+ },
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req),
+ true);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
index 96647801850a..7690364bc079 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -30,6 +30,7 @@
#define MT7921_DRV_OWN_RETRY_COUNT 10
#define MT7921_MCU_INIT_RETRY_COUNT 10
+#define MT7921_WFSYS_INIT_RETRY_COUNT 2
#define MT7921_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7961_1.bin"
#define MT7921_ROM_PATCH "mediatek/WIFI_MT7961_patch_mcu_1_2_hdr.bin"
@@ -89,11 +90,6 @@ enum mt7921_rxq_id {
MT7921_RXQ_MCU_WM = 0,
};
-struct mt7921_sta_key_conf {
- s8 keyidx;
- u8 key[16];
-};
-
struct mt7921_sta {
struct mt76_wcid wcid; /* must be first */
@@ -106,7 +102,7 @@ struct mt7921_sta {
unsigned long ampdu_state;
struct mt76_sta_stats stats;
- struct mt7921_sta_key_conf bip;
+ struct mt76_connac_sta_key_conf bip;
};
DECLARE_EWMA(rssi, 10, 8);
@@ -209,6 +205,8 @@ struct mt7921_dev {
struct list_head sta_poll_list;
spinlock_t sta_poll_lock;
+ struct work_struct init_work;
+
u8 fw_debug;
struct mt76_connac_pm pm;
@@ -277,12 +275,6 @@ mt7921_hw_dev(struct ieee80211_hw *hw)
#define mt7921_mutex_release(dev) \
mt76_connac_mutex_release(&(dev)->mt76, &(dev)->pm)
-static inline u8 mt7921_lmac_mapping(struct mt7921_dev *dev, u8 ac)
-{
- /* LMAC uses the reverse order of mac80211 AC indexes */
- return 3 - ac;
-}
-
extern const struct ieee80211_ops mt7921_ops;
extern struct pci_driver mt7921_pci_driver;
@@ -296,16 +288,12 @@ int mt7921_wpdma_reset(struct mt7921_dev *dev, bool force);
int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev);
void mt7921_dma_cleanup(struct mt7921_dev *dev);
int mt7921_run_firmware(struct mt7921_dev *dev);
-int mt7921_mcu_add_key(struct mt7921_dev *dev, struct ieee80211_vif *vif,
- struct mt7921_sta *msta, struct ieee80211_key_conf *key,
- enum set_key_cmd cmd);
int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta,
struct ieee80211_vif *vif, bool enable,
enum mt76_sta_info_state state);
int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd);
int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif);
int mt7921_mcu_set_eeprom(struct mt7921_dev *dev);
-int mt7921_mcu_get_eeprom(struct mt7921_dev *dev, u32 offset);
int mt7921_mcu_get_rx_rate(struct mt7921_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct rate_info *rate);
int mt7921_mcu_fw_log_2_host(struct mt7921_dev *dev, u8 ctrl);
@@ -367,17 +355,20 @@ static inline void mt7921_mcu_tx_cleanup(struct mt7921_dev *dev)
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], false);
}
-static inline void mt7921_skb_add_sdio_hdr(struct sk_buff *skb,
- enum mt7921_sdio_pkt_type type)
+static inline void
+mt7921_skb_add_usb_sdio_hdr(struct mt7921_dev *dev, struct sk_buff *skb,
+ int type)
{
- u32 hdr;
+ u32 hdr, len;
- hdr = FIELD_PREP(MT7921_SDIO_HDR_TX_BYTES, skb->len + sizeof(hdr)) |
+ len = mt76_is_usb(&dev->mt76) ? skb->len : skb->len + sizeof(hdr);
+ hdr = FIELD_PREP(MT7921_SDIO_HDR_TX_BYTES, len) |
FIELD_PREP(MT7921_SDIO_HDR_PKT_TYPE, type);
put_unaligned_le32(hdr, skb_push(skb, sizeof(hdr)));
}
+void mt7921_stop(struct ieee80211_hw *hw);
int mt7921_mac_init(struct mt7921_dev *dev);
bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask);
void mt7921_mac_reset_counters(struct mt7921_phy *phy);
@@ -399,7 +390,6 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
void mt7921_tx_worker(struct mt76_worker *w);
void mt7921e_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
-int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc);
void mt7921_tx_token_put(struct mt7921_dev *dev);
void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
@@ -424,7 +414,6 @@ int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev);
int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev);
void mt7921_pm_wake_work(struct work_struct *work);
void mt7921_pm_power_save_work(struct work_struct *work);
-bool mt7921_wait_for_mcu_init(struct mt7921_dev *dev);
void mt7921_coredump_work(struct work_struct *work);
int mt7921_wfsys_reset(struct mt7921_dev *dev);
int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr);
@@ -442,8 +431,8 @@ int mt7921_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, int *wait_seq);
int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq);
-int mt7921_mcu_restart(struct mt76_dev *dev);
+bool mt7921e_rx_check(struct mt76_dev *mdev, void *data, int len);
void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
int mt7921e_driver_own(struct mt7921_dev *dev);
@@ -452,17 +441,33 @@ int mt7921e_mcu_init(struct mt7921_dev *dev);
int mt7921s_wfsys_reset(struct mt7921_dev *dev);
int mt7921s_mac_reset(struct mt7921_dev *dev);
int mt7921s_init_reset(struct mt7921_dev *dev);
+int __mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev);
int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev);
int mt7921e_mcu_fw_pmctrl(struct mt7921_dev *dev);
int mt7921s_mcu_init(struct mt7921_dev *dev);
int mt7921s_mcu_drv_pmctrl(struct mt7921_dev *dev);
int mt7921s_mcu_fw_pmctrl(struct mt7921_dev *dev);
-int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
- enum mt76_txq_id qid, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta,
- struct mt76_tx_info *tx_info);
-void mt7921s_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
-bool mt7921s_tx_status_data(struct mt76_dev *mdev, u8 *update);
void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data);
+void mt7921_set_runtime_pm(struct mt7921_dev *dev);
+int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif,
+ bool enable);
+
+int mt7921_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info);
+void mt7921_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
+ struct mt76_queue_entry *e);
+bool mt7921_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update);
+
+/* usb */
+#define MT_USB_TYPE_VENDOR (USB_TYPE_VENDOR | 0x1f)
+#define MT_USB_TYPE_UHW_VENDOR (USB_TYPE_VENDOR | 0x1e)
+
+int mt7921u_mcu_power_on(struct mt7921_dev *dev);
+int mt7921u_wfsys_reset(struct mt7921_dev *dev);
+int mt7921u_dma_init(struct mt7921_dev *dev);
+int mt7921u_init_reset(struct mt7921_dev *dev);
+int mt7921u_mac_reset(struct mt7921_dev *dev);
#endif
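
The reworked mt7921_skb_add_usb_sdio_hdr() above prepends a single 32-bit word encoding the frame length and a packet type; on SDIO the length includes the 4-byte header itself, on USB it does not. The sketch below shows that composition, but note the exact bit positions of MT7921_SDIO_HDR_TX_BYTES and MT7921_SDIO_HDR_PKT_TYPE are not visible in this patch, so the 16-bit length / 2-bit type split used here is only an assumption for illustration:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Assumed layout for illustration only: bits 15..0 carry the byte count,
 * bits 17..16 the packet type. The real masks live in the mt7921 headers.
 */
#define HDR_TX_BYTES_SHIFT	0
#define HDR_TX_BYTES_MASK	0xffffu
#define HDR_PKT_TYPE_SHIFT	16
#define HDR_PKT_TYPE_MASK	0x3u

static uint32_t build_hdr(uint32_t skb_len, uint32_t type, bool is_usb)
{
	/* SDIO counts the header word itself, USB does not */
	uint32_t len = is_usb ? skb_len : skb_len + (uint32_t)sizeof(uint32_t);

	return ((len & HDR_TX_BYTES_MASK) << HDR_TX_BYTES_SHIFT) |
	       ((type & HDR_PKT_TYPE_MASK) << HDR_PKT_TYPE_SHIFT);
}

int main(void)
{
	printf("sdio hdr: 0x%08x\n", (unsigned)build_hdr(100, 0, false)); /* len = 104 */
	printf("usb  hdr: 0x%08x\n", (unsigned)build_hdr(100, 0, true));  /* len = 100 */
	return 0;
}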
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index 9dae2f5972bf..1a01d025bbe5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -105,6 +105,7 @@ static void mt7921e_unregister_device(struct mt7921_dev *dev)
int i;
struct mt76_connac_pm *pm = &dev->pm;
+ cancel_work_sync(&dev->init_work);
mt76_unregister_device(&dev->mt76);
mt76_for_each_q_rx(&dev->mt76, i)
napi_disable(&dev->mt76.napi[i]);
@@ -121,6 +122,110 @@ static void mt7921e_unregister_device(struct mt7921_dev *dev)
mt76_free_device(&dev->mt76);
}
+static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
+{
+ static const struct {
+ u32 phys;
+ u32 mapped;
+ u32 size;
+ } fixed_map[] = {
+ { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure register) */
+ { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
+ { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
+ { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
+ { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
+ { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
+ { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
+ { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
+ { 0x7c060000, 0xe0000, 0x10000 }, /* CONN_INFRA, conn_host_csr_top */
+ { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
+ { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x1000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820cd000, 0x0f000, 0x1000 }, /* WF_MDP_TOP */
+ { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ };
+ int i;
+
+ if (addr < 0x100000)
+ return addr;
+
+ for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
+ u32 ofs;
+
+ if (addr < fixed_map[i].phys)
+ continue;
+
+ ofs = addr - fixed_map[i].phys;
+ if (ofs > fixed_map[i].size)
+ continue;
+
+ return fixed_map[i].mapped + ofs;
+ }
+
+ if ((addr >= 0x18000000 && addr < 0x18c00000) ||
+ (addr >= 0x70000000 && addr < 0x78000000) ||
+ (addr >= 0x7c000000 && addr < 0x7c400000))
+ return mt7921_reg_map_l1(dev, addr);
+
+ dev_err(dev->mt76.dev, "Access currently unsupported address %08x\n",
+ addr);
+
+ return 0;
+}
+
+static u32 mt7921_rr(struct mt76_dev *mdev, u32 offset)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ u32 addr = __mt7921_reg_addr(dev, offset);
+
+ return dev->bus_ops->rr(mdev, addr);
+}
+
+static void mt7921_wr(struct mt76_dev *mdev, u32 offset, u32 val)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ u32 addr = __mt7921_reg_addr(dev, offset);
+
+ dev->bus_ops->wr(mdev, addr, val);
+}
+
+static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ u32 addr = __mt7921_reg_addr(dev, offset);
+
+ return dev->bus_ops->rmw(mdev, addr, mask, val);
+}
+
static int mt7921_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -134,6 +239,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
.token_size = MT7921_TOKEN_SIZE,
.tx_prepare_skb = mt7921e_tx_prepare_skb,
.tx_complete_skb = mt7921e_tx_complete_skb,
+ .rx_check = mt7921e_rx_check,
.rx_skb = mt7921e_queue_rx_skb,
.rx_poll_complete = mt7921_rx_poll_complete,
.sta_ps = mt7921_sta_ps,
@@ -151,6 +257,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
.fw_own = mt7921e_mcu_fw_pmctrl,
};
+ struct mt76_bus_ops *bus_ops;
struct mt7921_dev *dev;
struct mt76_dev *mdev;
int ret;
@@ -188,6 +295,25 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
tasklet_init(&dev->irq_tasklet, mt7921_irq_tasklet, (unsigned long)dev);
+
+ dev->phy.dev = dev;
+ dev->phy.mt76 = &dev->mt76.phy;
+ dev->mt76.phy.priv = &dev->phy;
+ dev->bus_ops = dev->mt76.bus;
+ bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
+ GFP_KERNEL);
+ if (!bus_ops)
+ return -ENOMEM;
+
+ bus_ops->rr = mt7921_rr;
+ bus_ops->wr = mt7921_wr;
+ bus_ops->rmw = mt7921_rmw;
+ dev->mt76.bus = bus_ops;
+
+ ret = __mt7921e_mcu_drv_pmctrl(dev);
+ if (ret)
+ return ret;
+
mdev->rev = (mt7921_l1_rr(dev, MT_HW_CHIPID) << 16) |
(mt7921_l1_rr(dev, MT_HW_REV) & 0xff);
dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
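
The new __mt7921_reg_addr() helper in pci.c linearly scans a fixed table to turn a chip-internal physical address into an offset inside the PCIe BAR. A small standalone sketch of the same lookup, with two entries copied from the fixed_map[] table above: 0x820e4010 falls inside the WF_TMAC window and maps to 0x21010.

#include <stdio.h>
#include <stdint.h>

struct reg_map {
	uint32_t phys;
	uint32_t mapped;
	uint32_t size;
};

/* two entries copied from fixed_map[] in mt7921/pci.c */
static const struct reg_map fixed_map[] = {
	{ 0x820e4000, 0x21000, 0x0400 },	/* WF_LMAC_TOP BN0 (WF_TMAC) */
	{ 0x820ed000, 0x24800, 0x0800 },	/* WF_LMAC_TOP BN0 (WF_MIB) */
};

static uint32_t reg_addr(uint32_t addr)
{
	/* addresses below 1 MiB are already BAR offsets */
	if (addr < 0x100000)
		return addr;

	for (size_t i = 0; i < sizeof(fixed_map) / sizeof(fixed_map[0]); i++) {
		uint32_t ofs;

		if (addr < fixed_map[i].phys)
			continue;

		ofs = addr - fixed_map[i].phys;
		if (ofs > fixed_map[i].size)
			continue;

		return fixed_map[i].mapped + ofs;
	}

	return 0;	/* unsupported in this sketch */
}

int main(void)
{
	printf("0x820e4010 -> 0x%05x\n", (unsigned)reg_addr(0x820e4010));	/* 0x21010 */
	printf("0x820ed040 -> 0x%05x\n", (unsigned)reg_addr(0x820ed040));	/* 0x24840 */
	return 0;
}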
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
index 85286cc9add1..5ca14dbbdd26 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
@@ -137,7 +137,7 @@ mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t,
wcid_idx = wcid->idx;
} else {
- wcid_idx = FIELD_GET(MT_TXD1_WLAN_IDX, le32_to_cpu(txwi[1]));
+ wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
}
__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
@@ -148,14 +148,15 @@ out:
}
static void
-mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
+mt7921e_mac_tx_free(struct mt7921_dev *dev, void *data, int len)
{
- struct mt7921_tx_free *free = (struct mt7921_tx_free *)skb->data;
+ struct mt7921_tx_free *free = (struct mt7921_tx_free *)data;
struct mt76_dev *mdev = &dev->mt76;
struct mt76_txwi_cache *txwi;
struct ieee80211_sta *sta = NULL;
+ struct sk_buff *skb, *tmp;
+ void *end = data + len;
LIST_HEAD(free_list);
- struct sk_buff *tmp;
bool wake = false;
u8 i, count;
@@ -163,11 +164,10 @@ mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
- /* TODO: MT_TX_FREE_LATENCY is msdu time from the TXD is queued into PLE,
- * to the time ack is received or dropped by hw (air + hw queue time).
- * Should avoid accessing WTBL to get Tx airtime, and use it instead.
- */
- count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
+ count = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
+ if (WARN_ON_ONCE((void *)&free->info[count] > end))
+ return;
+
for (i = 0; i < count; i++) {
u32 msdu, info = le32_to_cpu(free->info[i]);
u8 stat;
@@ -208,8 +208,6 @@ mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
if (wake)
mt76_set_tx_blocked(&dev->mt76, false);
- napi_consume_skb(skb, 1);
-
list_for_each_entry_safe(skb, tmp, &free_list, list) {
skb_list_del_init(skb);
napi_consume_skb(skb, 1);
@@ -222,6 +220,28 @@ mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
mt76_worker_schedule(&dev->mt76.tx_worker);
}
+bool mt7921e_rx_check(struct mt76_dev *mdev, void *data, int len)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ __le32 *rxd = (__le32 *)data;
+ __le32 *end = (__le32 *)&rxd[len / 4];
+ enum rx_pkt_type type;
+
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+
+ switch (type) {
+ case PKT_TYPE_TXRX_NOTIFY:
+ mt7921e_mac_tx_free(dev, data, len);
+ return false;
+ case PKT_TYPE_TXS:
+ for (rxd += 2; rxd + 8 <= end; rxd += 8)
+ mt7921_mac_add_txs(dev, rxd);
+ return false;
+ default:
+ return true;
+ }
+}
+
void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
{
@@ -229,11 +249,12 @@ void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
__le32 *rxd = (__le32 *)skb->data;
enum rx_pkt_type type;
- type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
switch (type) {
case PKT_TYPE_TXRX_NOTIFY:
- mt7921_mac_tx_free(dev, skb);
+ mt7921e_mac_tx_free(dev, skb->data, skb->len);
+ napi_consume_skb(skb, 1);
break;
default:
mt7921_queue_rx_skb(mdev, q, skb);
@@ -314,6 +335,7 @@ int mt7921e_mac_reset(struct mt7921_dev *dev)
}
local_bh_enable();
+ dev->fw_assert = false;
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
index a020352122a1..36669e5aeef3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
@@ -42,7 +42,7 @@ int mt7921e_mcu_init(struct mt7921_dev *dev)
.headroom = sizeof(struct mt7921_mcu_txd),
.mcu_skb_send_msg = mt7921_mcu_send_message,
.mcu_parse_response = mt7921_mcu_parse_response,
- .mcu_restart = mt7921_mcu_restart,
+ .mcu_restart = mt76_connac_mcu_restart,
};
int err;
@@ -59,10 +59,8 @@ int mt7921e_mcu_init(struct mt7921_dev *dev)
return err;
}
-int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
+int __mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
{
- struct mt76_phy *mphy = &dev->mt76.phy;
- struct mt76_connac_pm *pm = &dev->pm;
int i, err = 0;
for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
@@ -75,9 +73,21 @@ int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
if (i == MT7921_DRV_OWN_RETRY_COUNT) {
dev_err(dev->mt76.dev, "driver own failed\n");
err = -EIO;
- goto out;
}
+ return err;
+}
+
+int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt76_connac_pm *pm = &dev->pm;
+ int err;
+
+ err = __mt7921e_mcu_drv_pmctrl(dev);
+ if (err < 0)
+ goto out;
+
mt7921_wpdma_reinit_cond(dev);
clear_bit(MT76_STATE_PM, &mphy->state);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
index cbd38122c510..6712ff60c722 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
@@ -17,13 +17,12 @@
#define MT_PLE_BASE 0x820c0000
#define MT_PLE(ofs) (MT_PLE_BASE + (ofs))
-#define MT_PLE_FL_Q0_CTRL MT_PLE(0x1b0)
-#define MT_PLE_FL_Q1_CTRL MT_PLE(0x1b4)
-#define MT_PLE_FL_Q2_CTRL MT_PLE(0x1b8)
-#define MT_PLE_FL_Q3_CTRL MT_PLE(0x1bc)
+#define MT_PLE_FL_Q0_CTRL MT_PLE(0x3e0)
+#define MT_PLE_FL_Q1_CTRL MT_PLE(0x3e4)
+#define MT_PLE_FL_Q2_CTRL MT_PLE(0x3e8)
+#define MT_PLE_FL_Q3_CTRL MT_PLE(0x3ec)
-#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x300 + 0x10 * (ac) + \
- ((n) << 2))
+#define MT_PLE_AC_QEMPTY(_n) MT_PLE(0x500 + 0x40 * (_n))
#define MT_PLE_AMSDU_PACK_MSDU_CNT(n) MT_PLE(0x10e0 + ((n) << 2))
#define MT_MDP_BASE 0x820cd000
@@ -354,6 +353,7 @@
#define MT_WFDMA0_GLO_CFG_RX_DMA_EN BIT(2)
#define MT_WFDMA0_GLO_CFG_RX_DMA_BUSY BIT(3)
#define MT_WFDMA0_GLO_CFG_TX_WB_DDONE BIT(6)
+#define MT_WFDMA0_GLO_CFG_FW_DWLD_BYPASS_DMASHDL BIT(9)
#define MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
#define MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN BIT(15)
#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
@@ -378,6 +378,9 @@
#define MT_WFDMA0_TX_RING16_EXT_CTRL MT_WFDMA0(0x640)
#define MT_WFDMA0_TX_RING17_EXT_CTRL MT_WFDMA0(0x644)
+#define MT_WPDMA0_MAX_CNT_MASK GENMASK(7, 0)
+#define MT_WPDMA0_BASE_PTR_MASK GENMASK(31, 16)
+
#define MT_WFDMA0_RX_RING0_EXT_CTRL MT_WFDMA0(0x680)
#define MT_WFDMA0_RX_RING1_EXT_CTRL MT_WFDMA0(0x684)
#define MT_WFDMA0_RX_RING2_EXT_CTRL MT_WFDMA0(0x688)
@@ -426,6 +429,10 @@
#define MT_WFDMA_DUMMY_CR MT_MCU_WPDMA0(0x120)
#define MT_WFDMA_NEED_REINIT BIT(1)
+#define MT_CBTOP_RGU(ofs) (0x70002000 + (ofs))
+#define MT_CBTOP_RGU_WF_SUBSYS_RST MT_CBTOP_RGU(0x600)
+#define MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH BIT(0)
+
#define MT_HW_BOUND 0x70010020
#define MT_HW_CHIPID 0x70010200
#define MT_HW_REV 0x70010204
@@ -434,12 +441,14 @@
#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs))
#define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188)
-#define MT_DMA_SHDL(ofs) (0xd6000 + (ofs))
+#define MT_DMA_SHDL(ofs) (0x7c026000 + (ofs))
#define MT_DMASHDL_SW_CONTROL MT_DMA_SHDL(0x004)
#define MT_DMASHDL_DMASHDL_BYPASS BIT(28)
#define MT_DMASHDL_OPTIONAL MT_DMA_SHDL(0x008)
#define MT_DMASHDL_PAGE MT_DMA_SHDL(0x00c)
+#define MT_DMASHDL_GROUP_SEQ_ORDER BIT(16)
#define MT_DMASHDL_REFILL MT_DMA_SHDL(0x010)
+#define MT_DMASHDL_REFILL_MASK GENMASK(31, 16)
#define MT_DMASHDL_PKT_MAX_SIZE MT_DMA_SHDL(0x01c)
#define MT_DMASHDL_PKT_MAX_SIZE_PLE GENMASK(11, 0)
#define MT_DMASHDL_PKT_MAX_SIZE_PSE GENMASK(27, 16)
@@ -454,6 +463,46 @@
#define MT_DMASHDL_SCHED_SET(_n) MT_DMA_SHDL(0x070 + ((_n) << 2))
+#define MT_WFDMA_HOST_CONFIG 0x7c027030
+#define MT_WFDMA_HOST_CONFIG_USB_RXEVT_EP4_EN BIT(6)
+
+#define MT_UMAC(ofs) (0x74000000 + (ofs))
+#define MT_UDMA_TX_QSEL MT_UMAC(0x008)
+#define MT_FW_DL_EN BIT(3)
+
+#define MT_UDMA_WLCFG_1 MT_UMAC(0x00c)
+#define MT_WL_RX_AGG_PKT_LMT GENMASK(7, 0)
+#define MT_WL_TX_TMOUT_LMT GENMASK(27, 8)
+
+#define MT_UDMA_WLCFG_0 MT_UMAC(0x18)
+#define MT_WL_RX_AGG_TO GENMASK(7, 0)
+#define MT_WL_RX_AGG_LMT GENMASK(15, 8)
+#define MT_WL_TX_TMOUT_FUNC_EN BIT(16)
+#define MT_WL_TX_DPH_CHK_EN BIT(17)
+#define MT_WL_RX_MPSZ_PAD0 BIT(18)
+#define MT_WL_RX_FLUSH BIT(19)
+#define MT_TICK_1US_EN BIT(20)
+#define MT_WL_RX_AGG_EN BIT(21)
+#define MT_WL_RX_EN BIT(22)
+#define MT_WL_TX_EN BIT(23)
+#define MT_WL_RX_BUSY BIT(30)
+#define MT_WL_TX_BUSY BIT(31)
+
+#define MT_UDMA_CONN_INFRA_STATUS MT_UMAC(0xa20)
+#define MT_UDMA_CONN_WFSYS_INIT_DONE BIT(22)
+#define MT_UDMA_CONN_INFRA_STATUS_SEL MT_UMAC(0xa24)
+
+#define MT_SSUSB_EPCTL_CSR(ofs) (0x74011800 + (ofs))
+#define MT_SSUSB_EPCTL_CSR_EP_RST_OPT MT_SSUSB_EPCTL_CSR(0x090)
+
+#define MT_UWFDMA0(ofs) (0x7c024000 + (ofs))
+#define MT_UWFDMA0_GLO_CFG MT_UWFDMA0(0x208)
+#define MT_UWFDMA0_GLO_CFG_EXT0 MT_UWFDMA0(0x2b0)
+#define MT_UWFDMA0_TX_RING_EXT_CTRL(_n) MT_UWFDMA0(0x600 + ((_n) << 2))
+
+#define MT_CONN_STATUS 0x7c053c10
+#define MT_WIFI_PATCH_DL_STATE BIT(0)
+
#define MT_CONN_ON_LPCTL 0x7c060010
#define PCIE_LPCR_HOST_OWN_SYNC BIT(2)
#define PCIE_LPCR_HOST_CLR_OWN BIT(1)
@@ -464,6 +513,7 @@
#define WFSYS_SW_INIT_DONE BIT(4)
#define MT_CONN_ON_MISC 0x7c0600f0
+#define MT_TOP_MISC2_FW_PWR_ON BIT(0)
#define MT_TOP_MISC2_FW_N9_RDY GENMASK(1, 0)
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
index 65d693902c22..af26d59fa2f0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
@@ -41,6 +41,7 @@ static void mt7921s_unregister_device(struct mt7921_dev *dev)
{
struct mt76_connac_pm *pm = &dev->pm;
+ cancel_work_sync(&dev->init_work);
mt76_unregister_device(&dev->mt76);
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
@@ -58,7 +59,10 @@ static int mt7921s_parse_intr(struct mt76_dev *dev, struct mt76s_intr *intr)
struct mt7921_sdio_intr *irq_data = sdio->intr_data;
int i, err;
+ sdio_claim_host(sdio->func);
err = sdio_readsb(sdio->func, irq_data, MCR_WHISR, sizeof(*irq_data));
+ sdio_release_host(sdio->func);
+
if (err < 0)
return err;
@@ -88,9 +92,9 @@ static int mt7921s_probe(struct sdio_func *func,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
- .tx_prepare_skb = mt7921s_tx_prepare_skb,
- .tx_complete_skb = mt7921s_tx_complete_skb,
- .tx_status_data = mt7921s_tx_status_data,
+ .tx_prepare_skb = mt7921_usb_sdio_tx_prepare_skb,
+ .tx_complete_skb = mt7921_usb_sdio_tx_complete_skb,
+ .tx_status_data = mt7921_usb_sdio_tx_status_data,
.rx_skb = mt7921_queue_rx_skb,
.sta_ps = mt7921_sta_ps,
.sta_add = mt7921_mac_sta_add,
@@ -118,7 +122,7 @@ static int mt7921s_probe(struct sdio_func *func,
struct mt7921_dev *dev;
struct mt76_dev *mdev;
- int ret, i;
+ int ret;
mdev = mt76_alloc_device(&func->dev, sizeof(*dev), &mt7921_ops,
&drv_ops);
@@ -151,16 +155,6 @@ static int mt7921s_probe(struct sdio_func *func,
goto error;
}
- for (i = 0; i < ARRAY_SIZE(mdev->sdio.xmit_buf); i++) {
- mdev->sdio.xmit_buf[i] = devm_kmalloc(mdev->dev,
- MT76S_XMIT_BUF_SZ,
- GFP_KERNEL);
- if (!mdev->sdio.xmit_buf[i]) {
- ret = -ENOMEM;
- goto error;
- }
- }
-
ret = mt76s_alloc_rx_queue(mdev, MT_RXQ_MAIN);
if (ret)
goto error;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
index ccaf8134cec7..1b3adb3d91e8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
@@ -60,7 +60,11 @@ int mt7921s_wfsys_reset(struct mt7921_dev *dev)
sdio_release_host(sdio->func);
+ clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+
/* activate mt7921s again */
+ mt7921s_mcu_drv_pmctrl(dev);
+ mt76_clear(dev, MT_CONN_STATUS, MT_WIFI_PATCH_DL_STATE);
mt7921s_mcu_fw_pmctrl(dev);
mt7921s_mcu_drv_pmctrl(dev);
@@ -81,7 +85,6 @@ int mt7921s_init_reset(struct mt7921_dev *dev)
mt7921s_wfsys_reset(dev);
mt76_worker_enable(&dev->mt76.sdio.txrx_worker);
- clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
mt7921s_enable_irq(&dev->mt76);
@@ -114,7 +117,6 @@ int mt7921s_mac_reset(struct mt7921_dev *dev)
mt76_worker_enable(&dev->mt76.sdio.net_worker);
dev->fw_assert = false;
- clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
mt7921s_enable_irq(&dev->mt76);
@@ -138,86 +140,3 @@ out:
return err;
}
-
-static void
-mt7921s_write_txwi(struct mt7921_dev *dev, struct mt76_wcid *wcid,
- enum mt76_txq_id qid, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key, int pid,
- struct sk_buff *skb)
-{
- __le32 *txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE);
-
- memset(txwi, 0, MT_SDIO_TXD_SIZE);
- mt7921_mac_write_txwi(dev, txwi, skb, wcid, key, pid, false);
- skb_push(skb, MT_SDIO_TXD_SIZE);
-}
-
-int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
- enum mt76_txq_id qid, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta,
- struct mt76_tx_info *tx_info)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
- struct ieee80211_key_conf *key = info->control.hw_key;
- struct sk_buff *skb = tx_info->skb;
- int err, pad, pktid;
-
- if (unlikely(tx_info->skb->len <= ETH_HLEN))
- return -EINVAL;
-
- if (!wcid)
- wcid = &dev->mt76.global_wcid;
-
- if (sta) {
- struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
-
- if (time_after(jiffies, msta->last_txs + HZ / 4)) {
- info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
- msta->last_txs = jiffies;
- }
- }
-
- pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
- mt7921s_write_txwi(dev, wcid, qid, sta, key, pktid, skb);
-
- mt7921_skb_add_sdio_hdr(skb, MT7921_SDIO_DATA);
- pad = round_up(skb->len, 4) - skb->len;
-
- err = mt76_skb_adjust_pad(skb, pad);
- if (err)
- /* Release pktid in case of error. */
- idr_remove(&wcid->pktid, pktid);
-
- return err;
-}
-
-void mt7921s_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
-{
- __le32 *txwi = (__le32 *)(e->skb->data + MT_SDIO_HDR_SIZE);
- unsigned int headroom = MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE;
- struct ieee80211_sta *sta;
- struct mt76_wcid *wcid;
- u16 idx;
-
- idx = FIELD_GET(MT_TXD1_WLAN_IDX, le32_to_cpu(txwi[1]));
- wcid = rcu_dereference(mdev->wcid[idx]);
- sta = wcid_to_sta(wcid);
-
- if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE)))
- mt7921_tx_check_aggr(sta, txwi);
-
- skb_pull(e->skb, headroom);
- mt76_tx_complete_skb(mdev, e->wcid, e->skb);
-}
-
-bool mt7921s_tx_status_data(struct mt76_dev *mdev, u8 *update)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
-
- mt7921_mutex_acquire(dev);
- mt7921_mac_sta_poll(dev);
- mt7921_mutex_release(dev);
-
- return false;
-}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
index d20f2ff01be1..54a5c712a3c3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
@@ -36,7 +36,7 @@ mt7921s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
if (cmd == MCU_CMD(FW_SCATTER))
type = MT7921_SDIO_FWDL;
- mt7921_skb_add_sdio_hdr(skb, type);
+ mt7921_skb_add_usb_sdio_hdr(dev, skb, type);
pad = round_up(skb->len, 4) - skb->len;
__skb_put_zero(skb, pad);
@@ -49,6 +49,26 @@ mt7921s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
return ret;
}
+static u32 mt7921s_read_rm3r(struct mt7921_dev *dev)
+{
+ struct mt76_sdio *sdio = &dev->mt76.sdio;
+
+ return sdio_readl(sdio->func, MCR_D2HRM3R, NULL);
+}
+
+static u32 mt7921s_clear_rm3r_drv_own(struct mt7921_dev *dev)
+{
+ struct mt76_sdio *sdio = &dev->mt76.sdio;
+ u32 val;
+
+ val = sdio_readl(sdio->func, MCR_D2HRM3R, NULL);
+ if (val)
+ sdio_writel(sdio->func, H2D_SW_INT_CLEAR_MAILBOX_ACK,
+ MCR_WSICR, NULL);
+
+ return val;
+}
+
int mt7921s_mcu_init(struct mt7921_dev *dev)
{
static const struct mt76_mcu_ops mt7921s_mcu_ops = {
@@ -88,6 +108,12 @@ int mt7921s_mcu_drv_pmctrl(struct mt7921_dev *dev)
err = readx_poll_timeout(mt76s_read_pcr, &dev->mt76, status,
status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
+
+ if (!err && test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
+ err = readx_poll_timeout(mt7921s_read_rm3r, dev, status,
+ status & D2HRM3R_IS_DRIVER_OWN,
+ 2000, 1000000);
+
sdio_release_host(func);
if (err < 0) {
@@ -115,12 +141,24 @@ int mt7921s_mcu_fw_pmctrl(struct mt7921_dev *dev)
sdio_claim_host(func);
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state)) {
+ err = readx_poll_timeout(mt7921s_clear_rm3r_drv_own,
+ dev, status,
+ !(status & D2HRM3R_IS_DRIVER_OWN),
+ 2000, 1000000);
+ if (err < 0) {
+ dev_err(dev->mt76.dev, "mailbox ACK not cleared\n");
+ goto err;
+ }
+ }
+
sdio_writel(func, WHLPCR_FW_OWN_REQ_SET, MCR_WHLPCR, NULL);
err = readx_poll_timeout(mt76s_read_pcr, &dev->mt76, status,
!(status & WHLPCR_IS_DRIVER_OWN), 2000, 1000000);
sdio_release_host(func);
+err:
if (err < 0) {
dev_err(dev->mt76.dev, "firmware own failed\n");
clear_bit(MT76_STATE_PM, &mphy->state);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
new file mode 100644
index 000000000000..b7771e9f1fcd
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2022 MediaTek Inc.
+ *
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "mt7921.h"
+#include "mcu.h"
+#include "mac.h"
+
+static const struct usb_device_id mt7921u_device_table[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7961, 0xff, 0xff, 0xff) },
+ { },
+};
+
+static u32 mt7921u_rr(struct mt76_dev *dev, u32 addr)
+{
+ u32 ret;
+
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ret = ___mt76u_rr(dev, MT_VEND_READ_EXT,
+ USB_DIR_IN | MT_USB_TYPE_VENDOR, addr);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return ret;
+}
+
+static void mt7921u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ___mt76u_wr(dev, MT_VEND_WRITE_EXT,
+ USB_DIR_OUT | MT_USB_TYPE_VENDOR, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+
+static u32 mt7921u_rmw(struct mt76_dev *dev, u32 addr,
+ u32 mask, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ val |= ___mt76u_rr(dev, MT_VEND_READ_EXT,
+ USB_DIR_IN | MT_USB_TYPE_VENDOR, addr) & ~mask;
+ ___mt76u_wr(dev, MT_VEND_WRITE_EXT,
+ USB_DIR_OUT | MT_USB_TYPE_VENDOR, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return val;
+}
+
+static void mt7921u_copy(struct mt76_dev *dev, u32 offset,
+ const void *data, int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+ int ret, i = 0, batch_len;
+ const u8 *val = data;
+
+ len = round_up(len, 4);
+
+ mutex_lock(&usb->usb_ctrl_mtx);
+ while (i < len) {
+ batch_len = min_t(int, usb->data_len, len - i);
+ memcpy(usb->data, val + i, batch_len);
+ ret = __mt76u_vendor_request(dev, MT_VEND_WRITE_EXT,
+ USB_DIR_OUT | MT_USB_TYPE_VENDOR,
+ (offset + i) >> 16, offset + i,
+ usb->data, batch_len);
+ if (ret < 0)
+ break;
+
+ i += batch_len;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+}
+
+int mt7921u_mcu_power_on(struct mt7921_dev *dev)
+{
+ int ret;
+
+ ret = mt76u_vendor_request(&dev->mt76, MT_VEND_POWER_ON,
+ USB_DIR_OUT | MT_USB_TYPE_VENDOR,
+ 0x0, 0x1, NULL, 0);
+ if (ret)
+ return ret;
+
+ if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON,
+ MT_TOP_MISC2_FW_PWR_ON, 500)) {
+ dev_err(dev->mt76.dev, "Timeout for power on\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static int
+mt7921u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
+ int cmd, int *seq)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ u32 pad, ep;
+ int ret;
+
+ ret = mt7921_mcu_fill_message(mdev, skb, cmd, seq);
+ if (ret)
+ return ret;
+
+ if (cmd != MCU_CMD(FW_SCATTER))
+ ep = MT_EP_OUT_INBAND_CMD;
+ else
+ ep = MT_EP_OUT_AC_BE;
+
+ mt7921_skb_add_usb_sdio_hdr(dev, skb, 0);
+ pad = round_up(skb->len, 4) + 4 - skb->len;
+ __skb_put_zero(skb, pad);
+
+ ret = mt76u_bulk_msg(&dev->mt76, skb->data, skb->len, NULL,
+ 1000, ep);
+ dev_kfree_skb(skb);
+
+ return ret;
+}
+
+static int mt7921u_mcu_init(struct mt7921_dev *dev)
+{
+ static const struct mt76_mcu_ops mcu_ops = {
+ .headroom = MT_SDIO_HDR_SIZE + sizeof(struct mt7921_mcu_txd),
+ .tailroom = MT_USB_TAIL_SIZE,
+ .mcu_skb_send_msg = mt7921u_mcu_send_message,
+ .mcu_parse_response = mt7921_mcu_parse_response,
+ .mcu_restart = mt76_connac_mcu_restart,
+ };
+ int ret;
+
+ dev->mt76.mcu_ops = &mcu_ops;
+
+ mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
+ ret = mt7921_run_firmware(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+ mt76_clear(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
+
+ return 0;
+}
+
+static void mt7921u_stop(struct ieee80211_hw *hw)
+{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+
+ mt76u_stop_tx(&dev->mt76);
+ mt7921_stop(hw);
+}
+
+static void mt7921u_cleanup(struct mt7921_dev *dev)
+{
+ clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+ mt7921u_wfsys_reset(dev);
+ mt7921_mcu_exit(dev);
+ mt76u_queues_deinit(&dev->mt76);
+}
+
+static int mt7921u_probe(struct usb_interface *usb_intf,
+ const struct usb_device_id *id)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ .txwi_size = MT_SDIO_TXD_SIZE,
+ .drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ,
+ .survey_flags = SURVEY_INFO_TIME_TX |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_BSS_RX,
+ .tx_prepare_skb = mt7921_usb_sdio_tx_prepare_skb,
+ .tx_complete_skb = mt7921_usb_sdio_tx_complete_skb,
+ .tx_status_data = mt7921_usb_sdio_tx_status_data,
+ .rx_skb = mt7921_queue_rx_skb,
+ .sta_ps = mt7921_sta_ps,
+ .sta_add = mt7921_mac_sta_add,
+ .sta_assoc = mt7921_mac_sta_assoc,
+ .sta_remove = mt7921_mac_sta_remove,
+ .update_survey = mt7921_update_channel,
+ };
+ static const struct mt7921_hif_ops hif_ops = {
+ .mcu_init = mt7921u_mcu_init,
+ .init_reset = mt7921u_init_reset,
+ .reset = mt7921u_mac_reset,
+ };
+ static struct mt76_bus_ops bus_ops = {
+ .rr = mt7921u_rr,
+ .wr = mt7921u_wr,
+ .rmw = mt7921u_rmw,
+ .read_copy = mt76u_read_copy,
+ .write_copy = mt7921u_copy,
+ .type = MT76_BUS_USB,
+ };
+ struct usb_device *udev = interface_to_usbdev(usb_intf);
+ struct ieee80211_ops *ops;
+ struct ieee80211_hw *hw;
+ struct mt7921_dev *dev;
+ struct mt76_dev *mdev;
+ int ret;
+
+ ops = devm_kmemdup(&usb_intf->dev, &mt7921_ops, sizeof(mt7921_ops),
+ GFP_KERNEL);
+ if (!ops)
+ return -ENOMEM;
+
+ ops->stop = mt7921u_stop;
+
+ mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), ops, &drv_ops);
+ if (!mdev)
+ return -ENOMEM;
+
+ dev = container_of(mdev, struct mt7921_dev, mt76);
+ dev->hif_ops = &hif_ops;
+
+ udev = usb_get_dev(udev);
+ usb_reset_device(udev);
+
+ usb_set_intfdata(usb_intf, dev);
+
+ ret = __mt76u_init(mdev, usb_intf, &bus_ops);
+ if (ret < 0)
+ goto error;
+
+ mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+ (mt76_rr(dev, MT_HW_REV) & 0xff);
+ dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+ if (mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY)) {
+ ret = mt7921u_wfsys_reset(dev);
+ if (ret)
+ goto error;
+ }
+
+ ret = mt7921u_mcu_power_on(dev);
+ if (ret)
+ goto error;
+
+ ret = mt76u_alloc_mcu_queue(&dev->mt76);
+ if (ret)
+ goto error;
+
+ ret = mt76u_alloc_queues(&dev->mt76);
+ if (ret)
+ goto error;
+
+ ret = mt7921u_dma_init(dev);
+ if (ret)
+ return ret;
+
+ hw = mt76_hw(dev);
+ /* check hw sg support in order to enable AMSDU */
+ hw->max_tx_fragments = mdev->usb.sg_en ? MT_HW_TXP_MAX_BUF_NUM : 1;
+
+ ret = mt7921_register_device(dev);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ mt76u_queues_deinit(&dev->mt76);
+
+ usb_set_intfdata(usb_intf, NULL);
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+ mt76_free_device(&dev->mt76);
+
+ return ret;
+}
+
+static void mt7921u_disconnect(struct usb_interface *usb_intf)
+{
+ struct mt7921_dev *dev = usb_get_intfdata(usb_intf);
+
+ cancel_work_sync(&dev->init_work);
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+ return;
+
+ mt76_unregister_device(&dev->mt76);
+ mt7921u_cleanup(dev);
+
+ usb_set_intfdata(usb_intf, NULL);
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+ mt76_free_device(&dev->mt76);
+}
+
+MODULE_DEVICE_TABLE(usb, mt7921u_device_table);
+MODULE_FIRMWARE(MT7921_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7921_ROM_PATCH);
+
+static struct usb_driver mt7921u_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt7921u_device_table,
+ .probe = mt7921u_probe,
+ .disconnect = mt7921u_disconnect,
+ .soft_unbind = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt7921u_driver);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_LICENSE("Dual BSD/GPL");
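
mt7921u_rr/wr/rmw above take usb_ctrl_mtx around every access because a read-modify-write over USB is two separate vendor requests rather than a single bus transaction. The merge itself is just "keep the bits outside mask, OR in val"; a trivial sketch of that arithmetic (the locking and the vendor requests are omitted, this only shows the bit math):

#include <stdio.h>
#include <stdint.h>

/* same merge as mt7921u_rmw(): new = (old & ~mask) | val */
static uint32_t rmw(uint32_t old, uint32_t mask, uint32_t val)
{
	return (old & ~mask) | val;
}

int main(void)
{
	uint32_t reg = 0xdeadbeef;

	/* replace bits 7..0, keep everything else */
	printf("0x%08x\n", (unsigned)rmw(reg, 0xff, 0x42));	/* 0xdeadbe42 */
	return 0;
}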
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c
new file mode 100644
index 000000000000..99bcbd858b65
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2022 MediaTek Inc.
+ *
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "mt7921.h"
+#include "mcu.h"
+#include "mac.h"
+
+static u32 mt7921u_uhw_rr(struct mt76_dev *dev, u32 addr)
+{
+ u32 ret;
+
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ret = ___mt76u_rr(dev, MT_VEND_DEV_MODE,
+ USB_DIR_IN | MT_USB_TYPE_UHW_VENDOR, addr);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return ret;
+}
+
+static void mt7921u_uhw_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ___mt76u_wr(dev, MT_VEND_WRITE,
+ USB_DIR_OUT | MT_USB_TYPE_UHW_VENDOR, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+
+static void mt7921u_dma_prefetch(struct mt7921_dev *dev)
+{
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(0),
+ MT_WPDMA0_MAX_CNT_MASK, 4);
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(0),
+ MT_WPDMA0_BASE_PTR_MASK, 0x80);
+
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(1),
+ MT_WPDMA0_MAX_CNT_MASK, 4);
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(1),
+ MT_WPDMA0_BASE_PTR_MASK, 0xc0);
+
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(2),
+ MT_WPDMA0_MAX_CNT_MASK, 4);
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(2),
+ MT_WPDMA0_BASE_PTR_MASK, 0x100);
+
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(3),
+ MT_WPDMA0_MAX_CNT_MASK, 4);
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(3),
+ MT_WPDMA0_BASE_PTR_MASK, 0x140);
+
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(4),
+ MT_WPDMA0_MAX_CNT_MASK, 4);
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(4),
+ MT_WPDMA0_BASE_PTR_MASK, 0x180);
+
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(16),
+ MT_WPDMA0_MAX_CNT_MASK, 4);
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(16),
+ MT_WPDMA0_BASE_PTR_MASK, 0x280);
+
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(17),
+ MT_WPDMA0_MAX_CNT_MASK, 4);
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(17),
+ MT_WPDMA0_BASE_PTR_MASK, 0x2c0);
+}
+
+static void mt7921u_wfdma_init(struct mt7921_dev *dev)
+{
+ mt7921u_dma_prefetch(dev);
+
+ mt76_clear(dev, MT_UWFDMA0_GLO_CFG, MT_WFDMA0_GLO_CFG_OMIT_RX_INFO);
+ mt76_set(dev, MT_UWFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 |
+ MT_WFDMA0_GLO_CFG_FW_DWLD_BYPASS_DMASHDL |
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+ /* disable dmashdl */
+ mt76_clear(dev, MT_UWFDMA0_GLO_CFG_EXT0,
+ MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
+ mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);
+
+ mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);
+}
+
+static int mt7921u_dma_rx_evt_ep4(struct mt7921_dev *dev)
+{
+ if (!mt76_poll(dev, MT_UWFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000))
+ return -ETIMEDOUT;
+
+ mt76_clear(dev, MT_UWFDMA0_GLO_CFG, MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+ mt76_set(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_USB_RXEVT_EP4_EN);
+ mt76_set(dev, MT_UWFDMA0_GLO_CFG, MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+
+ return 0;
+}
+
+static void mt7921u_epctl_rst_opt(struct mt7921_dev *dev, bool reset)
+{
+ u32 val;
+
+ /* usb endpoint reset opt
+ * bits[4,9]: out blk ep 4-9
+ * bits[20,21]: in blk ep 4-5
+ * bits[22]: in int ep 6
+ */
+ val = mt7921u_uhw_rr(&dev->mt76, MT_SSUSB_EPCTL_CSR_EP_RST_OPT);
+ if (reset)
+ val |= GENMASK(9, 4) | GENMASK(22, 20);
+ else
+ val &= ~(GENMASK(9, 4) | GENMASK(22, 20));
+ mt7921u_uhw_wr(&dev->mt76, MT_SSUSB_EPCTL_CSR_EP_RST_OPT, val);
+}
+
+int mt7921u_dma_init(struct mt7921_dev *dev)
+{
+ int err;
+
+ mt7921u_wfdma_init(dev);
+
+ mt76_clear(dev, MT_UDMA_WLCFG_0, MT_WL_RX_FLUSH);
+
+ mt76_set(dev, MT_UDMA_WLCFG_0,
+ MT_WL_RX_EN | MT_WL_TX_EN |
+ MT_WL_RX_MPSZ_PAD0 | MT_TICK_1US_EN);
+ mt76_clear(dev, MT_UDMA_WLCFG_0,
+ MT_WL_RX_AGG_TO | MT_WL_RX_AGG_LMT);
+ mt76_clear(dev, MT_UDMA_WLCFG_1, MT_WL_RX_AGG_PKT_LMT);
+
+ err = mt7921u_dma_rx_evt_ep4(dev);
+ if (err)
+ return err;
+
+ mt7921u_epctl_rst_opt(dev, false);
+
+ return 0;
+}
+
+int mt7921u_wfsys_reset(struct mt7921_dev *dev)
+{
+ u32 val;
+ int i;
+
+ mt7921u_epctl_rst_opt(dev, false);
+
+ val = mt7921u_uhw_rr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST);
+ val |= MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH;
+ mt7921u_uhw_wr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST, val);
+
+ usleep_range(10, 20);
+
+ val = mt7921u_uhw_rr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST);
+ val &= ~MT_CBTOP_RGU_WF_SUBSYS_RST_WF_WHOLE_PATH;
+ mt7921u_uhw_wr(&dev->mt76, MT_CBTOP_RGU_WF_SUBSYS_RST, val);
+
+ mt7921u_uhw_wr(&dev->mt76, MT_UDMA_CONN_INFRA_STATUS_SEL, 0);
+ for (i = 0; i < MT7921_WFSYS_INIT_RETRY_COUNT; i++) {
+ val = mt7921u_uhw_rr(&dev->mt76, MT_UDMA_CONN_INFRA_STATUS);
+ if (val & MT_UDMA_CONN_WFSYS_INIT_DONE)
+ break;
+
+ msleep(100);
+ }
+
+ if (i == MT7921_WFSYS_INIT_RETRY_COUNT)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int mt7921u_init_reset(struct mt7921_dev *dev)
+{
+ set_bit(MT76_RESET, &dev->mphy.state);
+
+ wake_up(&dev->mt76.mcu.wait);
+ mt7921_mcu_exit(dev);
+
+ mt76u_stop_rx(&dev->mt76);
+ mt76u_stop_tx(&dev->mt76);
+
+ mt7921u_wfsys_reset(dev);
+
+ clear_bit(MT76_RESET, &dev->mphy.state);
+
+ return mt76u_resume_rx(&dev->mt76);
+}
+
+int mt7921u_mac_reset(struct mt7921_dev *dev)
+{
+ int err;
+
+ mt76_txq_schedule_all(&dev->mphy);
+ mt76_worker_disable(&dev->mt76.tx_worker);
+
+ set_bit(MT76_RESET, &dev->mphy.state);
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
+
+ wake_up(&dev->mt76.mcu.wait);
+ mt7921_mcu_exit(dev);
+
+ mt76u_stop_rx(&dev->mt76);
+ mt76u_stop_tx(&dev->mt76);
+
+ mt7921u_wfsys_reset(dev);
+
+ clear_bit(MT76_MCU_RESET, &dev->mphy.state);
+ err = mt76u_resume_rx(&dev->mt76);
+ if (err)
+ goto out;
+
+ err = mt7921u_mcu_power_on(dev);
+ if (err)
+ goto out;
+
+ err = mt7921u_dma_init(dev);
+ if (err)
+ goto out;
+
+ mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
+ mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
+
+ err = mt7921_run_firmware(dev);
+ if (err)
+ goto out;
+
+ mt76_clear(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
+
+ err = mt7921_mcu_set_eeprom(dev);
+ if (err)
+ goto out;
+
+ err = mt7921_mac_init(dev);
+ if (err)
+ goto out;
+
+ err = __mt7921_start(&dev->phy);
+out:
+ clear_bit(MT76_RESET, &dev->mphy.state);
+
+ mt76_worker_enable(&dev->mt76.tx_worker);
+
+ return err;
+}
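
mt7921u_dma_prefetch() above issues the same pair of mt76_rmw() calls for seven TX rings, differing only in the ring index and the base pointer value. A table-driven sketch of the same sequence follows, with a stub rmw() so it compiles standalone; the ring numbers, masks and base values are copied verbatim from the function and from regs.h above, and whether such a rewrite is worthwhile in the driver itself is purely a style call.

#include <stdio.h>
#include <stdint.h>

/* values copied from regs.h / mt7921u_dma_prefetch() above */
#define MT_UWFDMA0(ofs)			(0x7c024000 + (ofs))
#define MT_UWFDMA0_TX_RING_EXT_CTRL(n)	MT_UWFDMA0(0x600 + ((n) << 2))
#define MT_WPDMA0_MAX_CNT_MASK		0x000000ffu	/* GENMASK(7, 0) */
#define MT_WPDMA0_BASE_PTR_MASK		0xffff0000u	/* GENMASK(31, 16) */

/* stand-in for mt76_rmw(); just prints what would be written */
static void rmw(uint32_t addr, uint32_t mask, uint32_t val)
{
	printf("rmw addr=0x%08x mask=0x%08x val=0x%x\n",
	       (unsigned)addr, (unsigned)mask, (unsigned)val);
}

int main(void)
{
	static const struct { uint8_t ring; uint16_t base; } prefetch[] = {
		{ 0, 0x80 }, { 1, 0xc0 }, { 2, 0x100 }, { 3, 0x140 },
		{ 4, 0x180 }, { 16, 0x280 }, { 17, 0x2c0 },
	};

	for (size_t i = 0; i < sizeof(prefetch) / sizeof(prefetch[0]); i++) {
		uint32_t ctrl = MT_UWFDMA0_TX_RING_EXT_CTRL(prefetch[i].ring);

		rmw(ctrl, MT_WPDMA0_MAX_CNT_MASK, 4);
		rmw(ctrl, MT_WPDMA0_BASE_PTR_MASK, prefetch[i].base);
	}
	return 0;
}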
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index 54f72d215948..def7f325f5c5 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -12,6 +12,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
#include <linux/sched.h>
#include <linux/kthread.h>
@@ -627,6 +629,7 @@ int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
const struct mt76_bus_ops *bus_ops)
{
struct mt76_sdio *sdio = &dev->sdio;
+ u32 host_max_cap;
int err;
err = mt76_worker_setup(dev->hw, &sdio->status_worker,
@@ -648,7 +651,16 @@ int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
dev->bus = bus_ops;
dev->sdio.func = func;
- return 0;
+ host_max_cap = min_t(u32, func->card->host->max_req_size,
+ func->cur_blksize *
+ func->card->host->max_blk_count);
+ dev->sdio.xmit_buf_sz = min_t(u32, host_max_cap, MT76S_XMIT_BUF_SZ);
+ dev->sdio.xmit_buf = devm_kmalloc(dev->dev, dev->sdio.xmit_buf_sz,
+ GFP_KERNEL);
+ if (!dev->sdio.xmit_buf)
+ err = -ENOMEM;
+
+ return err;
}
EXPORT_SYMBOL_GPL(mt76s_init);
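
The mt76s_init() change above replaces the per-queue xmit buffers with a single buffer whose size is clamped to the minimum of the SDIO host limit (max_req_size, and blocksize times max_blk_count) and MT76S_XMIT_BUF_SZ. A worked example of that clamp with made-up host numbers; the host capabilities and the MT76S_XMIT_BUF_SZ value below are assumptions for illustration only, the real constant is defined elsewhere in mt76.

#include <stdio.h>
#include <stdint.h>

#define MIN(a, b)		((a) < (b) ? (a) : (b))
/* assumed value for the sketch; the real constant lives in the mt76 headers */
#define MT76S_XMIT_BUF_SZ	(16 * 1024)

int main(void)
{
	/* hypothetical SDIO host capabilities */
	uint32_t max_req_size = 65536;
	uint32_t cur_blksize = 512;
	uint32_t max_blk_count = 16;	/* a deliberately small host limit */

	uint32_t host_max_cap = MIN(max_req_size, cur_blksize * max_blk_count);
	uint32_t xmit_buf_sz = MIN(host_max_cap, (uint32_t)MT76S_XMIT_BUF_SZ);

	/* 512 * 16 = 8192 < 16384, so the host limit wins here */
	printf("xmit_buf_sz = %u bytes\n", (unsigned)xmit_buf_sz);
	return 0;
}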
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.h b/drivers/net/wireless/mediatek/mt76/sdio.h
index 99db4ad93b7c..27d5d2077eba 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.h
+++ b/drivers/net/wireless/mediatek/mt76/sdio.h
@@ -65,6 +65,7 @@
#define MCR_H2DSM0R 0x0070
#define H2D_SW_INT_READ BIT(16)
#define H2D_SW_INT_WRITE BIT(17)
+#define H2D_SW_INT_CLEAR_MAILBOX_ACK BIT(22)
#define MCR_H2DSM1R 0x0074
#define MCR_D2HRM0R 0x0078
@@ -109,6 +110,7 @@
#define MCR_H2DSM2R 0x0160 /* supported in CONNAC2 */
#define MCR_H2DSM3R 0x0164 /* supported in CONNAC2 */
#define MCR_D2HRM3R 0x0174 /* supported in CONNAC2 */
+#define D2HRM3R_IS_DRIVER_OWN BIT(0)
#define MCR_WTQCR8 0x0190 /* supported in CONNAC2 */
#define MCR_WTQCR9 0x0194 /* supported in CONNAC2 */
#define MCR_WTQCR10 0x0198 /* supported in CONNAC2 */
diff --git a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
index 801590a0a334..a2601aa9e7b1 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
@@ -102,7 +102,10 @@ mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
buf = page_address(page);
+ sdio_claim_host(sdio->func);
err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len);
+ sdio_release_host(sdio->func);
+
if (err < 0) {
dev_err(dev->dev, "sdio read data failed:%d\n", err);
put_page(page);
@@ -115,7 +118,7 @@ mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
__le32 *rxd = (__le32 *)buf;
/* parse rxd to get the actual packet length */
- len = FIELD_GET(GENMASK(15, 0), le32_to_cpu(rxd[0]));
+ len = le32_get_bits(rxd[0], GENMASK(15, 0));
e->skb = mt76s_build_rx_skb(buf, len, round_up(len + 4, 4));
if (!e->skb)
break;
@@ -214,7 +217,10 @@ static int __mt76s_xmit_queue(struct mt76_dev *dev, u8 *data, int len)
if (len > sdio->func->cur_blksize)
len = roundup(len, sdio->func->cur_blksize);
+ sdio_claim_host(sdio->func);
err = sdio_writesb(sdio->func, MCR_WTDR1, data, len);
+ sdio_release_host(sdio->func);
+
if (err)
dev_err(dev->dev, "sdio write failed: %d\n", err);
@@ -223,12 +229,11 @@ static int __mt76s_xmit_queue(struct mt76_dev *dev, u8 *data, int len)
static int mt76s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- int qid, err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
+ int err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
bool mcu = q == dev->q_mcu[MT_MCUQ_WM];
struct mt76_sdio *sdio = &dev->sdio;
u8 pad;
- qid = mcu ? ARRAY_SIZE(sdio->xmit_buf) - 1 : q->qid;
while (q->first != q->head) {
struct mt76_queue_entry *e = &q->entry[q->first];
struct sk_buff *iter;
@@ -249,27 +254,25 @@ static int mt76s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
}
pad = roundup(e->skb->len, 4) - e->skb->len;
- if (len + e->skb->len + pad + 4 > MT76S_XMIT_BUF_SZ)
+ if (len + e->skb->len + pad + 4 > dev->sdio.xmit_buf_sz)
break;
if (mt76s_tx_pick_quota(sdio, mcu, e->buf_sz, &pse_sz,
&ple_sz))
break;
- memcpy(sdio->xmit_buf[qid] + len, e->skb->data,
- skb_headlen(e->skb));
+ memcpy(sdio->xmit_buf + len, e->skb->data, skb_headlen(e->skb));
len += skb_headlen(e->skb);
nframes++;
skb_walk_frags(e->skb, iter) {
- memcpy(sdio->xmit_buf[qid] + len, iter->data,
- iter->len);
+ memcpy(sdio->xmit_buf + len, iter->data, iter->len);
len += iter->len;
nframes++;
}
if (unlikely(pad)) {
- memset(sdio->xmit_buf[qid] + len, 0, pad);
+ memset(sdio->xmit_buf + len, 0, pad);
len += pad;
}
next:
@@ -278,8 +281,8 @@ next:
}
if (nframes) {
- memset(sdio->xmit_buf[qid] + len, 0, 4);
- err = __mt76s_xmit_queue(dev, sdio->xmit_buf[qid], len + 4);
+ memset(sdio->xmit_buf + len, 0, 4);
+ err = __mt76s_xmit_queue(dev, sdio->xmit_buf, len + 4);
if (err)
return err;
}
@@ -298,6 +301,7 @@ void mt76s_txrx_worker(struct mt76_sdio *sdio)
/* disable interrupt */
sdio_claim_host(sdio->func);
sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
+ sdio_release_host(sdio->func);
do {
nframes = 0;
@@ -327,6 +331,7 @@ void mt76s_txrx_worker(struct mt76_sdio *sdio)
} while (nframes > 0);
/* enable interrupt */
+ sdio_claim_host(sdio->func);
sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL);
sdio_release_host(sdio->func);
}
@@ -341,6 +346,7 @@ void mt76s_sdio_irq(struct sdio_func *func)
test_bit(MT76_MCU_RESET, &dev->phy.state))
return;
+ sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
mt76_worker_schedule(&sdio->txrx_worker);
}
EXPORT_SYMBOL_GPL(mt76s_sdio_irq);
diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c
index 1a01ad7a4c16..382b45639f26 100644
--- a/drivers/net/wireless/mediatek/mt76/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/testmode.c
@@ -409,7 +409,6 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt76_dev *dev = phy->dev;
struct mt76_testmode_data *td = &phy->test;
struct nlattr *tb[NUM_MT76_TM_ATTRS];
- bool ext_phy = phy != &dev->phy;
u32 state;
int err;
int i;
@@ -447,8 +446,8 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_LDPC], &td->tx_rate_ldpc, 0, 1) ||
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_STBC], &td->tx_rate_stbc, 0, 1) ||
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_LTF], &td->tx_ltf, 0, 2) ||
- mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_ANTENNA], &td->tx_antenna_mask,
- 1 << (ext_phy * 2), phy->antenna_mask << (ext_phy * 2)) ||
+ mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_ANTENNA],
+ &td->tx_antenna_mask, 0, 0xff) ||
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_SPE_IDX], &td->tx_spe_idx, 0, 27) ||
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_DUTY_CYCLE],
&td->tx_duty_cycle, 0, 99) ||
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 0a7006c8959b..a85e192c9d59 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -15,9 +15,8 @@ static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");
-static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
- u8 req_type, u16 val, u16 offset,
- void *buf, size_t len)
+int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
+ u16 val, u16 offset, void *buf, size_t len)
{
struct usb_interface *uintf = to_usb_interface(dev->dev);
struct usb_device *udev = interface_to_usbdev(uintf);
@@ -45,6 +44,7 @@ static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
req, offset, ret);
return ret;
}
+EXPORT_SYMBOL_GPL(__mt76u_vendor_request);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
u8 req_type, u16 val, u16 offset,
@@ -62,22 +62,21 @@ int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);
-static u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u32 addr)
+u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr)
{
struct mt76_usb *usb = &dev->usb;
u32 data = ~0;
int ret;
- ret = __mt76u_vendor_request(dev, req,
- USB_DIR_IN | USB_TYPE_VENDOR,
- addr >> 16, addr, usb->data,
- sizeof(__le32));
+ ret = __mt76u_vendor_request(dev, req, req_type, addr >> 16,
+ addr, usb->data, sizeof(__le32));
if (ret == sizeof(__le32))
data = get_unaligned_le32(usb->data);
trace_usb_reg_rr(dev, addr, data);
return data;
}
+EXPORT_SYMBOL_GPL(___mt76u_rr);
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
@@ -95,7 +94,8 @@ static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
break;
}
- return ___mt76u_rr(dev, req, addr & ~MT_VEND_TYPE_MASK);
+ return ___mt76u_rr(dev, req, USB_DIR_IN | USB_TYPE_VENDOR,
+ addr & ~MT_VEND_TYPE_MASK);
}
static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
@@ -109,29 +109,17 @@ static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
return ret;
}
-static u32 mt76u_rr_ext(struct mt76_dev *dev, u32 addr)
-{
- u32 ret;
-
- mutex_lock(&dev->usb.usb_ctrl_mtx);
- ret = ___mt76u_rr(dev, MT_VEND_READ_EXT, addr);
- mutex_unlock(&dev->usb.usb_ctrl_mtx);
-
- return ret;
-}
-
-static void ___mt76u_wr(struct mt76_dev *dev, u8 req,
- u32 addr, u32 val)
+void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
+ u32 addr, u32 val)
{
struct mt76_usb *usb = &dev->usb;
put_unaligned_le32(val, usb->data);
- __mt76u_vendor_request(dev, req,
- USB_DIR_OUT | USB_TYPE_VENDOR,
- addr >> 16, addr, usb->data,
- sizeof(__le32));
+ __mt76u_vendor_request(dev, req, req_type, addr >> 16,
+ addr, usb->data, sizeof(__le32));
trace_usb_reg_wr(dev, addr, val);
}
+EXPORT_SYMBOL_GPL(___mt76u_wr);
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
@@ -145,7 +133,8 @@ static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
req = MT_VEND_MULTI_WRITE;
break;
}
- ___mt76u_wr(dev, req, addr & ~MT_VEND_TYPE_MASK, val);
+ ___mt76u_wr(dev, req, USB_DIR_OUT | USB_TYPE_VENDOR,
+ addr & ~MT_VEND_TYPE_MASK, val);
}
static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
@@ -155,13 +144,6 @@ static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
-static void mt76u_wr_ext(struct mt76_dev *dev, u32 addr, u32 val)
-{
- mutex_lock(&dev->usb.usb_ctrl_mtx);
- ___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
- mutex_unlock(&dev->usb.usb_ctrl_mtx);
-}
-
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
u32 mask, u32 val)
{
@@ -173,17 +155,6 @@ static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
return val;
}
-static u32 mt76u_rmw_ext(struct mt76_dev *dev, u32 addr,
- u32 mask, u32 val)
-{
- mutex_lock(&dev->usb.usb_ctrl_mtx);
- val |= ___mt76u_rr(dev, MT_VEND_READ_EXT, addr) & ~mask;
- ___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
- mutex_unlock(&dev->usb.usb_ctrl_mtx);
-
- return val;
-}
-
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
const void *data, int len)
{
@@ -216,33 +187,8 @@ static void mt76u_copy(struct mt76_dev *dev, u32 offset,
mutex_unlock(&usb->usb_ctrl_mtx);
}
-static void mt76u_copy_ext(struct mt76_dev *dev, u32 offset,
- const void *data, int len)
-{
- struct mt76_usb *usb = &dev->usb;
- int ret, i = 0, batch_len;
- const u8 *val = data;
-
- len = round_up(len, 4);
- mutex_lock(&usb->usb_ctrl_mtx);
- while (i < len) {
- batch_len = min_t(int, usb->data_len, len - i);
- memcpy(usb->data, val + i, batch_len);
- ret = __mt76u_vendor_request(dev, MT_VEND_WRITE_EXT,
- USB_DIR_OUT | USB_TYPE_VENDOR,
- (offset + i) >> 16, offset + i,
- usb->data, batch_len);
- if (ret < 0)
- break;
-
- i += batch_len;
- }
- mutex_unlock(&usb->usb_ctrl_mtx);
-}
-
-static void
-mt76u_read_copy_ext(struct mt76_dev *dev, u32 offset,
- void *data, int len)
+void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
+ void *data, int len)
{
struct mt76_usb *usb = &dev->usb;
int i = 0, batch_len, ret;
@@ -264,6 +210,7 @@ mt76u_read_copy_ext(struct mt76_dev *dev, u32 offset,
}
mutex_unlock(&usb->usb_ctrl_mtx);
}
+EXPORT_SYMBOL_GPL(mt76u_read_copy);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
const u16 offset, const u32 val)
@@ -1112,24 +1059,13 @@ static const struct mt76_queue_ops usb_queue_ops = {
.kick = mt76u_tx_kick,
};
-int mt76u_init(struct mt76_dev *dev,
- struct usb_interface *intf, bool ext)
+int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
+ struct mt76_bus_ops *ops)
{
- static struct mt76_bus_ops mt76u_ops = {
- .read_copy = mt76u_read_copy_ext,
- .wr_rp = mt76u_wr_rp,
- .rd_rp = mt76u_rd_rp,
- .type = MT76_BUS_USB,
- };
struct usb_device *udev = interface_to_usbdev(intf);
struct mt76_usb *usb = &dev->usb;
int err;
- mt76u_ops.rr = ext ? mt76u_rr_ext : mt76u_rr;
- mt76u_ops.wr = ext ? mt76u_wr_ext : mt76u_wr;
- mt76u_ops.rmw = ext ? mt76u_rmw_ext : mt76u_rmw;
- mt76u_ops.write_copy = ext ? mt76u_copy_ext : mt76u_copy;
-
INIT_WORK(&usb->stat_work, mt76u_tx_status_data);
usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0), 1);
@@ -1141,7 +1077,7 @@ int mt76u_init(struct mt76_dev *dev,
return -ENOMEM;
mutex_init(&usb->usb_ctrl_mtx);
- dev->bus = &mt76u_ops;
+ dev->bus = ops;
dev->queue_ops = &usb_queue_ops;
dev_set_drvdata(&udev->dev, dev);
@@ -1167,6 +1103,23 @@ int mt76u_init(struct mt76_dev *dev,
return 0;
}
+EXPORT_SYMBOL_GPL(__mt76u_init);
+
+int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf)
+{
+ static struct mt76_bus_ops bus_ops = {
+ .rr = mt76u_rr,
+ .wr = mt76u_wr,
+ .rmw = mt76u_rmw,
+ .read_copy = mt76u_read_copy,
+ .write_copy = mt76u_copy,
+ .wr_rp = mt76u_wr_rp,
+ .rd_rp = mt76u_rd_rp,
+ .type = MT76_BUS_USB,
+ };
+
+ return __mt76u_init(dev, intf, &bus_ops);
+}
EXPORT_SYMBOL_GPL(mt76u_init);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index d2db52289399..18420e954402 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -725,10 +725,7 @@ static int wilc_spi_dma_rw(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz)
int nbytes;
u8 rsp;
- if (sz <= DATA_PKT_SZ)
- nbytes = sz;
- else
- nbytes = DATA_PKT_SZ;
+ nbytes = min_t(u32, sz, DATA_PKT_SZ);
/*
* Data Response header
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 2987ad9271f6..87e98ab068ed 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -382,6 +382,8 @@ static int ray_config(struct pcmcia_device *link)
goto failed;
local->sram = ioremap(link->resource[2]->start,
resource_size(link->resource[2]));
+ if (!local->sram)
+ goto failed;
/*** Set up 16k window for shared memory (receive buffer) ***************/
link->resource[3]->flags |=
@@ -396,6 +398,8 @@ static int ray_config(struct pcmcia_device *link)
goto failed;
local->rmem = ioremap(link->resource[3]->start,
resource_size(link->resource[3]));
+ if (!local->rmem)
+ goto failed;
/*** Set up window for attribute memory ***********************************/
link->resource[4]->flags |=
@@ -410,6 +414,8 @@ static int ray_config(struct pcmcia_device *link)
goto failed;
local->amem = ioremap(link->resource[4]->start,
resource_size(link->resource[4]));
+ if (!local->amem)
+ goto failed;
dev_dbg(&link->dev, "ray_config sram=%p\n", local->sram);
dev_dbg(&link->dev, "ray_config rmem=%p\n", local->rmem);
diff --git a/drivers/net/wireless/realtek/rtlwifi/cam.c b/drivers/net/wireless/realtek/rtlwifi/cam.c
index 7a0355dc6bab..32970ea4b4e7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/cam.c
+++ b/drivers/net/wireless/realtek/rtlwifi/cam.c
@@ -208,7 +208,7 @@ void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index)
u32 ul_command;
u32 ul_content;
- u32 ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES];
+ u32 ul_encalgo;
u8 entry_i;
switch (rtlpriv->sec.pairwise_enc_algorithm) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
index 04735da11168..da54e51badd3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
@@ -396,36 +396,6 @@ void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
}
}
-static void _rtl92ce_phy_set_rf_sleep(struct ieee80211_hw *hw)
-{
- u32 u4b_tmp;
- u8 delay = 5;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
- rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
- rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
- u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
- while (u4b_tmp != 0 && delay > 0) {
- rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
- rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
- rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
- u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
- delay--;
- }
- if (delay == 0) {
- rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
- rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
- rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
- rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
- rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
- "Switch RF timeout !!!\n");
- return;
- }
- rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
- rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
-}
-
static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
enum rf_pwrstate rfpwr_state)
{
@@ -519,7 +489,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
jiffies_to_msecs(jiffies -
ppsc->last_awake_jiffies));
ppsc->last_sleep_jiffies = jiffies;
- _rtl92ce_phy_set_rf_sleep(hw);
+ _rtl92c_phy_set_rf_sleep(hw);
break;
}
default:
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index b53daf1b29f7..876c14d46c2f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -334,6 +334,7 @@ static const struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x04f2, 0xaff7, rtl92cu_hal_cfg)}, /*Xavi*/
{RTL_USB_DEVICE(0x04f2, 0xaff9, rtl92cu_hal_cfg)}, /*Xavi*/
{RTL_USB_DEVICE(0x04f2, 0xaffa, rtl92cu_hal_cfg)}, /*Xavi*/
+ {RTL_USB_DEVICE(0x0846, 0x9042, rtl92cu_hal_cfg)}, /*On Networks N150MA*/
/****** 8188CUS Slim Combo ********/
{RTL_USB_DEVICE(0x04f2, 0xaff8, rtl92cu_hal_cfg)}, /*Xavi*/
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index f6bff0ebd6b0..f3fe16798c59 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -872,7 +872,7 @@ static void rtl8821ae_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
else
falsealm_cnt->cnt_all = falsealm_cnt->cnt_ofdm_fail;
- /*reset OFDM FA coutner*/
+ /*reset OFDM FA counter*/
rtl_set_bbreg(hw, ODM_REG_OFDM_FA_RST_11AC, BIT(17), 1);
rtl_set_bbreg(hw, ODM_REG_OFDM_FA_RST_11AC, BIT(17), 0);
/* reset CCK FA counter*/
@@ -1464,7 +1464,7 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
const u8 *delta_swing_table_idx_tup_b;
const u8 *delta_swing_table_idx_tdown_b;
- /*2. Initilization ( 7 steps in total )*/
+ /*2. Initialization ( 7 steps in total )*/
rtl8812ae_get_delta_swing_table(hw,
&delta_swing_table_idx_tup_a,
&delta_swing_table_idx_tdown_a,
@@ -2502,7 +2502,7 @@ static void rtl8821ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
rtlpriv->dm.dbginfo.num_non_be_pkt = 0;
/*===============================
- * list paramter for different platform
+ * list parameter for different platform
*===============================
*/
pb_is_cur_rdl_state = &rtlpriv->dm.is_cur_rdlstate;
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
index 2551e228b581..cac053f485c3 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.c
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -211,6 +211,10 @@ static void rtw_coex_wl_ccklock_detect(struct rtw_dev *rtwdev)
bool is_cck_lock_rate = false;
+ if (coex_stat->wl_coex_mode != COEX_WLINK_2G1PORT &&
+ coex_stat->wl_coex_mode != COEX_WLINK_2GFREE)
+ return;
+
if (coex_dm->bt_status == COEX_BTSTATUS_INQ_PAGE ||
coex_stat->bt_setup_link) {
coex_stat->wl_cck_lock = false;
@@ -460,6 +464,29 @@ static void rtw_coex_gnt_workaround(struct rtw_dev *rtwdev, bool force, u8 mode)
rtw_coex_set_gnt_fix(rtwdev);
}
+static void rtw_coex_monitor_bt_ctr(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ u32 tmp;
+
+ tmp = rtw_read32(rtwdev, REG_BT_ACT_STATISTICS);
+ coex_stat->hi_pri_tx = FIELD_GET(MASKLWORD, tmp);
+ coex_stat->hi_pri_rx = FIELD_GET(MASKHWORD, tmp);
+
+ tmp = rtw_read32(rtwdev, REG_BT_ACT_STATISTICS_1);
+ coex_stat->lo_pri_tx = FIELD_GET(MASKLWORD, tmp);
+ coex_stat->lo_pri_rx = FIELD_GET(MASKHWORD, tmp);
+
+ rtw_write8(rtwdev, REG_BT_COEX_ENH_INTR_CTRL,
+ BIT_R_GRANTALL_WLMASK | BIT_STATIS_BT_EN);
+
+ rtw_dbg(rtwdev, RTW_DBG_COEX,
+ "[BTCoex], Hi-Pri Rx/Tx: %d/%d, Lo-Pri Rx/Tx: %d/%d\n",
+ coex_stat->hi_pri_rx, coex_stat->hi_pri_tx,
+ coex_stat->lo_pri_rx, coex_stat->lo_pri_tx);
+}
+
static void rtw_coex_monitor_bt_enable(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
@@ -780,7 +807,9 @@ static void rtw_coex_update_bt_link_info(struct rtw_dev *rtwdev)
static void rtw_coex_update_wl_ch_info(struct rtw_dev *rtwdev, u8 type)
{
struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
struct rtw_coex_dm *coex_dm = &rtwdev->coex.dm;
+ struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat;
u8 link = 0;
u8 center_chan = 0;
u8 bw;
@@ -791,7 +820,9 @@ static void rtw_coex_update_wl_ch_info(struct rtw_dev *rtwdev, u8 type)
if (type != COEX_MEDIA_DISCONNECT)
center_chan = rtwdev->hal.current_channel;
- if (center_chan == 0) {
+ if (center_chan == 0 ||
+ (efuse->share_ant && center_chan <= 14 &&
+ coex_stat->wl_coex_mode != COEX_WLINK_2GFREE)) {
link = 0;
center_chan = 0;
bw = 0;
@@ -930,6 +961,23 @@ static void rtw_coex_set_gnt_wl(struct rtw_dev *rtwdev, u8 state)
rtw_coex_write_indirect_reg(rtwdev, LTE_COEX_CTRL, 0x0300, state);
}
+static void rtw_coex_mimo_ps(struct rtw_dev *rtwdev, bool force, bool state)
+{
+ struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat;
+
+ if (!force && state == coex_stat->wl_mimo_ps)
+ return;
+
+ coex_stat->wl_mimo_ps = state;
+
+ rtw_set_txrx_1ss(rtwdev, state);
+
+ rtw_coex_update_wl_ch_info(rtwdev, (u8)coex_stat->wl_connected);
+
+ rtw_dbg(rtwdev, RTW_DBG_COEX,
+ "[BTCoex], %s(): state = %d\n", __func__, state);
+}
+
static void rtw_btc_wltoggle_table_a(struct rtw_dev *rtwdev, bool force,
u8 table_case)
{
@@ -1106,7 +1154,8 @@ static void rtw_coex_set_tdma(struct rtw_dev *rtwdev, u8 byte1, u8 byte2,
ps_type = COEX_PS_WIFI_NATIVE;
rtw_coex_power_save_state(rtwdev, ps_type, 0x0, 0x0);
- } else if (byte1 & BIT(4) && !(byte1 & BIT(5))) {
+ } else if ((byte1 & BIT(4) && !(byte1 & BIT(5))) ||
+ coex_stat->wl_coex_mode == COEX_WLINK_2GFREE) {
rtw_dbg(rtwdev, RTW_DBG_COEX,
"[BTCoex], %s(): Force LPS (byte1 = 0x%x)\n", __func__,
byte1);
@@ -1802,6 +1851,54 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev)
rtw_coex_tdma(rtwdev, false, tdma_case | slot_type);
}
+static void rtw_coex_action_bt_game_hid(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+
+ if (efuse->share_ant) {
+ coex_stat->wl_coex_mode = COEX_WLINK_2GFREE;
+ if (coex_stat->bt_whck_test)
+ table_case = 2;
+ else if (coex_stat->wl_linkscan_proc || coex_stat->bt_hid_exist)
+ table_case = 33;
+ else if (coex_stat->bt_setup_link || coex_stat->bt_inq_page)
+ table_case = 0;
+ else if (coex_stat->bt_a2dp_exist)
+ table_case = 34;
+ else
+ table_case = 33;
+
+ tdma_case = 0;
+ } else {
+ if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[1]))
+ tdma_case = 112;
+ else
+ tdma_case = 113;
+
+ table_case = 121;
+ }
+
+ if (coex_stat->wl_coex_mode == COEX_WLINK_2GFREE) {
+ if (coex_stat->wl_tput_dir == COEX_WL_TPUT_TX)
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_tx[6]);
+ else
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[5]);
+ } else {
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ }
+
+ rtw_coex_table(rtwdev, false, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
static void rtw_coex_action_bt_hfp(struct rtw_dev *rtwdev)
{
struct rtw_coex *coex = &rtwdev->coex;
@@ -1816,13 +1913,8 @@ static void rtw_coex_action_bt_hfp(struct rtw_dev *rtwdev)
if (efuse->share_ant) {
/* Shared-Ant */
- if (coex_stat->bt_multi_link) {
- table_case = 10;
- tdma_case = 17;
- } else {
- table_case = 10;
- tdma_case = 5;
- }
+ table_case = 10;
+ tdma_case = 5;
} else {
/* Non-Shared-Ant */
if (coex_stat->bt_multi_link) {
@@ -2224,8 +2316,10 @@ static void rtw_coex_action_bt_a2dp_pan_hid(struct rtw_dev *rtwdev)
static void rtw_coex_action_wl_under5g(struct rtw_dev *rtwdev)
{
+ struct rtw_coex *coex = &rtwdev->coex;
struct rtw_efuse *efuse = &rtwdev->efuse;
struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -2235,6 +2329,9 @@ static void rtw_coex_action_wl_under5g(struct rtw_dev *rtwdev)
rtw_coex_write_scbd(rtwdev, COEX_SCBD_FIX2M, false);
+ if (coex_stat->bt_game_hid_exist && coex_stat->wl_linkscan_proc)
+ coex_stat->wl_coex_mode = COEX_WLINK_2GFREE;
+
if (efuse->share_ant) {
/* Shared-Ant */
table_case = 0;
@@ -2278,6 +2375,7 @@ static void rtw_coex_action_wl_native_lps(struct rtw_dev *rtwdev)
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_efuse *efuse = &rtwdev->efuse;
struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
u8 table_case, tdma_case;
if (coex->under_5g)
@@ -2286,7 +2384,6 @@ static void rtw_coex_action_wl_native_lps(struct rtw_dev *rtwdev)
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
- rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
if (efuse->share_ant) {
/* Shared-Ant */
@@ -2298,6 +2395,16 @@ static void rtw_coex_action_wl_native_lps(struct rtw_dev *rtwdev)
tdma_case = 100;
}
+ if (coex_stat->bt_game_hid_exist) {
+ coex_stat->wl_coex_mode = COEX_WLINK_2GFREE;
+ if (coex_stat->wl_tput_dir == COEX_WL_TPUT_TX)
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_tx[6]);
+ else
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[5]);
+ } else {
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ }
+
rtw_coex_table(rtwdev, false, table_case);
rtw_coex_tdma(rtwdev, false, tdma_case);
}
@@ -2422,6 +2529,7 @@ static void rtw_coex_action_wl_connected(struct rtw_dev *rtwdev)
static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason)
{
struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex_dm *coex_dm = &coex->dm;
struct rtw_coex_stat *coex_stat = &coex->stat;
bool rf4ce_en = false;
@@ -2494,6 +2602,11 @@ static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason)
goto exit;
}
+ if (coex_stat->bt_game_hid_exist && coex_stat->wl_connected) {
+ rtw_coex_action_bt_game_hid(rtwdev);
+ goto exit;
+ }
+
if (coex_stat->bt_whck_test) {
rtw_coex_action_bt_whql_test(rtwdev);
goto exit;
@@ -2530,6 +2643,18 @@ static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason)
}
exit:
+
+ if (chip->wl_mimo_ps_support) {
+ if (coex_stat->wl_coex_mode == COEX_WLINK_2GFREE) {
+ if (coex_dm->reason == COEX_RSN_2GMEDIA)
+ rtw_coex_mimo_ps(rtwdev, true, true);
+ else
+ rtw_coex_mimo_ps(rtwdev, false, true);
+ } else {
+ rtw_coex_mimo_ps(rtwdev, false, false);
+ }
+ }
+
rtw_coex_gnt_workaround(rtwdev, false, coex_stat->wl_coex_mode);
rtw_coex_limited_wl(rtwdev);
}
@@ -3139,6 +3264,135 @@ void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length)
rtw_coex_run_coex(rtwdev, COEX_RSN_BTINFO);
}
+#define COEX_BT_HIDINFO_MTK 0x46
+static const u8 coex_bt_hidinfo_ps[] = {0x57, 0x69, 0x72};
+static const u8 coex_bt_hidinfo_xb[] = {0x58, 0x62, 0x6f};
+
+void rtw_coex_bt_hid_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_hid *hidinfo;
+ struct rtw_coex_hid_info_a *hida;
+ struct rtw_coex_hid_handle_list *hl, *bhl;
+ u8 sub_id = buf[2], gamehid_cnt = 0, handle, i;
+ bool cur_game_hid_exist, complete;
+
+ if (!chip->wl_mimo_ps_support &&
+ (sub_id == COEX_BT_HIDINFO_LIST || sub_id == COEX_BT_HIDINFO_A))
+ return;
+
+ rtw_dbg(rtwdev, RTW_DBG_COEX,
+ "[BTCoex], HID info notify, sub_id = 0x%x\n", sub_id);
+
+ switch (sub_id) {
+ case COEX_BT_HIDINFO_LIST:
+ hl = &coex_stat->hid_handle_list;
+ bhl = (struct rtw_coex_hid_handle_list *)buf;
+ if (!memcmp(hl, bhl, sizeof(*hl)))
+ return;
+ coex_stat->hid_handle_list = *bhl;
+ memset(&coex_stat->hid_info, 0, sizeof(coex_stat->hid_info));
+ for (i = 0; i < COEX_BT_HIDINFO_HANDLE_NUM; i++) {
+ hidinfo = &coex_stat->hid_info[i];
+ if (hl->handle[i] != COEX_BT_HIDINFO_NOTCON &&
+ hl->handle[i] != 0)
+ hidinfo->hid_handle = hl->handle[i];
+ }
+ break;
+ case COEX_BT_HIDINFO_A:
+ hida = (struct rtw_coex_hid_info_a *)buf;
+ handle = hida->handle;
+ for (i = 0; i < COEX_BT_HIDINFO_HANDLE_NUM; i++) {
+ hidinfo = &coex_stat->hid_info[i];
+ if (hidinfo->hid_handle == handle) {
+ hidinfo->hid_vendor = hida->vendor;
+ memcpy(hidinfo->hid_name, hida->name,
+ sizeof(hidinfo->hid_name));
+ hidinfo->hid_info_completed = true;
+ break;
+ }
+ }
+ break;
+ }
+ for (i = 0; i < COEX_BT_HIDINFO_HANDLE_NUM; i++) {
+ hidinfo = &coex_stat->hid_info[i];
+ complete = hidinfo->hid_info_completed;
+ handle = hidinfo->hid_handle;
+ if (!complete || handle == COEX_BT_HIDINFO_NOTCON ||
+ handle == 0 || handle >= COEX_BT_BLE_HANDLE_THRS) {
+ hidinfo->is_game_hid = false;
+ continue;
+ }
+
+ if (hidinfo->hid_vendor == COEX_BT_HIDINFO_MTK) {
+ if ((memcmp(hidinfo->hid_name,
+ coex_bt_hidinfo_ps,
+ COEX_BT_HIDINFO_NAME)) == 0)
+ hidinfo->is_game_hid = true;
+ else if ((memcmp(hidinfo->hid_name,
+ coex_bt_hidinfo_xb,
+ COEX_BT_HIDINFO_NAME)) == 0)
+ hidinfo->is_game_hid = true;
+ else
+ hidinfo->is_game_hid = false;
+ } else {
+ hidinfo->is_game_hid = false;
+ }
+ if (hidinfo->is_game_hid)
+ gamehid_cnt++;
+ }
+
+ if (gamehid_cnt > 0)
+ cur_game_hid_exist = true;
+ else
+ cur_game_hid_exist = false;
+
+ if (cur_game_hid_exist != coex_stat->bt_game_hid_exist) {
+ coex_stat->bt_game_hid_exist = cur_game_hid_exist;
+ rtw_dbg(rtwdev, RTW_DBG_COEX,
+ "[BTCoex], HID info changed!bt_game_hid_exist = %d!\n",
+ coex_stat->bt_game_hid_exist);
+ rtw_coex_run_coex(rtwdev, COEX_RSN_BTSTATUS);
+ }
+}
+
+void rtw_coex_query_bt_hid_list(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_hid *hidinfo;
+ u8 i, handle;
+ bool complete;
+
+ if (!chip->wl_mimo_ps_support || coex_stat->wl_under_ips ||
+ (coex_stat->wl_under_lps && !coex_stat->wl_force_lps_ctrl))
+ return;
+
+ if (!coex_stat->bt_hid_exist &&
+ !((coex_stat->bt_info_lb2 & COEX_INFO_CONNECTION) &&
+ (coex_stat->hi_pri_tx + coex_stat->hi_pri_rx >
+ COEX_BT_GAMEHID_CNT)))
+ return;
+
+ rtw_fw_coex_query_hid_info(rtwdev, COEX_BT_HIDINFO_LIST, 0);
+
+ for (i = 0; i < COEX_BT_HIDINFO_HANDLE_NUM; i++) {
+ hidinfo = &coex_stat->hid_info[i];
+ complete = hidinfo->hid_info_completed;
+ handle = hidinfo->hid_handle;
+ if (handle == 0 || handle == COEX_BT_HIDINFO_NOTCON ||
+ handle >= COEX_BT_BLE_HANDLE_THRS || complete)
+ continue;
+
+ rtw_fw_coex_query_hid_info(rtwdev,
+ COEX_BT_HIDINFO_A,
+ handle);
+ }
+}
+
void rtw_coex_wl_fwdbginfo_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length)
{
struct rtw_coex *coex = &rtwdev->coex;
@@ -3175,6 +3429,17 @@ void rtw_coex_wl_status_change_notify(struct rtw_dev *rtwdev, u32 type)
rtw_coex_run_coex(rtwdev, COEX_RSN_WLSTATUS);
}
+void rtw_coex_wl_status_check(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat;
+
+ if ((coex_stat->wl_under_lps && !coex_stat->wl_force_lps_ctrl) ||
+ coex_stat->wl_under_ips)
+ return;
+
+ rtw_coex_monitor_bt_ctr(rtwdev);
+}
+
void rtw_coex_bt_relink_work(struct work_struct *work)
{
struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
@@ -3637,6 +3902,7 @@ static const char *rtw_coex_get_wl_coex_mode(u8 coex_wl_link_mode)
switch (coex_wl_link_mode) {
case_WLINK(2G1PORT);
case_WLINK(5G);
+ case_WLINK(2GFREE);
default:
return "Unknown";
}
@@ -3658,7 +3924,6 @@ void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m)
u16 score_board_WB, score_board_BW;
u32 wl_reg_6c0, wl_reg_6c4, wl_reg_6c8, wl_reg_778, wl_reg_6cc;
u32 lte_coex, bt_coex;
- u32 bt_hi_pri, bt_lo_pri;
int i;
score_board_BW = rtw_coex_read_scbd(rtwdev);
@@ -3669,17 +3934,6 @@ void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m)
wl_reg_6cc = rtw_read32(rtwdev, REG_BT_COEX_TABLE_H);
wl_reg_778 = rtw_read8(rtwdev, REG_BT_STAT_CTRL);
- bt_hi_pri = rtw_read32(rtwdev, REG_BT_ACT_STATISTICS);
- bt_lo_pri = rtw_read32(rtwdev, REG_BT_ACT_STATISTICS_1);
- rtw_write8(rtwdev, REG_BT_COEX_ENH_INTR_CTRL,
- BIT_R_GRANTALL_WLMASK | BIT_STATIS_BT_EN);
-
- coex_stat->hi_pri_tx = FIELD_GET(MASKLWORD, bt_hi_pri);
- coex_stat->hi_pri_rx = FIELD_GET(MASKHWORD, bt_hi_pri);
-
- coex_stat->lo_pri_tx = FIELD_GET(MASKLWORD, bt_lo_pri);
- coex_stat->lo_pri_rx = FIELD_GET(MASKHWORD, bt_lo_pri);
-
sys_lte = rtw_read8(rtwdev, 0x73);
lte_coex = rtw_coex_read_indirect_reg(rtwdev, 0x38);
bt_coex = rtw_coex_read_indirect_reg(rtwdev, 0x54);
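The two name tables added near the top of this file are ASCII prefixes: {0x57, 0x69, 0x72} is "Wir" and {0x58, 0x62, 0x6f} is "Xbo", presumably matching PlayStation-style "Wireless Controller" and "Xbox" controller names, which is why COEX_BT_HIDINFO_NAME is 3 bytes. A condensed sketch of the comparison rtw_coex_bt_hid_info_notify() performs; the helper name is hypothetical:

	/* sketch, not part of the patch: how the 3-byte name prefixes are used */
	static bool coex_hid_name_is_game_pad(const u8 *name)
	{
		/* "Wir" (presumably a "Wireless Controller" name) */
		if (!memcmp(name, coex_bt_hidinfo_ps, COEX_BT_HIDINFO_NAME))
			return true;
		/* "Xbo" (presumably an Xbox controller name) */
		return !memcmp(name, coex_bt_hidinfo_xb, COEX_BT_HIDINFO_NAME);
	}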
diff --git a/drivers/net/wireless/realtek/rtw88/coex.h b/drivers/net/wireless/realtek/rtw88/coex.h
index fc61a0cab3e4..07fa7aa34d4b 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.h
+++ b/drivers/net/wireless/realtek/rtw88/coex.h
@@ -11,6 +11,7 @@
#define COEX_MIN_DELAY 10 /* delay unit in ms */
#define COEX_RFK_TIMEOUT 600 /* RFK timeout in ms */
+#define COEX_BT_GAMEHID_CNT 800
#define COEX_RF_OFF 0x0
#define COEX_RF_ON 0x1
@@ -172,6 +173,7 @@ enum coex_bt_profile {
enum coex_wl_link_mode {
COEX_WLINK_2G1PORT = 0x0,
COEX_WLINK_5G = 0x3,
+ COEX_WLINK_2GFREE = 0x7,
COEX_WLINK_MAX
};
@@ -401,9 +403,12 @@ void rtw_coex_scan_notify(struct rtw_dev *rtwdev, u8 type);
void rtw_coex_connect_notify(struct rtw_dev *rtwdev, u8 type);
void rtw_coex_media_status_notify(struct rtw_dev *rtwdev, u8 type);
void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length);
+void rtw_coex_bt_hid_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length);
void rtw_coex_wl_fwdbginfo_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length);
void rtw_coex_switchband_notify(struct rtw_dev *rtwdev, u8 type);
void rtw_coex_wl_status_change_notify(struct rtw_dev *rtwdev, u32 type);
+void rtw_coex_wl_status_check(struct rtw_dev *rtwdev);
+void rtw_coex_query_bt_hid_list(struct rtw_dev *rtwdev);
void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m);
static inline bool rtw_coex_disabled(struct rtw_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index e429428232c1..1a52ff585fbc 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -390,7 +390,7 @@ static ssize_t rtw_debugfs_set_h2c(struct file *filp,
&param[0], &param[1], &param[2], &param[3],
&param[4], &param[5], &param[6], &param[7]);
if (num != 8) {
- rtw_info(rtwdev, "invalid H2C command format for debug\n");
+ rtw_warn(rtwdev, "invalid H2C command format for debug\n");
return -EINVAL;
}
@@ -715,8 +715,10 @@ static int rtw_debugfs_get_phy_info(struct seq_file *m, void *v)
seq_printf(m, "Current CH(fc) = %u\n", rtwdev->hal.current_channel);
seq_printf(m, "Current BW = %u\n", rtwdev->hal.current_band_width);
seq_printf(m, "Current IGI = 0x%x\n", dm_info->igi_history[0]);
- seq_printf(m, "TP {Tx, Rx} = {%u, %u}Mbps\n\n",
+ seq_printf(m, "TP {Tx, Rx} = {%u, %u}Mbps\n",
stats->tx_throughput, stats->rx_throughput);
+ seq_printf(m, "1SS for TX and RX = %c\n\n", rtwdev->hal.txrx_1ss ?
+ 'Y' : 'N');
seq_puts(m, "==========[Tx Phy Info]========\n");
seq_puts(m, "[Tx Rate] = ");
diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h
index 61f8369fe2d6..066792dd96af 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.h
+++ b/drivers/net/wireless/realtek/rtw88/debug.h
@@ -23,6 +23,7 @@ enum rtw_debug_mask {
RTW_DBG_PATH_DIV = 0x00004000,
RTW_DBG_ADAPTIVITY = 0x00008000,
RTW_DBG_HW_SCAN = 0x00010000,
+ RTW_DBG_STATE = 0x00020000,
RTW_DBG_ALL = 0xffffffff
};
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 2f7c036f9022..aa2aeb5fb2cc 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -233,6 +233,9 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
case C2H_BT_INFO:
rtw_coex_bt_info_notify(rtwdev, c2h->payload, len);
break;
+ case C2H_BT_HID_INFO:
+ rtw_coex_bt_hid_info_notify(rtwdev, c2h->payload, len);
+ break;
case C2H_WLAN_INFO:
rtw_coex_wl_fwdbginfo_notify(rtwdev, c2h->payload, len);
break;
@@ -538,6 +541,18 @@ void rtw_fw_coex_tdma_type(struct rtw_dev *rtwdev,
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
+void rtw_fw_coex_query_hid_info(struct rtw_dev *rtwdev, u8 sub_id, u8 data)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_HID_INFO);
+
+ SET_COEX_QUERY_HID_INFO_SUBID(h2c_pkt, sub_id);
+ SET_COEX_QUERY_HID_INFO_DATA1(h2c_pkt, data);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data)
{
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
@@ -1784,9 +1799,9 @@ void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start)
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
-static void rtw_append_probe_req_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
- struct sk_buff_head *list,
- struct rtw_vif *rtwvif)
+static int rtw_append_probe_req_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
+ struct sk_buff_head *list, u8 *bands,
+ struct rtw_vif *rtwvif)
{
struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
struct rtw_chip_info *chip = rtwdev->chip;
@@ -1797,19 +1812,24 @@ static void rtw_append_probe_req_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
if (!(BIT(idx) & chip->band))
continue;
new = skb_copy(skb, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
skb_put_data(new, ies->ies[idx], ies->len[idx]);
skb_put_data(new, ies->common_ies, ies->common_ie_len);
skb_queue_tail(list, new);
+ (*bands)++;
}
+
+ return 0;
}
-static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_ssids,
+static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_probes,
struct sk_buff_head *probe_req_list)
{
struct rtw_chip_info *chip = rtwdev->chip;
struct sk_buff *skb, *tmp;
u8 page_offset = 1, *buf, page_size = chip->page_size;
- u8 pages = page_offset + num_ssids * RTW_PROBE_PG_CNT;
+ u8 pages = page_offset + num_probes * RTW_PROBE_PG_CNT;
u16 pg_addr = rtwdev->fifo.rsvd_h2c_info_addr, loc;
u16 buf_offset = page_size * page_offset;
u8 tx_desc_sz = chip->tx_pkt_desc_sz;
@@ -1848,6 +1868,8 @@ static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_ssids,
rtwdev->scan_info.probe_pg_size = page_offset;
out:
kfree(buf);
+ skb_queue_walk_safe(probe_req_list, skb, tmp)
+ kfree_skb(skb);
return ret;
}
@@ -1857,8 +1879,9 @@ static int rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev,
{
struct cfg80211_scan_request *req = rtwvif->scan_req;
struct sk_buff_head list;
- struct sk_buff *skb;
- u8 num = req->n_ssids, i;
+ struct sk_buff *skb, *tmp;
+ u8 num = req->n_ssids, i, bands = 0;
+ int ret;
skb_queue_head_init(&list);
for (i = 0; i < num; i++) {
@@ -1866,11 +1889,25 @@ static int rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev,
req->ssids[i].ssid,
req->ssids[i].ssid_len,
req->ie_len);
- rtw_append_probe_req_ie(rtwdev, skb, &list, rtwvif);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = rtw_append_probe_req_ie(rtwdev, skb, &list, &bands,
+ rtwvif);
+ if (ret)
+ goto out;
+
kfree_skb(skb);
}
- return _rtw_hw_scan_update_probe_req(rtwdev, num, &list);
+ return _rtw_hw_scan_update_probe_req(rtwdev, num * bands, &list);
+
+out:
+ skb_queue_walk_safe(&list, skb, tmp)
+ kfree_skb(skb);
+
+ return ret;
}
static int rtw_add_chan_info(struct rtw_dev *rtwdev, struct rtw_chan_info *info,
@@ -2022,7 +2059,7 @@ void rtw_hw_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
rtwdev->hal.rcr |= BIT_CBSSID_BCN;
rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr);
- rtw_core_scan_complete(rtwdev, vif);
+ rtw_core_scan_complete(rtwdev, vif, true);
ieee80211_wake_queues(rtwdev->hw);
ieee80211_scan_completed(rtwdev->hw, &info);
@@ -2109,7 +2146,7 @@ void rtw_hw_scan_status_report(struct rtw_dev *rtwdev, struct sk_buff *skb)
rtw_hw_scan_complete(rtwdev, vif, aborted);
if (aborted)
- rtw_info(rtwdev, "HW scan aborted with code: %d\n", rc);
+ rtw_dbg(rtwdev, RTW_DBG_HW_SCAN, "HW scan aborted with code: %d\n", rc);
}
void rtw_store_op_chan(struct rtw_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index 654c3c2e5721..b59d2cbad5d7 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -47,6 +47,7 @@ enum rtw_c2h_cmd_id {
C2H_CCX_TX_RPT = 0x03,
C2H_BT_INFO = 0x09,
C2H_BT_MP_INFO = 0x0b,
+ C2H_BT_HID_INFO = 0x45,
C2H_RA_RPT = 0x0c,
C2H_HW_FEATURE_REPORT = 0x19,
C2H_WLAN_INFO = 0x27,
@@ -529,6 +530,7 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define H2C_CMD_QUERY_BT_MP_INFO 0x67
#define H2C_CMD_BT_WIFI_CONTROL 0x69
#define H2C_CMD_WIFI_CALIBRATION 0x6d
+#define H2C_CMD_QUERY_BT_HID_INFO 0x73
#define H2C_CMD_KEEP_ALIVE 0x03
#define H2C_CMD_DISCONNECT_DECISION 0x04
@@ -681,6 +683,11 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define SET_BT_WIFI_CONTROL_DATA5(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(23, 16))
+#define SET_COEX_QUERY_HID_INFO_SUBID(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
+#define SET_COEX_QUERY_HID_INFO_DATA1(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+
#define SET_KEEP_ALIVE_ENABLE(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
#define SET_KEEP_ALIVE_ADOPT(h2c_pkt, value) \
@@ -780,6 +787,8 @@ void rtw_fw_force_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl);
void rtw_fw_bt_ignore_wlan_action(struct rtw_dev *rtwdev, bool enable);
void rtw_fw_coex_tdma_type(struct rtw_dev *rtwdev,
u8 para1, u8 para2, u8 para3, u8 para4, u8 para5);
+void rtw_fw_coex_query_hid_info(struct rtw_dev *rtwdev, u8 sub_id, u8 data);
+
void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data);
void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index ae7d97de5fdf..5cdc54c9a9aa 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -72,6 +72,9 @@ static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
struct rtw_dev *rtwdev = hw->priv;
int ret = 0;
+ /* let previous ips work finish to ensure we don't leave ips twice */
+ cancel_work_sync(&rtwdev->ips_work);
+
mutex_lock(&rtwdev->mutex);
rtw_leave_lps_deep(rtwdev);
@@ -205,7 +208,7 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
mutex_unlock(&rtwdev->mutex);
- rtw_info(rtwdev, "start vif %pM on port %d\n", vif->addr, rtwvif->port);
+ rtw_dbg(rtwdev, RTW_DBG_STATE, "start vif %pM on port %d\n", vif->addr, rtwvif->port);
return 0;
}
@@ -216,7 +219,7 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
u32 config = 0;
- rtw_info(rtwdev, "stop vif %pM on port %d\n", vif->addr, rtwvif->port);
+ rtw_dbg(rtwdev, RTW_DBG_STATE, "stop vif %pM on port %d\n", vif->addr, rtwvif->port);
mutex_lock(&rtwdev->mutex);
@@ -242,8 +245,8 @@ static int rtw_ops_change_interface(struct ieee80211_hw *hw,
{
struct rtw_dev *rtwdev = hw->priv;
- rtw_info(rtwdev, "change vif %pM (%d)->(%d), p2p (%d)->(%d)\n",
- vif->addr, vif->type, type, vif->p2p, p2p);
+ rtw_dbg(rtwdev, RTW_DBG_STATE, "change vif %pM (%d)->(%d), p2p (%d)->(%d)\n",
+ vif->addr, vif->type, type, vif->p2p, p2p);
rtw_ops_remove_interface(hw, vif);
@@ -614,7 +617,7 @@ static void rtw_ops_sw_scan_complete(struct ieee80211_hw *hw,
struct rtw_dev *rtwdev = hw->priv;
mutex_lock(&rtwdev->mutex);
- rtw_core_scan_complete(rtwdev, vif);
+ rtw_core_scan_complete(rtwdev, vif, false);
mutex_unlock(&rtwdev->mutex);
}
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 38252113c4a8..8b9899e41b0b 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -207,6 +207,9 @@ static void rtw_watch_dog_work(struct work_struct *work)
else
clear_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags);
+ rtw_coex_wl_status_check(rtwdev);
+ rtw_coex_query_bt_hid_list(rtwdev);
+
if (busy_traffic != test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags))
rtw_coex_wl_status_change_notify(rtwdev, 0);
@@ -272,6 +275,15 @@ static void rtw_c2h_work(struct work_struct *work)
}
}
+static void rtw_ips_work(struct work_struct *work)
+{
+ struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, ips_work);
+
+ mutex_lock(&rtwdev->mutex);
+ rtw_enter_ips(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+}
+
static u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
{
unsigned long mac_id;
@@ -305,8 +317,8 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
rtwdev->sta_cnt++;
rtwdev->beacon_loss = false;
- rtw_info(rtwdev, "sta %pM joined with macid %d\n",
- sta->addr, si->mac_id);
+ rtw_dbg(rtwdev, RTW_DBG_STATE, "sta %pM joined with macid %d\n",
+ sta->addr, si->mac_id);
return 0;
}
@@ -327,8 +339,8 @@ void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
kfree(si->mask);
rtwdev->sta_cnt--;
- rtw_info(rtwdev, "sta %pM with macid %d left\n",
- sta->addr, si->mac_id);
+ rtw_dbg(rtwdev, RTW_DBG_STATE, "sta %pM with macid %d left\n",
+ sta->addr, si->mac_id);
}
struct rtw_fwcd_hdr {
@@ -1011,37 +1023,52 @@ static u8 get_rate_id(u8 wireless_set, enum rtw_bandwidth bw_mode, u8 tx_num)
#define RA_MASK_VHT_RATES (RA_MASK_VHT_RATES_1SS | \
RA_MASK_VHT_RATES_2SS | \
RA_MASK_VHT_RATES_3SS)
+#define RA_MASK_CCK_IN_BG 0x00005
#define RA_MASK_CCK_IN_HT 0x00005
#define RA_MASK_CCK_IN_VHT 0x00005
#define RA_MASK_OFDM_IN_VHT 0x00010
#define RA_MASK_OFDM_IN_HT_2G 0x00010
#define RA_MASK_OFDM_IN_HT_5G 0x00030
-static u64 rtw_update_rate_mask(struct rtw_dev *rtwdev,
- struct rtw_sta_info *si,
- u64 ra_mask, bool is_vht_enable,
- u8 wireless_set)
+static u64 rtw_rate_mask_rssi(struct rtw_sta_info *si, u8 wireless_set)
+{
+ u8 rssi_level = si->rssi_level;
+
+ if (wireless_set == WIRELESS_CCK)
+ return 0xffffffffffffffffULL;
+
+ if (rssi_level == 0)
+ return 0xffffffffffffffffULL;
+ else if (rssi_level == 1)
+ return 0xfffffffffffffff0ULL;
+ else if (rssi_level == 2)
+ return 0xffffffffffffefe0ULL;
+ else if (rssi_level == 3)
+ return 0xffffffffffffcfc0ULL;
+ else if (rssi_level == 4)
+ return 0xffffffffffff8f80ULL;
+ else
+ return 0xffffffffffff0f00ULL;
+}
+
+static u64 rtw_rate_mask_recover(u64 ra_mask, u64 ra_mask_bak)
+{
+ if ((ra_mask & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES)) == 0)
+ ra_mask |= (ra_mask_bak & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));
+
+ if (ra_mask == 0)
+ ra_mask |= (ra_mask_bak & (RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));
+
+ return ra_mask;
+}
+
+static u64 rtw_rate_mask_cfg(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
+ u64 ra_mask, bool is_vht_enable)
{
struct rtw_hal *hal = &rtwdev->hal;
const struct cfg80211_bitrate_mask *mask = si->mask;
u64 cfg_mask = GENMASK_ULL(63, 0);
- u8 rssi_level, band;
-
- if (wireless_set != WIRELESS_CCK) {
- rssi_level = si->rssi_level;
- if (rssi_level == 0)
- ra_mask &= 0xffffffffffffffffULL;
- else if (rssi_level == 1)
- ra_mask &= 0xfffffffffffffff0ULL;
- else if (rssi_level == 2)
- ra_mask &= 0xffffffffffffefe0ULL;
- else if (rssi_level == 3)
- ra_mask &= 0xffffffffffffcfc0ULL;
- else if (rssi_level == 4)
- ra_mask &= 0xffffffffffff8f80ULL;
- else if (rssi_level >= 5)
- ra_mask &= 0xffffffffffff0f00ULL;
- }
+ u8 band;
if (!si->use_cfg_mask)
return ra_mask;
@@ -1091,6 +1118,7 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
u8 ldpc_en = 0;
u8 tx_num = 1;
u64 ra_mask = 0;
+ u64 ra_mask_bak = 0;
bool is_vht_enable = false;
bool is_support_sgi = false;
@@ -1110,11 +1138,12 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
ldpc_en = HT_LDPC_EN;
}
- if (efuse->hw_cap.nss == 1)
+ if (efuse->hw_cap.nss == 1 || rtwdev->hal.txrx_1ss)
ra_mask &= RA_MASK_VHT_RATES_1SS | RA_MASK_HT_RATES_1SS;
if (hal->current_band_type == RTW_BAND_5G) {
ra_mask |= (u64)sta->supp_rates[NL80211_BAND_5GHZ] << 4;
+ ra_mask_bak = ra_mask;
if (sta->vht_cap.vht_supported) {
ra_mask &= RA_MASK_VHT_RATES | RA_MASK_OFDM_IN_VHT;
wireless_set = WIRELESS_OFDM | WIRELESS_VHT;
@@ -1127,6 +1156,7 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
dm_info->rrsr_val_init = RRSR_INIT_5G;
} else if (hal->current_band_type == RTW_BAND_2G) {
ra_mask |= sta->supp_rates[NL80211_BAND_2GHZ];
+ ra_mask_bak = ra_mask;
if (sta->vht_cap.vht_supported) {
ra_mask &= RA_MASK_VHT_RATES | RA_MASK_CCK_IN_VHT |
RA_MASK_OFDM_IN_VHT;
@@ -1140,11 +1170,13 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
} else if (sta->supp_rates[0] <= 0xf) {
wireless_set = WIRELESS_CCK;
} else {
+ ra_mask &= RA_MASK_OFDM_RATES | RA_MASK_CCK_IN_BG;
wireless_set = WIRELESS_CCK | WIRELESS_OFDM;
}
dm_info->rrsr_val_init = RRSR_INIT_2G;
} else {
rtw_err(rtwdev, "Unknown band type\n");
+ ra_mask_bak = ra_mask;
wireless_set = 0;
}
@@ -1176,8 +1208,9 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
rate_id = get_rate_id(wireless_set, bw_mode, tx_num);
- ra_mask = rtw_update_rate_mask(rtwdev, si, ra_mask, is_vht_enable,
- wireless_set);
+ ra_mask &= rtw_rate_mask_rssi(si, wireless_set);
+ ra_mask = rtw_rate_mask_recover(ra_mask, ra_mask_bak);
+ ra_mask = rtw_rate_mask_cfg(rtwdev, si, ra_mask, is_vht_enable);
si->bw_mode = bw_mode;
si->stbc_en = stbc_en;
@@ -1339,7 +1372,8 @@ void rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
set_bit(RTW_FLAG_SCANNING, rtwdev->flags);
}
-void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
+void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ bool hw_scan)
{
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
u32 config = 0;
@@ -1354,6 +1388,9 @@ void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
rtw_vif_port_config(rtwdev, rtwvif, config);
rtw_coex_scan_notify(rtwdev, COEX_SCAN_FINISH);
+
+ if (rtwvif->net_type == RTW_NET_NO_LINK && hw_scan)
+ ieee80211_queue_work(rtwdev->hw, &rtwdev->ips_work);
}
int rtw_core_start(struct rtw_dev *rtwdev)
@@ -1536,6 +1573,37 @@ static void rtw_unset_supported_band(struct ieee80211_hw *hw,
kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]);
}
+static void rtw_vif_smps_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_dev *rtwdev = (struct rtw_dev *)data;
+
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
+ return;
+
+ if (rtwdev->hal.txrx_1ss)
+ ieee80211_request_smps(vif, IEEE80211_SMPS_STATIC);
+ else
+ ieee80211_request_smps(vif, IEEE80211_SMPS_OFF);
+}
+
+void rtw_set_txrx_1ss(struct rtw_dev *rtwdev, bool txrx_1ss)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_hal *hal = &rtwdev->hal;
+
+ if (!chip->ops->config_txrx_mode || rtwdev->hal.txrx_1ss == txrx_1ss)
+ return;
+
+ rtwdev->hal.txrx_1ss = txrx_1ss;
+ if (txrx_1ss)
+ chip->ops->config_txrx_mode(rtwdev, BB_PATH_A, BB_PATH_A, false);
+ else
+ chip->ops->config_txrx_mode(rtwdev, hal->antenna_tx,
+ hal->antenna_rx, false);
+ rtw_iterate_vifs_atomic(rtwdev, rtw_vif_smps_iter, rtwdev);
+}
+
static void __update_firmware_feature(struct rtw_dev *rtwdev,
struct rtw_fw_state *fw)
{
@@ -1919,6 +1987,7 @@ int rtw_core_init(struct rtw_dev *rtwdev)
INIT_DELAYED_WORK(&coex->wl_ccklock_work, rtw_coex_wl_ccklock_work);
INIT_WORK(&rtwdev->tx_work, rtw_tx_work);
INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work);
+ INIT_WORK(&rtwdev->ips_work, rtw_ips_work);
INIT_WORK(&rtwdev->fw_recovery_work, rtw_fw_recovery_work);
INIT_WORK(&rtwdev->ba_work, rtw_txq_ba_work);
skb_queue_head_init(&rtwdev->c2h_queue);
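The rate-mask refactor above splits the old rtw_update_rate_mask() into three steps that rtw_update_sta_info() now chains. A condensed sketch of that ordering; the wrapper name is hypothetical, the three rtw_rate_mask_* helpers are the ones introduced in the hunks above:

	/* sketch only: the composition performed inline in rtw_update_sta_info() */
	static u64 build_ra_mask(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
				 u64 ra_mask, u64 ra_mask_bak,
				 bool is_vht_enable, u8 wireless_set)
	{
		/* 1) drop low-rate entries according to the station's RSSI level */
		ra_mask &= rtw_rate_mask_rssi(si, wireless_set);
		/* 2) if masking emptied the mask, restore rates from the backup */
		ra_mask = rtw_rate_mask_recover(ra_mask, ra_mask_bak);
		/* 3) apply any user-configured cfg80211 bitrate mask last */
		return rtw_rate_mask_cfg(rtwdev, si, ra_mask, is_vht_enable);
	}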
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index dc1cd9bd4b8a..17815af9dd4e 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -874,6 +874,8 @@ struct rtw_chip_ops {
enum rtw_bb_path tx_path_1ss,
enum rtw_bb_path tx_path_cck,
bool is_tx2_path);
+ void (*config_txrx_mode)(struct rtw_dev *rtwdev, u8 tx_path,
+ u8 rx_path, bool is_tx2_path);
/* for coex */
void (*coex_set_init)(struct rtw_dev *rtwdev);
@@ -1240,6 +1242,7 @@ struct rtw_chip_info {
bool scbd_support;
bool new_scbd10_def; /* true: fix 2M(8822c) */
bool ble_hid_profile_support;
+ bool wl_mimo_ps_support;
u8 pstdma_type; /* 0: LPSoff, 1:LPSon */
u8 bt_rssi_type;
u8 ant_isolation;
@@ -1352,6 +1355,42 @@ struct rtw_coex_dm {
#define COEX_BTINFO_LENGTH_MAX 10
#define COEX_BTINFO_LENGTH 7
+#define COEX_BT_HIDINFO_LIST 0x0
+#define COEX_BT_HIDINFO_A 0x1
+#define COEX_BT_HIDINFO_NAME 3
+
+#define COEX_BT_HIDINFO_LENGTH 6
+#define COEX_BT_HIDINFO_HANDLE_NUM 4
+#define COEX_BT_HIDINFO_C2H_HANDLE 0
+#define COEX_BT_HIDINFO_C2H_VENDOR 1
+#define COEX_BT_BLE_HANDLE_THRS 0x10
+#define COEX_BT_HIDINFO_NOTCON 0xff
+
+struct rtw_coex_hid {
+ u8 hid_handle;
+ u8 hid_vendor;
+ u8 hid_name[COEX_BT_HIDINFO_NAME];
+ bool hid_info_completed;
+ bool is_game_hid;
+};
+
+struct rtw_coex_hid_handle_list {
+ u8 cmd_id;
+ u8 len;
+ u8 subid;
+ u8 handle_cnt;
+ u8 handle[COEX_BT_HIDINFO_HANDLE_NUM];
+} __packed;
+
+struct rtw_coex_hid_info_a {
+ u8 cmd_id;
+ u8 len;
+ u8 subid;
+ u8 handle;
+ u8 vendor;
+ u8 name[COEX_BT_HIDINFO_NAME];
+} __packed;
+
struct rtw_coex_stat {
bool bt_disabled;
bool bt_disabled_pre;
@@ -1382,6 +1421,8 @@ struct rtw_coex_stat {
bool bt_slave;
bool bt_418_hid_exist;
bool bt_ble_hid_exist;
+ bool bt_game_hid_exist;
+ bool bt_hid_handle_cnt;
bool bt_mailbox_reply;
bool wl_under_lps;
@@ -1402,6 +1443,7 @@ struct rtw_coex_stat {
bool wl_connecting;
bool wl_slot_toggle;
bool wl_slot_toggle_change; /* if toggle to no-toggle */
+ bool wl_mimo_ps;
u32 bt_supported_version;
u32 bt_supported_feature;
@@ -1459,6 +1501,9 @@ struct rtw_coex_stat {
u32 darfrc;
u32 darfrch;
+
+ struct rtw_coex_hid hid_info[COEX_BT_HIDINFO_HANDLE_NUM];
+ struct rtw_coex_hid_handle_list hid_handle_list;
};
struct rtw_coex {
@@ -1867,6 +1912,7 @@ struct rtw_hal {
u32 antenna_tx;
u32 antenna_rx;
u8 bfee_sts_cap;
+ bool txrx_1ss;
/* protect tx power section */
struct mutex tx_power_mutex;
@@ -1960,6 +2006,7 @@ struct rtw_dev {
/* c2h cmd queue & handler work */
struct sk_buff_head c2h_queue;
struct work_struct c2h_work;
+ struct work_struct ips_work;
struct work_struct fw_recovery_work;
/* used to protect txqs list */
@@ -2101,7 +2148,8 @@ void rtw_tx_report_purge_timer(struct timer_list *t);
void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
void rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
const u8 *mac_addr, bool hw_scan);
-void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif);
+void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ bool hw_scan);
int rtw_core_start(struct rtw_dev *rtwdev);
void rtw_core_stop(struct rtw_dev *rtwdev);
int rtw_chip_info_setup(struct rtw_dev *rtwdev);
@@ -2121,5 +2169,5 @@ void rtw_core_fw_scan_notify(struct rtw_dev *rtwdev, bool start);
int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size,
u32 fwcd_item);
int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size);
-
+void rtw_set_txrx_1ss(struct rtw_dev *rtwdev, bool config_1ss);
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
index 3fdbaf7302c5..ad2b323a0423 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
@@ -2753,6 +2753,7 @@ struct rtw_chip_info rtw8723d_hw_spec = {
.scbd_support = true,
.new_scbd10_def = true,
.ble_hid_profile_support = false,
+ .wl_mimo_ps_support = false,
.pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
.bt_rssi_type = COEX_BTRSSI_RATIO,
.ant_isolation = 15,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index db078df63f85..99eee128ae94 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -499,7 +499,7 @@ static s8 get_cck_rx_pwr(struct rtw_dev *rtwdev, u8 lna_idx, u8 vga_idx)
}
if (lna_idx >= lna_gain_table_size) {
- rtw_info(rtwdev, "incorrect lna index (%d)\n", lna_idx);
+ rtw_warn(rtwdev, "incorrect lna index (%d)\n", lna_idx);
return -120;
}
@@ -1514,6 +1514,7 @@ static const struct rtw_rfe_def rtw8821c_rfe_defs[] = {
[0] = RTW_DEF_RFE(8821c, 0, 0),
[2] = RTW_DEF_RFE_EXT(8821c, 0, 0, 2),
[4] = RTW_DEF_RFE_EXT(8821c, 0, 0, 2),
+ [6] = RTW_DEF_RFE(8821c, 0, 0),
};
static struct rtw_hw_reg rtw8821c_dig[] = {
@@ -1924,6 +1925,7 @@ struct rtw_chip_info rtw8821c_hw_spec = {
.scbd_support = true,
.new_scbd10_def = false,
.ble_hid_profile_support = false,
+ .wl_mimo_ps_support = false,
.pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
.bt_rssi_type = COEX_BTRSSI_RATIO,
.ant_isolation = 15,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index dd4fbb82750d..eee7bf035403 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -1012,12 +1012,12 @@ static int rtw8822b_set_antenna(struct rtw_dev *rtwdev,
antenna_tx, antenna_rx);
if (!rtw8822b_check_rf_path(antenna_tx)) {
- rtw_info(rtwdev, "unsupported tx path 0x%x\n", antenna_tx);
+ rtw_warn(rtwdev, "unsupported tx path 0x%x\n", antenna_tx);
return -EINVAL;
}
if (!rtw8822b_check_rf_path(antenna_rx)) {
- rtw_info(rtwdev, "unsupported rx path 0x%x\n", antenna_rx);
+ rtw_warn(rtwdev, "unsupported rx path 0x%x\n", antenna_rx);
return -EINVAL;
}
@@ -2554,6 +2554,7 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.scbd_support = true,
.new_scbd10_def = false,
.ble_hid_profile_support = false,
+ .wl_mimo_ps_support = false,
.pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
.bt_rssi_type = COEX_BTRSSI_RATIO,
.ant_isolation = 15,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index 35c46e5209de..cd74607a61a2 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -2798,7 +2798,7 @@ static int rtw8822c_set_antenna(struct rtw_dev *rtwdev,
case BB_PATH_AB:
break;
default:
- rtw_info(rtwdev, "unsupported tx path 0x%x\n", antenna_tx);
+ rtw_warn(rtwdev, "unsupported tx path 0x%x\n", antenna_tx);
return -EINVAL;
}
@@ -2808,7 +2808,7 @@ static int rtw8822c_set_antenna(struct rtw_dev *rtwdev,
case BB_PATH_AB:
break;
default:
- rtw_info(rtwdev, "unsupported rx path 0x%x\n", antenna_rx);
+ rtw_warn(rtwdev, "unsupported rx path 0x%x\n", antenna_rx);
return -EINVAL;
}
@@ -2996,19 +2996,34 @@ static void rtw8822c_coex_cfg_gnt_fix(struct rtw_dev *rtwdev)
* enable "DAC off if GNT_WL = 0" for non-shared-antenna
* disable 0x1c30[22] = 0,
* enable: 0x1c30[22] = 1, 0x1c38[12] = 0, 0x1c38[28] = 1
- *
- * disable WL-S1 BB chage RF mode if GNT_BT
+ */
+ if (coex_stat->wl_coex_mode == COEX_WLINK_2GFREE) {
+ rtw_write8_mask(rtwdev, REG_ANAPAR + 2,
+ BIT_ANAPAR_BTPS >> 16, 0);
+ } else {
+ rtw_write8_mask(rtwdev, REG_ANAPAR + 2,
+ BIT_ANAPAR_BTPS >> 16, 1);
+ rtw_write8_mask(rtwdev, REG_RSTB_SEL + 1,
+ BIT_DAC_OFF_ENABLE, 0);
+ rtw_write8_mask(rtwdev, REG_RSTB_SEL + 3,
+ BIT_DAC_OFF_ENABLE, 1);
+ }
+
+ /* disable WL-S1 BB change RF mode if GNT_BT
* since RF TRx mask can do it
*/
- rtw_write8_mask(rtwdev, REG_ANAPAR + 2, BIT_ANAPAR_BTPS >> 16, 1);
- rtw_write8_mask(rtwdev, REG_RSTB_SEL + 1, BIT_DAC_OFF_ENABLE, 0);
- rtw_write8_mask(rtwdev, REG_RSTB_SEL + 3, BIT_DAC_OFF_ENABLE, 1);
- rtw_write8_mask(rtwdev, REG_IGN_GNTBT4, BIT_PI_IGNORE_GNT_BT, 1);
+ rtw_write8_mask(rtwdev, REG_IGN_GNTBT4,
+ BIT_PI_IGNORE_GNT_BT, 1);
/* disable WL-S0 BB chage RF mode if wifi is at 5G,
* or antenna path is separated
*/
- if (coex_stat->wl_coex_mode == COEX_WLINK_5G ||
+ if (coex_stat->wl_coex_mode == COEX_WLINK_2GFREE) {
+ rtw_write8_mask(rtwdev, REG_IGN_GNT_BT1,
+ BIT_PI_IGNORE_GNT_BT, 1);
+ rtw_write8_mask(rtwdev, REG_NOMASK_TXBT,
+ BIT_NOMASK_TXBT_ENABLE, 1);
+ } else if (coex_stat->wl_coex_mode == COEX_WLINK_5G ||
coex->under_5g || !efuse->share_ant) {
if (coex_stat->kt_ver >= 3) {
rtw_write8_mask(rtwdev, REG_IGN_GNT_BT1,
@@ -4962,6 +4977,7 @@ static struct rtw_chip_ops rtw8822c_ops = {
.cfo_init = rtw8822c_cfo_init,
.cfo_track = rtw8822c_cfo_track,
.config_tx_path = rtw8822c_config_tx_path,
+ .config_txrx_mode = rtw8822c_config_trx_mode,
.coex_set_init = rtw8822c_coex_cfg_init,
.coex_set_ant_switch = NULL,
@@ -5007,6 +5023,8 @@ static const struct coex_table_para table_sant_8822c[] = {
{0x66556aaa, 0x6a5a6aaa}, /*case-30*/
{0xffffffff, 0x5aaa5aaa},
{0x56555555, 0x5a5a5aaa},
+ {0xdaffdaff, 0xdaffdaff},
+ {0xddffddff, 0xddffddff},
};
/* Non-Shared-Antenna Coex Table */
@@ -5107,7 +5125,8 @@ static const struct coex_rf_para rf_para_tx_8822c[] = {
{8, 17, true, 4},
{7, 18, true, 4},
{6, 19, true, 4},
- {5, 20, true, 4}
+ {5, 20, true, 4},
+ {0, 21, true, 4} /* for gaming hid */
};
static const struct coex_rf_para rf_para_rx_8822c[] = {
@@ -5116,7 +5135,8 @@ static const struct coex_rf_para rf_para_rx_8822c[] = {
{3, 24, true, 5},
{2, 26, true, 5},
{1, 27, true, 5},
- {0, 28, true, 5}
+ {0, 28, true, 5},
+ {0, 28, true, 5} /* for gaming hid */
};
static_assert(ARRAY_SIZE(rf_para_tx_8822c) == ARRAY_SIZE(rf_para_rx_8822c));
@@ -5354,11 +5374,12 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.wowlan_stub = &rtw_wowlan_stub_8822c,
.max_sched_scan_ssids = 4,
#endif
- .coex_para_ver = 0x2103181c,
- .bt_desired_ver = 0x1c,
+ .coex_para_ver = 0x22020720,
+ .bt_desired_ver = 0x20,
.scbd_support = true,
.new_scbd10_def = true,
.ble_hid_profile_support = true,
+ .wl_mimo_ps_support = true,
.pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
.bt_rssi_type = COEX_BTRSSI_DBM,
.ant_isolation = 15,
diff --git a/drivers/net/wireless/realtek/rtw88/sar.c b/drivers/net/wireless/realtek/rtw88/sar.c
index 3383726c4d90..c472f1502b82 100644
--- a/drivers/net/wireless/realtek/rtw88/sar.c
+++ b/drivers/net/wireless/realtek/rtw88/sar.c
@@ -91,10 +91,10 @@ int rtw_set_sar_specs(struct rtw_dev *rtwdev,
return -EINVAL;
power = sar->sub_specs[i].power;
- rtw_info(rtwdev, "On freq %u to %u, set SAR %d in 1/%lu dBm\n",
- rtw_common_sar_freq_ranges[idx].start_freq,
- rtw_common_sar_freq_ranges[idx].end_freq,
- power, BIT(RTW_COMMON_SAR_FCT));
+ rtw_dbg(rtwdev, RTW_DBG_REGD, "On freq %u to %u, set SAR %d in 1/%lu dBm\n",
+ rtw_common_sar_freq_ranges[idx].start_freq,
+ rtw_common_sar_freq_ranges[idx].end_freq,
+ power, BIT(RTW_COMMON_SAR_FCT));
for (j = 0; j < RTW_RF_PATH_MAX; j++) {
for (k = 0; k < RTW_RATE_SECTION_MAX; k++) {
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
index efcc1b0371a8..94d1089f4022 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.c
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -353,7 +353,7 @@ static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev,
bw = si->bw_mode;
rate_id = si->rate_id;
- stbc = si->stbc_en;
+ stbc = rtwdev->hal.txrx_1ss ? false : si->stbc_en;
ldpc = si->ldpc_en;
out:
diff --git a/drivers/net/wireless/realtek/rtw89/Kconfig b/drivers/net/wireless/realtek/rtw89/Kconfig
index 37e5def24d9f..dd02b6a6790e 100644
--- a/drivers/net/wireless/realtek/rtw89/Kconfig
+++ b/drivers/net/wireless/realtek/rtw89/Kconfig
@@ -16,11 +16,15 @@ config RTW89_CORE
config RTW89_PCI
tristate
+config RTW89_8852A
+ tristate
+
config RTW89_8852AE
tristate "Realtek 8852AE PCI wireless network adapter"
depends on PCI
select RTW89_CORE
select RTW89_PCI
+ select RTW89_8852A
help
Select this option will enable support for 8852AE chipset
diff --git a/drivers/net/wireless/realtek/rtw89/Makefile b/drivers/net/wireless/realtek/rtw89/Makefile
index 077e8fe23f60..012ae60c0b81 100644
--- a/drivers/net/wireless/realtek/rtw89/Makefile
+++ b/drivers/net/wireless/realtek/rtw89/Makefile
@@ -6,10 +6,6 @@ rtw89_core-y += core.o \
mac.o \
phy.o \
fw.o \
- rtw8852a.o \
- rtw8852a_table.o \
- rtw8852a_rfk.o \
- rtw8852a_rfk_table.o \
cam.o \
efuse.o \
regd.o \
@@ -18,6 +14,15 @@ rtw89_core-y += core.o \
ps.o \
ser.o
+obj-$(CONFIG_RTW89_8852A) += rtw89_8852a.o
+rtw89_8852a-objs := rtw8852a.o \
+ rtw8852a_table.o \
+ rtw8852a_rfk.o \
+ rtw8852a_rfk_table.o
+
+obj-$(CONFIG_RTW89_8852AE) += rtw89_8852ae.o
+rtw89_8852ae-objs := rtw8852ae.o
+
rtw89_core-$(CONFIG_RTW89_DEBUG) += debug.o
obj-$(CONFIG_RTW89_PCI) += rtw89_pci.o
diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
index bd34e4bbe107..305dbbebff6b 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.c
+++ b/drivers/net/wireless/realtek/rtw89/cam.c
@@ -231,7 +231,7 @@ static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
}
rtwvif = (struct rtw89_vif *)vif->drv_priv;
- addr_cam = &rtwvif->addr_cam;
+ addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
ret = rtw89_cam_get_addr_cam_key_idx(addr_cam, sec_cam, key, &key_idx);
if (ret) {
rtw89_err(rtwdev, "failed to get addr cam key idx %d, %d\n",
@@ -387,7 +387,7 @@ int rtw89_cam_sec_key_del(struct rtw89_dev *rtwdev,
}
rtwvif = (struct rtw89_vif *)vif->drv_priv;
- addr_cam = &rtwvif->addr_cam;
+ addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
sec_cam = addr_cam->sec_entries[key_idx];
if (!sec_cam)
return -EINVAL;
@@ -427,15 +427,23 @@ static void rtw89_cam_reset_key_iter(struct ieee80211_hw *hw,
rtw89_cam_deinit(rtwdev, rtwvif);
}
+void rtw89_cam_deinit_addr_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_addr_cam_entry *addr_cam)
+{
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+
+ addr_cam->valid = false;
+ clear_bit(addr_cam->addr_cam_idx, cam_info->addr_cam_map);
+}
+
void rtw89_cam_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam;
struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
- addr_cam->valid = false;
+ rtw89_cam_deinit_addr_cam(rtwdev, addr_cam);
bssid_cam->valid = false;
- clear_bit(addr_cam->addr_cam_idx, cam_info->addr_cam_map);
clear_bit(bssid_cam->bssid_cam_idx, cam_info->bssid_cam_map);
}
@@ -464,10 +472,10 @@ static int rtw89_cam_get_avail_addr_cam(struct rtw89_dev *rtwdev,
return 0;
}
-static int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif)
+int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_addr_cam_entry *addr_cam,
+ const struct rtw89_bssid_cam_entry *bssid_cam)
{
- struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam;
u8 addr_cam_idx;
int i;
int ret;
@@ -484,14 +492,17 @@ static int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev,
addr_cam->valid = true;
addr_cam->addr_mask = 0;
addr_cam->mask_sel = RTW89_NO_MSK;
+ addr_cam->sec_ent_mode = RTW89_ADDR_CAM_SEC_NORMAL;
bitmap_zero(addr_cam->sec_cam_map, RTW89_SEC_CAM_IN_ADDR_CAM);
- ether_addr_copy(addr_cam->sma, rtwvif->mac_addr);
for (i = 0; i < RTW89_SEC_CAM_IN_ADDR_CAM; i++) {
addr_cam->sec_ent_keyid[i] = 0;
addr_cam->sec_ent[i] = 0;
}
+ /* associate addr cam with bssid cam */
+ addr_cam->bssid_cam_idx = bssid_cam->bssid_cam_idx;
+
return 0;
}
@@ -549,21 +560,18 @@ int rtw89_cam_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
int ret;
- ret = rtw89_cam_init_addr_cam(rtwdev, rtwvif);
+ ret = rtw89_cam_init_bssid_cam(rtwdev, rtwvif);
if (ret) {
- rtw89_err(rtwdev, "failed to init addr cam\n");
+ rtw89_err(rtwdev, "failed to init bssid cam\n");
return ret;
}
- ret = rtw89_cam_init_bssid_cam(rtwdev, rtwvif);
+ ret = rtw89_cam_init_addr_cam(rtwdev, addr_cam, bssid_cam);
if (ret) {
- rtw89_err(rtwdev, "failed to init bssid cam\n");
+ rtw89_err(rtwdev, "failed to init addr cam\n");
return ret;
}
- /* associate addr cam with bssid cam */
- addr_cam->bssid_cam_idx = bssid_cam->bssid_cam_idx;
-
return 0;
}
@@ -609,7 +617,7 @@ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
u8 *cmd)
{
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
- struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam;
+ struct rtw89_addr_cam_entry *addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
struct ieee80211_sta *sta = rtwsta_to_sta_safe(rtwsta);
const u8 *sma = scan_mac_addr ? scan_mac_addr : rtwvif->mac_addr;
u8 sma_hash, tma_hash, addr_msk_start;
diff --git a/drivers/net/wireless/realtek/rtw89/cam.h b/drivers/net/wireless/realtek/rtw89/cam.h
index 33a3ad582b81..3a6a786530d1 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.h
+++ b/drivers/net/wireless/realtek/rtw89/cam.h
@@ -346,6 +346,11 @@ static inline void FWCMD_SET_ADDR_BSSID_BSSID5(void *cmd, u32 value)
int rtw89_cam_init(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
void rtw89_cam_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
+int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_addr_cam_entry *addr_cam,
+ const struct rtw89_bssid_cam_entry *bssid_cam);
+void rtw89_cam_deinit_addr_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_addr_cam_entry *addr_cam);
void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
struct rtw89_vif *vif,
struct rtw89_sta *rtwsta,
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index 9f7d4f8d0c56..684583955511 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -594,7 +594,7 @@ static void _reset_btc_var(struct rtw89_dev *rtwdev, u8 type)
memset(&btc->dm, 0, sizeof(btc->dm));
memset(bt_linfo->rssi_state, 0, sizeof(bt_linfo->rssi_state));
- for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++)
+ for (i = 0; i < RTW89_PORT_NUM; i++)
memset(wl_linfo[i].rssi_state, 0,
sizeof(wl_linfo[i].rssi_state));
@@ -1478,7 +1478,7 @@ static void _set_gnt_wl(struct rtw89_dev *rtwdev, u8 phy_map, u8 state)
}
}
- rtw89_mac_cfg_gnt(rtwdev, &dm->gnt);
+ rtw89_chip_mac_cfg_gnt(rtwdev, &dm->gnt);
}
#define BTC_TDMA_WLROLE_MAX 2
@@ -1698,7 +1698,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
wl_rinfo->link_mode == BTC_WLINK_2G_SCC) {
en = true;
/* get p2p channel */
- for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++) {
+ for (i = 0; i < RTW89_PORT_NUM; i++) {
if (wl_rinfo->active_role[i].role ==
RTW89_WIFI_ROLE_P2P_GO ||
wl_rinfo->active_role[i].role ==
@@ -1711,7 +1711,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
} else {
en = true;
/* get 2g channel */
- for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++) {
+ for (i = 0; i < RTW89_PORT_NUM; i++) {
if (wl_rinfo->active_role[i].connected &&
wl_rinfo->active_role[i].band == RTW89_BAND_2G) {
ch = wl_rinfo->active_role[i].ch;
@@ -2233,7 +2233,7 @@ static void _set_gnt_bt(struct rtw89_dev *rtwdev, u8 phy_map, u8 state)
}
}
- rtw89_mac_cfg_gnt(rtwdev, &dm->gnt);
+ rtw89_chip_mac_cfg_gnt(rtwdev, &dm->gnt);
}
static void _set_bt_plut(struct rtw89_dev *rtwdev, u8 phy_map,
@@ -2300,7 +2300,7 @@ static void _set_ant(struct rtw89_dev *rtwdev, bool force_exec,
switch (type) {
case BTC_ANT_WPOWERON:
- rtw89_mac_cfg_ctrl_path(rtwdev, false);
+ rtw89_chip_cfg_ctrl_path(rtwdev, false);
break;
case BTC_ANT_WINIT:
if (bt->enable.now) {
@@ -2310,21 +2310,21 @@ static void _set_ant(struct rtw89_dev *rtwdev, bool force_exec,
_set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
_set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_LO);
}
- rtw89_mac_cfg_ctrl_path(rtwdev, true);
+ rtw89_chip_cfg_ctrl_path(rtwdev, true);
_set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_BT, BTC_PLT_BT);
break;
case BTC_ANT_WONLY:
_set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
_set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_LO);
- rtw89_mac_cfg_ctrl_path(rtwdev, true);
+ rtw89_chip_cfg_ctrl_path(rtwdev, true);
_set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
break;
case BTC_ANT_WOFF:
- rtw89_mac_cfg_ctrl_path(rtwdev, false);
+ rtw89_chip_cfg_ctrl_path(rtwdev, false);
_set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
break;
case BTC_ANT_W2G:
- rtw89_mac_cfg_ctrl_path(rtwdev, true);
+ rtw89_chip_cfg_ctrl_path(rtwdev, true);
if (rtwdev->dbcc_en) {
for (i = 0; i < RTW89_PHY_MAX; i++) {
b2g = (wl_dinfo->real_band[i] == RTW89_BAND_2G);
@@ -2352,32 +2352,32 @@ static void _set_ant(struct rtw89_dev *rtwdev, bool force_exec,
}
break;
case BTC_ANT_W5G:
- rtw89_mac_cfg_ctrl_path(rtwdev, true);
+ rtw89_chip_cfg_ctrl_path(rtwdev, true);
_set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
_set_gnt_bt(rtwdev, phy_map, BTC_GNT_HW);
_set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
break;
case BTC_ANT_W25G:
- rtw89_mac_cfg_ctrl_path(rtwdev, true);
+ rtw89_chip_cfg_ctrl_path(rtwdev, true);
_set_gnt_wl(rtwdev, phy_map, BTC_GNT_HW);
_set_gnt_bt(rtwdev, phy_map, BTC_GNT_HW);
_set_bt_plut(rtwdev, BTC_PHY_ALL,
BTC_PLT_GNT_WL, BTC_PLT_GNT_WL);
break;
case BTC_ANT_FREERUN:
- rtw89_mac_cfg_ctrl_path(rtwdev, true);
+ rtw89_chip_cfg_ctrl_path(rtwdev, true);
_set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
_set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_HI);
_set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
break;
case BTC_ANT_WRFK:
- rtw89_mac_cfg_ctrl_path(rtwdev, true);
+ rtw89_chip_cfg_ctrl_path(rtwdev, true);
_set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
_set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_LO);
_set_bt_plut(rtwdev, phy_map, BTC_PLT_NONE, BTC_PLT_NONE);
break;
case BTC_ANT_BRFK:
- rtw89_mac_cfg_ctrl_path(rtwdev, false);
+ rtw89_chip_cfg_ctrl_path(rtwdev, false);
_set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_LO);
_set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_HI);
_set_bt_plut(rtwdev, phy_map, BTC_PLT_NONE, BTC_PLT_NONE);
@@ -3287,7 +3287,7 @@ static void _update_wl_info(struct rtw89_dev *rtwdev)
memset(wl_rinfo, 0, sizeof(*wl_rinfo));
- for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++) {
+ for (i = 0; i < RTW89_PORT_NUM; i++) {
/* check if role active? */
if (!wl_linfo[i].active)
continue;
@@ -4370,6 +4370,7 @@ void rtw89_btc_ntfy_wl_rfk(struct rtw89_dev *rtwdev, u8 phy_map,
rtwdev->is_bt_iqk_timeout = true;
}
}
+EXPORT_SYMBOL(rtw89_btc_ntfy_wl_rfk);
struct rtw89_btc_wl_sta_iter_data {
struct rtw89_dev *rtwdev;
@@ -4622,12 +4623,12 @@ static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m)
ver_hotfix = FIELD_GET(GENMASK(15, 8), chip->wlcx_desired);
seq_printf(m, "(%s, desired:%d.%d.%d), ",
(wl->ver_info.fw_coex >= chip->wlcx_desired ?
- "Match" : "Mis-Match"), ver_main, ver_sub, ver_hotfix);
+ "Match" : "Mismatch"), ver_main, ver_sub, ver_hotfix);
seq_printf(m, "BT_FW_coex:%d(%s, desired:%d)\n",
bt->ver_info.fw_coex,
(bt->ver_info.fw_coex >= chip->btcx_desired ?
- "Match" : "Mis-Match"), chip->btcx_desired);
+ "Match" : "Mismatch"), chip->btcx_desired);
if (bt->enable.now && bt->ver_info.fw == 0)
rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, true);
@@ -4676,7 +4677,7 @@ static void _show_wl_role_info(struct rtw89_dev *rtwdev, struct seq_file *m)
wl_dinfo->real_band[RTW89_PHY_1]);
}
- for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++) {
+ for (i = 0; i < RTW89_PORT_NUM; i++) {
plink = &btc->cx.wl.link_info[i];
if (!plink->active)
@@ -5074,7 +5075,7 @@ static void _show_dm_info(struct rtw89_dev *rtwdev, struct seq_file *m)
seq_printf(m, "leak_ap:%d, fw_offload:%s%s\n", dm->leak_ap,
(BTC_CX_FW_OFFLOAD ? "Y" : "N"),
(dm->wl_fw_cx_offload == BTC_CX_FW_OFFLOAD ?
- "" : "(Mis-Match!!)"));
+ "" : "(Mismatch!!)"));
if (dm->rf_trx_para.wl_tx_power == 0xff)
seq_printf(m,
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index a0737eea9f81..bcefc968576e 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -4,6 +4,7 @@
#include <linux/ip.h>
#include <linux/udp.h>
+#include "cam.h"
#include "coex.h"
#include "core.h"
#include "efuse.h"
@@ -21,50 +22,122 @@ static bool rtw89_disable_ps_mode;
module_param_named(disable_ps_mode, rtw89_disable_ps_mode, bool, 0644);
MODULE_PARM_DESC(disable_ps_mode, "Set Y to disable low power mode");
+#define RTW89_DEF_CHAN(_freq, _hw_val, _flags, _band) \
+ { .center_freq = _freq, .hw_value = _hw_val, .flags = _flags, .band = _band, }
+#define RTW89_DEF_CHAN_2G(_freq, _hw_val) \
+ RTW89_DEF_CHAN(_freq, _hw_val, 0, NL80211_BAND_2GHZ)
+#define RTW89_DEF_CHAN_5G(_freq, _hw_val) \
+ RTW89_DEF_CHAN(_freq, _hw_val, 0, NL80211_BAND_5GHZ)
+#define RTW89_DEF_CHAN_5G_NO_HT40MINUS(_freq, _hw_val) \
+ RTW89_DEF_CHAN(_freq, _hw_val, IEEE80211_CHAN_NO_HT40MINUS, NL80211_BAND_5GHZ)
+#define RTW89_DEF_CHAN_6G(_freq, _hw_val) \
+ RTW89_DEF_CHAN(_freq, _hw_val, 0, NL80211_BAND_6GHZ)
+
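For illustration only (hypothetical identifier, not part of the patch), RTW89_DEF_CHAN_5G_NO_HT40MINUS(5825, 165) expands to an entry equivalent to:

static struct ieee80211_channel example_chan_165 = {
	.center_freq = 5825,
	.hw_value = 165,
	.flags = IEEE80211_CHAN_NO_HT40MINUS,
	.band = NL80211_BAND_5GHZ,
};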
static struct ieee80211_channel rtw89_channels_2ghz[] = {
- { .center_freq = 2412, .hw_value = 1, },
- { .center_freq = 2417, .hw_value = 2, },
- { .center_freq = 2422, .hw_value = 3, },
- { .center_freq = 2427, .hw_value = 4, },
- { .center_freq = 2432, .hw_value = 5, },
- { .center_freq = 2437, .hw_value = 6, },
- { .center_freq = 2442, .hw_value = 7, },
- { .center_freq = 2447, .hw_value = 8, },
- { .center_freq = 2452, .hw_value = 9, },
- { .center_freq = 2457, .hw_value = 10, },
- { .center_freq = 2462, .hw_value = 11, },
- { .center_freq = 2467, .hw_value = 12, },
- { .center_freq = 2472, .hw_value = 13, },
- { .center_freq = 2484, .hw_value = 14, },
+ RTW89_DEF_CHAN_2G(2412, 1),
+ RTW89_DEF_CHAN_2G(2417, 2),
+ RTW89_DEF_CHAN_2G(2422, 3),
+ RTW89_DEF_CHAN_2G(2427, 4),
+ RTW89_DEF_CHAN_2G(2432, 5),
+ RTW89_DEF_CHAN_2G(2437, 6),
+ RTW89_DEF_CHAN_2G(2442, 7),
+ RTW89_DEF_CHAN_2G(2447, 8),
+ RTW89_DEF_CHAN_2G(2452, 9),
+ RTW89_DEF_CHAN_2G(2457, 10),
+ RTW89_DEF_CHAN_2G(2462, 11),
+ RTW89_DEF_CHAN_2G(2467, 12),
+ RTW89_DEF_CHAN_2G(2472, 13),
+ RTW89_DEF_CHAN_2G(2484, 14),
};
static struct ieee80211_channel rtw89_channels_5ghz[] = {
- {.center_freq = 5180, .hw_value = 36,},
- {.center_freq = 5200, .hw_value = 40,},
- {.center_freq = 5220, .hw_value = 44,},
- {.center_freq = 5240, .hw_value = 48,},
- {.center_freq = 5260, .hw_value = 52,},
- {.center_freq = 5280, .hw_value = 56,},
- {.center_freq = 5300, .hw_value = 60,},
- {.center_freq = 5320, .hw_value = 64,},
- {.center_freq = 5500, .hw_value = 100,},
- {.center_freq = 5520, .hw_value = 104,},
- {.center_freq = 5540, .hw_value = 108,},
- {.center_freq = 5560, .hw_value = 112,},
- {.center_freq = 5580, .hw_value = 116,},
- {.center_freq = 5600, .hw_value = 120,},
- {.center_freq = 5620, .hw_value = 124,},
- {.center_freq = 5640, .hw_value = 128,},
- {.center_freq = 5660, .hw_value = 132,},
- {.center_freq = 5680, .hw_value = 136,},
- {.center_freq = 5700, .hw_value = 140,},
- {.center_freq = 5720, .hw_value = 144,},
- {.center_freq = 5745, .hw_value = 149,},
- {.center_freq = 5765, .hw_value = 153,},
- {.center_freq = 5785, .hw_value = 157,},
- {.center_freq = 5805, .hw_value = 161,},
- {.center_freq = 5825, .hw_value = 165,
- .flags = IEEE80211_CHAN_NO_HT40MINUS},
+ RTW89_DEF_CHAN_5G(5180, 36),
+ RTW89_DEF_CHAN_5G(5200, 40),
+ RTW89_DEF_CHAN_5G(5220, 44),
+ RTW89_DEF_CHAN_5G(5240, 48),
+ RTW89_DEF_CHAN_5G(5260, 52),
+ RTW89_DEF_CHAN_5G(5280, 56),
+ RTW89_DEF_CHAN_5G(5300, 60),
+ RTW89_DEF_CHAN_5G(5320, 64),
+ RTW89_DEF_CHAN_5G(5500, 100),
+ RTW89_DEF_CHAN_5G(5520, 104),
+ RTW89_DEF_CHAN_5G(5540, 108),
+ RTW89_DEF_CHAN_5G(5560, 112),
+ RTW89_DEF_CHAN_5G(5580, 116),
+ RTW89_DEF_CHAN_5G(5600, 120),
+ RTW89_DEF_CHAN_5G(5620, 124),
+ RTW89_DEF_CHAN_5G(5640, 128),
+ RTW89_DEF_CHAN_5G(5660, 132),
+ RTW89_DEF_CHAN_5G(5680, 136),
+ RTW89_DEF_CHAN_5G(5700, 140),
+ RTW89_DEF_CHAN_5G(5720, 144),
+ RTW89_DEF_CHAN_5G(5745, 149),
+ RTW89_DEF_CHAN_5G(5765, 153),
+ RTW89_DEF_CHAN_5G(5785, 157),
+ RTW89_DEF_CHAN_5G(5805, 161),
+ RTW89_DEF_CHAN_5G_NO_HT40MINUS(5825, 165),
+};
+
+static struct ieee80211_channel rtw89_channels_6ghz[] = {
+ RTW89_DEF_CHAN_6G(5955, 1),
+ RTW89_DEF_CHAN_6G(5975, 5),
+ RTW89_DEF_CHAN_6G(5995, 9),
+ RTW89_DEF_CHAN_6G(6015, 13),
+ RTW89_DEF_CHAN_6G(6035, 17),
+ RTW89_DEF_CHAN_6G(6055, 21),
+ RTW89_DEF_CHAN_6G(6075, 25),
+ RTW89_DEF_CHAN_6G(6095, 29),
+ RTW89_DEF_CHAN_6G(6115, 33),
+ RTW89_DEF_CHAN_6G(6135, 37),
+ RTW89_DEF_CHAN_6G(6155, 41),
+ RTW89_DEF_CHAN_6G(6175, 45),
+ RTW89_DEF_CHAN_6G(6195, 49),
+ RTW89_DEF_CHAN_6G(6215, 53),
+ RTW89_DEF_CHAN_6G(6235, 57),
+ RTW89_DEF_CHAN_6G(6255, 61),
+ RTW89_DEF_CHAN_6G(6275, 65),
+ RTW89_DEF_CHAN_6G(6295, 69),
+ RTW89_DEF_CHAN_6G(6315, 73),
+ RTW89_DEF_CHAN_6G(6335, 77),
+ RTW89_DEF_CHAN_6G(6355, 81),
+ RTW89_DEF_CHAN_6G(6375, 85),
+ RTW89_DEF_CHAN_6G(6395, 89),
+ RTW89_DEF_CHAN_6G(6415, 93),
+ RTW89_DEF_CHAN_6G(6435, 97),
+ RTW89_DEF_CHAN_6G(6455, 101),
+ RTW89_DEF_CHAN_6G(6475, 105),
+ RTW89_DEF_CHAN_6G(6495, 109),
+ RTW89_DEF_CHAN_6G(6515, 113),
+ RTW89_DEF_CHAN_6G(6535, 117),
+ RTW89_DEF_CHAN_6G(6555, 121),
+ RTW89_DEF_CHAN_6G(6575, 125),
+ RTW89_DEF_CHAN_6G(6595, 129),
+ RTW89_DEF_CHAN_6G(6615, 133),
+ RTW89_DEF_CHAN_6G(6635, 137),
+ RTW89_DEF_CHAN_6G(6655, 141),
+ RTW89_DEF_CHAN_6G(6675, 145),
+ RTW89_DEF_CHAN_6G(6695, 149),
+ RTW89_DEF_CHAN_6G(6715, 153),
+ RTW89_DEF_CHAN_6G(6735, 157),
+ RTW89_DEF_CHAN_6G(6755, 161),
+ RTW89_DEF_CHAN_6G(6775, 165),
+ RTW89_DEF_CHAN_6G(6795, 169),
+ RTW89_DEF_CHAN_6G(6815, 173),
+ RTW89_DEF_CHAN_6G(6835, 177),
+ RTW89_DEF_CHAN_6G(6855, 181),
+ RTW89_DEF_CHAN_6G(6875, 185),
+ RTW89_DEF_CHAN_6G(6895, 189),
+ RTW89_DEF_CHAN_6G(6915, 193),
+ RTW89_DEF_CHAN_6G(6935, 197),
+ RTW89_DEF_CHAN_6G(6955, 201),
+ RTW89_DEF_CHAN_6G(6975, 205),
+ RTW89_DEF_CHAN_6G(6995, 209),
+ RTW89_DEF_CHAN_6G(7015, 213),
+ RTW89_DEF_CHAN_6G(7035, 217),
+ RTW89_DEF_CHAN_6G(7055, 221),
+ RTW89_DEF_CHAN_6G(7075, 225),
+ RTW89_DEF_CHAN_6G(7095, 229),
+ RTW89_DEF_CHAN_6G(7115, 233),
};
static struct ieee80211_rate rtw89_bitrates[] = {
@@ -118,6 +191,16 @@ static struct ieee80211_supported_band rtw89_sband_5ghz = {
.vht_cap = {0},
};
+static struct ieee80211_supported_band rtw89_sband_6ghz = {
+ .band = NL80211_BAND_6GHZ,
+ .channels = rtw89_channels_6ghz,
+ .n_channels = ARRAY_SIZE(rtw89_channels_6ghz),
+
+ /* 6G has no CCK rates (1M/2M/5.5M/11M), so skip the first four bitrates */
+ .bitrates = rtw89_bitrates + 4,
+ .n_bitrates = ARRAY_SIZE(rtw89_bitrates) - 4,
+};
+
static void rtw89_traffic_stats_accu(struct rtw89_dev *rtwdev,
struct rtw89_traffic_stats *stats,
struct sk_buff *skb, bool tx)
@@ -149,6 +232,9 @@ static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef,
u8 center_chan;
u8 bandwidth = RTW89_CHANNEL_WIDTH_20;
u8 primary_chan_idx = 0;
+ u32 offset;
+ u8 band;
+ u8 subband;
center_chan = channel->hw_value;
primary_freq = channel->center_freq;
@@ -171,23 +257,16 @@ static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef,
}
break;
case NL80211_CHAN_WIDTH_80:
- bandwidth = RTW89_CHANNEL_WIDTH_80;
+ case NL80211_CHAN_WIDTH_160:
+ bandwidth = nl_to_rtw89_bandwidth(width);
if (primary_freq > center_freq) {
- if (primary_freq - center_freq == 10) {
- primary_chan_idx = RTW89_SC_20_UPPER;
- center_chan -= 2;
- } else {
- primary_chan_idx = RTW89_SC_20_UPMOST;
- center_chan -= 6;
- }
+ offset = (primary_freq - center_freq - 10) / 20;
+ primary_chan_idx = RTW89_SC_20_UPPER + offset * 2;
+ center_chan -= 2 + offset * 4;
} else {
- if (center_freq - primary_freq == 10) {
- primary_chan_idx = RTW89_SC_20_LOWER;
- center_chan += 2;
- } else {
- primary_chan_idx = RTW89_SC_20_LOWEST;
- center_chan += 6;
- }
+ offset = (center_freq - primary_freq - 10) / 20;
+ primary_chan_idx = RTW89_SC_20_LOWER + offset * 2;
+ center_chan += 2 + offset * 4;
}
break;
default:
@@ -195,10 +274,81 @@ static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef,
break;
}
+ switch (channel->band) {
+ default:
+ case NL80211_BAND_2GHZ:
+ band = RTW89_BAND_2G;
+ break;
+ case NL80211_BAND_5GHZ:
+ band = RTW89_BAND_5G;
+ break;
+ case NL80211_BAND_6GHZ:
+ band = RTW89_BAND_6G;
+ break;
+ }
+
+ switch (band) {
+ default:
+ case RTW89_BAND_2G:
+ switch (center_chan) {
+ default:
+ case 1 ... 14:
+ subband = RTW89_CH_2G;
+ break;
+ }
+ break;
+ case RTW89_BAND_5G:
+ switch (center_chan) {
+ default:
+ case 36 ... 64:
+ subband = RTW89_CH_5G_BAND_1;
+ break;
+ case 100 ... 144:
+ subband = RTW89_CH_5G_BAND_3;
+ break;
+ case 149 ... 177:
+ subband = RTW89_CH_5G_BAND_4;
+ break;
+ }
+ break;
+ case RTW89_BAND_6G:
+ switch (center_chan) {
+ default:
+ case 1 ... 29:
+ subband = RTW89_CH_6G_BAND_IDX0;
+ break;
+ case 33 ... 61:
+ subband = RTW89_CH_6G_BAND_IDX1;
+ break;
+ case 65 ... 93:
+ subband = RTW89_CH_6G_BAND_IDX2;
+ break;
+ case 97 ... 125:
+ subband = RTW89_CH_6G_BAND_IDX3;
+ break;
+ case 129 ... 157:
+ subband = RTW89_CH_6G_BAND_IDX4;
+ break;
+ case 161 ... 189:
+ subband = RTW89_CH_6G_BAND_IDX5;
+ break;
+ case 193 ... 221:
+ subband = RTW89_CH_6G_BAND_IDX6;
+ break;
+ case 225 ... 253:
+ subband = RTW89_CH_6G_BAND_IDX7;
+ break;
+ }
+ break;
+ }
+
chan_param->center_chan = center_chan;
+ chan_param->center_freq = center_freq;
chan_param->primary_chan = channel->hw_value;
chan_param->bandwidth = bandwidth;
chan_param->pri_ch_idx = primary_chan_idx;
+ chan_param->band_type = band;
+ chan_param->subband_type = subband;
}
void rtw89_set_channel(struct rtw89_dev *rtwdev)
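As a quick sanity check on the generalized offset arithmetic above, a standalone sketch (hypothetical program; the SC_* values mirror enum rtw89_sc_offset in core.h) showing it reproduces the old 80 MHz special cases and extends to 160 MHz:

#include <assert.h>

/* Values copied from enum rtw89_sc_offset in core.h */
enum { SC_20_UPPER = 1, SC_20_LOWER = 2, SC_20_UPMOST = 3,
       SC_20_LOWEST = 4, SC_20_LOW3X = 8 };

static void check(unsigned int primary_freq, unsigned int center_freq,
		  unsigned int primary_chan,
		  unsigned int exp_idx, unsigned int exp_center_chan)
{
	unsigned int center_chan = primary_chan;
	unsigned int offset;

	if (primary_freq > center_freq) {
		offset = (primary_freq - center_freq - 10) / 20;
		assert(SC_20_UPPER + offset * 2 == exp_idx);
		center_chan -= 2 + offset * 4;
	} else {
		offset = (center_freq - primary_freq - 10) / 20;
		assert(SC_20_LOWER + offset * 2 == exp_idx);
		center_chan += 2 + offset * 4;
	}
	assert(center_chan == exp_center_chan);
}

int main(void)
{
	/* 80 MHz, primary ch 36 (5180 MHz), center 5210 MHz: old LOWEST case */
	check(5180, 5210, 36, SC_20_LOWEST, 42);
	/* 80 MHz, primary ch 48 (5240 MHz), center 5210 MHz: old UPMOST case */
	check(5240, 5210, 48, SC_20_UPMOST, 42);
	/* 160 MHz, primary ch 36 (5180 MHz), center 5250 MHz (ch 50) */
	check(5180, 5250, 36, SC_20_LOW3X, 50);
	return 0;
}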
@@ -209,7 +359,6 @@ void rtw89_set_channel(struct rtw89_dev *rtwdev)
struct rtw89_channel_params ch_param;
struct rtw89_channel_help_params bak;
u8 center_chan, bandwidth;
- u8 band_type;
bool band_changed;
rtw89_get_channel_params(&hw->conf.chandef, &ch_param);
@@ -218,30 +367,17 @@ void rtw89_set_channel(struct rtw89_dev *rtwdev)
center_chan = ch_param.center_chan;
bandwidth = ch_param.bandwidth;
- band_type = center_chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G;
- band_changed = hal->current_band_type != band_type ||
+ band_changed = hal->current_band_type != ch_param.band_type ||
hal->current_channel == 0;
hal->current_band_width = bandwidth;
hal->current_channel = center_chan;
+ hal->current_freq = ch_param.center_freq;
hal->prev_primary_channel = hal->current_primary_channel;
+ hal->prev_band_type = hal->current_band_type;
hal->current_primary_channel = ch_param.primary_chan;
- hal->current_band_type = band_type;
-
- switch (center_chan) {
- case 1 ... 14:
- hal->current_subband = RTW89_CH_2G;
- break;
- case 36 ... 64:
- hal->current_subband = RTW89_CH_5G_BAND_1;
- break;
- case 100 ... 144:
- hal->current_subband = RTW89_CH_5G_BAND_3;
- break;
- case 149 ... 177:
- hal->current_subband = RTW89_CH_5G_BAND_4;
- break;
- }
+ hal->current_band_type = ch_param.band_type;
+ hal->current_subband = ch_param.subband_type;
rtw89_chip_set_channel_prepare(rtwdev, &bak);
@@ -300,9 +436,11 @@ rtw89_core_tx_update_sec_key(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
struct ieee80211_vif *vif = tx_req->vif;
+ struct ieee80211_sta *sta = tx_req->sta;
struct ieee80211_tx_info *info;
struct ieee80211_key_conf *key;
struct rtw89_vif *rtwvif;
+ struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
struct rtw89_addr_cam_entry *addr_cam;
struct rtw89_sec_cam_entry *sec_cam;
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
@@ -315,7 +453,7 @@ rtw89_core_tx_update_sec_key(struct rtw89_dev *rtwdev,
}
rtwvif = (struct rtw89_vif *)vif->drv_priv;
- addr_cam = &rtwvif->addr_cam;
+ addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
info = IEEE80211_SKB_CB(skb);
key = info->control.hw_key;
@@ -377,14 +515,19 @@ static void
rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
+ struct ieee80211_vif *vif = tx_req->vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
u8 qsel, ch_dma;
- qsel = RTW89_TX_QSEL_B0_MGMT;
+ qsel = desc_info->hiq ? RTW89_TX_QSEL_B0_HI : RTW89_TX_QSEL_B0_MGMT;
ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
- desc_info->qsel = RTW89_TX_QSEL_B0_MGMT;
+ desc_info->qsel = qsel;
desc_info->ch_dma = ch_dma;
+ desc_info->port = desc_info->hiq ? rtwvif->port : 0;
+ desc_info->hw_ssn_sel = RTW89_MGMT_HW_SSN_SEL;
+ desc_info->hw_seq_mode = RTW89_MGMT_HW_SEQ_MODE;
/* fixed data rate for mgmt frames */
desc_info->en_wd_info = true;
@@ -520,6 +663,21 @@ desc_bk:
desc_info->bk = true;
}
+static u8 rtw89_core_tx_get_mac_id(struct rtw89_dev *rtwdev,
+ struct rtw89_core_tx_request *tx_req)
+{
+ struct ieee80211_vif *vif = tx_req->vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct ieee80211_sta *sta = tx_req->sta;
+ struct rtw89_sta *rtwsta;
+
+ if (!sta)
+ return rtwvif->mac_id;
+
+ rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ return rtwsta->mac_id;
+}
+
static void
rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
@@ -535,12 +693,14 @@ rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
tid_indicate = rtw89_core_get_tid_indicate(rtwdev, tid);
- qsel = rtw89_core_get_qsel(rtwdev, tid);
+ qsel = desc_info->hiq ? RTW89_TX_QSEL_B0_HI : rtw89_core_get_qsel(rtwdev, tid);
ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
desc_info->ch_dma = ch_dma;
desc_info->tid_indicate = tid_indicate;
desc_info->qsel = qsel;
+ desc_info->mac_id = rtw89_core_tx_get_mac_id(rtwdev, tx_req);
+ desc_info->port = desc_info->hiq ? rtwvif->port : 0;
/* enable wd_info for AMPDU */
desc_info->en_wd_info = true;
@@ -596,11 +756,28 @@ rtw89_core_tx_btc_spec_pkt_notify(struct rtw89_dev *rtwdev,
}
static void
+rtw89_core_tx_wake(struct rtw89_dev *rtwdev,
+ struct rtw89_core_tx_request *tx_req)
+{
+ if (!rtwdev->fw.tx_wake)
+ return;
+
+ if (!test_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags))
+ return;
+
+ if (tx_req->tx_type != RTW89_CORE_TX_TYPE_MGMT)
+ return;
+
+ rtw89_mac_notify_wake(rtwdev);
+}
+
+static void
rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
struct sk_buff *skb = tx_req->skb;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (void *)skb->data;
enum rtw89_core_tx_type tx_type;
enum btc_pkt_type pkt_type;
@@ -619,6 +796,7 @@ rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev,
desc_info->pkt_size = skb->len;
desc_info->is_bmc = is_bmc;
desc_info->wd_page = true;
+ desc_info->hiq = info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM;
switch (tx_req->tx_type) {
case RTW89_CORE_TX_TYPE_MGMT:
@@ -691,6 +869,8 @@ int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
rtw89_traffic_stats_accu(rtwdev, &rtwdev->stats, skb, true);
rtw89_traffic_stats_accu(rtwdev, &rtwvif->stats, skb, true);
rtw89_core_tx_update_desc_info(rtwdev, &tx_req);
+ rtw89_core_tx_wake(rtwdev, &tx_req);
+
ret = rtw89_hci_tx_write(rtwdev, &tx_req);
if (ret) {
rtw89_err(rtwdev, "failed to transmit skb to HCI\n");
@@ -710,7 +890,9 @@ static __le32 rtw89_build_txwd_body0(struct rtw89_tx_desc_info *desc_info)
FIELD_PREP(RTW89_TXWD_BODY0_CHANNEL_DMA, desc_info->ch_dma) |
FIELD_PREP(RTW89_TXWD_BODY0_HDR_LLC_LEN, desc_info->hdr_llc_len) |
FIELD_PREP(RTW89_TXWD_BODY0_WD_PAGE, desc_info->wd_page) |
- FIELD_PREP(RTW89_TXWD_BODY0_FW_DL, desc_info->fw_dl);
+ FIELD_PREP(RTW89_TXWD_BODY0_FW_DL, desc_info->fw_dl) |
+ FIELD_PREP(RTW89_TXWD_BODY0_HW_SSN_SEL, desc_info->hw_ssn_sel) |
+ FIELD_PREP(RTW89_TXWD_BODY0_HW_SSN_MODE, desc_info->hw_seq_mode);
return cpu_to_le32(dword);
}
@@ -719,7 +901,8 @@ static __le32 rtw89_build_txwd_body2(struct rtw89_tx_desc_info *desc_info)
{
u32 dword = FIELD_PREP(RTW89_TXWD_BODY2_TID_INDICATE, desc_info->tid_indicate) |
FIELD_PREP(RTW89_TXWD_BODY2_QSEL, desc_info->qsel) |
- FIELD_PREP(RTW89_TXWD_BODY2_TXPKT_SIZE, desc_info->pkt_size);
+ FIELD_PREP(RTW89_TXWD_BODY2_TXPKT_SIZE, desc_info->pkt_size) |
+ FIELD_PREP(RTW89_TXWD_BODY2_MACID, desc_info->mac_id);
return cpu_to_le32(dword);
}
@@ -737,7 +920,8 @@ static __le32 rtw89_build_txwd_info0(struct rtw89_tx_desc_info *desc_info)
{
u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_USE_RATE, desc_info->use_rate) |
FIELD_PREP(RTW89_TXWD_INFO0_DATA_RATE, desc_info->data_rate) |
- FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb);
+ FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb) |
+ FIELD_PREP(RTW89_TXWD_INFO0_MULTIPORT_ID, desc_info->port);
return cpu_to_le32(dword);
}
@@ -996,13 +1180,7 @@ static bool rtw89_core_rx_ppdu_match(struct rtw89_dev *rtwdev,
rtw89_warn(rtwdev, "invalid RX rate mode %d\n", data_rate_mode);
}
- if (desc_info->bw == RTW89_CHANNEL_WIDTH_80)
- bw = RATE_INFO_BW_80;
- else if (desc_info->bw == RTW89_CHANNEL_WIDTH_40)
- bw = RATE_INFO_BW_40;
- else
- bw = RATE_INFO_BW_20;
-
+ bw = rtw89_hw_to_rate_info_bw(desc_info->bw);
gi_ltf = rtw89_rxdesc_to_nl_he_gi(rtwdev, desc_info, false);
ret = rtwdev->ppdu_sts.curr_rx_ppdu_cnt[band] == desc_info->ppdu_cnt &&
status->rate_idx == rate_idx &&
@@ -1083,10 +1261,31 @@ static void rtw89_core_hw_to_sband_rate(struct ieee80211_rx_status *rx_status)
if (rx_status->band == NL80211_BAND_2GHZ ||
rx_status->encoding != RX_ENC_LEGACY)
return;
+
+ /* Some control frames' freq (ACKs in this case) is reported wrong due
+ * to FW notify timing; set to the lowest rate to prevent overflow.
+ */
+ if (rx_status->rate_idx < RTW89_HW_RATE_OFDM6) {
+ rx_status->rate_idx = 0;
+ return;
+ }
+
/* No 4 CCK rates for non-2G */
rx_status->rate_idx -= 4;
}
+static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct rtw89_rx_desc_info *desc_info,
+ struct sk_buff *skb_ppdu,
+ struct ieee80211_rx_status *rx_status)
+{
+ rtw89_core_hw_to_sband_rate(rx_status);
+ rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu);
+ ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, &rtwdev->napi);
+ rtwdev->napi_budget_countdown--;
+}
+
static void rtw89_core_rx_pending_skb(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct rtw89_rx_desc_info *desc_info,
@@ -1106,10 +1305,7 @@ static void rtw89_core_rx_pending_skb(struct rtw89_dev *rtwdev,
if (rtw89_core_rx_ppdu_match(rtwdev, desc_info, rx_status))
rtw89_chip_query_ppdu(rtwdev, phy_ppdu, rx_status);
rtw89_correct_cck_chan(rtwdev, rx_status);
- rtw89_core_hw_to_sband_rate(rx_status);
- rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu);
- ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, &rtwdev->napi);
- rtwdev->napi_budget_countdown--;
+ rtw89_core_rx_to_mac80211(rtwdev, phy_ppdu, desc_info, skb_ppdu, rx_status);
}
}
@@ -1250,6 +1446,7 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
struct ieee80211_rx_status *rx_status)
{
struct ieee80211_hw *hw = rtwdev->hw;
+ struct rtw89_hal *hal = &rtwdev->hal;
u16 data_rate;
u8 data_rate_mode;
@@ -1257,6 +1454,13 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
+ if (rtwdev->scanning && rtwdev->fw.scan_offload) {
+ rx_status->freq =
+ ieee80211_channel_to_frequency(hal->current_channel,
+ hal->current_band_type);
+ rx_status->band = rtwdev->hal.current_band_type;
+ }
+
if (desc_info->icv_err || desc_info->crc32_err)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
@@ -1264,12 +1468,7 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
!(desc_info->sw_dec || desc_info->icv_err))
rx_status->flag |= RX_FLAG_DECRYPTED;
- if (desc_info->bw == RTW89_CHANNEL_WIDTH_80)
- rx_status->bw = RATE_INFO_BW_80;
- else if (desc_info->bw == RTW89_CHANNEL_WIDTH_40)
- rx_status->bw = RATE_INFO_BW_40;
- else
- rx_status->bw = RATE_INFO_BW_20;
+ rx_status->bw = rtw89_hw_to_rate_info_bw(desc_info->bw);
data_rate = desc_info->data_rate;
data_rate_mode = GET_DATA_RATE_MODE(data_rate);
@@ -1334,10 +1533,7 @@ static void rtw89_core_flush_ppdu_rx_queue(struct rtw89_dev *rtwdev,
skb_queue_walk_safe(&ppdu_sts->rx_queue[band], skb_ppdu, tmp) {
skb_unlink(skb_ppdu, &ppdu_sts->rx_queue[band]);
rx_status = IEEE80211_SKB_RXCB(skb_ppdu);
- rtw89_core_hw_to_sband_rate(rx_status);
- rtw89_core_rx_stats(rtwdev, NULL, desc_info, skb_ppdu);
- ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, &rtwdev->napi);
- rtwdev->napi_budget_countdown--;
+ rtw89_core_rx_to_mac80211(rtwdev, NULL, desc_info, skb_ppdu, rx_status);
}
}
@@ -1364,14 +1560,10 @@ void rtw89_core_rx(struct rtw89_dev *rtwdev,
memset(rx_status, 0, sizeof(*rx_status));
rtw89_core_update_rx_status(rtwdev, desc_info, rx_status);
if (desc_info->long_rxdesc &&
- BIT(desc_info->frame_type) & PPDU_FILTER_BITMAP) {
+ BIT(desc_info->frame_type) & PPDU_FILTER_BITMAP)
skb_queue_tail(&ppdu_sts->rx_queue[band], skb);
- } else {
- rtw89_core_hw_to_sband_rate(rx_status);
- rtw89_core_rx_stats(rtwdev, NULL, desc_info, skb);
- ieee80211_rx_napi(rtwdev->hw, NULL, skb, &rtwdev->napi);
- rtwdev->napi_budget_countdown--;
- }
+ else
+ rtw89_core_rx_to_mac80211(rtwdev, NULL, desc_info, skb, rx_status);
}
EXPORT_SYMBOL(rtw89_core_rx);
@@ -1509,11 +1701,12 @@ static void rtw89_core_txq_push(struct rtw89_dev *rtwdev,
unsigned long i;
int ret;
+ rcu_read_lock();
for (i = 0; i < frame_cnt; i++) {
skb = ieee80211_tx_dequeue_ni(rtwdev->hw, txq);
if (!skb) {
rtw89_debug(rtwdev, RTW89_DBG_TXRX, "dequeue a NULL skb\n");
- return;
+ goto out;
}
rtw89_core_txq_check_agg(rtwdev, rtwtxq, skb);
ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, NULL);
@@ -1523,6 +1716,8 @@ static void rtw89_core_txq_push(struct rtw89_dev *rtwdev,
break;
}
}
+out:
+ rcu_read_unlock();
}
static u32 rtw89_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, u8 tid)
@@ -1598,6 +1793,16 @@ static void rtw89_core_txq_schedule(struct rtw89_dev *rtwdev, u8 ac, bool *reinv
ieee80211_txq_schedule_end(hw, ac);
}
+static void rtw89_ips_work(struct work_struct *work)
+{
+ struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
+ ips_work);
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_enter_ips(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+}
+
static void rtw89_core_txq_work(struct work_struct *w)
{
struct rtw89_dev *rtwdev = container_of(w, struct rtw89_dev, txq_work);
@@ -1770,6 +1975,51 @@ void rtw89_core_release_all_bits_map(unsigned long *addr, unsigned int nbits)
bitmap_zero(addr, nbits);
}
+int rtw89_core_acquire_sta_ba_entry(struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx)
+{
+ struct rtw89_ba_cam_entry *entry;
+ u8 idx;
+
+ idx = rtw89_core_acquire_bit_map(rtwsta->ba_cam_map, RTW89_BA_CAM_NUM);
+ if (idx == RTW89_BA_CAM_NUM) {
+ /* tid 0 is allocated a static BA CAM, so replace the existing
+ * entry when the BA CAM is full; hardware will still process the
+ * original tid automatically.
+ */
+ if (tid != 0)
+ return -ENOSPC;
+
+ idx = 0;
+ }
+
+ entry = &rtwsta->ba_cam_entry[idx];
+ entry->tid = tid;
+ *cam_idx = idx;
+
+ return 0;
+}
+
+int rtw89_core_release_sta_ba_entry(struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx)
+{
+ struct rtw89_ba_cam_entry *entry;
+ int i;
+
+ for (i = 0; i < RTW89_BA_CAM_NUM; i++) {
+ if (!test_bit(i, rtwsta->ba_cam_map))
+ continue;
+
+ entry = &rtwsta->ba_cam_entry[i];
+ if (entry->tid != tid)
+ continue;
+
+ rtw89_core_release_bit_map(rtwsta->ba_cam_map, i);
+ *cam_idx = i;
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
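For context, a minimal usage sketch of the two helpers above (hypothetical caller names, not part of this patch), as they might be driven when a TX BA session is started or torn down:

/* Hypothetical caller sketch, illustrative only. */
static int example_ba_session_start(struct rtw89_sta *rtwsta, u8 tid)
{
	u8 cam_idx;
	int ret;

	ret = rtw89_core_acquire_sta_ba_entry(rtwsta, tid, &cam_idx);
	if (ret)
		return ret;	/* -ENOSPC: CAM full and tid != 0 */

	/* ... program the hardware BA CAM entry at cam_idx via H2C ... */
	return 0;
}

static void example_ba_session_stop(struct rtw89_sta *rtwsta, u8 tid)
{
	u8 cam_idx;

	if (rtw89_core_release_sta_ba_entry(rtwsta, tid, &cam_idx))
		return;		/* -ENOENT: tid had no BA CAM entry */

	/* ... invalidate the hardware BA CAM entry at cam_idx ... */
}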
#define RTW89_TYPE_MAPPING(_type) \
case NL80211_IFTYPE_ ## _type: \
rtwvif->wifi_role = RTW89_WIFI_ROLE_ ## _type; \
@@ -1838,6 +2088,9 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
BTC_ROLE_MSTS_STA_CONN_START);
rtw89_chip_rfk_channel(rtwdev);
+ } else if (vif->type == NL80211_IFTYPE_AP) {
+ rtwsta->mac_id = rtw89_core_acquire_bit_map(rtwdev->mac_id_map,
+ RTW89_MAX_MAC_ID_NUM);
}
return 0;
@@ -1866,8 +2119,11 @@ int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
rtw89_mac_bf_monitor_calc(rtwdev, sta, true);
rtw89_mac_bf_disassoc(rtwdev, vif, sta);
rtw89_core_free_sta_pending_ba(rtwdev, sta);
+ if (vif->type == NL80211_IFTYPE_AP)
+ rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
- rtw89_vif_type_mapping(vif, false);
+ if (vif->type == NL80211_IFTYPE_STATION)
+ rtw89_vif_type_mapping(vif, false);
ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
if (ret) {
@@ -1875,14 +2131,22 @@ int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
return ret;
}
- ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, 1);
+ ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, rtwsta, true);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c join info\n");
return ret;
}
+ if (vif->type == NL80211_IFTYPE_AP) {
+ ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta, RTW89_ROLE_REMOVE);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c role info\n");
+ return ret;
+ }
+ }
+
/* update cam aid mac_id net_type */
- rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
+ ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c cam\n");
return ret;
@@ -1899,7 +2163,25 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
int ret;
- rtw89_vif_type_mapping(vif, true);
+ if (vif->type == NL80211_IFTYPE_AP) {
+ ret = rtw89_mac_set_macid_pause(rtwdev, rtwsta->mac_id, false);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c macid pause\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta, RTW89_ROLE_CREATE);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c role info\n");
+ return ret;
+ }
+
+ ret = rtw89_cam_init_addr_cam(rtwdev, &rtwsta->addr_cam, &rtwvif->bssid_cam);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c init addr cam\n");
+ return ret;
+ }
+ }
ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
if (ret) {
@@ -1907,7 +2189,7 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
return ret;
}
- ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, 0);
+ ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, rtwsta, false);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c join info\n");
return ret;
@@ -1950,6 +2232,8 @@ int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
if (vif->type == NL80211_IFTYPE_STATION)
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
BTC_ROLE_MSTS_STA_DIS_CONN);
+ else if (vif->type == NL80211_IFTYPE_AP)
+ rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwsta->mac_id);
return 0;
}
@@ -1986,9 +2270,14 @@ static void rtw89_init_ht_cap(struct rtw89_dev *rtwdev,
static void rtw89_init_vht_cap(struct rtw89_dev *rtwdev,
struct ieee80211_sta_vht_cap *vht_cap)
{
- static const __le16 highest[RF_PATH_MAX] = {
+ static const __le16 highest_bw80[RF_PATH_MAX] = {
cpu_to_le16(433), cpu_to_le16(867), cpu_to_le16(1300), cpu_to_le16(1733),
};
+ static const __le16 highest_bw160[RF_PATH_MAX] = {
+ cpu_to_le16(867), cpu_to_le16(1733), cpu_to_le16(2600), cpu_to_le16(3467),
+ };
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const __le16 *highest = chip->support_bw160 ? highest_bw160 : highest_bw80;
struct rtw89_hal *hal = &rtwdev->hal;
u16 tx_mcs_map = 0, rx_mcs_map = 0;
u8 sts_cap = 3;
@@ -2017,6 +2306,9 @@ static void rtw89_init_vht_cap(struct rtw89_dev *rtwdev,
vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
vht_cap->cap |= sts_cap << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ if (chip->support_bw160)
+ vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
+ IEEE80211_VHT_CAP_SHORT_GI_160;
vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(rx_mcs_map);
vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(tx_mcs_map);
vht_cap->vht_mcs.rx_highest = highest[hal->rx_nss - 1];
@@ -2087,8 +2379,15 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU;
if (i == NL80211_IFTYPE_STATION)
mac_cap_info[5] = IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX;
- phy_cap_info[0] = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
+ if (band == NL80211_BAND_2GHZ) {
+ phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ } else {
+ phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
+ if (chip->support_bw160)
+ phy_cap_info[0] |= IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+ }
phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
@@ -2117,6 +2416,9 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
phy_cap_info[8] = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI |
IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996;
+ if (chip->support_bw160)
+ phy_cap_info[8] |= IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
@@ -2127,6 +2429,22 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
he_cap->he_mcs_nss_supp.rx_mcs_80 = cpu_to_le16(mcs_map);
he_cap->he_mcs_nss_supp.tx_mcs_80 = cpu_to_le16(mcs_map);
+ if (chip->support_bw160) {
+ he_cap->he_mcs_nss_supp.rx_mcs_160 = cpu_to_le16(mcs_map);
+ he_cap->he_mcs_nss_supp.tx_mcs_160 = cpu_to_le16(mcs_map);
+ }
+
+ if (band == NL80211_BAND_6GHZ) {
+ __le16 capa;
+
+ capa = le16_encode_bits(IEEE80211_HT_MPDU_DENSITY_NONE,
+ IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
+ le16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
+ le16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
+ IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
+ iftype_data[idx].he_6ghz_capa.capa = capa;
+ }
idx++;
}
@@ -2139,34 +2457,52 @@ static int rtw89_core_set_supported_band(struct rtw89_dev *rtwdev)
{
struct ieee80211_hw *hw = rtwdev->hw;
struct ieee80211_supported_band *sband_2ghz = NULL, *sband_5ghz = NULL;
+ struct ieee80211_supported_band *sband_6ghz = NULL;
u32 size = sizeof(struct ieee80211_supported_band);
+ u8 support_bands = rtwdev->chip->support_bands;
- sband_2ghz = kmemdup(&rtw89_sband_2ghz, size, GFP_KERNEL);
- if (!sband_2ghz)
- goto err;
- rtw89_init_ht_cap(rtwdev, &sband_2ghz->ht_cap);
- rtw89_init_he_cap(rtwdev, NL80211_BAND_2GHZ, sband_2ghz);
- hw->wiphy->bands[NL80211_BAND_2GHZ] = sband_2ghz;
+ if (support_bands & BIT(NL80211_BAND_2GHZ)) {
+ sband_2ghz = kmemdup(&rtw89_sband_2ghz, size, GFP_KERNEL);
+ if (!sband_2ghz)
+ goto err;
+ rtw89_init_ht_cap(rtwdev, &sband_2ghz->ht_cap);
+ rtw89_init_he_cap(rtwdev, NL80211_BAND_2GHZ, sband_2ghz);
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = sband_2ghz;
+ }
- sband_5ghz = kmemdup(&rtw89_sband_5ghz, size, GFP_KERNEL);
- if (!sband_5ghz)
- goto err;
- rtw89_init_ht_cap(rtwdev, &sband_5ghz->ht_cap);
- rtw89_init_vht_cap(rtwdev, &sband_5ghz->vht_cap);
- rtw89_init_he_cap(rtwdev, NL80211_BAND_5GHZ, sband_5ghz);
- hw->wiphy->bands[NL80211_BAND_5GHZ] = sband_5ghz;
+ if (support_bands & BIT(NL80211_BAND_5GHZ)) {
+ sband_5ghz = kmemdup(&rtw89_sband_5ghz, size, GFP_KERNEL);
+ if (!sband_5ghz)
+ goto err;
+ rtw89_init_ht_cap(rtwdev, &sband_5ghz->ht_cap);
+ rtw89_init_vht_cap(rtwdev, &sband_5ghz->vht_cap);
+ rtw89_init_he_cap(rtwdev, NL80211_BAND_5GHZ, sband_5ghz);
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = sband_5ghz;
+ }
+
+ if (support_bands & BIT(NL80211_BAND_6GHZ)) {
+ sband_6ghz = kmemdup(&rtw89_sband_6ghz, size, GFP_KERNEL);
+ if (!sband_6ghz)
+ goto err;
+ rtw89_init_he_cap(rtwdev, NL80211_BAND_6GHZ, sband_6ghz);
+ hw->wiphy->bands[NL80211_BAND_6GHZ] = sband_6ghz;
+ }
return 0;
err:
hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
+ hw->wiphy->bands[NL80211_BAND_6GHZ] = NULL;
if (sband_2ghz)
kfree(sband_2ghz->iftype_data);
if (sband_5ghz)
kfree(sband_5ghz->iftype_data);
+ if (sband_6ghz)
+ kfree(sband_6ghz->iftype_data);
kfree(sband_2ghz);
kfree(sband_5ghz);
+ kfree(sband_6ghz);
return -ENOMEM;
}
@@ -2176,10 +2512,14 @@ static void rtw89_core_clr_supported_band(struct rtw89_dev *rtwdev)
kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]->iftype_data);
kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]->iftype_data);
+ if (hw->wiphy->bands[NL80211_BAND_6GHZ])
+ kfree(hw->wiphy->bands[NL80211_BAND_6GHZ]->iftype_data);
kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]);
kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]);
+ kfree(hw->wiphy->bands[NL80211_BAND_6GHZ]);
hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
+ hw->wiphy->bands[NL80211_BAND_6GHZ] = NULL;
}
static void rtw89_core_ppdu_sts_init(struct rtw89_dev *rtwdev)
@@ -2192,6 +2532,21 @@ static void rtw89_core_ppdu_sts_init(struct rtw89_dev *rtwdev)
rtwdev->ppdu_sts.curr_rx_ppdu_cnt[i] = U8_MAX;
}
+void rtw89_core_update_beacon_work(struct work_struct *work)
+{
+ struct rtw89_dev *rtwdev;
+ struct rtw89_vif *rtwvif = container_of(work, struct rtw89_vif,
+ update_beacon_work);
+
+ if (rtwvif->net_type != RTW89_NET_TYPE_AP_MODE)
+ return;
+
+ rtwdev = rtwvif->rtwdev;
+ mutex_lock(&rtwdev->mutex);
+ rtw89_fw_h2c_update_beacon(rtwdev, rtwvif);
+ mutex_unlock(&rtwdev->mutex);
+}
+
int rtw89_core_start(struct rtw89_dev *rtwdev)
{
int ret;
@@ -2278,10 +2633,16 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
int ret;
+ u8 band;
INIT_LIST_HEAD(&rtwdev->ba_list);
INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
INIT_LIST_HEAD(&rtwdev->early_h2c_list);
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
+ if (!(rtwdev->chip->support_bands & BIT(band)))
+ continue;
+ INIT_LIST_HEAD(&rtwdev->scan_info.pkt_list[band]);
+ }
INIT_WORK(&rtwdev->ba_work, rtw89_core_ba_work);
INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work);
INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work);
@@ -2292,11 +2653,13 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
INIT_DELAYED_WORK(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work);
rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
spin_lock_init(&rtwdev->ba_lock);
+ spin_lock_init(&rtwdev->rpwm_lock);
mutex_init(&rtwdev->mutex);
mutex_init(&rtwdev->rf_mutex);
rtwdev->total_sta_assoc = 0;
INIT_WORK(&rtwdev->c2h_work, rtw89_fw_c2h_work);
+ INIT_WORK(&rtwdev->ips_work, rtw89_ips_work);
skb_queue_head_init(&rtwdev->c2h_queue);
rtw89_core_ppdu_sts_init(rtwdev);
rtw89_traffic_stats_init(rtwdev, &rtwdev->stats);
@@ -2332,12 +2695,48 @@ void rtw89_core_deinit(struct rtw89_dev *rtwdev)
}
EXPORT_SYMBOL(rtw89_core_deinit);
+void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ const u8 *mac_addr, bool hw_scan)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ rtwdev->scanning = true;
+ rtw89_leave_lps(rtwdev);
+ if (hw_scan && rtwvif->net_type == RTW89_NET_TYPE_NO_LINK)
+ rtw89_leave_ips(rtwdev);
+
+ ether_addr_copy(rtwvif->mac_addr, mac_addr);
+ rtw89_btc_ntfy_scan_start(rtwdev, RTW89_PHY_0, hal->current_band_type);
+ rtw89_chip_rfk_scan(rtwdev, true);
+ rtw89_hci_recalc_int_mit(rtwdev);
+
+ rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, mac_addr);
+}
+
+void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif, bool hw_scan)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+
+ ether_addr_copy(rtwvif->mac_addr, vif->addr);
+ rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
+
+ rtw89_chip_rfk_scan(rtwdev, false);
+ rtw89_btc_ntfy_scan_finish(rtwdev, RTW89_PHY_0);
+
+ rtwdev->scanning = false;
+ rtwdev->dig.bypass_dig = true;
+ if (hw_scan && rtwvif->net_type == RTW89_NET_TYPE_NO_LINK)
+ ieee80211_queue_work(rtwdev->hw, &rtwdev->ips_work);
+}
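A short caller sketch for the new scan helpers (hypothetical function names, assuming the usual rtwdev->mutex locking of the mac80211 ops; not part of this patch):

static void example_sw_scan_start(struct rtw89_dev *rtwdev,
				  struct ieee80211_vif *vif,
				  const u8 *mac_addr)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;

	mutex_lock(&rtwdev->mutex);
	rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, false);
	mutex_unlock(&rtwdev->mutex);
}

static void example_sw_scan_complete(struct rtw89_dev *rtwdev,
				     struct ieee80211_vif *vif)
{
	mutex_lock(&rtwdev->mutex);
	rtw89_core_scan_complete(rtwdev, vif, false);
	mutex_unlock(&rtwdev->mutex);
}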
+
static void rtw89_read_chip_ver(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
u8 cv;
cv = rtw89_read32_mask(rtwdev, R_AX_SYS_CFG1, B_AX_CHIP_VER_MASK);
- if (cv <= CHIP_CBV) {
+ if (chip->chip_id == RTL8852A && cv <= CHIP_CBV) {
if (rtw89_read32(rtwdev, R_AX_GPIO0_7_FUNC_SEL) == RTW89_R32_DEAD)
cv = CHIP_CAV;
else
@@ -2347,6 +2746,13 @@ static void rtw89_read_chip_ver(struct rtw89_dev *rtwdev)
rtwdev->hal.cv = cv;
}
+static void rtw89_core_setup_phycap(struct rtw89_dev *rtwdev)
+{
+ rtwdev->hal.support_cckpd =
+ !(rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv <= CHIP_CBV) &&
+ !(rtwdev->chip->chip_id == RTL8852B && rtwdev->hal.cv <= CHIP_CAV);
+}
+
static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev)
{
int ret;
@@ -2367,6 +2773,8 @@ static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev)
if (ret)
return ret;
+ rtw89_core_setup_phycap(rtwdev);
+
rtw89_mac_pwr_off(rtwdev);
return 0;
@@ -2438,13 +2846,18 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
ieee80211_hw_set(hw, SUPPORTS_PS);
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
- hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP);
hw->wiphy->available_antennas_tx = BIT(rtwdev->chip->rf_path_num) - 1;
hw->wiphy->available_antennas_rx = BIT(rtwdev->chip->rf_path_num) - 1;
hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+ hw->wiphy->max_scan_ssids = RTW89_SCANOFLD_MAX_SSID;
+ hw->wiphy->max_scan_ie_len = RTW89_SCANOFLD_MAX_IE_LEN;
+
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
ret = rtw89_core_set_supported_band(rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 7c84556ec4ad..771722132c53 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -13,9 +13,9 @@
#include <net/mac80211.h>
struct rtw89_dev;
+struct rtw89_pci_info;
extern const struct ieee80211_ops rtw89_ops;
-extern const struct rtw89_chip_info rtw8852a_chip_info;
#define MASKBYTE0 0xff
#define MASKBYTE1 0xff00
@@ -33,7 +33,6 @@ extern const struct rtw89_chip_info rtw8852a_chip_info;
#define MAX_RSSI 110
#define RSSI_FACTOR 1
#define RTW89_RSSI_RAW_TO_DBM(rssi) ((s8)((rssi) >> RSSI_FACTOR) - MAX_RSSI)
-#define RTW89_MAX_HW_PORT_NUM 5
#define RTW89_HTC_MASK_VARIANT GENMASK(1, 0)
#define RTW89_HTC_VARIANT_HE 3
@@ -63,6 +62,15 @@ enum rtw89_subband {
RTW89_CH_5G_BAND_3 = 3,
RTW89_CH_5G_BAND_4 = 4,
+ RTW89_CH_6G_BAND_IDX0, /* Low */
+ RTW89_CH_6G_BAND_IDX1, /* Low */
+ RTW89_CH_6G_BAND_IDX2, /* Mid */
+ RTW89_CH_6G_BAND_IDX3, /* Mid */
+ RTW89_CH_6G_BAND_IDX4, /* High */
+ RTW89_CH_6G_BAND_IDX5, /* High */
+ RTW89_CH_6G_BAND_IDX6, /* Ultra-high */
+ RTW89_CH_6G_BAND_IDX7, /* Ultra-high */
+
RTW89_SUBBAND_NR,
};
@@ -140,11 +148,11 @@ enum rtw89_wifi_role {
};
enum rtw89_upd_mode {
- RTW89_VIF_CREATE,
- RTW89_VIF_REMOVE,
- RTW89_VIF_TYPE_CHANGE,
- RTW89_VIF_INFO_CHANGE,
- RTW89_VIF_CON_DISCONN
+ RTW89_ROLE_CREATE,
+ RTW89_ROLE_REMOVE,
+ RTW89_ROLE_TYPE_CHANGE,
+ RTW89_ROLE_INFO_CHANGE,
+ RTW89_ROLE_CON_DISCONN
};
enum rtw89_self_role {
@@ -205,6 +213,7 @@ enum rtw89_port {
enum rtw89_band {
RTW89_BAND_2G = 0,
RTW89_BAND_5G = 1,
+ RTW89_BAND_6G = 2,
RTW89_BAND_MAX,
};
@@ -363,6 +372,25 @@ enum rtw89_hw_rate {
*/
#define RTW89_5G_CH_NUM 53
+/* 6G channels,
+ * 1, 3, 5, 7, 9, 11, 13, 15,
+ * 17, 19, 21, 23, 25, 27, 29, 33,
+ * 35, 37, 39, 41, 43, 45, 47, 49,
+ * 51, 53, 55, 57, 59, 61, 65, 67,
+ * 69, 71, 73, 75, 77, 79, 81, 83,
+ * 85, 87, 89, 91, 93, 97, 99, 101,
+ * 103, 105, 107, 109, 111, 113, 115, 117,
+ * 119, 121, 123, 125, 129, 131, 133, 135,
+ * 137, 139, 141, 143, 145, 147, 149, 151,
+ * 153, 155, 157, 161, 163, 165, 167, 169,
+ * 171, 173, 175, 177, 179, 181, 183, 185,
+ * 187, 189, 193, 195, 197, 199, 201, 203,
+ * 205, 207, 209, 211, 213, 215, 217, 219,
+ * 221, 225, 227, 229, 231, 233, 235, 237,
+ * 239, 241, 243, 245, 247, 249, 251, 253,
+ */
+#define RTW89_6G_CH_NUM 120
+
enum rtw89_rate_section {
RTW89_RS_CCK,
RTW89_RS_OFDM,
@@ -421,9 +449,6 @@ enum rtw89_regulation_type {
RTW89_REGD_NUM,
};
-extern const u8 rtw89_rs_idx_max[RTW89_RS_MAX];
-extern const u8 rtw89_rs_nss_max[RTW89_RS_MAX];
-
struct rtw89_txpwr_byrate {
s8 cck[RTW89_RATE_CCK_MAX];
s8 ofdm[RTW89_RATE_OFDM_MAX];
@@ -548,7 +573,8 @@ enum rtw89_ps_mode {
};
#define RTW89_2G_BW_NUM (RTW89_CHANNEL_WIDTH_40 + 1)
-#define RTW89_5G_BW_NUM (RTW89_CHANNEL_WIDTH_80 + 1)
+#define RTW89_5G_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1)
+#define RTW89_6G_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1)
#define RTW89_PPE_BW_NUM (RTW89_CHANNEL_WIDTH_80 + 1)
enum rtw89_ru_bandwidth {
@@ -564,19 +590,26 @@ enum rtw89_sc_offset {
RTW89_SC_20_LOWER = 2,
RTW89_SC_20_UPMOST = 3,
RTW89_SC_20_LOWEST = 4,
+ RTW89_SC_20_UP2X = 5,
+ RTW89_SC_20_LOW2X = 6,
+ RTW89_SC_20_UP3X = 7,
+ RTW89_SC_20_LOW3X = 8,
RTW89_SC_40_UPPER = 9,
RTW89_SC_40_LOWER = 10,
};
struct rtw89_channel_params {
u8 center_chan;
+ u32 center_freq;
u8 primary_chan;
u8 bandwidth;
u8 pri_ch_idx;
+ u8 band_type;
+ u8 subband_type;
};
struct rtw89_channel_help_params {
- u16 tx_en;
+ u32 tx_en;
};
struct rtw89_port_reg {
@@ -670,6 +703,7 @@ struct rtw89_rxdesc_long {
struct rtw89_tx_desc_info {
u16 pkt_size;
u8 wp_offset;
+ u8 mac_id;
u8 qsel;
u8 ch_dma;
u8 hdr_llc_len;
@@ -691,6 +725,12 @@ struct rtw89_tx_desc_info {
bool fw_dl;
u16 seq;
bool a_ctrl_bsr;
+ u8 hw_ssn_sel;
+#define RTW89_MGMT_HW_SSN_SEL 1
+ u8 hw_seq_mode;
+#define RTW89_MGMT_HW_SEQ_MODE 1
+ bool hiq;
+ u8 port;
};
struct rtw89_core_tx_request {
@@ -1048,7 +1088,7 @@ struct rtw89_btc_wl_role_info { /* struct size must be n*4 bytes */
u8 connect_cnt;
u8 link_mode;
union rtw89_btc_wl_role_info_map role_map;
- struct rtw89_btc_wl_active_role active_role[RTW89_MAX_HW_PORT_NUM];
+ struct rtw89_btc_wl_active_role active_role[RTW89_PORT_NUM];
};
struct rtw89_btc_wl_ver_info {
@@ -1151,7 +1191,7 @@ struct rtw89_btc_rf_para {
};
struct rtw89_btc_wl_info {
- struct rtw89_btc_wl_link_info link_info[RTW89_MAX_HW_PORT_NUM];
+ struct rtw89_btc_wl_link_info link_info[RTW89_PORT_NUM];
struct rtw89_btc_wl_rfk_info rfk_info;
struct rtw89_btc_wl_ver_info ver_info;
struct rtw89_btc_wl_afh_info afh_info;
@@ -1831,27 +1871,10 @@ struct rtw89_ra_report {
DECLARE_EWMA(rssi, 10, 16);
-struct rtw89_sta {
- u8 mac_id;
- bool disassoc;
- struct rtw89_vif *rtwvif;
- struct rtw89_ra_info ra;
- struct rtw89_ra_report ra_report;
- int max_agg_wait;
- u8 prev_rssi;
- struct ewma_rssi avg_rssi;
- struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS];
- struct ieee80211_rx_status rx_status;
- u16 rx_hw_rate;
- __le32 htc_template;
+#define RTW89_BA_CAM_NUM 2
- bool use_cfg_mask;
- struct cfg80211_bitrate_mask mask;
-
- bool cctl_tx_time;
- u32 ampdu_max_time:4;
- bool cctl_tx_retry_limit;
- u32 data_tx_cnt_lmt:6;
+struct rtw89_ba_cam_entry {
+ u8 tid;
};
#define RTW89_MAX_ADDR_CAM_NUM 128
@@ -1868,7 +1891,6 @@ struct rtw89_addr_cam_entry {
u8 wapi : 1;
u8 mask_sel : 2;
u8 bssid_cam_idx: 6;
- u8 sma[ETH_ALEN];
u8 sec_ent_mode;
DECLARE_BITMAP(sec_cam_map, RTW89_SEC_CAM_IN_ADDR_CAM);
@@ -1898,6 +1920,33 @@ struct rtw89_sec_cam_entry {
u8 key[32];
};
+struct rtw89_sta {
+ u8 mac_id;
+ bool disassoc;
+ struct rtw89_vif *rtwvif;
+ struct rtw89_ra_info ra;
+ struct rtw89_ra_report ra_report;
+ int max_agg_wait;
+ u8 prev_rssi;
+ struct ewma_rssi avg_rssi;
+ struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS];
+ struct ieee80211_rx_status rx_status;
+ u16 rx_hw_rate;
+ __le32 htc_template;
+ struct rtw89_addr_cam_entry addr_cam; /* AP mode only */
+
+ bool use_cfg_mask;
+ struct cfg80211_bitrate_mask mask;
+
+ bool cctl_tx_time;
+ u32 ampdu_max_time:4;
+ bool cctl_tx_retry_limit;
+ u32 data_tx_cnt_lmt:6;
+
+ DECLARE_BITMAP(ba_cam_map, RTW89_BA_CAM_NUM);
+ struct rtw89_ba_cam_entry ba_cam_entry[RTW89_BA_CAM_NUM];
+};
+
struct rtw89_efuse {
bool valid;
u8 xtal_cap;
@@ -1915,6 +1964,7 @@ struct rtw89_phy_rate_pattern {
struct rtw89_vif {
struct list_head list;
+ struct rtw89_dev *rtwdev;
u8 mac_id;
u8 port;
u8 mac_addr[ETH_ALEN];
@@ -1936,11 +1986,14 @@ struct rtw89_vif {
bool wowlan_magic;
bool is_hesta;
bool last_a_ctrl;
+ struct work_struct update_beacon_work;
struct rtw89_addr_cam_entry addr_cam;
struct rtw89_bssid_cam_entry bssid_cam;
struct ieee80211_tx_queue_params tx_params[IEEE80211_NUM_ACS];
struct rtw89_traffic_stats stats;
struct rtw89_phy_rate_pattern rate_pattern;
+ struct cfg80211_scan_request *scan_req;
+ struct ieee80211_scan_ies *scan_ies;
};
enum rtw89_lv1_rcvy_step {
@@ -2012,7 +2065,15 @@ struct rtw89_chip_ops {
struct ieee80211_rx_status *status);
void (*bb_ctrl_btc_preagc)(struct rtw89_dev *rtwdev, bool bt_en);
void (*set_txpwr_ul_tb_offset)(struct rtw89_dev *rtwdev,
- s16 pw_ofst, enum rtw89_mac_idx mac_idx);
+ s8 pw_ofst, enum rtw89_mac_idx mac_idx);
+ int (*pwr_on_func)(struct rtw89_dev *rtwdev);
+ int (*pwr_off_func)(struct rtw89_dev *rtwdev);
+ int (*cfg_ctrl_path)(struct rtw89_dev *rtwdev, bool wl);
+ int (*mac_cfg_gnt)(struct rtw89_dev *rtwdev,
+ const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
+ int (*stop_sch_tx)(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 *tx_en, enum rtw89_sch_tx_sel sel);
+ int (*resume_sch_tx)(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
void (*btc_set_rfe)(struct rtw89_dev *rtwdev);
void (*btc_init_cfg)(struct rtw89_dev *rtwdev);
@@ -2133,6 +2194,7 @@ struct rtw89_ple_quota {
u16 bb_rpt;
u16 wd_rel;
u16 cpu_io;
+ u16 tx_rpt;
};
struct rtw89_dle_mem {
@@ -2173,6 +2235,8 @@ struct rtw89_phy_table {
const struct rtw89_reg2_def *regs;
u32 n_regs;
enum rtw89_rf_path rf_path;
+ void (*config)(struct rtw89_dev *rtwdev, const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path, void *data);
};
struct rtw89_txpwr_table {
@@ -2182,6 +2246,21 @@ struct rtw89_txpwr_table {
const struct rtw89_txpwr_table *tbl);
};
+struct rtw89_page_regs {
+ u32 hci_fc_ctrl;
+ u32 ch_page_ctrl;
+ u32 ach_page_ctrl;
+ u32 ach_page_info;
+ u32 pub_page_info3;
+ u32 pub_page_ctrl1;
+ u32 pub_page_ctrl2;
+ u32 pub_page_info1;
+ u32 pub_page_info2;
+ u32 wp_page_ctrl1;
+ u32 wp_page_ctrl2;
+ u32 wp_page_info1;
+};
+
struct rtw89_chip_info {
enum rtw89_core_chip_id chip_id;
const struct rtw89_chip_ops *ops;
@@ -2192,6 +2271,8 @@ struct rtw89_chip_info {
const struct rtw89_hfc_param_ini *hfc_param_ini;
const struct rtw89_dle_mem *dle_mem;
u32 rf_base_addr[2];
+ u8 support_bands;
+ bool support_bw160;
u8 rf_path_num;
u8 tx_nss;
u8 rx_nss;
@@ -2203,6 +2284,8 @@ struct rtw89_chip_info {
u32 physical_efuse_size;
u32 logical_efuse_size;
u32 limit_efuse_size;
+ u32 dav_phy_efuse_size;
+ u32 dav_log_efuse_size;
u32 phycap_addr;
u32 phycap_size;
@@ -2219,10 +2302,15 @@ struct rtw89_chip_info {
const s8 (*txpwr_lmt_5g)[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[RTW89_RS_LMT_NUM][RTW89_BF_NUM]
[RTW89_REGD_NUM][RTW89_5G_CH_NUM];
+ const s8 (*txpwr_lmt_6g)[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
+ [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+ [RTW89_REGD_NUM][RTW89_6G_CH_NUM];
const s8 (*txpwr_lmt_ru_2g)[RTW89_RU_NUM][RTW89_NTX_NUM]
[RTW89_REGD_NUM][RTW89_2G_CH_NUM];
const s8 (*txpwr_lmt_ru_5g)[RTW89_RU_NUM][RTW89_NTX_NUM]
[RTW89_REGD_NUM][RTW89_5G_CH_NUM];
+ const s8 (*txpwr_lmt_ru_6g)[RTW89_RU_NUM][RTW89_NTX_NUM]
+ [RTW89_REGD_NUM][RTW89_6G_CH_NUM];
u8 txpwr_factor_rf;
u8 txpwr_factor_mac;
@@ -2245,6 +2333,24 @@ struct rtw89_chip_info {
u8 rf_para_dlink_num;
const struct rtw89_btc_rf_trx_para *rf_para_dlink;
u8 ps_mode_supported;
+
+ u32 hci_func_en_addr;
+ u32 h2c_ctrl_reg;
+ const u32 *h2c_regs;
+ u32 c2h_ctrl_reg;
+ const u32 *c2h_regs;
+ const struct rtw89_page_regs *page_regs;
+ const struct rtw89_reg_def *dcfo_comp;
+ u8 dcfo_comp_sft;
+};
+
+union rtw89_bus_info {
+ const struct rtw89_pci_info *pci;
+};
+
+struct rtw89_driver_info {
+ const struct rtw89_chip_info *chip;
+ union rtw89_bus_info bus;
};
enum rtw89_hcifc_mode {
@@ -2312,6 +2418,8 @@ struct rtw89_fw_info {
struct rtw89_fw_suit wowlan;
bool fw_log_enable;
bool old_ht_ra_format;
+ bool scan_offload;
+ bool tx_wake;
};
struct rtw89_cam_info {
@@ -2348,19 +2456,23 @@ struct rtw89_hal {
u32 rx_fltr;
u8 cv;
u8 current_channel;
+ u32 current_freq;
u8 prev_primary_channel;
u8 current_primary_channel;
enum rtw89_subband current_subband;
u8 current_band_width;
+ u8 prev_band_type;
u8 current_band_type;
u32 sw_amsdu_max_size;
u32 antenna_tx;
u32 antenna_rx;
u8 tx_nss;
u8 rx_nss;
+ bool support_cckpd;
};
#define RTW89_MAX_MAC_ID_NUM 128
+#define RTW89_MAX_PKT_OFLD_NUM 255
enum rtw89_flags {
RTW89_FLAG_POWERON,
@@ -2556,22 +2668,30 @@ struct rtw89_cfo_tracking_info {
s32 residual_cfo_acc;
u8 phy_cfotrk_state;
u8 phy_cfotrk_cnt;
+ bool divergence_lock_en;
+ u8 x_cap_lb;
+ u8 x_cap_ub;
+ u8 lock_cnt;
};
/* 2GL, 2GH, 5GL1, 5GH1, 5GM1, 5GM2, 5GH1, 5GH2 */
#define TSSI_TRIM_CH_GROUP_NUM 8
+#define TSSI_TRIM_CH_GROUP_NUM_6G 16
#define TSSI_CCK_CH_GROUP_NUM 6
#define TSSI_MCS_2G_CH_GROUP_NUM 5
#define TSSI_MCS_5G_CH_GROUP_NUM 14
+#define TSSI_MCS_6G_CH_GROUP_NUM 32
#define TSSI_MCS_CH_GROUP_NUM \
(TSSI_MCS_2G_CH_GROUP_NUM + TSSI_MCS_5G_CH_GROUP_NUM)
struct rtw89_tssi_info {
u8 thermal[RF_PATH_MAX];
s8 tssi_trim[RF_PATH_MAX][TSSI_TRIM_CH_GROUP_NUM];
+ s8 tssi_trim_6g[RF_PATH_MAX][TSSI_TRIM_CH_GROUP_NUM_6G];
s8 tssi_cck[RF_PATH_MAX][TSSI_CCK_CH_GROUP_NUM];
s8 tssi_mcs[RF_PATH_MAX][TSSI_MCS_CH_GROUP_NUM];
+ s8 tssi_6g_mcs[RF_PATH_MAX][TSSI_MCS_6G_CH_GROUP_NUM];
s8 extra_ofst[RF_PATH_MAX];
bool tssi_tracking_check[RF_PATH_MAX];
u8 default_txagc_offset[RF_PATH_MAX];
@@ -2769,12 +2889,23 @@ struct rtw89_early_h2c {
u16 h2c_len;
};
+struct rtw89_hw_scan_info {
+ struct ieee80211_vif *scanning_vif;
+ struct list_head pkt_list[NUM_NL80211_BANDS];
+ u8 op_pri_ch;
+ u8 op_chan;
+ u8 op_bw;
+ u8 op_band;
+};
+
struct rtw89_dev {
struct ieee80211_hw *hw;
struct device *dev;
bool dbcc_en;
+ struct rtw89_hw_scan_info scan_info;
const struct rtw89_chip_info *chip;
+ const struct rtw89_pci_info *pci_info;
struct rtw89_hal hal;
struct rtw89_mac_info mac;
struct rtw89_fw_info fw;
@@ -2795,19 +2926,23 @@ struct rtw89_dev {
/* txqs to setup ba session */
struct list_head ba_list;
struct work_struct ba_work;
+ /* used to protect rpwm */
+ spinlock_t rpwm_lock;
struct rtw89_cam_info cam_info;
struct sk_buff_head c2h_queue;
struct work_struct c2h_work;
+ struct work_struct ips_work;
struct list_head early_h2c_list;
struct rtw89_ser ser;
- DECLARE_BITMAP(hw_port, RTW89_MAX_HW_PORT_NUM);
+ DECLARE_BITMAP(hw_port, RTW89_PORT_NUM);
DECLARE_BITMAP(mac_id_map, RTW89_MAX_MAC_ID_NUM);
DECLARE_BITMAP(flags, NUM_OF_RTW89_FLAGS);
+ DECLARE_BITMAP(pkt_offload, RTW89_MAX_PKT_OFLD_NUM);
struct rtw89_phy_stat phystat;
struct rtw89_dack_info dack;
@@ -2847,7 +2982,7 @@ struct rtw89_dev {
int napi_budget_countdown;
/* HCI related data, keep last */
- u8 priv[0] __aligned(sizeof(void *));
+ u8 priv[] __aligned(sizeof(void *));
};
static inline int rtw89_hci_tx_write(struct rtw89_dev *rtwdev,
@@ -3128,6 +3263,46 @@ static inline struct rtw89_sta *sta_to_rtwsta_safe(struct ieee80211_sta *sta)
return sta ? (struct rtw89_sta *)sta->drv_priv : NULL;
}
+static inline u8 rtw89_hw_to_rate_info_bw(enum rtw89_bandwidth hw_bw)
+{
+ if (hw_bw == RTW89_CHANNEL_WIDTH_160)
+ return RATE_INFO_BW_160;
+ else if (hw_bw == RTW89_CHANNEL_WIDTH_80)
+ return RATE_INFO_BW_80;
+ else if (hw_bw == RTW89_CHANNEL_WIDTH_40)
+ return RATE_INFO_BW_40;
+ else
+ return RATE_INFO_BW_20;
+}
+
+static inline
+enum rtw89_bandwidth nl_to_rtw89_bandwidth(enum nl80211_chan_width width)
+{
+ switch (width) {
+ default:
+ WARN(1, "Not support bandwidth %d\n", width);
+ fallthrough;
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ return RTW89_CHANNEL_WIDTH_20;
+ case NL80211_CHAN_WIDTH_40:
+ return RTW89_CHANNEL_WIDTH_40;
+ case NL80211_CHAN_WIDTH_80:
+ return RTW89_CHANNEL_WIDTH_80;
+ case NL80211_CHAN_WIDTH_160:
+ return RTW89_CHANNEL_WIDTH_160;
+ }
+}
+
+static inline
+struct rtw89_addr_cam_entry *rtw89_get_addr_cam_of(struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE && rtwsta)
+ return &rtwsta->addr_cam;
+ return &rtwvif->addr_cam;
+}
+
static inline
void rtw89_chip_set_channel_prepare(struct rtw89_dev *rtwdev,
struct rtw89_channel_help_params *p)
@@ -3298,6 +3473,39 @@ static inline void rtw89_ctrl_btg(struct rtw89_dev *rtwdev, bool btg)
chip->ops->ctrl_btg(rtwdev, btg);
}
+static inline
+void rtw89_chip_mac_cfg_gnt(struct rtw89_dev *rtwdev,
+ const struct rtw89_mac_ax_coex_gnt *gnt_cfg)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ chip->ops->mac_cfg_gnt(rtwdev, gnt_cfg);
+}
+
+static inline void rtw89_chip_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ chip->ops->cfg_ctrl_path(rtwdev, wl);
+}
+
+static inline
+int rtw89_chip_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 *tx_en, enum rtw89_sch_tx_sel sel)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->stop_sch_tx(rtwdev, mac_idx, tx_en, sel);
+}
+
+static inline
+int rtw89_chip_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->resume_sch_tx(rtwdev, mac_idx, tx_en);
+}
+
static inline u8 *get_hdr_bssid(struct ieee80211_hdr *hdr)
{
__le16 fc = hdr->frame_control;
@@ -3371,6 +3579,8 @@ void rtw89_set_channel(struct rtw89_dev *rtwdev);
u8 rtw89_core_acquire_bit_map(unsigned long *addr, unsigned long size);
void rtw89_core_release_bit_map(unsigned long *addr, u8 bit);
void rtw89_core_release_all_bits_map(unsigned long *addr, unsigned int nbits);
+int rtw89_core_acquire_sta_ba_entry(struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx);
+int rtw89_core_release_sta_ba_entry(struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx);
void rtw89_vif_type_mapping(struct ieee80211_vif *vif, bool assoc);
int rtw89_chip_info_setup(struct rtw89_dev *rtwdev);
u16 rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate);
@@ -3381,5 +3591,10 @@ void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev,
struct rtw89_traffic_stats *stats);
int rtw89_core_start(struct rtw89_dev *rtwdev);
void rtw89_core_stop(struct rtw89_dev *rtwdev);
+void rtw89_core_update_beacon_work(struct work_struct *work);
+void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ const u8 *mac_addr, bool hw_scan);
+void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif, bool hw_scan);
#endif
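
The new rtw89_chip_stop_sch_tx()/rtw89_chip_resume_sch_tx() wrappers added to core.h above follow the driver's usual chip->ops indirection: a thin inline forwards to a per-chip function table. Below is a minimal user-space sketch of that dispatch pattern; every name in it is an illustrative stand-in, not driver API.

/* Sketch of the chip->ops dispatch used by the inline wrappers above. */
#include <stdio.h>

struct demo_dev;

struct demo_chip_ops {
	int (*stop_sch_tx)(struct demo_dev *dev, unsigned char mac_idx,
			   unsigned int *tx_en);
};

struct demo_chip_info {
	const struct demo_chip_ops *ops;
};

struct demo_dev {
	const struct demo_chip_info *chip;
};

static int demo_8852a_stop_sch_tx(struct demo_dev *dev, unsigned char mac_idx,
				  unsigned int *tx_en)
{
	*tx_en = 0xffff;	/* pretend we latched the current TX-enable bits */
	printf("stop sch tx on mac %u\n", mac_idx);
	return 0;
}

static const struct demo_chip_ops demo_8852a_ops = {
	.stop_sch_tx = demo_8852a_stop_sch_tx,
};

static const struct demo_chip_info demo_8852a_info = {
	.ops = &demo_8852a_ops,
};

/* thin wrapper, mirroring the shape of rtw89_chip_stop_sch_tx() */
static int demo_chip_stop_sch_tx(struct demo_dev *dev, unsigned char mac_idx,
				 unsigned int *tx_en)
{
	return dev->chip->ops->stop_sch_tx(dev, mac_idx, tx_en);
}

int main(void)
{
	struct demo_dev dev = { .chip = &demo_8852a_info };
	unsigned int tx_en;

	return demo_chip_stop_sch_tx(&dev, 0, &tx_en);
}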
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index 22bd1d03e722..b73cc03cecfd 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -2324,16 +2324,17 @@ rtw89_debug_append_rx_rate(struct seq_file *m, struct rtw89_pkt_stat *pkt_stat,
static const struct rtw89_rx_rate_cnt_info {
enum rtw89_hw_rate first_rate;
int len;
+ int ext;
const char *rate_mode;
} rtw89_rx_rate_cnt_infos[] = {
- {RTW89_HW_RATE_CCK1, 4, "Legacy:"},
- {RTW89_HW_RATE_OFDM6, 8, "OFDM:"},
- {RTW89_HW_RATE_MCS0, 8, "HT 0:"},
- {RTW89_HW_RATE_MCS8, 8, "HT 1:"},
- {RTW89_HW_RATE_VHT_NSS1_MCS0, 10, "VHT 1SS:"},
- {RTW89_HW_RATE_VHT_NSS2_MCS0, 10, "VHT 2SS:"},
- {RTW89_HW_RATE_HE_NSS1_MCS0, 12, "HE 1SS:"},
- {RTW89_HW_RATE_HE_NSS2_MCS0, 12, "HE 2ss:"},
+ {RTW89_HW_RATE_CCK1, 4, 0, "Legacy:"},
+ {RTW89_HW_RATE_OFDM6, 8, 0, "OFDM:"},
+ {RTW89_HW_RATE_MCS0, 8, 0, "HT 0:"},
+ {RTW89_HW_RATE_MCS8, 8, 0, "HT 1:"},
+ {RTW89_HW_RATE_VHT_NSS1_MCS0, 10, 2, "VHT 1SS:"},
+ {RTW89_HW_RATE_VHT_NSS2_MCS0, 10, 2, "VHT 2SS:"},
+ {RTW89_HW_RATE_HE_NSS1_MCS0, 12, 0, "HE 1SS:"},
+ {RTW89_HW_RATE_HE_NSS2_MCS0, 12, 0, "HE 2ss:"},
};
static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v)
@@ -2358,6 +2359,11 @@ static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v)
seq_printf(m, "%10s [", info->rate_mode);
rtw89_debug_append_rx_rate(m, pkt_stat,
info->first_rate, info->len);
+ if (info->ext) {
+ seq_puts(m, "][");
+ rtw89_debug_append_rx_rate(m, pkt_stat,
+ info->first_rate + info->len, info->ext);
+ }
seq_puts(m, "]\n");
}
@@ -2366,6 +2372,72 @@ static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v)
return 0;
}
+static void rtw89_dump_addr_cam(struct seq_file *m,
+ struct rtw89_addr_cam_entry *addr_cam)
+{
+ struct rtw89_sec_cam_entry *sec_entry;
+ int i;
+
+ seq_printf(m, "\taddr_cam_idx=%u\n", addr_cam->addr_cam_idx);
+ seq_printf(m, "\t-> bssid_cam_idx=%u\n", addr_cam->bssid_cam_idx);
+ seq_printf(m, "\tsec_cam_bitmap=%*ph\n", (int)sizeof(addr_cam->sec_cam_map),
+ addr_cam->sec_cam_map);
+ for (i = 0; i < RTW89_SEC_CAM_IN_ADDR_CAM; i++) {
+ sec_entry = addr_cam->sec_entries[i];
+ if (!sec_entry)
+ continue;
+ seq_printf(m, "\tsec[%d]: sec_cam_idx %u", i, sec_entry->sec_cam_idx);
+ if (sec_entry->ext_key)
+ seq_printf(m, ", %u", sec_entry->sec_cam_idx + 1);
+ seq_puts(m, "\n");
+ }
+}
+
+static
+void rtw89_vif_ids_get_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct seq_file *m = (struct seq_file *)data;
+ struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
+
+ seq_printf(m, "VIF [%d] %pM\n", rtwvif->mac_id, rtwvif->mac_addr);
+ seq_printf(m, "\tbssid_cam_idx=%u\n", bssid_cam->bssid_cam_idx);
+ rtw89_dump_addr_cam(m, &rtwvif->addr_cam);
+}
+
+static void rtw89_sta_ids_get_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct seq_file *m = (struct seq_file *)data;
+
+ seq_printf(m, "STA [%d] %pM\n", rtwsta->mac_id, sta->addr);
+ rtw89_dump_addr_cam(m, &rtwsta->addr_cam);
+}
+
+static int rtw89_debug_priv_stations_get(struct seq_file *m, void *v)
+{
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+
+ seq_puts(m, "map:\n");
+ seq_printf(m, "\tmac_id: %*ph\n", (int)sizeof(rtwdev->mac_id_map),
+ rtwdev->mac_id_map);
+ seq_printf(m, "\taddr_cam: %*ph\n", (int)sizeof(cam_info->addr_cam_map),
+ cam_info->addr_cam_map);
+ seq_printf(m, "\tbssid_cam: %*ph\n", (int)sizeof(cam_info->bssid_cam_map),
+ cam_info->bssid_cam_map);
+ seq_printf(m, "\tsec_cam: %*ph\n", (int)sizeof(cam_info->sec_cam_map),
+ cam_info->sec_cam_map);
+
+ ieee80211_iterate_active_interfaces_atomic(rtwdev->hw,
+ IEEE80211_IFACE_ITER_NORMAL, rtw89_vif_ids_get_iter, m);
+
+ ieee80211_iterate_stations_atomic(rtwdev->hw, rtw89_sta_ids_get_iter, m);
+
+ return 0;
+}
+
static struct rtw89_debugfs_priv rtw89_debug_priv_read_reg = {
.cb_read = rtw89_debug_priv_read_reg_get,
.cb_write = rtw89_debug_priv_read_reg_select,
@@ -2432,6 +2504,10 @@ static struct rtw89_debugfs_priv rtw89_debug_priv_phy_info = {
.cb_read = rtw89_debug_priv_phy_info_get,
};
+static struct rtw89_debugfs_priv rtw89_debug_priv_stations = {
+ .cb_read = rtw89_debug_priv_stations_get,
+};
+
#define rtw89_debugfs_add(name, mode, fopname, parent) \
do { \
rtw89_debug_priv_ ##name.rtwdev = rtwdev; \
@@ -2470,6 +2546,7 @@ void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
rtw89_debugfs_add_w(btc_manual);
rtw89_debugfs_add_w(fw_log_manual);
rtw89_debugfs_add_r(phy_info);
+ rtw89_debugfs_add_r(stations);
}
#endif
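
The debug.c change above adds an ext field to the RX rate count table so VHT rows print the base MCS 0-9 counters in one bracket and the two extended MCS entries in a second one. A small stand-alone C sketch of that "base + ext" print, with made-up counter data rather than the driver's seq_file plumbing:

/* Illustrative only: split a rate histogram row into [base][ext]. */
#include <stdio.h>

static void append_rates(const unsigned int *cnt, int first, int len)
{
	int i;

	for (i = 0; i < len; i++)
		printf("%s%u", i ? " " : "", cnt[first + i]);
}

int main(void)
{
	unsigned int rx_cnt[32] = { [0] = 7, [9] = 3, [10] = 1, [11] = 2 };
	int first = 0, len = 10, ext = 2;

	printf("VHT 1SS: [");
	append_rates(rx_cnt, first, len);
	if (ext) {
		printf("][");
		append_rates(rx_cnt, first + len, ext);
	}
	printf("]\n");
	return 0;
}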
diff --git a/drivers/net/wireless/realtek/rtw89/debug.h b/drivers/net/wireless/realtek/rtw89/debug.h
index f14b726c1a9f..1745815f5e00 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.h
+++ b/drivers/net/wireless/realtek/rtw89/debug.h
@@ -23,6 +23,7 @@ enum rtw89_debug_mask {
RTW89_DBG_FW = BIT(12),
RTW89_DBG_BTC = BIT(13),
RTW89_DBG_BF = BIT(14),
+ RTW89_DBG_HW_SCAN = BIT(15),
};
enum rtw89_debug_mac_reg_sel {
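
The debug.h hunk above only reserves a new mask bit, RTW89_DBG_HW_SCAN, in the per-subsystem debug mask. For context, a tiny sketch of how such a mask gates messages at runtime; names and the mask value below are made up, not the driver's rtw89_debug() implementation.

/* Illustration of one-bit-per-subsystem debug filtering. */
#include <stdio.h>

#define DBG_FW		(1u << 12)
#define DBG_HW_SCAN	(1u << 15)

static unsigned int debug_mask = DBG_HW_SCAN;

#define dbg_printf(comp, fmt, ...)				\
	do {							\
		if (debug_mask & (comp))			\
			printf(fmt, ##__VA_ARGS__);		\
	} while (0)

int main(void)
{
	dbg_printf(DBG_HW_SCAN, "scan: %d channels queued\n", 11);
	dbg_printf(DBG_FW, "this one is filtered out\n");
	return 0;
}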
diff --git a/drivers/net/wireless/realtek/rtw89/efuse.c b/drivers/net/wireless/realtek/rtw89/efuse.c
index c0b80f3da56c..7bd4f8558e03 100644
--- a/drivers/net/wireless/realtek/rtw89/efuse.c
+++ b/drivers/net/wireless/realtek/rtw89/efuse.c
@@ -4,6 +4,7 @@
#include "debug.h"
#include "efuse.h"
+#include "mac.h"
#include "reg.h"
enum rtw89_efuse_bank {
@@ -16,6 +17,9 @@ static int rtw89_switch_efuse_bank(struct rtw89_dev *rtwdev,
{
u8 val;
+ if (rtwdev->chip->chip_id != RTL8852A)
+ return 0;
+
val = rtw89_read32_mask(rtwdev, R_AX_EFUSE_CTRL_1,
B_AX_EF_CELL_SEL_MASK);
if (bank == val)
@@ -32,14 +36,61 @@ static int rtw89_switch_efuse_bank(struct rtw89_dev *rtwdev,
return -EBUSY;
}
-static int rtw89_dump_physical_efuse_map(struct rtw89_dev *rtwdev, u8 *map,
- u32 dump_addr, u32 dump_size)
+static void rtw89_enable_otp_burst_mode(struct rtw89_dev *rtwdev, bool en)
+{
+ if (en)
+ rtw89_write32_set(rtwdev, R_AX_EFUSE_CTRL_1_V1, B_AX_EF_BURST);
+ else
+ rtw89_write32_clr(rtwdev, R_AX_EFUSE_CTRL_1_V1, B_AX_EF_BURST);
+}
+
+static void rtw89_enable_efuse_pwr_cut_ddv(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ if (chip_id == RTL8852A)
+ return;
+
+ rtw89_write8_set(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+ rtw89_write16_set(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B14);
+
+ fsleep(1000);
+
+ rtw89_write16_set(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B15);
+ rtw89_write16_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_ISO_EB2CORE);
+ if (chip_id == RTL8852B && hal->cv == CHIP_CAV)
+ rtw89_enable_otp_burst_mode(rtwdev, true);
+}
+
+static void rtw89_disable_efuse_pwr_cut_ddv(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ if (chip_id == RTL8852A)
+ return;
+
+ if (chip_id == RTL8852B && hal->cv == CHIP_CAV)
+ rtw89_enable_otp_burst_mode(rtwdev, false);
+
+ rtw89_write16_set(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_ISO_EB2CORE);
+ rtw89_write16_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B15);
+
+ fsleep(1000);
+
+ rtw89_write16_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B14);
+ rtw89_write8_clr(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+}
+
+static int rtw89_dump_physical_efuse_map_ddv(struct rtw89_dev *rtwdev, u8 *map,
+ u32 dump_addr, u32 dump_size)
{
u32 efuse_ctl;
u32 addr;
int ret;
- rtw89_switch_efuse_bank(rtwdev, RTW89_EFUSE_BANK_WIFI);
+ rtw89_enable_efuse_pwr_cut_ddv(rtwdev);
for (addr = dump_addr; addr < dump_addr + dump_size; addr++) {
efuse_ctl = u32_encode_bits(addr, B_AX_EF_ADDR_MASK);
@@ -54,6 +105,74 @@ static int rtw89_dump_physical_efuse_map(struct rtw89_dev *rtwdev, u8 *map,
*map++ = (u8)(efuse_ctl & 0xff);
}
+ rtw89_disable_efuse_pwr_cut_ddv(rtwdev);
+
+ return 0;
+}
+
+static int rtw89_dump_physical_efuse_map_dav(struct rtw89_dev *rtwdev, u8 *map,
+ u32 dump_addr, u32 dump_size)
+{
+ u32 addr;
+ u8 val8;
+ int err;
+ int ret;
+
+ for (addr = dump_addr; addr < dump_addr + dump_size; addr++) {
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_CTRL, 0x40, FULL_BIT_MASK);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_LOW_ADDR,
+ addr & 0xff, XTAL_SI_LOW_ADDR_MASK);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_CTRL, addr >> 8,
+ XTAL_SI_HIGH_ADDR_MASK);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_CTRL, 0,
+ XTAL_SI_MODE_SEL_MASK);
+ if (ret)
+ return ret;
+
+ ret = read_poll_timeout_atomic(rtw89_mac_read_xtal_si, err,
+ !err && (val8 & XTAL_SI_RDY),
+ 1, 10000, false,
+ rtwdev, XTAL_SI_CTRL, &val8);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to read dav efuse\n");
+ return ret;
+ }
+
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_READ_VAL, &val8);
+ if (ret)
+ return ret;
+ *map++ = val8;
+ }
+
+ return 0;
+}
+
+static int rtw89_dump_physical_efuse_map(struct rtw89_dev *rtwdev, u8 *map,
+ u32 dump_addr, u32 dump_size, bool dav)
+{
+ int ret;
+
+ if (!map || dump_size == 0)
+ return 0;
+
+ rtw89_switch_efuse_bank(rtwdev, RTW89_EFUSE_BANK_WIFI);
+
+ if (dav) {
+ ret = rtw89_dump_physical_efuse_map_dav(rtwdev, map, dump_addr, dump_size);
+ if (ret)
+ return ret;
+ } else {
+ ret = rtw89_dump_physical_efuse_map_ddv(rtwdev, map, dump_addr, dump_size);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -78,6 +197,9 @@ static int rtw89_dump_logical_efuse_map(struct rtw89_dev *rtwdev, u8 *phy_map,
u8 word_en;
int i;
+ if (!phy_map)
+ return 0;
+
while (phy_idx < physical_size - sec_ctrl_size) {
hdr1 = phy_map[phy_idx];
hdr2 = phy_map[phy_idx + 1];
@@ -109,8 +231,13 @@ int rtw89_parse_efuse_map(struct rtw89_dev *rtwdev)
{
u32 phy_size = rtwdev->chip->physical_efuse_size;
u32 log_size = rtwdev->chip->logical_efuse_size;
+ u32 dav_phy_size = rtwdev->chip->dav_phy_efuse_size;
+ u32 dav_log_size = rtwdev->chip->dav_log_efuse_size;
+ u32 full_log_size = log_size + dav_log_size;
u8 *phy_map = NULL;
u8 *log_map = NULL;
+ u8 *dav_phy_map = NULL;
+ u8 *dav_log_map = NULL;
int ret;
if (rtw89_read16(rtwdev, R_AX_SYS_WL_EFUSE_CTRL) & B_AX_AUTOLOAD_SUS)
@@ -119,27 +246,41 @@ int rtw89_parse_efuse_map(struct rtw89_dev *rtwdev)
rtw89_warn(rtwdev, "failed to check efuse autoload\n");
phy_map = kmalloc(phy_size, GFP_KERNEL);
- log_map = kmalloc(log_size, GFP_KERNEL);
+ log_map = kmalloc(full_log_size, GFP_KERNEL);
+ if (dav_phy_size && dav_log_size) {
+ dav_phy_map = kmalloc(dav_phy_size, GFP_KERNEL);
+ dav_log_map = log_map + log_size;
+ }
- if (!phy_map || !log_map) {
+ if (!phy_map || !log_map || (dav_phy_size && !dav_phy_map)) {
ret = -ENOMEM;
goto out_free;
}
- ret = rtw89_dump_physical_efuse_map(rtwdev, phy_map, 0, phy_size);
+ ret = rtw89_dump_physical_efuse_map(rtwdev, phy_map, 0, phy_size, false);
if (ret) {
rtw89_warn(rtwdev, "failed to dump efuse physical map\n");
goto out_free;
}
+ ret = rtw89_dump_physical_efuse_map(rtwdev, dav_phy_map, 0, dav_phy_size, true);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to dump efuse dav physical map\n");
+ goto out_free;
+ }
- memset(log_map, 0xff, log_size);
+ memset(log_map, 0xff, full_log_size);
ret = rtw89_dump_logical_efuse_map(rtwdev, phy_map, log_map);
if (ret) {
rtw89_warn(rtwdev, "failed to dump efuse logical map\n");
goto out_free;
}
+ ret = rtw89_dump_logical_efuse_map(rtwdev, dav_phy_map, dav_log_map);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to dump efuse dav logical map\n");
+ goto out_free;
+ }
- rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "log_map: ", log_map, log_size);
+ rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "log_map: ", log_map, full_log_size);
ret = rtwdev->chip->ops->read_efuse(rtwdev, log_map);
if (ret) {
@@ -148,6 +289,7 @@ int rtw89_parse_efuse_map(struct rtw89_dev *rtwdev)
}
out_free:
+ kfree(dav_phy_map);
kfree(log_map);
kfree(phy_map);
@@ -169,7 +311,7 @@ int rtw89_parse_phycap_map(struct rtw89_dev *rtwdev)
return -ENOMEM;
ret = rtw89_dump_physical_efuse_map(rtwdev, phycap_map,
- phycap_addr, phycap_size);
+ phycap_addr, phycap_size, false);
if (ret) {
rtw89_warn(rtwdev, "failed to dump phycap map\n");
goto out_free;
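
The efuse.c rework above parses two logical maps (WiFi and DAV) into one buffer: log_map is allocated at log_size + dav_log_size, dav_log_map simply aliases its tail, and freeing the head releases both. A minimal user-space sketch of that layout, with arbitrary sizes standing in for the chip's efuse sizes:

/* Sketch of the combined logical-map buffer; sizes are illustrative. */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	size_t log_size = 2048, dav_log_size = 16;
	size_t full_log_size = log_size + dav_log_size;
	unsigned char *log_map, *dav_log_map;

	log_map = malloc(full_log_size);
	if (!log_map)
		return 1;

	dav_log_map = log_map + log_size;	/* not a separate allocation */
	memset(log_map, 0xff, full_log_size);	/* unprogrammed efuse reads 0xff */
	dav_log_map[0] = 0x12;			/* pretend the DAV parse wrote here */

	printf("dav byte 0 via full map: %02x\n", log_map[log_size]);
	free(log_map);				/* frees the DAV tail too */
	return 0;
}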
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 8a57b75b07c0..6deaf8eec6b4 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -201,6 +201,14 @@ static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
if (chip->chip_id == RTL8852A &&
RTW89_FW_SUIT_VER_CODE(fw_suit) <= RTW89_FW_VER_CODE(0, 13, 29, 0))
rtwdev->fw.old_ht_ra_format = true;
+
+ if (chip->chip_id == RTL8852A &&
+ RTW89_FW_SUIT_VER_CODE(fw_suit) >= RTW89_FW_VER_CODE(0, 13, 35, 0))
+ rtwdev->fw.scan_offload = true;
+
+ if (chip->chip_id == RTL8852A &&
+ RTW89_FW_SUIT_VER_CODE(fw_suit) >= RTW89_FW_VER_CODE(0, 13, 35, 0))
+ rtwdev->fw.tx_wake = true;
}
int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
@@ -555,11 +563,27 @@ fail:
return -EBUSY;
}
-#define H2C_BA_CAM_LEN 4
-int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, bool valid, u8 macid,
- struct ieee80211_ampdu_params *params)
+#define H2C_BA_CAM_LEN 8
+int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ bool valid, struct ieee80211_ampdu_params *params)
{
+ u8 macid = rtwsta->mac_id;
struct sk_buff *skb;
+ u8 entry_idx;
+ int ret;
+
+ ret = valid ?
+ rtw89_core_acquire_sta_ba_entry(rtwsta, params->tid, &entry_idx) :
+ rtw89_core_release_sta_ba_entry(rtwsta, params->tid, &entry_idx);
+ if (ret) {
+ /* it still works even if we don't have static BA CAM, because
+ * hardware can create dynamic BA CAM automatically.
+ */
+ rtw89_debug(rtwdev, RTW89_DBG_TXRX,
+ "failed to %s entry tid=%d for h2c ba cam\n",
+ valid ? "alloc" : "free", params->tid);
+ return 0;
+ }
skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_BA_CAM_LEN);
if (!skb) {
@@ -568,6 +592,7 @@ int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, bool valid, u8 macid,
}
skb_put(skb, H2C_BA_CAM_LEN);
SET_BA_CAM_MACID(skb->data, macid);
+ SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx);
if (!valid)
goto end;
SET_BA_CAM_VALID(skb->data, valid);
@@ -577,7 +602,7 @@ int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, bool valid, u8 macid,
else
SET_BA_CAM_BMAP_SIZE(skb->data, 0);
/* If init req is set, hw will set the ssn */
- SET_BA_CAM_INIT_REQ(skb->data, 0);
+ SET_BA_CAM_INIT_REQ(skb->data, 1);
SET_BA_CAM_SSN(skb->data, params->ssn);
end:
@@ -716,12 +741,14 @@ fail:
}
#define H2C_CMC_TBL_LEN 68
-int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, u8 macid)
+int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
{
struct rtw89_hal *hal = &rtwdev->hal;
struct sk_buff *skb;
u8 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
u8 map_b = hal->antenna_tx == RF_AB ? 1 : 0;
+ u8 macid = rtwvif->mac_id;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN);
if (!skb) {
@@ -743,6 +770,8 @@ int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, u8 macid)
SET_CMC_TBL_ANTSEL_D(skb->data, 0);
SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);
+ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
+ SET_CMC_TBL_DATA_DCM(skb->data, 0);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
@@ -821,13 +850,15 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta)
{
struct rtw89_hal *hal = &rtwdev->hal;
- struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct sk_buff *skb;
u8 pads[RTW89_PPE_BW_NUM];
+ u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
memset(pads, 0, sizeof(pads));
- __get_sta_he_pkt_padding(rtwdev, sta, pads);
+ if (sta)
+ __get_sta_he_pkt_padding(rtwdev, sta, pads);
skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN);
if (!skb) {
@@ -835,7 +866,7 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
return -ENOMEM;
}
skb_put(skb, H2C_CMC_TBL_LEN);
- SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
+ SET_CTRL_INFO_MACID(skb->data, mac_id);
SET_CTRL_INFO_OPERATION(skb->data, 1);
SET_CMC_TBL_DISRTSFB(skb->data, 1);
SET_CMC_TBL_DISDATAFB(skb->data, 1);
@@ -853,7 +884,10 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
- SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, sta->he_cap.has_he);
+ if (sta)
+ SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, sta->he_cap.has_he);
+ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
+ SET_CMC_TBL_DATA_DCM(skb->data, 0);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
@@ -911,28 +945,93 @@ fail:
return -EBUSY;
}
-#define H2C_VIF_MAINTAIN_LEN 4
-int rtw89_fw_h2c_vif_maintain(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif,
- enum rtw89_upd_mode upd_mode)
+#define H2C_BCN_BASE_LEN 12
+int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
struct sk_buff *skb;
+ struct sk_buff *skb_beacon;
+ u16 tim_offset;
+ int bcn_total_len;
+
+ skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, NULL);
+ if (!skb_beacon) {
+ rtw89_err(rtwdev, "failed to get beacon skb\n");
+ return -ENOMEM;
+ }
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_VIF_MAINTAIN_LEN);
+ bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len;
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(bcn_total_len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
+ dev_kfree_skb_any(skb_beacon);
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_BCN_BASE_LEN);
+
+ SET_BCN_UPD_PORT(skb->data, rtwvif->port);
+ SET_BCN_UPD_MBSSID(skb->data, 0);
+ SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx);
+ SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset);
+ SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id);
+ SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL);
+ SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE);
+ SET_BCN_UPD_RATE(skb->data, hal->current_band_type == RTW89_BAND_2G ?
+ RTW89_HW_RATE_CCK1 : RTW89_HW_RATE_OFDM6);
+
+ skb_put_data(skb, skb_beacon->data, skb_beacon->len);
+ dev_kfree_skb_any(skb_beacon);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_BCN_UPD, 0, 1,
+ bcn_total_len);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ dev_kfree_skb_any(skb);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+#define H2C_ROLE_MAINTAIN_LEN 4
+int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta,
+ enum rtw89_upd_mode upd_mode)
+{
+ struct sk_buff *skb;
+ u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
+ u8 self_role;
+
+ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) {
+ if (rtwsta)
+ self_role = RTW89_SELF_ROLE_AP_CLIENT;
+ else
+ self_role = rtwvif->self_role;
+ } else {
+ self_role = rtwvif->self_role;
+ }
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_ROLE_MAINTAIN_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
return -ENOMEM;
}
- skb_put(skb, H2C_VIF_MAINTAIN_LEN);
- SET_FWROLE_MAINTAIN_MACID(skb->data, rtwvif->mac_id);
- SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, rtwvif->self_role);
+ skb_put(skb, H2C_ROLE_MAINTAIN_LEN);
+ SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id);
+ SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role);
SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode);
SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
- H2C_VIF_MAINTAIN_LEN);
+ H2C_ROLE_MAINTAIN_LEN);
if (rtw89_h2c_tx(rtwdev, skb, false)) {
rtw89_err(rtwdev, "failed to send h2c\n");
@@ -948,9 +1047,17 @@ fail:
#define H2C_JOIN_INFO_LEN 4
int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
- u8 dis_conn)
+ struct rtw89_sta *rtwsta, bool dis_conn)
{
struct sk_buff *skb;
+ u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
+ u8 self_role = rtwvif->self_role;
+ u8 net_type = rtwvif->net_type;
+
+ if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) {
+ self_role = RTW89_SELF_ROLE_AP_CLIENT;
+ net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type;
+ }
skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_JOIN_INFO_LEN);
if (!skb) {
@@ -958,7 +1065,7 @@ int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
return -ENOMEM;
}
skb_put(skb, H2C_JOIN_INFO_LEN);
- SET_JOININFO_MACID(skb->data, rtwvif->mac_id);
+ SET_JOININFO_MACID(skb->data, mac_id);
SET_JOININFO_OP(skb->data, dis_conn);
SET_JOININFO_BAND(skb->data, rtwvif->mac_idx);
SET_JOININFO_WMM(skb->data, rtwvif->wmm);
@@ -968,9 +1075,9 @@ int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
SET_JOININFO_TF_MAC_PAD(skb->data, 0);
SET_JOININFO_DL_T_PE(skb->data, 0);
SET_JOININFO_PORT_ID(skb->data, rtwvif->port);
- SET_JOININFO_NET_TYPE(skb->data, rtwvif->net_type);
+ SET_JOININFO_NET_TYPE(skb->data, net_type);
SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role);
- SET_JOININFO_SELF_ROLE(skb->data, rtwvif->self_role);
+ SET_JOININFO_SELF_ROLE(skb->data, self_role);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
@@ -1212,7 +1319,7 @@ fail:
return -EBUSY;
}
-#define H2C_LEN_CXDRVINFO_ROLE (4 + 12 * RTW89_MAX_HW_PORT_NUM + H2C_LEN_CXDRVHDR)
+#define H2C_LEN_CXDRVINFO_ROLE (4 + 12 * RTW89_PORT_NUM + H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -1251,7 +1358,7 @@ int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
- for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++, active++) {
+ for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i);
RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i);
RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i);
@@ -1368,6 +1475,198 @@ fail:
return -EBUSY;
}
+#define H2C_LEN_PKT_OFLD 4
+int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
+{
+ struct sk_buff *skb;
+ u8 *cmd;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_PKT_OFLD);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_LEN_PKT_OFLD);
+ cmd = skb->data;
+
+ RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id);
+ RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_PACKET_OFLD, 1, 1,
+ H2C_LEN_PKT_OFLD);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
+int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
+ struct sk_buff *skb_ofld)
+{
+ struct sk_buff *skb;
+ u8 *cmd;
+ u8 alloc_id;
+
+ alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload,
+ RTW89_MAX_PKT_OFLD_NUM);
+ if (alloc_id == RTW89_MAX_PKT_OFLD_NUM)
+ return -ENOSPC;
+
+ *id = alloc_id;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_PKT_OFLD + skb_ofld->len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_LEN_PKT_OFLD);
+ cmd = skb->data;
+
+ RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id);
+ RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD);
+ RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len);
+ skb_put_data(skb, skb_ofld->data, skb_ofld->len);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_PACKET_OFLD, 1, 1,
+ H2C_LEN_PKT_OFLD + skb_ofld->len);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
+#define H2C_LEN_SCAN_LIST_OFFLOAD 4
+int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
+ struct list_head *chan_list)
+{
+ struct rtw89_mac_chinfo *ch_info;
+ struct sk_buff *skb;
+ int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE;
+ u8 *cmd;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(skb_len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD);
+ cmd = skb->data;
+
+ RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len);
+ /* in unit of 4 bytes */
+ RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4);
+
+ list_for_each_entry(ch_info, chan_list, list) {
+ cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE);
+
+ RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period);
+ RTW89_SET_FWCMD_CHINFO_DWELL(cmd, ch_info->dwell_time);
+ RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch);
+ RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch);
+ RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw);
+ RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action);
+ RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt);
+ RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt);
+ RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data);
+ RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band);
+ RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id);
+ RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch);
+ RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null);
+ RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num);
+ RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]);
+ RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]);
+ RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]);
+ RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]);
+ RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]);
+ RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]);
+ RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]);
+ RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
+#define H2C_LEN_SCAN_OFFLOAD 20
+int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *option,
+ struct rtw89_vif *rtwvif)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct sk_buff *skb;
+ u8 *cmd;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_SCAN_OFFLOAD);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_LEN_SCAN_OFFLOAD);
+ cmd = skb->data;
+
+ RTW89_SET_FWCMD_SCANOFLD_MACID(cmd, rtwvif->mac_id);
+ RTW89_SET_FWCMD_SCANOFLD_PORT_ID(cmd, rtwvif->port);
+ RTW89_SET_FWCMD_SCANOFLD_BAND(cmd, RTW89_PHY_0);
+ RTW89_SET_FWCMD_SCANOFLD_OPERATION(cmd, option->enable);
+ RTW89_SET_FWCMD_SCANOFLD_NOTIFY_END(cmd, true);
+ RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_MODE(cmd, option->target_ch_mode);
+ RTW89_SET_FWCMD_SCANOFLD_START_MODE(cmd, RTW89_SCAN_IMMEDIATE);
+ RTW89_SET_FWCMD_SCANOFLD_SCAN_TYPE(cmd, RTW89_SCAN_ONCE);
+ if (option->target_ch_mode) {
+ RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BW(cmd, scan_info->op_bw);
+ RTW89_SET_FWCMD_SCANOFLD_TARGET_PRI_CH(cmd,
+ scan_info->op_pri_ch);
+ RTW89_SET_FWCMD_SCANOFLD_TARGET_CENTRAL_CH(cmd,
+ scan_info->op_chan);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_SCANOFLD, 1, 1,
+ H2C_LEN_SCAN_OFFLOAD);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
struct rtw89_fw_h2c_rf_reg_info *info,
u16 len, u8 page)
@@ -1533,15 +1832,13 @@ void rtw89_fw_c2h_work(struct work_struct *work)
static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
struct rtw89_mac_h2c_info *info)
{
- static const u32 h2c_reg[RTW89_H2CREG_MAX] = {
- R_AX_H2CREG_DATA0, R_AX_H2CREG_DATA1,
- R_AX_H2CREG_DATA2, R_AX_H2CREG_DATA3
- };
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const u32 *h2c_reg = chip->h2c_regs;
u8 i, val, len;
int ret;
ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false,
- rtwdev, R_AX_H2CREG_CTRL);
+ rtwdev, chip->h2c_ctrl_reg);
if (ret) {
rtw89_warn(rtwdev, "FW does not process h2c registers\n");
return ret;
@@ -1555,7 +1852,7 @@ static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
for (i = 0; i < RTW89_H2CREG_MAX; i++)
rtw89_write32(rtwdev, h2c_reg[i], info->h2creg[i]);
- rtw89_write8(rtwdev, R_AX_H2CREG_CTRL, B_AX_H2CREG_TRIGGER);
+ rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER);
return 0;
}
@@ -1563,10 +1860,8 @@ static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
struct rtw89_mac_c2h_info *info)
{
- static const u32 c2h_reg[RTW89_C2HREG_MAX] = {
- R_AX_C2HREG_DATA0, R_AX_C2HREG_DATA1,
- R_AX_C2HREG_DATA2, R_AX_C2HREG_DATA3
- };
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const u32 *c2h_reg = chip->c2h_regs;
u32 ret;
u8 i, val;
@@ -1574,7 +1869,7 @@ static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1,
RTW89_C2H_TIMEOUT, false, rtwdev,
- R_AX_C2HREG_CTRL);
+ chip->c2h_ctrl_reg);
if (ret) {
rtw89_warn(rtwdev, "c2h reg timeout\n");
return ret;
@@ -1583,7 +1878,7 @@ static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
for (i = 0; i < RTW89_C2HREG_MAX; i++)
info->c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]);
- rtw89_write8(rtwdev, R_AX_C2HREG_CTRL, 0);
+ rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0);
info->id = RTW89_GET_C2H_HDR_FUNC(*info->c2hreg);
info->content_len = (RTW89_GET_C2H_HDR_LEN(*info->c2hreg) << 2) -
@@ -1640,3 +1935,322 @@ void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev)
rtw89_fw_prog_cnt_dump(rtwdev);
}
+
+static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev)
+{
+ struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
+ struct rtw89_pktofld_info *info, *tmp;
+ u8 idx;
+
+ for (idx = RTW89_BAND_2G; idx < NUM_NL80211_BANDS; idx++) {
+ if (!(rtwdev->chip->support_bands & BIT(idx)))
+ continue;
+
+ list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) {
+ rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
+ rtw89_core_release_bit_map(rtwdev->pkt_offload,
+ info->id);
+ list_del(&info->list);
+ kfree(info);
+ }
+ }
+}
+
+static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct sk_buff *skb)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
+ struct rtw89_pktofld_info *info;
+ struct sk_buff *new;
+ int ret = 0;
+ u8 band;
+
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
+ if (!(rtwdev->chip->support_bands & BIT(band)))
+ continue;
+
+ new = skb_copy(skb, GFP_KERNEL);
+ if (!new) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ skb_put_data(new, ies->ies[band], ies->len[band]);
+ skb_put_data(new, ies->common_ies, ies->common_ie_len);
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ ret = -ENOMEM;
+ kfree_skb(new);
+ goto out;
+ }
+
+ list_add_tail(&info->list, &scan_info->pkt_list[band]);
+ ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
+ if (ret)
+ goto out;
+
+ kfree_skb(new);
+ }
+out:
+ return ret;
+}
+
+static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ struct cfg80211_scan_request *req = rtwvif->scan_req;
+ struct sk_buff *skb;
+ u8 num = req->n_ssids, i;
+ int ret;
+
+ for (i = 0; i < num; i++) {
+ skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
+ req->ssids[i].ssid,
+ req->ssids[i].ssid_len,
+ req->ie_len);
+ if (!skb)
+ return -ENOMEM;
+
+ ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb);
+ kfree_skb(skb);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
+ int ssid_num,
+ struct rtw89_mac_chinfo *ch_info)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_pktofld_info *info;
+ u8 band, probe_count = 0;
+
+ ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
+ ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
+ ch_info->bw = RTW89_SCAN_WIDTH;
+ ch_info->tx_pkt = true;
+ ch_info->cfg_tx_pwr = false;
+ ch_info->tx_pwr_idx = 0;
+ ch_info->tx_null = false;
+ ch_info->pause_data = false;
+
+ if (ssid_num) {
+ ch_info->num_pkt = ssid_num;
+ band = ch_info->ch_band;
+
+ list_for_each_entry(info, &scan_info->pkt_list[band], list) {
+ ch_info->probe_id = info->id;
+ ch_info->pkt_id[probe_count] = info->id;
+ if (++probe_count >= ssid_num)
+ break;
+ }
+ if (probe_count != ssid_num)
+ rtw89_err(rtwdev, "SSID num differs from list len\n");
+ }
+
+ switch (chan_type) {
+ case RTW89_CHAN_OPERATE:
+ ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
+ ch_info->central_ch = scan_info->op_chan;
+ ch_info->pri_ch = scan_info->op_pri_ch;
+ ch_info->ch_band = scan_info->op_band;
+ ch_info->bw = scan_info->op_bw;
+ ch_info->tx_null = true;
+ ch_info->num_pkt = 0;
+ break;
+ case RTW89_CHAN_DFS:
+ ch_info->period = min_t(u8, ch_info->period,
+ RTW89_DFS_CHAN_TIME);
+ ch_info->dwell_time = RTW89_DWELL_TIME;
+ break;
+ case RTW89_CHAN_ACTIVE:
+ break;
+ default:
+ rtw89_err(rtwdev, "Channel type out of bound\n");
+ }
+}
+
+static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ struct cfg80211_scan_request *req = rtwvif->scan_req;
+ struct rtw89_mac_chinfo *ch_info, *tmp;
+ struct ieee80211_channel *channel;
+ struct list_head chan_list;
+ bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
+ int list_len = req->n_channels, off_chan_time = 0;
+ enum rtw89_chan_type type;
+ int ret = 0, i;
+
+ INIT_LIST_HEAD(&chan_list);
+ for (i = 0; i < req->n_channels; i++) {
+ channel = req->channels[i];
+ ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
+ if (!ch_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ch_info->period = req->duration_mandatory ?
+ req->duration : RTW89_CHANNEL_TIME;
+ ch_info->ch_band = channel->band;
+ ch_info->central_ch = channel->hw_value;
+ ch_info->pri_ch = channel->hw_value;
+ ch_info->rand_seq_num = random_seq;
+
+ if (channel->flags &
+ (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
+ type = RTW89_CHAN_DFS;
+ else
+ type = RTW89_CHAN_ACTIVE;
+ rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info);
+
+ if (rtwvif->net_type != RTW89_NET_TYPE_NO_LINK &&
+ off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) {
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp) {
+ ret = -ENOMEM;
+ kfree(ch_info);
+ goto out;
+ }
+
+ type = RTW89_CHAN_OPERATE;
+ tmp->period = req->duration_mandatory ?
+ req->duration : RTW89_CHANNEL_TIME;
+ rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp);
+ list_add_tail(&tmp->list, &chan_list);
+ off_chan_time = 0;
+ list_len++;
+ }
+ list_add_tail(&ch_info->list, &chan_list);
+ off_chan_time += ch_info->period;
+ }
+ rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);
+
+out:
+ list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
+ list_del(&ch_info->list);
+ kfree(ch_info);
+ }
+
+ return ret;
+}
+
+static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ int ret;
+
+ ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif);
+ if (ret) {
+ rtw89_err(rtwdev, "Update probe request failed\n");
+ goto out;
+ }
+ ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif);
+out:
+ return ret;
+}
+
+void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *scan_req)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct cfg80211_scan_request *req = &scan_req->req;
+ u8 mac_addr[ETH_ALEN];
+
+ rtwdev->scan_info.scanning_vif = vif;
+ rtwvif->scan_ies = &scan_req->ies;
+ rtwvif->scan_req = req;
+ ieee80211_stop_queues(rtwdev->hw);
+
+ if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
+ get_random_mask_addr(mac_addr, req->mac_addr,
+ req->mac_addr_mask);
+ else
+ ether_addr_copy(mac_addr, vif->addr);
+ rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true);
+
+ rtwdev->hal.rx_fltr &= ~B_AX_A_BCN_CHK_EN;
+ rtwdev->hal.rx_fltr &= ~B_AX_A_BC;
+ rtwdev->hal.rx_fltr &= ~B_AX_A_A1_MATCH;
+ rtw89_write32_mask(rtwdev,
+ rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
+ B_AX_RX_FLTR_CFG_MASK,
+ rtwdev->hal.rx_fltr);
+}
+
+void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ bool aborted)
+{
+ struct cfg80211_scan_info info = {
+ .aborted = aborted,
+ };
+ struct rtw89_vif *rtwvif;
+
+ if (!vif)
+ return;
+
+ rtwdev->hal.rx_fltr |= B_AX_A_BCN_CHK_EN;
+ rtwdev->hal.rx_fltr |= B_AX_A_BC;
+ rtwdev->hal.rx_fltr |= B_AX_A_A1_MATCH;
+ rtw89_write32_mask(rtwdev,
+ rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
+ B_AX_RX_FLTR_CFG_MASK,
+ rtwdev->hal.rx_fltr);
+
+ rtw89_core_scan_complete(rtwdev, vif, true);
+ ieee80211_scan_completed(rtwdev->hw, &info);
+ ieee80211_wake_queues(rtwdev->hw);
+
+ rtw89_release_pkt_list(rtwdev);
+ rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ rtwvif->scan_req = NULL;
+ rtwvif->scan_ies = NULL;
+ rtwdev->scan_info.scanning_vif = NULL;
+}
+
+void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
+{
+ rtw89_hw_scan_offload(rtwdev, vif, false);
+ rtw89_hw_scan_complete(rtwdev, vif, true);
+}
+
+int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct rtw89_scan_option opt = {0};
+ struct rtw89_vif *rtwvif;
+ int ret = 0;
+
+ rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL;
+ if (!rtwvif)
+ return -EINVAL;
+
+ opt.enable = enable;
+ opt.target_ch_mode = rtwvif->net_type != RTW89_NET_TYPE_NO_LINK;
+ if (enable) {
+ ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif);
+ if (ret)
+ goto out;
+ }
+ rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif);
+out:
+ return ret;
+}
+
+void rtw89_store_op_chan(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ scan_info->op_pri_ch = hal->current_primary_channel;
+ scan_info->op_chan = hal->current_channel;
+ scan_info->op_bw = hal->current_band_width;
+ scan_info->op_band = hal->current_band_type;
+}
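
The packet-offload handlers added to fw.c above hand firmware an ID taken from a driver-side bitmap (rtw89_core_acquire_bit_map over pkt_offload, bounded by RTW89_MAX_PKT_OFLD_NUM) and clear that bit again when the packet is deleted. A rough user-space model of that allocate/release cycle; the helpers here are simplified stand-ins for the kernel bitmap API, not driver code.

/* Sketch of first-free-slot ID allocation for offloaded packets. */
#include <stdio.h>
#include <stdbool.h>

#define MAX_PKT_OFLD_NUM 255

static bool pkt_offload[MAX_PKT_OFLD_NUM];

static int acquire_pkt_ofld_id(void)
{
	int i;

	for (i = 0; i < MAX_PKT_OFLD_NUM; i++) {
		if (!pkt_offload[i]) {
			pkt_offload[i] = true;
			return i;
		}
	}
	return -1;		/* corresponds to the -ENOSPC case in the driver */
}

static void release_pkt_ofld_id(int id)
{
	pkt_offload[id] = false;
}

int main(void)
{
	int a = acquire_pkt_ofld_id();
	int b = acquire_pkt_ofld_id();

	printf("allocated ids %d and %d\n", a, b);
	release_pkt_ofld_id(a);
	printf("next id reuses the hole: %d\n", acquire_pkt_ofld_id());
	return 0;
}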
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 2d36dc27222f..ed8609b204e0 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -123,6 +123,27 @@ enum rtw89_fw_log_comp {
RTW89_FW_LOG_COMP_MCC = 20,
};
+enum rtw89_pkt_offload_op {
+ RTW89_PKT_OFLD_OP_ADD,
+ RTW89_PKT_OFLD_OP_DEL,
+ RTW89_PKT_OFLD_OP_READ,
+};
+
+enum rtw89_scanofld_notify_reason {
+ RTW89_SCAN_DWELL_NOTIFY,
+ RTW89_SCAN_PRE_TX_NOTIFY,
+ RTW89_SCAN_POST_TX_NOTIFY,
+ RTW89_SCAN_ENTER_CH_NOTIFY,
+ RTW89_SCAN_LEAVE_CH_NOTIFY,
+ RTW89_SCAN_END_SCAN_NOTIFY,
+};
+
+enum rtw89_chan_type {
+ RTW89_CHAN_OPERATE = 0,
+ RTW89_CHAN_ACTIVE,
+ RTW89_CHAN_DFS,
+};
+
#define FWDL_SECTION_MAX_NUM 10
#define FWDL_SECTION_CHKSUM_LEN 8
#define FWDL_SECTION_PER_PKT_LEN 2020
@@ -156,6 +177,50 @@ struct rtw89_h2creg_sch_tx_en {
u16 rsvd:15;
} __packed;
+#define RTW89_CHANNEL_TIME 45
+#define RTW89_DFS_CHAN_TIME 105
+#define RTW89_OFF_CHAN_TIME 100
+#define RTW89_DWELL_TIME 20
+#define RTW89_SCAN_WIDTH 0
+#define RTW89_SCANOFLD_MAX_SSID 8
+#define RTW89_SCANOFLD_MAX_IE_LEN 512
+#define RTW89_SCANOFLD_PKT_NONE 0xFF
+#define RTW89_SCANOFLD_DEBUG_MASK 0x1F
+#define RTW89_MAC_CHINFO_SIZE 20
+
+struct rtw89_mac_chinfo {
+ u8 period;
+ u8 dwell_time;
+ u8 central_ch;
+ u8 pri_ch;
+ u8 bw:3;
+ u8 notify_action:5;
+ u8 num_pkt:4;
+ u8 tx_pkt:1;
+ u8 pause_data:1;
+ u8 ch_band:2;
+ u8 probe_id;
+ u8 dfs_ch:1;
+ u8 tx_null:1;
+ u8 rand_seq_num:1;
+ u8 cfg_tx_pwr:1;
+ u8 rsvd0: 4;
+ u8 pkt_id[RTW89_SCANOFLD_MAX_SSID];
+ u16 tx_pwr_idx;
+ u8 rsvd1;
+ struct list_head list;
+};
+
+struct rtw89_scan_option {
+ bool enable;
+ bool target_ch_mode;
+};
+
+struct rtw89_pktofld_info {
+ struct list_head list;
+ u8 id;
+};
+
static inline void RTW89_SET_FWCMD_RA_IS_DIS(void *cmd, u32 val)
{
le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(0));
@@ -1056,6 +1121,106 @@ static inline void SET_CMC_TBL_CSI_BW(void *table, u32 val)
GENMASK(31, 30));
}
+static inline void SET_BCN_UPD_PORT(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
+}
+
+static inline void SET_BCN_UPD_MBSSID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8));
+}
+
+static inline void SET_BCN_UPD_BAND(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 16));
+}
+
+static inline void SET_BCN_UPD_GRP_IE_OFST(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, (val - 24) | BIT(7), GENMASK(31, 24));
+}
+
+static inline void SET_BCN_UPD_MACID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(7, 0));
+}
+
+static inline void SET_BCN_UPD_SSN_SEL(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(9, 8));
+}
+
+static inline void SET_BCN_UPD_SSN_MODE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(11, 10));
+}
+
+static inline void SET_BCN_UPD_RATE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(20, 12));
+}
+
+static inline void SET_BCN_UPD_TXPWR(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(23, 21));
+}
+
+static inline void SET_BCN_UPD_TXINFO_CTRL_EN(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(0));
+}
+
+static inline void SET_BCN_UPD_NTX_PATH_EN(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(4, 1));
+}
+
+static inline void SET_BCN_UPD_PATH_MAP_A(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(6, 5));
+}
+
+static inline void SET_BCN_UPD_PATH_MAP_B(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(8, 7));
+}
+
+static inline void SET_BCN_UPD_PATH_MAP_C(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(10, 9));
+}
+
+static inline void SET_BCN_UPD_PATH_MAP_D(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(12, 11));
+}
+
+static inline void SET_BCN_UPD_PATH_ANTSEL_A(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(13));
+}
+
+static inline void SET_BCN_UPD_PATH_ANTSEL_B(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(14));
+}
+
+static inline void SET_BCN_UPD_PATH_ANTSEL_C(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(15));
+}
+
+static inline void SET_BCN_UPD_PATH_ANTSEL_D(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(16));
+}
+
+static inline void SET_BCN_UPD_CSA_OFST(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(31, 17));
+}
+
static inline void SET_FWROLE_MAINTAIN_MACID(void *h2c, u32 val)
{
le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
@@ -1226,6 +1391,26 @@ static inline void SET_BA_CAM_SSN(void *h2c, u32 val)
le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 20));
}
+static inline void SET_BA_CAM_UID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 1, val, GENMASK(7, 0));
+}
+
+static inline void SET_BA_CAM_STD_EN(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 1, val, BIT(8));
+}
+
+static inline void SET_BA_CAM_BAND(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 1, val, BIT(9));
+}
+
+static inline void SET_BA_CAM_ENTRY_IDX_V1(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 1, val, GENMASK(31, 28));
+}
+
static inline void SET_LPS_PARM_MACID(void *h2c, u32 val)
{
le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
@@ -1316,6 +1501,14 @@ enum rtw89_btc_cxdrvinfo {
CXDRVINFO_MAX,
};
+enum rtw89_scan_mode {
+ RTW89_SCAN_IMMEDIATE,
+};
+
+enum rtw89_scan_type {
+ RTW89_SCAN_ONCE,
+};
+
static inline void RTW89_SET_FWCMD_CXHDR_TYPE(void *cmd, u8 val)
{
u8p_replace_bits((u8 *)(cmd) + 0, val, GENMASK(7, 0));
@@ -1586,6 +1779,242 @@ static inline void RTW89_SET_FWCMD_CXRFK_TYPE(void *cmd, u32 val)
le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(17, 10));
}
+static inline void RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(10, 8));
+}
+
+static inline void RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(31, 16));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_CH_NUM(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PERIOD(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_DWELL(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_CENTER_CH(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PRI_CH(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(31, 24));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_BW(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(2, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_ACTION(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(7, 3));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_NUM_PKT(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(11, 8));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_TX(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(12));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(13));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_BAND(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(15, 14));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT_ID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_DFS(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(24));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_TX_NULL(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(25));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_RANDOM(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(26));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_CFG_TX(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(27));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT0(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT1(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT2(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT3(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(31, 24));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT4(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT5(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT6(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_PKT7(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(31, 24));
+}
+
+static inline void RTW89_SET_FWCMD_CHINFO_POWER_IDX(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 16), val, GENMASK(15, 0));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_MACID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_NORM_CY(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_PORT_ID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(18, 16));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_BAND(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, BIT(19));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_OPERATION(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(21, 20));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BAND(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(23, 22));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_NOTIFY_END(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(0));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_MODE(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(1));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_START_MODE(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(2));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_SCAN_TYPE(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(4, 3));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BW(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(7, 5));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TARGET_PRI_CH(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TARGET_CENTRAL_CH(void *cmd,
+ u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_PROBE_REQ_PKT_ID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(31, 24));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_NORM_PD(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(15, 0));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_SLOW_PD(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TSF_HIGH(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_FWCMD_SCANOFLD_TSF_SLOW(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 16), val, GENMASK(31, 0));
+}
+
#define RTW89_C2H_HEADER_LEN 8
#define RTW89_GET_C2H_CATEGORY(c2h) \
@@ -1642,6 +2071,26 @@ static inline void RTW89_SET_FWCMD_CXRFK_TYPE(void *cmd, u32 val)
#define RTW89_MK_HT_RATE(nss, mcs) (FIELD_PREP(GENMASK(4, 3), nss) | \
FIELD_PREP(GENMASK(2, 0), mcs))
+#define RTW89_GET_MAC_C2H_PKTOFLD_ID(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(7, 0))
+#define RTW89_GET_MAC_C2H_PKTOFLD_OP(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(10, 8))
+#define RTW89_GET_MAC_C2H_PKTOFLD_LEN(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(31, 16))
+
+#define RTW89_GET_MAC_C2H_SCANOFLD_PRI_CH(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(7, 0))
+#define RTW89_GET_MAC_C2H_SCANOFLD_RSP(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(19, 16))
+#define RTW89_GET_MAC_C2H_SCANOFLD_STATUS(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(23, 20))
+#define RTW89_GET_MAC_C2H_SCANOFLD_TX_FAIL(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(3, 0))
+#define RTW89_GET_MAC_C2H_SCANOFLD_AIR_DENSITY(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(7, 4))
+#define RTW89_GET_MAC_C2H_SCANOFLD_BAND(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(25, 24))
+
#define RTW89_FW_HDR_SIZE 32
#define RTW89_FW_SECTION_HDR_SIZE 16
@@ -1709,6 +2158,7 @@ struct rtw89_fw_h2c_rf_reg_info {
/* CLASS 5 - Frame Exchange */
#define H2C_CL_MAC_FR_EXCHG 0x5
#define H2C_FUNC_MAC_CCTLINFO_UD 0x2
+#define H2C_FUNC_MAC_BCN_UPD 0x5
/* CLASS 6 - Address CAM */
#define H2C_CL_MAC_ADDR_CAM_UPDATE 0x6
@@ -1721,9 +2171,12 @@ struct rtw89_fw_h2c_rf_reg_info {
/* CLASS 9 - FW offload */
#define H2C_CL_MAC_FW_OFLD 0x9
+#define H2C_FUNC_PACKET_OFLD 0x1
#define H2C_FUNC_MAC_MACID_PAUSE 0x8
#define H2C_FUNC_USR_EDCA 0xF
#define H2C_FUNC_OFLD_CFG 0x14
+#define H2C_FUNC_ADD_SCANOFLD_CH 0x16
+#define H2C_FUNC_SCANOFLD 0x17
/* CLASS 10 - Security CAM */
#define H2C_CL_MAC_SEC_CAM 0xa
@@ -1750,21 +2203,25 @@ int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev);
void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u8 type, u8 cat, u8 class, u8 func,
bool rack, bool dack, u32 len);
-int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, u8 macid);
+int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta);
+int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *vif,
struct rtw89_sta *rtwsta, const u8 *scan_mac_addr);
void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h);
void rtw89_fw_c2h_work(struct work_struct *work);
-int rtw89_fw_h2c_vif_maintain(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif,
- enum rtw89_upd_mode upd_mode);
+int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta,
+ enum rtw89_upd_mode upd_mode);
int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
- u8 dis_conn);
+ struct rtw89_sta *rtwsta, bool dis_conn);
int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
bool pause);
int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
@@ -1775,6 +2232,14 @@ int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id);
+int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
+ struct sk_buff *skb_ofld);
+int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
+ struct list_head *chan_list);
+int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *opt,
+ struct rtw89_vif *vif);
int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
struct rtw89_fw_h2c_rf_reg_info *info,
u16 len, u8 page);
@@ -1785,8 +2250,9 @@ int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len);
void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev);
void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid);
-int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, bool valid, u8 macid,
- struct ieee80211_ampdu_params *params);
+int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ bool valid, struct ieee80211_ampdu_params *params);
+
int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
struct rtw89_lps_parm *lps_param);
struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(u32 len);
@@ -1796,5 +2262,16 @@ int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
struct rtw89_mac_c2h_info *c2h_info);
int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable);
void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev);
+void rtw89_store_op_chan(struct rtw89_dev *rtwdev);
+void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *req);
+void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ bool aborted);
+int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ bool enable);
+void rtw89_hw_scan_status_report(struct rtw89_dev *rtwdev, struct sk_buff *skb);
+void rtw89_hw_scan_chan_switch(struct rtw89_dev *rtwdev, struct sk_buff *skb);
+void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
+void rtw89_store_op_chan(struct rtw89_dev *rtwdev);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index b98c47e9ecfe..5e554bd9f036 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -172,6 +172,7 @@ static void rtw89_mac_dump_qta_lost(struct rtw89_dev *rtwdev)
qempty.dle_type = DLE_CTRL_TYPE_PLE;
qempty.grpsel = 0;
+ qempty.qempty = ~(u32)0;
ret = dle_dfi_qempty(rtwdev, &qempty);
if (ret)
rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__);
@@ -481,9 +482,10 @@ int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err)
}
EXPORT_SYMBOL(rtw89_mac_set_err_status);
-const struct rtw89_hfc_prec_cfg rtw_hfc_preccfg_pcie = {
+const struct rtw89_hfc_prec_cfg rtw89_hfc_preccfg_pcie = {
2, 40, 0, 0, 1, 0, 0, 0
};
+EXPORT_SYMBOL(rtw89_hfc_preccfg_pcie);
static int hfc_reset_param(struct rtw89_dev *rtwdev)
{
@@ -567,6 +569,8 @@ static int hfc_pub_cfg_chk(struct rtw89_dev *rtwdev)
static int hfc_ch_ctrl(struct rtw89_dev *rtwdev, u8 ch)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_page_regs *regs = chip->page_regs;
struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg;
int ret = 0;
@@ -586,13 +590,15 @@ static int hfc_ch_ctrl(struct rtw89_dev *rtwdev, u8 ch)
val = u32_encode_bits(cfg[ch].min, B_AX_MIN_PG_MASK) |
u32_encode_bits(cfg[ch].max, B_AX_MAX_PG_MASK) |
(cfg[ch].grp ? B_AX_GRP : 0);
- rtw89_write32(rtwdev, R_AX_ACH0_PAGE_CTRL + ch * 4, val);
+ rtw89_write32(rtwdev, regs->ach_page_ctrl + ch * 4, val);
return 0;
}
static int hfc_upd_ch_info(struct rtw89_dev *rtwdev, u8 ch)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_page_regs *regs = chip->page_regs;
struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
struct rtw89_hfc_ch_info *info = param->ch_info;
const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg;
@@ -606,7 +612,7 @@ static int hfc_upd_ch_info(struct rtw89_dev *rtwdev, u8 ch)
if (ch > RTW89_DMA_H2C)
return -EINVAL;
- val = rtw89_read32(rtwdev, R_AX_ACH0_PAGE_INFO + ch * 4);
+ val = rtw89_read32(rtwdev, regs->ach_page_info + ch * 4);
info[ch].aval = u32_get_bits(val, B_AX_AVAL_PG_MASK);
if (ch < RTW89_DMA_H2C)
info[ch].used = u32_get_bits(val, B_AX_USE_PG_MASK);
@@ -618,6 +624,8 @@ static int hfc_upd_ch_info(struct rtw89_dev *rtwdev, u8 ch)
static int hfc_pub_ctrl(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_page_regs *regs = chip->page_regs;
const struct rtw89_hfc_pub_cfg *cfg = &rtwdev->mac.hfc_param.pub_cfg;
u32 val;
int ret;
@@ -632,16 +640,18 @@ static int hfc_pub_ctrl(struct rtw89_dev *rtwdev)
val = u32_encode_bits(cfg->grp0, B_AX_PUBPG_G0_MASK) |
u32_encode_bits(cfg->grp1, B_AX_PUBPG_G1_MASK);
- rtw89_write32(rtwdev, R_AX_PUB_PAGE_CTRL1, val);
+ rtw89_write32(rtwdev, regs->pub_page_ctrl1, val);
val = u32_encode_bits(cfg->wp_thrd, B_AX_WP_THRD_MASK);
- rtw89_write32(rtwdev, R_AX_WP_PAGE_CTRL2, val);
+ rtw89_write32(rtwdev, regs->wp_page_ctrl2, val);
return 0;
}
static int hfc_upd_mix_info(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_page_regs *regs = chip->page_regs;
struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;
struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
@@ -653,20 +663,20 @@ static int hfc_upd_mix_info(struct rtw89_dev *rtwdev)
if (ret)
return ret;
- val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO1);
+ val = rtw89_read32(rtwdev, regs->pub_page_info1);
info->g0_used = u32_get_bits(val, B_AX_G0_USE_PG_MASK);
info->g1_used = u32_get_bits(val, B_AX_G1_USE_PG_MASK);
- val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO3);
+ val = rtw89_read32(rtwdev, regs->pub_page_info3);
info->g0_aval = u32_get_bits(val, B_AX_G0_AVAL_PG_MASK);
info->g1_aval = u32_get_bits(val, B_AX_G1_AVAL_PG_MASK);
info->pub_aval =
- u32_get_bits(rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO2),
+ u32_get_bits(rtw89_read32(rtwdev, regs->pub_page_info2),
B_AX_PUB_AVAL_PG_MASK);
info->wp_aval =
- u32_get_bits(rtw89_read32(rtwdev, R_AX_WP_PAGE_INFO1),
+ u32_get_bits(rtw89_read32(rtwdev, regs->wp_page_info1),
B_AX_WP_AVAL_PG_MASK);
- val = rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL);
+ val = rtw89_read32(rtwdev, regs->hci_fc_ctrl);
param->en = val & B_AX_HCI_FC_EN ? 1 : 0;
param->h2c_en = val & B_AX_HCI_FC_CH12_EN ? 1 : 0;
param->mode = u32_get_bits(val, B_AX_HCI_FC_MODE_MASK);
@@ -679,21 +689,21 @@ static int hfc_upd_mix_info(struct rtw89_dev *rtwdev)
prec_cfg->wp_ch811_full_cond =
u32_get_bits(val, B_AX_HCI_FC_WP_CH811_FULL_COND_MASK);
- val = rtw89_read32(rtwdev, R_AX_CH_PAGE_CTRL);
+ val = rtw89_read32(rtwdev, regs->ch_page_ctrl);
prec_cfg->ch011_prec = u32_get_bits(val, B_AX_PREC_PAGE_CH011_MASK);
prec_cfg->h2c_prec = u32_get_bits(val, B_AX_PREC_PAGE_CH12_MASK);
- val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_CTRL2);
+ val = rtw89_read32(rtwdev, regs->pub_page_ctrl2);
pub_cfg->pub_max = u32_get_bits(val, B_AX_PUBPG_ALL_MASK);
- val = rtw89_read32(rtwdev, R_AX_WP_PAGE_CTRL1);
+ val = rtw89_read32(rtwdev, regs->wp_page_ctrl1);
prec_cfg->wp_ch07_prec = u32_get_bits(val, B_AX_PREC_PAGE_WP_CH07_MASK);
prec_cfg->wp_ch811_prec = u32_get_bits(val, B_AX_PREC_PAGE_WP_CH811_MASK);
- val = rtw89_read32(rtwdev, R_AX_WP_PAGE_CTRL2);
+ val = rtw89_read32(rtwdev, regs->wp_page_ctrl2);
pub_cfg->wp_thrd = u32_get_bits(val, B_AX_WP_THRD_MASK);
- val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_CTRL1);
+ val = rtw89_read32(rtwdev, regs->pub_page_ctrl1);
pub_cfg->grp0 = u32_get_bits(val, B_AX_PUBPG_G0_MASK);
pub_cfg->grp1 = u32_get_bits(val, B_AX_PUBPG_G1_MASK);
@@ -706,20 +716,24 @@ static int hfc_upd_mix_info(struct rtw89_dev *rtwdev)
static void hfc_h2c_cfg(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_page_regs *regs = chip->page_regs;
struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
const struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
u32 val;
val = u32_encode_bits(prec_cfg->h2c_prec, B_AX_PREC_PAGE_CH12_MASK);
- rtw89_write32(rtwdev, R_AX_CH_PAGE_CTRL, val);
+ rtw89_write32(rtwdev, regs->ch_page_ctrl, val);
- rtw89_write32_mask(rtwdev, R_AX_HCI_FC_CTRL,
+ rtw89_write32_mask(rtwdev, regs->hci_fc_ctrl,
B_AX_HCI_FC_CH12_FULL_COND_MASK,
prec_cfg->h2c_full_cond);
}
static void hfc_mix_cfg(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_page_regs *regs = chip->page_regs;
struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
const struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;
const struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
@@ -727,18 +741,18 @@ static void hfc_mix_cfg(struct rtw89_dev *rtwdev)
val = u32_encode_bits(prec_cfg->ch011_prec, B_AX_PREC_PAGE_CH011_MASK) |
u32_encode_bits(prec_cfg->h2c_prec, B_AX_PREC_PAGE_CH12_MASK);
- rtw89_write32(rtwdev, R_AX_CH_PAGE_CTRL, val);
+ rtw89_write32(rtwdev, regs->ch_page_ctrl, val);
val = u32_encode_bits(pub_cfg->pub_max, B_AX_PUBPG_ALL_MASK);
- rtw89_write32(rtwdev, R_AX_PUB_PAGE_CTRL2, val);
+ rtw89_write32(rtwdev, regs->pub_page_ctrl2, val);
val = u32_encode_bits(prec_cfg->wp_ch07_prec,
B_AX_PREC_PAGE_WP_CH07_MASK) |
u32_encode_bits(prec_cfg->wp_ch811_prec,
B_AX_PREC_PAGE_WP_CH811_MASK);
- rtw89_write32(rtwdev, R_AX_WP_PAGE_CTRL1, val);
+ rtw89_write32(rtwdev, regs->wp_page_ctrl1, val);
- val = u32_replace_bits(rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL),
+ val = u32_replace_bits(rtw89_read32(rtwdev, regs->hci_fc_ctrl),
param->mode, B_AX_HCI_FC_MODE_MASK);
val = u32_replace_bits(val, prec_cfg->ch011_full_cond,
B_AX_HCI_FC_WD_FULL_COND_MASK);
@@ -748,21 +762,23 @@ static void hfc_mix_cfg(struct rtw89_dev *rtwdev)
B_AX_HCI_FC_WP_CH07_FULL_COND_MASK);
val = u32_replace_bits(val, prec_cfg->wp_ch811_full_cond,
B_AX_HCI_FC_WP_CH811_FULL_COND_MASK);
- rtw89_write32(rtwdev, R_AX_HCI_FC_CTRL, val);
+ rtw89_write32(rtwdev, regs->hci_fc_ctrl, val);
}
static void hfc_func_en(struct rtw89_dev *rtwdev, bool en, bool h2c_en)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_page_regs *regs = chip->page_regs;
struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
u32 val;
- val = rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL);
+ val = rtw89_read32(rtwdev, regs->hci_fc_ctrl);
param->en = en;
param->h2c_en = h2c_en;
val = en ? (val | B_AX_HCI_FC_EN) : (val & ~B_AX_HCI_FC_EN);
val = h2c_en ? (val | B_AX_HCI_FC_CH12_EN) :
(val & ~B_AX_HCI_FC_CH12_EN);
- rtw89_write32(rtwdev, R_AX_HCI_FC_CTRL, val);
+ rtw89_write32(rtwdev, regs->hci_fc_ctrl, val);
}
static int hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_en)
@@ -915,23 +931,31 @@ rtw89_mac_get_req_pwr_state(struct rtw89_dev *rtwdev)
}
static void rtw89_mac_send_rpwm(struct rtw89_dev *rtwdev,
- enum rtw89_rpwm_req_pwr_state req_pwr_state)
+ enum rtw89_rpwm_req_pwr_state req_pwr_state,
+ bool notify_wake)
{
u16 request;
+ spin_lock_bh(&rtwdev->rpwm_lock);
+
request = rtw89_read16(rtwdev, R_AX_RPWM);
request ^= request | PS_RPWM_TOGGLE;
-
- rtwdev->mac.rpwm_seq_num = (rtwdev->mac.rpwm_seq_num + 1) &
- RPWM_SEQ_NUM_MAX;
- request |= FIELD_PREP(PS_RPWM_SEQ_NUM, rtwdev->mac.rpwm_seq_num);
-
request |= req_pwr_state;
- if (req_pwr_state < RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED)
- request |= PS_RPWM_ACK;
+ if (notify_wake) {
+ request |= PS_RPWM_NOTIFY_WAKE;
+ } else {
+ rtwdev->mac.rpwm_seq_num = (rtwdev->mac.rpwm_seq_num + 1) &
+ RPWM_SEQ_NUM_MAX;
+ request |= FIELD_PREP(PS_RPWM_SEQ_NUM,
+ rtwdev->mac.rpwm_seq_num);
+ if (req_pwr_state < RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED)
+ request |= PS_RPWM_ACK;
+ }
rtw89_write16(rtwdev, rtwdev->hci.rpwm_addr, request);
+
+ spin_unlock_bh(&rtwdev->rpwm_lock);
}
static int rtw89_mac_check_cpwm_state(struct rtw89_dev *rtwdev,
@@ -991,7 +1015,7 @@ void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
else
state = RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE;
- rtw89_mac_send_rpwm(rtwdev, state);
+ rtw89_mac_send_rpwm(rtwdev, state, false);
ret = read_poll_timeout_atomic(rtw89_mac_check_cpwm_state, ret, !ret,
1000, 15000, false, rtwdev, state);
if (ret)
@@ -999,19 +1023,31 @@ void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
enter ? "entering" : "leaving");
}
+void rtw89_mac_notify_wake(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_rpwm_req_pwr_state state;
+
+ state = rtw89_mac_get_req_pwr_state(rtwdev);
+ rtw89_mac_send_rpwm(rtwdev, state, true);
+}
+
static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
{
#define PWR_ACT 1
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_pwr_cfg * const *cfg_seq;
+ int (*cfg_func)(struct rtw89_dev *rtwdev);
struct rtw89_hal *hal = &rtwdev->hal;
int ret;
u8 val;
- if (on)
+ if (on) {
cfg_seq = chip->pwr_on_seq;
- else
+ cfg_func = chip->ops->pwr_on_func;
+ } else {
cfg_seq = chip->pwr_off_seq;
+ cfg_func = chip->ops->pwr_off_func;
+ }
if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags))
__rtw89_leave_ps_mode(rtwdev);
@@ -1022,7 +1058,7 @@ static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
return -EBUSY;
}
- ret = rtw89_mac_pwr_seq(rtwdev, cfg_seq);
+ ret = cfg_func ? cfg_func(rtwdev) : rtw89_mac_pwr_seq(rtwdev, cfg_seq);
if (ret)
return ret;
@@ -1092,18 +1128,31 @@ static int cmac_func_en(struct rtw89_dev *rtwdev, u8 mac_idx, bool en)
static int dmac_func_en(struct rtw89_dev *rtwdev)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u32 val32;
- val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_MAC_SEC_EN |
- B_AX_DISPATCHER_EN | B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN |
- B_AX_DMAC_TBL_EN | B_AX_PKT_BUF_EN | B_AX_STA_SCH_EN |
- B_AX_TXPKT_CTRL_EN | B_AX_WD_RLS_EN | B_AX_MPDU_PROC_EN);
+ if (chip_id == RTL8852C)
+ val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN |
+ B_AX_MAC_SEC_EN | B_AX_DISPATCHER_EN |
+ B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN |
+ B_AX_DMAC_TBL_EN | B_AX_PKT_BUF_EN |
+ B_AX_STA_SCH_EN | B_AX_TXPKT_CTRL_EN |
+ B_AX_WD_RLS_EN | B_AX_MPDU_PROC_EN |
+ B_AX_DMAC_CRPRT | B_AX_H_AXIDMA_EN);
+ else
+ val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN |
+ B_AX_MAC_SEC_EN | B_AX_DISPATCHER_EN |
+ B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN |
+ B_AX_DMAC_TBL_EN | B_AX_PKT_BUF_EN |
+ B_AX_STA_SCH_EN | B_AX_TXPKT_CTRL_EN |
+ B_AX_WD_RLS_EN | B_AX_MPDU_PROC_EN |
+ B_AX_DMAC_CRPRT);
rtw89_write32(rtwdev, R_AX_DMAC_FUNC_EN, val32);
val32 = (B_AX_MAC_SEC_CLK_EN | B_AX_DISPATCHER_CLK_EN |
B_AX_DLE_CPUIO_CLK_EN | B_AX_PKT_IN_CLK_EN |
B_AX_STA_SCH_CLK_EN | B_AX_TXPKT_CTRL_CLK_EN |
- B_AX_WD_RLS_CLK_EN);
+ B_AX_WD_RLS_CLK_EN | B_AX_BBRPT_CLK_EN);
rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val32);
return 0;
@@ -1111,7 +1160,11 @@ static int dmac_func_en(struct rtw89_dev *rtwdev)
static int chip_func_en(struct rtw89_dev *rtwdev)
{
- rtw89_write32_set(rtwdev, R_AX_SPSLDO_ON_CTRL0, B_AX_OCP_L1_MASK);
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+
+ if (chip_id == RTL8852A)
+ rtw89_write32_set(rtwdev, R_AX_SPSLDO_ON_CTRL0,
+ B_AX_OCP_L1_MASK);
return 0;
}
@@ -1136,49 +1189,118 @@ static int rtw89_mac_sys_init(struct rtw89_dev *rtwdev)
}
/* PCIE 64 */
-const struct rtw89_dle_size wde_size0 = {
+const struct rtw89_dle_size rtw89_wde_size0 = {
RTW89_WDE_PG_64, 4095, 1,
};
+EXPORT_SYMBOL(rtw89_wde_size0);
/* DLFW */
-const struct rtw89_dle_size wde_size4 = {
+const struct rtw89_dle_size rtw89_wde_size4 = {
RTW89_WDE_PG_64, 0, 4096,
};
+EXPORT_SYMBOL(rtw89_wde_size4);
+
+/* 8852C DLFW */
+const struct rtw89_dle_size rtw89_wde_size18 = {
+ RTW89_WDE_PG_64, 0, 2048,
+};
+EXPORT_SYMBOL(rtw89_wde_size18);
+
+/* 8852C PCIE SCC */
+const struct rtw89_dle_size rtw89_wde_size19 = {
+ RTW89_WDE_PG_64, 3328, 0,
+};
+EXPORT_SYMBOL(rtw89_wde_size19);
/* PCIE */
-const struct rtw89_dle_size ple_size0 = {
+const struct rtw89_dle_size rtw89_ple_size0 = {
RTW89_PLE_PG_128, 1520, 16,
};
+EXPORT_SYMBOL(rtw89_ple_size0);
/* DLFW */
-const struct rtw89_dle_size ple_size4 = {
+const struct rtw89_dle_size rtw89_ple_size4 = {
RTW89_PLE_PG_128, 64, 1472,
};
+EXPORT_SYMBOL(rtw89_ple_size4);
+
+/* 8852C DLFW */
+const struct rtw89_dle_size rtw89_ple_size18 = {
+ RTW89_PLE_PG_128, 2544, 16,
+};
+EXPORT_SYMBOL(rtw89_ple_size18);
+
+/* 8852C PCIE SCC */
+const struct rtw89_dle_size rtw89_ple_size19 = {
+ RTW89_PLE_PG_128, 1904, 16,
+};
+EXPORT_SYMBOL(rtw89_ple_size19);
/* PCIE 64 */
-const struct rtw89_wde_quota wde_qt0 = {
+const struct rtw89_wde_quota rtw89_wde_qt0 = {
3792, 196, 0, 107,
};
+EXPORT_SYMBOL(rtw89_wde_qt0);
/* DLFW */
-const struct rtw89_wde_quota wde_qt4 = {
+const struct rtw89_wde_quota rtw89_wde_qt4 = {
0, 0, 0, 0,
};
+EXPORT_SYMBOL(rtw89_wde_qt4);
+
+/* 8852C DLFW */
+const struct rtw89_wde_quota rtw89_wde_qt17 = {
+ 0, 0, 0, 0,
+};
+EXPORT_SYMBOL(rtw89_wde_qt17);
+
+/* 8852C PCIE SCC */
+const struct rtw89_wde_quota rtw89_wde_qt18 = {
+ 3228, 60, 0, 40,
+};
+EXPORT_SYMBOL(rtw89_wde_qt18);
/* PCIE SCC */
-const struct rtw89_ple_quota ple_qt4 = {
+const struct rtw89_ple_quota rtw89_ple_qt4 = {
264, 0, 16, 20, 26, 13, 356, 0, 32, 40, 8,
};
+EXPORT_SYMBOL(rtw89_ple_qt4);
/* PCIE SCC */
-const struct rtw89_ple_quota ple_qt5 = {
+const struct rtw89_ple_quota rtw89_ple_qt5 = {
264, 0, 32, 20, 64, 13, 1101, 0, 64, 128, 120,
};
+EXPORT_SYMBOL(rtw89_ple_qt5);
/* DLFW */
-const struct rtw89_ple_quota ple_qt13 = {
+const struct rtw89_ple_quota rtw89_ple_qt13 = {
0, 0, 16, 48, 0, 0, 0, 0, 0, 0, 0
};
+EXPORT_SYMBOL(rtw89_ple_qt13);
+
+/* DLFW 52C */
+const struct rtw89_ple_quota rtw89_ple_qt44 = {
+ 0, 0, 16, 256, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+EXPORT_SYMBOL(rtw89_ple_qt44);
+
+/* DLFW 52C */
+const struct rtw89_ple_quota rtw89_ple_qt45 = {
+ 0, 0, 32, 256, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+EXPORT_SYMBOL(rtw89_ple_qt45);
+
+/* 8852C PCIE SCC */
+const struct rtw89_ple_quota rtw89_ple_qt46 = {
+ 525, 0, 16, 20, 13, 13, 178, 0, 32, 62, 8, 16,
+};
+EXPORT_SYMBOL(rtw89_ple_qt46);
+
+/* 8852C PCIE SCC */
+const struct rtw89_ple_quota rtw89_ple_qt47 = {
+ 525, 0, 32, 20, 1034, 13, 1199, 0, 1053, 62, 160, 1037,
+};
+EXPORT_SYMBOL(rtw89_ple_qt47);
static const struct rtw89_dle_mem *get_dle_mem_cfg(struct rtw89_dev *rtwdev,
enum rtw89_qta_mode mode)
@@ -1334,6 +1456,8 @@ static void ple_quota_cfg(struct rtw89_dev *rtwdev,
SET_QUOTA(bb_rpt, PLE, 8);
SET_QUOTA(wd_rel, PLE, 9);
SET_QUOTA(cpu_io, PLE, 10);
+ if (rtwdev->chip->chip_id == RTL8852C)
+ SET_QUOTA(tx_rpt, PLE, 11);
}
#undef SET_QUOTA
@@ -1421,6 +1545,43 @@ error:
return ret;
}
+static int preload_init_set(struct rtw89_dev *rtwdev, enum rtw89_mac_idx mac_idx,
+ enum rtw89_qta_mode mode)
+{
+ u32 reg, max_preld_size, min_rsvd_size;
+
+ max_preld_size = (mac_idx == RTW89_MAC_0 ?
+ PRELD_B0_ENT_NUM : PRELD_B1_ENT_NUM) * PRELD_AMSDU_SIZE;
+ reg = mac_idx == RTW89_MAC_0 ?
+ R_AX_TXPKTCTL_B0_PRELD_CFG0 : R_AX_TXPKTCTL_B1_PRELD_CFG0;
+ rtw89_write32_mask(rtwdev, reg, B_AX_B0_PRELD_USEMAXSZ_MASK, max_preld_size);
+ rtw89_write32_set(rtwdev, reg, B_AX_B0_PRELD_FEN);
+
+ min_rsvd_size = PRELD_AMSDU_SIZE;
+ reg = mac_idx == RTW89_MAC_0 ?
+ R_AX_TXPKTCTL_B0_PRELD_CFG1 : R_AX_TXPKTCTL_B1_PRELD_CFG1;
+ rtw89_write32_mask(rtwdev, reg, B_AX_B0_PRELD_NXT_TXENDWIN_MASK, PRELD_NEXT_WND);
+ rtw89_write32_mask(rtwdev, reg, B_AX_B0_PRELD_NXT_RSVMINSZ_MASK, min_rsvd_size);
+
+ return 0;
+}
+
+static bool is_qta_poh(struct rtw89_dev *rtwdev)
+{
+ return rtwdev->hci.type == RTW89_HCI_TYPE_PCIE;
+}
+
+static int preload_init(struct rtw89_dev *rtwdev, enum rtw89_mac_idx mac_idx,
+ enum rtw89_qta_mode mode)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B || !is_qta_poh(rtwdev))
+ return 0;
+
+ return preload_init_set(rtwdev, mac_idx, mode);
+}
+
static bool dle_is_txq_empty(struct rtw89_dev *rtwdev)
{
u32 msk32;
@@ -1528,6 +1689,12 @@ static int dmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
return ret;
}
+ ret = preload_init(rtwdev, RTW89_MAC_0, rtwdev->mac.qta_mode);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]preload init %d\n", ret);
+ return ret;
+ }
+
ret = hfc_init(rtwdev, true, true, true);
if (ret) {
rtw89_err(rtwdev, "[ERR]HCI FC init %d\n", ret);
@@ -2079,8 +2246,26 @@ static int rtw89_set_hw_sch_tx_en(struct rtw89_dev *rtwdev, u8 mac_idx,
return 0;
}
+static int rtw89_set_hw_sch_tx_en_v1(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 tx_en, u32 tx_en_mask)
+{
+ u32 reg = rtw89_mac_reg_by_idx(R_AX_CTN_DRV_TXEN, mac_idx);
+ u32 val;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ val = rtw89_read32(rtwdev, reg);
+ val = (val & ~tx_en_mask) | (tx_en & tx_en_mask);
+ rtw89_write32(rtwdev, reg, val);
+
+ return 0;
+}
+
int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
- u16 *tx_en, enum rtw89_sch_tx_sel sel)
+ u32 *tx_en, enum rtw89_sch_tx_sel sel)
{
int ret;
@@ -2089,7 +2274,8 @@ int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
switch (sel) {
case RTW89_SCH_TX_SEL_ALL:
- ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0, 0xffff);
+ ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0,
+ B_AX_CTN_TXEN_ALL_MASK);
if (ret)
return ret;
break;
@@ -2106,7 +2292,8 @@ int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
return ret;
break;
case RTW89_SCH_TX_SEL_MACID:
- ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0, 0xffff);
+ ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0,
+ B_AX_CTN_TXEN_ALL_MASK);
if (ret)
return ret;
break;
@@ -2116,17 +2303,73 @@ int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
return 0;
}
+EXPORT_SYMBOL(rtw89_mac_stop_sch_tx);
+
+int rtw89_mac_stop_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 *tx_en, enum rtw89_sch_tx_sel sel)
+{
+ int ret;
+
+ *tx_en = rtw89_read32(rtwdev,
+ rtw89_mac_reg_by_idx(R_AX_CTN_DRV_TXEN, mac_idx));
+
+ switch (sel) {
+ case RTW89_SCH_TX_SEL_ALL:
+ ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx, 0,
+ B_AX_CTN_TXEN_ALL_MASK_V1);
+ if (ret)
+ return ret;
+ break;
+ case RTW89_SCH_TX_SEL_HIQ:
+ ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx,
+ 0, B_AX_CTN_TXEN_HGQ);
+ if (ret)
+ return ret;
+ break;
+ case RTW89_SCH_TX_SEL_MG0:
+ ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx,
+ 0, B_AX_CTN_TXEN_MGQ);
+ if (ret)
+ return ret;
+ break;
+ case RTW89_SCH_TX_SEL_MACID:
+ ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx, 0,
+ B_AX_CTN_TXEN_ALL_MASK_V1);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_stop_sch_tx_v1);
+
+int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
+{
+ int ret;
+
+ ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, tx_en, B_AX_CTN_TXEN_ALL_MASK);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_resume_sch_tx);
-int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u16 tx_en)
+int rtw89_mac_resume_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
{
int ret;
- ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, tx_en, 0xffff);
+ ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx, tx_en,
+ B_AX_CTN_TXEN_ALL_MASK_V1);
if (ret)
return ret;
return 0;
}
+EXPORT_SYMBOL(rtw89_mac_resume_sch_tx_v1);
static u16 rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len,
bool wd)
@@ -2290,9 +2533,9 @@ static int band1_enable(struct rtw89_dev *rtwdev)
int ret, i;
u32 sleep_bak[4] = {0};
u32 pause_bak[4] = {0};
- u16 tx_en;
+ u32 tx_en;
- ret = rtw89_mac_stop_sch_tx(rtwdev, 0, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ ret = rtw89_chip_stop_sch_tx(rtwdev, 0, &tx_en, RTW89_SCH_TX_SEL_ALL);
if (ret) {
rtw89_err(rtwdev, "[ERR]stop sch tx %d\n", ret);
return ret;
@@ -2322,7 +2565,7 @@ static int band1_enable(struct rtw89_dev *rtwdev)
rtw89_write32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4, pause_bak[i]);
}
- ret = rtw89_mac_resume_sch_tx(rtwdev, 0, tx_en);
+ ret = rtw89_chip_resume_sch_tx(rtwdev, 0, tx_en);
if (ret) {
rtw89_err(rtwdev, "[ERR]CMAC1 resume sch tx %d\n", ret);
return ret;
@@ -2516,7 +2759,11 @@ static void rtw89_mac_disable_cpu(struct rtw89_dev *rtwdev)
clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_WCPU_EN);
+ rtw89_write32_clr(rtwdev, R_AX_WCPU_FW_CTRL, B_AX_WCPU_FWDL_EN |
+ B_AX_H2C_PATH_RDY | B_AX_FWDL_PATH_RDY);
rtw89_write32_clr(rtwdev, R_AX_SYS_CLK_CTRL, B_AX_CPU_CLK_EN);
+ rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
}
static int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason,
@@ -2586,7 +2833,9 @@ static int rtw89_mac_fw_dl_pre_init(struct rtw89_dev *rtwdev)
static void rtw89_mac_hci_func_en(struct rtw89_dev *rtwdev)
{
- rtw89_write32_set(rtwdev, R_AX_HCI_FUNC_EN,
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ rtw89_write32_set(rtwdev, chip->hci_func_en_addr,
B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN);
}
@@ -2705,7 +2954,7 @@ static void rtw89_mac_cmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid)
rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 28, 0xB8109);
}
-static int rtw89_set_macid_pause(struct rtw89_dev *rtwdev, u8 macid, bool pause)
+int rtw89_mac_set_macid_pause(struct rtw89_dev *rtwdev, u8 macid, bool pause)
{
u8 sh = FIELD_GET(GENMASK(4, 0), macid);
u8 grp = macid >> 5;
@@ -2864,6 +3113,36 @@ static void rtw89_mac_port_cfg_bcn_intv(struct rtw89_dev *rtwdev,
bcn_int);
}
+static void rtw89_mac_port_cfg_hiq_win(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ static const u32 hiq_win_addr[RTW89_PORT_NUM] = {
+ R_AX_P0MB_HGQ_WINDOW_CFG_0, R_AX_PORT_HGQ_WINDOW_CFG,
+ R_AX_PORT_HGQ_WINDOW_CFG + 1, R_AX_PORT_HGQ_WINDOW_CFG + 2,
+ R_AX_PORT_HGQ_WINDOW_CFG + 3,
+ };
+ u8 win = rtwvif->net_type == RTW89_NET_TYPE_AP_MODE ? 16 : 0;
+ u8 port = rtwvif->port;
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_idx(hiq_win_addr[port], rtwvif->mac_idx);
+ rtw89_write8(rtwdev, reg, win);
+}
+
+static void rtw89_mac_port_cfg_hiq_dtim(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ const struct rtw89_port_reg *p = &rtw_port_base;
+ u32 addr;
+
+ addr = rtw89_mac_reg_by_idx(R_AX_MD_TSFT_STMP_CTL, rtwvif->mac_idx);
+ rtw89_write8_set(rtwdev, addr, B_AX_UPD_HGQMD | B_AX_UPD_TIMIE);
+
+ rtw89_write16_port_mask(rtwdev, rtwvif, p->dtim_ctrl, B_AX_DTIM_NUM_MASK,
+ vif->bss_conf.dtim_period);
+}
+
static void rtw89_mac_port_cfg_bcn_setup_time(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif)
{
@@ -2978,11 +3257,11 @@ int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtw89_mac_dmac_tbl_init(rtwdev, rtwvif->mac_id);
rtw89_mac_cmac_tbl_init(rtwdev, rtwvif->mac_id);
- ret = rtw89_set_macid_pause(rtwdev, rtwvif->mac_id, false);
+ ret = rtw89_mac_set_macid_pause(rtwdev, rtwvif->mac_id, false);
if (ret)
return ret;
- ret = rtw89_fw_h2c_vif_maintain(rtwdev, rtwvif, RTW89_VIF_CREATE);
+ ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_CREATE);
if (ret)
return ret;
@@ -2994,7 +3273,7 @@ int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
if (ret)
return ret;
- ret = rtw89_fw_h2c_default_cmac_tbl(rtwdev, rtwvif->mac_id);
+ ret = rtw89_fw_h2c_default_cmac_tbl(rtwdev, rtwvif);
if (ret)
return ret;
@@ -3005,7 +3284,7 @@ int rtw89_mac_vif_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
int ret;
- ret = rtw89_fw_h2c_vif_maintain(rtwdev, rtwvif, RTW89_VIF_REMOVE);
+ ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_REMOVE);
if (ret)
return ret;
@@ -3034,13 +3313,15 @@ int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif);
rtw89_mac_port_cfg_tx_sw(rtwdev, rtwvif);
rtw89_mac_port_cfg_bcn_intv(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_hiq_win(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_hiq_dtim(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_hiq_drop(rtwdev, rtwvif);
rtw89_mac_port_cfg_bcn_setup_time(rtwdev, rtwvif);
rtw89_mac_port_cfg_bcn_hold_time(rtwdev, rtwvif);
rtw89_mac_port_cfg_bcn_mask_area(rtwdev, rtwvif);
rtw89_mac_port_cfg_tbtt_early(rtwdev, rtwvif);
rtw89_mac_port_cfg_bss_color(rtwdev, rtwvif);
rtw89_mac_port_cfg_mbssid(rtwdev, rtwvif);
- rtw89_mac_port_cfg_hiq_drop(rtwdev, rtwvif);
rtw89_mac_port_cfg_func_en(rtwdev, rtwvif);
fsleep(BCN_ERLY_SET_DLY);
rtw89_mac_port_cfg_bcn_early(rtwdev, rtwvif);
@@ -3084,6 +3365,57 @@ rtw89_mac_c2h_macid_pause(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len
{
}
+static bool rtw89_is_op_chan(struct rtw89_dev *rtwdev, u8 band, u8 channel)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+
+ return band == scan_info->op_band && channel == scan_info->op_pri_ch;
+}
+
+static void
+rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
+ u32 len)
+{
+ struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u8 reason, status, tx_fail, band;
+ u16 chan;
+
+ tx_fail = RTW89_GET_MAC_C2H_SCANOFLD_TX_FAIL(c2h->data);
+ status = RTW89_GET_MAC_C2H_SCANOFLD_STATUS(c2h->data);
+ chan = RTW89_GET_MAC_C2H_SCANOFLD_PRI_CH(c2h->data);
+ reason = RTW89_GET_MAC_C2H_SCANOFLD_RSP(c2h->data);
+ band = RTW89_GET_MAC_C2H_SCANOFLD_BAND(c2h->data);
+
+ if (!(rtwdev->chip->support_bands & BIT(NL80211_BAND_6GHZ)))
+ band = chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G;
+
+ rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
+ "band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d\n",
+ band, chan, reason, status, tx_fail);
+
+ switch (reason) {
+ case RTW89_SCAN_LEAVE_CH_NOTIFY:
+ if (rtw89_is_op_chan(rtwdev, band, chan))
+ ieee80211_stop_queues(rtwdev->hw);
+ return;
+ case RTW89_SCAN_END_SCAN_NOTIFY:
+ rtw89_hw_scan_complete(rtwdev, vif, false);
+ break;
+ case RTW89_SCAN_ENTER_CH_NOTIFY:
+ if (rtw89_is_op_chan(rtwdev, band, chan))
+ ieee80211_wake_queues(rtwdev->hw);
+ break;
+ default:
+ return;
+ }
+
+ hal->prev_band_type = hal->current_band_type;
+ hal->prev_primary_channel = hal->current_channel;
+ hal->current_channel = chan;
+ hal->current_band_type = band;
+}
+
static void
rtw89_mac_c2h_rec_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
@@ -3114,6 +3446,11 @@ rtw89_mac_c2h_log(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
RTW89_GET_C2H_LOG_SRT_PRT(c2h->data));
}
+static void
+rtw89_mac_c2h_bcn_cnt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+}
+
static
void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
@@ -3122,6 +3459,7 @@ void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev,
[RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP] = NULL,
[RTW89_MAC_C2H_FUNC_BCN_RESEND] = NULL,
[RTW89_MAC_C2H_FUNC_MACID_PAUSE] = rtw89_mac_c2h_macid_pause,
+ [RTW89_MAC_C2H_FUNC_SCANOFLD_RSP] = rtw89_mac_c2h_scanofld_rsp,
};
static
@@ -3130,6 +3468,7 @@ void (* const rtw89_mac_c2h_info_handler[])(struct rtw89_dev *rtwdev,
[RTW89_MAC_C2H_FUNC_REC_ACK] = rtw89_mac_c2h_rec_ack,
[RTW89_MAC_C2H_FUNC_DONE_ACK] = rtw89_mac_c2h_done_ack,
[RTW89_MAC_C2H_FUNC_C2H_LOG] = rtw89_mac_c2h_log,
+ [RTW89_MAC_C2H_FUNC_BCN_CNT] = rtw89_mac_c2h_bcn_cnt,
};
void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
@@ -3192,6 +3531,7 @@ error:
return false;
}
+EXPORT_SYMBOL(rtw89_mac_get_txpwr_cr);
int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
{
@@ -3216,6 +3556,7 @@ int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
return ret;
}
+EXPORT_SYMBOL(rtw89_mac_cfg_ppdu_status);
void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx)
{
@@ -3349,33 +3690,37 @@ int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex
return 0;
}
+EXPORT_SYMBOL(rtw89_mac_coex_init);
int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev,
const struct rtw89_mac_ax_coex_gnt *gnt_cfg)
{
- u32 val, ret;
+ u32 val = 0, ret;
+
+ if (gnt_cfg->band[0].gnt_bt)
+ val |= B_AX_GNT_BT_RFC_S0_SW_VAL | B_AX_GNT_BT_BB_S0_SW_VAL;
+
+ if (gnt_cfg->band[0].gnt_bt_sw_en)
+ val |= B_AX_GNT_BT_RFC_S0_SW_CTRL | B_AX_GNT_BT_BB_S0_SW_CTRL;
+
+ if (gnt_cfg->band[0].gnt_wl)
+ val |= B_AX_GNT_WL_RFC_S0_SW_VAL | B_AX_GNT_WL_BB_S0_SW_VAL;
+
+ if (gnt_cfg->band[0].gnt_wl_sw_en)
+ val |= B_AX_GNT_WL_RFC_S0_SW_CTRL | B_AX_GNT_WL_BB_S0_SW_CTRL;
+
+ if (gnt_cfg->band[1].gnt_bt)
+ val |= B_AX_GNT_BT_RFC_S1_SW_VAL | B_AX_GNT_BT_BB_S1_SW_VAL;
+
+ if (gnt_cfg->band[1].gnt_bt_sw_en)
+ val |= B_AX_GNT_BT_RFC_S1_SW_CTRL | B_AX_GNT_BT_BB_S1_SW_CTRL;
+
+ if (gnt_cfg->band[1].gnt_wl)
+ val |= B_AX_GNT_WL_RFC_S1_SW_VAL | B_AX_GNT_WL_BB_S1_SW_VAL;
+
+ if (gnt_cfg->band[1].gnt_wl_sw_en)
+ val |= B_AX_GNT_WL_RFC_S1_SW_CTRL | B_AX_GNT_WL_BB_S1_SW_CTRL;
- ret = rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_1, &val);
- if (ret) {
- rtw89_err(rtwdev, "Read LTE fail!\n");
- return ret;
- }
- val = (gnt_cfg->band[0].gnt_bt ?
- B_AX_GNT_BT_RFC_S0_SW_VAL | B_AX_GNT_BT_BB_S0_SW_VAL : 0) |
- (gnt_cfg->band[0].gnt_bt_sw_en ?
- B_AX_GNT_BT_RFC_S0_SW_CTRL | B_AX_GNT_BT_BB_S0_SW_CTRL : 0) |
- (gnt_cfg->band[0].gnt_wl ?
- B_AX_GNT_WL_RFC_S0_SW_VAL | B_AX_GNT_WL_BB_S0_SW_VAL : 0) |
- (gnt_cfg->band[0].gnt_wl_sw_en ?
- B_AX_GNT_WL_RFC_S0_SW_CTRL | B_AX_GNT_WL_BB_S0_SW_CTRL : 0) |
- (gnt_cfg->band[1].gnt_bt ?
- B_AX_GNT_BT_RFC_S1_SW_VAL | B_AX_GNT_BT_BB_S1_SW_VAL : 0) |
- (gnt_cfg->band[1].gnt_bt_sw_en ?
- B_AX_GNT_BT_RFC_S1_SW_CTRL | B_AX_GNT_BT_BB_S1_SW_CTRL : 0) |
- (gnt_cfg->band[1].gnt_wl ?
- B_AX_GNT_WL_RFC_S1_SW_VAL | B_AX_GNT_WL_BB_S1_SW_VAL : 0) |
- (gnt_cfg->band[1].gnt_wl_sw_en ?
- B_AX_GNT_WL_RFC_S1_SW_CTRL | B_AX_GNT_WL_BB_S1_SW_CTRL : 0);
ret = rtw89_mac_write_lte(rtwdev, R_AX_LTE_SW_CFG_1, val);
if (ret) {
rtw89_err(rtwdev, "Write LTE fail!\n");
@@ -3384,11 +3729,59 @@ int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev,
return 0;
}
+EXPORT_SYMBOL(rtw89_mac_cfg_gnt);
+
+int rtw89_mac_cfg_gnt_v1(struct rtw89_dev *rtwdev,
+ const struct rtw89_mac_ax_coex_gnt *gnt_cfg)
+{
+ u32 val = 0;
+
+ if (gnt_cfg->band[0].gnt_bt)
+ val |= B_AX_GNT_BT_RFC_S0_VAL | B_AX_GNT_BT_RX_VAL |
+ B_AX_GNT_BT_TX_VAL;
+ else
+ val |= B_AX_WL_ACT_VAL;
+
+ if (gnt_cfg->band[0].gnt_bt_sw_en)
+ val |= B_AX_GNT_BT_RFC_S0_SWCTRL | B_AX_GNT_BT_RX_SWCTRL |
+ B_AX_GNT_BT_TX_SWCTRL | B_AX_WL_ACT_SWCTRL;
+
+ if (gnt_cfg->band[0].gnt_wl)
+ val |= B_AX_GNT_WL_RFC_S0_VAL | B_AX_GNT_WL_RX_VAL |
+ B_AX_GNT_WL_TX_VAL | B_AX_GNT_WL_BB_VAL;
+
+ if (gnt_cfg->band[0].gnt_wl_sw_en)
+ val |= B_AX_GNT_WL_RFC_S0_SWCTRL | B_AX_GNT_WL_RX_SWCTRL |
+ B_AX_GNT_WL_TX_SWCTRL | B_AX_GNT_WL_BB_SWCTRL;
+
+ if (gnt_cfg->band[1].gnt_bt)
+ val |= B_AX_GNT_BT_RFC_S1_VAL | B_AX_GNT_BT_RX_VAL |
+ B_AX_GNT_BT_TX_VAL;
+ else
+ val |= B_AX_WL_ACT_VAL;
+
+ if (gnt_cfg->band[1].gnt_bt_sw_en)
+ val |= B_AX_GNT_BT_RFC_S1_SWCTRL | B_AX_GNT_BT_RX_SWCTRL |
+ B_AX_GNT_BT_TX_SWCTRL | B_AX_WL_ACT_SWCTRL;
+
+ if (gnt_cfg->band[1].gnt_wl)
+ val |= B_AX_GNT_WL_RFC_S1_VAL | B_AX_GNT_WL_RX_VAL |
+ B_AX_GNT_WL_TX_VAL | B_AX_GNT_WL_BB_VAL;
+
+ if (gnt_cfg->band[1].gnt_wl_sw_en)
+ val |= B_AX_GNT_WL_RFC_S1_SWCTRL | B_AX_GNT_WL_RX_SWCTRL |
+ B_AX_GNT_WL_TX_SWCTRL | B_AX_GNT_WL_BB_SWCTRL;
+
+ rtw89_write32(rtwdev, R_AX_GNT_SW_CTRL, val);
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_cfg_gnt_v1);
int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt)
{
u32 reg;
- u8 val;
+ u16 val;
int ret;
ret = rtw89_mac_check_mac_en(rtwdev, plt->band, RTW89_CMAC_SEL);
@@ -3403,8 +3796,9 @@ int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt)
(plt->rx & RTW89_MAC_AX_PLT_LTE_RX ? B_AX_RX_PLT_GNT_LTE_RX : 0) |
(plt->rx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_AX_RX_PLT_GNT_BT_TX : 0) |
(plt->rx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_AX_RX_PLT_GNT_BT_RX : 0) |
- (plt->rx & RTW89_MAC_AX_PLT_GNT_WL ? B_AX_RX_PLT_GNT_WL : 0);
- rtw89_write8(rtwdev, reg, val);
+ (plt->rx & RTW89_MAC_AX_PLT_GNT_WL ? B_AX_RX_PLT_GNT_WL : 0) |
+ B_AX_PLT_EN;
+ rtw89_write16(rtwdev, reg, val);
return 0;
}
@@ -3442,6 +3836,28 @@ int rtw89_mac_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl)
return 0;
}
+EXPORT_SYMBOL(rtw89_mac_cfg_ctrl_path);
+
+int rtw89_mac_cfg_ctrl_path_v1(struct rtw89_dev *rtwdev, bool wl)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_mac_ax_gnt *g = dm->gnt.band;
+ int i;
+
+ if (wl)
+ return 0;
+
+ for (i = 0; i < RTW89_PHY_MAX; i++) {
+ g[i].gnt_bt_sw_en = 1;
+ g[i].gnt_bt = 1;
+ g[i].gnt_wl_sw_en = 1;
+ g[i].gnt_wl = 0;
+ }
+
+ return rtw89_mac_cfg_gnt_v1(rtwdev, &dm->gnt);
+}
+EXPORT_SYMBOL(rtw89_mac_cfg_ctrl_path_v1);
bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev)
{
@@ -3845,3 +4261,51 @@ int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev,
return 0;
}
+
+int rtw89_mac_write_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 val, u8 mask)
+{
+ u32 val32;
+ int ret;
+
+ val32 = FIELD_PREP(B_AX_WL_XTAL_SI_ADDR_MASK, offset) |
+ FIELD_PREP(B_AX_WL_XTAL_SI_DATA_MASK, val) |
+ FIELD_PREP(B_AX_WL_XTAL_SI_BITMASK_MASK, mask) |
+ FIELD_PREP(B_AX_WL_XTAL_SI_MODE_MASK, XTAL_SI_NORMAL_WRITE) |
+ FIELD_PREP(B_AX_WL_XTAL_SI_CMD_POLL, 1);
+ rtw89_write32(rtwdev, R_AX_WLAN_XTAL_SI_CTRL, val32);
+
+ ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_WL_XTAL_SI_CMD_POLL),
+ 50, 50000, false, rtwdev, R_AX_WLAN_XTAL_SI_CTRL);
+ if (ret) {
+ rtw89_warn(rtwdev, "xtal si not ready(W): offset=%x val=%x mask=%x\n",
+ offset, val, mask);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_write_xtal_si);
+
+int rtw89_mac_read_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 *val)
+{
+ u32 val32;
+ int ret;
+
+ val32 = FIELD_PREP(B_AX_WL_XTAL_SI_ADDR_MASK, offset) |
+ FIELD_PREP(B_AX_WL_XTAL_SI_DATA_MASK, 0x00) |
+ FIELD_PREP(B_AX_WL_XTAL_SI_BITMASK_MASK, 0x00) |
+ FIELD_PREP(B_AX_WL_XTAL_SI_MODE_MASK, XTAL_SI_NORMAL_READ) |
+ FIELD_PREP(B_AX_WL_XTAL_SI_CMD_POLL, 1);
+ rtw89_write32(rtwdev, R_AX_WLAN_XTAL_SI_CTRL, val32);
+
+ ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_WL_XTAL_SI_CMD_POLL),
+ 50, 50000, false, rtwdev, R_AX_WLAN_XTAL_SI_CTRL);
+ if (ret) {
+ rtw89_warn(rtwdev, "xtal si not ready(R): offset=%x\n", offset);
+ return ret;
+ }
+
+ *val = rtw89_read8(rtwdev, R_AX_WLAN_XTAL_SI_CTRL + 1);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index b7d13edf7dd1..b797667c78c6 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -301,6 +301,7 @@ enum rtw89_mac_c2h_ofld_func {
RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP,
RTW89_MAC_C2H_FUNC_BCN_RESEND,
RTW89_MAC_C2H_FUNC_MACID_PAUSE,
+ RTW89_MAC_C2H_FUNC_SCANOFLD_RSP = 0x9,
RTW89_MAC_C2H_FUNC_OFLD_MAX,
};
@@ -308,6 +309,7 @@ enum rtw89_mac_c2h_info_func {
RTW89_MAC_C2H_FUNC_REC_ACK,
RTW89_MAC_C2H_FUNC_DONE_ACK,
RTW89_MAC_C2H_FUNC_C2H_LOG,
+ RTW89_MAC_C2H_FUNC_BCN_CNT,
RTW89_MAC_C2H_FUNC_INFO_MAX,
};
@@ -670,16 +672,26 @@ enum mac_ax_err_info {
MAC_AX_SET_ERR_MAX,
};
-extern const struct rtw89_hfc_prec_cfg rtw_hfc_preccfg_pcie;
-extern const struct rtw89_dle_size wde_size0;
-extern const struct rtw89_dle_size wde_size4;
-extern const struct rtw89_dle_size ple_size0;
-extern const struct rtw89_dle_size ple_size4;
-extern const struct rtw89_wde_quota wde_qt0;
-extern const struct rtw89_wde_quota wde_qt4;
-extern const struct rtw89_ple_quota ple_qt4;
-extern const struct rtw89_ple_quota ple_qt5;
-extern const struct rtw89_ple_quota ple_qt13;
+extern const struct rtw89_hfc_prec_cfg rtw89_hfc_preccfg_pcie;
+extern const struct rtw89_dle_size rtw89_wde_size0;
+extern const struct rtw89_dle_size rtw89_wde_size4;
+extern const struct rtw89_dle_size rtw89_wde_size18;
+extern const struct rtw89_dle_size rtw89_wde_size19;
+extern const struct rtw89_dle_size rtw89_ple_size0;
+extern const struct rtw89_dle_size rtw89_ple_size4;
+extern const struct rtw89_dle_size rtw89_ple_size18;
+extern const struct rtw89_dle_size rtw89_ple_size19;
+extern const struct rtw89_wde_quota rtw89_wde_qt0;
+extern const struct rtw89_wde_quota rtw89_wde_qt4;
+extern const struct rtw89_wde_quota rtw89_wde_qt17;
+extern const struct rtw89_wde_quota rtw89_wde_qt18;
+extern const struct rtw89_ple_quota rtw89_ple_qt4;
+extern const struct rtw89_ple_quota rtw89_ple_qt5;
+extern const struct rtw89_ple_quota rtw89_ple_qt13;
+extern const struct rtw89_ple_quota rtw89_ple_qt44;
+extern const struct rtw89_ple_quota rtw89_ple_qt45;
+extern const struct rtw89_ple_quota rtw89_ple_qt46;
+extern const struct rtw89_ple_quota rtw89_ple_qt47;
static inline u32 rtw89_mac_reg_by_idx(u32 reg_base, u8 band)
{
@@ -779,24 +791,31 @@ void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u32 len, u8 class, u8 func);
int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev);
int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
- u16 *tx_en, enum rtw89_sch_tx_sel sel);
-int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u16 tx_en);
+ u32 *tx_en, enum rtw89_sch_tx_sel sel);
+int rtw89_mac_stop_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 *tx_en, enum rtw89_sch_tx_sel sel);
+int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
+int rtw89_mac_resume_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_ids, bool enable);
void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx);
void rtw89_mac_flush_txq(struct rtw89_dev *rtwdev, u32 queues, bool drop);
int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex *coex);
int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev,
const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
+int rtw89_mac_cfg_gnt_v1(struct rtw89_dev *rtwdev,
+ const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt);
u16 rtw89_mac_get_plt_cnt(struct rtw89_dev *rtwdev, u8 band);
void rtw89_mac_cfg_sb(struct rtw89_dev *rtwdev, u32 val);
u32 rtw89_mac_get_sb(struct rtw89_dev *rtwdev);
bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev);
int rtw89_mac_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl);
+int rtw89_mac_cfg_ctrl_path_v1(struct rtw89_dev *rtwdev, bool wl);
bool rtw89_mac_get_txpwr_cr(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
u32 reg_base, u32 *cr);
void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter);
+void rtw89_mac_notify_wake(struct rtw89_dev *rtwdev);
void rtw89_mac_bf_assoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void rtw89_mac_bf_disassoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
@@ -810,6 +829,7 @@ int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
int rtw89_mac_vif_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool en);
+int rtw89_mac_set_macid_pause(struct rtw89_dev *rtwdev, u8 macid, bool pause);
static inline void rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev)
{
@@ -868,4 +888,44 @@ int rtw89_mac_set_tx_retry_limit(struct rtw89_dev *rtwdev,
int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta, u8 *tx_retry);
+enum rtw89_mac_xtal_si_offset {
+ XTAL_SI_XTAL_SC_XI = 0x04,
+#define XTAL_SC_XI_MASK GENMASK(7, 0)
+ XTAL_SI_XTAL_SC_XO = 0x05,
+#define XTAL_SC_XO_MASK GENMASK(7, 0)
+ XTAL_SI_PWR_CUT = 0x10,
+#define XTAL_SI_SMALL_PWR_CUT BIT(0)
+#define XTAL_SI_BIG_PWR_CUT BIT(1)
+ XTAL_SI_XTAL_XMD_2 = 0x24,
+#define XTAL_SI_LDO_LPS GENMASK(6, 4)
+ XTAL_SI_XTAL_XMD_4 = 0x26,
+#define XTAL_SI_LPS_CAP GENMASK(3, 0)
+ XTAL_SI_CV = 0x41,
+ XTAL_SI_LOW_ADDR = 0x62,
+#define XTAL_SI_LOW_ADDR_MASK GENMASK(7, 0)
+ XTAL_SI_CTRL = 0x63,
+#define XTAL_SI_MODE_SEL_MASK GENMASK(7, 6)
+#define XTAL_SI_RDY BIT(5)
+#define XTAL_SI_HIGH_ADDR_MASK GENMASK(2, 0)
+ XTAL_SI_READ_VAL = 0x7A,
+ XTAL_SI_WL_RFC_S0 = 0x80,
+#define XTAL_SI_RF00 BIT(0)
+ XTAL_SI_WL_RFC_S1 = 0x81,
+#define XTAL_SI_RF10 BIT(0)
+ XTAL_SI_ANAPAR_WL = 0x90,
+#define XTAL_SI_SRAM2RFC BIT(7)
+#define XTAL_SI_GND_SHDN_WL BIT(6)
+#define XTAL_SI_SHDN_WL BIT(5)
+#define XTAL_SI_RFC2RF BIT(4)
+#define XTAL_SI_OFF_EI BIT(3)
+#define XTAL_SI_OFF_WEI BIT(2)
+#define XTAL_SI_PON_EI BIT(1)
+#define XTAL_SI_PON_WEI BIT(0)
+ XTAL_SI_SRAM_CTRL = 0xA1,
+#define FULL_BIT_MASK GENMASK(7, 0)
+};
+
+int rtw89_mac_write_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 val, u8 mask);
+int rtw89_mac_read_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 *val);
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index a322259f4cc4..fca9f82bb462 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -66,6 +66,9 @@ static int rtw89_ops_config(struct ieee80211_hw *hw, u32 changed)
{
struct rtw89_dev *rtwdev = hw->priv;
+ /* let previous ips work finish to ensure we don't leave ips twice */
+ cancel_work_sync(&rtwdev->ips_work);
+
mutex_lock(&rtwdev->mutex);
rtw89_leave_ps_mode(rtwdev);
@@ -102,14 +105,16 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
int ret = 0;
mutex_lock(&rtwdev->mutex);
+ rtwvif->rtwdev = rtwdev;
list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list);
+ INIT_WORK(&rtwvif->update_beacon_work, rtw89_core_update_beacon_work);
rtw89_leave_ps_mode(rtwdev);
rtw89_traffic_stats_init(rtwdev, &rtwvif->stats);
rtw89_vif_type_mapping(vif, false);
rtwvif->port = rtw89_core_acquire_bit_map(rtwdev->hw_port,
- RTW89_MAX_HW_PORT_NUM);
- if (rtwvif->port == RTW89_MAX_HW_PORT_NUM) {
+ RTW89_PORT_NUM);
+ if (rtwvif->port == RTW89_PORT_NUM) {
ret = -ENOSPC;
goto out;
}
@@ -141,6 +146,8 @@ static void rtw89_ops_remove_interface(struct ieee80211_hw *hw,
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ cancel_work_sync(&rtwvif->update_beacon_work);
+
mutex_lock(&rtwdev->mutex);
rtw89_leave_ps_mode(rtwdev);
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, NULL, BTC_ROLE_STOP);
@@ -161,7 +168,7 @@ static void rtw89_ops_configure_filter(struct ieee80211_hw *hw,
rtw89_leave_ps_mode(rtwdev);
*new_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_FCSFAIL |
- FIF_BCN_PRBRESP_PROMISC;
+ FIF_BCN_PRBRESP_PROMISC | FIF_PROBE_REQ;
if (changed_flags & FIF_ALLMULTI) {
if (*new_flags & FIF_ALLMULTI)
@@ -192,6 +199,15 @@ static void rtw89_ops_configure_filter(struct ieee80211_hw *hw,
rtwdev->hal.rx_fltr |= B_AX_A_A1_MATCH;
}
}
+ if (changed_flags & FIF_PROBE_REQ) {
+ if (*new_flags & FIF_PROBE_REQ) {
+ rtwdev->hal.rx_fltr &= ~B_AX_A_BC_CAM_MATCH;
+ rtwdev->hal.rx_fltr &= ~B_AX_A_UC_CAM_MATCH;
+ } else {
+ rtwdev->hal.rx_fltr |= B_AX_A_BC_CAM_MATCH;
+ rtwdev->hal.rx_fltr |= B_AX_A_UC_CAM_MATCH;
+ }
+ }
rtw89_write32_mask(rtwdev,
rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
@@ -311,6 +327,9 @@ static void rtw89_station_mode_sta_assoc(struct rtw89_dev *rtwdev,
rtw89_err(rtwdev, "can't find sta to set sta_assoc state\n");
return;
}
+
+ rtw89_vif_type_mapping(vif, true);
+
rtw89_core_sta_assoc(rtwdev, vif, sta);
}
@@ -331,6 +350,13 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw89_phy_set_bss_color(rtwdev, vif);
rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, vif);
rtw89_mac_port_update(rtwdev, rtwvif);
+ rtw89_store_op_chan(rtwdev);
+ } else {
+ /* Abort ongoing scan if cancel_scan isn't issued
+ * when disconnected by peer
+ */
+ if (rtwdev->scanning)
+ rtw89_hw_scan_abort(rtwdev, vif);
}
}
@@ -340,6 +366,9 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
}
+ if (changed & BSS_CHANGED_BEACON)
+ rtw89_fw_h2c_update_beacon(rtwdev, rtwvif);
+
if (changed & BSS_CHANGED_ERP_SLOT)
rtw89_conf_tx(rtwdev, rtwvif);
@@ -352,6 +381,49 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
mutex_unlock(&rtwdev->mutex);
}
+static int rtw89_ops_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+
+ mutex_lock(&rtwdev->mutex);
+ ether_addr_copy(rtwvif->bssid, vif->bss_conf.bssid);
+ rtw89_cam_bssid_changed(rtwdev, rtwvif);
+ rtw89_mac_port_update(rtwdev, rtwvif);
+ rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, NULL);
+ rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_TYPE_CHANGE);
+ rtw89_fw_h2c_join_info(rtwdev, rtwvif, NULL, true);
+ rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
+ rtw89_chip_rfk_channel(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+
+ return 0;
+}
+
+static
+void rtw89_ops_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, NULL);
+ rtw89_fw_h2c_join_info(rtwdev, rtwvif, NULL, true);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static int rtw89_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ bool set)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+
+ ieee80211_queue_work(rtwdev->hw, &rtwvif->update_beacon_work);
+
+ return 0;
+}
+
static int rtw89_ops_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u16 ac,
const struct ieee80211_tx_queue_params *params)
@@ -476,7 +548,6 @@ static int rtw89_ops_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mutex_lock(&rtwdev->mutex);
clear_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags);
- rtw89_fw_h2c_ba_cam(rtwdev, false, rtwsta->mac_id, params);
mutex_unlock(&rtwdev->mutex);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
@@ -486,11 +557,17 @@ static int rtw89_ops_ampdu_action(struct ieee80211_hw *hw,
rtwsta->ampdu_params[tid].agg_num = params->buf_size;
rtwsta->ampdu_params[tid].amsdu = params->amsdu;
rtw89_leave_ps_mode(rtwdev);
- rtw89_fw_h2c_ba_cam(rtwdev, true, rtwsta->mac_id, params);
mutex_unlock(&rtwdev->mutex);
break;
case IEEE80211_AMPDU_RX_START:
+ mutex_lock(&rtwdev->mutex);
+ rtw89_fw_h2c_ba_cam(rtwdev, rtwsta, true, params);
+ mutex_unlock(&rtwdev->mutex);
+ break;
case IEEE80211_AMPDU_RX_STOP:
+ mutex_lock(&rtwdev->mutex);
+ rtw89_fw_h2c_ba_cam(rtwdev, rtwsta, false, params);
+ mutex_unlock(&rtwdev->mutex);
break;
default:
WARN_ON(1);
@@ -617,15 +694,9 @@ static void rtw89_ops_sw_scan_start(struct ieee80211_hw *hw,
{
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
- struct rtw89_hal *hal = &rtwdev->hal;
mutex_lock(&rtwdev->mutex);
- rtwdev->scanning = true;
- rtw89_leave_lps(rtwdev);
- rtw89_btc_ntfy_scan_start(rtwdev, RTW89_PHY_0, hal->current_band_type);
- rtw89_chip_rfk_scan(rtwdev, true);
- rtw89_hci_recalc_int_mit(rtwdev);
- rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, mac_addr);
+ rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, false);
mutex_unlock(&rtwdev->mutex);
}
@@ -633,14 +704,9 @@ static void rtw89_ops_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct rtw89_dev *rtwdev = hw->priv;
- struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
mutex_lock(&rtwdev->mutex);
- rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
- rtw89_chip_rfk_scan(rtwdev, false);
- rtw89_btc_ntfy_scan_finish(rtwdev, RTW89_PHY_0);
- rtwdev->scanning = false;
- rtwdev->dig.bypass_dig = true;
+ rtw89_core_scan_complete(rtwdev, vif, false);
mutex_unlock(&rtwdev->mutex);
}
@@ -653,6 +719,46 @@ static void rtw89_ops_reconfig_complete(struct ieee80211_hw *hw,
rtw89_ser_recfg_done(rtwdev);
}
+static int rtw89_ops_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *req)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ int ret = 0;
+
+ if (!rtwdev->fw.scan_offload)
+ return 1;
+
+ if (rtwdev->scanning)
+ return -EBUSY;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_hw_scan_start(rtwdev, vif, req);
+ ret = rtw89_hw_scan_offload(rtwdev, vif, true);
+ if (ret) {
+ rtw89_hw_scan_abort(rtwdev, vif);
+ rtw89_err(rtwdev, "HW scan failed with status: %d\n", ret);
+ }
+ mutex_unlock(&rtwdev->mutex);
+
+ return ret;
+}
+
+static void rtw89_ops_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ if (!rtwdev->fw.scan_offload)
+ return;
+
+ if (!rtwdev->scanning)
+ return;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_hw_scan_abort(rtwdev, vif);
+ mutex_unlock(&rtwdev->mutex);
+}
+
const struct ieee80211_ops rtw89_ops = {
.tx = rtw89_ops_tx,
.wake_tx_queue = rtw89_ops_wake_tx_queue,
@@ -663,6 +769,9 @@ const struct ieee80211_ops rtw89_ops = {
.remove_interface = rtw89_ops_remove_interface,
.configure_filter = rtw89_ops_configure_filter,
.bss_info_changed = rtw89_ops_bss_info_changed,
+ .start_ap = rtw89_ops_start_ap,
+ .stop_ap = rtw89_ops_stop_ap,
+ .set_tim = rtw89_ops_set_tim,
.conf_tx = rtw89_ops_conf_tx,
.sta_state = rtw89_ops_sta_state,
.set_key = rtw89_ops_set_key,
@@ -676,6 +785,8 @@ const struct ieee80211_ops rtw89_ops = {
.sw_scan_start = rtw89_ops_sw_scan_start,
.sw_scan_complete = rtw89_ops_sw_scan_complete,
.reconfig_complete = rtw89_ops_reconfig_complete,
+ .hw_scan = rtw89_ops_hw_scan,
+ .cancel_hw_scan = rtw89_ops_cancel_hw_scan,
.set_sar_specs = rtw89_ops_set_sar_specs,
};
EXPORT_SYMBOL(rtw89_ops);
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index 2c94762e4f93..e79bfc335b44 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -62,7 +62,7 @@ static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev,
struct rtw89_pci_tx_ring *tx_ring)
{
struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
- u32 addr_idx = bd_ring->addr_idx;
+ u32 addr_idx = bd_ring->addr.idx;
u32 cnt, idx;
idx = rtw89_read32(rtwdev, addr_idx);
@@ -121,7 +121,7 @@ static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev,
struct rtw89_pci_rx_ring *rx_ring)
{
struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
- u32 addr_idx = bd_ring->addr_idx;
+ u32 addr_idx = bd_ring->addr.idx;
u32 cnt, idx;
idx = rtw89_read32(rtwdev, addr_idx);
@@ -304,7 +304,7 @@ static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev,
cnt -= rx_cnt;
}
- rtw89_write16(rtwdev, bd_ring->addr_idx, bd_ring->wp);
+ rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}
static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
@@ -555,7 +555,7 @@ static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev,
cnt -= release_cnt;
}
- rtw89_write16(rtwdev, bd_ring->addr_idx, bd_ring->wp);
+ rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}
static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
@@ -598,7 +598,7 @@ static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
rx_ring = &rtwpci->rx_rings[i];
bd_ring = &rx_ring->bd_ring;
- reg_idx = rtw89_read32(rtwdev, bd_ring->addr_idx);
+ reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx);
host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx);
hw_idx_next = (hw_idx + 1) % bd_ring->len;
@@ -697,71 +697,110 @@ exit:
return irqret;
}
-#define case_TXCHADDRS(txch) \
- case RTW89_TXCH_##txch: \
- *addr_num = R_AX_##txch##_TXBD_NUM; \
- *addr_idx = R_AX_##txch##_TXBD_IDX; \
- *addr_bdram = R_AX_##txch##_BDRAM_CTRL; \
- *addr_desa_l = R_AX_##txch##_TXBD_DESA_L; \
- *addr_desa_h = R_AX_##txch##_TXBD_DESA_H; \
- break
-
-static int rtw89_pci_get_txch_addrs(enum rtw89_tx_channel txch,
- u32 *addr_num,
- u32 *addr_idx,
- u32 *addr_bdram,
- u32 *addr_desa_l,
- u32 *addr_desa_h)
-{
- switch (txch) {
- case_TXCHADDRS(ACH0);
- case_TXCHADDRS(ACH1);
- case_TXCHADDRS(ACH2);
- case_TXCHADDRS(ACH3);
- case_TXCHADDRS(ACH4);
- case_TXCHADDRS(ACH5);
- case_TXCHADDRS(ACH6);
- case_TXCHADDRS(ACH7);
- case_TXCHADDRS(CH8);
- case_TXCHADDRS(CH9);
- case_TXCHADDRS(CH10);
- case_TXCHADDRS(CH11);
- case_TXCHADDRS(CH12);
- default:
+#define DEF_TXCHADDRS_TYPE1(info, txch, v...) \
+ [RTW89_TXCH_##txch] = { \
+ .num = R_AX_##txch##_TXBD_NUM ##v, \
+ .idx = R_AX_##txch##_TXBD_IDX ##v, \
+ .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
+ .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
+ .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
+ }
+
+#define DEF_TXCHADDRS(info, txch, v...) \
+ [RTW89_TXCH_##txch] = { \
+ .num = R_AX_##txch##_TXBD_NUM, \
+ .idx = R_AX_##txch##_TXBD_IDX, \
+ .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
+ .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
+ .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
+ }
+
+#define DEF_RXCHADDRS(info, rxch, v...) \
+ [RTW89_RXCH_##rxch] = { \
+ .num = R_AX_##rxch##_RXBD_NUM ##v, \
+ .idx = R_AX_##rxch##_RXBD_IDX ##v, \
+ .desa_l = R_AX_##rxch##_RXBD_DESA_L ##v, \
+ .desa_h = R_AX_##rxch##_RXBD_DESA_H ##v, \
+ }
+
+const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = {
+ .tx = {
+ DEF_TXCHADDRS(info, ACH0),
+ DEF_TXCHADDRS(info, ACH1),
+ DEF_TXCHADDRS(info, ACH2),
+ DEF_TXCHADDRS(info, ACH3),
+ DEF_TXCHADDRS(info, ACH4),
+ DEF_TXCHADDRS(info, ACH5),
+ DEF_TXCHADDRS(info, ACH6),
+ DEF_TXCHADDRS(info, ACH7),
+ DEF_TXCHADDRS(info, CH8),
+ DEF_TXCHADDRS(info, CH9),
+ DEF_TXCHADDRS_TYPE1(info, CH10),
+ DEF_TXCHADDRS_TYPE1(info, CH11),
+ DEF_TXCHADDRS(info, CH12),
+ },
+ .rx = {
+ DEF_RXCHADDRS(info, RXQ),
+ DEF_RXCHADDRS(info, RPQ),
+ },
+};
+EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set);
+
+const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = {
+ .tx = {
+ DEF_TXCHADDRS(info, ACH0, _V1),
+ DEF_TXCHADDRS(info, ACH1, _V1),
+ DEF_TXCHADDRS(info, ACH2, _V1),
+ DEF_TXCHADDRS(info, ACH3, _V1),
+ DEF_TXCHADDRS(info, ACH4, _V1),
+ DEF_TXCHADDRS(info, ACH5, _V1),
+ DEF_TXCHADDRS(info, ACH6, _V1),
+ DEF_TXCHADDRS(info, ACH7, _V1),
+ DEF_TXCHADDRS(info, CH8, _V1),
+ DEF_TXCHADDRS(info, CH9, _V1),
+ DEF_TXCHADDRS_TYPE1(info, CH10, _V1),
+ DEF_TXCHADDRS_TYPE1(info, CH11, _V1),
+ DEF_TXCHADDRS(info, CH12, _V1),
+ },
+ .rx = {
+ DEF_RXCHADDRS(info, RXQ, _V1),
+ DEF_RXCHADDRS(info, RPQ, _V1),
+ },
+};
+EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1);
+
+#undef DEF_TXCHADDRS_TYPE1
+#undef DEF_TXCHADDRS
+#undef DEF_RXCHADDRS
+
+static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
+ enum rtw89_tx_channel txch,
+ const struct rtw89_pci_ch_dma_addr **addr)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+
+ if (txch >= RTW89_TXCH_NUM)
return -EINVAL;
- }
+
+ *addr = &info->dma_addr_set->tx[txch];
return 0;
}
-#undef case_TXCHADDRS
-
-#define case_RXCHADDRS(rxch) \
- case RTW89_RXCH_##rxch: \
- *addr_num = R_AX_##rxch##_RXBD_NUM; \
- *addr_idx = R_AX_##rxch##_RXBD_IDX; \
- *addr_desa_l = R_AX_##rxch##_RXBD_DESA_L; \
- *addr_desa_h = R_AX_##rxch##_RXBD_DESA_H; \
- break
-
-static int rtw89_pci_get_rxch_addrs(enum rtw89_rx_channel rxch,
- u32 *addr_num,
- u32 *addr_idx,
- u32 *addr_desa_l,
- u32 *addr_desa_h)
+static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev,
+ enum rtw89_rx_channel rxch,
+ const struct rtw89_pci_ch_dma_addr **addr)
{
- switch (rxch) {
- case_RXCHADDRS(RXQ);
- case_RXCHADDRS(RPQ);
- default:
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+
+ if (rxch >= RTW89_RXCH_NUM)
return -EINVAL;
- }
+
+ *addr = &info->dma_addr_set->rx[rxch];
return 0;
}
-#undef case_RXCHADDRS
-
static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring)
{
struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring;
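The per-channel register bookkeeping above replaces the old case_TXCHADDRS/case_RXCHADDRS switch macros with constant lookup tables (rtw89_pci_ch_dma_addr_set and its _v1 variant) reached through rtwdev->pci_info, so a chip with a relocated register layout only needs to supply another table. A minimal sketch of the lookup flow as consumed by the ring setup code, using the accessors already seen in this file; the write widths simply mirror the existing ring-reset code and are not guaranteed by this sketch:

static void example_program_ach0_ring(struct rtw89_dev *rtwdev,
				      dma_addr_t dma, u32 len)
{
	const struct rtw89_pci_ch_dma_addr *addr;

	if (rtw89_pci_get_txch_addrs(rtwdev, RTW89_TXCH_ACH0, &addr))
		return;	/* unknown channel */

	rtw89_write32(rtwdev, addr->desa_l, lower_32_bits(dma));
	rtw89_write32(rtwdev, addr->desa_h, upper_32_bits(dma));
	rtw89_write16(rtwdev, addr->num, len);
}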
@@ -837,7 +876,7 @@ static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_t
struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
u32 host_idx, addr;
- addr = bd_ring->addr_idx;
+ addr = bd_ring->addr.idx;
host_idx = bd_ring->wp;
rtw89_write16(rtwdev, addr, host_idx);
}
@@ -879,7 +918,7 @@ static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
* just use for loop with udelay here.
*/
for (i = 0; i < 60; i++) {
- cur_idx = rtw89_read32(rtwdev, bd_ring->addr_idx);
+ cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
if (cur_rp == bd_ring->wp)
return;
@@ -1140,9 +1179,9 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
tx_ring = &rtwpci->tx_rings[i];
bd_ring = &tx_ring->bd_ring;
bd_ram = &bd_ram_table[i];
- addr_num = bd_ring->addr_num;
- addr_bdram = bd_ring->addr_bdram;
- addr_desa_l = bd_ring->addr_desa_l;
+ addr_num = bd_ring->addr.num;
+ addr_bdram = bd_ring->addr.bdram;
+ addr_desa_l = bd_ring->addr.desa_l;
bd_ring->wp = 0;
bd_ring->rp = 0;
@@ -1158,8 +1197,8 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
for (i = 0; i < RTW89_RXCH_NUM; i++) {
rx_ring = &rtwpci->rx_rings[i];
bd_ring = &rx_ring->bd_ring;
- addr_num = bd_ring->addr_num;
- addr_desa_l = bd_ring->addr_desa_l;
+ addr_num = bd_ring->addr.num;
+ addr_desa_l = bd_ring->addr.desa_l;
bd_ring->wp = 0;
bd_ring->rp = 0;
rx_ring->diliver_skb = NULL;
@@ -1413,79 +1452,52 @@ static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u
return 0;
}
-static int rtw89_dbi_write8(struct rtw89_dev *rtwdev, u16 addr, u8 data)
+static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr,
+ u8 data)
{
- u16 write_addr;
- u16 remainder = addr & ~(B_AX_DBI_ADDR_MSK | B_AX_DBI_WREN_MSK);
- u8 flag;
- int ret;
-
- write_addr = addr & B_AX_DBI_ADDR_MSK;
- write_addr |= u16_encode_bits(BIT(remainder), B_AX_DBI_WREN_MSK);
- rtw89_write8(rtwdev, R_AX_DBI_WDATA + remainder, data);
- rtw89_write16(rtwdev, R_AX_DBI_FLAG, write_addr);
- rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_WFLAG >> 16);
-
- ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
- 10 * RTW89_PCI_WR_RETRY_CNT, false,
- rtwdev, R_AX_DBI_FLAG + 2);
- if (ret)
- WARN(flag, "failed to write to DBI register, addr=0x%04x\n",
- addr);
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
- return ret;
+ return pci_write_config_byte(pdev, addr, data);
}
-static int rtw89_dbi_read8(struct rtw89_dev *rtwdev, u16 addr, u8 *value)
+static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr,
+ u8 *value)
{
- u16 read_addr = addr & B_AX_DBI_ADDR_MSK;
- u8 flag;
- int ret;
-
- rtw89_write16(rtwdev, R_AX_DBI_FLAG, read_addr);
- rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_RFLAG >> 16);
-
- ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
- 10 * RTW89_PCI_WR_RETRY_CNT, false,
- rtwdev, R_AX_DBI_FLAG + 2);
-
- if (!ret) {
- read_addr = R_AX_DBI_RDATA + (addr & 3);
- *value = rtw89_read8(rtwdev, read_addr);
- } else {
- WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
- ret = -EIO;
- }
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
- return ret;
+ return pci_read_config_byte(pdev, addr, value);
}
-static int rtw89_dbi_write8_set(struct rtw89_dev *rtwdev, u16 addr, u8 bit)
+static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr,
+ u8 bit)
{
u8 value;
int ret;
- ret = rtw89_dbi_read8(rtwdev, addr, &value);
+ ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
if (ret)
return ret;
value |= bit;
- ret = rtw89_dbi_write8(rtwdev, addr, value);
+ ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
return ret;
}
-static int rtw89_dbi_write8_clr(struct rtw89_dev *rtwdev, u16 addr, u8 bit)
+static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr,
+ u8 bit)
{
u8 value;
int ret;
- ret = rtw89_dbi_read8(rtwdev, addr, &value);
+ ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
if (ret)
return ret;
value &= ~bit;
- ret = rtw89_dbi_write8(rtwdev, addr, value);
+ ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
return ret;
}
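The rtw89_dbi_* helpers, which tunneled accesses through the polled DBI window registers, are replaced by the standard PCI core config-space accessors; the set/clear wrappers above are plain read-modify-write operations on a single config byte. An equivalent generic helper, shown only to make the pattern explicit:

static int example_config_update_bits(struct pci_dev *pdev, int where,
				      u8 clear, u8 set)
{
	u8 val;
	int ret;

	ret = pci_read_config_byte(pdev, where, &val);
	if (ret)
		return ret;

	val = (val & ~clear) | set;
	return pci_write_config_byte(pdev, where, val);
}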
@@ -1542,9 +1554,10 @@ static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
rtwdev->chip->chip_id == RTL8852C)
return 0;
- ret = rtw89_dbi_read8(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
+ ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
if (ret) {
- rtw89_err(rtwdev, "[ERR]dbi_r8_pcie %X\n", RTW89_PCIE_PHY_RATE);
+ rtw89_err(rtwdev, "[ERR]pci config read %X\n",
+ RTW89_PCIE_PHY_RATE);
return ret;
}
@@ -1557,17 +1570,18 @@ static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
return -EOPNOTSUPP;
}
/* Disable L1BD */
- ret = rtw89_dbi_read8(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori);
+ ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori);
if (ret) {
- rtw89_err(rtwdev, "[ERR]dbi_r8_pcie %X\n", RTW89_PCIE_L1_CTRL);
+ rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL);
return ret;
}
if (bdr_ori & RTW89_PCIE_BIT_L1) {
- ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_L1_CTRL,
- bdr_ori & ~RTW89_PCIE_BIT_L1);
+ ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
+ bdr_ori & ~RTW89_PCIE_BIT_L1);
if (ret) {
- rtw89_err(rtwdev, "[ERR]dbi_w8_pcie %X\n", RTW89_PCIE_L1_CTRL);
+ rtw89_err(rtwdev, "[ERR]pci config write %X\n",
+ RTW89_PCIE_L1_CTRL);
return ret;
}
l1_flag = true;
@@ -1662,14 +1676,17 @@ static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
}
/* CLK delay = 0 */
- ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_CLK_CTRL, PCIE_CLKDLY_HW_0);
+ ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
+ PCIE_CLKDLY_HW_0);
end:
/* Set L1BD to ori */
if (l1_flag) {
- ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_L1_CTRL, bdr_ori);
+ ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
+ bdr_ori);
if (ret) {
- rtw89_err(rtwdev, "[ERR]dbi_w8_pcie %X\n", RTW89_PCIE_L1_CTRL);
+ rtw89_err(rtwdev, "[ERR]pci config write %X\n",
+ RTW89_PCIE_L1_CTRL);
return ret;
}
}
@@ -2210,14 +2227,10 @@ static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
u32 desc_size, u32 len,
enum rtw89_tx_channel txch)
{
+ const struct rtw89_pci_ch_dma_addr *txch_addr;
int ring_sz = desc_size * len;
u8 *head;
dma_addr_t dma;
- u32 addr_num;
- u32 addr_idx;
- u32 addr_bdram;
- u32 addr_desa_l;
- u32 addr_desa_h;
int ret;
ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch);
@@ -2226,8 +2239,7 @@ static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
goto err;
}
- ret = rtw89_pci_get_txch_addrs(txch, &addr_num, &addr_idx, &addr_bdram,
- &addr_desa_l, &addr_desa_h);
+ ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr);
if (ret) {
rtw89_err(rtwdev, "failed to get address of txch %d", txch);
goto err_free_wd_ring;
@@ -2244,11 +2256,7 @@ static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
tx_ring->bd_ring.dma = dma;
tx_ring->bd_ring.len = len;
tx_ring->bd_ring.desc_size = desc_size;
- tx_ring->bd_ring.addr_num = addr_num;
- tx_ring->bd_ring.addr_idx = addr_idx;
- tx_ring->bd_ring.addr_bdram = addr_bdram;
- tx_ring->bd_ring.addr_desa_l = addr_desa_l;
- tx_ring->bd_ring.addr_desa_h = addr_desa_h;
+ tx_ring->bd_ring.addr = *txch_addr;
tx_ring->bd_ring.wp = 0;
tx_ring->bd_ring.rp = 0;
tx_ring->txch = txch;
@@ -2300,20 +2308,16 @@ static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
struct rtw89_pci_rx_ring *rx_ring,
u32 desc_size, u32 len, u32 rxch)
{
+ const struct rtw89_pci_ch_dma_addr *rxch_addr;
struct sk_buff *skb;
u8 *head;
dma_addr_t dma;
- u32 addr_num;
- u32 addr_idx;
- u32 addr_desa_l;
- u32 addr_desa_h;
int ring_sz = desc_size * len;
int buf_sz = RTW89_PCI_RX_BUF_SIZE;
int i, allocated;
int ret;
- ret = rtw89_pci_get_rxch_addrs(rxch, &addr_num, &addr_idx,
- &addr_desa_l, &addr_desa_h);
+ ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr);
if (ret) {
rtw89_err(rtwdev, "failed to get address of rxch %d", rxch);
return ret;
@@ -2329,10 +2333,7 @@ static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
rx_ring->bd_ring.dma = dma;
rx_ring->bd_ring.len = len;
rx_ring->bd_ring.desc_size = desc_size;
- rx_ring->bd_ring.addr_num = addr_num;
- rx_ring->bd_ring.addr_idx = addr_idx;
- rx_ring->bd_ring.addr_desa_l = addr_desa_l;
- rx_ring->bd_ring.addr_desa_h = addr_desa_h;
+ rx_ring->bd_ring.addr = *rxch_addr;
rx_ring->bd_ring.wp = 0;
rx_ring->bd_ring.rp = 0;
rx_ring->buf_sz = buf_sz;
@@ -2552,17 +2553,17 @@ static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
if (rtw89_pci_disable_clkreq)
return;
- ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_CLK_CTRL,
- PCIE_CLKDLY_HW_30US);
+ ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
+ PCIE_CLKDLY_HW_30US);
if (ret)
rtw89_err(rtwdev, "failed to set CLKREQ Delay\n");
if (enable)
- ret = rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_L1_CTRL,
- RTW89_PCIE_BIT_CLK);
+ ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_CLK);
else
- ret = rtw89_dbi_write8_clr(rtwdev, RTW89_PCIE_L1_CTRL,
- RTW89_PCIE_BIT_CLK);
+ ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_CLK);
if (ret)
rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d",
enable ? "set" : "unset", ret);
@@ -2576,7 +2577,7 @@ static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
if (rtw89_pci_disable_aspm_l1)
return;
- ret = rtw89_dbi_read8(rtwdev, RTW89_PCIE_ASPM_CTRL, &value);
+ ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value);
if (ret)
rtw89_err(rtwdev, "failed to read ASPM Delay\n");
@@ -2584,16 +2585,16 @@ static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
value |= FIELD_PREP(RTW89_L1DLY_MASK, PCIE_L1DLY_16US) |
FIELD_PREP(RTW89_L0DLY_MASK, PCIE_L0SDLY_4US);
- ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_ASPM_CTRL, value);
+ ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value);
if (ret)
rtw89_err(rtwdev, "failed to read ASPM Delay\n");
if (enable)
- ret = rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_L1_CTRL,
- RTW89_PCIE_BIT_L1);
+ ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_L1);
else
- ret = rtw89_dbi_write8_clr(rtwdev, RTW89_PCIE_L1_CTRL,
- RTW89_PCIE_BIT_L1);
+ ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_L1);
if (ret)
rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d",
enable ? "set" : "unset", ret);
@@ -2657,11 +2658,11 @@ static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable)
int ret;
if (enable)
- ret = rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_TIMER_CTRL,
- RTW89_PCIE_BIT_L1SUB);
+ ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_TIMER_CTRL,
+ RTW89_PCIE_BIT_L1SUB);
else
- ret = rtw89_dbi_write8_clr(rtwdev, RTW89_PCIE_TIMER_CTRL,
- RTW89_PCIE_BIT_L1SUB);
+ ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_TIMER_CTRL,
+ RTW89_PCIE_BIT_L1SUB);
if (ret)
rtw89_err(rtwdev, "failed to %s L1SS, ret=%d",
enable ? "set" : "unset", ret);
@@ -2878,10 +2879,10 @@ static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev)
return;
/* Hardware need write the reg twice to ensure the setting work */
- rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_RST_MSTATE,
- RTW89_PCIE_BIT_CFG_RST_MSTATE);
- rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_RST_MSTATE,
- RTW89_PCIE_BIT_CFG_RST_MSTATE);
+ rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
+ RTW89_PCIE_BIT_CFG_RST_MSTATE);
+ rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
+ RTW89_PCIE_BIT_CFG_RST_MSTATE);
}
static int __maybe_unused rtw89_pci_resume(struct device *dev)
@@ -2932,11 +2933,11 @@ static const struct rtw89_hci_ops rtw89_pci_ops = {
.napi_poll = rtw89_pci_napi_poll,
};
-static int rtw89_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ieee80211_hw *hw;
struct rtw89_dev *rtwdev;
+ const struct rtw89_driver_info *info;
int driver_data_size;
int ret;
@@ -2957,13 +2958,9 @@ static int rtw89_pci_probe(struct pci_dev *pdev,
SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
- switch (id->driver_data) {
- case RTL8852A:
- rtwdev->chip = &rtw8852a_chip_info;
- break;
- default:
- return -ENOENT;
- }
+ info = (const struct rtw89_driver_info *)id->driver_data;
+ rtwdev->chip = info->chip;
+ rtwdev->pci_info = info->bus.pci;
ret = rtw89_core_init(rtwdev);
if (ret) {
@@ -3022,8 +3019,9 @@ err_release_hw:
return ret;
}
+EXPORT_SYMBOL(rtw89_pci_probe);
-static void rtw89_pci_remove(struct pci_dev *pdev)
+void rtw89_pci_remove(struct pci_dev *pdev)
{
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct rtw89_dev *rtwdev;
@@ -3038,22 +3036,7 @@ static void rtw89_pci_remove(struct pci_dev *pdev)
rtw89_core_deinit(rtwdev);
ieee80211_free_hw(hw);
}
-
-static const struct pci_device_id rtw89_pci_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8852), .driver_data = RTL8852A },
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xa85a), .driver_data = RTL8852A },
- {},
-};
-MODULE_DEVICE_TABLE(pci, rtw89_pci_id_table);
-
-static struct pci_driver rtw89_pci_driver = {
- .name = "rtw89_pci",
- .id_table = rtw89_pci_id_table,
- .probe = rtw89_pci_probe,
- .remove = rtw89_pci_remove,
- .driver.pm = &rtw89_pm_ops,
-};
-module_pci_driver(rtw89_pci_driver);
+EXPORT_SYMBOL(rtw89_pci_remove);
MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ax wireless PCI driver");
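With the ID table and pci_driver removed from the common PCI code and rtw89_pci_probe()/rtw89_pci_remove() exported, each chip is expected to provide a thin glue module whose pci_device_id entries carry a struct rtw89_driver_info in driver_data. A hedged sketch of such a module; apart from rtw89_pci_probe/remove, rtw89_pm_ops, rtw8852a_chip_info and the Realtek IDs taken from the removed table, the identifiers below are assumptions:

static const struct rtw89_pci_info example_8852a_pci_info = {
	.dma_addr_set = &rtw89_pci_ch_dma_addr_set,
};

static const struct rtw89_driver_info example_8852a_info = {
	.chip = &rtw8852a_chip_info,
	.bus = {
		.pci = &example_8852a_pci_info,
	},
};

static const struct pci_device_id example_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8852),
	  .driver_data = (kernel_ulong_t)&example_8852a_info },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xa85a),
	  .driver_data = (kernel_ulong_t)&example_8852a_info },
	{},
};
MODULE_DEVICE_TABLE(pci, example_id_table);

static struct pci_driver example_pci_driver = {
	.name		= "rtw89_8852ae",
	.id_table	= example_id_table,
	.probe		= rtw89_pci_probe,
	.remove		= rtw89_pci_remove,
	.driver.pm	= &rtw89_pm_ops,
};
module_pci_driver(example_pci_driver);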
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index 20e6767ea5c4..b84acd0d0582 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -130,6 +130,10 @@
#define R_AX_CH10_TXBD_IDX 0x137C /* Management Queue band 1 */
#define R_AX_CH11_TXBD_IDX 0x1380 /* HI Queue band 1 */
#define R_AX_CH12_TXBD_IDX 0x1080 /* FWCMD Queue */
+#define R_AX_CH10_TXBD_IDX_V1 0x11D0
+#define R_AX_CH11_TXBD_IDX_V1 0x11D4
+#define R_AX_RXQ_RXBD_IDX_V1 0x1218
+#define R_AX_RPQ_RXBD_IDX_V1 0x121C
#define TXBD_HW_IDX_MASK GENMASK(27, 16)
#define TXBD_HOST_IDX_MASK GENMASK(11, 0)
@@ -163,6 +167,36 @@
#define R_AX_RXQ_RXBD_DESA_H 0x1104
#define R_AX_RPQ_RXBD_DESA_L 0x1108
#define R_AX_RPQ_RXBD_DESA_H 0x110C
+#define R_AX_RXQ_RXBD_DESA_L_V1 0x1220
+#define R_AX_RXQ_RXBD_DESA_H_V1 0x1224
+#define R_AX_RPQ_RXBD_DESA_L_V1 0x1228
+#define R_AX_RPQ_RXBD_DESA_H_V1 0x122C
+#define R_AX_ACH0_TXBD_DESA_L_V1 0x1230
+#define R_AX_ACH0_TXBD_DESA_H_V1 0x1234
+#define R_AX_ACH1_TXBD_DESA_L_V1 0x1238
+#define R_AX_ACH1_TXBD_DESA_H_V1 0x123C
+#define R_AX_ACH2_TXBD_DESA_L_V1 0x1240
+#define R_AX_ACH2_TXBD_DESA_H_V1 0x1244
+#define R_AX_ACH3_TXBD_DESA_L_V1 0x1248
+#define R_AX_ACH3_TXBD_DESA_H_V1 0x124C
+#define R_AX_ACH4_TXBD_DESA_L_V1 0x1250
+#define R_AX_ACH4_TXBD_DESA_H_V1 0x1254
+#define R_AX_ACH5_TXBD_DESA_L_V1 0x1258
+#define R_AX_ACH5_TXBD_DESA_H_V1 0x125C
+#define R_AX_ACH6_TXBD_DESA_L_V1 0x1260
+#define R_AX_ACH6_TXBD_DESA_H_V1 0x1264
+#define R_AX_ACH7_TXBD_DESA_L_V1 0x1268
+#define R_AX_ACH7_TXBD_DESA_H_V1 0x126C
+#define R_AX_CH8_TXBD_DESA_L_V1 0x1270
+#define R_AX_CH8_TXBD_DESA_H_V1 0x1274
+#define R_AX_CH9_TXBD_DESA_L_V1 0x1278
+#define R_AX_CH9_TXBD_DESA_H_V1 0x127C
+#define R_AX_CH12_TXBD_DESA_L_V1 0x1280
+#define R_AX_CH12_TXBD_DESA_H_V1 0x1284
+#define R_AX_CH10_TXBD_DESA_L_V1 0x1458
+#define R_AX_CH10_TXBD_DESA_H_V1 0x145C
+#define R_AX_CH11_TXBD_DESA_L_V1 0x1460
+#define R_AX_CH11_TXBD_DESA_H_V1 0x1464
#define B_AX_DESC_NUM_MSK GENMASK(11, 0)
#define R_AX_RXQ_RXBD_NUM 0x1020
@@ -180,6 +214,10 @@
#define R_AX_CH10_TXBD_NUM 0x1338
#define R_AX_CH11_TXBD_NUM 0x133A
#define R_AX_CH12_TXBD_NUM 0x1038
+#define R_AX_RXQ_RXBD_NUM_V1 0x1210
+#define R_AX_RPQ_RXBD_NUM_V1 0x1212
+#define R_AX_CH10_TXBD_NUM_V1 0x1438
+#define R_AX_CH11_TXBD_NUM_V1 0x143A
#define R_AX_ACH0_BDRAM_CTRL 0x1200
#define R_AX_ACH1_BDRAM_CTRL 0x1204
@@ -194,6 +232,19 @@
#define R_AX_CH10_BDRAM_CTRL 0x1320
#define R_AX_CH11_BDRAM_CTRL 0x1324
#define R_AX_CH12_BDRAM_CTRL 0x1228
+#define R_AX_ACH0_BDRAM_CTRL_V1 0x1300
+#define R_AX_ACH1_BDRAM_CTRL_V1 0x1304
+#define R_AX_ACH2_BDRAM_CTRL_V1 0x1308
+#define R_AX_ACH3_BDRAM_CTRL_V1 0x130C
+#define R_AX_ACH4_BDRAM_CTRL_V1 0x1310
+#define R_AX_ACH5_BDRAM_CTRL_V1 0x1314
+#define R_AX_ACH6_BDRAM_CTRL_V1 0x1318
+#define R_AX_ACH7_BDRAM_CTRL_V1 0x131C
+#define R_AX_CH8_BDRAM_CTRL_V1 0x1320
+#define R_AX_CH9_BDRAM_CTRL_V1 0x1324
+#define R_AX_CH12_BDRAM_CTRL_V1 0x1328
+#define R_AX_CH10_BDRAM_CTRL_V1 0x1420
+#define R_AX_CH11_BDRAM_CTRL_V1 0x1424
#define BDRAM_SIDX_MASK GENMASK(7, 0)
#define BDRAM_MAX_MASK GENMASK(15, 8)
#define BDRAM_MIN_MASK GENMASK(23, 16)
@@ -382,6 +433,23 @@ enum rtw89_pcie_clkdly_hw {
PCIE_CLKDLY_HW_200US = 0x5,
};
+struct rtw89_pci_ch_dma_addr {
+ u32 num;
+ u32 idx;
+ u32 bdram;
+ u32 desa_l;
+ u32 desa_h;
+};
+
+struct rtw89_pci_ch_dma_addr_set {
+ struct rtw89_pci_ch_dma_addr tx[RTW89_TXCH_NUM];
+ struct rtw89_pci_ch_dma_addr rx[RTW89_RXCH_NUM];
+};
+
+struct rtw89_pci_info {
+ const struct rtw89_pci_ch_dma_addr_set *dma_addr_set;
+};
+
struct rtw89_pci_bd_ram {
u8 start_idx;
u8 max_num;
@@ -469,11 +537,7 @@ struct rtw89_pci_dma_ring {
u8 desc_size;
dma_addr_t dma;
- u32 addr_num;
- u32 addr_idx;
- u32 addr_bdram;
- u32 addr_desa_l;
- u32 addr_desa_h;
+ struct rtw89_pci_ch_dma_addr addr;
u32 len;
u32 wp; /* host idx */
@@ -626,5 +690,12 @@ static inline bool rtw89_pci_ltr_is_err_reg_val(u32 val)
}
extern const struct dev_pm_ops rtw89_pm_ops;
+extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set;
+extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1;
+
+struct pci_device_id;
+
+int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+void rtw89_pci_remove(struct pci_dev *pdev);
#endif
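A chip wired to the relocated V1 register map (the *_V1 defines added above) would point its rtw89_pci_info at the second exported table; a minimal assumed example:

const struct rtw89_pci_info example_v1_pci_info = {
	.dma_addr_set = &rtw89_pci_ch_dma_addr_set_v1,
};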
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index 147009888de0..ac211d897311 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -4,6 +4,7 @@
#include "debug.h"
#include "fw.h"
+#include "mac.h"
#include "phy.h"
#include "ps.h"
#include "reg.h"
@@ -117,17 +118,28 @@ static u64 rtw89_phy_ra_mask_rssi(struct rtw89_dev *rtwdev, u8 rssi,
else if (rssi_lv == 1)
return 0xfffffffffffffff0ULL;
else if (rssi_lv == 2)
- return 0xffffffffffffffe0ULL;
+ return 0xffffffffffffefe0ULL;
else if (rssi_lv == 3)
- return 0xffffffffffffffc0ULL;
+ return 0xffffffffffffcfc0ULL;
else if (rssi_lv == 4)
- return 0xffffffffffffff80ULL;
+ return 0xffffffffffff8f80ULL;
else if (rssi_lv >= 5)
- return 0xffffffffffffff00ULL;
+ return 0xffffffffffff0f00ULL;
return 0xffffffffffffffffULL;
}
+static u64 rtw89_phy_ra_mask_recover(u64 ra_mask, u64 ra_mask_bak)
+{
+ if ((ra_mask & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES)) == 0)
+ ra_mask |= (ra_mask_bak & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));
+
+ if (ra_mask == 0)
+ ra_mask |= (ra_mask_bak & (RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));
+
+ return ra_mask;
+}
+
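rtw89_phy_ra_mask_recover() guards against the rate-adaptation mask collapsing after the RSSI and mode masks are applied below: if every non-legacy bit was stripped, the HT/VHT/HE bits from the pre-mask backup are restored, and if the mask still ends up empty the backup's CCK/OFDM bits are used instead, so the firmware is never handed a rate mask with no valid entry.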
static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta)
{
struct rtw89_hal *hal = &rtwdev->hal;
@@ -150,6 +162,11 @@ static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtw
cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_5GHZ].legacy,
RA_MASK_OFDM_RATES);
break;
+ case RTW89_BAND_6G:
+ band = NL80211_BAND_6GHZ;
+ cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_6GHZ].legacy,
+ RA_MASK_OFDM_RATES);
+ break;
default:
rtw89_warn(rtwdev, "unhandled band type %d\n", hal->current_band_type);
return -1;
@@ -194,8 +211,8 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
struct rtw89_ra_info *ra = &rtwsta->ra;
const u64 *high_rate_masks = rtw89_ra_mask_ht_rates;
u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi);
- u64 high_rate_mask = 0;
u64 ra_mask = 0;
+ u64 ra_mask_bak;
u8 mode = 0;
u8 csi_mode = RTW89_RA_RPT_MODE_LEGACY;
u8 bw_mode = 0;
@@ -243,37 +260,54 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
ldpc_en = 1;
}
- if (rtwdev->hal.current_band_type == RTW89_BAND_2G) {
+ switch (rtwdev->hal.current_band_type) {
+ case RTW89_BAND_2G:
+ ra_mask |= sta->supp_rates[NL80211_BAND_2GHZ];
if (sta->supp_rates[NL80211_BAND_2GHZ] <= 0xf)
mode |= RTW89_RA_MODE_CCK;
else
mode |= RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM;
- } else {
+ break;
+ case RTW89_BAND_5G:
+ ra_mask |= (u64)sta->supp_rates[NL80211_BAND_5GHZ] << 4;
mode |= RTW89_RA_MODE_OFDM;
+ break;
+ case RTW89_BAND_6G:
+ ra_mask |= (u64)sta->supp_rates[NL80211_BAND_6GHZ] << 4;
+ mode |= RTW89_RA_MODE_OFDM;
+ break;
+ default:
+ rtw89_err(rtwdev, "Unknown band type\n");
+ break;
}
+ ra_mask_bak = ra_mask;
+
if (mode >= RTW89_RA_MODE_HT) {
+ u64 mask = 0;
for (i = 0; i < rtwdev->hal.tx_nss; i++)
- high_rate_mask |= high_rate_masks[i];
- ra_mask &= high_rate_mask;
+ mask |= high_rate_masks[i];
if (mode & RTW89_RA_MODE_OFDM)
- ra_mask |= RA_MASK_SUBOFDM_RATES;
+ mask |= RA_MASK_SUBOFDM_RATES;
if (mode & RTW89_RA_MODE_CCK)
- ra_mask |= RA_MASK_SUBCCK_RATES;
+ mask |= RA_MASK_SUBCCK_RATES;
+ ra_mask &= mask;
} else if (mode & RTW89_RA_MODE_OFDM) {
- if (mode & RTW89_RA_MODE_CCK)
- ra_mask |= RA_MASK_SUBCCK_RATES;
- ra_mask |= RA_MASK_OFDM_RATES;
- } else {
- ra_mask = RA_MASK_CCK_RATES;
+ ra_mask &= (RA_MASK_OFDM_RATES | RA_MASK_SUBCCK_RATES);
}
- if (mode != RTW89_RA_MODE_CCK) {
+ if (mode != RTW89_RA_MODE_CCK)
ra_mask &= rtw89_phy_ra_mask_rssi(rtwdev, rssi, 0);
- ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta);
- }
+
+ ra_mask = rtw89_phy_ra_mask_recover(ra_mask, ra_mask_bak);
+ ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta);
switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ bw_mode = RTW89_CHANNEL_WIDTH_160;
+ sgi = sta->vht_cap.vht_supported &&
+ (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160);
+ break;
case IEEE80211_STA_RX_BW_80:
bw_mode = RTW89_CHANNEL_WIDTH_80;
sgi = sta->vht_cap.vht_supported &&
@@ -568,6 +602,13 @@ u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
return txsc_idx;
}
+EXPORT_SYMBOL(rtw89_phy_get_txsc);
+
+static bool rtw89_phy_check_swsi_busy(struct rtw89_dev *rtwdev)
+{
+ return !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_W_BUSY_V1) ||
+ !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_R_BUSY_V1);
+}
u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask)
@@ -591,6 +632,56 @@ u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
}
EXPORT_SYMBOL(rtw89_phy_read_rf);
+static u32 rtw89_phy_read_rf_a(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path rf_path, u32 addr, u32 mask)
+{
+ bool busy;
+ bool done;
+ u32 val;
+ int ret;
+
+ ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
+ 1, 30, false, rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "read rf busy swsi\n");
+ return INV_RF_DATA;
+ }
+
+ mask &= RFREG_MASK;
+
+ val = FIELD_PREP(B_SWSI_READ_ADDR_PATH_V1, rf_path) |
+ FIELD_PREP(B_SWSI_READ_ADDR_ADDR_V1, addr);
+ rtw89_phy_write32_mask(rtwdev, R_SWSI_READ_ADDR_V1, B_SWSI_READ_ADDR_V1, val);
+ udelay(2);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done, 1,
+ 30, false, rtwdev, R_SWSI_V1,
+ B_SWSI_R_DATA_DONE_V1);
+ if (ret) {
+ rtw89_err(rtwdev, "read swsi busy\n");
+ return INV_RF_DATA;
+ }
+
+ return rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, mask);
+}
+
+u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask)
+{
+ bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr);
+
+ if (rf_path >= rtwdev->chip->rf_path_num) {
+ rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+ return INV_RF_DATA;
+ }
+
+ if (ad_sel)
+ return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);
+ else
+ return rtw89_phy_read_rf_a(rtwdev, rf_path, addr, mask);
+}
+EXPORT_SYMBOL(rtw89_phy_read_rf_v1);
+
bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data)
{
@@ -616,6 +707,60 @@ bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
}
EXPORT_SYMBOL(rtw89_phy_write_rf);
+static bool rtw89_phy_write_rf_a(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path rf_path, u32 addr, u32 mask,
+ u32 data)
+{
+ u8 bit_shift;
+ u32 val;
+ bool busy, b_msk_en = false;
+ int ret;
+
+ ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
+ 1, 30, false, rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "write rf busy swsi\n");
+ return false;
+ }
+
+ data &= RFREG_MASK;
+ mask &= RFREG_MASK;
+
+ if (mask != RFREG_MASK) {
+ b_msk_en = true;
+ rtw89_phy_write32_mask(rtwdev, R_SWSI_BIT_MASK_V1, RFREG_MASK,
+ mask);
+ bit_shift = __ffs(mask);
+ data = (data << bit_shift) & RFREG_MASK;
+ }
+
+ val = FIELD_PREP(B_SWSI_DATA_BIT_MASK_EN_V1, b_msk_en) |
+ FIELD_PREP(B_SWSI_DATA_PATH_V1, rf_path) |
+ FIELD_PREP(B_SWSI_DATA_ADDR_V1, addr) |
+ FIELD_PREP(B_SWSI_DATA_VAL_V1, data);
+
+ rtw89_phy_write32_mask(rtwdev, R_SWSI_DATA_V1, MASKDWORD, val);
+
+ return true;
+}
+
+bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask, u32 data)
+{
+ bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr);
+
+ if (rf_path >= rtwdev->chip->rf_path_num) {
+ rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+ return false;
+ }
+
+ if (ad_sel)
+ return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
+ else
+ return rtw89_phy_write_rf_a(rtwdev, rf_path, addr, mask, data);
+}
+EXPORT_SYMBOL(rtw89_phy_write_rf_v1);
+
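The _v1 accessors split RF register I/O on an address-select bit: addresses with RTW89_RF_ADDR_ADSEL_MASK (bit 16) set take the existing direct path, everything else goes through the serial SWSI interface, a poll-busy / issue-command / poll-done sequence built on read_poll_timeout_atomic(). The exported functions are presumably meant to be plugged into a newer chip's ops table; a sketch under that assumption (the .read_rf/.write_rf member names are assumed from how the generic rtw89_read_rf()/rtw89_write_rf() wrappers dispatch, and the rest of the ops table is elided):

static const struct rtw89_chip_ops example_chip_ops = {
	.read_rf	= rtw89_phy_read_rf_v1,
	.write_rf	= rtw89_phy_write_rf_v1,
	/* remaining chip ops omitted for brevity */
};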
static void rtw89_phy_bb_reset(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx)
{
@@ -717,6 +862,21 @@ static void rtw89_phy_config_rf_reg(struct rtw89_dev *rtwdev,
}
}
+void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
+ const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path,
+ void *extra_data)
+{
+ rtw89_write_rf(rtwdev, rf_path, reg->addr, RFREG_MASK, reg->data);
+
+ if (reg->addr < 0x100)
+ return;
+
+ rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
+ (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
+}
+EXPORT_SYMBOL(rtw89_phy_config_rf_reg_v1);
+
static int rtw89_phy_sel_headline(struct rtw89_dev *rtwdev,
const struct rtw89_phy_table *table,
u32 *headline_size, u32 *headline_idx,
@@ -888,6 +1048,8 @@ static u32 rtw89_phy_nctl_poll(struct rtw89_dev *rtwdev)
void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev)
{
+ void (*config)(struct rtw89_dev *rtwdev, const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path, void *data);
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_phy_table *rf_table;
struct rtw89_fw_h2c_rf_reg_info *rf_reg_info;
@@ -898,13 +1060,13 @@ void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev)
return;
for (path = RF_PATH_A; path < chip->rf_path_num; path++) {
- rf_reg_info->rf_path = path;
rf_table = chip->rf_table[path];
- rtw89_phy_init_reg(rtwdev, rf_table, rtw89_phy_config_rf_reg,
- (void *)rf_reg_info);
+ rf_reg_info->rf_path = rf_table->rf_path;
+ config = rf_table->config ? rf_table->config : rtw89_phy_config_rf_reg;
+ rtw89_phy_init_reg(rtwdev, rf_table, config, (void *)rf_reg_info);
if (rtw89_phy_config_rf_reg_fw(rtwdev, rf_reg_info))
rtw89_warn(rtwdev, "rf path %d reg h2c config failed\n",
- path);
+ rf_reg_info->rf_path);
}
kfree(rf_reg_info);
}
@@ -972,6 +1134,7 @@ void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
addr += rtw89_phy0_phy1_offset(rtwdev, addr);
rtw89_phy_write32_mask(rtwdev, addr, mask, data);
}
+EXPORT_SYMBOL(rtw89_phy_write32_idx);
void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
u32 val)
@@ -995,6 +1158,7 @@ void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, reg3->addr, reg3->mask, reg3->data);
}
}
+EXPORT_SYMBOL(rtw89_phy_write_reg3_tbl);
const u8 rtw89_rs_idx_max[] = {
[RTW89_RS_CCK] = RTW89_RATE_CCK_MAX,
@@ -1003,6 +1167,7 @@ const u8 rtw89_rs_idx_max[] = {
[RTW89_RS_HEDCM] = RTW89_RATE_HEDCM_MAX,
[RTW89_RS_OFFSET] = RTW89_RATE_OFFSET_MAX,
};
+EXPORT_SYMBOL(rtw89_rs_idx_max);
const u8 rtw89_rs_nss_max[] = {
[RTW89_RS_CCK] = 1,
@@ -1011,6 +1176,7 @@ const u8 rtw89_rs_nss_max[] = {
[RTW89_RS_HEDCM] = RTW89_NSS_HEDCM_MAX,
[RTW89_RS_OFFSET] = 1,
};
+EXPORT_SYMBOL(rtw89_rs_nss_max);
static const u8 _byr_of_rs[] = {
[RTW89_RS_CCK] = offsetof(struct rtw89_txpwr_byrate, cck),
@@ -1044,6 +1210,7 @@ void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
}
}
}
+EXPORT_SYMBOL(rtw89_phy_load_txpwr_byrate);
#define _phy_txpwr_rf_to_mac(rtwdev, txpwr_rf) \
({ \
@@ -1074,9 +1241,38 @@ s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev,
return _phy_txpwr_rf_to_mac(rtwdev, byr[idx]);
}
+EXPORT_SYMBOL(rtw89_phy_read_txpwr_byrate);
+
+static u8 rtw89_channel_6g_to_idx(struct rtw89_dev *rtwdev, u8 channel_6g)
+{
+ switch (channel_6g) {
+ case 1 ... 29:
+ return (channel_6g - 1) / 2;
+ case 33 ... 61:
+ return (channel_6g - 3) / 2;
+ case 65 ... 93:
+ return (channel_6g - 5) / 2;
+ case 97 ... 125:
+ return (channel_6g - 7) / 2;
+ case 129 ... 157:
+ return (channel_6g - 9) / 2;
+ case 161 ... 189:
+ return (channel_6g - 11) / 2;
+ case 193 ... 221:
+ return (channel_6g - 13) / 2;
+ case 225 ... 253:
+ return (channel_6g - 15) / 2;
+ default:
+ rtw89_warn(rtwdev, "unknown 6g channel: %d\n", channel_6g);
+ return 0;
+ }
+}
-static u8 rtw89_channel_to_idx(struct rtw89_dev *rtwdev, u8 channel)
+static u8 rtw89_channel_to_idx(struct rtw89_dev *rtwdev, u8 band, u8 channel)
{
+ if (band == RTW89_BAND_6G)
+ return rtw89_channel_6g_to_idx(rtwdev, channel);
+
switch (channel) {
case 1 ... 14:
return channel - 1;
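rtw89_channel_6g_to_idx() compresses the sparse 6 GHz channel numbering into a dense table index: each block of fifteen odd-numbered channels maps onto consecutive indexes, e.g. channel 1 -> 0, channel 29 -> 14, channel 33 -> (33 - 3) / 2 = 15, channel 37 -> 17, so the 6 GHz power-limit tables stay contiguous.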
@@ -1096,8 +1292,8 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev,
u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- u8 ch_idx = rtw89_channel_to_idx(rtwdev, ch);
u8 band = rtwdev->hal.current_band_type;
+ u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
u8 regd = rtw89_regd_get(rtwdev, band);
s8 lmt = 0, sar;
@@ -1114,6 +1310,12 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev,
lmt = (*chip->txpwr_lmt_5g)[bw][ntx][rs][bf]
[RTW89_WW][ch_idx];
break;
+ case RTW89_BAND_6G:
+ lmt = (*chip->txpwr_lmt_6g)[bw][ntx][rs][bf][regd][ch_idx];
+ if (!lmt)
+ lmt = (*chip->txpwr_lmt_6g)[bw][ntx][rs][bf]
+ [RTW89_WW][ch_idx];
+ break;
default:
rtw89_warn(rtwdev, "unknown band type: %d\n", band);
return 0;
@@ -1124,6 +1326,7 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev,
return min(lmt, sar);
}
+EXPORT_SYMBOL(rtw89_phy_read_txpwr_limit);
#define __fill_txpwr_limit_nonbf_bf(ptr, bw, ntx, rs, ch) \
do { \
@@ -1151,14 +1354,14 @@ static void rtw89_phy_fill_txpwr_limit_20m(struct rtw89_dev *rtwdev,
static void rtw89_phy_fill_txpwr_limit_40m(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit *lmt,
- u8 ntx, u8 ch)
+ u8 ntx, u8 ch, u8 pri_ch)
{
__fill_txpwr_limit_nonbf_bf(lmt->cck_20m, RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_CCK, ch - 2);
__fill_txpwr_limit_nonbf_bf(lmt->cck_40m, RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_CCK, ch);
__fill_txpwr_limit_nonbf_bf(lmt->ofdm, RTW89_CHANNEL_WIDTH_20,
- ntx, RTW89_RS_OFDM, ch - 2);
+ ntx, RTW89_RS_OFDM, pri_ch);
__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch - 2);
__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], RTW89_CHANNEL_WIDTH_20,
@@ -1169,14 +1372,14 @@ static void rtw89_phy_fill_txpwr_limit_40m(struct rtw89_dev *rtwdev,
static void rtw89_phy_fill_txpwr_limit_80m(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit *lmt,
- u8 ntx, u8 ch)
+ u8 ntx, u8 ch, u8 pri_ch)
{
s8 val_0p5_n[RTW89_BF_NUM];
s8 val_0p5_p[RTW89_BF_NUM];
u8 i;
__fill_txpwr_limit_nonbf_bf(lmt->ofdm, RTW89_CHANNEL_WIDTH_20,
- ntx, RTW89_RS_OFDM, ch - 6);
+ ntx, RTW89_RS_OFDM, pri_ch);
__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch - 6);
__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], RTW89_CHANNEL_WIDTH_20,
@@ -1201,10 +1404,82 @@ static void rtw89_phy_fill_txpwr_limit_80m(struct rtw89_dev *rtwdev,
lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]);
}
+static void rtw89_phy_fill_txpwr_limit_160m(struct rtw89_dev *rtwdev,
+ struct rtw89_txpwr_limit *lmt,
+ u8 ntx, u8 ch, u8 pri_ch)
+{
+ s8 val_0p5_n[RTW89_BF_NUM];
+ s8 val_0p5_p[RTW89_BF_NUM];
+ s8 val_2p5_n[RTW89_BF_NUM];
+ s8 val_2p5_p[RTW89_BF_NUM];
+ u8 i;
+
+ /* fill ofdm section */
+ __fill_txpwr_limit_nonbf_bf(lmt->ofdm, RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_OFDM, pri_ch);
+
+ /* fill mcs 20m section */
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch - 14);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch - 10);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch - 6);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch - 2);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[4], RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch + 2);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[5], RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch + 6);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[6], RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch + 10);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[7], RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch + 14);
+
+ /* fill mcs 40m section */
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch - 12);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch - 4);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[2], RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch + 4);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[3], RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch + 12);
+
+ /* fill mcs 80m section */
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], RTW89_CHANNEL_WIDTH_80,
+ ntx, RTW89_RS_MCS, ch - 8);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[1], RTW89_CHANNEL_WIDTH_80,
+ ntx, RTW89_RS_MCS, ch + 8);
+
+ /* fill mcs 160m section */
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_160m, RTW89_CHANNEL_WIDTH_160,
+ ntx, RTW89_RS_MCS, ch);
+
+ /* fill mcs 40m 0p5 section */
+ __fill_txpwr_limit_nonbf_bf(val_0p5_n, RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch - 4);
+ __fill_txpwr_limit_nonbf_bf(val_0p5_p, RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch + 4);
+
+ for (i = 0; i < RTW89_BF_NUM; i++)
+ lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]);
+
+ /* fill mcs 40m 2p5 section */
+ __fill_txpwr_limit_nonbf_bf(val_2p5_n, RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch - 8);
+ __fill_txpwr_limit_nonbf_bf(val_2p5_p, RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch + 8);
+
+ for (i = 0; i < RTW89_BF_NUM; i++)
+ lmt->mcs_40m_2p5[i] = min_t(s8, val_2p5_n[i], val_2p5_p[i]);
+}
+
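The 160 MHz fill routine walks the same layout as the narrower cases, with offsets expressed in channel-number units of 5 MHz: the eight 20 MHz sub-channel centers sit at ch +/- 2, 6, 10 and 14 (10/30/50/70 MHz from the 160 MHz center), the four 40 MHz centers at ch +/- 4 and +/- 12, the two 80 MHz halves at ch +/- 8, and the full-width entry at ch itself; the mcs_40m_0p5/2p5 entries take the element-wise minimum of 40 MHz limits sampled at ch +/- 4 and ch +/- 8 respectively.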
void rtw89_phy_fill_txpwr_limit(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit *lmt,
u8 ntx)
{
+ u8 pri_ch = rtwdev->hal.current_primary_channel;
u8 ch = rtwdev->hal.current_channel;
u8 bw = rtwdev->hal.current_band_width;
@@ -1215,20 +1490,24 @@ void rtw89_phy_fill_txpwr_limit(struct rtw89_dev *rtwdev,
rtw89_phy_fill_txpwr_limit_20m(rtwdev, lmt, ntx, ch);
break;
case RTW89_CHANNEL_WIDTH_40:
- rtw89_phy_fill_txpwr_limit_40m(rtwdev, lmt, ntx, ch);
+ rtw89_phy_fill_txpwr_limit_40m(rtwdev, lmt, ntx, ch, pri_ch);
break;
case RTW89_CHANNEL_WIDTH_80:
- rtw89_phy_fill_txpwr_limit_80m(rtwdev, lmt, ntx, ch);
+ rtw89_phy_fill_txpwr_limit_80m(rtwdev, lmt, ntx, ch, pri_ch);
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ rtw89_phy_fill_txpwr_limit_160m(rtwdev, lmt, ntx, ch, pri_ch);
break;
}
}
+EXPORT_SYMBOL(rtw89_phy_fill_txpwr_limit);
static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev,
u8 ru, u8 ntx, u8 ch)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- u8 ch_idx = rtw89_channel_to_idx(rtwdev, ch);
u8 band = rtwdev->hal.current_band_type;
+ u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
u8 regd = rtw89_regd_get(rtwdev, band);
s8 lmt_ru = 0, sar;
@@ -1245,6 +1524,12 @@ static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev,
lmt_ru = (*chip->txpwr_lmt_ru_5g)[ru][ntx]
[RTW89_WW][ch_idx];
break;
+ case RTW89_BAND_6G:
+ lmt_ru = (*chip->txpwr_lmt_ru_6g)[ru][ntx][regd][ch_idx];
+ if (!lmt_ru)
+ lmt_ru = (*chip->txpwr_lmt_ru_6g)[ru][ntx]
+ [RTW89_WW][ch_idx];
+ break;
default:
rtw89_warn(rtwdev, "unknown band type: %d\n", band);
return 0;
@@ -1319,6 +1604,31 @@ rtw89_phy_fill_txpwr_limit_ru_80m(struct rtw89_dev *rtwdev,
ntx, ch + 6);
}
+static void
+rtw89_phy_fill_txpwr_limit_ru_160m(struct rtw89_dev *rtwdev,
+ struct rtw89_txpwr_limit_ru *lmt_ru,
+ u8 ntx, u8 ch)
+{
+ static const int ofst[] = { -14, -10, -6, -2, 2, 6, 10, 14 };
+ int i;
+
+ static_assert(ARRAY_SIZE(ofst) == RTW89_RU_SEC_NUM);
+ for (i = 0; i < RTW89_RU_SEC_NUM; i++) {
+ lmt_ru->ru26[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev,
+ RTW89_RU26,
+ ntx,
+ ch + ofst[i]);
+ lmt_ru->ru52[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev,
+ RTW89_RU52,
+ ntx,
+ ch + ofst[i]);
+ lmt_ru->ru106[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev,
+ RTW89_RU106,
+ ntx,
+ ch + ofst[i]);
+ }
+}
+
void rtw89_phy_fill_txpwr_limit_ru(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit_ru *lmt_ru,
u8 ntx)
@@ -1338,8 +1648,12 @@ void rtw89_phy_fill_txpwr_limit_ru(struct rtw89_dev *rtwdev,
case RTW89_CHANNEL_WIDTH_80:
rtw89_phy_fill_txpwr_limit_ru_80m(rtwdev, lmt_ru, ntx, ch);
break;
+ case RTW89_CHANNEL_WIDTH_160:
+ rtw89_phy_fill_txpwr_limit_ru_160m(rtwdev, lmt_ru, ntx, ch);
+ break;
}
}
+EXPORT_SYMBOL(rtw89_phy_fill_txpwr_limit_ru);
struct rtw89_phy_iter_ra_data {
struct rtw89_dev *rtwdev;
@@ -1401,13 +1715,7 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
break;
}
- if (bw == RTW89_CHANNEL_WIDTH_80)
- ra_report->txrate.bw = RATE_INFO_BW_80;
- else if (bw == RTW89_CHANNEL_WIDTH_40)
- ra_report->txrate.bw = RATE_INFO_BW_40;
- else
- ra_report->txrate.bw = RATE_INFO_BW_20;
-
+ ra_report->txrate.bw = rtw89_hw_to_rate_info_bw(bw);
ra_report->bit_rate = cfg80211_calculate_bitrate(&ra_report->txrate);
ra_report->hw_rate = FIELD_PREP(RTW89_HW_RATE_MASK_MOD, mode) |
FIELD_PREP(RTW89_HW_RATE_MASK_VAL, rate);
@@ -1487,15 +1795,25 @@ static void rtw89_phy_cfo_set_crystal_cap(struct rtw89_dev *rtwdev,
u8 crystal_cap, bool force)
{
struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
u8 sc_xi_val, sc_xo_val;
if (!force && cfo->crystal_cap == crystal_cap)
return;
crystal_cap = clamp_t(u8, crystal_cap, 0, 127);
- rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap);
- rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap);
- sc_xo_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, true);
- sc_xi_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, false);
+ if (chip->chip_id == RTL8852A) {
+ rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap);
+ rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap);
+ sc_xo_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, true);
+ sc_xi_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, false);
+ } else {
+ rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO,
+ crystal_cap, XTAL_SC_XO_MASK);
+ rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI,
+ crystal_cap, XTAL_SC_XI_MASK);
+ rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO, &sc_xo_val);
+ rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI, &sc_xi_val);
+ }
cfo->crystal_cap = sc_xi_val;
cfo->x_cap_ofst = (s8)((int)cfo->crystal_cap - cfo->def_x_cap);
@@ -1525,9 +1843,11 @@ static void rtw89_phy_cfo_reset(struct rtw89_dev *rtwdev)
static void rtw89_dcfo_comp(struct rtw89_dev *rtwdev, s32 curr_cfo)
{
+ const struct rtw89_reg_def *dcfo_comp = rtwdev->chip->dcfo_comp;
bool is_linked = rtwdev->total_sta_assoc > 0;
s32 cfo_avg_312;
- s32 dcfo_comp;
+ s32 dcfo_comp_val;
+ u8 dcfo_comp_sft = rtwdev->chip->dcfo_comp_sft;
int sign;
if (!is_linked) {
@@ -1538,13 +1858,13 @@ static void rtw89_dcfo_comp(struct rtw89_dev *rtwdev, s32 curr_cfo)
rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: curr_cfo=%d\n", curr_cfo);
if (curr_cfo == 0)
return;
- dcfo_comp = rtw89_phy_read32_mask(rtwdev, R_DCFO, B_DCFO);
+ dcfo_comp_val = rtw89_phy_read32_mask(rtwdev, R_DCFO, B_DCFO);
sign = curr_cfo > 0 ? 1 : -1;
- cfo_avg_312 = (curr_cfo << 3) / 5 + sign * dcfo_comp;
+ cfo_avg_312 = (curr_cfo << dcfo_comp_sft) / 5 + sign * dcfo_comp_val;
rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: avg_cfo=%d\n", cfo_avg_312);
if (rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV)
cfo_avg_312 = -cfo_avg_312;
- rtw89_phy_set_phy_regs(rtwdev, R_DCFO_COMP_S0, B_DCFO_COMP_S0_MSK,
+ rtw89_phy_set_phy_regs(rtwdev, dcfo_comp->addr, dcfo_comp->mask,
cfo_avg_312);
}
@@ -1563,8 +1883,12 @@ static void rtw89_phy_cfo_init(struct rtw89_dev *rtwdev)
cfo->crystal_cap_default = efuse->xtal_cap & B_AX_XTAL_SC_MASK;
cfo->crystal_cap = cfo->crystal_cap_default;
cfo->def_x_cap = cfo->crystal_cap;
+ cfo->x_cap_ub = min_t(int, cfo->def_x_cap + CFO_BOUND, 0x7f);
+ cfo->x_cap_lb = max_t(int, cfo->def_x_cap - CFO_BOUND, 0x1);
cfo->is_adjust = false;
+ cfo->divergence_lock_en = false;
cfo->x_cap_ofst = 0;
+ cfo->lock_cnt = 0;
cfo->rtw89_multi_cfo_mode = RTW89_TP_BASED_AVG_MODE;
cfo->apply_compensation = false;
cfo->residual_cfo_acc = 0;
@@ -1782,6 +2106,23 @@ static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev)
rtw89_debug(rtwdev, RTW89_DBG_CFO, "curr_cfo=0\n");
return;
}
+ if (cfo->divergence_lock_en) {
+ cfo->lock_cnt++;
+ if (cfo->lock_cnt > CFO_PERIOD_CNT) {
+ cfo->divergence_lock_en = false;
+ cfo->lock_cnt = 0;
+ } else {
+ rtw89_phy_cfo_reset(rtwdev);
+ }
+ return;
+ }
+ if (cfo->crystal_cap >= cfo->x_cap_ub ||
+ cfo->crystal_cap <= cfo->x_cap_lb) {
+ cfo->divergence_lock_en = true;
+ rtw89_phy_cfo_reset(rtwdev);
+ return;
+ }
+
rtw89_phy_cfo_crystal_cap_adjust(rtwdev, new_cfo);
cfo->cfo_avg_pre = new_cfo;
x_cap_update = cfo->crystal_cap != pre_x_cap;
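The divergence lock keeps crystal-cap tracking inside def_x_cap +/- CFO_BOUND (clamped to [0x1, 0x7f] at init): once the cap reaches either bound the driver assumes the loop has diverged, resets the CFO state, and holds off further adjustment for CFO_PERIOD_CNT tracking periods before unlocking.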
@@ -2845,7 +3186,9 @@ static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
enum rtw89_bandwidth cbw = rtwdev->hal.current_band_width;
struct rtw89_dig_info *dig = &rtwdev->dig;
u8 final_rssi = 0, under_region = dig->pd_low_th_ofst;
- u32 val = 0;
+ u8 ofdm_cca_th;
+ s8 cck_cca_th;
+ u32 pd_val = 0;
under_region += PD_TH_SB_FLTR_CMP_VAL;
@@ -2856,6 +3199,9 @@ static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
case RTW89_CHANNEL_WIDTH_80:
under_region += PD_TH_BW80_CMP_VAL;
break;
+ case RTW89_CHANNEL_WIDTH_160:
+ under_region += PD_TH_BW160_CMP_VAL;
+ break;
case RTW89_CHANNEL_WIDTH_20:
fallthrough;
default:
@@ -2866,23 +3212,38 @@ static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
dig->dyn_pd_th_max = dig->igi_rssi;
final_rssi = min_t(u8, rssi, dig->igi_rssi);
- final_rssi = clamp_t(u8, final_rssi, PD_TH_MIN_RSSI + under_region,
- PD_TH_MAX_RSSI + under_region);
+ ofdm_cca_th = clamp_t(u8, final_rssi, PD_TH_MIN_RSSI + under_region,
+ PD_TH_MAX_RSSI + under_region);
if (enable) {
- val = (final_rssi - under_region - PD_TH_MIN_RSSI) >> 1;
+ pd_val = (ofdm_cca_th - under_region - PD_TH_MIN_RSSI) >> 1;
rtw89_debug(rtwdev, RTW89_DBG_DIG,
- "dyn_max=%d, final_rssi=%d, total=%d, PD_low=%d\n",
- dig->igi_rssi, final_rssi, under_region, val);
+ "igi=%d, ofdm_ccaTH=%d, backoff=%d, PD_low=%d\n",
+ final_rssi, ofdm_cca_th, under_region, pd_val);
} else {
rtw89_debug(rtwdev, RTW89_DBG_DIG,
"Dynamic PD th disabled, Set PD_low_bd=0\n");
}
rtw89_phy_write32_mask(rtwdev, R_SEG0R_PD, B_SEG0R_PD_LOWER_BOUND_MSK,
- val);
+ pd_val);
rtw89_phy_write32_mask(rtwdev, R_SEG0R_PD,
B_SEG0R_PD_SPATIAL_REUSE_EN_MSK, enable);
+
+ if (!rtwdev->hal.support_cckpd)
+ return;
+
+ cck_cca_th = max_t(s8, final_rssi - under_region, CCKPD_TH_MIN_RSSI);
+ pd_val = (u32)(cck_cca_th - IGI_RSSI_MAX);
+
+ rtw89_debug(rtwdev, RTW89_DBG_DIG,
+ "igi=%d, cck_ccaTH=%d, backoff=%d, cck_PD_low=((%d))dB\n",
+ final_rssi, cck_cca_th, under_region, pd_val);
+
+ rtw89_phy_write32_mask(rtwdev, R_BMODE_PDTH_EN_V1,
+ B_BMODE_PDTH_LIMIT_EN_MSK_V1, enable);
+ rtw89_phy_write32_mask(rtwdev, R_BMODE_PDTH_V1,
+ B_BMODE_PDTH_LOWER_BOUND_MSK_V1, pd_val);
}
void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev)
@@ -2994,3 +3355,55 @@ void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif
rtw89_phy_write32_idx(rtwdev, R_BSS_CLR_MAP, B_BSS_CLR_MAP_STAID,
vif->bss_conf.aid, phy_idx);
}
+
+static void
+_rfk_write_rf(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
+{
+ rtw89_write_rf(rtwdev, def->path, def->addr, def->mask, def->data);
+}
+
+static void
+_rfk_write32_mask(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
+{
+ rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
+}
+
+static void
+_rfk_write32_set(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
+{
+ rtw89_phy_write32_set(rtwdev, def->addr, def->mask);
+}
+
+static void
+_rfk_write32_clr(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
+{
+ rtw89_phy_write32_clr(rtwdev, def->addr, def->mask);
+}
+
+static void
+_rfk_delay(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
+{
+ udelay(def->data);
+}
+
+static void
+(*_rfk_handler[])(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) = {
+ [RTW89_RFK_F_WRF] = _rfk_write_rf,
+ [RTW89_RFK_F_WM] = _rfk_write32_mask,
+ [RTW89_RFK_F_WS] = _rfk_write32_set,
+ [RTW89_RFK_F_WC] = _rfk_write32_clr,
+ [RTW89_RFK_F_DELAY] = _rfk_delay,
+};
+
+static_assert(ARRAY_SIZE(_rfk_handler) == RTW89_RFK_F_NUM);
+
+void
+rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl)
+{
+ const struct rtw89_reg5_def *p = tbl->defs;
+ const struct rtw89_reg5_def *end = tbl->defs + tbl->size;
+
+ for (; p < end; p++)
+ _rfk_handler[p->flag](rtwdev, p);
+}
+EXPORT_SYMBOL(rtw89_rfk_parser);
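rtw89_rfk_parser() turns RF-calibration sequences into data: each rtw89_reg5_def carries a flag that indexes the _rfk_handler function-pointer array, and the static_assert keeps that array in lockstep with enum rtw89_rfk_flag. A small sketch of how a sequence would be declared with the helper macros added to phy.h below; the register addresses and values are placeholders, not taken from any real calibration table:

static const struct rtw89_reg5_def example_rfk_defs[] = {
	RTW89_DECL_RFK_WRF(RF_PATH_A, 0x5, RFREG_MASK, 0x10000),
	RTW89_DECL_RFK_WM(0x12a0, 0x00070000, 0x3),
	RTW89_DECL_RFK_WS(0x12b8, 0x40000000),
	RTW89_DECL_RFK_DELAY(10),
	RTW89_DECL_RFK_WC(0x12b8, 0x40000000),
};

RTW89_DECLARE_RFK_TBL(example_rfk_defs);

/* later, e.g. gated on chip revision: */
/* rtw89_rfk_parser(rtwdev, &example_rfk_defs_tbl); */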
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index b1f059b725a1..adcfcb4c2429 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -8,6 +8,7 @@
#include "core.h"
#define RTW89_PHY_ADDR_OFFSET 0x10000
+#define RTW89_RF_ADDR_ADSEL_MASK BIT(16)
#define get_phy_headline(addr) FIELD_GET(GENMASK(31, 28), addr)
#define PHY_HEADLINE_VALID 0xf
@@ -55,6 +56,7 @@
#define CFO_TRK_STOP_TH (2 << 2)
#define CFO_SW_COMP_FINE_TUNE (2 << 2)
#define CFO_PERIOD_CNT 15
+#define CFO_BOUND 32
#define CFO_TP_UPPER 100
#define CFO_TP_LOWER 50
#define CFO_COMP_PERIOD 250
@@ -87,8 +89,11 @@
#define RXB_IDX_MAX 31
#define RXB_IDX_MIN 0
+#define IGI_RSSI_MAX 110
#define PD_TH_MAX_RSSI 70
#define PD_TH_MIN_RSSI 8
+#define CCKPD_TH_MIN_RSSI (-18)
+#define PD_TH_BW160_CMP_VAL 9
#define PD_TH_BW80_CMP_VAL 6
#define PD_TH_BW40_CMP_VAL 3
#define PD_TH_BW20_CMP_VAL 0
@@ -265,6 +270,9 @@ const struct rtw89_phy_reg3_tbl _name ## _tbl = { \
.size = ARRAY_SIZE(_name), \
}
+extern const u8 rtw89_rs_idx_max[RTW89_RS_MAX];
+extern const u8 rtw89_rs_nss_max[RTW89_RS_MAX];
+
static inline void rtw89_phy_write8(struct rtw89_dev *rtwdev,
u32 addr, u8 data)
{
@@ -322,6 +330,65 @@ static inline u32 rtw89_phy_read32_mask(struct rtw89_dev *rtwdev,
return rtw89_read32_mask(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, mask);
}
+enum rtw89_rfk_flag {
+ RTW89_RFK_F_WRF = 0,
+ RTW89_RFK_F_WM = 1,
+ RTW89_RFK_F_WS = 2,
+ RTW89_RFK_F_WC = 3,
+ RTW89_RFK_F_DELAY = 4,
+ RTW89_RFK_F_NUM,
+};
+
+struct rtw89_rfk_tbl {
+ const struct rtw89_reg5_def *defs;
+ u32 size;
+};
+
+#define RTW89_DECLARE_RFK_TBL(_name) \
+const struct rtw89_rfk_tbl _name ## _tbl = { \
+ .defs = _name, \
+ .size = ARRAY_SIZE(_name), \
+}
+
+#define RTW89_DECL_RFK_WRF(_path, _addr, _mask, _data) \
+ {.flag = RTW89_RFK_F_WRF, \
+ .path = _path, \
+ .addr = _addr, \
+ .mask = _mask, \
+ .data = _data,}
+
+#define RTW89_DECL_RFK_WM(_addr, _mask, _data) \
+ {.flag = RTW89_RFK_F_WM, \
+ .addr = _addr, \
+ .mask = _mask, \
+ .data = _data,}
+
+#define RTW89_DECL_RFK_WS(_addr, _mask) \
+ {.flag = RTW89_RFK_F_WS, \
+ .addr = _addr, \
+ .mask = _mask,}
+
+#define RTW89_DECL_RFK_WC(_addr, _mask) \
+ {.flag = RTW89_RFK_F_WC, \
+ .addr = _addr, \
+ .mask = _mask,}
+
+#define RTW89_DECL_RFK_DELAY(_data) \
+ {.flag = RTW89_RFK_F_DELAY, \
+ .data = _data,}
+
+void
+rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl);
+
+#define rtw89_rfk_parser_by_cond(dev, cond, tbl_t, tbl_f) \
+ do { \
+ typeof(dev) __dev = (dev); \
+ if (cond) \
+ rtw89_rfk_parser(__dev, (tbl_t)); \
+ else \
+ rtw89_rfk_parser(__dev, (tbl_f)); \
+ } while (0)
+
void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
const struct rtw89_phy_reg3_tbl *tbl);
u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
@@ -329,10 +396,18 @@ u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
enum rtw89_bandwidth dbw);
u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask);
+u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask);
bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data);
+bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask, u32 data);
void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev);
void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev);
+void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
+ const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path,
+ void *extra_data);
void rtw89_phy_dm_init(struct rtw89_dev *rtwdev);
void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
u32 data, enum rtw89_phy_idx phy_idx);
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index e0a416d37d0e..25b106788118 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -8,16 +8,36 @@
#define R_AX_SYS_WL_EFUSE_CTRL 0x000A
#define B_AX_AUTOLOAD_SUS BIT(5)
+#define R_AX_SYS_ISO_CTRL 0x0000
+#define B_AX_PWC_EV2EF_MASK GENMASK(15, 14)
+#define B_AX_PWC_EV2EF_B15 BIT(15)
+#define B_AX_PWC_EV2EF_B14 BIT(14)
+#define B_AX_ISO_EB2CORE BIT(8)
+
#define R_AX_SYS_FUNC_EN 0x0002
#define B_AX_FEN_BB_GLB_RSTN BIT(1)
#define B_AX_FEN_BBRSTB BIT(0)
#define R_AX_SYS_PW_CTRL 0x0004
+#define B_AX_XTAL_OFF_A_DIE BIT(22)
+#define B_AX_DIS_WLBT_PDNSUSEN_SOPC BIT(18)
+#define B_AX_RDY_SYSPWR BIT(17)
+#define B_AX_EN_WLON BIT(16)
+#define B_AX_APDM_HPDN BIT(15)
#define B_AX_PSUS_OFF_CAPC_EN BIT(14)
+#define B_AX_AFSM_PCIE_SUS_EN BIT(12)
+#define B_AX_AFSM_WLSUS_EN BIT(11)
+#define B_AX_APFM_SWLPS BIT(10)
+#define B_AX_APFM_OFFMAC BIT(9)
+#define B_AX_APFN_ONMAC BIT(8)
#define R_AX_SYS_CLK_CTRL 0x0008
#define B_AX_CPU_CLK_EN BIT(14)
+#define R_AX_SYS_ADIE_PAD_PWR_CTRL 0x0018
+#define B_AX_SYM_PADPDN_WL_PTA_1P3 BIT(6)
+#define B_AX_SYM_PADPDN_WL_RFC_1P3 BIT(5)
+
#define R_AX_RSV_CTRL 0x001C
#define B_AX_R_DIS_PRST BIT(6)
#define B_AX_WLOCK_1C_BIT6 BIT(5)
@@ -41,6 +61,17 @@
#define B_AX_EF_ADDR_MASK GENMASK(26, 16)
#define B_AX_EF_DATA_MASK GENMASK(15, 0)
+#define R_AX_EFUSE_CTRL_1_V1 0x0038
+#define B_AX_EF_ENT BIT(31)
+#define B_AX_EF_BURST BIT(19)
+#define B_AX_EF_TEST_SEL_MASK GENMASK(18, 16)
+#define B_AX_EF_TROW_EN BIT(15)
+#define B_AX_EF_ERR_FLAG BIT(14)
+#define B_AX_EF_DSB_EN BIT(11)
+#define B_AX_PCIE_CALIB_EN_V1 BIT(12)
+#define B_AX_WDT_WAKE_PCIE_EN BIT(10)
+#define B_AX_WDT_WAKE_USB_EN BIT(9)
+
#define R_AX_GPIO_MUXCFG 0x0040
#define B_AX_BOOT_MODE BIT(19)
#define B_AX_WL_EECS_EXT_32K_SEL BIT(18)
@@ -72,11 +103,16 @@
#define R_AX_SYS_SDIO_CTRL 0x0070
#define B_AX_PCIE_DIS_L2_CTRL_LDO_HCI BIT(15)
#define B_AX_PCIE_DIS_WLSUS_AFT_PDN BIT(14)
+#define B_AX_PCIE_CALIB_EN_V1 BIT(12)
#define B_AX_PCIE_AUXCLK_GATE BIT(11)
#define B_AX_LTE_MUX_CTRL_PATH BIT(26)
#define R_AX_PLATFORM_ENABLE 0x0088
#define B_AX_WCPU_EN BIT(1)
+#define B_AX_PLATFORM_EN BIT(0)
+
+#define R_AX_WLLPS_CTRL 0x0090
+#define B_AX_DIS_WLBT_LPSEN_LOPC BIT(1)
#define R_AX_SCOREBOARD 0x00AC
#define B_AX_TOGGLE BIT(31)
@@ -89,11 +125,20 @@
#define R_AX_DBG_PORT_SEL 0x00C0
#define B_AX_DEBUG_ST_MASK GENMASK(31, 0)
+#define R_AX_PMC_DBG_CTRL2 0x00CC
+#define B_AX_SYSON_DIS_PMCR_AX_WRMSK BIT(2)
+
#define R_AX_SYS_CFG1 0x00F0
#define B_AX_CHIP_VER_MASK GENMASK(15, 12)
#define R_AX_SYS_STATUS1 0x00F4
#define B_AX_SEL_0XC0_MASK GENMASK(17, 16)
+#define B_AX_PAD_HCI_SEL_V2_MASK GENMASK(5, 3)
+#define MAC_AX_HCI_SEL_SDIO_UART 0
+#define MAC_AX_HCI_SEL_MULTI_USB 1
+#define MAC_AX_HCI_SEL_PCIE_UART 2
+#define MAC_AX_HCI_SEL_PCIE_USB 3
+#define MAC_AX_HCI_SEL_MULTI_SDIO 4
#define R_AX_HALT_H2C_CTRL 0x0160
#define R_AX_HALT_H2C 0x0168
@@ -112,6 +157,7 @@
#define PS_RPWM_TOGGLE BIT(15)
#define PS_RPWM_ACK BIT(14)
#define PS_RPWM_SEQ_NUM GENMASK(13, 12)
+#define PS_RPWM_NOTIFY_WAKE BIT(8)
#define PS_RPWM_STATE 0x7
#define RPWM_SEQ_NUM_MAX 3
#define PS_CPWM_SEQ_NUM GENMASK(13, 12)
@@ -130,6 +176,21 @@
#define R_AX_UDM2 0x01F8
#define R_AX_UDM3 0x01FC
+#define R_AX_LDO_AON_CTRL0 0x0218
+#define B_AX_PD_REGU_L BIT(16)
+
+#define R_AX_WLAN_XTAL_SI_CTRL 0x0270
+#define B_AX_WL_XTAL_SI_CMD_POLL BIT(31)
+#define B_AX_BT_XTAL_SI_ERR_FLAG BIT(30)
+#define B_AX_WL_XTAL_GNT BIT(29)
+#define B_AX_BT_XTAL_GNT BIT(28)
+#define B_AX_WL_XTAL_SI_MODE_MASK GENMASK(25, 24)
+#define XTAL_SI_NORMAL_WRITE 0x00
+#define XTAL_SI_NORMAL_READ 0x01
+#define B_AX_WL_XTAL_SI_BITMASK_MASK GENMASK(23, 16)
+#define B_AX_WL_XTAL_SI_DATA_MASK GENMASK(15, 8)
+#define B_AX_WL_XTAL_SI_ADDR_MASK GENMASK(7, 0)
+
#define R_AX_XTAL_ON_CTRL0 0x0280
#define B_AX_XTAL_SC_LPS BIT(31)
#define B_AX_XTAL_SC_XO_MASK GENMASK(23, 17)
@@ -138,6 +199,11 @@
#define R_AX_GPIO0_7_FUNC_SEL 0x02D0
+#define R_AX_GPIO0_15_EECS_EESK_LED1_PULL_LOW_EN 0x02E4
+#define B_AX_LED1_PULL_LOW_EN BIT(18)
+#define B_AX_EESK_PULL_LOW_EN BIT(17)
+#define B_AX_EECS_PULL_LOW_EN BIT(16)
+
#define R_AX_WLRF_CTRL 0x02F0
#define B_AX_WLRF1_CTRL_7 BIT(15)
#define B_AX_WLRF1_CTRL_1 BIT(9)
@@ -162,6 +228,58 @@
#define B_AX_ASFF_FULL_NO_STK BIT(1)
#define B_AX_EN_STUCK_DBG BIT(0)
+#define R_AX_HCI_FC_CTRL_V1 0x1700
+#define R_AX_CH_PAGE_CTRL_V1 0x1704
+
+#define R_AX_ACH0_PAGE_CTRL_V1 0x1710
+#define R_AX_ACH1_PAGE_CTRL_V1 0x1714
+#define R_AX_ACH2_PAGE_CTRL_V1 0x1718
+#define R_AX_ACH3_PAGE_CTRL_V1 0x171C
+#define R_AX_ACH4_PAGE_CTRL_V1 0x1720
+#define R_AX_ACH5_PAGE_CTRL_V1 0x1724
+#define R_AX_ACH6_PAGE_CTRL_V1 0x1728
+#define R_AX_ACH7_PAGE_CTRL_V1 0x172C
+#define R_AX_CH8_PAGE_CTRL_V1 0x1730
+#define R_AX_CH9_PAGE_CTRL_V1 0x1734
+#define R_AX_CH10_PAGE_CTRL_V1 0x1738
+#define R_AX_CH11_PAGE_CTRL_V1 0x173C
+
+#define R_AX_ACH0_PAGE_INFO_V1 0x1750
+#define R_AX_ACH1_PAGE_INFO_V1 0x1754
+#define R_AX_ACH2_PAGE_INFO_V1 0x1758
+#define R_AX_ACH3_PAGE_INFO_V1 0x175C
+#define R_AX_ACH4_PAGE_INFO_V1 0x1760
+#define R_AX_ACH5_PAGE_INFO_V1 0x1764
+#define R_AX_ACH6_PAGE_INFO_V1 0x1768
+#define R_AX_ACH7_PAGE_INFO_V1 0x176C
+#define R_AX_CH8_PAGE_INFO_V1 0x1770
+#define R_AX_CH9_PAGE_INFO_V1 0x1774
+#define R_AX_CH10_PAGE_INFO_V1 0x1778
+#define R_AX_CH11_PAGE_INFO_V1 0x177C
+#define R_AX_CH12_PAGE_INFO_V1 0x1780
+
+#define R_AX_PUB_PAGE_INFO3_V1 0x178C
+#define R_AX_PUB_PAGE_CTRL1_V1 0x1790
+#define R_AX_PUB_PAGE_CTRL2_V1 0x1794
+#define R_AX_PUB_PAGE_INFO1_V1 0x1798
+#define R_AX_PUB_PAGE_INFO2_V1 0x179C
+#define R_AX_WP_PAGE_CTRL1_V1 0x17A0
+#define R_AX_WP_PAGE_CTRL2_V1 0x17A4
+#define R_AX_WP_PAGE_INFO1_V1 0x17A8
+
+#define R_AX_H2CREG_DATA0_V1 0x7140
+#define R_AX_H2CREG_DATA1_V1 0x7144
+#define R_AX_H2CREG_DATA2_V1 0x7148
+#define R_AX_H2CREG_DATA3_V1 0x714C
+#define R_AX_C2HREG_DATA0_V1 0x7150
+#define R_AX_C2HREG_DATA1_V1 0x7154
+#define R_AX_C2HREG_DATA2_V1 0x7158
+#define R_AX_C2HREG_DATA3_V1 0x715C
+#define R_AX_H2CREG_CTRL_V1 0x7160
+#define R_AX_C2HREG_CTRL_V1 0x7164
+
+#define R_AX_HCI_FUNC_EN_V1 0x7880
+
#define R_AX_PHYREG_SET 0x8040
#define PHYREG_SET_ALL_CYCLE 0x8
@@ -194,6 +312,7 @@
#define R_AX_BOOT_DBG 0x83F0
#define R_AX_DMAC_FUNC_EN 0x8400
+#define B_AX_DMAC_CRPRT BIT(31)
#define B_AX_MAC_FUNC_EN BIT(30)
#define B_AX_DMAC_FUNC_EN BIT(29)
#define B_AX_MPDU_PROC_EN BIT(28)
@@ -207,7 +326,10 @@
#define B_AX_PKT_IN_EN BIT(20)
#define B_AX_DLE_CPUIO_EN BIT(19)
#define B_AX_DISPATCHER_EN BIT(18)
+#define B_AX_BBRPT_EN BIT(17)
#define B_AX_MAC_SEC_EN BIT(16)
+#define B_AX_MAC_UN_EN BIT(15)
+#define B_AX_H_AXIDMA_EN BIT(14)
#define R_AX_DMAC_CLK_EN 0x8404
#define B_AX_WD_RLS_CLK_EN BIT(27)
@@ -218,6 +340,7 @@
#define B_AX_PKT_IN_CLK_EN BIT(20)
#define B_AX_DLE_CPUIO_CLK_EN BIT(19)
#define B_AX_DISPATCHER_CLK_EN BIT(18)
+#define B_AX_BBRPT_CLK_EN BIT(17)
#define B_AX_MAC_SEC_CLK_EN BIT(16)
#define PCI_LTR_IDLE_TIMER_1US 0
@@ -444,6 +567,7 @@
#define R_AX_PLE_QTA8_CFG 0x9060
#define R_AX_PLE_QTA9_CFG 0x9064
#define R_AX_PLE_QTA10_CFG 0x9068
+#define R_AX_PLE_QTA11_CFG 0x906C
#define R_AX_PLE_INI_STATUS 0x9100
#define B_AX_PLE_Q_MGN_INI_RDY BIT(1)
@@ -618,6 +742,30 @@
#define R_AX_DBG_FUN_INTF_DATA 0x9F34
#define B_AX_DFI_DATA_MASK GENMASK(31, 0)
+#define R_AX_TXPKTCTL_B0_PRELD_CFG0 0x9F48
+#define B_AX_B0_PRELD_FEN BIT(31)
+#define B_AX_B0_PRELD_USEMAXSZ_MASK GENMASK(25, 16)
+#define PRELD_B0_ENT_NUM 10
+#define PRELD_AMSDU_SIZE 52
+#define B_AX_B0_PRELD_CAM_G1ENTNUM_MASK GENMASK(12, 8)
+#define B_AX_B0_PRELD_CAM_G0ENTNUM_MASK GENMASK(4, 0)
+
+#define R_AX_TXPKTCTL_B0_PRELD_CFG1 0x9F4C
+#define B_AX_B0_PRELD_NXT_TXENDWIN_MASK GENMASK(11, 8)
+#define PRELD_NEXT_WND 1
+#define B_AX_B0_PRELD_NXT_RSVMINSZ_MASK GENMASK(7, 0)
+
+#define R_AX_TXPKTCTL_B1_PRELD_CFG0 0x9F88
+#define B_AX_B1_PRELD_FEN BIT(31)
+#define B_AX_B1_PRELD_USEMAXSZ_MASK GENMASK(25, 16)
+#define PRELD_B1_ENT_NUM 4
+#define B_AX_B1_PRELD_CAM_G1ENTNUM_MASK GENMASK(12, 8)
+#define B_AX_B1_PRELD_CAM_G0ENTNUM_MASK GENMASK(4, 0)
+
+#define R_AX_TXPKTCTL_B1_PRELD_CFG1 0x9F8C
+#define B_AX_B1_PRELD_NXT_TXENDWIN_MASK GENMASK(11, 8)
+#define B_AX_B1_PRELD_NXT_RSVMINSZ_MASK GENMASK(7, 0)
+
#define R_AX_AFE_CTRL1 0x0024
#define B_AX_R_SYM_WLCMAC1_P4_PC_EN BIT(4)
@@ -745,6 +893,7 @@
#define B_AX_CTN_TXEN_VI_0 BIT(2)
#define B_AX_CTN_TXEN_BK_0 BIT(1)
#define B_AX_CTN_TXEN_BE_0 BIT(0)
+#define B_AX_CTN_TXEN_ALL_MASK GENMASK(15, 0)
#define R_AX_MUEDCA_BE_PARAM_0 0xC350
#define R_AX_MUEDCA_BE_PARAM_0_C1 0xE350
@@ -791,6 +940,12 @@
#define B_AX_CTN_CHK_CCA_S20 BIT(1)
#define B_AX_CTN_CHK_CCA_P20 BIT(0)
+#define R_AX_CTN_DRV_TXEN 0xC398
+#define R_AX_CTN_DRV_TXEN_C1 0xE398
+#define B_AX_CTN_TXEN_TWT_3 BIT(17)
+#define B_AX_CTN_TXEN_TWT_2 BIT(16)
+#define B_AX_CTN_TXEN_ALL_MASK_V1 GENMASK(17, 0)
+
#define R_AX_SCHEDULE_ERR_IMR 0xC3E8
#define R_AX_SCHEDULE_ERR_IMR_C1 0xE3E8
#define B_AX_SORT_NON_IDLE_ERR_INT_EN BIT(1)
@@ -913,7 +1068,7 @@
#define R_AX_DTIM_CTRL_P2 0xC4A6
#define R_AX_DTIM_CTRL_P3 0xC4E6
#define R_AX_DTIM_CTRL_P4 0xC526
-#define B_AX_DTIM_NUM_MASK GENMASK(15, 0)
+#define B_AX_DTIM_NUM_MASK GENMASK(15, 8)
#define B_AX_DTIM_CURRCNT_MASK GENMASK(7, 0)
#define R_AX_TBTT_SHIFT_P0 0xC428
@@ -964,6 +1119,11 @@
#define B_AX_P0MB2_EN BIT(2)
#define B_AX_P0MB1_EN BIT(1)
+#define R_AX_P0MB_HGQ_WINDOW_CFG_0 0xC590
+#define R_AX_P0MB_HGQ_WINDOW_CFG_0_C1 0xE590
+#define R_AX_PORT_HGQ_WINDOW_CFG 0xC5A0
+#define R_AX_PORT_HGQ_WINDOW_CFG_C1 0xE5A0
+
#define R_AX_AMPDU_AGG_LIMIT 0xC610
#define B_AX_AMPDU_MAX_TIME_MASK GENMASK(31, 24)
#define B_AX_RA_TRY_RATE_AGG_LMT_MASK GENMASK(23, 16)
@@ -1080,6 +1240,13 @@
#define B_AX_TCR_ZLD_USTIME_AFTERPHYTXON GENMASK(11, 8)
#define B_AX_TCR_TXTIMEOUT GENMASK(7, 0)
+#define R_AX_MD_TSFT_STMP_CTL 0xCA08
+#define R_AX_MD_TSFT_STMP_CTL_C1 0xEA08
+#define B_AX_TSFT_OFS_MASK GENMASK(31, 16)
+#define B_AX_STMP_THSD_MASK GENMASK(15, 8)
+#define B_AX_UPD_HGQMD BIT(1)
+#define B_AX_UPD_TIMIE BIT(0)
+
#define R_AX_PPWRBIT_SETTING 0xCA0C
#define R_AX_PPWRBIT_SETTING_C1 0xEA0C
@@ -1391,8 +1558,10 @@
#define B_AX_PWR_UL_TB_CTRL_EN BIT(31)
#define R_AX_PWR_UL_TB_1T 0xD28C
#define B_AX_PWR_UL_TB_1T_MASK GENMASK(4, 0)
+#define B_AX_PWR_UL_TB_1T_V1_MASK GENMASK(7, 0)
#define R_AX_PWR_UL_TB_2T 0xD290
#define B_AX_PWR_UL_TB_2T_MASK GENMASK(4, 0)
+#define B_AX_PWR_UL_TB_2T_V1_MASK GENMASK(7, 0)
#define R_AX_PWR_BY_RATE_TABLE0 0xD2C0
#define R_AX_PWR_BY_RATE_TABLE10 0xD2E8
#define R_AX_PWR_BY_RATE R_AX_PWR_BY_RATE_TABLE0
@@ -1458,6 +1627,31 @@
#define B_AX_STATIS_BT_LO_TX_1_MASK GENMASK(15, 0)
#define B_AX_STATIS_BT_LO_RX_1_MASK GENMASK(31, 16)
+#define R_AX_GNT_SW_CTRL 0xDA48
+#define R_AX_GNT_SW_CTRL_C1 0xFA48
+#define B_AX_WL_ACT2_VAL BIT(21)
+#define B_AX_WL_ACT2_SWCTRL BIT(20)
+#define B_AX_WL_ACT_VAL BIT(19)
+#define B_AX_WL_ACT_SWCTRL BIT(18)
+#define B_AX_GNT_BT_RX_VAL BIT(17)
+#define B_AX_GNT_BT_RX_SWCTRL BIT(16)
+#define B_AX_GNT_BT_TX_VAL BIT(15)
+#define B_AX_GNT_BT_TX_SWCTRL BIT(14)
+#define B_AX_GNT_WL_RX_VAL BIT(13)
+#define B_AX_GNT_WL_RX_SWCTRL BIT(12)
+#define B_AX_GNT_WL_TX_VAL BIT(11)
+#define B_AX_GNT_WL_TX_SWCTRL BIT(10)
+#define B_AX_GNT_BT_RFC_S1_VAL BIT(9)
+#define B_AX_GNT_BT_RFC_S1_SWCTRL BIT(8)
+#define B_AX_GNT_WL_RFC_S1_VAL BIT(7)
+#define B_AX_GNT_WL_RFC_S1_SWCTRL BIT(6)
+#define B_AX_GNT_BT_RFC_S0_VAL BIT(5)
+#define B_AX_GNT_BT_RFC_S0_SWCTRL BIT(4)
+#define B_AX_GNT_WL_RFC_S0_VAL BIT(3)
+#define B_AX_GNT_WL_RFC_S0_SWCTRL BIT(2)
+#define B_AX_GNT_WL_BB_VAL BIT(1)
+#define B_AX_GNT_WL_BB_SWCTRL BIT(0)
+
#define R_AX_TDMA_MODE 0xDA4C
#define R_AX_TDMA_MODE_C1 0xFA4C
#define B_AX_R_BT_CMD_RPT_MASK GENMASK(31, 16)
@@ -1669,6 +1863,17 @@
#define B_ANAPAR_FLTRST BIT(22)
#define B_ANAPAR_CRXBB GENMASK(18, 16)
#define B_ANAPAR_14 GENMASK(15, 0)
+#define R_SWSI_DATA_V1 0x0370
+#define B_SWSI_DATA_VAL_V1 GENMASK(19, 0)
+#define B_SWSI_DATA_ADDR_V1 GENMASK(27, 20)
+#define B_SWSI_DATA_PATH_V1 GENMASK(30, 28)
+#define B_SWSI_DATA_BIT_MASK_EN_V1 BIT(31)
+#define R_SWSI_BIT_MASK_V1 0x0374
+#define B_SWSI_BIT_MASK_V1 GENMASK(19, 0)
+#define R_SWSI_READ_ADDR_V1 0x0378
+#define B_SWSI_READ_ADDR_ADDR_V1 GENMASK(7, 0)
+#define B_SWSI_READ_ADDR_PATH_V1 GENMASK(10, 8)
+#define B_SWSI_READ_ADDR_V1 GENMASK(10, 0)
#define R_UPD_CLK_ADC 0x0700
#define B_UPD_CLK_ADC_ON BIT(24)
#define B_UPD_CLK_ADC_VAL GENMASK(26, 25)
@@ -1776,6 +1981,10 @@
#define R_CFO_COMP_SEG0_H 0x1388
#define R_CFO_COMP_SEG0_CTRL 0x138C
#define R_DBG32_D 0x1730
+#define R_SWSI_V1 0x174C
+#define B_SWSI_W_BUSY_V1 BIT(24)
+#define B_SWSI_R_BUSY_V1 BIT(25)
+#define B_SWSI_R_DATA_DONE_V1 BIT(26)
#define R_TX_COUNTER 0x1A40
#define R_IFS_CLM_TX_CNT 0x1ACC
#define B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK GENMASK(31, 16)
@@ -1959,6 +2168,12 @@
#define R_CHBW_MOD 0x4978
#define B_CHBW_MOD_PRICH GENMASK(11, 8)
#define B_CHBW_MOD_SBW GENMASK(13, 12)
+#define R_DCFO_COMP_S0_V1 0x4A40
+#define B_DCFO_COMP_S0_V1_MSK GENMASK(13, 0)
+#define R_BMODE_PDTH_V1 0x4B64
+#define B_BMODE_PDTH_LOWER_BOUND_MSK_V1 GENMASK(31, 24)
+#define R_BMODE_PDTH_EN_V1 0x4B74
+#define B_BMODE_PDTH_LIMIT_EN_MSK_V1 BIT(30)
#define R_CFO_COMP_SEG1_L 0x5384
#define R_CFO_COMP_SEG1_H 0x5388
#define R_CFO_COMP_SEG1_CTRL 0x538C
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 6b75e4bc7352..41fc8db311ec 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -3,6 +3,7 @@
*/
#include "coex.h"
+#include "fw.h"
#include "mac.h"
#include "phy.h"
#include "reg.h"
@@ -36,16 +37,19 @@ static const struct rtw89_hfc_pub_cfg rtw8852a_hfc_pubcfg_pcie = {
static const struct rtw89_hfc_param_ini rtw8852a_hfc_param_ini_pcie[] = {
[RTW89_QTA_SCC] = {rtw8852a_hfc_chcfg_pcie, &rtw8852a_hfc_pubcfg_pcie,
- &rtw_hfc_preccfg_pcie, RTW89_HCIFC_POH},
- [RTW89_QTA_DLFW] = {NULL, NULL, &rtw_hfc_preccfg_pcie, RTW89_HCIFC_POH},
+ &rtw89_hfc_preccfg_pcie, RTW89_HCIFC_POH},
+ [RTW89_QTA_DLFW] = {NULL, NULL, &rtw89_hfc_preccfg_pcie,
+ RTW89_HCIFC_POH},
[RTW89_QTA_INVALID] = {NULL},
};
static const struct rtw89_dle_mem rtw8852a_dle_mem_pcie[] = {
- [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &wde_size0, &ple_size0, &wde_qt0,
- &wde_qt0, &ple_qt4, &ple_qt5},
- [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &wde_size4, &ple_size4,
- &wde_qt4, &wde_qt4, &ple_qt13, &ple_qt13},
+ [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_wde_size0, &rtw89_ple_size0,
+ &rtw89_wde_qt0, &rtw89_wde_qt0, &rtw89_ple_qt4,
+ &rtw89_ple_qt5},
+ [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_wde_size4, &rtw89_ple_size4,
+ &rtw89_wde_qt4, &rtw89_wde_qt4, &rtw89_ple_qt13,
+ &rtw89_ple_qt13},
[RTW89_QTA_INVALID] = {RTW89_QTA_INVALID, NULL, NULL, NULL, NULL, NULL,
NULL},
};
@@ -373,6 +377,35 @@ static const struct rtw89_pwr_cfg * const pwr_off_seq_8852a[] = {
rtw8852a_pwroff, NULL
};
+static const u32 rtw8852a_h2c_regs[RTW89_H2CREG_MAX] = {
+ R_AX_H2CREG_DATA0, R_AX_H2CREG_DATA1, R_AX_H2CREG_DATA2,
+ R_AX_H2CREG_DATA3
+};
+
+static const u32 rtw8852a_c2h_regs[RTW89_C2HREG_MAX] = {
+ R_AX_C2HREG_DATA0, R_AX_C2HREG_DATA1, R_AX_C2HREG_DATA2,
+ R_AX_C2HREG_DATA3
+};
+
+static const struct rtw89_page_regs rtw8852a_page_regs = {
+ .hci_fc_ctrl = R_AX_HCI_FC_CTRL,
+ .ch_page_ctrl = R_AX_CH_PAGE_CTRL,
+ .ach_page_ctrl = R_AX_ACH0_PAGE_CTRL,
+ .ach_page_info = R_AX_ACH0_PAGE_INFO,
+ .pub_page_info3 = R_AX_PUB_PAGE_INFO3,
+ .pub_page_ctrl1 = R_AX_PUB_PAGE_CTRL1,
+ .pub_page_ctrl2 = R_AX_PUB_PAGE_CTRL2,
+ .pub_page_info1 = R_AX_PUB_PAGE_INFO1,
+ .pub_page_info2 = R_AX_PUB_PAGE_INFO2,
+ .wp_page_ctrl1 = R_AX_WP_PAGE_CTRL1,
+ .wp_page_ctrl2 = R_AX_WP_PAGE_CTRL2,
+ .wp_page_info1 = R_AX_WP_PAGE_INFO1,
+};
+
+static const struct rtw89_reg_def rtw8852a_dcfo_comp = {
+ R_DCFO_COMP_S0, B_DCFO_COMP_S0_MSK
+};
+
static void rtw8852ae_efuse_parsing(struct rtw89_efuse *efuse,
struct rtw8852a_efuse *map)
{
@@ -1134,7 +1167,7 @@ static void rtw8852a_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
u8 phy_idx = RTW89_PHY_0;
if (enter) {
- rtw89_mac_stop_sch_tx(rtwdev, RTW89_MAC_0, &p->tx_en, RTW89_SCH_TX_SEL_ALL);
+ rtw89_chip_stop_sch_tx(rtwdev, RTW89_MAC_0, &p->tx_en, RTW89_SCH_TX_SEL_ALL);
rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
rtw8852a_dfs_en(rtwdev, false);
rtw8852a_tssi_cont_en_phyidx(rtwdev, false, RTW89_PHY_0);
@@ -1147,7 +1180,7 @@ static void rtw8852a_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
rtw8852a_dfs_en(rtwdev, true);
rtw8852a_tssi_cont_en_phyidx(rtwdev, true, RTW89_PHY_0);
rtw8852a_bb_reset_en(rtwdev, phy_idx, true);
- rtw89_mac_resume_sch_tx(rtwdev, RTW89_MAC_0, p->tx_en);
+ rtw89_chip_resume_sch_tx(rtwdev, RTW89_MAC_0, p->tx_en);
}
}
@@ -1242,10 +1275,10 @@ static u32 rtw8852a_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
static
void rtw8852a_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
- s16 pw_ofst, enum rtw89_mac_idx mac_idx)
+ s8 pw_ofst, enum rtw89_mac_idx mac_idx)
{
- s32 val_1t = 0;
- s32 val_2t = 0;
+ s8 val_1t = 0;
+ s8 val_2t = 0;
u32 reg;
if (pw_ofst < -16 || pw_ofst > 15) {
@@ -1255,7 +1288,7 @@ void rtw8852a_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
}
reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_CTRL, mac_idx);
rtw89_write32_set(rtwdev, reg, B_AX_PWR_UL_TB_CTRL_EN);
- val_1t = (s32)pw_ofst;
+ val_1t = pw_ofst;
reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_1T, mac_idx);
rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_MASK, val_1t);
val_2t = max(val_1t - 3, -16);
@@ -1984,6 +2017,12 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.query_ppdu = rtw8852a_query_ppdu,
.bb_ctrl_btc_preagc = rtw8852a_bb_ctrl_btc_preagc,
.set_txpwr_ul_tb_offset = rtw8852a_set_txpwr_ul_tb_offset,
+ .pwr_on_func = NULL,
+ .pwr_off_func = NULL,
+ .cfg_ctrl_path = rtw89_mac_cfg_ctrl_path,
+ .mac_cfg_gnt = rtw89_mac_cfg_gnt,
+ .stop_sch_tx = rtw89_mac_stop_sch_tx,
+ .resume_sch_tx = rtw89_mac_resume_sch_tx,
.btc_set_rfe = rtw8852a_btc_set_rfe,
.btc_init_cfg = rtw8852a_btc_init_cfg,
@@ -2019,6 +2058,9 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = &rtw89_8852a_phy_dig_table,
+ .support_bands = BIT(NL80211_BAND_2GHZ) |
+ BIT(NL80211_BAND_5GHZ),
+ .support_bw160 = false,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
@@ -2029,6 +2071,8 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.physical_efuse_size = 1216,
.logical_efuse_size = 1536,
.limit_efuse_size = 1152,
+ .dav_phy_efuse_size = 0,
+ .dav_log_efuse_size = 0,
.phycap_addr = 0x580,
.phycap_size = 128,
.para_ver = 0x05050864,
@@ -2049,7 +2093,18 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.ps_mode_supported = BIT(RTW89_PS_MODE_RFOFF) |
BIT(RTW89_PS_MODE_CLK_GATED) |
BIT(RTW89_PS_MODE_PWR_GATED),
+ .hci_func_en_addr = R_AX_HCI_FUNC_EN,
+ .h2c_ctrl_reg = R_AX_H2CREG_CTRL,
+ .h2c_regs = rtw8852a_h2c_regs,
+ .c2h_ctrl_reg = R_AX_C2HREG_CTRL,
+ .c2h_regs = rtw8852a_c2h_regs,
+ .page_regs = &rtw8852a_page_regs,
+ .dcfo_comp = &rtw8852a_dcfo_comp,
+ .dcfo_comp_sft = 3,
};
EXPORT_SYMBOL(rtw8852a_chip_info);
MODULE_FIRMWARE("rtw89/rtw8852a_fw.bin");
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852A driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.h b/drivers/net/wireless/realtek/rtw89/rtw8852a.h
index 633384374de0..fcff1194c009 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.h
@@ -93,6 +93,8 @@ struct rtw8852a_bb_pmac_info {
u8 duty_cycle;
};
+extern const struct rtw89_chip_info rtw8852a_chip_info;
+
void rtw8852a_bb_set_plcp_tx(struct rtw89_dev *rtwdev);
void rtw8852a_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
struct rtw8852a_bb_pmac_info *tx_info,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
index c021e93eb07b..ad272854c442 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
@@ -12,66 +12,6 @@
#include "rtw8852a_rfk_table.h"
#include "rtw8852a_table.h"
-static void
-_rfk_write_rf(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
-{
- rtw89_write_rf(rtwdev, def->path, def->addr, def->mask, def->data);
-}
-
-static void
-_rfk_write32_mask(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
-{
- rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
-}
-
-static void
-_rfk_write32_set(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
-{
- rtw89_phy_write32_set(rtwdev, def->addr, def->mask);
-}
-
-static void
-_rfk_write32_clr(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
-{
- rtw89_phy_write32_clr(rtwdev, def->addr, def->mask);
-}
-
-static void
-_rfk_delay(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
-{
- udelay(def->data);
-}
-
-static void
-(*_rfk_handler[])(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) = {
- [RTW89_RFK_F_WRF] = _rfk_write_rf,
- [RTW89_RFK_F_WM] = _rfk_write32_mask,
- [RTW89_RFK_F_WS] = _rfk_write32_set,
- [RTW89_RFK_F_WC] = _rfk_write32_clr,
- [RTW89_RFK_F_DELAY] = _rfk_delay,
-};
-
-static_assert(ARRAY_SIZE(_rfk_handler) == RTW89_RFK_F_NUM);
-
-static void
-rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl)
-{
- const struct rtw89_reg5_def *p = tbl->defs;
- const struct rtw89_reg5_def *end = tbl->defs + tbl->size;
-
- for (; p < end; p++)
- _rfk_handler[p->flag](rtwdev, p);
-}
-
-#define rtw89_rfk_parser_by_cond(rtwdev, cond, tbl_t, tbl_f) \
- do { \
- typeof(rtwdev) _dev = (rtwdev); \
- if (cond) \
- rtw89_rfk_parser(_dev, (tbl_t)); \
- else \
- rtw89_rfk_parser(_dev, (tbl_f)); \
- } while (0)
-
static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x, PHY%d\n",
@@ -2977,6 +2917,7 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
u8 i, j;
switch (subband) {
+ default:
case RTW89_CH_2G:
thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_2ga_p;
thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_2ga_n;
@@ -3161,6 +3102,7 @@ static void _tssi_pak(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
u8 subband = rtwdev->hal.current_subband;
switch (subband) {
+ default:
case RTW89_CH_2G:
rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
&rtw8852a_tssi_pak_defs_a_2g_tbl,
@@ -3584,7 +3526,7 @@ static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
const struct rtw89_chip_info *mac_reg = rtwdev->chip;
u8 ch = rtwdev->hal.current_channel, ch_tmp;
u8 bw = rtwdev->hal.current_band_width;
- u16 tx_en;
+ u32 tx_en;
u8 phy_map = rtw89_btc_phymap(rtwdev, phy, 0);
s8 power;
s16 xdbm;
@@ -3612,7 +3554,7 @@ static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
__func__, phy, power, xdbm);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
- rtw89_mac_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy));
tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
@@ -3658,7 +3600,7 @@ static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
rtw8852a_bb_tx_mode_switch(rtwdev, phy, 0);
- rtw89_mac_resume_sch_tx(rtwdev, phy, tx_en);
+ rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
}
@@ -3681,11 +3623,11 @@ void rtw8852a_dack(struct rtw89_dev *rtwdev)
void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
- u16 tx_en;
+ u32 tx_en;
u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
- rtw89_mac_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
_iqk_init(rtwdev);
@@ -3694,7 +3636,7 @@ void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
else
_iqk(rtwdev, phy_idx, false);
- rtw89_mac_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}
@@ -3706,33 +3648,33 @@ void rtw8852a_iqk_track(struct rtw89_dev *rtwdev)
void rtw8852a_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
bool is_afe)
{
- u16 tx_en;
+ u32 tx_en;
u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
- rtw89_mac_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
_rx_dck(rtwdev, phy_idx, is_afe);
- rtw89_mac_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}
void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
- u16 tx_en;
+ u32 tx_en;
u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
- rtw89_mac_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
rtwdev->dpk.is_dpk_enable = true;
rtwdev->dpk.is_dpk_reload_en = false;
_dpk(rtwdev, phy_idx, false);
- rtw89_mac_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c
index 510570090502..dd2a978b9bae 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c
@@ -5,1603 +5,1603 @@
#include "rtw8852a_rfk_table.h"
static const struct rtw89_reg5_def rtw8852a_tssi_sys_defs[] = {
- DECL_RFK_WM(0x12a8, 0x00000001, 0x00000001),
- DECL_RFK_WM(0x12a8, 0x0000000e, 0x00000002),
- DECL_RFK_WM(0x32a8, 0x00000001, 0x00000001),
- DECL_RFK_WM(0x32a8, 0x0000000e, 0x00000002),
- DECL_RFK_WM(0x12bc, 0x000000f0, 0x00000005),
- DECL_RFK_WM(0x12bc, 0x00000f00, 0x00000005),
- DECL_RFK_WM(0x12bc, 0x000f0000, 0x00000005),
- DECL_RFK_WM(0x12bc, 0x0000f000, 0x00000005),
- DECL_RFK_WM(0x120c, 0x000000ff, 0x00000033),
- DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000033),
- DECL_RFK_WM(0x32bc, 0x000000f0, 0x00000005),
- DECL_RFK_WM(0x32bc, 0x00000f00, 0x00000005),
- DECL_RFK_WM(0x32bc, 0x000f0000, 0x00000005),
- DECL_RFK_WM(0x32bc, 0x0000f000, 0x00000005),
- DECL_RFK_WM(0x320c, 0x000000ff, 0x00000033),
- DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000033),
- DECL_RFK_WM(0x0300, 0xff000000, 0x00000019),
- DECL_RFK_WM(0x0304, 0x000000ff, 0x00000019),
- DECL_RFK_WM(0x0304, 0x0000ff00, 0x0000001d),
- DECL_RFK_WM(0x0314, 0xffff0000, 0x00002044),
- DECL_RFK_WM(0x0318, 0x0000ffff, 0x00002042),
- DECL_RFK_WM(0x0318, 0xffff0000, 0x00002002),
- DECL_RFK_WM(0x0020, 0x00006000, 0x00000003),
- DECL_RFK_WM(0x0024, 0x00006000, 0x00000003),
- DECL_RFK_WM(0x0704, 0xffff0000, 0x0000601e),
- DECL_RFK_WM(0x2704, 0xffff0000, 0x0000601e),
- DECL_RFK_WM(0x0700, 0xf0000000, 0x00000004),
- DECL_RFK_WM(0x2700, 0xf0000000, 0x00000004),
- DECL_RFK_WM(0x0650, 0x3c000000, 0x00000000),
- DECL_RFK_WM(0x2650, 0x3c000000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs);
+ RTW89_DECL_RFK_WM(0x12a8, 0x00000001, 0x00000001),
+ RTW89_DECL_RFK_WM(0x12a8, 0x0000000e, 0x00000002),
+ RTW89_DECL_RFK_WM(0x32a8, 0x00000001, 0x00000001),
+ RTW89_DECL_RFK_WM(0x32a8, 0x0000000e, 0x00000002),
+ RTW89_DECL_RFK_WM(0x12bc, 0x000000f0, 0x00000005),
+ RTW89_DECL_RFK_WM(0x12bc, 0x00000f00, 0x00000005),
+ RTW89_DECL_RFK_WM(0x12bc, 0x000f0000, 0x00000005),
+ RTW89_DECL_RFK_WM(0x12bc, 0x0000f000, 0x00000005),
+ RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x00000033),
+ RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000033),
+ RTW89_DECL_RFK_WM(0x32bc, 0x000000f0, 0x00000005),
+ RTW89_DECL_RFK_WM(0x32bc, 0x00000f00, 0x00000005),
+ RTW89_DECL_RFK_WM(0x32bc, 0x000f0000, 0x00000005),
+ RTW89_DECL_RFK_WM(0x32bc, 0x0000f000, 0x00000005),
+ RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x00000033),
+ RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000033),
+ RTW89_DECL_RFK_WM(0x0300, 0xff000000, 0x00000019),
+ RTW89_DECL_RFK_WM(0x0304, 0x000000ff, 0x00000019),
+ RTW89_DECL_RFK_WM(0x0304, 0x0000ff00, 0x0000001d),
+ RTW89_DECL_RFK_WM(0x0314, 0xffff0000, 0x00002044),
+ RTW89_DECL_RFK_WM(0x0318, 0x0000ffff, 0x00002042),
+ RTW89_DECL_RFK_WM(0x0318, 0xffff0000, 0x00002002),
+ RTW89_DECL_RFK_WM(0x0020, 0x00006000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0024, 0x00006000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0704, 0xffff0000, 0x0000601e),
+ RTW89_DECL_RFK_WM(0x2704, 0xffff0000, 0x0000601e),
+ RTW89_DECL_RFK_WM(0x0700, 0xf0000000, 0x00000004),
+ RTW89_DECL_RFK_WM(0x2700, 0xf0000000, 0x00000004),
+ RTW89_DECL_RFK_WM(0x0650, 0x3c000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2650, 0x3c000000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs);
static const struct rtw89_reg5_def rtw8852a_tssi_sys_defs_2g[] = {
- DECL_RFK_WM(0x120c, 0x000000ff, 0x00000033),
- DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000033),
- DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000033),
- DECL_RFK_WM(0x320c, 0x000000ff, 0x00000033),
+ RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x00000033),
+ RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000033),
+ RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000033),
+ RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x00000033),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs_2g);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs_2g);
static const struct rtw89_reg5_def rtw8852a_tssi_sys_defs_5g[] = {
- DECL_RFK_WM(0x120c, 0x000000ff, 0x00000044),
- DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000044),
- DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000044),
- DECL_RFK_WM(0x320c, 0x000000ff, 0x00000044),
+ RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x00000044),
+ RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000044),
+ RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000044),
+ RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x00000044),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs_5g);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs_5g);
static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_a[] = {
- DECL_RFK_WM(0x5800, 0x000000ff, 0x0000007f),
- DECL_RFK_WM(0x5800, 0x0000ff00, 0x00000080),
- DECL_RFK_WM(0x5800, 0x003f0000, 0x0000003f),
- DECL_RFK_WM(0x5800, 0x10000000, 0x00000000),
- DECL_RFK_WM(0x5800, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x5800, 0xc0000000, 0x00000000),
- DECL_RFK_WM(0x5804, 0xf8000000, 0x00000000),
- DECL_RFK_WM(0x580c, 0x0000007f, 0x00000040),
- DECL_RFK_WM(0x580c, 0x00007f00, 0x00000040),
- DECL_RFK_WM(0x580c, 0x00008000, 0x00000000),
- DECL_RFK_WM(0x580c, 0x0fff0000, 0x00000000),
- DECL_RFK_WM(0x5810, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x5810, 0x00000200, 0x00000000),
- DECL_RFK_WM(0x5810, 0x0000fc00, 0x00000000),
- DECL_RFK_WM(0x5810, 0x00010000, 0x00000001),
- DECL_RFK_WM(0x5810, 0x00fe0000, 0x00000000),
- DECL_RFK_WM(0x5810, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x5810, 0x06000000, 0x00000000),
- DECL_RFK_WM(0x5810, 0x38000000, 0x00000003),
- DECL_RFK_WM(0x5810, 0x40000000, 0x00000001),
- DECL_RFK_WM(0x5810, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x5814, 0x00000c00, 0x00000000),
- DECL_RFK_WM(0x5814, 0x00001000, 0x00000001),
- DECL_RFK_WM(0x5814, 0x00002000, 0x00000000),
- DECL_RFK_WM(0x5814, 0x00004000, 0x00000001),
- DECL_RFK_WM(0x5814, 0x00038000, 0x00000005),
- DECL_RFK_WM(0x5814, 0x003c0000, 0x00000000),
- DECL_RFK_WM(0x5814, 0x01c00000, 0x00000000),
- DECL_RFK_WM(0x5814, 0x18000000, 0x00000000),
- DECL_RFK_WM(0x5814, 0xe0000000, 0x00000000),
- DECL_RFK_WM(0x5818, 0x000000ff, 0x00000000),
- DECL_RFK_WM(0x5818, 0x0001ff00, 0x00000018),
- DECL_RFK_WM(0x5818, 0x03fe0000, 0x00000016),
- DECL_RFK_WM(0x5818, 0xfc000000, 0x00000000),
- DECL_RFK_WM(0x581c, 0x000003ff, 0x00000280),
- DECL_RFK_WM(0x581c, 0x000ffc00, 0x00000200),
- DECL_RFK_WM(0x581c, 0x00100000, 0x00000000),
- DECL_RFK_WM(0x581c, 0x01e00000, 0x00000008),
- DECL_RFK_WM(0x581c, 0x01e00000, 0x0000000e),
- DECL_RFK_WM(0x581c, 0x1e000000, 0x00000008),
- DECL_RFK_WM(0x581c, 0x1e000000, 0x0000000e),
- DECL_RFK_WM(0x581c, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x5820, 0x00000fff, 0x00000080),
- DECL_RFK_WM(0x5820, 0x0000f000, 0x0000000f),
- DECL_RFK_WM(0x5820, 0x001f0000, 0x00000000),
- DECL_RFK_WM(0x5820, 0xffe00000, 0x00000000),
- DECL_RFK_WM(0x5824, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x5824, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5828, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x582c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x582c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5830, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x5834, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x5834, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5838, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x583c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x583c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5840, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x5844, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x5844, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5848, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x584c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x584c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5850, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x5854, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x5854, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5858, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x585c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x585c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5860, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x5828, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5828, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x5830, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5830, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x5838, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5838, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x5840, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5840, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x5848, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5848, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x5850, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5850, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x5858, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5858, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x5860, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5860, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x5860, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x5864, 0x000003ff, 0x000001ff),
- DECL_RFK_WM(0x5864, 0x000ffc00, 0x00000200),
- DECL_RFK_WM(0x5864, 0x03f00000, 0x00000000),
- DECL_RFK_WM(0x5864, 0x04000000, 0x00000000),
- DECL_RFK_WM(0x5898, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x589c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x58a0, 0x000000ff, 0x000000fd),
- DECL_RFK_WM(0x58a0, 0x0000ff00, 0x000000e5),
- DECL_RFK_WM(0x58a0, 0x00ff0000, 0x000000cd),
- DECL_RFK_WM(0x58a0, 0xff000000, 0x000000b5),
- DECL_RFK_WM(0x58a4, 0x000000ff, 0x00000016),
- DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x03fe0000, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x58b0, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x58b4, 0x0000001f, 0x00000000),
- DECL_RFK_WM(0x58b4, 0x00000020, 0x00000000),
- DECL_RFK_WM(0x58b4, 0x000001c0, 0x00000000),
- DECL_RFK_WM(0x58b4, 0x00000200, 0x00000000),
- DECL_RFK_WM(0x58b4, 0x0000f000, 0x00000002),
- DECL_RFK_WM(0x58b4, 0x00ff0000, 0x00000000),
- DECL_RFK_WM(0x58b4, 0x7f000000, 0x0000000a),
- DECL_RFK_WM(0x58b8, 0x0000007f, 0x00000028),
- DECL_RFK_WM(0x58b8, 0x00007f00, 0x00000076),
- DECL_RFK_WM(0x58b8, 0x007f0000, 0x00000000),
- DECL_RFK_WM(0x58b8, 0x7f000000, 0x00000000),
- DECL_RFK_WM(0x58bc, 0x000000ff, 0x0000007f),
- DECL_RFK_WM(0x58bc, 0x0000ff00, 0x00000080),
- DECL_RFK_WM(0x58bc, 0x00030000, 0x00000003),
- DECL_RFK_WM(0x58bc, 0x000c0000, 0x00000001),
- DECL_RFK_WM(0x58bc, 0x00300000, 0x00000002),
- DECL_RFK_WM(0x58bc, 0x00c00000, 0x00000002),
- DECL_RFK_WM(0x58bc, 0x07000000, 0x00000007),
- DECL_RFK_WM(0x58c0, 0x00fe0000, 0x0000003f),
- DECL_RFK_WM(0x58c0, 0xff000000, 0x00000000),
- DECL_RFK_WM(0x58c4, 0x0003ffff, 0x0003ffff),
- DECL_RFK_WM(0x58c4, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x58c4, 0xc0000000, 0x00000000),
- DECL_RFK_WM(0x58c8, 0x00ffffff, 0x00000000),
- DECL_RFK_WM(0x58c8, 0xf0000000, 0x00000000),
- DECL_RFK_WM(0x58cc, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x58d0, 0x00001fff, 0x00000101),
- DECL_RFK_WM(0x58d0, 0x0001e000, 0x00000004),
- DECL_RFK_WM(0x58d0, 0x03fe0000, 0x00000100),
- DECL_RFK_WM(0x58d0, 0x04000000, 0x00000000),
- DECL_RFK_WM(0x58d4, 0x000000ff, 0x00000000),
- DECL_RFK_WM(0x58d4, 0x0003fe00, 0x000000ff),
- DECL_RFK_WM(0x58d4, 0x07fc0000, 0x00000100),
- DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000016c),
- DECL_RFK_WM(0x58d8, 0x0003fe00, 0x0000005c),
- DECL_RFK_WM(0x58d8, 0x000c0000, 0x00000002),
- DECL_RFK_WM(0x58d8, 0xfff00000, 0x00000800),
- DECL_RFK_WM(0x58dc, 0x000000ff, 0x0000007f),
- DECL_RFK_WM(0x58dc, 0x0000ff00, 0x00000080),
- DECL_RFK_WM(0x58dc, 0x00010000, 0x00000000),
- DECL_RFK_WM(0x58dc, 0x3ff00000, 0x00000000),
- DECL_RFK_WM(0x58dc, 0x80000000, 0x00000001),
- DECL_RFK_WM(0x58f0, 0x000001ff, 0x000001ff),
- DECL_RFK_WM(0x58f0, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_a);
+ RTW89_DECL_RFK_WM(0x5800, 0x000000ff, 0x0000007f),
+ RTW89_DECL_RFK_WM(0x5800, 0x0000ff00, 0x00000080),
+ RTW89_DECL_RFK_WM(0x5800, 0x003f0000, 0x0000003f),
+ RTW89_DECL_RFK_WM(0x5800, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5800, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5800, 0xc0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5804, 0xf8000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x580c, 0x0000007f, 0x00000040),
+ RTW89_DECL_RFK_WM(0x580c, 0x00007f00, 0x00000040),
+ RTW89_DECL_RFK_WM(0x580c, 0x00008000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x580c, 0x0fff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5810, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5810, 0x00000200, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5810, 0x0000fc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5810, 0x00010000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5810, 0x00fe0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5810, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5810, 0x06000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5810, 0x38000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x5810, 0x40000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5810, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x00000c00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x00001000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5814, 0x00002000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x00004000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5814, 0x00038000, 0x00000005),
+ RTW89_DECL_RFK_WM(0x5814, 0x003c0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x01c00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x18000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0xe0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5818, 0x000000ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5818, 0x0001ff00, 0x00000018),
+ RTW89_DECL_RFK_WM(0x5818, 0x03fe0000, 0x00000016),
+ RTW89_DECL_RFK_WM(0x5818, 0xfc000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x581c, 0x000003ff, 0x00000280),
+ RTW89_DECL_RFK_WM(0x581c, 0x000ffc00, 0x00000200),
+ RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x581c, 0x01e00000, 0x00000008),
+ RTW89_DECL_RFK_WM(0x581c, 0x01e00000, 0x0000000e),
+ RTW89_DECL_RFK_WM(0x581c, 0x1e000000, 0x00000008),
+ RTW89_DECL_RFK_WM(0x581c, 0x1e000000, 0x0000000e),
+ RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5820, 0x00000fff, 0x00000080),
+ RTW89_DECL_RFK_WM(0x5820, 0x0000f000, 0x0000000f),
+ RTW89_DECL_RFK_WM(0x5820, 0x001f0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5820, 0xffe00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5824, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5824, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5828, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x582c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x582c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5830, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x5834, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5834, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5838, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x583c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x583c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5840, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x5844, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5844, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5848, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x584c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x584c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5850, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x5854, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5854, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5858, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x585c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x585c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5860, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x5828, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5828, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5830, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5830, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5838, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5838, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5840, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5840, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5848, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5848, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5850, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5850, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5858, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5858, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5860, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5860, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5860, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5864, 0x000003ff, 0x000001ff),
+ RTW89_DECL_RFK_WM(0x5864, 0x000ffc00, 0x00000200),
+ RTW89_DECL_RFK_WM(0x5864, 0x03f00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5864, 0x04000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5898, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x589c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a0, 0x000000ff, 0x000000fd),
+ RTW89_DECL_RFK_WM(0x58a0, 0x0000ff00, 0x000000e5),
+ RTW89_DECL_RFK_WM(0x58a0, 0x00ff0000, 0x000000cd),
+ RTW89_DECL_RFK_WM(0x58a0, 0xff000000, 0x000000b5),
+ RTW89_DECL_RFK_WM(0x58a4, 0x000000ff, 0x00000016),
+ RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b4, 0x0000001f, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b4, 0x00000020, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b4, 0x000001c0, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b4, 0x00000200, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b4, 0x0000f000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x58b4, 0x00ff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b4, 0x7f000000, 0x0000000a),
+ RTW89_DECL_RFK_WM(0x58b8, 0x0000007f, 0x00000028),
+ RTW89_DECL_RFK_WM(0x58b8, 0x00007f00, 0x00000076),
+ RTW89_DECL_RFK_WM(0x58b8, 0x007f0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b8, 0x7f000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58bc, 0x000000ff, 0x0000007f),
+ RTW89_DECL_RFK_WM(0x58bc, 0x0000ff00, 0x00000080),
+ RTW89_DECL_RFK_WM(0x58bc, 0x00030000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x58bc, 0x000c0000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58bc, 0x00300000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x58bc, 0x00c00000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x58bc, 0x07000000, 0x00000007),
+ RTW89_DECL_RFK_WM(0x58c0, 0x00fe0000, 0x0000003f),
+ RTW89_DECL_RFK_WM(0x58c0, 0xff000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58c4, 0x0003ffff, 0x0003ffff),
+ RTW89_DECL_RFK_WM(0x58c4, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58c4, 0xc0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58c8, 0x00ffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58c8, 0xf0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58cc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58d0, 0x00001fff, 0x00000101),
+ RTW89_DECL_RFK_WM(0x58d0, 0x0001e000, 0x00000004),
+ RTW89_DECL_RFK_WM(0x58d0, 0x03fe0000, 0x00000100),
+ RTW89_DECL_RFK_WM(0x58d0, 0x04000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58d4, 0x000000ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58d4, 0x0003fe00, 0x000000ff),
+ RTW89_DECL_RFK_WM(0x58d4, 0x07fc0000, 0x00000100),
+ RTW89_DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000016c),
+ RTW89_DECL_RFK_WM(0x58d8, 0x0003fe00, 0x0000005c),
+ RTW89_DECL_RFK_WM(0x58d8, 0x000c0000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x58d8, 0xfff00000, 0x00000800),
+ RTW89_DECL_RFK_WM(0x58dc, 0x000000ff, 0x0000007f),
+ RTW89_DECL_RFK_WM(0x58dc, 0x0000ff00, 0x00000080),
+ RTW89_DECL_RFK_WM(0x58dc, 0x00010000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58dc, 0x3ff00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58dc, 0x80000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58f0, 0x000001ff, 0x000001ff),
+ RTW89_DECL_RFK_WM(0x58f0, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_b[] = {
- DECL_RFK_WM(0x7800, 0x000000ff, 0x0000007f),
- DECL_RFK_WM(0x7800, 0x0000ff00, 0x00000080),
- DECL_RFK_WM(0x7800, 0x003f0000, 0x0000003f),
- DECL_RFK_WM(0x7800, 0x10000000, 0x00000000),
- DECL_RFK_WM(0x7800, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x7800, 0xc0000000, 0x00000000),
- DECL_RFK_WM(0x7804, 0xf8000000, 0x00000000),
- DECL_RFK_WM(0x780c, 0x0000007f, 0x00000040),
- DECL_RFK_WM(0x780c, 0x00007f00, 0x00000040),
- DECL_RFK_WM(0x780c, 0x00008000, 0x00000000),
- DECL_RFK_WM(0x780c, 0x0fff0000, 0x00000000),
- DECL_RFK_WM(0x7810, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x7810, 0x00000200, 0x00000000),
- DECL_RFK_WM(0x7810, 0x0000fc00, 0x00000000),
- DECL_RFK_WM(0x7810, 0x00010000, 0x00000001),
- DECL_RFK_WM(0x7810, 0x00fe0000, 0x00000000),
- DECL_RFK_WM(0x7810, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x7810, 0x06000000, 0x00000000),
- DECL_RFK_WM(0x7810, 0x38000000, 0x00000003),
- DECL_RFK_WM(0x7810, 0x40000000, 0x00000001),
- DECL_RFK_WM(0x7810, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x7814, 0x00000c00, 0x00000000),
- DECL_RFK_WM(0x7814, 0x00001000, 0x00000001),
- DECL_RFK_WM(0x7814, 0x00002000, 0x00000000),
- DECL_RFK_WM(0x7814, 0x00004000, 0x00000001),
- DECL_RFK_WM(0x7814, 0x00038000, 0x00000005),
- DECL_RFK_WM(0x7814, 0x003c0000, 0x00000000),
- DECL_RFK_WM(0x7814, 0x01c00000, 0x00000000),
- DECL_RFK_WM(0x7814, 0x18000000, 0x00000000),
- DECL_RFK_WM(0x7814, 0xe0000000, 0x00000000),
- DECL_RFK_WM(0x7818, 0x000000ff, 0x00000000),
- DECL_RFK_WM(0x7818, 0x0001ff00, 0x00000018),
- DECL_RFK_WM(0x7818, 0x03fe0000, 0x00000016),
- DECL_RFK_WM(0x7818, 0xfc000000, 0x00000000),
- DECL_RFK_WM(0x781c, 0x000003ff, 0x00000280),
- DECL_RFK_WM(0x781c, 0x000ffc00, 0x00000200),
- DECL_RFK_WM(0x781c, 0x00100000, 0x00000000),
- DECL_RFK_WM(0x781c, 0x01e00000, 0x00000008),
- DECL_RFK_WM(0x781c, 0x01e00000, 0x0000000e),
- DECL_RFK_WM(0x781c, 0x1e000000, 0x00000008),
- DECL_RFK_WM(0x781c, 0x1e000000, 0x0000000e),
- DECL_RFK_WM(0x781c, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x7820, 0x00000fff, 0x00000080),
- DECL_RFK_WM(0x7820, 0x0000f000, 0x00000000),
- DECL_RFK_WM(0x7820, 0x001f0000, 0x00000000),
- DECL_RFK_WM(0x7820, 0xffe00000, 0x00000000),
- DECL_RFK_WM(0x7824, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x7824, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7828, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x782c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x782c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7830, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x7834, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x7834, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7838, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x783c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x783c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7840, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x7844, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x7844, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7848, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x784c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x784c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7850, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x7854, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x7854, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7858, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x785c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x785c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7860, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x7828, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7828, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x7830, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7830, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x7838, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7838, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x7840, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7840, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x7848, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7848, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x7850, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7850, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x7858, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7858, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x7860, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7860, 0x7fc00000, 0x00000000),
- DECL_RFK_WM(0x7860, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x7864, 0x000003ff, 0x000001ff),
- DECL_RFK_WM(0x7864, 0x000ffc00, 0x00000200),
- DECL_RFK_WM(0x7864, 0x03f00000, 0x00000000),
- DECL_RFK_WM(0x7864, 0x04000000, 0x00000000),
- DECL_RFK_WM(0x7898, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x789c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x78a0, 0x000000ff, 0x000000fd),
- DECL_RFK_WM(0x78a0, 0x0000ff00, 0x000000e5),
- DECL_RFK_WM(0x78a0, 0x00ff0000, 0x000000cd),
- DECL_RFK_WM(0x78a0, 0xff000000, 0x000000b5),
- DECL_RFK_WM(0x78a4, 0x000000ff, 0x00000016),
- DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x03fe0000, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x78b0, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x78b4, 0x0000001f, 0x00000000),
- DECL_RFK_WM(0x78b4, 0x00000020, 0x00000000),
- DECL_RFK_WM(0x78b4, 0x000001c0, 0x00000000),
- DECL_RFK_WM(0x78b4, 0x00000200, 0x00000000),
- DECL_RFK_WM(0x78b4, 0x0000f000, 0x00000002),
- DECL_RFK_WM(0x78b4, 0x00ff0000, 0x00000000),
- DECL_RFK_WM(0x78b4, 0x7f000000, 0x0000000a),
- DECL_RFK_WM(0x78b8, 0x0000007f, 0x00000028),
- DECL_RFK_WM(0x78b8, 0x00007f00, 0x00000076),
- DECL_RFK_WM(0x78b8, 0x007f0000, 0x00000000),
- DECL_RFK_WM(0x78b8, 0x7f000000, 0x00000000),
- DECL_RFK_WM(0x78bc, 0x000000ff, 0x0000007f),
- DECL_RFK_WM(0x78bc, 0x0000ff00, 0x00000080),
- DECL_RFK_WM(0x78bc, 0x00030000, 0x00000003),
- DECL_RFK_WM(0x78bc, 0x000c0000, 0x00000001),
- DECL_RFK_WM(0x78bc, 0x00300000, 0x00000002),
- DECL_RFK_WM(0x78bc, 0x00c00000, 0x00000002),
- DECL_RFK_WM(0x78bc, 0x07000000, 0x00000007),
- DECL_RFK_WM(0x78c0, 0x00fe0000, 0x0000003f),
- DECL_RFK_WM(0x78c0, 0xff000000, 0x00000000),
- DECL_RFK_WM(0x78c4, 0x0003ffff, 0x0003ffff),
- DECL_RFK_WM(0x78c4, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x78c4, 0xc0000000, 0x00000000),
- DECL_RFK_WM(0x78c8, 0x00ffffff, 0x00000000),
- DECL_RFK_WM(0x78c8, 0xf0000000, 0x00000000),
- DECL_RFK_WM(0x78cc, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x78d0, 0x00001fff, 0x00000101),
- DECL_RFK_WM(0x78d0, 0x0001e000, 0x00000004),
- DECL_RFK_WM(0x78d0, 0x03fe0000, 0x00000100),
- DECL_RFK_WM(0x78d0, 0x04000000, 0x00000000),
- DECL_RFK_WM(0x78d4, 0x000000ff, 0x00000000),
- DECL_RFK_WM(0x78d4, 0x0003fe00, 0x000000ff),
- DECL_RFK_WM(0x78d4, 0x07fc0000, 0x00000100),
- DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000016c),
- DECL_RFK_WM(0x78d8, 0x0003fe00, 0x0000005c),
- DECL_RFK_WM(0x78d8, 0x000c0000, 0x00000002),
- DECL_RFK_WM(0x78d8, 0xfff00000, 0x00000800),
- DECL_RFK_WM(0x78dc, 0x000000ff, 0x0000007f),
- DECL_RFK_WM(0x78dc, 0x0000ff00, 0x00000080),
- DECL_RFK_WM(0x78dc, 0x00010000, 0x00000000),
- DECL_RFK_WM(0x78dc, 0x3ff00000, 0x00000000),
- DECL_RFK_WM(0x78dc, 0x80000000, 0x00000001),
- DECL_RFK_WM(0x78f0, 0x000001ff, 0x000001ff),
- DECL_RFK_WM(0x78f0, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_b);
+ RTW89_DECL_RFK_WM(0x7800, 0x000000ff, 0x0000007f),
+ RTW89_DECL_RFK_WM(0x7800, 0x0000ff00, 0x00000080),
+ RTW89_DECL_RFK_WM(0x7800, 0x003f0000, 0x0000003f),
+ RTW89_DECL_RFK_WM(0x7800, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7800, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7800, 0xc0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7804, 0xf8000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x780c, 0x0000007f, 0x00000040),
+ RTW89_DECL_RFK_WM(0x780c, 0x00007f00, 0x00000040),
+ RTW89_DECL_RFK_WM(0x780c, 0x00008000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x780c, 0x0fff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7810, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7810, 0x00000200, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7810, 0x0000fc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7810, 0x00010000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7810, 0x00fe0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7810, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7810, 0x06000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7810, 0x38000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x7810, 0x40000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7810, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x00000c00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x00001000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7814, 0x00002000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x00004000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7814, 0x00038000, 0x00000005),
+ RTW89_DECL_RFK_WM(0x7814, 0x003c0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x01c00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x18000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0xe0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7818, 0x000000ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7818, 0x0001ff00, 0x00000018),
+ RTW89_DECL_RFK_WM(0x7818, 0x03fe0000, 0x00000016),
+ RTW89_DECL_RFK_WM(0x7818, 0xfc000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x781c, 0x000003ff, 0x00000280),
+ RTW89_DECL_RFK_WM(0x781c, 0x000ffc00, 0x00000200),
+ RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x781c, 0x01e00000, 0x00000008),
+ RTW89_DECL_RFK_WM(0x781c, 0x01e00000, 0x0000000e),
+ RTW89_DECL_RFK_WM(0x781c, 0x1e000000, 0x00000008),
+ RTW89_DECL_RFK_WM(0x781c, 0x1e000000, 0x0000000e),
+ RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7820, 0x00000fff, 0x00000080),
+ RTW89_DECL_RFK_WM(0x7820, 0x0000f000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7820, 0x001f0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7820, 0xffe00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7824, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7824, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7828, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x782c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x782c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7830, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x7834, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7834, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7838, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x783c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x783c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7840, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x7844, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7844, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7848, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x784c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x784c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7850, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x7854, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7854, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7858, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x785c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x785c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7860, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x7828, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7828, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7830, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7830, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7838, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7838, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7840, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7840, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7848, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7848, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7850, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7850, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7858, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7858, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7860, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7860, 0x7fc00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7860, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7864, 0x000003ff, 0x000001ff),
+ RTW89_DECL_RFK_WM(0x7864, 0x000ffc00, 0x00000200),
+ RTW89_DECL_RFK_WM(0x7864, 0x03f00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7864, 0x04000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7898, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x789c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a0, 0x000000ff, 0x000000fd),
+ RTW89_DECL_RFK_WM(0x78a0, 0x0000ff00, 0x000000e5),
+ RTW89_DECL_RFK_WM(0x78a0, 0x00ff0000, 0x000000cd),
+ RTW89_DECL_RFK_WM(0x78a0, 0xff000000, 0x000000b5),
+ RTW89_DECL_RFK_WM(0x78a4, 0x000000ff, 0x00000016),
+ RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b4, 0x0000001f, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b4, 0x00000020, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b4, 0x000001c0, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b4, 0x00000200, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b4, 0x0000f000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x78b4, 0x00ff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b4, 0x7f000000, 0x0000000a),
+ RTW89_DECL_RFK_WM(0x78b8, 0x0000007f, 0x00000028),
+ RTW89_DECL_RFK_WM(0x78b8, 0x00007f00, 0x00000076),
+ RTW89_DECL_RFK_WM(0x78b8, 0x007f0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b8, 0x7f000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78bc, 0x000000ff, 0x0000007f),
+ RTW89_DECL_RFK_WM(0x78bc, 0x0000ff00, 0x00000080),
+ RTW89_DECL_RFK_WM(0x78bc, 0x00030000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x78bc, 0x000c0000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78bc, 0x00300000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x78bc, 0x00c00000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x78bc, 0x07000000, 0x00000007),
+ RTW89_DECL_RFK_WM(0x78c0, 0x00fe0000, 0x0000003f),
+ RTW89_DECL_RFK_WM(0x78c0, 0xff000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78c4, 0x0003ffff, 0x0003ffff),
+ RTW89_DECL_RFK_WM(0x78c4, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78c4, 0xc0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78c8, 0x00ffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78c8, 0xf0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78cc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78d0, 0x00001fff, 0x00000101),
+ RTW89_DECL_RFK_WM(0x78d0, 0x0001e000, 0x00000004),
+ RTW89_DECL_RFK_WM(0x78d0, 0x03fe0000, 0x00000100),
+ RTW89_DECL_RFK_WM(0x78d0, 0x04000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78d4, 0x000000ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78d4, 0x0003fe00, 0x000000ff),
+ RTW89_DECL_RFK_WM(0x78d4, 0x07fc0000, 0x00000100),
+ RTW89_DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000016c),
+ RTW89_DECL_RFK_WM(0x78d8, 0x0003fe00, 0x0000005c),
+ RTW89_DECL_RFK_WM(0x78d8, 0x000c0000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x78d8, 0xfff00000, 0x00000800),
+ RTW89_DECL_RFK_WM(0x78dc, 0x000000ff, 0x0000007f),
+ RTW89_DECL_RFK_WM(0x78dc, 0x0000ff00, 0x00000080),
+ RTW89_DECL_RFK_WM(0x78dc, 0x00010000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78dc, 0x3ff00000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78dc, 0x80000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78f0, 0x000001ff, 0x000001ff),
+ RTW89_DECL_RFK_WM(0x78f0, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_2g[] = {
- DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000013c),
- DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000013c),
+ RTW89_DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000013c),
+ RTW89_DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000013c),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_2g);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_2g);
static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_5g[] = {
- DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000016c),
- DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000016c),
+ RTW89_DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000016c),
+ RTW89_DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000016c),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_5g);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_5g);
static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a[] = {
- DECL_RFK_WM(0x58a0, 0xffffffff, 0x000000fc),
- DECL_RFK_WM(0x58e4, 0x0000007f, 0x00000020),
+ RTW89_DECL_RFK_WM(0x58a0, 0xffffffff, 0x000000fc),
+ RTW89_DECL_RFK_WM(0x58e4, 0x0000007f, 0x00000020),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b[] = {
- DECL_RFK_WM(0x78a0, 0xffffffff, 0x000000fc),
- DECL_RFK_WM(0x78e4, 0x0000007f, 0x00000020),
+ RTW89_DECL_RFK_WM(0x78a0, 0xffffffff, 0x000000fc),
+ RTW89_DECL_RFK_WM(0x78e4, 0x0000007f, 0x00000020),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_dck_defs_a[] = {
- DECL_RFK_WM(0x580c, 0x0fff0000, 0x00000000),
- DECL_RFK_WM(0x5814, 0x00001000, 0x00000001),
- DECL_RFK_WM(0x5814, 0x00002000, 0x00000001),
- DECL_RFK_WM(0x5814, 0x00004000, 0x00000001),
- DECL_RFK_WM(0x5814, 0x00038000, 0x00000005),
- DECL_RFK_WM(0x5814, 0x003c0000, 0x00000003),
- DECL_RFK_WM(0x5814, 0x18000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x580c, 0x0fff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x00001000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5814, 0x00002000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5814, 0x00004000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5814, 0x00038000, 0x00000005),
+ RTW89_DECL_RFK_WM(0x5814, 0x003c0000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x5814, 0x18000000, 0x00000000),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_dck_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_dck_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_dck_defs_b[] = {
- DECL_RFK_WM(0x780c, 0x0fff0000, 0x00000000),
- DECL_RFK_WM(0x7814, 0x00001000, 0x00000001),
- DECL_RFK_WM(0x7814, 0x00002000, 0x00000001),
- DECL_RFK_WM(0x7814, 0x00004000, 0x00000001),
- DECL_RFK_WM(0x7814, 0x00038000, 0x00000005),
- DECL_RFK_WM(0x7814, 0x003c0000, 0x00000003),
- DECL_RFK_WM(0x7814, 0x18000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x780c, 0x0fff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x00001000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7814, 0x00002000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7814, 0x00004000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7814, 0x00038000, 0x00000005),
+ RTW89_DECL_RFK_WM(0x7814, 0x003c0000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x7814, 0x18000000, 0x00000000),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_dck_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_dck_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_dac_gain_tbl_defs_a[] = {
- DECL_RFK_WM(0x58b0, 0x00000fff, 0x00000000),
- DECL_RFK_WM(0x58b0, 0x00000800, 0x00000001),
- DECL_RFK_WM(0x5a00, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a04, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a08, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a0c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a10, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a14, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a18, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a1c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a20, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a24, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a28, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a2c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a30, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a34, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a38, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a3c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a40, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a44, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a48, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a4c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a50, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a54, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a58, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a5c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a60, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a64, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a68, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a6c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a70, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a74, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a78, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a7c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a80, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a84, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a88, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a8c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a90, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a94, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a98, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5a9c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5aa0, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5aa4, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5aa8, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5aac, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5ab0, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5ab4, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5ab8, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5abc, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x5ac0, 0xffffffff, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_dac_gain_tbl_defs_a);
+ RTW89_DECL_RFK_WM(0x58b0, 0x00000fff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b0, 0x00000800, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5a00, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a04, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a08, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a0c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a10, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a14, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a18, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a1c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a20, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a24, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a28, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a2c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a30, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a34, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a38, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a3c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a40, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a44, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a48, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a4c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a50, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a54, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a58, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a5c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a60, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a64, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a68, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a6c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a70, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a74, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a78, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a7c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a80, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a84, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a88, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a8c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a90, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a94, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a98, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a9c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5aa0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5aa4, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5aa8, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5aac, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5ab0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5ab4, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5ab8, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5abc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5ac0, 0xffffffff, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_dac_gain_tbl_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_dac_gain_tbl_defs_b[] = {
- DECL_RFK_WM(0x78b0, 0x00000fff, 0x00000000),
- DECL_RFK_WM(0x78b0, 0x00000800, 0x00000001),
- DECL_RFK_WM(0x7a00, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a04, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a08, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a0c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a10, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a14, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a18, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a1c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a20, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a24, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a28, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a2c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a30, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a34, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a38, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a3c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a40, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a44, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a48, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a4c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a50, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a54, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a58, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a5c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a60, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a64, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a68, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a6c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a70, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a74, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a78, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a7c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a80, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a84, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a88, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a8c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a90, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a94, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a98, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7a9c, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7aa0, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7aa4, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7aa8, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7aac, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7ab0, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7ab4, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7ab8, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7abc, 0xffffffff, 0x00000000),
- DECL_RFK_WM(0x7ac0, 0xffffffff, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_dac_gain_tbl_defs_b);
+ RTW89_DECL_RFK_WM(0x78b0, 0x00000fff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b0, 0x00000800, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7a00, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a04, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a08, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a0c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a10, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a14, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a18, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a1c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a20, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a24, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a28, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a2c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a30, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a34, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a38, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a3c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a40, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a44, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a48, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a4c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a50, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a54, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a58, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a5c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a60, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a64, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a68, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a6c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a70, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a74, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a78, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a7c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a80, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a84, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a88, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a8c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a90, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a94, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a98, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a9c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7aa0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7aa4, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7aa8, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7aac, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7ab0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7ab4, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7ab8, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7abc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7ac0, 0xffffffff, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_dac_gain_tbl_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_slope_cal_org_defs_a[] = {
- DECL_RFK_WM(0x581c, 0x00100000, 0x00000000),
- DECL_RFK_WM(0x58cc, 0x00001000, 0x00000001),
- DECL_RFK_WM(0x58cc, 0x00000007, 0x00000000),
- DECL_RFK_WM(0x58cc, 0x00000038, 0x00000001),
- DECL_RFK_WM(0x58cc, 0x000001c0, 0x00000002),
- DECL_RFK_WM(0x58cc, 0x00000e00, 0x00000003),
- DECL_RFK_WM(0x5828, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x5898, 0x000000ff, 0x00000040),
- DECL_RFK_WM(0x5830, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x5898, 0x0000ff00, 0x00000040),
- DECL_RFK_WM(0x5838, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x5898, 0x00ff0000, 0x00000040),
- DECL_RFK_WM(0x5840, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x5898, 0xff000000, 0x00000040),
- DECL_RFK_WM(0x5848, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x589c, 0x000000ff, 0x00000040),
- DECL_RFK_WM(0x5850, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x589c, 0x0000ff00, 0x00000040),
- DECL_RFK_WM(0x5858, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x589c, 0x00ff0000, 0x00000040),
- DECL_RFK_WM(0x5860, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x589c, 0xff000000, 0x00000040),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_slope_cal_org_defs_a);
+ RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58cc, 0x00001000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58cc, 0x00000007, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58cc, 0x00000038, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58cc, 0x000001c0, 0x00000002),
+ RTW89_DECL_RFK_WM(0x58cc, 0x00000e00, 0x00000003),
+ RTW89_DECL_RFK_WM(0x5828, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5898, 0x000000ff, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5830, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5898, 0x0000ff00, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5838, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5898, 0x00ff0000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5840, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5898, 0xff000000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5848, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x589c, 0x000000ff, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5850, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x589c, 0x0000ff00, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5858, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x589c, 0x00ff0000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x5860, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x589c, 0xff000000, 0x00000040),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_slope_cal_org_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_slope_cal_org_defs_b[] = {
- DECL_RFK_WM(0x781c, 0x00100000, 0x00000000),
- DECL_RFK_WM(0x78cc, 0x00001000, 0x00000001),
- DECL_RFK_WM(0x78cc, 0x00000007, 0x00000000),
- DECL_RFK_WM(0x78cc, 0x00000038, 0x00000001),
- DECL_RFK_WM(0x78cc, 0x000001c0, 0x00000002),
- DECL_RFK_WM(0x78cc, 0x00000e00, 0x00000003),
- DECL_RFK_WM(0x7828, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x7898, 0x000000ff, 0x00000040),
- DECL_RFK_WM(0x7830, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x7898, 0x0000ff00, 0x00000040),
- DECL_RFK_WM(0x7838, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x7898, 0x00ff0000, 0x00000040),
- DECL_RFK_WM(0x7840, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x7898, 0xff000000, 0x00000040),
- DECL_RFK_WM(0x7848, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x789c, 0x000000ff, 0x00000040),
- DECL_RFK_WM(0x7850, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x789c, 0x0000ff00, 0x00000040),
- DECL_RFK_WM(0x7878, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x789c, 0x00ff0000, 0x00000040),
- DECL_RFK_WM(0x7860, 0x7fc00000, 0x00000040),
- DECL_RFK_WM(0x789c, 0xff000000, 0x00000040),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_slope_cal_org_defs_b);
+ RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78cc, 0x00001000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78cc, 0x00000007, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78cc, 0x00000038, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78cc, 0x000001c0, 0x00000002),
+ RTW89_DECL_RFK_WM(0x78cc, 0x00000e00, 0x00000003),
+ RTW89_DECL_RFK_WM(0x7828, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7898, 0x000000ff, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7830, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7898, 0x0000ff00, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7838, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7898, 0x00ff0000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7840, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7898, 0xff000000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7848, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x789c, 0x000000ff, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7850, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x789c, 0x0000ff00, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7878, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x789c, 0x00ff0000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x7860, 0x7fc00000, 0x00000040),
+ RTW89_DECL_RFK_WM(0x789c, 0xff000000, 0x00000040),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_slope_cal_org_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_rf_gap_tbl_defs_a[] = {
- DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x03fe0000, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_rf_gap_tbl_defs_a);
+ RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_rf_gap_tbl_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_rf_gap_tbl_defs_b[] = {
- DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x03fe0000, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_rf_gap_tbl_defs_b);
+ RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_rf_gap_tbl_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_slope_defs_a[] = {
- DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x5818, 0x10000000, 0x00000000),
- DECL_RFK_WM(0x5814, 0x00000800, 0x00000001),
- DECL_RFK_WM(0x581c, 0x20000000, 0x00000001),
- DECL_RFK_WM(0x5820, 0x0000f000, 0x00000001),
- DECL_RFK_WM(0x581c, 0x000003ff, 0x00000280),
- DECL_RFK_WM(0x581c, 0x000ffc00, 0x00000200),
- DECL_RFK_WM(0x58b8, 0x007f0000, 0x00000000),
- DECL_RFK_WM(0x58b8, 0x7f000000, 0x00000000),
- DECL_RFK_WM(0x58b4, 0x7f000000, 0x0000000a),
- DECL_RFK_WM(0x58b8, 0x0000007f, 0x00000028),
- DECL_RFK_WM(0x58b8, 0x00007f00, 0x00000076),
- DECL_RFK_WM(0x5810, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x5814, 0x20000000, 0x00000001),
- DECL_RFK_WM(0x580c, 0x10000000, 0x00000001),
- DECL_RFK_WM(0x580c, 0x40000000, 0x00000001),
- DECL_RFK_WM(0x5838, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5858, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5834, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x5834, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5838, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x5854, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x5854, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5858, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x5824, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x5824, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5828, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x582c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x582c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5830, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x583c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x583c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5840, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x5844, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x5844, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5848, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x584c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x584c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5850, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x585c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x585c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x5860, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x5828, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5830, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5840, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5848, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5850, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x5860, 0x003ff000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_slope_defs_a);
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x00000800, 0x00000001),
+ RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5820, 0x0000f000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x581c, 0x000003ff, 0x00000280),
+ RTW89_DECL_RFK_WM(0x581c, 0x000ffc00, 0x00000200),
+ RTW89_DECL_RFK_WM(0x58b8, 0x007f0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b8, 0x7f000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b4, 0x7f000000, 0x0000000a),
+ RTW89_DECL_RFK_WM(0x58b8, 0x0000007f, 0x00000028),
+ RTW89_DECL_RFK_WM(0x58b8, 0x00007f00, 0x00000076),
+ RTW89_DECL_RFK_WM(0x5810, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x20000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x580c, 0x10000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x580c, 0x40000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5838, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5858, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5834, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5834, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5838, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x5854, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5854, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5858, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x5824, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5824, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5828, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x582c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x582c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5830, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x583c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x583c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5840, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x5844, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5844, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5848, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x584c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x584c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5850, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x585c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x585c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5860, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x5828, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5830, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5840, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5848, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5850, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5860, 0x003ff000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_slope_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_slope_defs_b[] = {
- DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x7818, 0x10000000, 0x00000000),
- DECL_RFK_WM(0x7814, 0x00000800, 0x00000001),
- DECL_RFK_WM(0x781c, 0x20000000, 0x00000001),
- DECL_RFK_WM(0x7820, 0x0000f000, 0x00000001),
- DECL_RFK_WM(0x781c, 0x000003ff, 0x00000280),
- DECL_RFK_WM(0x781c, 0x000ffc00, 0x00000200),
- DECL_RFK_WM(0x78b8, 0x007f0000, 0x00000000),
- DECL_RFK_WM(0x78b8, 0x7f000000, 0x00000000),
- DECL_RFK_WM(0x78b4, 0x7f000000, 0x0000000a),
- DECL_RFK_WM(0x78b8, 0x0000007f, 0x00000028),
- DECL_RFK_WM(0x78b8, 0x00007f00, 0x00000076),
- DECL_RFK_WM(0x7810, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x7814, 0x20000000, 0x00000001),
- DECL_RFK_WM(0x780c, 0x10000000, 0x00000001),
- DECL_RFK_WM(0x780c, 0x40000000, 0x00000001),
- DECL_RFK_WM(0x7838, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7858, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7834, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x7834, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7838, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x7854, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x7854, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7858, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x7824, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x7824, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7828, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x782c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x782c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7830, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x783c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x783c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7840, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x7844, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x7844, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7848, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x784c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x784c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7850, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x785c, 0x0003ffff, 0x000115f2),
- DECL_RFK_WM(0x785c, 0x3ffc0000, 0x00000000),
- DECL_RFK_WM(0x7860, 0x00000fff, 0x00000121),
- DECL_RFK_WM(0x7828, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7830, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7840, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7848, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7850, 0x003ff000, 0x00000000),
- DECL_RFK_WM(0x7860, 0x003ff000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_slope_defs_b);
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x00000800, 0x00000001),
+ RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7820, 0x0000f000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x781c, 0x000003ff, 0x00000280),
+ RTW89_DECL_RFK_WM(0x781c, 0x000ffc00, 0x00000200),
+ RTW89_DECL_RFK_WM(0x78b8, 0x007f0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b8, 0x7f000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b4, 0x7f000000, 0x0000000a),
+ RTW89_DECL_RFK_WM(0x78b8, 0x0000007f, 0x00000028),
+ RTW89_DECL_RFK_WM(0x78b8, 0x00007f00, 0x00000076),
+ RTW89_DECL_RFK_WM(0x7810, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x20000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x780c, 0x10000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x780c, 0x40000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7838, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7858, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7834, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7834, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7838, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x7854, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7854, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7858, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x7824, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7824, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7828, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x782c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x782c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7830, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x783c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x783c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7840, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x7844, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7844, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7848, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x784c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x784c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7850, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x785c, 0x0003ffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x785c, 0x3ffc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7860, 0x00000fff, 0x00000121),
+ RTW89_DECL_RFK_WM(0x7828, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7830, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7840, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7848, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7850, 0x003ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7860, 0x003ff000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_slope_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_track_defs_a[] = {
- DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x5818, 0x18000000, 0x00000000),
- DECL_RFK_WM(0x5814, 0x00000800, 0x00000000),
- DECL_RFK_WM(0x581c, 0x20000000, 0x00000001),
- DECL_RFK_WM(0x5864, 0x000003ff, 0x000001ff),
- DECL_RFK_WM(0x5864, 0x000ffc00, 0x00000200),
- DECL_RFK_WM(0x5820, 0x00000fff, 0x00000080),
- DECL_RFK_WM(0x5814, 0x01000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5818, 0x18000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5814, 0x00000800, 0x00000000),
+ RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5864, 0x000003ff, 0x000001ff),
+ RTW89_DECL_RFK_WM(0x5864, 0x000ffc00, 0x00000200),
+ RTW89_DECL_RFK_WM(0x5820, 0x00000fff, 0x00000080),
+ RTW89_DECL_RFK_WM(0x5814, 0x01000000, 0x00000000),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_track_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_track_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_track_defs_b[] = {
- DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x7818, 0x18000000, 0x00000000),
- DECL_RFK_WM(0x7814, 0x00000800, 0x00000000),
- DECL_RFK_WM(0x781c, 0x20000000, 0x00000001),
- DECL_RFK_WM(0x7864, 0x000003ff, 0x000001ff),
- DECL_RFK_WM(0x7864, 0x000ffc00, 0x00000200),
- DECL_RFK_WM(0x7820, 0x00000fff, 0x00000080),
- DECL_RFK_WM(0x7814, 0x01000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7818, 0x18000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7814, 0x00000800, 0x00000000),
+ RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7864, 0x000003ff, 0x000001ff),
+ RTW89_DECL_RFK_WM(0x7864, 0x000ffc00, 0x00000200),
+ RTW89_DECL_RFK_WM(0x7820, 0x00000fff, 0x00000080),
+ RTW89_DECL_RFK_WM(0x7814, 0x01000000, 0x00000000),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_track_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_track_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_txagc_ofst_mv_avg_defs_a[] = {
- DECL_RFK_WM(0x58e4, 0x00004000, 0x00000000),
- DECL_RFK_WM(0x58e4, 0x00004000, 0x00000001),
- DECL_RFK_WM(0x58e4, 0x00004000, 0x00000000),
- DECL_RFK_WM(0x58e4, 0x00008000, 0x00000000),
- DECL_RFK_WM(0x58e4, 0x000f0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58e4, 0x00008000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58e4, 0x000f0000, 0x00000000),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_txagc_ofst_mv_avg_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txagc_ofst_mv_avg_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_txagc_ofst_mv_avg_defs_b[] = {
- DECL_RFK_WM(0x78e4, 0x00004000, 0x00000000),
- DECL_RFK_WM(0x78e4, 0x00004000, 0x00000001),
- DECL_RFK_WM(0x78e4, 0x00004000, 0x00000000),
- DECL_RFK_WM(0x78e4, 0x00008000, 0x00000000),
- DECL_RFK_WM(0x78e4, 0x000f0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78e4, 0x00008000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78e4, 0x000f0000, 0x00000000),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_txagc_ofst_mv_avg_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_txagc_ofst_mv_avg_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_2g[] = {
- DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d0),
- DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001e8),
- DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x000001ff, 0x0000000b),
- DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000088),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_2g);
+ RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d0),
+ RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001e8),
+ RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x0000000b),
+ RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000088),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_2g);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_5g_1[] = {
- DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d7),
- DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001fb),
- DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000005),
- DECL_RFK_WM(0x58ac, 0x07fc0000, 0x0000007c),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_1);
+ RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d7),
+ RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001fb),
+ RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000005),
+ RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x0000007c),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_1);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_5g_3[] = {
- DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d8),
- DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001fc),
- DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000006),
- DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000078),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_3);
+ RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d8),
+ RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001fc),
+ RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000006),
+ RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000078),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_3);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_5g_4[] = {
- DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001e5),
- DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58a8, 0x0003fe00, 0x0000000a),
- DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000011),
- DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000075),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_4);
+ RTW89_DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001e5),
+ RTW89_DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a8, 0x0003fe00, 0x0000000a),
+ RTW89_DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000011),
+ RTW89_DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000075),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_4);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_2g[] = {
- DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001cc),
- DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x0003fe00, 0x000001e2),
- DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000005),
- DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000089),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_2g);
+ RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001cc),
+ RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x000001e2),
+ RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000005),
+ RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000089),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_2g);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_5g_1[] = {
- DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001d5),
- DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x0003fe00, 0x000001fc),
- DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000005),
- DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000079),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_1);
+ RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001d5),
+ RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x000001fc),
+ RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000005),
+ RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000079),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_1);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_5g_3[] = {
- DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001dc),
- DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000002),
- DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x0003fe00, 0x0000000b),
- DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000076),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_3);
+ RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001dc),
+ RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000002),
+ RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x0000000b),
+ RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000076),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_3);
static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_5g_4[] = {
- DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
- DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
- DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001f0),
- DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000016),
- DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
- DECL_RFK_WM(0x78ac, 0x0003fe00, 0x0000001f),
- DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000072),
-};
-
-DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_4);
+ RTW89_DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001f0),
+ RTW89_DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000016),
+ RTW89_DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78ac, 0x0003fe00, 0x0000001f),
+ RTW89_DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000072),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_4);
static const struct rtw89_reg5_def rtw8852a_tssi_enable_defs_a[] = {
- DECL_RFK_WRF(0x0, 0x55, 0x00080, 0x00001),
- DECL_RFK_WM(0x5818, 0x000000ff, 0x000000c0),
- DECL_RFK_WM(0x5818, 0x10000000, 0x00000000),
- DECL_RFK_WM(0x5818, 0x10000000, 0x00000001),
- DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x5820, 0x80000000, 0x00000001),
- DECL_RFK_WM(0x5818, 0x18000000, 0x00000003),
+ RTW89_DECL_RFK_WRF(0x0, 0x55, 0x00080, 0x00001),
+ RTW89_DECL_RFK_WM(0x5818, 0x000000ff, 0x000000c0),
+ RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5818, 0x18000000, 0x00000003),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_a);
static const struct rtw89_reg5_def rtw8852a_tssi_enable_defs_b[] = {
- DECL_RFK_WRF(0x1, 0x55, 0x00080, 0x00001),
- DECL_RFK_WM(0x7818, 0x000000ff, 0x000000c0),
- DECL_RFK_WM(0x7818, 0x10000000, 0x00000000),
- DECL_RFK_WM(0x7818, 0x10000000, 0x00000001),
- DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x7820, 0x80000000, 0x00000001),
- DECL_RFK_WM(0x7818, 0x18000000, 0x00000003),
+ RTW89_DECL_RFK_WRF(0x1, 0x55, 0x00080, 0x00001),
+ RTW89_DECL_RFK_WM(0x7818, 0x000000ff, 0x000000c0),
+ RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7818, 0x18000000, 0x00000003),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_b);
static const struct rtw89_reg5_def rtw8852a_tssi_disable_defs[] = {
- DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x5818, 0x18000000, 0x00000001),
- DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
- DECL_RFK_WM(0x7818, 0x18000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5818, 0x18000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7818, 0x18000000, 0x00000001),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_disable_defs);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_disable_defs);
static const struct rtw89_reg5_def rtw8852a_tssi_enable_defs_ab[] = {
- DECL_RFK_WM(0x5820, 0x80000000, 0x0),
- DECL_RFK_WM(0x5820, 0x80000000, 0x1),
- DECL_RFK_WM(0x5818, 0x18000000, 0x3),
- DECL_RFK_WM(0x7820, 0x80000000, 0x0),
- DECL_RFK_WM(0x7820, 0x80000000, 0x1),
- DECL_RFK_WM(0x7818, 0x18000000, 0x3),
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5818, 0x18000000, 0x3),
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7818, 0x18000000, 0x3),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_ab);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_ab);
static const struct rtw89_reg5_def rtw8852a_tssi_tracking_defs[] = {
- DECL_RFK_WM(0x5800, 0x10000000, 0x00000000),
- DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
- DECL_RFK_WM(0x5804, 0xf8000000, 0x00000000),
- DECL_RFK_WM(0x58f0, 0xfff00000, 0x00000400),
- DECL_RFK_WM(0x7800, 0x10000000, 0x00000000),
- DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
- DECL_RFK_WM(0x7804, 0xf8000000, 0x00000000),
- DECL_RFK_WM(0x78f0, 0xfff00000, 0x00000400),
+ RTW89_DECL_RFK_WM(0x5800, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5804, 0xf8000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58f0, 0xfff00000, 0x00000400),
+ RTW89_DECL_RFK_WM(0x7800, 0x10000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7804, 0xf8000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f0, 0xfff00000, 0x00000400),
};
-DECLARE_RFK_TBL(rtw8852a_tssi_tracking_defs);
+RTW89_DECLARE_RFK_TBL(rtw8852a_tssi_tracking_defs);
static const struct rtw89_reg5_def rtw8852a_rfk_afe_init_defs[] = {
- DECL_RFK_WC(0x12ec, 0x00008000),
- DECL_RFK_WS(0x12ec, 0x00008000),
- DECL_RFK_WC(0x5e00, 0x00000001),
- DECL_RFK_WS(0x5e00, 0x00000001),
- DECL_RFK_WC(0x32ec, 0x00008000),
- DECL_RFK_WS(0x32ec, 0x00008000),
- DECL_RFK_WC(0x7e00, 0x00000001),
- DECL_RFK_WS(0x7e00, 0x00000001),
+ RTW89_DECL_RFK_WC(0x12ec, 0x00008000),
+ RTW89_DECL_RFK_WS(0x12ec, 0x00008000),
+ RTW89_DECL_RFK_WC(0x5e00, 0x00000001),
+ RTW89_DECL_RFK_WS(0x5e00, 0x00000001),
+ RTW89_DECL_RFK_WC(0x32ec, 0x00008000),
+ RTW89_DECL_RFK_WS(0x32ec, 0x00008000),
+ RTW89_DECL_RFK_WC(0x7e00, 0x00000001),
+ RTW89_DECL_RFK_WS(0x7e00, 0x00000001),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_afe_init_defs);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_afe_init_defs);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_reload_defs_a[] = {
- DECL_RFK_WS(0x5e00, 0x00000008),
- DECL_RFK_WS(0x5e50, 0x00000008),
- DECL_RFK_WS(0x5e10, 0x80000000),
- DECL_RFK_WS(0x5e60, 0x80000000),
- DECL_RFK_WC(0x5e00, 0x00000008),
- DECL_RFK_WC(0x5e50, 0x00000008),
+ RTW89_DECL_RFK_WS(0x5e00, 0x00000008),
+ RTW89_DECL_RFK_WS(0x5e50, 0x00000008),
+ RTW89_DECL_RFK_WS(0x5e10, 0x80000000),
+ RTW89_DECL_RFK_WS(0x5e60, 0x80000000),
+ RTW89_DECL_RFK_WC(0x5e00, 0x00000008),
+ RTW89_DECL_RFK_WC(0x5e50, 0x00000008),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_reload_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_reload_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_reload_defs_b[] = {
- DECL_RFK_WS(0x7e00, 0x00000008),
- DECL_RFK_WS(0x7e50, 0x00000008),
- DECL_RFK_WS(0x7e10, 0x80000000),
- DECL_RFK_WS(0x7e60, 0x80000000),
- DECL_RFK_WC(0x7e00, 0x00000008),
- DECL_RFK_WC(0x7e50, 0x00000008),
+ RTW89_DECL_RFK_WS(0x7e00, 0x00000008),
+ RTW89_DECL_RFK_WS(0x7e50, 0x00000008),
+ RTW89_DECL_RFK_WS(0x7e10, 0x80000000),
+ RTW89_DECL_RFK_WS(0x7e60, 0x80000000),
+ RTW89_DECL_RFK_WC(0x7e00, 0x00000008),
+ RTW89_DECL_RFK_WC(0x7e50, 0x00000008),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_reload_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_reload_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_check_addc_defs_a[] = {
- DECL_RFK_WC(0x20f4, 0x01000000),
- DECL_RFK_WS(0x20f8, 0x80000000),
- DECL_RFK_WM(0x20f0, 0x00ff0000, 0x00000001),
- DECL_RFK_WM(0x20f0, 0x00000f00, 0x00000002),
- DECL_RFK_WC(0x20f0, 0x0000000f),
- DECL_RFK_WM(0x20f0, 0x000000c0, 0x00000002),
+ RTW89_DECL_RFK_WC(0x20f4, 0x01000000),
+ RTW89_DECL_RFK_WS(0x20f8, 0x80000000),
+ RTW89_DECL_RFK_WM(0x20f0, 0x00ff0000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x20f0, 0x00000f00, 0x00000002),
+ RTW89_DECL_RFK_WC(0x20f0, 0x0000000f),
+ RTW89_DECL_RFK_WM(0x20f0, 0x000000c0, 0x00000002),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_check_addc_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_addc_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_check_addc_defs_b[] = {
- DECL_RFK_WC(0x20f4, 0x01000000),
- DECL_RFK_WS(0x20f8, 0x80000000),
- DECL_RFK_WM(0x20f0, 0x00ff0000, 0x00000001),
- DECL_RFK_WM(0x20f0, 0x00000f00, 0x00000002),
- DECL_RFK_WC(0x20f0, 0x0000000f),
- DECL_RFK_WM(0x20f0, 0x000000c0, 0x00000003),
+ RTW89_DECL_RFK_WC(0x20f4, 0x01000000),
+ RTW89_DECL_RFK_WS(0x20f8, 0x80000000),
+ RTW89_DECL_RFK_WM(0x20f0, 0x00ff0000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x20f0, 0x00000f00, 0x00000002),
+ RTW89_DECL_RFK_WC(0x20f0, 0x0000000f),
+ RTW89_DECL_RFK_WM(0x20f0, 0x000000c0, 0x00000003),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_check_addc_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_addc_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_addck_reset_defs_a[] = {
- DECL_RFK_WC(0x12d8, 0x00000030),
- DECL_RFK_WC(0x32d8, 0x00000030),
- DECL_RFK_WS(0x12b8, 0x40000000),
- DECL_RFK_WC(0x032c, 0x40000000),
- DECL_RFK_WC(0x032c, 0x00400000),
- DECL_RFK_WS(0x032c, 0x00400000),
- DECL_RFK_WS(0x030c, 0x0f000000),
- DECL_RFK_WC(0x032c, 0x00010000),
- DECL_RFK_WS(0x12dc, 0x00000002),
- DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
+ RTW89_DECL_RFK_WC(0x12d8, 0x00000030),
+ RTW89_DECL_RFK_WC(0x32d8, 0x00000030),
+ RTW89_DECL_RFK_WS(0x12b8, 0x40000000),
+ RTW89_DECL_RFK_WC(0x032c, 0x40000000),
+ RTW89_DECL_RFK_WC(0x032c, 0x00400000),
+ RTW89_DECL_RFK_WS(0x032c, 0x00400000),
+ RTW89_DECL_RFK_WS(0x030c, 0x0f000000),
+ RTW89_DECL_RFK_WC(0x032c, 0x00010000),
+ RTW89_DECL_RFK_WS(0x12dc, 0x00000002),
+ RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_addck_reset_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_reset_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_addck_trigger_defs_a[] = {
- DECL_RFK_WS(0x12d8, 0x000000c0),
- DECL_RFK_WS(0x12d8, 0x00000800),
- DECL_RFK_WC(0x12d8, 0x00000800),
- DECL_RFK_DELAY(1),
- DECL_RFK_WM(0x12d8, 0x00000300, 0x00000001),
+ RTW89_DECL_RFK_WS(0x12d8, 0x000000c0),
+ RTW89_DECL_RFK_WS(0x12d8, 0x00000800),
+ RTW89_DECL_RFK_WC(0x12d8, 0x00000800),
+ RTW89_DECL_RFK_DELAY(1),
+ RTW89_DECL_RFK_WM(0x12d8, 0x00000300, 0x00000001),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_addck_trigger_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_trigger_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_addck_restore_defs_a[] = {
- DECL_RFK_WC(0x12dc, 0x00000002),
- DECL_RFK_WS(0x032c, 0x00010000),
- DECL_RFK_WM(0x030c, 0x0f000000, 0x0000000c),
- DECL_RFK_WS(0x032c, 0x40000000),
- DECL_RFK_WC(0x12b8, 0x40000000),
+ RTW89_DECL_RFK_WC(0x12dc, 0x00000002),
+ RTW89_DECL_RFK_WS(0x032c, 0x00010000),
+ RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x0000000c),
+ RTW89_DECL_RFK_WS(0x032c, 0x40000000),
+ RTW89_DECL_RFK_WC(0x12b8, 0x40000000),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_addck_restore_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_restore_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_addck_reset_defs_b[] = {
- DECL_RFK_WS(0x32b8, 0x40000000),
- DECL_RFK_WC(0x032c, 0x40000000),
- DECL_RFK_WC(0x032c, 0x00400000),
- DECL_RFK_WS(0x032c, 0x00400000),
- DECL_RFK_WS(0x030c, 0x0f000000),
- DECL_RFK_WC(0x032c, 0x00010000),
- DECL_RFK_WS(0x32dc, 0x00000002),
- DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
+ RTW89_DECL_RFK_WS(0x32b8, 0x40000000),
+ RTW89_DECL_RFK_WC(0x032c, 0x40000000),
+ RTW89_DECL_RFK_WC(0x032c, 0x00400000),
+ RTW89_DECL_RFK_WS(0x032c, 0x00400000),
+ RTW89_DECL_RFK_WS(0x030c, 0x0f000000),
+ RTW89_DECL_RFK_WC(0x032c, 0x00010000),
+ RTW89_DECL_RFK_WS(0x32dc, 0x00000002),
+ RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_addck_reset_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_reset_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_addck_trigger_defs_b[] = {
- DECL_RFK_WS(0x32d8, 0x000000c0),
- DECL_RFK_WS(0x32d8, 0x00000800),
- DECL_RFK_WC(0x32d8, 0x00000800),
- DECL_RFK_DELAY(1),
- DECL_RFK_WM(0x32d8, 0x00000300, 0x00000001),
+ RTW89_DECL_RFK_WS(0x32d8, 0x000000c0),
+ RTW89_DECL_RFK_WS(0x32d8, 0x00000800),
+ RTW89_DECL_RFK_WC(0x32d8, 0x00000800),
+ RTW89_DECL_RFK_DELAY(1),
+ RTW89_DECL_RFK_WM(0x32d8, 0x00000300, 0x00000001),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_addck_trigger_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_trigger_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_addck_restore_defs_b[] = {
- DECL_RFK_WC(0x32dc, 0x00000002),
- DECL_RFK_WS(0x032c, 0x00010000),
- DECL_RFK_WM(0x030c, 0x0f000000, 0x0000000c),
- DECL_RFK_WS(0x032c, 0x40000000),
- DECL_RFK_WC(0x32b8, 0x40000000),
+ RTW89_DECL_RFK_WC(0x32dc, 0x00000002),
+ RTW89_DECL_RFK_WS(0x032c, 0x00010000),
+ RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x0000000c),
+ RTW89_DECL_RFK_WS(0x032c, 0x40000000),
+ RTW89_DECL_RFK_WC(0x32b8, 0x40000000),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_addck_restore_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_addck_restore_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_f_a[] = {
- DECL_RFK_WC(0x032c, 0x40000000),
- DECL_RFK_WS(0x030c, 0x0f000000),
- DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
- DECL_RFK_WC(0x032c, 0x00010000),
- DECL_RFK_WS(0x12dc, 0x00000001),
- DECL_RFK_WS(0x12e8, 0x00000004),
- DECL_RFK_WRF(0x0, 0x8f, 0x02000, 0x00001),
+ RTW89_DECL_RFK_WC(0x032c, 0x40000000),
+ RTW89_DECL_RFK_WS(0x030c, 0x0f000000),
+ RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
+ RTW89_DECL_RFK_WC(0x032c, 0x00010000),
+ RTW89_DECL_RFK_WS(0x12dc, 0x00000001),
+ RTW89_DECL_RFK_WS(0x12e8, 0x00000004),
+ RTW89_DECL_RFK_WRF(0x0, 0x8f, 0x02000, 0x00001),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_f_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_f_a);
static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_f_b[] = {
- DECL_RFK_WC(0x032c, 0x40000000),
- DECL_RFK_WS(0x030c, 0x0f000000),
- DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
- DECL_RFK_WC(0x032c, 0x00010000),
- DECL_RFK_WS(0x32dc, 0x00000001),
- DECL_RFK_WS(0x32e8, 0x00000004),
- DECL_RFK_WRF(0x1, 0x8f, 0x02000, 0x00001),
+ RTW89_DECL_RFK_WC(0x032c, 0x40000000),
+ RTW89_DECL_RFK_WS(0x030c, 0x0f000000),
+ RTW89_DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
+ RTW89_DECL_RFK_WC(0x032c, 0x00010000),
+ RTW89_DECL_RFK_WS(0x32dc, 0x00000001),
+ RTW89_DECL_RFK_WS(0x32e8, 0x00000004),
+ RTW89_DECL_RFK_WRF(0x1, 0x8f, 0x02000, 0x00001),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_f_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_f_b);
static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_r_a[] = {
- DECL_RFK_WC(0x12dc, 0x00000001),
- DECL_RFK_WC(0x12e8, 0x00000004),
- DECL_RFK_WRF(0x0, 0x8f, 0x02000, 0x00000),
- DECL_RFK_WM(0x032c, 0x00010000, 0x00000001),
+ RTW89_DECL_RFK_WC(0x12dc, 0x00000001),
+ RTW89_DECL_RFK_WC(0x12e8, 0x00000004),
+ RTW89_DECL_RFK_WRF(0x0, 0x8f, 0x02000, 0x00000),
+ RTW89_DECL_RFK_WM(0x032c, 0x00010000, 0x00000001),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_r_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_r_a);
static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_r_b[] = {
- DECL_RFK_WC(0x32dc, 0x00000001),
- DECL_RFK_WC(0x32e8, 0x00000004),
- DECL_RFK_WRF(0x1, 0x8f, 0x02000, 0x00000),
- DECL_RFK_WM(0x032c, 0x00010000, 0x00000001),
+ RTW89_DECL_RFK_WC(0x32dc, 0x00000001),
+ RTW89_DECL_RFK_WC(0x32e8, 0x00000004),
+ RTW89_DECL_RFK_WRF(0x1, 0x8f, 0x02000, 0x00000),
+ RTW89_DECL_RFK_WM(0x032c, 0x00010000, 0x00000001),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_r_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_r_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_f_a[] = {
- DECL_RFK_WS(0x5e00, 0x00000008),
- DECL_RFK_WC(0x5e10, 0x80000000),
- DECL_RFK_WS(0x5e50, 0x00000008),
- DECL_RFK_WC(0x5e60, 0x80000000),
- DECL_RFK_WS(0x12a0, 0x00008000),
- DECL_RFK_WM(0x12a0, 0x00007000, 0x00000003),
- DECL_RFK_WS(0x12b8, 0x40000000),
- DECL_RFK_WS(0x030c, 0x10000000),
- DECL_RFK_WC(0x032c, 0x80000000),
- DECL_RFK_WS(0x12e0, 0x00010000),
- DECL_RFK_WS(0x12e4, 0x0c000000),
- DECL_RFK_WM(0x5e00, 0x03ff0000, 0x00000030),
- DECL_RFK_WM(0x5e50, 0x03ff0000, 0x00000030),
- DECL_RFK_WC(0x5e00, 0x0c000000),
- DECL_RFK_WC(0x5e50, 0x0c000000),
- DECL_RFK_WC(0x5e0c, 0x00000008),
- DECL_RFK_WC(0x5e5c, 0x00000008),
- DECL_RFK_WS(0x5e0c, 0x00000001),
- DECL_RFK_WS(0x5e5c, 0x00000001),
- DECL_RFK_DELAY(1),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_f_a);
+ RTW89_DECL_RFK_WS(0x5e00, 0x00000008),
+ RTW89_DECL_RFK_WC(0x5e10, 0x80000000),
+ RTW89_DECL_RFK_WS(0x5e50, 0x00000008),
+ RTW89_DECL_RFK_WC(0x5e60, 0x80000000),
+ RTW89_DECL_RFK_WS(0x12a0, 0x00008000),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00007000, 0x00000003),
+ RTW89_DECL_RFK_WS(0x12b8, 0x40000000),
+ RTW89_DECL_RFK_WS(0x030c, 0x10000000),
+ RTW89_DECL_RFK_WC(0x032c, 0x80000000),
+ RTW89_DECL_RFK_WS(0x12e0, 0x00010000),
+ RTW89_DECL_RFK_WS(0x12e4, 0x0c000000),
+ RTW89_DECL_RFK_WM(0x5e00, 0x03ff0000, 0x00000030),
+ RTW89_DECL_RFK_WM(0x5e50, 0x03ff0000, 0x00000030),
+ RTW89_DECL_RFK_WC(0x5e00, 0x0c000000),
+ RTW89_DECL_RFK_WC(0x5e50, 0x0c000000),
+ RTW89_DECL_RFK_WC(0x5e0c, 0x00000008),
+ RTW89_DECL_RFK_WC(0x5e5c, 0x00000008),
+ RTW89_DECL_RFK_WS(0x5e0c, 0x00000001),
+ RTW89_DECL_RFK_WS(0x5e5c, 0x00000001),
+ RTW89_DECL_RFK_DELAY(1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_f_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_m_a[] = {
- DECL_RFK_WC(0x12e4, 0x0c000000),
- DECL_RFK_WS(0x5e0c, 0x00000008),
- DECL_RFK_WS(0x5e5c, 0x00000008),
- DECL_RFK_DELAY(1),
+ RTW89_DECL_RFK_WC(0x12e4, 0x0c000000),
+ RTW89_DECL_RFK_WS(0x5e0c, 0x00000008),
+ RTW89_DECL_RFK_WS(0x5e5c, 0x00000008),
+ RTW89_DECL_RFK_DELAY(1),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_m_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_m_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_r_a[] = {
- DECL_RFK_WC(0x5e0c, 0x00000001),
- DECL_RFK_WC(0x5e5c, 0x00000001),
- DECL_RFK_WC(0x12e0, 0x00010000),
- DECL_RFK_WC(0x12a0, 0x00008000),
- DECL_RFK_WS(0x12a0, 0x00007000),
+ RTW89_DECL_RFK_WC(0x5e0c, 0x00000001),
+ RTW89_DECL_RFK_WC(0x5e5c, 0x00000001),
+ RTW89_DECL_RFK_WC(0x12e0, 0x00010000),
+ RTW89_DECL_RFK_WC(0x12a0, 0x00008000),
+ RTW89_DECL_RFK_WS(0x12a0, 0x00007000),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_r_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_r_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_f_b[] = {
- DECL_RFK_WS(0x7e00, 0x00000008),
- DECL_RFK_WC(0x7e10, 0x80000000),
- DECL_RFK_WS(0x7e50, 0x00000008),
- DECL_RFK_WC(0x7e60, 0x80000000),
- DECL_RFK_WS(0x32a0, 0x00008000),
- DECL_RFK_WM(0x32a0, 0x00007000, 0x00000003),
- DECL_RFK_WS(0x32b8, 0x40000000),
- DECL_RFK_WS(0x030c, 0x10000000),
- DECL_RFK_WC(0x032c, 0x80000000),
- DECL_RFK_WS(0x32e0, 0x00010000),
- DECL_RFK_WS(0x32e4, 0x0c000000),
- DECL_RFK_WM(0x7e00, 0x03ff0000, 0x00000030),
- DECL_RFK_WM(0x7e50, 0x03ff0000, 0x00000030),
- DECL_RFK_WC(0x7e00, 0x0c000000),
- DECL_RFK_WC(0x7e50, 0x0c000000),
- DECL_RFK_WC(0x7e0c, 0x00000008),
- DECL_RFK_WC(0x7e5c, 0x00000008),
- DECL_RFK_WS(0x7e0c, 0x00000001),
- DECL_RFK_WS(0x7e5c, 0x00000001),
- DECL_RFK_DELAY(1),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_f_b);
+ RTW89_DECL_RFK_WS(0x7e00, 0x00000008),
+ RTW89_DECL_RFK_WC(0x7e10, 0x80000000),
+ RTW89_DECL_RFK_WS(0x7e50, 0x00000008),
+ RTW89_DECL_RFK_WC(0x7e60, 0x80000000),
+ RTW89_DECL_RFK_WS(0x32a0, 0x00008000),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00007000, 0x00000003),
+ RTW89_DECL_RFK_WS(0x32b8, 0x40000000),
+ RTW89_DECL_RFK_WS(0x030c, 0x10000000),
+ RTW89_DECL_RFK_WC(0x032c, 0x80000000),
+ RTW89_DECL_RFK_WS(0x32e0, 0x00010000),
+ RTW89_DECL_RFK_WS(0x32e4, 0x0c000000),
+ RTW89_DECL_RFK_WM(0x7e00, 0x03ff0000, 0x00000030),
+ RTW89_DECL_RFK_WM(0x7e50, 0x03ff0000, 0x00000030),
+ RTW89_DECL_RFK_WC(0x7e00, 0x0c000000),
+ RTW89_DECL_RFK_WC(0x7e50, 0x0c000000),
+ RTW89_DECL_RFK_WC(0x7e0c, 0x00000008),
+ RTW89_DECL_RFK_WC(0x7e5c, 0x00000008),
+ RTW89_DECL_RFK_WS(0x7e0c, 0x00000001),
+ RTW89_DECL_RFK_WS(0x7e5c, 0x00000001),
+ RTW89_DECL_RFK_DELAY(1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_f_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_m_b[] = {
- DECL_RFK_WC(0x32e4, 0x0c000000),
- DECL_RFK_WM(0x7e0c, 0x00000008, 0x00000001),
- DECL_RFK_WM(0x7e5c, 0x00000008, 0x00000001),
- DECL_RFK_DELAY(1),
+ RTW89_DECL_RFK_WC(0x32e4, 0x0c000000),
+ RTW89_DECL_RFK_WM(0x7e0c, 0x00000008, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7e5c, 0x00000008, 0x00000001),
+ RTW89_DECL_RFK_DELAY(1),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_m_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_m_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_r_b[] = {
- DECL_RFK_WC(0x7e0c, 0x00000001),
- DECL_RFK_WC(0x7e5c, 0x00000001),
- DECL_RFK_WC(0x32e0, 0x00010000),
- DECL_RFK_WC(0x32a0, 0x00008000),
- DECL_RFK_WS(0x32a0, 0x00007000),
+ RTW89_DECL_RFK_WC(0x7e0c, 0x00000001),
+ RTW89_DECL_RFK_WC(0x7e5c, 0x00000001),
+ RTW89_DECL_RFK_WC(0x32e0, 0x00010000),
+ RTW89_DECL_RFK_WC(0x32a0, 0x00008000),
+ RTW89_DECL_RFK_WS(0x32a0, 0x00007000),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_r_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_r_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sf_defs_a[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
- DECL_RFK_WS(0x12b8, 0x40000000),
- DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
- DECL_RFK_WS(0x12b8, 0x10000000),
- DECL_RFK_WS(0x58c8, 0x01000000),
- DECL_RFK_WS(0x5864, 0xc0000000),
- DECL_RFK_WS(0x2008, 0x01ffffff),
- DECL_RFK_WS(0x0c1c, 0x00000004),
- DECL_RFK_WS(0x0700, 0x08000000),
- DECL_RFK_WS(0x0c70, 0x000003ff),
- DECL_RFK_WS(0x0c60, 0x00000003),
- DECL_RFK_WS(0x0c6c, 0x00000001),
- DECL_RFK_WS(0x58ac, 0x08000000),
- DECL_RFK_WS(0x0c3c, 0x00000200),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sf_defs_a);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
+ RTW89_DECL_RFK_WS(0x12b8, 0x40000000),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
+ RTW89_DECL_RFK_WS(0x12b8, 0x10000000),
+ RTW89_DECL_RFK_WS(0x58c8, 0x01000000),
+ RTW89_DECL_RFK_WS(0x5864, 0xc0000000),
+ RTW89_DECL_RFK_WS(0x2008, 0x01ffffff),
+ RTW89_DECL_RFK_WS(0x0c1c, 0x00000004),
+ RTW89_DECL_RFK_WS(0x0700, 0x08000000),
+ RTW89_DECL_RFK_WS(0x0c70, 0x000003ff),
+ RTW89_DECL_RFK_WS(0x0c60, 0x00000003),
+ RTW89_DECL_RFK_WS(0x0c6c, 0x00000001),
+ RTW89_DECL_RFK_WS(0x58ac, 0x08000000),
+ RTW89_DECL_RFK_WS(0x0c3c, 0x00000200),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sf_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sr_defs_a[] = {
- DECL_RFK_WS(0x4490, 0x80000000),
- DECL_RFK_WS(0x12a0, 0x00007000),
- DECL_RFK_WS(0x12a0, 0x00008000),
- DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
- DECL_RFK_WS(0x12a0, 0x00080000),
- DECL_RFK_WS(0x0700, 0x01000000),
- DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00001111),
- DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
+ RTW89_DECL_RFK_WS(0x4490, 0x80000000),
+ RTW89_DECL_RFK_WS(0x12a0, 0x00007000),
+ RTW89_DECL_RFK_WS(0x12a0, 0x00008000),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
+ RTW89_DECL_RFK_WS(0x12a0, 0x00080000),
+ RTW89_DECL_RFK_WS(0x0700, 0x01000000),
+ RTW89_DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00001111),
+ RTW89_DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sr_defs_a);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sr_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sf_defs_b[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
- DECL_RFK_WS(0x32b8, 0x40000000),
- DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
- DECL_RFK_WS(0x32b8, 0x10000000),
- DECL_RFK_WS(0x78c8, 0x01000000),
- DECL_RFK_WS(0x7864, 0xc0000000),
- DECL_RFK_WS(0x2008, 0x01ffffff),
- DECL_RFK_WS(0x2c1c, 0x00000004),
- DECL_RFK_WS(0x2700, 0x08000000),
- DECL_RFK_WS(0x0c70, 0x000003ff),
- DECL_RFK_WS(0x0c60, 0x00000003),
- DECL_RFK_WS(0x0c6c, 0x00000001),
- DECL_RFK_WS(0x78ac, 0x08000000),
- DECL_RFK_WS(0x2c3c, 0x00000200),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sf_defs_b);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
+ RTW89_DECL_RFK_WS(0x32b8, 0x40000000),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
+ RTW89_DECL_RFK_WS(0x32b8, 0x10000000),
+ RTW89_DECL_RFK_WS(0x78c8, 0x01000000),
+ RTW89_DECL_RFK_WS(0x7864, 0xc0000000),
+ RTW89_DECL_RFK_WS(0x2008, 0x01ffffff),
+ RTW89_DECL_RFK_WS(0x2c1c, 0x00000004),
+ RTW89_DECL_RFK_WS(0x2700, 0x08000000),
+ RTW89_DECL_RFK_WS(0x0c70, 0x000003ff),
+ RTW89_DECL_RFK_WS(0x0c60, 0x00000003),
+ RTW89_DECL_RFK_WS(0x0c6c, 0x00000001),
+ RTW89_DECL_RFK_WS(0x78ac, 0x08000000),
+ RTW89_DECL_RFK_WS(0x2c3c, 0x00000200),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sf_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sr_defs_b[] = {
- DECL_RFK_WS(0x6490, 0x80000000),
- DECL_RFK_WS(0x32a0, 0x00007000),
- DECL_RFK_WS(0x32a0, 0x00008000),
- DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
- DECL_RFK_WS(0x32a0, 0x00080000),
- DECL_RFK_WS(0x2700, 0x01000000),
- DECL_RFK_WM(0x2700, 0x06000000, 0x00000002),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00002222),
- DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
+ RTW89_DECL_RFK_WS(0x6490, 0x80000000),
+ RTW89_DECL_RFK_WS(0x32a0, 0x00007000),
+ RTW89_DECL_RFK_WS(0x32a0, 0x00008000),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
+ RTW89_DECL_RFK_WS(0x32a0, 0x00080000),
+ RTW89_DECL_RFK_WS(0x2700, 0x01000000),
+ RTW89_DECL_RFK_WM(0x2700, 0x06000000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00002222),
+ RTW89_DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sr_defs_b);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sr_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_s_defs_ab[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
- DECL_RFK_WS(0x12b8, 0x40000000),
- DECL_RFK_WS(0x32b8, 0x40000000),
- DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
- DECL_RFK_WS(0x12b8, 0x10000000),
- DECL_RFK_WS(0x58c8, 0x01000000),
- DECL_RFK_WS(0x78c8, 0x01000000),
- DECL_RFK_WS(0x5864, 0xc0000000),
- DECL_RFK_WS(0x7864, 0xc0000000),
- DECL_RFK_WS(0x2008, 0x01ffffff),
- DECL_RFK_WS(0x0c1c, 0x00000004),
- DECL_RFK_WS(0x0700, 0x08000000),
- DECL_RFK_WS(0x0c70, 0x000003ff),
- DECL_RFK_WS(0x0c60, 0x00000003),
- DECL_RFK_WS(0x0c6c, 0x00000001),
- DECL_RFK_WS(0x58ac, 0x08000000),
- DECL_RFK_WS(0x78ac, 0x08000000),
- DECL_RFK_WS(0x0c3c, 0x00000200),
- DECL_RFK_WS(0x2344, 0x80000000),
- DECL_RFK_WS(0x4490, 0x80000000),
- DECL_RFK_WS(0x12a0, 0x00007000),
- DECL_RFK_WS(0x12a0, 0x00008000),
- DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
- DECL_RFK_WS(0x12a0, 0x00080000),
- DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
- DECL_RFK_WS(0x32a0, 0x00080000),
- DECL_RFK_WS(0x0700, 0x01000000),
- DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
- DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
- DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_s_defs_ab);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
+ RTW89_DECL_RFK_WS(0x12b8, 0x40000000),
+ RTW89_DECL_RFK_WS(0x32b8, 0x40000000),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
+ RTW89_DECL_RFK_WS(0x12b8, 0x10000000),
+ RTW89_DECL_RFK_WS(0x58c8, 0x01000000),
+ RTW89_DECL_RFK_WS(0x78c8, 0x01000000),
+ RTW89_DECL_RFK_WS(0x5864, 0xc0000000),
+ RTW89_DECL_RFK_WS(0x7864, 0xc0000000),
+ RTW89_DECL_RFK_WS(0x2008, 0x01ffffff),
+ RTW89_DECL_RFK_WS(0x0c1c, 0x00000004),
+ RTW89_DECL_RFK_WS(0x0700, 0x08000000),
+ RTW89_DECL_RFK_WS(0x0c70, 0x000003ff),
+ RTW89_DECL_RFK_WS(0x0c60, 0x00000003),
+ RTW89_DECL_RFK_WS(0x0c6c, 0x00000001),
+ RTW89_DECL_RFK_WS(0x58ac, 0x08000000),
+ RTW89_DECL_RFK_WS(0x78ac, 0x08000000),
+ RTW89_DECL_RFK_WS(0x0c3c, 0x00000200),
+ RTW89_DECL_RFK_WS(0x2344, 0x80000000),
+ RTW89_DECL_RFK_WS(0x4490, 0x80000000),
+ RTW89_DECL_RFK_WS(0x12a0, 0x00007000),
+ RTW89_DECL_RFK_WS(0x12a0, 0x00008000),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
+ RTW89_DECL_RFK_WS(0x12a0, 0x00080000),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
+ RTW89_DECL_RFK_WS(0x32a0, 0x00080000),
+ RTW89_DECL_RFK_WS(0x0700, 0x01000000),
+ RTW89_DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
+ RTW89_DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_s_defs_ab);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_r_defs_a[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
- DECL_RFK_WC(0x12b8, 0x40000000),
- DECL_RFK_WC(0x5864, 0xc0000000),
- DECL_RFK_WC(0x2008, 0x01ffffff),
- DECL_RFK_WC(0x0c1c, 0x00000004),
- DECL_RFK_WC(0x0700, 0x08000000),
- DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
- DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
- DECL_RFK_WC(0x12a0, 0x000ff000),
- DECL_RFK_WC(0x0700, 0x07000000),
- DECL_RFK_WC(0x5864, 0x20000000),
- DECL_RFK_WC(0x0c3c, 0x00000200),
- DECL_RFK_WC(0x20fc, 0xffff0000),
- DECL_RFK_WC(0x58c8, 0x01000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_a);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
+ RTW89_DECL_RFK_WC(0x12b8, 0x40000000),
+ RTW89_DECL_RFK_WC(0x5864, 0xc0000000),
+ RTW89_DECL_RFK_WC(0x2008, 0x01ffffff),
+ RTW89_DECL_RFK_WC(0x0c1c, 0x00000004),
+ RTW89_DECL_RFK_WC(0x0700, 0x08000000),
+ RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+ RTW89_DECL_RFK_WC(0x12a0, 0x000ff000),
+ RTW89_DECL_RFK_WC(0x0700, 0x07000000),
+ RTW89_DECL_RFK_WC(0x5864, 0x20000000),
+ RTW89_DECL_RFK_WC(0x0c3c, 0x00000200),
+ RTW89_DECL_RFK_WC(0x20fc, 0xffff0000),
+ RTW89_DECL_RFK_WC(0x58c8, 0x01000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_a);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_r_defs_b[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
- DECL_RFK_WC(0x32b8, 0x40000000),
- DECL_RFK_WC(0x7864, 0xc0000000),
- DECL_RFK_WC(0x2008, 0x01ffffff),
- DECL_RFK_WC(0x2c1c, 0x00000004),
- DECL_RFK_WC(0x2700, 0x08000000),
- DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
- DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
- DECL_RFK_WC(0x32a0, 0x000ff000),
- DECL_RFK_WC(0x2700, 0x07000000),
- DECL_RFK_WC(0x7864, 0x20000000),
- DECL_RFK_WC(0x2c3c, 0x00000200),
- DECL_RFK_WC(0x20fc, 0xffff0000),
- DECL_RFK_WC(0x78c8, 0x01000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_b);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
+ RTW89_DECL_RFK_WC(0x32b8, 0x40000000),
+ RTW89_DECL_RFK_WC(0x7864, 0xc0000000),
+ RTW89_DECL_RFK_WC(0x2008, 0x01ffffff),
+ RTW89_DECL_RFK_WC(0x2c1c, 0x00000004),
+ RTW89_DECL_RFK_WC(0x2700, 0x08000000),
+ RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+ RTW89_DECL_RFK_WC(0x32a0, 0x000ff000),
+ RTW89_DECL_RFK_WC(0x2700, 0x07000000),
+ RTW89_DECL_RFK_WC(0x7864, 0x20000000),
+ RTW89_DECL_RFK_WC(0x2c3c, 0x00000200),
+ RTW89_DECL_RFK_WC(0x20fc, 0xffff0000),
+ RTW89_DECL_RFK_WC(0x78c8, 0x01000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_b);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_r_defs_ab[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
- DECL_RFK_WC(0x12b8, 0x40000000),
- DECL_RFK_WC(0x32b8, 0x40000000),
- DECL_RFK_WC(0x5864, 0xc0000000),
- DECL_RFK_WC(0x7864, 0xc0000000),
- DECL_RFK_WC(0x2008, 0x01ffffff),
- DECL_RFK_WC(0x0c1c, 0x00000004),
- DECL_RFK_WC(0x0700, 0x08000000),
- DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
- DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
- DECL_RFK_WC(0x12a0, 0x000ff000),
- DECL_RFK_WC(0x32a0, 0x000ff000),
- DECL_RFK_WC(0x0700, 0x07000000),
- DECL_RFK_WC(0x5864, 0x20000000),
- DECL_RFK_WC(0x7864, 0x20000000),
- DECL_RFK_WC(0x0c3c, 0x00000200),
- DECL_RFK_WC(0x20fc, 0xffff0000),
- DECL_RFK_WC(0x58c8, 0x01000000),
- DECL_RFK_WC(0x78c8, 0x01000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_ab);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
+ RTW89_DECL_RFK_WC(0x12b8, 0x40000000),
+ RTW89_DECL_RFK_WC(0x32b8, 0x40000000),
+ RTW89_DECL_RFK_WC(0x5864, 0xc0000000),
+ RTW89_DECL_RFK_WC(0x7864, 0xc0000000),
+ RTW89_DECL_RFK_WC(0x2008, 0x01ffffff),
+ RTW89_DECL_RFK_WC(0x0c1c, 0x00000004),
+ RTW89_DECL_RFK_WC(0x0700, 0x08000000),
+ RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+ RTW89_DECL_RFK_WC(0x12a0, 0x000ff000),
+ RTW89_DECL_RFK_WC(0x32a0, 0x000ff000),
+ RTW89_DECL_RFK_WC(0x0700, 0x07000000),
+ RTW89_DECL_RFK_WC(0x5864, 0x20000000),
+ RTW89_DECL_RFK_WC(0x7864, 0x20000000),
+ RTW89_DECL_RFK_WC(0x0c3c, 0x00000200),
+ RTW89_DECL_RFK_WC(0x20fc, 0xffff0000),
+ RTW89_DECL_RFK_WC(0x58c8, 0x01000000),
+ RTW89_DECL_RFK_WC(0x78c8, 0x01000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_ab);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_lbk_rxiqk_defs_f[] = {
- DECL_RFK_WM(0x030c, 0xff000000, 0x0000000f),
- DECL_RFK_DELAY(1),
- DECL_RFK_WM(0x030c, 0xff000000, 0x00000003),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x0000a001),
- DECL_RFK_DELAY(1),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x0000a041),
- DECL_RFK_WS(0x8074, 0x80000000),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x0000000f),
+ RTW89_DECL_RFK_DELAY(1),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x0000a001),
+ RTW89_DECL_RFK_DELAY(1),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x0000a041),
+ RTW89_DECL_RFK_WS(0x8074, 0x80000000),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_lbk_rxiqk_defs_f);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_lbk_rxiqk_defs_f);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_lbk_rxiqk_defs_r[] = {
- DECL_RFK_WC(0x8074, 0x80000000),
- DECL_RFK_WM(0x030c, 0xff000000, 0x0000001f),
- DECL_RFK_DELAY(1),
- DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
- DECL_RFK_DELAY(1),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
+ RTW89_DECL_RFK_WC(0x8074, 0x80000000),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x0000001f),
+ RTW89_DECL_RFK_DELAY(1),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
+ RTW89_DECL_RFK_DELAY(1),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_lbk_rxiqk_defs_r);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_lbk_rxiqk_defs_r);
static const struct rtw89_reg5_def rtw8852a_rfk_dpk_pas_read_defs[] = {
- DECL_RFK_WM(0x80d4, 0x00ff0000, 0x00000006),
- DECL_RFK_WC(0x80bc, 0x00004000),
- DECL_RFK_WM(0x80c0, 0x00ff0000, 0x00000008),
+ RTW89_DECL_RFK_WM(0x80d4, 0x00ff0000, 0x00000006),
+ RTW89_DECL_RFK_WC(0x80bc, 0x00004000),
+ RTW89_DECL_RFK_WM(0x80c0, 0x00ff0000, 0x00000008),
};
-DECLARE_RFK_TBL(rtw8852a_rfk_dpk_pas_read_defs);
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_dpk_pas_read_defs);
static const struct rtw89_reg5_def rtw8852a_rfk_iqk_set_defs_nondbcc_path01[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
- DECL_RFK_WM(0x5864, 0x18000000, 0x00000003),
- DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
- DECL_RFK_WM(0x12b8, 0x40000000, 0x00000001),
- DECL_RFK_WM(0x32b8, 0x40000000, 0x00000001),
- DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
- DECL_RFK_WM(0x12b8, 0x10000000, 0x00000001),
- DECL_RFK_WM(0x58c8, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x78c8, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x5864, 0xc0000000, 0x00000003),
- DECL_RFK_WM(0x7864, 0xc0000000, 0x00000003),
- DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
- DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000001),
- DECL_RFK_WM(0x0700, 0x08000000, 0x00000001),
- DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
- DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
- DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
- DECL_RFK_WM(0x58ac, 0x08000000, 0x00000001),
- DECL_RFK_WM(0x78ac, 0x08000000, 0x00000001),
- DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000001),
- DECL_RFK_WM(0x2344, 0x80000000, 0x00000001),
- DECL_RFK_WM(0x4490, 0x80000000, 0x00000001),
- DECL_RFK_WM(0x12a0, 0x00007000, 0x00000007),
- DECL_RFK_WM(0x12a0, 0x00008000, 0x00000001),
- DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
- DECL_RFK_WM(0x12a0, 0x00080000, 0x00000001),
- DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
- DECL_RFK_WM(0x32a0, 0x00080000, 0x00000001),
- DECL_RFK_WM(0x0700, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
- DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
- DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_nondbcc_path01);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
+ RTW89_DECL_RFK_WM(0x5864, 0x18000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x32b8, 0x40000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x12b8, 0x10000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58c8, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78c8, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5864, 0xc0000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x7864, 0xc0000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
+ RTW89_DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0700, 0x08000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
+ RTW89_DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58ac, 0x08000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78ac, 0x08000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000001),
+ RTW89_DECL_RFK_WM(0x2344, 0x80000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x4490, 0x80000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00007000, 0x00000007),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00008000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00080000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00080000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0700, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
+ RTW89_DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_nondbcc_path01);
static const struct rtw89_reg5_def rtw8852a_rfk_iqk_set_defs_dbcc_path0[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
- DECL_RFK_WM(0x5864, 0x18000000, 0x00000003),
- DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
- DECL_RFK_WM(0x12b8, 0x40000000, 0x00000001),
- DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
- DECL_RFK_WM(0x12b8, 0x10000000, 0x00000001),
- DECL_RFK_WM(0x58c8, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x5864, 0xc0000000, 0x00000003),
- DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
- DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000001),
- DECL_RFK_WM(0x0700, 0x08000000, 0x00000001),
- DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
- DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
- DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
- DECL_RFK_WM(0x58ac, 0x08000000, 0x00000001),
- DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000001),
- DECL_RFK_WM(0x2320, 0x00000001, 0x00000001),
- DECL_RFK_WM(0x4490, 0x80000000, 0x00000001),
- DECL_RFK_WM(0x12a0, 0x00007000, 0x00000007),
- DECL_RFK_WM(0x12a0, 0x00008000, 0x00000001),
- DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
- DECL_RFK_WM(0x12a0, 0x00080000, 0x00000001),
- DECL_RFK_WM(0x0700, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00001111),
- DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_dbcc_path0);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
+ RTW89_DECL_RFK_WM(0x5864, 0x18000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x12b8, 0x10000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58c8, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x5864, 0xc0000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
+ RTW89_DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0700, 0x08000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
+ RTW89_DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
+ RTW89_DECL_RFK_WM(0x58ac, 0x08000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000001),
+ RTW89_DECL_RFK_WM(0x2320, 0x00000001, 0x00000001),
+ RTW89_DECL_RFK_WM(0x4490, 0x80000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00007000, 0x00000007),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00008000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x12a0, 0x00080000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0700, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00001111),
+ RTW89_DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_dbcc_path0);
static const struct rtw89_reg5_def rtw8852a_rfk_iqk_set_defs_dbcc_path1[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
- DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
- DECL_RFK_WM(0x32b8, 0x40000000, 0x00000001),
- DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
- DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
- DECL_RFK_WM(0x32b8, 0x10000000, 0x00000001),
- DECL_RFK_WM(0x78c8, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x7864, 0xc0000000, 0x00000003),
- DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
- DECL_RFK_WM(0x2c1c, 0x00000004, 0x00000001),
- DECL_RFK_WM(0x2700, 0x08000000, 0x00000001),
- DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
- DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
- DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
- DECL_RFK_WM(0x78ac, 0x08000000, 0x00000001),
- DECL_RFK_WM(0x2c3c, 0x00000200, 0x00000001),
- DECL_RFK_WM(0x6490, 0x80000000, 0x00000001),
- DECL_RFK_WM(0x32a0, 0x00007000, 0x00000007),
- DECL_RFK_WM(0x32a0, 0x00008000, 0x00000001),
- DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
- DECL_RFK_WM(0x32a0, 0x00080000, 0x00000001),
- DECL_RFK_WM(0x2700, 0x01000000, 0x00000001),
- DECL_RFK_WM(0x2700, 0x06000000, 0x00000002),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00002222),
- DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_dbcc_path1);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
+ RTW89_DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x32b8, 0x40000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x32b8, 0x10000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78c8, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x7864, 0xc0000000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
+ RTW89_DECL_RFK_WM(0x2c1c, 0x00000004, 0x00000001),
+ RTW89_DECL_RFK_WM(0x2700, 0x08000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
+ RTW89_DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
+ RTW89_DECL_RFK_WM(0x78ac, 0x08000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x2c3c, 0x00000200, 0x00000001),
+ RTW89_DECL_RFK_WM(0x6490, 0x80000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00007000, 0x00000007),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00008000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
+ RTW89_DECL_RFK_WM(0x32a0, 0x00080000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x2700, 0x01000000, 0x00000001),
+ RTW89_DECL_RFK_WM(0x2700, 0x06000000, 0x00000002),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00002222),
+ RTW89_DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_dbcc_path1);
static const struct rtw89_reg5_def rtw8852a_rfk_iqk_restore_defs_nondbcc_path01[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
- DECL_RFK_WM(0x12b8, 0x40000000, 0x00000000),
- DECL_RFK_WM(0x32b8, 0x40000000, 0x00000000),
- DECL_RFK_WM(0x5864, 0xc0000000, 0x00000000),
- DECL_RFK_WM(0x7864, 0xc0000000, 0x00000000),
- DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
- DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000000),
- DECL_RFK_WM(0x0700, 0x08000000, 0x00000000),
- DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
- DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
- DECL_RFK_WM(0x12a0, 0x000ff000, 0x00000000),
- DECL_RFK_WM(0x32a0, 0x000ff000, 0x00000000),
- DECL_RFK_WM(0x0700, 0x07000000, 0x00000000),
- DECL_RFK_WM(0x5864, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x7864, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000000),
- DECL_RFK_WM(0x2320, 0x00000001, 0x00000000),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
- DECL_RFK_WM(0x58c8, 0x01000000, 0x00000000),
- DECL_RFK_WM(0x78c8, 0x01000000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_nondbcc_path01);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
+ RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x32b8, 0x40000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5864, 0xc0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7864, 0xc0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0700, 0x08000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+ RTW89_DECL_RFK_WM(0x12a0, 0x000ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x32a0, 0x000ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0700, 0x07000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5864, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7864, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2320, 0x00000001, 0x00000000),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58c8, 0x01000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78c8, 0x01000000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_nondbcc_path01);
static const struct rtw89_reg5_def rtw8852a_rfk_iqk_restore_defs_dbcc_path0[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
- DECL_RFK_WM(0x12b8, 0x40000000, 0x00000000),
- DECL_RFK_WM(0x5864, 0xc0000000, 0x00000000),
- DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
- DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000000),
- DECL_RFK_WM(0x0700, 0x08000000, 0x00000000),
- DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
- DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
- DECL_RFK_WM(0x12a0, 0x000ff000, 0x00000000),
- DECL_RFK_WM(0x0700, 0x07000000, 0x00000000),
- DECL_RFK_WM(0x5864, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000000),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
- DECL_RFK_WM(0x58c8, 0x01000000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_dbcc_path0);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
+ RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5864, 0xc0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0700, 0x08000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+ RTW89_DECL_RFK_WM(0x12a0, 0x000ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0700, 0x07000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5864, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000000),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58c8, 0x01000000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_dbcc_path0);
static const struct rtw89_reg5_def rtw8852a_rfk_iqk_restore_defs_dbcc_path1[] = {
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
- DECL_RFK_WM(0x32b8, 0x40000000, 0x00000000),
- DECL_RFK_WM(0x7864, 0xc0000000, 0x00000000),
- DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
- DECL_RFK_WM(0x2c1c, 0x00000004, 0x00000000),
- DECL_RFK_WM(0x2700, 0x08000000, 0x00000000),
- DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
- DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
- DECL_RFK_WM(0x32a0, 0x000ff000, 0x00000000),
- DECL_RFK_WM(0x2700, 0x07000000, 0x00000000),
- DECL_RFK_WM(0x7864, 0x20000000, 0x00000000),
- DECL_RFK_WM(0x2c3c, 0x00000200, 0x00000000),
- DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
- DECL_RFK_WM(0x78c8, 0x01000000, 0x00000000),
-};
-
-DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_dbcc_path1);
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
+ RTW89_DECL_RFK_WM(0x32b8, 0x40000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7864, 0xc0000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2c1c, 0x00000004, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2700, 0x08000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+ RTW89_DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+ RTW89_DECL_RFK_WM(0x32a0, 0x000ff000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2700, 0x07000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7864, 0x20000000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x2c3c, 0x00000200, 0x00000000),
+ RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78c8, 0x01000000, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_dbcc_path1);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h
index 4a4a45d778ff..33e6c404ecf9 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h
@@ -5,54 +5,7 @@
#ifndef __RTW89_8852A_RFK_TABLE_H__
#define __RTW89_8852A_RFK_TABLE_H__
-#include "core.h"
-
-enum rtw89_rfk_flag {
- RTW89_RFK_F_WRF = 0,
- RTW89_RFK_F_WM = 1,
- RTW89_RFK_F_WS = 2,
- RTW89_RFK_F_WC = 3,
- RTW89_RFK_F_DELAY = 4,
- RTW89_RFK_F_NUM,
-};
-
-struct rtw89_rfk_tbl {
- const struct rtw89_reg5_def *defs;
- u32 size;
-};
-
-#define DECLARE_RFK_TBL(_name) \
-const struct rtw89_rfk_tbl _name ## _tbl = { \
- .defs = _name, \
- .size = ARRAY_SIZE(_name), \
-}
-
-#define DECL_RFK_WRF(_path, _addr, _mask, _data) \
- {.flag = RTW89_RFK_F_WRF, \
- .path = _path, \
- .addr = _addr, \
- .mask = _mask, \
- .data = _data,}
-
-#define DECL_RFK_WM(_addr, _mask, _data) \
- {.flag = RTW89_RFK_F_WM, \
- .addr = _addr, \
- .mask = _mask, \
- .data = _data,}
-
-#define DECL_RFK_WS(_addr, _mask) \
- {.flag = RTW89_RFK_F_WS, \
- .addr = _addr, \
- .mask = _mask,}
-
-#define DECL_RFK_WC(_addr, _mask) \
- {.flag = RTW89_RFK_F_WC, \
- .addr = _addr, \
- .mask = _mask,}
-
-#define DECL_RFK_DELAY(_data) \
- {.flag = RTW89_RFK_F_DELAY, \
- .data = _data,}
+#include "phy.h"
extern const struct rtw89_rfk_tbl rtw8852a_tssi_sys_defs_tbl;
extern const struct rtw89_rfk_tbl rtw8852a_tssi_sys_defs_2g_tbl;
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
new file mode 100644
index 000000000000..48459aba441d
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2020-2021 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "pci.h"
+#include "rtw8852a.h"
+
+static const struct rtw89_pci_info rtw8852a_pci_info = {
+ .dma_addr_set = &rtw89_pci_ch_dma_addr_set,
+};
+
+static const struct rtw89_driver_info rtw89_8852ae_info = {
+ .chip = &rtw8852a_chip_info,
+ .bus = {
+ .pci = &rtw8852a_pci_info,
+ },
+};
+
+static const struct pci_device_id rtw89_8852ae_id_table[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8852),
+ .driver_data = (kernel_ulong_t)&rtw89_8852ae_info,
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xa85a),
+ .driver_data = (kernel_ulong_t)&rtw89_8852ae_info,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(pci, rtw89_8852ae_id_table);
+
+static struct pci_driver rtw89_8852ae_driver = {
+ .name = "rtw89_8852ae",
+ .id_table = rtw89_8852ae_id_table,
+ .probe = rtw89_pci_probe,
+ .remove = rtw89_pci_remove,
+ .driver.pm = &rtw89_pm_ops,
+};
+module_pci_driver(rtw89_8852ae_driver);
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852AE driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
new file mode 100644
index 000000000000..58920e91765e
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -0,0 +1,529 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#include "debug.h"
+#include "fw.h"
+#include "mac.h"
+#include "phy.h"
+#include "reg.h"
+#include "rtw8852c.h"
+
+static const struct rtw89_dle_mem rtw8852c_dle_mem_pcie[] = {
+ [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_wde_size19, &rtw89_ple_size19,
+ &rtw89_wde_qt18, &rtw89_wde_qt18, &rtw89_ple_qt46,
+ &rtw89_ple_qt47},
+ [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_wde_size18,
+ &rtw89_ple_size18, &rtw89_wde_qt17, &rtw89_wde_qt17,
+ &rtw89_ple_qt44, &rtw89_ple_qt45},
+ [RTW89_QTA_INVALID] = {RTW89_QTA_INVALID, NULL, NULL, NULL, NULL, NULL,
+ NULL},
+};
+
+static const u32 rtw8852c_h2c_regs[RTW89_H2CREG_MAX] = {
+ R_AX_H2CREG_DATA0_V1, R_AX_H2CREG_DATA1_V1, R_AX_H2CREG_DATA2_V1,
+ R_AX_H2CREG_DATA3_V1
+};
+
+static const u32 rtw8852c_c2h_regs[RTW89_H2CREG_MAX] = {
+ R_AX_C2HREG_DATA0_V1, R_AX_C2HREG_DATA1_V1, R_AX_C2HREG_DATA2_V1,
+ R_AX_C2HREG_DATA3_V1
+};
+
+static const struct rtw89_page_regs rtw8852c_page_regs = {
+ .hci_fc_ctrl = R_AX_HCI_FC_CTRL_V1,
+ .ch_page_ctrl = R_AX_CH_PAGE_CTRL_V1,
+ .ach_page_ctrl = R_AX_ACH0_PAGE_CTRL_V1,
+ .ach_page_info = R_AX_ACH0_PAGE_INFO_V1,
+ .pub_page_info3 = R_AX_PUB_PAGE_INFO3_V1,
+ .pub_page_ctrl1 = R_AX_PUB_PAGE_CTRL1_V1,
+ .pub_page_ctrl2 = R_AX_PUB_PAGE_CTRL2_V1,
+ .pub_page_info1 = R_AX_PUB_PAGE_INFO1_V1,
+ .pub_page_info2 = R_AX_PUB_PAGE_INFO2_V1,
+ .wp_page_ctrl1 = R_AX_WP_PAGE_CTRL1_V1,
+ .wp_page_ctrl2 = R_AX_WP_PAGE_CTRL2_V1,
+ .wp_page_info1 = R_AX_WP_PAGE_INFO1_V1,
+};
+
+static const struct rtw89_reg_def rtw8852c_dcfo_comp = {
+ R_DCFO_COMP_S0_V1, B_DCFO_COMP_S0_V1_MSK
+};
+
+static int rtw8852c_pwr_on_func(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+ u32 ret;
+
+ val32 = rtw89_read32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_PAD_HCI_SEL_V2_MASK);
+ if (val32 == MAC_AX_HCI_SEL_PCIE_USB)
+ rtw89_write32_set(rtwdev, R_AX_LDO_AON_CTRL0, B_AX_PD_REGU_L);
+
+ rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_AFSM_WLSUS_EN |
+ B_AX_AFSM_PCIE_SUS_EN);
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_DIS_WLBT_PDNSUSEN_SOPC);
+ rtw89_write32_set(rtwdev, R_AX_WLLPS_CTRL, B_AX_DIS_WLBT_LPSEN_LOPC);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APDM_HPDN);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_SWLPS);
+
+ ret = read_poll_timeout(rtw89_read32, val32, val32 & B_AX_RDY_SYSPWR,
+ 1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_EN_WLON);
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFN_ONMAC);
+
+ ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_APFN_ONMAC),
+ 1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL);
+ if (ret)
+ return ret;
+
+ rtw89_write8_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write8_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write8_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write8_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+
+ rtw89_write8_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_CALIB_EN_V1);
+
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, B_AX_CMAC1_FEN);
+ rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, B_AX_R_SYM_ISO_CMAC12PP);
+ rtw89_write32_clr(rtwdev, R_AX_AFE_CTRL1, B_AX_R_SYM_WLCMAC1_P4_PC_EN |
+ B_AX_R_SYM_WLCMAC1_P3_PC_EN |
+ B_AX_R_SYM_WLCMAC1_P2_PC_EN |
+ B_AX_R_SYM_WLCMAC1_P1_PC_EN |
+ B_AX_R_SYM_WLCMAC1_PC_EN);
+ rtw89_write32_set(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_PTA_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL,
+ XTAL_SI_GND_SHDN_WL, XTAL_SI_GND_SHDN_WL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_RFC_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL,
+ XTAL_SI_SHDN_WL, XTAL_SI_SHDN_WL);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_OFF_WEI,
+ XTAL_SI_OFF_WEI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_OFF_EI,
+ XTAL_SI_OFF_EI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_RFC2RF);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_PON_WEI,
+ XTAL_SI_PON_WEI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_PON_EI,
+ XTAL_SI_PON_EI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_SRAM2RFC);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_XMD_2, 0, XTAL_SI_LDO_LPS);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_XMD_4, 0, XTAL_SI_LPS_CAP);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+ rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_ISO_EB2CORE);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B15);
+
+ fsleep(1000);
+
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B14);
+ rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
+ rtw89_write32_set(rtwdev, R_AX_GPIO0_15_EECS_EESK_LED1_PULL_LOW_EN,
+ B_AX_EECS_PULL_LOW_EN | B_AX_EESK_PULL_LOW_EN |
+ B_AX_LED1_PULL_LOW_EN);
+
+ rtw89_write32_set(rtwdev, R_AX_DMAC_FUNC_EN,
+ B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_MPDU_PROC_EN |
+ B_AX_WD_RLS_EN | B_AX_DLE_WDE_EN | B_AX_TXPKT_CTRL_EN |
+ B_AX_STA_SCH_EN | B_AX_DLE_PLE_EN | B_AX_PKT_BUF_EN |
+ B_AX_DMAC_TBL_EN | B_AX_PKT_IN_EN | B_AX_DLE_CPUIO_EN |
+ B_AX_DISPATCHER_EN | B_AX_BBRPT_EN | B_AX_MAC_SEC_EN |
+ B_AX_MAC_UN_EN | B_AX_H_AXIDMA_EN);
+
+ rtw89_write32_set(rtwdev, R_AX_CMAC_FUNC_EN,
+ B_AX_CMAC_EN | B_AX_CMAC_TXEN | B_AX_CMAC_RXEN |
+ B_AX_FORCE_CMACREG_GCKEN | B_AX_PHYINTF_EN |
+ B_AX_CMAC_DMA_EN | B_AX_PTCLTOP_EN | B_AX_SCHEDULER_EN |
+ B_AX_TMAC_EN | B_AX_RMAC_EN);
+
+ return 0;
+}
+
+static int rtw8852c_pwr_off_func(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+ u32 ret;
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_RFC2RF,
+ XTAL_SI_RFC2RF);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_OFF_EI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_OFF_WEI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, 0, XTAL_SI_RF00);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, 0, XTAL_SI_RF10);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_SRAM2RFC,
+ XTAL_SI_SRAM2RFC);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_PON_EI);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_PON_WEI);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_EN_WLON);
+ rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, B_AX_FEN_BB_GLB_RSTN | B_AX_FEN_BBRSTB);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND,
+ B_AX_R_SYM_FEN_WLBBGLB_1 | B_AX_R_SYM_FEN_WLBBFUN_1);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_RFC_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_SHDN_WL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_PTA_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_GND_SHDN_WL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_OFFMAC);
+
+ ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_APFM_OFFMAC),
+ 1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL);
+ if (ret)
+ return ret;
+
+ rtw89_write32(rtwdev, R_AX_WLLPS_CTRL, 0x0001A0B0);
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_XTAL_OFF_A_DIE);
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_SWLPS);
+
+ return 0;
+}
+
+static void rtw8852c_e_efuse_parsing(struct rtw89_efuse *efuse,
+ struct rtw8852c_efuse *map)
+{
+ ether_addr_copy(efuse->addr, map->e.mac_addr);
+ efuse->rfe_type = map->rfe_type;
+ efuse->xtal_cap = map->xtal_k;
+}
+
+static void rtw8852c_efuse_parsing_tssi(struct rtw89_dev *rtwdev,
+ struct rtw8852c_efuse *map)
+{
+ struct rtw89_tssi_info *tssi = &rtwdev->tssi;
+ struct rtw8852c_tssi_offset *ofst[] = {&map->path_a_tssi, &map->path_b_tssi};
+ u8 *bw40_1s_tssi_6g_ofst[] = {map->bw40_1s_tssi_6g_a, map->bw40_1s_tssi_6g_b};
+ u8 i, j;
+
+ tssi->thermal[RF_PATH_A] = map->path_a_therm;
+ tssi->thermal[RF_PATH_B] = map->path_b_therm;
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ memcpy(tssi->tssi_cck[i], ofst[i]->cck_tssi,
+ sizeof(ofst[i]->cck_tssi));
+
+ for (j = 0; j < TSSI_CCK_CH_GROUP_NUM; j++)
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][EFUSE] path=%d cck[%d]=0x%x\n",
+ i, j, tssi->tssi_cck[i][j]);
+
+ memcpy(tssi->tssi_mcs[i], ofst[i]->bw40_tssi,
+ sizeof(ofst[i]->bw40_tssi));
+ memcpy(tssi->tssi_mcs[i] + TSSI_MCS_2G_CH_GROUP_NUM,
+ ofst[i]->bw40_1s_tssi_5g, sizeof(ofst[i]->bw40_1s_tssi_5g));
+ memcpy(tssi->tssi_6g_mcs[i], bw40_1s_tssi_6g_ofst[i],
+ sizeof(tssi->tssi_6g_mcs[i]));
+
+ for (j = 0; j < TSSI_MCS_CH_GROUP_NUM; j++)
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][EFUSE] path=%d mcs[%d]=0x%x\n",
+ i, j, tssi->tssi_mcs[i][j]);
+ }
+}
+
+static int rtw8852c_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map)
+{
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+ struct rtw8852c_efuse *map;
+
+ map = (struct rtw8852c_efuse *)log_map;
+
+ efuse->country_code[0] = map->country_code[0];
+ efuse->country_code[1] = map->country_code[1];
+ rtw8852c_efuse_parsing_tssi(rtwdev, map);
+
+ switch (rtwdev->hci.type) {
+ case RTW89_HCI_TYPE_PCIE:
+ rtw8852c_e_efuse_parsing(efuse, map);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ rtw89_info(rtwdev, "chip rfe_type is %d\n", efuse->rfe_type);
+
+ return 0;
+}
+
+static void rtw8852c_phycap_parsing_tssi(struct rtw89_dev *rtwdev, u8 *phycap_map)
+{
+ struct rtw89_tssi_info *tssi = &rtwdev->tssi;
+ static const u32 tssi_trim_addr[RF_PATH_NUM_8852C] = {0x5D6, 0x5AB};
+ static const u32 tssi_trim_addr_6g[RF_PATH_NUM_8852C] = {0x5CE, 0x5A3};
+ u32 addr = rtwdev->chip->phycap_addr;
+ bool pg = false;
+ u32 ofst;
+ u8 i, j;
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++) {
+ /* addrs are in decreasing order */
+ ofst = tssi_trim_addr[i] - addr - j;
+ tssi->tssi_trim[i][j] = phycap_map[ofst];
+
+ if (phycap_map[ofst] != 0xff)
+ pg = true;
+ }
+
+ for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM_6G; j++) {
+ /* addrs are in decreasing order */
+ ofst = tssi_trim_addr_6g[i] - addr - j;
+ tssi->tssi_trim_6g[i][j] = phycap_map[ofst];
+
+ if (phycap_map[ofst] != 0xff)
+ pg = true;
+ }
+ }
+
+ if (!pg) {
+ memset(tssi->tssi_trim, 0, sizeof(tssi->tssi_trim));
+ memset(tssi->tssi_trim_6g, 0, sizeof(tssi->tssi_trim_6g));
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM] no PG, set all trim info to 0\n");
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++)
+ for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++)
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] path=%d idx=%d trim=0x%x addr=0x%x\n",
+ i, j, tssi->tssi_trim[i][j],
+ tssi_trim_addr[i] - j);
+}
+
+static void rtw8852c_phycap_parsing_thermal_trim(struct rtw89_dev *rtwdev,
+ u8 *phycap_map)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ static const u32 thm_trim_addr[RF_PATH_NUM_8852C] = {0x5DF, 0x5DC};
+ u32 addr = rtwdev->chip->phycap_addr;
+ u8 i;
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ info->thermal_trim[i] = phycap_map[thm_trim_addr[i] - addr];
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[THERMAL][TRIM] path=%d thermal_trim=0x%x\n",
+ i, info->thermal_trim[i]);
+
+ if (info->thermal_trim[i] != 0xff)
+ info->pg_thermal_trim = true;
+ }
+}
+
+static void rtw8852c_thermal_trim(struct rtw89_dev *rtwdev)
+{
+#define __thm_setting(raw) \
+({ \
+ u8 __v = (raw); \
+ ((__v & 0x1) << 3) | ((__v & 0x1f) >> 1); \
+})
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ u8 i, val;
+
+ if (!info->pg_thermal_trim) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[THERMAL][TRIM] no PG, do nothing\n");
+
+ return;
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ val = __thm_setting(info->thermal_trim[i]);
+ rtw89_write_rf(rtwdev, i, RR_TM2, RR_TM2_OFF, val);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[THERMAL][TRIM] path=%d thermal_setting=0x%x\n",
+ i, val);
+ }
+#undef __thm_setting
+}
+
+static void rtw8852c_phycap_parsing_pa_bias_trim(struct rtw89_dev *rtwdev,
+ u8 *phycap_map)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ static const u32 pabias_trim_addr[RF_PATH_NUM_8852C] = {0x5DE, 0x5DB};
+ u32 addr = rtwdev->chip->phycap_addr;
+ u8 i;
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ info->pa_bias_trim[i] = phycap_map[pabias_trim_addr[i] - addr];
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] path=%d pa_bias_trim=0x%x\n",
+ i, info->pa_bias_trim[i]);
+
+ if (info->pa_bias_trim[i] != 0xff)
+ info->pg_pa_bias_trim = true;
+ }
+}
+
+static void rtw8852c_pa_bias_trim(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ u8 pabias_2g, pabias_5g;
+ u8 i;
+
+ if (!info->pg_pa_bias_trim) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] no PG, do nothing\n");
+
+ return;
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ pabias_2g = FIELD_GET(GENMASK(3, 0), info->pa_bias_trim[i]);
+ pabias_5g = FIELD_GET(GENMASK(7, 4), info->pa_bias_trim[i]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] path=%d 2G=0x%x 5G=0x%x\n",
+ i, pabias_2g, pabias_5g);
+
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXG, pabias_2g);
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXA, pabias_5g);
+ }
+}
+
+static int rtw8852c_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map)
+{
+ rtw8852c_phycap_parsing_tssi(rtwdev, phycap_map);
+ rtw8852c_phycap_parsing_thermal_trim(rtwdev, phycap_map);
+ rtw8852c_phycap_parsing_pa_bias_trim(rtwdev, phycap_map);
+
+ return 0;
+}
+
+static void rtw8852c_power_trim(struct rtw89_dev *rtwdev)
+{
+ rtw8852c_thermal_trim(rtwdev);
+ rtw8852c_pa_bias_trim(rtwdev);
+}
+
+static
+void rtw8852c_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
+ s8 pw_ofst, enum rtw89_mac_idx mac_idx)
+{
+ s8 pw_ofst_2tx;
+ s8 val_1t;
+ s8 val_2t;
+ u32 reg;
+ u8 i;
+
+ if (pw_ofst < -32 || pw_ofst > 31) {
+ rtw89_warn(rtwdev, "[ULTB] Err pwr_offset=%d\n", pw_ofst);
+ return;
+ }
+ val_1t = pw_ofst << 2;
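+	/* The 2TX offset is backed off by 3 relative to 1TX and clamped to -32. */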
+ pw_ofst_2tx = max(pw_ofst - 3, -32);
+ val_2t = pw_ofst_2tx << 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[ULTB] val_1tx=0x%x\n", val_1t);
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[ULTB] val_2tx=0x%x\n", val_2t);
+
+ for (i = 0; i < 4; i++) {
+ /* 1TX */
+ reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_1T, mac_idx);
+ rtw89_write32_mask(rtwdev, reg,
+ B_AX_PWR_UL_TB_1T_V1_MASK << (8 * i),
+ val_1t);
+ /* 2TX */
+ reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_2T, mac_idx);
+ rtw89_write32_mask(rtwdev, reg,
+ B_AX_PWR_UL_TB_2T_V1_MASK << (8 * i),
+ val_2t);
+ }
+}
+
+static const struct rtw89_chip_ops rtw8852c_chip_ops = {
+ .read_efuse = rtw8852c_read_efuse,
+ .read_phycap = rtw8852c_read_phycap,
+ .power_trim = rtw8852c_power_trim,
+ .read_rf = rtw89_phy_read_rf_v1,
+ .write_rf = rtw89_phy_write_rf_v1,
+ .set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset,
+ .pwr_on_func = rtw8852c_pwr_on_func,
+ .pwr_off_func = rtw8852c_pwr_off_func,
+ .cfg_ctrl_path = rtw89_mac_cfg_ctrl_path_v1,
+ .mac_cfg_gnt = rtw89_mac_cfg_gnt_v1,
+ .stop_sch_tx = rtw89_mac_stop_sch_tx_v1,
+ .resume_sch_tx = rtw89_mac_resume_sch_tx_v1,
+};
+
+const struct rtw89_chip_info rtw8852c_chip_info = {
+ .chip_id = RTL8852C,
+ .ops = &rtw8852c_chip_ops,
+ .fw_name = "rtw89/rtw8852c_fw.bin",
+ .dle_mem = rtw8852c_dle_mem_pcie,
+ .rf_base_addr = {0xe000, 0xf000},
+ .pwr_on_seq = NULL,
+ .pwr_off_seq = NULL,
+ .sec_ctrl_efuse_size = 4,
+ .physical_efuse_size = 1216,
+ .logical_efuse_size = 2048,
+ .limit_efuse_size = 1280,
+ .dav_phy_efuse_size = 96,
+ .dav_log_efuse_size = 16,
+ .phycap_addr = 0x590,
+ .phycap_size = 0x60,
+ .hci_func_en_addr = R_AX_HCI_FUNC_EN_V1,
+ .h2c_ctrl_reg = R_AX_H2CREG_CTRL_V1,
+ .h2c_regs = rtw8852c_h2c_regs,
+ .c2h_ctrl_reg = R_AX_C2HREG_CTRL_V1,
+ .c2h_regs = rtw8852c_c2h_regs,
+ .page_regs = &rtw8852c_page_regs,
+ .dcfo_comp = &rtw8852c_dcfo_comp,
+ .dcfo_comp_sft = 5,
+};
+EXPORT_SYMBOL(rtw8852c_chip_info);
+
+MODULE_FIRMWARE("rtw89/rtw8852c_fw.bin");
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852C driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.h b/drivers/net/wireless/realtek/rtw89/rtw8852c.h
new file mode 100644
index 000000000000..d0594716040b
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852C_H__
+#define __RTW89_8852C_H__
+
+#include "core.h"
+
+#define RF_PATH_NUM_8852C 2
+
+struct rtw8852c_u_efuse {
+ u8 rsvd[0x38];
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct rtw8852c_e_efuse {
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct rtw8852c_tssi_offset {
+ u8 cck_tssi[TSSI_CCK_CH_GROUP_NUM];
+ u8 bw40_tssi[TSSI_MCS_2G_CH_GROUP_NUM];
+ u8 rsvd[7];
+ u8 bw40_1s_tssi_5g[TSSI_MCS_5G_CH_GROUP_NUM];
+} __packed;
+
+struct rtw8852c_efuse {
+ u8 rsvd[0x210];
+ struct rtw8852c_tssi_offset path_a_tssi;
+ u8 rsvd1[10];
+ struct rtw8852c_tssi_offset path_b_tssi;
+ u8 rsvd2[94];
+ u8 channel_plan;
+ u8 xtal_k;
+ u8 rsvd3;
+ u8 iqk_lck;
+ u8 rsvd4[5];
+ u8 reg_setting:2;
+ u8 tx_diversity:1;
+ u8 rx_diversity:2;
+ u8 ac_mode:1;
+ u8 module_type:2;
+ u8 rsvd5;
+ u8 shared_ant:1;
+ u8 coex_type:3;
+ u8 ant_iso:1;
+ u8 radio_on_off:1;
+ u8 rsvd6:2;
+ u8 eeprom_version;
+ u8 customer_id;
+ u8 tx_bb_swing_2g;
+ u8 tx_bb_swing_5g;
+ u8 tx_cali_pwr_trk_mode;
+ u8 trx_path_selection;
+ u8 rfe_type;
+ u8 country_code[2];
+ u8 rsvd7[3];
+ u8 path_a_therm;
+ u8 path_b_therm;
+ u8 rsvd8[46];
+ u8 bw40_1s_tssi_6g_a[TSSI_MCS_6G_CH_GROUP_NUM];
+ u8 rsvd9[10];
+ u8 bw40_1s_tssi_6g_b[TSSI_MCS_6G_CH_GROUP_NUM];
+ u8 rsvd10[110];
+ u8 channel_plan_6g;
+ u8 rsvd11[71];
+ union {
+ struct rtw8852c_u_efuse u;
+ struct rtw8852c_e_efuse e;
+ };
+} __packed;
+
+extern const struct rtw89_chip_info rtw8852c_chip_info;
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
new file mode 100644
index 000000000000..e71370585b4d
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2020-2022 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "pci.h"
+#include "reg.h"
+#include "rtw8852c.h"
+
+static const struct rtw89_pci_info rtw8852c_pci_info = {
+ .dma_addr_set = &rtw89_pci_ch_dma_addr_set_v1,
+};
+
+static const struct rtw89_driver_info rtw89_8852ce_info = {
+ .chip = &rtw8852c_chip_info,
+ .bus = {
+ .pci = &rtw8852c_pci_info,
+ },
+};
+
+static const struct pci_device_id rtw89_8852ce_id_table[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xc852),
+ .driver_data = (kernel_ulong_t)&rtw89_8852ce_info,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(pci, rtw89_8852ce_id_table);
+
+static struct pci_driver rtw89_8852ce_driver = {
+ .name = "rtw89_8852ce",
+ .id_table = rtw89_8852ce_id_table,
+ .probe = rtw89_pci_probe,
+ .remove = rtw89_pci_remove,
+ .driver.pm = &rtw89_pm_ops,
+};
+module_pci_driver(rtw89_8852ce_driver);
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852CE driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h
index 75b11249f306..86e3d8b400d6 100644
--- a/drivers/net/wireless/realtek/rtw89/txrx.h
+++ b/drivers/net/wireless/realtek/rtw89/txrx.h
@@ -31,6 +31,8 @@
#define RTW89_TXWD_BODY0_HDR_LLC_LEN GENMASK(15, 11)
#define RTW89_TXWD_BODY0_WD_PAGE BIT(7)
#define RTW89_TXWD_BODY0_HW_AMSDU BIT(5)
+#define RTW89_TXWD_BODY0_HW_SSN_SEL GENMASK(3, 2)
+#define RTW89_TXWD_BODY0_HW_SSN_MODE GENMASK(1, 0)
/* TX WD BODY DWORD 1 */
#define RTW89_TXWD_BODY1_PAYLOAD_ID GENMASK(31, 16)
@@ -56,6 +58,7 @@
#define RTW89_TXWD_INFO0_GI_LTF GENMASK(27, 25)
#define RTW89_TXWD_INFO0_DATA_RATE GENMASK(24, 16)
#define RTW89_TXWD_INFO0_DISDATAFB BIT(10)
+#define RTW89_TXWD_INFO0_MULTIPORT_ID GENMASK(6, 4)
/* TX WD INFO DWORD 1 */
#define RTW89_TXWD_INFO1_DATA_RTY_LOWEST_RATE GENMASK(24, 16)
diff --git a/drivers/net/wireless/st/cw1200/queue.c b/drivers/net/wireless/st/cw1200/queue.c
index 12952b1c29df..e06da4b3b0d4 100644
--- a/drivers/net/wireless/st/cw1200/queue.c
+++ b/drivers/net/wireless/st/cw1200/queue.c
@@ -8,6 +8,7 @@
#include <net/mac80211.h>
#include <linux/sched.h>
+#include <linux/jiffies.h>
#include "queue.h"
#include "cw1200.h"
#include "debug.h"
@@ -94,7 +95,7 @@ static void __cw1200_queue_gc(struct cw1200_queue *queue,
bool wakeup_stats = false;
list_for_each_entry_safe(item, tmp, &queue->queue, head) {
- if (jiffies - item->queue_timestamp < queue->ttl)
+ if (time_is_after_jiffies(item->queue_timestamp + queue->ttl))
break;
--queue->num_queued;
--queue->link_map_cache[item->txpriv.link_id];
diff --git a/drivers/net/wireless/st/cw1200/wsm.c b/drivers/net/wireless/st/cw1200/wsm.c
index 99624dd34886..5a3e7a626702 100644
--- a/drivers/net/wireless/st/cw1200/wsm.c
+++ b/drivers/net/wireless/st/cw1200/wsm.c
@@ -537,7 +537,7 @@ int wsm_set_tx_queue_params(struct cw1200_common *priv,
{
int ret;
struct wsm_buf *buf = &priv->wsm_cmd_buf;
- u8 queue_id_to_wmm_aci[] = {3, 2, 0, 1};
+ static const u8 queue_id_to_wmm_aci[] = { 3, 2, 0, 1 };
wsm_cmd_lock(priv);
diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c
index e64e4e579518..82bc0d44212e 100644
--- a/drivers/net/wireless/zydas/zd1201.c
+++ b/drivers/net/wireless/zydas/zd1201.c
@@ -521,7 +521,7 @@ static int zd1201_setconfig(struct zd1201 *zd, int rid, const void *buf, int len
zd->rxdatas = 0;
zd->rxlen = 0;
for (seq=0; len > 0; seq++) {
- request = kmalloc(16, gfp_mask);
+ request = kzalloc(16, gfp_mask);
if (!request)
return -ENOMEM;
urb = usb_alloc_urb(0, gfp_mask);
@@ -529,7 +529,6 @@ static int zd1201_setconfig(struct zd1201 *zd, int rid, const void *buf, int len
kfree(request);
return -ENOMEM;
}
- memset(request, 0, 16);
reqlen = len>12 ? 12 : len;
request[0] = ZD1201_USB_RESREQ;
request[1] = seq;
diff --git a/drivers/net/wwan/iosm/iosm_ipc_debugfs.c b/drivers/net/wwan/iosm/iosm_ipc_debugfs.c
index f2f57751a7d2..e916139b8cd4 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_debugfs.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_debugfs.c
@@ -12,10 +12,10 @@
void ipc_debugfs_init(struct iosm_imem *ipc_imem)
{
- struct dentry *debugfs_pdev = wwan_get_debugfs_dir(ipc_imem->dev);
+ ipc_imem->debugfs_wwan_dir = wwan_get_debugfs_dir(ipc_imem->dev);
ipc_imem->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME,
- debugfs_pdev);
+ ipc_imem->debugfs_wwan_dir);
ipc_imem->trace = ipc_trace_init(ipc_imem);
if (!ipc_imem->trace)
@@ -26,4 +26,5 @@ void ipc_debugfs_deinit(struct iosm_imem *ipc_imem)
{
ipc_trace_deinit(ipc_imem->trace);
debugfs_remove_recursive(ipc_imem->debugfs_dir);
+ wwan_put_debugfs_dir(ipc_imem->debugfs_wwan_dir);
}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c
index f9e8e0ee4de3..1e6a47976642 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c
@@ -114,17 +114,35 @@ ipc_imem_fast_update_timer_cb(struct hrtimer *hr_timer)
return HRTIMER_NORESTART;
}
+static int ipc_imem_tq_adb_timer_cb(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ ipc_mux_ul_adb_finish(ipc_imem->mux);
+ return 0;
+}
+
+static enum hrtimer_restart
+ipc_imem_adb_timer_cb(struct hrtimer *hr_timer)
+{
+ struct iosm_imem *ipc_imem =
+ container_of(hr_timer, struct iosm_imem, adb_timer);
+
+ ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_adb_timer_cb, 0,
+ NULL, 0, false);
+ return HRTIMER_NORESTART;
+}
+
static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem,
struct ipc_mux_config *cfg)
{
ipc_mmio_update_cp_capability(ipc_imem->mmio);
- if (!ipc_imem->mmio->has_mux_lite) {
+ if (ipc_imem->mmio->mux_protocol == MUX_UNKNOWN) {
dev_err(ipc_imem->dev, "Failed to get Mux capability.");
return -EINVAL;
}
- cfg->protocol = MUX_LITE;
+ cfg->protocol = ipc_imem->mmio->mux_protocol;
cfg->ul_flow = (ipc_imem->mmio->has_ul_flow_credit == 1) ?
MUX_UL_ON_CREDITS :
@@ -153,6 +171,10 @@ void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem,
IPC_MSG_PREP_FEATURE_SET, &prep_args);
}
+/**
+ * ipc_imem_td_update_timer_start - Starts the TD Update Timer if not started.
+ * @ipc_imem: Pointer to imem data-struct
+ */
void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem)
{
/* Use the TD update timer only in the runtime phase */
@@ -179,6 +201,21 @@ void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
hrtimer_cancel(hr_timer);
}
+/**
+ * ipc_imem_adb_timer_start - Starts the ADB timer if not already started.
+ * @ipc_imem: Pointer to imem data-struct
+ */
+void ipc_imem_adb_timer_start(struct iosm_imem *ipc_imem)
+{
+ if (!hrtimer_active(&ipc_imem->adb_timer)) {
+ ipc_imem->hrtimer_period =
+ ktime_set(0, IOSM_AGGR_MUX_ADB_FINISH_TIMEOUT_NSEC);
+ hrtimer_start(&ipc_imem->adb_timer,
+ ipc_imem->hrtimer_period,
+ HRTIMER_MODE_REL);
+ }
+}
+
bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
{
struct ipc_mem_channel *channel;
@@ -550,6 +587,11 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
ipc_imem->ipc_port[ctrl_chl_idx] = NULL;
+ if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7360_ID &&
+ chnl_cfg_port.wwan_port_type == WWAN_PORT_MBIM) {
+ ctrl_chl_idx++;
+ continue;
+ }
if (chnl_cfg_port.wwan_port_type != WWAN_PORT_UNKNOWN) {
ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
chnl_cfg_port,
@@ -680,8 +722,11 @@ static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
}
/* Try to generate new ADB or ADGH. */
- if (ipc_mux_ul_data_encode(ipc_imem->mux))
+ if (ipc_mux_ul_data_encode(ipc_imem->mux)) {
ipc_imem_td_update_timer_start(ipc_imem);
+ if (ipc_imem->mux->protocol == MUX_AGGREGATION)
+ ipc_imem_adb_timer_start(ipc_imem);
+ }
/* Continue the send procedure with accumulated SIO or NETIF packets.
* Reset the debounce flags.
@@ -1330,6 +1375,9 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
HRTIMER_MODE_REL);
ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;
+ hrtimer_init(&ipc_imem->adb_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ipc_imem->adb_timer.function = ipc_imem_adb_timer_cb;
+
if (ipc_imem_config(ipc_imem)) {
dev_err(ipc_imem->dev, "failed to initialize the imem");
goto imem_config_fail;
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.h b/drivers/net/wwan/iosm/iosm_ipc_imem.h
index 98554e9beb01..e700dc8bfe0a 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.h
@@ -317,6 +317,7 @@ enum ipc_phase {
* @tdupdate_timer: Delay the TD update doorbell.
* @fast_update_timer: forced head pointer update delay timer.
* @td_alloc_timer: Timer for DL pipe TD allocation retry
+ * @adb_timer: Timer for finishing the ADB.
* @rom_exit_code: Mapped boot rom exit code.
* @enter_runtime: 1 means the transition to runtime phase was
* executed.
@@ -340,6 +341,7 @@ enum ipc_phase {
* @ev_mux_net_transmit_pending:0 means inform the IPC tasklet to pass
* @reset_det_n: Reset detect flag
* @pcie_wake_n: Pcie wake flag
+ * @debugfs_wwan_dir: WWAN Debug FS directory entry
* @debugfs_dir: Debug FS directory for driver-specific entries
*/
struct iosm_imem {
@@ -364,6 +366,7 @@ struct iosm_imem {
struct hrtimer tdupdate_timer;
struct hrtimer fast_update_timer;
struct hrtimer td_alloc_timer;
+ struct hrtimer adb_timer;
enum rom_exit_code rom_exit_code;
u32 enter_runtime;
struct completion ul_pend_sem;
@@ -382,6 +385,7 @@ struct iosm_imem {
reset_det_n:1,
pcie_wake_n:1;
#ifdef CONFIG_WWAN_DEBUGFS
+ struct dentry *debugfs_wwan_dir;
struct dentry *debugfs_dir;
#endif
};
@@ -593,4 +597,7 @@ void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
* Returns: 0 on success, -1 on failure
*/
int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem);
+
+void ipc_imem_adb_timer_start(struct iosm_imem *ipc_imem);
+
#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mmio.c b/drivers/net/wwan/iosm/iosm_ipc_mmio.c
index f09e5e77a2a5..63eb08c43c05 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mmio.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_mmio.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include "iosm_ipc_mmio.h"
+#include "iosm_ipc_mux.h"
/* Definition of MMIO offsets
* note that MMIO_CI offsets are relative to end of chip info structure
@@ -71,8 +72,9 @@ void ipc_mmio_update_cp_capability(struct iosm_mmio *ipc_mmio)
ver = ipc_mmio_get_cp_version(ipc_mmio);
cp_cap = ioread32(ipc_mmio->base + ipc_mmio->offset.cp_capability);
- ipc_mmio->has_mux_lite = (ver >= IOSM_CP_VERSION) &&
- !(cp_cap & DL_AGGR) && !(cp_cap & UL_AGGR);
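+	/* Use aggregation when CP advertises UL or DL aggregation support. */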
+ ipc_mmio->mux_protocol = ((ver >= IOSM_CP_VERSION) && (cp_cap &
+ (UL_AGGR | DL_AGGR))) ? MUX_AGGREGATION
+ : MUX_LITE;
ipc_mmio->has_ul_flow_credit =
(ver >= IOSM_CP_VERSION) && (cp_cap & UL_FLOW_CREDIT);
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mmio.h b/drivers/net/wwan/iosm/iosm_ipc_mmio.h
index f861994a6d90..193d7ba2478a 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mmio.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_mmio.h
@@ -72,7 +72,7 @@ struct mmio_offset {
* @context_info_addr: Physical base address of context info structure
* @chip_info_version: Version of chip info structure
* @chip_info_size: Size of chip info structure
- * @has_mux_lite: It doesn't support mux aggergation
+ * @mux_protocol: MUX protocol supported by CP (MUX Lite or MUX aggregation)
* @has_ul_flow_credit: Ul flow credit support
* @has_slp_no_prot: Device sleep no protocol support
* @has_mcr_support: Usage of mcr support
@@ -84,8 +84,8 @@ struct iosm_mmio {
phys_addr_t context_info_addr;
unsigned int chip_info_version;
unsigned int chip_info_size;
- u8 has_mux_lite:1,
- has_ul_flow_credit:1,
+ u32 mux_protocol;
+ u8 has_ul_flow_credit:1,
has_slp_no_prot:1,
has_mcr_support:1;
};
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.c b/drivers/net/wwan/iosm/iosm_ipc_mux.c
index 8e66ffe92055..9c7a9a2a1f25 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mux.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux.c
@@ -279,9 +279,10 @@ struct iosm_mux *ipc_mux_init(struct ipc_mux_config *mux_cfg,
struct iosm_imem *imem)
{
struct iosm_mux *ipc_mux = kzalloc(sizeof(*ipc_mux), GFP_KERNEL);
- int i, ul_tds, ul_td_size;
+ int i, j, ul_tds, ul_td_size;
struct sk_buff_head *free_list;
struct sk_buff *skb;
+ int qlt_size;
if (!ipc_mux)
return NULL;
@@ -321,6 +322,24 @@ struct iosm_mux *ipc_mux_init(struct ipc_mux_config *mux_cfg,
ipc_mux->channel_id = -1;
ipc_mux->channel = NULL;
+ if (ipc_mux->protocol != MUX_LITE) {
+ qlt_size = offsetof(struct mux_qlth, ql) +
+ MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
+
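+		/* Pre-allocate one queue level table per IP session. */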
+ for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
+ ipc_mux->ul_adb.pp_qlt[i] = kzalloc(qlt_size,
+ GFP_ATOMIC);
+ if (!ipc_mux->ul_adb.pp_qlt[i]) {
+ for (j = i - 1; j >= 0; j--)
+ kfree(ipc_mux->ul_adb.pp_qlt[j]);
+ return NULL;
+ }
+ }
+
+ ul_td_size = IPC_MEM_MAX_UL_ADB_BUF_SIZE;
+ ul_tds = IPC_MEM_MAX_TDS_MUX_AGGR_UL;
+ }
+
/* Allocate the list of UL ADB. */
for (i = 0; i < ul_tds; i++) {
dma_addr_t mapping;
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.h b/drivers/net/wwan/iosm/iosm_ipc_mux.h
index 88debaa1ed31..cd9d74cc097f 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mux.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux.h
@@ -8,9 +8,12 @@
#include "iosm_ipc_protocol.h"
-/* Size of the buffer for the IP MUX data buffer. */
-#define IPC_MEM_MAX_DL_MUX_BUF_SIZE (16 * 1024)
-#define IPC_MEM_MAX_UL_ADB_BUF_SIZE IPC_MEM_MAX_DL_MUX_BUF_SIZE
+#define IPC_MEM_MAX_UL_DG_ENTRIES 100
+#define IPC_MEM_MAX_TDS_MUX_AGGR_UL 60
+
+#define IPC_MEM_MAX_ADB_BUF_SIZE (16 * 1024)
+#define IPC_MEM_MAX_UL_ADB_BUF_SIZE IPC_MEM_MAX_ADB_BUF_SIZE
+#define IPC_MEM_MAX_DL_ADB_BUF_SIZE IPC_MEM_MAX_ADB_BUF_SIZE
/* Size of the buffer for the IP MUX Lite data buffer. */
#define IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE (2 * 1024)
@@ -167,6 +170,7 @@ enum mux_state {
enum ipc_mux_protocol {
MUX_UNKNOWN,
MUX_LITE,
+ MUX_AGGREGATION,
};
/* Supported UL data transfer methods. */
@@ -192,24 +196,111 @@ struct mux_session {
flush:1; /* flush net interface ? */
};
-/* State of a single UL data block. */
+/**
+ * struct mux_adth_dg - Structure of the datagram in the Aggregated Datagram
+ * Table Header.
+ * @datagram_index : Index (in bytes) to the k-th datagram in the table.
+ * Index shall count from the start of the block including
+ * the 16-byte header. This value shall be non-zero.
+ * @datagram_length: Length of the k-th datagram including the head padding.
+ * This value shall be non-zero.
+ * @service_class: Service class identifier for the datagram.
+ * @reserved: Reserved bytes. Set to zero
+ */
+struct mux_adth_dg {
+ __le32 datagram_index;
+ __le16 datagram_length;
+ u8 service_class;
+ u8 reserved;
+};
+
+/**
+ * struct mux_qlth_ql - Structure of the queue level in the Aggregated
+ * Datagram Queue Level Table Header.
+ * @nr_of_bytes: Number of bytes available to transmit in the queue.
+ */
+struct mux_qlth_ql {
+ __le32 nr_of_bytes;
+};
+
+/**
+ * struct mux_qlth - Structure of Aggregated Datagram Queue Level Table
+ * Header.
+ * @signature: Signature of the Queue Level Table Header
+ * Value: 0x48544C51 (ASCII characters: 'Q' 'L' 'T' 'H')
+ * @table_length: Length (in bytes) of the datagram table. This length
+ * shall include the queue level table header size.
+ * Minimum value: 0x10
+ * @if_id: ID of the interface the queue levels in the table
+ * belong to.
+ * @reserved: Reserved byte. Set to zero.
+ * @next_table_index: Index (in bytes) to the next table in the buffer. Index
+ * shall count from the start of the block including the
+ * 16-byte header. Value of zero indicates end of the list.
+ * @reserved2: Reserved bytes. Set to zero
+ * @ql: Queue level table with variable length
+ */
+struct mux_qlth {
+ __le32 signature;
+ __le16 table_length;
+ u8 if_id;
+ u8 reserved;
+ __le32 next_table_index;
+ __le32 reserved2;
+ struct mux_qlth_ql ql;
+};
+
+/**
+ * struct mux_adb - Structure of State of a single UL data block.
+ * @dest_skb: Current UL skb for the data block.
+ * @buf: ADB memory
+ * @adgh: ADGH pointer
+ * @qlth_skb: QLTH pointer
+ * @next_table_index: Pointer to next table index.
+ * @free_list: List of allocated ADBs for the UL session.
+ * @size: Size of the ADB memory.
+ * @if_cnt: Statistic counter
+ * @dg_cnt_total: Datagram count total
+ * @payload_size: Payload Size
+ * @dg: Datagram table.
+ * @pp_qlt: Pointers to hold Queue Level Tables of session
+ * @adbh: ADBH pointer
+ * @qlt_updated: Queue level table updated
+ * @dg_count: Datagram count
+ */
struct mux_adb {
- struct sk_buff *dest_skb; /* Current UL skb for the data block. */
- u8 *buf; /* ADB memory. */
- struct mux_adgh *adgh; /* ADGH pointer */
- struct sk_buff *qlth_skb; /* QLTH pointer */
- u32 *next_table_index; /* Pointer to next table index. */
- struct sk_buff_head free_list; /* List of alloc. ADB for the UL sess.*/
- int size; /* Size of the ADB memory. */
- u32 if_cnt; /* Statistic counter */
+ struct sk_buff *dest_skb;
+ u8 *buf;
+ struct mux_adgh *adgh;
+ struct sk_buff *qlth_skb;
+ u32 *next_table_index;
+ struct sk_buff_head free_list;
+ int size;
+ u32 if_cnt;
u32 dg_cnt_total;
u32 payload_size;
+ struct mux_adth_dg
+ dg[IPC_MEM_MUX_IP_SESSION_ENTRIES][IPC_MEM_MAX_UL_DG_ENTRIES];
+ struct mux_qlth *pp_qlt[IPC_MEM_MUX_IP_SESSION_ENTRIES];
+ struct mux_adbh *adbh;
+ u32 qlt_updated[IPC_MEM_MUX_IP_SESSION_ENTRIES];
+ u32 dg_count[IPC_MEM_MUX_IP_SESSION_ENTRIES];
};
-/* Temporary ACB state. */
+/**
+ * struct mux_acb - Structure of Temporary ACB state.
+ * @skb: Used UL skb.
+ * @if_id: Session id.
+ * @buf_p: Command buffer.
+ * @wanted_response: Wanted Response
+ * @got_response: Got response
+ * @cmd: command
+ * @got_param: Received command/response parameter
+ */
struct mux_acb {
struct sk_buff *skb; /* Used UL skb. */
int if_id; /* Session id. */
+ u8 *buf_p;
u32 wanted_response;
u32 got_response;
u32 cmd;
@@ -241,6 +332,12 @@ struct mux_acb {
* @wwan_q_offset: This will hold the offset of the given instance
* Useful while passing or receiving packets from
* wwan/imem layer.
+ * @adb_finish_timer: Timer for forcefully finishing the ADB
+ * @acb_tx_sequence_nr: Sequence number for the ACB header.
+ * @params: user configurable parameters
+ * @adb_tx_sequence_nr: Sequence number for ADB header
+ * @acc_adb_size: Statistic data for logging
+ * @acc_payload_size: Statistic data for logging
* @initialized: MUX object is initialized
* @ev_mux_net_transmit_pending:
* 0 means inform the IPC tasklet to pass the
@@ -269,10 +366,16 @@ struct iosm_mux {
long long ul_data_pend_bytes;
struct mux_acb acb;
int wwan_q_offset;
+ struct hrtimer adb_finish_timer;
+ u16 acb_tx_sequence_nr;
+ struct ipc_params *params;
+ u16 adb_tx_sequence_nr;
+ unsigned long long acc_adb_size;
+ unsigned long long acc_payload_size;
u8 initialized:1,
ev_mux_net_transmit_pending:1,
- adb_prep_ongoing:1;
-};
+ adb_prep_ongoing;
+} __packed;
/* MUX configuration structure */
struct ipc_mux_config {
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
index 40fb54a0513e..d41e373f9c0a 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
@@ -54,6 +54,49 @@ static int ipc_mux_acb_send(struct iosm_mux *ipc_mux, bool blocking)
return 0;
}
+/* Initialize the ACB header. */
+static void ipc_mux_acb_init(struct iosm_mux *ipc_mux)
+{
+ struct mux_acb *acb = &ipc_mux->acb;
+ struct mux_acbh *header;
+
+ header = (struct mux_acbh *)(acb->skb)->data;
+ header->block_length = cpu_to_le32(sizeof(struct mux_acbh));
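+	/* The first command starts right after the ACB header. */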
+ header->first_cmd_index = header->block_length;
+ header->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ACBH);
+ header->sequence_nr = cpu_to_le16(ipc_mux->acb_tx_sequence_nr++);
+}
+
+/* Add a command to the ACB. */
+static struct mux_cmdh *ipc_mux_acb_add_cmd(struct iosm_mux *ipc_mux, u32 cmd,
+ void *param, u32 param_size)
+{
+ struct mux_acbh *header;
+ struct mux_cmdh *cmdh;
+ struct mux_acb *acb;
+
+ acb = &ipc_mux->acb;
+ header = (struct mux_acbh *)(acb->skb)->data;
+ cmdh = (struct mux_cmdh *)
+ ((acb->skb)->data + le32_to_cpu(header->block_length));
+
+ cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
+ cmdh->command_type = cpu_to_le32(cmd);
+ cmdh->if_id = acb->if_id;
+
+ acb->cmd = cmd;
+ cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_cmdh, param) +
+ param_size);
+ cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);
+ if (param)
+ memcpy(&cmdh->param, param, param_size);
+
+ skb_put(acb->skb, le32_to_cpu(header->block_length) +
+ le16_to_cpu(cmdh->cmd_len));
+
+ return cmdh;
+}
+
/* Prepare mux Command */
static struct mux_lite_cmdh *ipc_mux_lite_add_cmd(struct iosm_mux *ipc_mux,
u32 cmd, struct mux_acb *acb,
@@ -104,7 +147,7 @@ int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
size_t res_size, bool blocking, bool respond)
{
struct mux_acb *acb = &ipc_mux->acb;
- struct mux_lite_cmdh *ack_lite;
+ union mux_type_cmdh cmdh;
int ret = 0;
acb->if_id = if_id;
@@ -112,11 +155,23 @@ int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
if (ret)
return ret;
- ack_lite = ipc_mux_lite_add_cmd(ipc_mux, cmd_type, acb, param,
- res_size);
- if (respond)
- ack_lite->transaction_id = cpu_to_le32(transaction_id);
+ if (ipc_mux->protocol == MUX_LITE) {
+ cmdh.ack_lite = ipc_mux_lite_add_cmd(ipc_mux, cmd_type, acb,
+ param, res_size);
+ if (respond)
+ cmdh.ack_lite->transaction_id =
+ cpu_to_le32(transaction_id);
+ } else {
+ /* Initialize the ACB header. */
+ ipc_mux_acb_init(ipc_mux);
+ cmdh.ack_aggr = ipc_mux_acb_add_cmd(ipc_mux, cmd_type, param,
+ res_size);
+
+ if (respond)
+ cmdh.ack_aggr->transaction_id =
+ cpu_to_le32(transaction_id);
+ }
ret = ipc_mux_acb_send(ipc_mux, blocking);
return ret;
@@ -129,15 +184,17 @@ void ipc_mux_netif_tx_flowctrl(struct mux_session *session, int idx, bool on)
}
static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux,
- struct mux_lite_cmdh *cmdh)
+ union mux_cmd_param param,
+ __le32 command_type, u8 if_id,
+ __le32 transaction_id)
{
struct mux_acb *acb = &ipc_mux->acb;
- switch (le32_to_cpu(cmdh->command_type)) {
+ switch (le32_to_cpu(command_type)) {
case MUX_CMD_OPEN_SESSION_RESP:
case MUX_CMD_CLOSE_SESSION_RESP:
/* Resume the control application. */
- acb->got_param = cmdh->param;
+ acb->got_param = param;
break;
case MUX_LITE_CMD_FLOW_CTL_ACK:
@@ -147,8 +204,16 @@ static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux,
if (ipc_mux->protocol != MUX_LITE)
return -EINVAL;
- dev_dbg(ipc_mux->dev, "if %u FLOW_CTL_ACK %u received",
- cmdh->if_id, le32_to_cpu(cmdh->transaction_id));
+ dev_dbg(ipc_mux->dev, "if_id %u FLOW_CTL_ACK %u received",
+ if_id, le32_to_cpu(transaction_id));
+ break;
+
+ case IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK:
+	/* This command type is not expected as a response for the
+	 * Lite version of the protocol, so return non-zero.
+	 */
+ if (ipc_mux->protocol == MUX_LITE)
+ return -EINVAL;
break;
default:
@@ -156,38 +221,39 @@ static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux,
}
acb->wanted_response = MUX_CMD_INVALID;
- acb->got_response = le32_to_cpu(cmdh->command_type);
+ acb->got_response = le32_to_cpu(command_type);
complete(&ipc_mux->channel->ul_sem);
return 0;
}
-static int ipc_mux_dl_dlcmds_decode_process(struct iosm_mux *ipc_mux,
- struct mux_lite_cmdh *cmdh)
+static int ipc_mux_dl_cmds_decode_process(struct iosm_mux *ipc_mux,
+ union mux_cmd_param *param,
+ __le32 command_type, u8 if_id,
+ __le16 cmd_len, int size)
{
- union mux_cmd_param *param = &cmdh->param;
struct mux_session *session;
- int new_size;
+ struct hrtimer *adb_timer;
dev_dbg(ipc_mux->dev, "if_id[%d]: dlcmds decode process %d",
- cmdh->if_id, le32_to_cpu(cmdh->command_type));
+ if_id, le32_to_cpu(command_type));
- switch (le32_to_cpu(cmdh->command_type)) {
+ switch (le32_to_cpu(command_type)) {
case MUX_LITE_CMD_FLOW_CTL:
+ case IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE:
- if (cmdh->if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
+ if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
dev_err(ipc_mux->dev, "if_id [%d] not valid",
- cmdh->if_id);
+ if_id);
return -EINVAL; /* No session interface id. */
}
- session = &ipc_mux->session[cmdh->if_id];
+ session = &ipc_mux->session[if_id];
+ adb_timer = &ipc_mux->imem->adb_timer;
- new_size = offsetof(struct mux_lite_cmdh, param) +
- sizeof(param->flow_ctl);
if (param->flow_ctl.mask == cpu_to_le32(0xFFFFFFFF)) {
/* Backward Compatibility */
- if (cmdh->cmd_len == cpu_to_le16(new_size))
+ if (cmd_len == cpu_to_le16(size))
session->flow_ctl_mask =
le32_to_cpu(param->flow_ctl.mask);
else
@@ -197,6 +263,16 @@ static int ipc_mux_dl_dlcmds_decode_process(struct iosm_mux *ipc_mux,
* to limit uplink session queueing
*/
session->net_tx_stop = true;
+
+			/* Finish the ADB here. Otherwise any data already
+			 * queued in it would only be sent to CP once the ADB
+			 * is filled up by some other session.
+			 */
+ if (ipc_mux->protocol == MUX_AGGREGATION) {
+ ipc_mux_ul_adb_finish(ipc_mux);
+ ipc_imem_hrtimer_stop(adb_timer);
+ }
/* Update the stats */
session->flow_ctl_en_cnt++;
} else if (param->flow_ctl.mask == 0) {
@@ -205,8 +281,10 @@ static int ipc_mux_dl_dlcmds_decode_process(struct iosm_mux *ipc_mux,
* our internal Tx flag and enabling kernel
* flow control
*/
+ dev_dbg(ipc_mux->dev, "if_id[%u] flow_ctl mask 0x%08X",
+ if_id, le32_to_cpu(param->flow_ctl.mask));
/* Backward Compatibility */
- if (cmdh->cmd_len == cpu_to_le16(new_size))
+ if (cmd_len == cpu_to_le16(size))
session->flow_ctl_mask =
le32_to_cpu(param->flow_ctl.mask);
else
@@ -217,7 +295,10 @@ static int ipc_mux_dl_dlcmds_decode_process(struct iosm_mux *ipc_mux,
break;
}
- dev_dbg(ipc_mux->dev, "if[%u] FLOW CTRL 0x%08X", cmdh->if_id,
+ ipc_mux->acc_adb_size = 0;
+ ipc_mux->acc_payload_size = 0;
+
+ dev_dbg(ipc_mux->dev, "if_id[%u] FLOW CTRL 0x%08X", if_id,
le32_to_cpu(param->flow_ctl.mask));
break;
@@ -235,12 +316,20 @@ static void ipc_mux_dl_cmd_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)skb->data;
__le32 trans_id = cmdh->transaction_id;
+ int size;
- if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh)) {
+ if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
+ cmdh->command_type, cmdh->if_id,
+ cmdh->transaction_id)) {
/* Unable to decode command response indicates the cmd_type
* may be a command instead of response. So try to decoding it.
*/
- if (!ipc_mux_dl_dlcmds_decode_process(ipc_mux, cmdh)) {
+ size = offsetof(struct mux_lite_cmdh, param) +
+ sizeof(cmdh->param.flow_ctl);
+ if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
+ cmdh->command_type,
+ cmdh->if_id,
+ cmdh->cmd_len, size)) {
/* Decoded command may need a response. Give the
* response according to the command type.
*/
@@ -349,7 +438,7 @@ static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux,
adgh = (struct mux_adgh *)block;
- if (adgh->signature != cpu_to_le32(MUX_SIG_ADGH)) {
+ if (adgh->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH)) {
dev_err(ipc_mux->dev, "invalid ADGH signature received");
return;
}
@@ -392,6 +481,192 @@ static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux,
ipc_mux->session[if_id].flush = 1;
}
+static void ipc_mux_dl_acbcmd_decode(struct iosm_mux *ipc_mux,
+ struct mux_cmdh *cmdh, int size)
+{
+ u32 link_st = IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT_RESP;
+ u32 fctl_dis = IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE;
+ u32 fctl_ena = IOSM_AGGR_MUX_CMD_FLOW_CTL_ENABLE;
+ u32 fctl_ack = IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK;
+ union mux_cmd_param *cmd_p = NULL;
+ u32 cmd = link_st;
+ u32 trans_id;
+
+ if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
+ cmdh->command_type, cmdh->if_id,
+ cmdh->cmd_len, size)) {
+ size = 0;
+ if (cmdh->command_type == cpu_to_le32(link_st)) {
+ cmd_p = &cmdh->param;
+ cmd_p->link_status_resp.response = MUX_CMD_RESP_SUCCESS;
+ } else if ((cmdh->command_type == cpu_to_le32(fctl_ena)) ||
+ (cmdh->command_type == cpu_to_le32(fctl_dis))) {
+ cmd = fctl_ack;
+ } else {
+ return;
+ }
+ trans_id = le32_to_cpu(cmdh->transaction_id);
+ ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
+ trans_id, cmd_p, size, false, true);
+ }
+}
+
+/* Decode an aggregated command block. */
+static void ipc_mux_dl_acb_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
+{
+ struct mux_acbh *acbh;
+ struct mux_cmdh *cmdh;
+ u32 next_cmd_index;
+ u8 *block;
+ int size;
+
+ acbh = (struct mux_acbh *)(skb->data);
+ block = (u8 *)(skb->data);
+
+ next_cmd_index = le32_to_cpu(acbh->first_cmd_index);
+ next_cmd_index = array_index_nospec(next_cmd_index,
+ sizeof(struct mux_cmdh));
+
+ while (next_cmd_index != 0) {
+ cmdh = (struct mux_cmdh *)&block[next_cmd_index];
+ next_cmd_index = le32_to_cpu(cmdh->next_cmd_index);
+ if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
+ cmdh->command_type,
+ cmdh->if_id,
+ cmdh->transaction_id)) {
+ size = offsetof(struct mux_cmdh, param) +
+ sizeof(cmdh->param.flow_ctl);
+ ipc_mux_dl_acbcmd_decode(ipc_mux, cmdh, size);
+ }
+ }
+}
+
+/* Process the datagrams of one DL datagram table and pass them to netif. */
+static int mux_dl_process_dg(struct iosm_mux *ipc_mux, struct mux_adbh *adbh,
+ struct mux_adth_dg *dg, struct sk_buff *skb,
+ int if_id, int nr_of_dg)
+{
+ u32 dl_head_pad_len = ipc_mux->session[if_id].dl_head_pad_len;
+ u32 packet_offset, i, rc;
+
+ for (i = 0; i < nr_of_dg; i++, dg++) {
+ if (le32_to_cpu(dg->datagram_index)
+ < sizeof(struct mux_adbh))
+ goto dg_error;
+
+ /* Is the packet inside of the ADB */
+ if (le32_to_cpu(dg->datagram_index) >=
+ le32_to_cpu(adbh->block_length)) {
+ goto dg_error;
+ } else {
+ packet_offset =
+ le32_to_cpu(dg->datagram_index) +
+ dl_head_pad_len;
+ /* Pass the packet to the netif layer. */
+ rc = ipc_mux_net_receive(ipc_mux, if_id, ipc_mux->wwan,
+ packet_offset,
+ dg->service_class,
+ skb);
+ if (rc)
+ goto dg_error;
+ }
+ }
+ return 0;
+dg_error:
+ return -1;
+}
+
+/* Decode an aggregated data block. */
+static void mux_dl_adb_decode(struct iosm_mux *ipc_mux,
+ struct sk_buff *skb)
+{
+ struct mux_adth_dg *dg;
+ struct iosm_wwan *wwan;
+ struct mux_adbh *adbh;
+ struct mux_adth *adth;
+ int nr_of_dg, if_id;
+ u32 adth_index;
+ u8 *block;
+
+ block = skb->data;
+ adbh = (struct mux_adbh *)block;
+
+ /* Process the aggregated datagram tables. */
+ adth_index = le32_to_cpu(adbh->first_table_index);
+
+ /* Has CP sent an empty ADB ? */
+ if (adth_index < 1) {
+ dev_err(ipc_mux->dev, "unexpected empty ADB");
+ goto adb_decode_err;
+ }
+
+ /* Loop through mixed session tables. */
+ while (adth_index) {
+ /* Get the reference to the table header. */
+ adth = (struct mux_adth *)(block + adth_index);
+
+ /* Get the interface id and map it to the netif id. */
+ if_id = adth->if_id;
+ if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
+ goto adb_decode_err;
+
+ if_id = array_index_nospec(if_id,
+ IPC_MEM_MUX_IP_SESSION_ENTRIES);
+
+ /* Is the session active ? */
+ wwan = ipc_mux->session[if_id].wwan;
+ if (!wwan)
+ goto adb_decode_err;
+
+ /* Consistency checks for aggregated datagram table. */
+ if (adth->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH))
+ goto adb_decode_err;
+
+ if (le16_to_cpu(adth->table_length) < (sizeof(struct mux_adth) -
+ sizeof(struct mux_adth_dg)))
+ goto adb_decode_err;
+
+ /* Calculate the number of datagrams. */
+ nr_of_dg = (le16_to_cpu(adth->table_length) -
+ sizeof(struct mux_adth) +
+ sizeof(struct mux_adth_dg)) /
+ sizeof(struct mux_adth_dg);
+
+ /* Is the datagram table empty ? */
+ if (nr_of_dg < 1) {
+ dev_err(ipc_mux->dev,
+ "adthidx=%u,nr_of_dg=%d,next_tblidx=%u",
+ adth_index, nr_of_dg,
+ le32_to_cpu(adth->next_table_index));
+
+ /* Move to the next aggregated datagram table. */
+ adth_index = le32_to_cpu(adth->next_table_index);
+ continue;
+ }
+
+ /* New aggregated datagram table. */
+ dg = &adth->dg;
+ if (mux_dl_process_dg(ipc_mux, adbh, dg, skb, if_id,
+ nr_of_dg) < 0)
+ goto adb_decode_err;
+
+ /* mark session for final flush */
+ ipc_mux->session[if_id].flush = 1;
+
+ /* Move to the next aggregated datagram table. */
+ adth_index = le32_to_cpu(adth->next_table_index);
+ }
+
+adb_decode_err:
+ return;
+}
+
+/**
+ * ipc_mux_dl_decode - Route the DL packet through the IP MUX layer
+ * depending on Header.
+ * @ipc_mux: Pointer to MUX data-struct
+ * @skb: Pointer to ipc_skb.
+ */
void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
u32 signature;
@@ -403,14 +678,18 @@ void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
signature = le32_to_cpup((__le32 *)skb->data);
switch (signature) {
- case MUX_SIG_ADGH:
+ case IOSM_AGGR_MUX_SIG_ADBH: /* Aggregated Data Block Header */
+ mux_dl_adb_decode(ipc_mux, skb);
+ break;
+ case IOSM_AGGR_MUX_SIG_ADGH:
ipc_mux_dl_adgh_decode(ipc_mux, skb);
break;
-
case MUX_SIG_FCTH:
ipc_mux_dl_fcth_decode(ipc_mux, skb->data);
break;
-
+ case IOSM_AGGR_MUX_SIG_ACBH: /* Aggregated Command Block Header */
+ ipc_mux_dl_acb_decode(ipc_mux, skb);
+ break;
case MUX_SIG_CMDH:
ipc_mux_dl_cmd_decode(ipc_mux, skb);
break;
@@ -427,7 +706,10 @@ static int ipc_mux_ul_skb_alloc(struct iosm_mux *ipc_mux,
{
/* Take the first element of the free list. */
struct sk_buff *skb = skb_dequeue(&ul_adb->free_list);
+ u32 no_if = IPC_MEM_MUX_IP_SESSION_ENTRIES;
+ u32 *next_tb_id;
int qlt_size;
+ u32 if_id;
if (!skb)
return -EBUSY; /* Wait for a free ADB skb. */
@@ -436,7 +718,37 @@ static int ipc_mux_ul_skb_alloc(struct iosm_mux *ipc_mux,
IPC_CB(skb)->op_type = (u8)UL_MUX_OP_ADB;
switch (type) {
- case MUX_SIG_ADGH:
+ case IOSM_AGGR_MUX_SIG_ADBH:
+ /* Save the ADB memory settings. */
+ ul_adb->dest_skb = skb;
+ ul_adb->buf = skb->data;
+ ul_adb->size = IPC_MEM_MAX_ADB_BUF_SIZE;
+
+ /* reset statistic counter */
+ ul_adb->if_cnt = 0;
+ ul_adb->payload_size = 0;
+ ul_adb->dg_cnt_total = 0;
+
+ /* Initialize the ADBH. */
+ ul_adb->adbh = (struct mux_adbh *)ul_adb->buf;
+ memset(ul_adb->adbh, 0, sizeof(struct mux_adbh));
+ ul_adb->adbh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADBH);
+ ul_adb->adbh->block_length =
+ cpu_to_le32(sizeof(struct mux_adbh));
+ next_tb_id = (unsigned int *)&ul_adb->adbh->first_table_index;
+ ul_adb->next_table_index = next_tb_id;
+
+ /* Clear the local copy of DGs for new ADB */
+ memset(ul_adb->dg, 0, sizeof(ul_adb->dg));
+
+ /* Clear the DG count and QLT updated status for new ADB */
+ for (if_id = 0; if_id < no_if; if_id++) {
+ ul_adb->dg_count[if_id] = 0;
+ ul_adb->qlt_updated[if_id] = 0;
+ }
+ break;
+
+ case IOSM_AGGR_MUX_SIG_ADGH:
/* Save the ADB memory settings. */
ul_adb->dest_skb = skb;
ul_adb->buf = skb->data;
@@ -506,6 +818,94 @@ static void ipc_mux_ul_adgh_finish(struct iosm_mux *ipc_mux)
str, bytes);
}
+static void ipc_mux_ul_encode_adth(struct iosm_mux *ipc_mux,
+ struct mux_adb *ul_adb, int *out_offset)
+{
+ int i, qlt_size, offset = *out_offset;
+ struct mux_qlth *p_adb_qlt;
+ struct mux_adth_dg *dg;
+ struct mux_adth *adth;
+ u16 adth_dg_size;
+ u32 *next_tb_id;
+
+ qlt_size = offsetof(struct mux_qlth, ql) +
+ MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
+
+ for (i = 0; i < ipc_mux->nr_sessions; i++) {
+ if (ul_adb->dg_count[i] > 0) {
+ adth_dg_size = offsetof(struct mux_adth, dg) +
+ ul_adb->dg_count[i] * sizeof(*dg);
+
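+			/* Link this datagram table into the ADB table chain. */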
+ *ul_adb->next_table_index = offset;
+ adth = (struct mux_adth *)&ul_adb->buf[offset];
+ next_tb_id = (unsigned int *)&adth->next_table_index;
+ ul_adb->next_table_index = next_tb_id;
+ offset += adth_dg_size;
+ adth->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH);
+ adth->if_id = i;
+ adth->table_length = cpu_to_le16(adth_dg_size);
+ adth_dg_size -= offsetof(struct mux_adth, dg);
+ memcpy(&adth->dg, ul_adb->dg[i], adth_dg_size);
+ ul_adb->if_cnt++;
+ }
+
+ if (ul_adb->qlt_updated[i]) {
+ *ul_adb->next_table_index = offset;
+ p_adb_qlt = (struct mux_qlth *)&ul_adb->buf[offset];
+ ul_adb->next_table_index =
+ (u32 *)&p_adb_qlt->next_table_index;
+ memcpy(p_adb_qlt, ul_adb->pp_qlt[i], qlt_size);
+ offset += qlt_size;
+ }
+ }
+ *out_offset = offset;
+}
+
+/**
+ * ipc_mux_ul_adb_finish - Add the TD of the aggregated session packets to TDR.
+ * @ipc_mux: Pointer to MUX data-struct.
+ */
+void ipc_mux_ul_adb_finish(struct iosm_mux *ipc_mux)
+{
+ bool ul_data_pend = false;
+ struct mux_adb *ul_adb;
+ unsigned long flags;
+ int offset;
+
+ ul_adb = &ipc_mux->ul_adb;
+ if (!ul_adb->dest_skb)
+ return;
+
+ offset = *ul_adb->next_table_index;
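+	/* Append per-session datagram and queue level tables at the end. */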
+ ipc_mux_ul_encode_adth(ipc_mux, ul_adb, &offset);
+ ul_adb->adbh->block_length = cpu_to_le32(offset);
+
+ if (le32_to_cpu(ul_adb->adbh->block_length) > ul_adb->size) {
+ ul_adb->dest_skb = NULL;
+ return;
+ }
+
+ *ul_adb->next_table_index = 0;
+ ul_adb->adbh->sequence_nr = cpu_to_le16(ipc_mux->adb_tx_sequence_nr++);
+ skb_put(ul_adb->dest_skb, le32_to_cpu(ul_adb->adbh->block_length));
+
+ spin_lock_irqsave(&(&ipc_mux->channel->ul_list)->lock, flags);
+ __skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
+ spin_unlock_irqrestore(&(&ipc_mux->channel->ul_list)->lock, flags);
+
+ ul_adb->dest_skb = NULL;
+ /* Updates the TDs with ul_list */
+ ul_data_pend = ipc_imem_ul_write_td(ipc_mux->imem);
+
+ /* Delay the doorbell irq */
+ if (ul_data_pend)
+ ipc_imem_td_update_timer_start(ipc_mux->imem);
+
+ ipc_mux->acc_adb_size += le32_to_cpu(ul_adb->adbh->block_length);
+ ipc_mux->acc_payload_size += ul_adb->payload_size;
+ ipc_mux->ul_data_pend_bytes += ul_adb->payload_size;
+}
+
/* Allocates an ADB from the free list and initializes it with ADBH */
static bool ipc_mux_ul_adb_allocate(struct iosm_mux *ipc_mux,
struct mux_adb *adb, int *size_needed,
@@ -688,7 +1088,7 @@ static int ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id,
while (nr_of_pkts > 0) {
/* get destination skb allocated */
if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
- MUX_SIG_ADGH)) {
+ IOSM_AGGR_MUX_SIG_ADGH)) {
dev_err(ipc_mux->dev, "no reserved memory for ADGH");
return -ENOMEM;
}
@@ -720,7 +1120,7 @@ static int ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id,
memcpy(adb->buf + offset + pad_len, src_skb->data,
src_skb->len);
- adb->adgh->signature = cpu_to_le32(MUX_SIG_ADGH);
+ adb->adgh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH);
adb->adgh->if_id = session_id;
adb->adgh->length =
cpu_to_le16(sizeof(struct mux_adgh) + pad_len +
@@ -762,6 +1162,187 @@ static int ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id,
return adb_updated;
}
+/**
+ * ipc_mux_ul_adb_update_ql - Adds Queue Level Table and Queue Level to ADB
+ * @ipc_mux: pointer to MUX instance data
+ * @p_adb: pointer to UL aggregated data block
+ * @session_id: session id
+ * @qlth_n_ql_size: Length (in bytes) of the queue level table (QLTH and QL)
+ * @ul_list: pointer to skb buffer head
+ */
+void ipc_mux_ul_adb_update_ql(struct iosm_mux *ipc_mux, struct mux_adb *p_adb,
+ int session_id, int qlth_n_ql_size,
+ struct sk_buff_head *ul_list)
+{
+ int qlevel = ul_list->qlen;
+ struct mux_qlth *p_qlt;
+
+ p_qlt = (struct mux_qlth *)p_adb->pp_qlt[session_id];
+
+ /* Initialize QLTH if not been done */
+ if (p_adb->qlt_updated[session_id] == 0) {
+ p_qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
+ p_qlt->if_id = session_id;
+ p_qlt->table_length = cpu_to_le16(qlth_n_ql_size);
+ p_qlt->reserved = 0;
+ p_qlt->reserved2 = 0;
+ }
+
+ /* Update Queue Level information always */
+ p_qlt->ql.nr_of_bytes = cpu_to_le32(qlevel);
+ p_adb->qlt_updated[session_id] = 1;
+}
+
+/* Update the next table index. */
+static int mux_ul_dg_update_tbl_index(struct iosm_mux *ipc_mux,
+ int session_id,
+ struct sk_buff_head *ul_list,
+ struct mux_adth_dg *dg,
+ int aligned_size,
+ u32 qlth_n_ql_size,
+ struct mux_adb *adb,
+ struct sk_buff *src_skb)
+{
+ ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
+ qlth_n_ql_size, ul_list);
+ ipc_mux_ul_adb_finish(ipc_mux);
+ if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
+ IOSM_AGGR_MUX_SIG_ADBH)) {
+ dev_kfree_skb(src_skb);
+ return -ENOMEM;
+ }
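+	/* Re-seed the needed size: the new ADB header plus ADTH/QLTH overhead
+	 * and the pending datagram.
+	 */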
+ ipc_mux->size_needed = le32_to_cpu(adb->adbh->block_length);
+
+ ipc_mux->size_needed += offsetof(struct mux_adth, dg);
+ ipc_mux->size_needed += qlth_n_ql_size;
+ ipc_mux->size_needed += sizeof(*dg) + aligned_size;
+ return 0;
+}
+
+/* Process encode session UL data. */
+static int mux_ul_dg_encode(struct iosm_mux *ipc_mux, struct mux_adb *adb,
+ struct mux_adth_dg *dg,
+ struct sk_buff_head *ul_list,
+ struct sk_buff *src_skb, int session_id,
+ int pkt_to_send, u32 qlth_n_ql_size,
+ int *out_offset, int head_pad_len)
+{
+ int aligned_size;
+ int offset = *out_offset;
+ unsigned long flags;
+ int nr_of_skb = 0;
+
+ while (pkt_to_send > 0) {
+ /* Peek at the head of the list. */
+ src_skb = skb_peek(ul_list);
+ if (!src_skb) {
+ dev_err(ipc_mux->dev,
+				"skb_peek returned NULL with count: %d",
+ pkt_to_send);
+ return -1;
+ }
+ aligned_size = ALIGN((head_pad_len + src_skb->len), 4);
+ ipc_mux->size_needed += sizeof(*dg) + aligned_size;
+
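+		/* Close the current ADB if this datagram does not fit or the
+		 * UL flow control high watermark would be crossed.
+		 */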
+ if (ipc_mux->size_needed > adb->size ||
+ ((ipc_mux->size_needed + ipc_mux->ul_data_pend_bytes) >=
+ IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B)) {
+ *adb->next_table_index = offset;
+ if (mux_ul_dg_update_tbl_index(ipc_mux, session_id,
+ ul_list, dg,
+ aligned_size,
+ qlth_n_ql_size, adb,
+ src_skb) < 0)
+ return -ENOMEM;
+ nr_of_skb = 0;
+ offset = le32_to_cpu(adb->adbh->block_length);
+ /* Load pointer to next available datagram entry */
+ dg = adb->dg[session_id] + adb->dg_count[session_id];
+ }
+ /* Add buffer without head padding to next pending transfer. */
+ memcpy(adb->buf + offset + head_pad_len,
+ src_skb->data, src_skb->len);
+ /* Setup datagram entry. */
+ dg->datagram_index = cpu_to_le32(offset);
+ dg->datagram_length = cpu_to_le16(src_skb->len + head_pad_len);
+ dg->service_class = (((struct sk_buff *)src_skb)->priority);
+ dg->reserved = 0;
+ adb->dg_cnt_total++;
+ adb->payload_size += le16_to_cpu(dg->datagram_length);
+ dg++;
+ adb->dg_count[session_id]++;
+ offset += aligned_size;
+ /* Remove the processed elements and free it. */
+ spin_lock_irqsave(&ul_list->lock, flags);
+ src_skb = __skb_dequeue(ul_list);
+ spin_unlock_irqrestore(&ul_list->lock, flags);
+
+ dev_kfree_skb(src_skb);
+ nr_of_skb++;
+ pkt_to_send--;
+ }
+ *out_offset = offset;
+ return nr_of_skb;
+}
+
+/* Process encode session UL data to ADB. */
+static int mux_ul_adb_encode(struct iosm_mux *ipc_mux, int session_id,
+ struct mux_session *session,
+ struct sk_buff_head *ul_list, struct mux_adb *adb,
+ int pkt_to_send)
+{
+ int adb_updated = -EINVAL;
+ int head_pad_len, offset;
+ struct sk_buff *src_skb = NULL;
+ struct mux_adth_dg *dg;
+ u32 qlth_n_ql_size;
+
+	/* If any of the open sessions has set flow control ON, limit the
+	 * UL data to mux_flow_ctrl_high_thresh_b bytes.
+	 */
+ if (ipc_mux->ul_data_pend_bytes >=
+ IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B) {
+ ipc_mux_stop_tx_for_all_sessions(ipc_mux);
+ return adb_updated;
+ }
+
+ qlth_n_ql_size = offsetof(struct mux_qlth, ql) +
+ MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
+ head_pad_len = session->ul_head_pad_len;
+
+ if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
+ head_pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
+
+ if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
+ IOSM_AGGR_MUX_SIG_ADBH))
+ return -ENOMEM;
+
+ offset = le32_to_cpu(adb->adbh->block_length);
+
+ if (ipc_mux->size_needed == 0)
+ ipc_mux->size_needed = offset;
+
+ /* Calculate the size needed for ADTH, QLTH and QL*/
+ if (adb->dg_count[session_id] == 0) {
+ ipc_mux->size_needed += offsetof(struct mux_adth, dg);
+ ipc_mux->size_needed += qlth_n_ql_size;
+ }
+
+ dg = adb->dg[session_id] + adb->dg_count[session_id];
+
+ if (mux_ul_dg_encode(ipc_mux, adb, dg, ul_list, src_skb,
+ session_id, pkt_to_send, qlth_n_ql_size, &offset,
+ head_pad_len) > 0) {
+ adb_updated = 1;
+ *adb->next_table_index = offset;
+ ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
+ qlth_n_ql_size, ul_list);
+ adb->adbh->block_length = cpu_to_le32(offset);
+ }
+
+ return adb_updated;
+}
+
bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux)
{
struct sk_buff_head *ul_list;
@@ -802,28 +1383,88 @@ bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux)
* -> try next session id.
*/
continue;
-
- updated = ipc_mux_ul_adgh_encode(ipc_mux, session_id, session,
- ul_list, &ipc_mux->ul_adb,
- dg_n);
+ if (ipc_mux->protocol == MUX_LITE)
+ updated = ipc_mux_ul_adgh_encode(ipc_mux, session_id,
+ session, ul_list,
+ &ipc_mux->ul_adb,
+ dg_n);
+ else
+ updated = mux_ul_adb_encode(ipc_mux, session_id,
+ session, ul_list,
+ &ipc_mux->ul_adb,
+ dg_n);
}
ipc_mux->adb_prep_ongoing = false;
return updated == 1;
}
-void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb)
+/* Calculate the total payload size (in bytes) of a given ADB. */
+static int ipc_mux_get_payload_from_adb(struct iosm_mux *ipc_mux,
+ struct mux_adbh *p_adbh)
{
- struct mux_adgh *adgh;
- u16 adgh_len;
+ struct mux_adth_dg *dg;
+ struct mux_adth *adth;
+ u32 payload_size = 0;
+ u32 next_table_idx;
+ int nr_of_dg, i;
+
+ /* Process the aggregated datagram tables. */
+ next_table_idx = le32_to_cpu(p_adbh->first_table_index);
+
+ if (next_table_idx < sizeof(struct mux_adbh)) {
+ dev_err(ipc_mux->dev, "unexpected empty ADB");
+ return payload_size;
+ }
- adgh = (struct mux_adgh *)skb->data;
- adgh_len = le16_to_cpu(adgh->length);
+ while (next_table_idx != 0) {
+ /* Get the reference to the table header. */
+ adth = (struct mux_adth *)((u8 *)p_adbh + next_table_idx);
- if (adgh->signature == cpu_to_le32(MUX_SIG_ADGH) &&
- ipc_mux->ul_flow == MUX_UL)
- ipc_mux->ul_data_pend_bytes = ipc_mux->ul_data_pend_bytes -
- adgh_len;
+ if (adth->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH)) {
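+			/* Number of datagram entries in this table. */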
+ nr_of_dg = (le16_to_cpu(adth->table_length) -
+ sizeof(struct mux_adth) +
+ sizeof(struct mux_adth_dg)) /
+ sizeof(struct mux_adth_dg);
+
+ if (nr_of_dg <= 0)
+ return payload_size;
+
+ dg = &adth->dg;
+
+ for (i = 0; i < nr_of_dg; i++, dg++) {
+ if (le32_to_cpu(dg->datagram_index) <
+ sizeof(struct mux_adbh)) {
+ return payload_size;
+ }
+ payload_size +=
+ le16_to_cpu(dg->datagram_length);
+ }
+ }
+ next_table_idx = le32_to_cpu(adth->next_table_index);
+ }
+
+ return payload_size;
+}
+
+void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb)
+{
+ union mux_type_header hr;
+ u16 adgh_len;
+ int payload;
+
+ if (ipc_mux->protocol == MUX_LITE) {
+ hr.adgh = (struct mux_adgh *)skb->data;
+ adgh_len = le16_to_cpu(hr.adgh->length);
+ if (hr.adgh->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH) &&
+ ipc_mux->ul_flow == MUX_UL)
+ ipc_mux->ul_data_pend_bytes =
+ ipc_mux->ul_data_pend_bytes - adgh_len;
+ } else {
+ hr.adbh = (struct mux_adbh *)(skb->data);
+ payload = ipc_mux_get_payload_from_adb(ipc_mux, hr.adbh);
+ ipc_mux->ul_data_pend_bytes -= payload;
+ }
if (ipc_mux->ul_flow == MUX_UL)
dev_dbg(ipc_mux->dev, "ul_data_pend_bytes: %lld",
@@ -846,10 +1487,13 @@ static int ipc_mux_tq_ul_trigger_encode(struct iosm_imem *ipc_imem, int arg,
/* Add session UL data to a ADB and ADGH */
ul_data_pend = ipc_mux_ul_data_encode(ipc_mux);
- if (ul_data_pend)
+ if (ul_data_pend) {
+ if (ipc_mux->protocol == MUX_AGGREGATION)
+ ipc_imem_adb_timer_start(ipc_mux->imem);
+
/* Delay the doorbell irq */
ipc_imem_td_update_timer_start(ipc_mux->imem);
-
+ }
/* reset the debounce flag */
ipc_mux->ev_mux_net_transmit_pending = false;
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h
index aae83db5cbb8..5d4e3b89542c 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h
@@ -13,6 +13,39 @@
*/
#define MUX_QUEUE_LEVEL 1
+/* ADB finish timer value */
+#define IOSM_AGGR_MUX_ADB_FINISH_TIMEOUT_NSEC (500 * 1000)
+
+/* Enables the flow control (Flow is not allowed) */
+#define IOSM_AGGR_MUX_CMD_FLOW_CTL_ENABLE 5
+
+/* Disables the flow control (Flow is allowed) */
+#define IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE 6
+
+/* ACK the flow control command. Shall have the same Transaction ID as the
+ * matching FLOW_CTL command
+ */
+#define IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK 7
+
+/* Aggregation Protocol Command for report packet indicating link quality
+ */
+#define IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT 8
+
+/* Response to a report packet */
+#define IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT_RESP 9
+
+/* ACBH: Signature of the Aggregated Command Block Header. */
+#define IOSM_AGGR_MUX_SIG_ACBH 0x48424341
+
+/* ADTH: Signature of the Aggregated Datagram Table Header. */
+#define IOSM_AGGR_MUX_SIG_ADTH 0x48544441
+
+/* ADBH: Signature of the Aggregated Data Block Header. */
+#define IOSM_AGGR_MUX_SIG_ADBH 0x48424441
+
+/* ADGH: Signature of the Aggregated Datagram Header. */
+#define IOSM_AGGR_MUX_SIG_ADGH 0x48474441
+
/* Size of the buffer for the IP MUX commands. */
#define MUX_MAX_UL_ACB_BUF_SIZE 256
@@ -53,6 +86,85 @@
#define IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B (110 * 1024)
/**
+ * struct mux_cmdh - Structure of Command Header.
+ * @signature: Signature of the Command Header.
+ * @cmd_len: Length (in bytes) of the command, including the header and parameters.
+ * @if_id: ID of the interface the commands in the table belong to.
+ * @reserved: Reserved. Set to zero.
+ * @next_cmd_index: Index (in bytes) to the next command in the buffer.
+ * @command_type: Command Enum. See table Session Management chapter for
+ * details.
+ * @transaction_id: The Transaction ID shall be unique to the command
+ * @param: Optional parameters used with the command.
+ */
+struct mux_cmdh {
+ __le32 signature;
+ __le16 cmd_len;
+ u8 if_id;
+ u8 reserved;
+ __le32 next_cmd_index;
+ __le32 command_type;
+ __le32 transaction_id;
+ union mux_cmd_param param;
+};
+
+/**
+ * struct mux_acbh - Structure of the Aggregated Command Block Header.
+ * @signature: Signature of the Aggregated Command Block Header.
+ * @reserved: Reserved bytes. Set to zero.
+ * @sequence_nr: Block sequence number.
+ * @block_length: Length (in bytes) of the Aggregated Command Block.
+ * @first_cmd_index: Index (in bytes) to the first command in the buffer.
+ */
+struct mux_acbh {
+ __le32 signature;
+ __le16 reserved;
+ __le16 sequence_nr;
+ __le32 block_length;
+ __le32 first_cmd_index;
+};
+
+/**
+ * struct mux_adbh - Structure of the Aggregated Data Block Header.
+ * @signature: Signature of the Aggregated Data Block Header.
+ * @reserved: Reserved bytes. Set to zero.
+ * @sequence_nr: Block sequence number.
+ * @block_length: Length (in bytes) of the Aggregated Data Block.
+ * @first_table_index: Index (in bytes) to the first Datagram Table in
+ * the buffer.
+ */
+struct mux_adbh {
+ __le32 signature;
+ __le16 reserved;
+ __le16 sequence_nr;
+ __le32 block_length;
+ __le32 first_table_index;
+};
+
+/**
+ * struct mux_adth - Structure of the Aggregated Datagram Table Header.
+ * @signature: Signature of the Aggregated Datagram Table Header.
+ * @table_length: Length (in bytes) of the datagram table.
+ * @if_id: ID of the interface the datagrams in the table
+ * belong to.
+ * @opt_ipv4v6: Indicates IPv4(=0)/IPv6(=1) hint.
+ * @reserved: Reserved bits. Set to zero.
+ * @next_table_index: Index (in bytes) to the next Datagram Table in
+ * the buffer.
+ * @reserved2: Reserved bytes. Set to zero
+ * @dg: datagram table with variable length
+ */
+struct mux_adth {
+ __le32 signature;
+ __le16 table_length;
+ u8 if_id;
+ u8 opt_ipv4v6;
+ __le32 next_table_index;
+ __le32 reserved2;
+ struct mux_adth_dg dg;
+};
+
+/**
* struct mux_adgh - Aggregated Datagram Header.
* @signature: Signature of the Aggregated Datagram Header(0x48474441)
* @length: Length (in bytes) of the datagram header. This length
@@ -129,11 +241,25 @@ struct ipc_mem_lite_gen_tbl {
};
/**
- * ipc_mux_dl_decode -Route the DL packet through the IP MUX layer
- * depending on Header.
- * @ipc_mux: Pointer to MUX data-struct
- * @skb: Pointer to ipc_skb.
+ * union mux_type_cmdh - Command header pointers for MUX Lite and aggregation
+ * @ack_lite: MUX Lite command header pointer
+ * @ack_aggr: Aggregated command header pointer
*/
+union mux_type_cmdh {
+ struct mux_lite_cmdh *ack_lite;
+ struct mux_cmdh *ack_aggr;
+};
+
+/**
+ * union mux_type_header - Union of DL MUX header pointers (ADGH or ADBH)
+ * @adgh: Aggregated Datagram Header pointer
+ * @adbh: Aggregated Data Block Header pointer
+ */
+union mux_type_header {
+ struct mux_adgh *adgh;
+ struct mux_adbh *adbh;
+};
+
void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb);
/**
@@ -147,7 +273,7 @@ void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb);
* @blocking: True for blocking send
* @respond: If true return transaction ID
*
- * Returns: 0 in success and failure value on error
+ * Returns: 0 on success and failure value on error
*/
int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
u32 transaction_id, union mux_cmd_param *param,
@@ -190,4 +316,10 @@ bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux);
*/
void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb);
+void ipc_mux_ul_adb_finish(struct iosm_mux *ipc_mux);
+
+void ipc_mux_ul_adb_update_ql(struct iosm_mux *ipc_mux, struct mux_adb *p_adb,
+ int session_id, int qlth_n_ql_size,
+ struct sk_buff_head *ul_list);
+
#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
index d73894e2a84e..31f57b986df2 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
@@ -320,6 +320,7 @@ ret_fail:
static const struct pci_device_id iosm_ipc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CP_DEVICE_7560_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CP_DEVICE_7360_ID) },
{}
};
MODULE_DEVICE_TABLE(pci, iosm_ipc_ids);
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.h b/drivers/net/wwan/iosm/iosm_ipc_pcie.h
index 7d1f0cd7364c..844cf1fed994 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_pcie.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.h
@@ -14,6 +14,7 @@
/* Device ID */
#define INTEL_CP_DEVICE_7560_ID 0x7560
+#define INTEL_CP_DEVICE_7360_ID 0x7360
/* Define for BAR area usage */
#define IPC_DOORBELL_BAR0 0
diff --git a/drivers/net/wwan/qcom_bam_dmux.c b/drivers/net/wwan/qcom_bam_dmux.c
index 5dfa2eba6014..17d46f4d2913 100644
--- a/drivers/net/wwan/qcom_bam_dmux.c
+++ b/drivers/net/wwan/qcom_bam_dmux.c
@@ -755,7 +755,7 @@ static int __maybe_unused bam_dmux_runtime_resume(struct device *dev)
return 0;
dmux->tx = dma_request_chan(dev, "tx");
- if (IS_ERR(dmux->rx)) {
+ if (IS_ERR(dmux->tx)) {
dev_err(dev, "Failed to request TX DMA channel: %pe\n", dmux->tx);
dmux->tx = NULL;
bam_dmux_runtime_suspend(dev);
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
index 1508dc2a497b..b8c7843730ed 100644
--- a/drivers/net/wwan/wwan_core.c
+++ b/drivers/net/wwan/wwan_core.c
@@ -160,6 +160,42 @@ struct dentry *wwan_get_debugfs_dir(struct device *parent)
return wwandev->debugfs_dir;
}
EXPORT_SYMBOL_GPL(wwan_get_debugfs_dir);
+
+static int wwan_dev_debugfs_match(struct device *dev, const void *dir)
+{
+ struct wwan_device *wwandev;
+
+ if (dev->type != &wwan_dev_type)
+ return 0;
+
+ wwandev = to_wwan_dev(dev);
+
+ return wwandev->debugfs_dir == dir;
+}
+
+static struct wwan_device *wwan_dev_get_by_debugfs(struct dentry *dir)
+{
+ struct device *dev;
+
+ dev = class_find_device(wwan_class, NULL, dir, wwan_dev_debugfs_match);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ return to_wwan_dev(dev);
+}
+
+void wwan_put_debugfs_dir(struct dentry *dir)
+{
+ struct wwan_device *wwandev = wwan_dev_get_by_debugfs(dir);
+
+ if (WARN_ON(IS_ERR(wwandev)))
+ return;
+
+ /* wwan_dev_get_by_debugfs() also got a reference */
+ put_device(&wwandev->dev);
+ put_device(&wwandev->dev);
+}
+EXPORT_SYMBOL_GPL(wwan_put_debugfs_dir);
#endif
/* This function allocates and registers a new WWAN device OR if a WWAN device