Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c503.c | 8
-rw-r--r--  drivers/net/3c515.c | 6
-rw-r--r--  drivers/net/3c59x.c | 32
-rw-r--r--  drivers/net/8139cp.c | 2
-rw-r--r--  drivers/net/Kconfig | 39
-rw-r--r--  drivers/net/Makefile | 3
-rw-r--r--  drivers/net/acenic.c | 2
-rw-r--r--  drivers/net/amd8111e.c | 18
-rw-r--r--  drivers/net/amd8111e.h | 1
-rw-r--r--  drivers/net/arm/am79c961a.c | 35
-rw-r--r--  drivers/net/arm/am79c961a.h | 1
-rw-r--r--  drivers/net/arm/ep93xx_eth.c | 39
-rw-r--r--  drivers/net/arm/ether1.c | 34
-rw-r--r--  drivers/net/arm/ether1.h | 1
-rw-r--r--  drivers/net/arm/ether3.c | 33
-rw-r--r--  drivers/net/arm/ether3.h | 1
-rw-r--r--  drivers/net/arm/ixp4xx_eth.c | 2
-rw-r--r--  drivers/net/atl1c/atl1c.h | 1
-rw-r--r--  drivers/net/atl1c/atl1c_hw.c | 2
-rw-r--r--  drivers/net/atl1c/atl1c_main.c | 8
-rw-r--r--  drivers/net/atl1e/atl1e_main.c | 4
-rw-r--r--  drivers/net/atlx/atl1.c | 4
-rw-r--r--  drivers/net/atlx/atl2.c | 10
-rw-r--r--  drivers/net/atp.c | 2
-rw-r--r--  drivers/net/au1000_eth.c | 344
-rw-r--r--  drivers/net/au1000_eth.h | 42
-rw-r--r--  drivers/net/b44.c | 11
-rw-r--r--  drivers/net/bcm63xx_enet.c | 62
-rw-r--r--  drivers/net/bcm63xx_enet.h | 1
-rw-r--r--  drivers/net/benet/be.h | 15
-rw-r--r--  drivers/net/benet/be_cmds.c | 8
-rw-r--r--  drivers/net/benet/be_cmds.h | 2
-rw-r--r--  drivers/net/benet/be_ethtool.c | 4
-rw-r--r--  drivers/net/benet/be_hw.h | 7
-rw-r--r--  drivers/net/benet/be_main.c | 109
-rw-r--r--  drivers/net/bfin_mac.c | 10
-rw-r--r--  drivers/net/bmac.c | 7
-rw-r--r--  drivers/net/bna/Makefile | 11
-rw-r--r--  drivers/net/bna/bfa_cee.c | 291
-rw-r--r--  drivers/net/bna/bfa_cee.h | 64
-rw-r--r--  drivers/net/bna/bfa_defs.h | 243
-rw-r--r--  drivers/net/bna/bfa_defs_cna.h | 223
-rw-r--r--  drivers/net/bna/bfa_defs_mfg_comm.h | 244
-rw-r--r--  drivers/net/bna/bfa_defs_status.h | 216
-rw-r--r--  drivers/net/bna/bfa_ioc.c | 1738
-rw-r--r--  drivers/net/bna/bfa_ioc.h | 301
-rw-r--r--  drivers/net/bna/bfa_ioc_ct.c | 392
-rw-r--r--  drivers/net/bna/bfa_sm.h | 88
-rw-r--r--  drivers/net/bna/bfa_wc.h | 69
-rw-r--r--  drivers/net/bna/bfi.h | 392
-rw-r--r--  drivers/net/bna/bfi_cna.h | 199
-rw-r--r--  drivers/net/bna/bfi_ctreg.h | 637
-rw-r--r--  drivers/net/bna/bfi_ll.h | 438
-rw-r--r--  drivers/net/bna/bna.h | 654
-rw-r--r--  drivers/net/bna/bna_ctrl.c | 3624
-rw-r--r--  drivers/net/bna/bna_hw.h | 1491
-rw-r--r--  drivers/net/bna/bna_txrx.c | 4209
-rw-r--r--  drivers/net/bna/bna_types.h | 1128
-rw-r--r--  drivers/net/bna/bnad.c | 3267
-rw-r--r--  drivers/net/bna/bnad.h | 333
-rw-r--r--  drivers/net/bna/bnad_ethtool.c | 1277
-rw-r--r--  drivers/net/bna/cna.h | 81
-rw-r--r--  drivers/net/bna/cna_fwimg.c | 64
-rw-r--r--  drivers/net/bnx2.c | 46
-rw-r--r--  drivers/net/bnx2x/bnx2x.h | 15
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c | 42
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.h | 12
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethtool.c | 233
-rw-r--r--  drivers/net/bnx2x/bnx2x_hsi.h | 130
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.c | 8585
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.h | 238
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c | 752
-rw-r--r--  drivers/net/bnx2x/bnx2x_reg.h | 53
-rw-r--r--  drivers/net/bnx2x/bnx2x_stats.c | 7
-rw-r--r--  drivers/net/bonding/bond_main.c | 60
-rw-r--r--  drivers/net/caif/Kconfig | 2
-rw-r--r--  drivers/net/caif/caif_spi_slave.c | 4
-rw-r--r--  drivers/net/can/mscan/mpc5xxx_can.c | 26
-rw-r--r--  drivers/net/can/sja1000/sja1000_of_platform.c | 4
-rw-r--r--  drivers/net/cassini.c | 2
-rw-r--r--  drivers/net/chelsio/sge.c | 2
-rw-r--r--  drivers/net/chelsio/subr.c | 2
-rw-r--r--  drivers/net/cnic.c | 2
-rw-r--r--  drivers/net/cpmac.c | 39
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c | 24
-rw-r--r--  drivers/net/cxgb3/regs.h | 4
-rw-r--r--  drivers/net/cxgb3/sge.c | 2
-rw-r--r--  drivers/net/cxgb3/t3_hw.c | 7
-rw-r--r--  drivers/net/cxgb4/cxgb4.h | 2
-rw-r--r--  drivers/net/cxgb4/cxgb4_main.c | 94
-rw-r--r--  drivers/net/cxgb4/sge.c | 19
-rw-r--r--  drivers/net/cxgb4/t4_hw.h | 1
-rw-r--r--  drivers/net/cxgb4/t4fw_api.h | 5
-rw-r--r--  drivers/net/cxgb4vf/sge.c | 3
-rw-r--r--  drivers/net/declance.c | 2
-rw-r--r--  drivers/net/dl2k.c | 2
-rw-r--r--  drivers/net/dm9000.c | 2
-rw-r--r--  drivers/net/e1000/e1000_main.c | 171
-rw-r--r--  drivers/net/e1000e/82571.c | 31
-rw-r--r--  drivers/net/e1000e/defines.h | 4
-rw-r--r--  drivers/net/e1000e/lib.c | 10
-rw-r--r--  drivers/net/e1000e/netdev.c | 52
-rw-r--r--  drivers/net/eepro.c | 8
-rw-r--r--  drivers/net/ehea/ehea.h | 7
-rw-r--r--  drivers/net/ehea/ehea_main.c | 80
-rw-r--r--  drivers/net/enic/enic.h | 2
-rw-r--r--  drivers/net/enic/enic_main.c | 19
-rw-r--r--  drivers/net/enic/vnic_dev.c | 25
-rw-r--r--  drivers/net/enic/vnic_devcmd.h | 12
-rw-r--r--  drivers/net/enic/vnic_enet.h | 2
-rw-r--r--  drivers/net/enic/vnic_resource.h | 13
-rw-r--r--  drivers/net/enic/vnic_rq.c | 6
-rw-r--r--  drivers/net/enic/vnic_vic.c | 7
-rw-r--r--  drivers/net/enic/vnic_wq.c | 6
-rw-r--r--  drivers/net/epic100.c | 2
-rw-r--r--  drivers/net/eth16i.c | 4
-rw-r--r--  drivers/net/ethoc.c | 6
-rw-r--r--  drivers/net/fealnx.c | 4
-rw-r--r--  drivers/net/fec_mpc52xx.c | 14
-rw-r--r--  drivers/net/fec_mpc52xx_phy.c | 4
-rw-r--r--  drivers/net/forcedeth.c | 6
-rw-r--r--  drivers/net/fs_enet/fs_enet-main.c | 7
-rw-r--r--  drivers/net/fs_enet/mac-fcc.c | 2
-rw-r--r--  drivers/net/fs_enet/mac-fec.c | 2
-rw-r--r--  drivers/net/fs_enet/mac-scc.c | 2
-rw-r--r--  drivers/net/fs_enet/mii-bitbang.c | 4
-rw-r--r--  drivers/net/fs_enet/mii-fec.c | 4
-rw-r--r--  drivers/net/fsl_pq_mdio.c | 5
-rw-r--r--  drivers/net/gianfar.c | 29
-rw-r--r--  drivers/net/gianfar.h | 2
-rw-r--r--  drivers/net/greth.c | 12
-rw-r--r--  drivers/net/greth.h | 2
-rw-r--r--  drivers/net/hamachi.c | 2
-rw-r--r--  drivers/net/hamradio/scc.c | 3
-rw-r--r--  drivers/net/hp.c | 8
-rw-r--r--  drivers/net/hydra.c | 13
-rw-r--r--  drivers/net/ibm_newemac/core.c | 10
-rw-r--r--  drivers/net/ibm_newemac/core.h | 12
-rw-r--r--  drivers/net/ibm_newemac/debug.c | 2
-rw-r--r--  drivers/net/ibm_newemac/mal.c | 4
-rw-r--r--  drivers/net/ibm_newemac/mal.h | 2
-rw-r--r--  drivers/net/ibm_newemac/rgmii.c | 18
-rw-r--r--  drivers/net/ibm_newemac/rgmii.h | 16
-rw-r--r--  drivers/net/ibm_newemac/tah.c | 14
-rw-r--r--  drivers/net/ibm_newemac/tah.h | 12
-rw-r--r--  drivers/net/ibm_newemac/zmii.c | 18
-rw-r--r--  drivers/net/ibm_newemac/zmii.h | 16
-rw-r--r--  drivers/net/ibmlana.c | 2
-rw-r--r--  drivers/net/ibmveth.c | 985
-rw-r--r--  drivers/net/ibmveth.h | 59
-rw-r--r--  drivers/net/igb/igb.h | 2
-rw-r--r--  drivers/net/igb/igb_main.c | 17
-rw-r--r--  drivers/net/igbvf/netdev.c | 2
-rw-r--r--  drivers/net/ioc3-eth.c | 2
-rw-r--r--  drivers/net/ipg.c | 6
-rw-r--r--  drivers/net/irda/mcs7780.c | 2
-rw-r--r--  drivers/net/irda/sh_irda.c | 6
-rw-r--r--  drivers/net/irda/via-ircc.c | 3
-rw-r--r--  drivers/net/iseries_veth.c | 2
-rw-r--r--  drivers/net/ixgb/ixgb_main.c | 6
-rw-r--r--  drivers/net/ixgbe/ixgbe.h | 30
-rw-r--r--  drivers/net/ixgbe/ixgbe_82599.c | 57
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c | 385
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.c | 7
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 1715
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h | 2
-rw-r--r--  drivers/net/ixgbevf/ixgbevf.h | 1
-rw-r--r--  drivers/net/ixgbevf/ixgbevf_main.c | 30
-rw-r--r--  drivers/net/ixgbevf/vf.h | 2
-rw-r--r--  drivers/net/jme.c | 100
-rw-r--r--  drivers/net/jme.h | 3
-rw-r--r--  drivers/net/ks8851.c | 39
-rw-r--r--  drivers/net/ll_temac_main.c | 14
-rw-r--r--  drivers/net/mac8390.c | 48
-rw-r--r--  drivers/net/macb.c | 2
-rw-r--r--  drivers/net/macvlan.c | 4
-rw-r--r--  drivers/net/macvtap.c | 99
-rw-r--r--  drivers/net/mlx4/Makefile | 2
-rw-r--r--  drivers/net/mlx4/alloc.c | 17
-rw-r--r--  drivers/net/mlx4/en_ethtool.c | 173
-rw-r--r--  drivers/net/mlx4/en_main.c | 24
-rw-r--r--  drivers/net/mlx4/en_netdev.c | 28
-rw-r--r--  drivers/net/mlx4/en_port.c | 32
-rw-r--r--  drivers/net/mlx4/en_port.h | 14
-rw-r--r--  drivers/net/mlx4/en_rx.c | 104
-rw-r--r--  drivers/net/mlx4/en_selftest.c | 179
-rw-r--r--  drivers/net/mlx4/en_tx.c | 20
-rw-r--r--  drivers/net/mlx4/eq.c | 44
-rw-r--r--  drivers/net/mlx4/fw.c | 15
-rw-r--r--  drivers/net/mlx4/fw.h | 6
-rw-r--r--  drivers/net/mlx4/main.c | 6
-rw-r--r--  drivers/net/mlx4/mlx4_en.h | 39
-rw-r--r--  drivers/net/mlx4/profile.c | 2
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 58
-rw-r--r--  drivers/net/myri_sbus.c | 10
-rw-r--r--  drivers/net/myri_sbus.h | 2
-rw-r--r--  drivers/net/natsemi.c | 2
-rw-r--r--  drivers/net/netxen/netxen_nic.h | 4
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c | 4
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 11
-rw-r--r--  drivers/net/niu.c | 64
-rw-r--r--  drivers/net/niu.h | 4
-rw-r--r--  drivers/net/ns83820.c | 53
-rw-r--r--  drivers/net/pasemi_mac.c | 2
-rw-r--r--  drivers/net/pasemi_mac_ethtool.c | 16
-rw-r--r--  drivers/net/pci-skeleton.c | 2
-rw-r--r--  drivers/net/pcmcia/3c574_cs.c | 102
-rw-r--r--  drivers/net/pcmcia/3c589_cs.c | 31
-rw-r--r--  drivers/net/pcmcia/axnet_cs.c | 235
-rw-r--r--  drivers/net/pcmcia/com20020_cs.c | 54
-rw-r--r--  drivers/net/pcmcia/fmvj18x_cs.c | 108
-rw-r--r--  drivers/net/pcmcia/ibmtr_cs.c | 55
-rw-r--r--  drivers/net/pcmcia/nmclan_cs.c | 86
-rw-r--r--  drivers/net/pcmcia/pcnet_cs.c | 180
-rw-r--r--  drivers/net/pcmcia/smc91c92_cs.c | 174
-rw-r--r--  drivers/net/pcmcia/xirc2ps_cs.c | 164
-rw-r--r--  drivers/net/phy/Kconfig | 2
-rw-r--r--  drivers/net/phy/mdio-gpio.c | 4
-rw-r--r--  drivers/net/phy/phy.c | 2
-rw-r--r--  drivers/net/phy/phy_device.c | 2
-rw-r--r--  drivers/net/plip.c | 9
-rw-r--r--  drivers/net/pptp.c | 726
-rw-r--r--  drivers/net/ps3_gelic_net.c | 4
-rw-r--r--  drivers/net/pxa168_eth.c | 1663
-rw-r--r--  drivers/net/qla3xxx.c | 4
-rw-r--r--  drivers/net/qlcnic/qlcnic.h | 108
-rw-r--r--  drivers/net/qlcnic/qlcnic_ctx.c | 267
-rw-r--r--  drivers/net/qlcnic/qlcnic_ethtool.c | 75
-rw-r--r--  drivers/net/qlcnic/qlcnic_hdr.h | 23
-rw-r--r--  drivers/net/qlcnic/qlcnic_hw.c | 77
-rw-r--r--  drivers/net/qlcnic/qlcnic_init.c | 310
-rw-r--r--  drivers/net/qlcnic/qlcnic_main.c | 1240
-rw-r--r--  drivers/net/qlge/qlge_main.c | 38
-rw-r--r--  drivers/net/r6040.c | 67
-rw-r--r--  drivers/net/r8169.c | 34
-rw-r--r--  drivers/net/rrunner.c | 2
-rw-r--r--  drivers/net/s2io.c | 37
-rw-r--r--  drivers/net/s2io.h | 9
-rw-r--r--  drivers/net/sb1250-mac.c | 2
-rw-r--r--  drivers/net/sc92031.c | 11
-rw-r--r--  drivers/net/sfc/efx.c | 302
-rw-r--r--  drivers/net/sfc/efx.h | 22
-rw-r--r--  drivers/net/sfc/ethtool.c | 53
-rw-r--r--  drivers/net/sfc/falcon.c | 14
-rw-r--r--  drivers/net/sfc/net_driver.h | 103
-rw-r--r--  drivers/net/sfc/nic.c | 182
-rw-r--r--  drivers/net/sfc/rx.c | 73
-rw-r--r--  drivers/net/sfc/selftest.c | 7
-rw-r--r--  drivers/net/sfc/siena.c | 2
-rw-r--r--  drivers/net/sfc/tx.c | 78
-rw-r--r--  drivers/net/sh_eth.c | 6
-rw-r--r--  drivers/net/sis900.c | 2
-rw-r--r--  drivers/net/skfp/skfddi.c | 84
-rw-r--r--  drivers/net/skge.c | 5
-rw-r--r--  drivers/net/sky2.c | 3
-rw-r--r--  drivers/net/slip.c | 91
-rw-r--r--  drivers/net/slip.h | 9
-rw-r--r--  drivers/net/smc91x.h | 37
-rw-r--r--  drivers/net/smsc911x.c | 2
-rw-r--r--  drivers/net/spider_net.c | 4
-rw-r--r--  drivers/net/starfire.c | 10
-rw-r--r--  drivers/net/stmmac/Kconfig | 5
-rw-r--r--  drivers/net/stmmac/common.h | 54
-rw-r--r--  drivers/net/stmmac/dwmac1000.h | 2
-rw-r--r--  drivers/net/stmmac/dwmac1000_core.c | 33
-rw-r--r--  drivers/net/stmmac/dwmac1000_dma.c | 18
-rw-r--r--  drivers/net/stmmac/dwmac100_core.c | 28
-rw-r--r--  drivers/net/stmmac/dwmac100_dma.c | 18
-rw-r--r--  drivers/net/stmmac/dwmac_dma.h | 16
-rw-r--r--  drivers/net/stmmac/dwmac_lib.c | 22
-rw-r--r--  drivers/net/stmmac/enh_desc.c | 4
-rw-r--r--  drivers/net/stmmac/norm_desc.c | 19
-rw-r--r--  drivers/net/stmmac/stmmac.h | 9
-rw-r--r--  drivers/net/stmmac/stmmac_ethtool.c | 23
-rw-r--r--  drivers/net/stmmac/stmmac_main.c | 203
-rw-r--r--  drivers/net/stmmac/stmmac_mdio.c | 26
-rw-r--r--  drivers/net/sunbmac.c | 26
-rw-r--r--  drivers/net/sunbmac.h | 4
-rw-r--r--  drivers/net/sundance.c | 50
-rw-r--r--  drivers/net/sungem.c | 211
-rw-r--r--  drivers/net/sungem_phy.c | 3
-rw-r--r--  drivers/net/sunhme.c | 40
-rw-r--r--  drivers/net/sunhme.h | 2
-rw-r--r--  drivers/net/sunlance.c | 28
-rw-r--r--  drivers/net/sunqe.c | 26
-rw-r--r--  drivers/net/sunqe.h | 4
-rw-r--r--  drivers/net/sunvnet.c | 50
-rw-r--r--  drivers/net/tehuti.c | 34
-rw-r--r--  drivers/net/tehuti.h | 1
-rw-r--r--  drivers/net/tg3.c | 160
-rw-r--r--  drivers/net/tg3.h | 22
-rw-r--r--  drivers/net/tlan.c | 8
-rw-r--r--  drivers/net/tokenring/tms380tr.c | 6
-rw-r--r--  drivers/net/tulip/de4x5.c | 20
-rw-r--r--  drivers/net/tulip/dmfe.c | 2
-rw-r--r--  drivers/net/tulip/interrupt.c | 77
-rw-r--r--  drivers/net/tulip/tulip.h | 3
-rw-r--r--  drivers/net/tulip/tulip_core.c | 10
-rw-r--r--  drivers/net/tulip/uli526x.c | 2
-rw-r--r--  drivers/net/tulip/winbond-840.c | 2
-rw-r--r--  drivers/net/tulip/xircom_cb.c | 15
-rw-r--r--  drivers/net/typhoon.c | 48
-rw-r--r--  drivers/net/ucc_geth.c | 8
-rw-r--r--  drivers/net/usb/Kconfig | 8
-rw-r--r--  drivers/net/usb/Makefile | 1
-rw-r--r--  drivers/net/usb/cx82310_eth.c | 346
-rw-r--r--  drivers/net/usb/hso.c | 9
-rw-r--r--  drivers/net/usb/ipheth.c | 12
-rw-r--r--  drivers/net/usb/kaweth.c | 9
-rw-r--r--  drivers/net/usb/usbnet.c | 22
-rw-r--r--  drivers/net/via-velocity.c | 4
-rw-r--r--  drivers/net/via-velocity.h | 11
-rw-r--r--  drivers/net/virtio_net.c | 14
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 4
-rw-r--r--  drivers/net/vxge/vxge-main.c | 34
-rw-r--r--  drivers/net/vxge/vxge-main.h | 1
-rw-r--r--  drivers/net/wan/c101.c | 2
-rw-r--r--  drivers/net/wan/cycx_drv.c | 14
-rw-r--r--  drivers/net/wan/cycx_main.c | 6
-rw-r--r--  drivers/net/wan/farsync.c | 15
-rw-r--r--  drivers/net/wan/hdlc_cisco.c | 4
-rw-r--r--  drivers/net/wan/ixp4xx_hss.c | 2
-rw-r--r--  drivers/net/wan/lapbether.c | 2
-rw-r--r--  drivers/net/wan/lmc/lmc_main.c | 2
-rw-r--r--  drivers/net/wan/n2.c | 2
-rw-r--r--  drivers/net/wan/pc300_drv.c | 2
-rw-r--r--  drivers/net/wan/pci200syn.c | 2
-rw-r--r--  drivers/net/wan/z85230.c | 4
-rw-r--r--  drivers/net/wd.c | 8
-rw-r--r--  drivers/net/wireless/airo.c | 5
-rw-r--r--  drivers/net/wireless/airo_cs.c | 74
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom.h | 2
-rw-r--r--  drivers/net/wireless/ath/regd.h | 1
-rw-r--r--  drivers/net/wireless/atmel_cs.c | 25
-rw-r--r--  drivers/net/wireless/b43/pcmcia.c | 13
-rw-r--r--  drivers/net/wireless/b43/phy_n.c | 2
-rw-r--r--  drivers/net/wireless/hostap/hostap_cs.c | 136
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2100.c | 6
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2200.c | 12
-rw-r--r--  drivers/net/wireless/libertas/cfg.c | 2
-rw-r--r--  drivers/net/wireless/libertas/if_cs.c | 16
-rw-r--r--  drivers/net/wireless/libertas/if_sdio.c | 12
-rw-r--r--  drivers/net/wireless/libertas/if_usb.c | 3
-rw-r--r--  drivers/net/wireless/libertas_tf/if_usb.c | 3
-rw-r--r--  drivers/net/wireless/orinoco/orinoco_cs.c | 30
-rw-r--r--  drivers/net/wireless/orinoco/spectrum_cs.c | 62
-rw-r--r--  drivers/net/wireless/p54/txrx.c | 2
-rw-r--r--  drivers/net/wireless/ray_cs.c | 55
-rw-r--r--  drivers/net/wireless/rt2x00/rt2400pci.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500pci.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500usb.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00debug.c | 4
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.c | 4
-rw-r--r--  drivers/net/wireless/rt2x00/rt61pci.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.c | 2
-rw-r--r--  drivers/net/wireless/wl12xx/wl1271_scan.c | 2
-rw-r--r--  drivers/net/wireless/wl3501_cs.c | 33
-rw-r--r--  drivers/net/xen-netfront.c | 8
-rw-r--r--  drivers/net/xilinx_emaclite.c | 23
-rw-r--r--  drivers/net/yellowfin.c | 2
363 files changed, 37232 insertions, 10491 deletions
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index baac246561b9..4777a1cbcd8d 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -337,10 +337,10 @@ el2_probe1(struct net_device *dev, int ioaddr)
/* Finish setting the board's parameters. */
ei_status.stop_page = EL2_MB1_STOP_PG;
ei_status.word16 = wordlength;
- ei_status.reset_8390 = &el2_reset_8390;
- ei_status.get_8390_hdr = &el2_get_8390_hdr;
- ei_status.block_input = &el2_block_input;
- ei_status.block_output = &el2_block_output;
+ ei_status.reset_8390 = el2_reset_8390;
+ ei_status.get_8390_hdr = el2_get_8390_hdr;
+ ei_status.block_input = el2_block_input;
+ ei_status.block_output = el2_block_output;
if (dev->irq == 2)
dev->irq = 9;
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 3bba835f1a21..cdf7226a7c43 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -662,7 +662,9 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
pr_warning(" *** Warning: this IRQ is unlikely to work! ***\n");
{
- char *ram_split[] = { "5:3", "3:1", "1:1", "3:5" };
+ static const char * const ram_split[] = {
+ "5:3", "3:1", "1:1", "3:5"
+ };
__u32 config;
EL3WINDOW(3);
vp->available_media = inw(ioaddr + Wn3_Options);
@@ -734,7 +736,7 @@ static int corkscrew_open(struct net_device *dev)
init_timer(&vp->timer);
vp->timer.expires = jiffies + media_tbl[dev->if_port].wait;
vp->timer.data = (unsigned long) dev;
- vp->timer.function = &corkscrew_timer; /* timer handler */
+ vp->timer.function = corkscrew_timer; /* timer handler */
add_timer(&vp->timer);
} else
dev->if_port = vp->default_media;
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index c754d88e5ec9..e31a6d1919c6 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -633,7 +633,8 @@ struct vortex_private {
open:1,
medialock:1,
must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */
- large_frames:1; /* accept large frames */
+ large_frames:1, /* accept large frames */
+ handling_irq:1; /* private in_irq indicator */
int drv_flags;
u16 status_enable;
u16 intr_enable;
@@ -646,7 +647,7 @@ struct vortex_private {
u16 io_size; /* Size of PCI region (for release_region) */
/* Serialises access to hardware other than MII and variables below.
- * The lock hierarchy is rtnl_lock > lock > mii_lock > window_lock. */
+ * The lock hierarchy is rtnl_lock > {lock, mii_lock} > window_lock. */
spinlock_t lock;
spinlock_t mii_lock; /* Serialises access to MII */
@@ -1738,7 +1739,7 @@ vortex_open(struct net_device *dev)
/* Use the now-standard shared IRQ implementation. */
if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
- &boomerang_interrupt : &vortex_interrupt, IRQF_SHARED, dev->name, dev))) {
+ boomerang_interrupt : vortex_interrupt, IRQF_SHARED, dev->name, dev))) {
pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
goto err;
}
@@ -1993,10 +1994,9 @@ vortex_error(struct net_device *dev, int status)
}
}
- if (status & RxEarly) { /* Rx early is unused. */
- vortex_rx(dev);
+ if (status & RxEarly) /* Rx early is unused. */
iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD);
- }
+
if (status & StatsFull) { /* Empty statistics. */
static int DoneDidThat;
if (vortex_debug > 4)
@@ -2133,6 +2133,15 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->name, vp->cur_tx);
}
+ /*
+ * We can't allow a recursion from our interrupt handler back into the
+ * tx routine, as they take the same spin lock, and that causes
+ * deadlock. Just return NETDEV_TX_BUSY and let the stack try again in
+ * a bit
+ */
+ if (vp->handling_irq)
+ return NETDEV_TX_BUSY;
+
if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
if (vortex_debug > 0)
pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n",
@@ -2288,7 +2297,12 @@ vortex_interrupt(int irq, void *dev_id)
if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
if (status == 0xffff)
break;
+ if (status & RxEarly)
+ vortex_rx(dev);
+ spin_unlock(&vp->window_lock);
vortex_error(dev, status);
+ spin_lock(&vp->window_lock);
+ window_set(vp, 7);
}
if (--work_done < 0) {
@@ -2335,11 +2349,13 @@ boomerang_interrupt(int irq, void *dev_id)
ioaddr = vp->ioaddr;
+
/*
* It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
* and boomerang_start_xmit
*/
spin_lock(&vp->lock);
+ vp->handling_irq = 1;
status = ioread16(ioaddr + EL3_STATUS);
@@ -2447,6 +2463,7 @@ boomerang_interrupt(int irq, void *dev_id)
pr_debug("%s: exiting interrupt, status %4.4x.\n",
dev->name, status);
handler_exit:
+ vp->handling_irq = 0;
spin_unlock(&vp->lock);
return IRQ_HANDLED;
}
@@ -2971,7 +2988,6 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
int err;
struct vortex_private *vp = netdev_priv(dev);
- unsigned long flags;
pci_power_t state = 0;
if(VORTEX_PCI(vp))
@@ -2981,9 +2997,7 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if(state != 0)
pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
- spin_lock_irqsave(&vp->lock, flags);
err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
- spin_unlock_irqrestore(&vp->lock, flags);
if(state != 0)
pci_set_power_state(VORTEX_PCI(vp), state);
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 4a4f6b81e32d..237d4ea5a416 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -561,7 +561,7 @@ rx_status_loop:
if (cp_rx_csum_ok(status))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
skb_put(skb, len);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ebe68395ecf8..53c4810b119e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -484,7 +484,7 @@ config XTENSA_XT2000_SONIC
config MIPS_AU1X00_ENET
tristate "MIPS AU1000 Ethernet support"
- depends on SOC_AU1X00
+ depends on MIPS_ALCHEMY
select PHYLIB
select CRC32
help
@@ -914,7 +914,7 @@ config SMC91X
tristate "SMC 91C9x/91C1xxx support"
select CRC32
select MII
- depends on ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || \
+ depends on ARM || M32R || SUPERH || \
MIPS || BLACKFIN || MN10300 || COLDFIRE
help
This is a driver for SMC's 91x series of Ethernet chipsets,
@@ -928,6 +928,16 @@ config SMC91X
The module will be called smc91x. If you want to compile it as a
module, say M here and read <file:Documentation/kbuild/modules.txt>.
+config PXA168_ETH
+ tristate "Marvell pxa168 ethernet support"
+ depends on CPU_PXA168
+ select PHYLIB
+ help
+ This driver supports the pxa168 Ethernet ports.
+
+ To compile this driver as a module, choose M here. The module
+ will be called pxa168_eth.
+
config NET_NETX
tristate "NetX Ethernet support"
select MII
@@ -2859,6 +2869,20 @@ config QLGE
To compile this driver as a module, choose M here: the module
will be called qlge.
+config BNA
+ tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
+ depends on PCI
+ ---help---
+ This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
+ cards.
+ To compile this driver as a module, choose M here: the module
+ will be called bna.
+
+ For general information and support, go to the Brocade support
+ website at:
+
+ <http://support.brocade.com>
+
source "drivers/net/sfc/Kconfig"
source "drivers/net/benet/Kconfig"
@@ -3192,6 +3216,17 @@ config PPPOE
which contains instruction on how to use this driver (under
the heading "Kernel mode PPPoE").
+config PPTP
+ tristate "PPP over IPv4 (PPTP) (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && PPP && NET_IPGRE_DEMUX
+ help
+ Support for PPP over IPv4.(Point-to-Point Tunneling Protocol)
+
+ This driver requires pppd plugin to work in client mode or
+ modified pptpd (poptop) to work in server mode.
+ See http://accel-pptp.sourceforge.net/ for information how to
+ utilize this module.
+
config PPPOATM
tristate "PPP over ATM"
depends on ATM && PPP
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 56e8c27f77ce..18a277709a2a 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_ENIC) += enic/
obj-$(CONFIG_JME) += jme.o
obj-$(CONFIG_BE2NET) += benet/
obj-$(CONFIG_VMXNET3) += vmxnet3/
+obj-$(CONFIG_BNA) += bna/
gianfar_driver-objs := gianfar.o \
gianfar_ethtool.o \
@@ -162,6 +163,7 @@ obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o
obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
obj-$(CONFIG_PPPOL2TP) += pppox.o
+obj-$(CONFIG_PPTP) += pppox.o pptp.o
obj-$(CONFIG_SLIP) += slip.o
obj-$(CONFIG_SLHC) += slhc.o
@@ -244,6 +246,7 @@ obj-$(CONFIG_MYRI10GE) += myri10ge/
obj-$(CONFIG_SMC91X) += smc91x.o
obj-$(CONFIG_SMC911X) += smc911x.o
obj-$(CONFIG_SMSC911X) += smsc911x.o
+obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_BFIN_MAC) += bfin_mac.o
obj-$(CONFIG_DM9000) += dm9000.o
obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index b9a591604e5b..41d9911202d0 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -2033,7 +2033,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
skb->csum = htons(csum);
skb->ip_summed = CHECKSUM_COMPLETE;
} else {
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
}
/* send it up */
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 585c25f4b60c..58a0ab4923ee 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -396,7 +396,7 @@ static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
event_count = coal_conf->rx_event_count;
if( timeout > MAX_TIMEOUT ||
event_count > MAX_EVENT_COUNT )
- return -EINVAL;
+ return -EINVAL;
timeout = timeout * DELAY_TIMER_CONV;
writel(VAL0|STINTEN, mmio+INTEN0);
@@ -409,7 +409,7 @@ static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
event_count = coal_conf->tx_event_count;
if( timeout > MAX_TIMEOUT ||
event_count > MAX_EVENT_COUNT )
- return -EINVAL;
+ return -EINVAL;
timeout = timeout * DELAY_TIMER_CONV;
@@ -903,18 +903,18 @@ static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
}
/*
-This function reads the mib registers and returns the hardware statistics. It updates previous internal driver statistics with new values.
-*/
-static struct net_device_stats *amd8111e_get_stats(struct net_device * dev)
+ * This function reads the mib registers and returns the hardware statistics.
+ * It updates previous internal driver statistics with new values.
+ */
+static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
{
struct amd8111e_priv *lp = netdev_priv(dev);
void __iomem *mmio = lp->mmio;
unsigned long flags;
- /* struct net_device_stats *prev_stats = &lp->prev_stats; */
- struct net_device_stats* new_stats = &lp->stats;
+ struct net_device_stats *new_stats = &dev->stats;
- if(!lp->opened)
- return &lp->stats;
+ if (!lp->opened)
+ return new_stats;
spin_lock_irqsave (&lp->lock, flags);
/* stats.rx_packets */
diff --git a/drivers/net/amd8111e.h b/drivers/net/amd8111e.h
index ac36eb6981e3..b5926af03a7e 100644
--- a/drivers/net/amd8111e.h
+++ b/drivers/net/amd8111e.h
@@ -787,7 +787,6 @@ struct amd8111e_priv{
struct vlan_group *vlgrp;
#endif
char opened;
- struct net_device_stats stats;
unsigned int drv_rx_errors;
struct amd8111e_coalesce_conf coal_conf;
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 8c496fb1ac9e..62f21106efec 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -300,8 +300,6 @@ am79c961_open(struct net_device *dev)
struct dev_priv *priv = netdev_priv(dev);
int ret;
- memset (&priv->stats, 0, sizeof (priv->stats));
-
ret = request_irq(dev->irq, am79c961_interrupt, 0, dev->name, dev);
if (ret)
return ret;
@@ -347,8 +345,7 @@ am79c961_close(struct net_device *dev)
*/
static struct net_device_stats *am79c961_getstats (struct net_device *dev)
{
- struct dev_priv *priv = netdev_priv(dev);
- return &priv->stats;
+ return &dev->stats;
}
static void am79c961_mc_hash(char *addr, unsigned short *hash)
@@ -510,14 +507,14 @@ am79c961_rx(struct net_device *dev, struct dev_priv *priv)
if ((status & (RMD_ERR|RMD_STP|RMD_ENP)) != (RMD_STP|RMD_ENP)) {
am_writeword (dev, hdraddr + 2, RMD_OWN);
- priv->stats.rx_errors ++;
+ dev->stats.rx_errors++;
if (status & RMD_ERR) {
if (status & RMD_FRAM)
- priv->stats.rx_frame_errors ++;
+ dev->stats.rx_frame_errors++;
if (status & RMD_CRC)
- priv->stats.rx_crc_errors ++;
+ dev->stats.rx_crc_errors++;
} else if (status & RMD_STP)
- priv->stats.rx_length_errors ++;
+ dev->stats.rx_length_errors++;
continue;
}
@@ -531,12 +528,12 @@ am79c961_rx(struct net_device *dev, struct dev_priv *priv)
am_writeword(dev, hdraddr + 2, RMD_OWN);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
- priv->stats.rx_bytes += len;
- priv->stats.rx_packets ++;
+ dev->stats.rx_bytes += len;
+ dev->stats.rx_packets++;
} else {
am_writeword (dev, hdraddr + 2, RMD_OWN);
printk (KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name);
- priv->stats.rx_dropped ++;
+ dev->stats.rx_dropped++;
break;
}
} while (1);
@@ -565,7 +562,7 @@ am79c961_tx(struct net_device *dev, struct dev_priv *priv)
if (status & TMD_ERR) {
u_int status2;
- priv->stats.tx_errors ++;
+ dev->stats.tx_errors++;
status2 = am_readword (dev, hdraddr + 6);
@@ -575,18 +572,18 @@ am79c961_tx(struct net_device *dev, struct dev_priv *priv)
am_writeword (dev, hdraddr + 6, 0);
if (status2 & TST_RTRY)
- priv->stats.collisions += 16;
+ dev->stats.collisions += 16;
if (status2 & TST_LCOL)
- priv->stats.tx_window_errors ++;
+ dev->stats.tx_window_errors++;
if (status2 & TST_LCAR)
- priv->stats.tx_carrier_errors ++;
+ dev->stats.tx_carrier_errors++;
if (status2 & TST_UFLO)
- priv->stats.tx_fifo_errors ++;
+ dev->stats.tx_fifo_errors++;
continue;
}
- priv->stats.tx_packets ++;
+ dev->stats.tx_packets++;
len = am_readword (dev, hdraddr + 4);
- priv->stats.tx_bytes += -len;
+ dev->stats.tx_bytes += -len;
} while (priv->txtail != priv->txhead);
netif_wake_queue(dev);
@@ -616,7 +613,7 @@ am79c961_interrupt(int irq, void *dev_id)
}
if (status & CSR0_MISS) {
handled = 1;
- priv->stats.rx_dropped ++;
+ dev->stats.rx_dropped++;
}
if (status & CSR0_CERR) {
handled = 1;
diff --git a/drivers/net/arm/am79c961a.h b/drivers/net/arm/am79c961a.h
index 483009fe6ec2..fd634d32756b 100644
--- a/drivers/net/arm/am79c961a.h
+++ b/drivers/net/arm/am79c961a.h
@@ -130,7 +130,6 @@
#define ISALED0_LNKST 0x8000
struct dev_priv {
- struct net_device_stats stats;
unsigned long rxbuffer[RX_BUFFERS];
unsigned long txbuffer[TX_BUFFERS];
unsigned char txhead;
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 4a5ec9470aa1..5a77001b6d10 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -175,8 +175,6 @@ struct ep93xx_priv
struct net_device *dev;
struct napi_struct napi;
- struct net_device_stats stats;
-
struct mii_if_info mii;
u8 mdc_divisor;
};
@@ -230,12 +228,6 @@ static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int d
pr_info("mdio write timed out\n");
}
-static struct net_device_stats *ep93xx_get_stats(struct net_device *dev)
-{
- struct ep93xx_priv *ep = netdev_priv(dev);
- return &(ep->stats);
-}
-
static int ep93xx_rx(struct net_device *dev, int processed, int budget)
{
struct ep93xx_priv *ep = netdev_priv(dev);
@@ -267,15 +259,15 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
pr_crit("entry mismatch %.8x %.8x\n", rstat0, rstat1);
if (!(rstat0 & RSTAT0_RWE)) {
- ep->stats.rx_errors++;
+ dev->stats.rx_errors++;
if (rstat0 & RSTAT0_OE)
- ep->stats.rx_fifo_errors++;
+ dev->stats.rx_fifo_errors++;
if (rstat0 & RSTAT0_FE)
- ep->stats.rx_frame_errors++;
+ dev->stats.rx_frame_errors++;
if (rstat0 & (RSTAT0_RUNT | RSTAT0_EDATA))
- ep->stats.rx_length_errors++;
+ dev->stats.rx_length_errors++;
if (rstat0 & RSTAT0_CRCE)
- ep->stats.rx_crc_errors++;
+ dev->stats.rx_crc_errors++;
goto err;
}
@@ -300,10 +292,10 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
netif_receive_skb(skb);
- ep->stats.rx_packets++;
- ep->stats.rx_bytes += length;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += length;
} else {
- ep->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
}
err:
@@ -359,7 +351,7 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
int entry;
if (unlikely(skb->len > MAX_PKT_SIZE)) {
- ep->stats.tx_dropped++;
+ dev->stats.tx_dropped++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -415,17 +407,17 @@ static void ep93xx_tx_complete(struct net_device *dev)
if (tstat0 & TSTAT0_TXWE) {
int length = ep->descs->tdesc[entry].tdesc1 & 0xfff;
- ep->stats.tx_packets++;
- ep->stats.tx_bytes += length;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += length;
} else {
- ep->stats.tx_errors++;
+ dev->stats.tx_errors++;
}
if (tstat0 & TSTAT0_OW)
- ep->stats.tx_window_errors++;
+ dev->stats.tx_window_errors++;
if (tstat0 & TSTAT0_TXU)
- ep->stats.tx_fifo_errors++;
- ep->stats.collisions += (tstat0 >> 16) & 0x1f;
+ dev->stats.tx_fifo_errors++;
+ dev->stats.collisions += (tstat0 >> 16) & 0x1f;
ep->tx_clean_pointer = (entry + 1) & (TX_QUEUE_ENTRIES - 1);
if (ep->tx_pending == TX_QUEUE_ENTRIES)
@@ -758,7 +750,6 @@ static const struct net_device_ops ep93xx_netdev_ops = {
.ndo_open = ep93xx_open,
.ndo_stop = ep93xx_close,
.ndo_start_xmit = ep93xx_xmit,
- .ndo_get_stats = ep93xx_get_stats,
.ndo_do_ioctl = ep93xx_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c
index b17ab5153f51..b00781c02d5d 100644
--- a/drivers/net/arm/ether1.c
+++ b/drivers/net/arm/ether1.c
@@ -68,7 +68,6 @@ static int ether1_open(struct net_device *dev);
static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t ether1_interrupt(int irq, void *dev_id);
static int ether1_close(struct net_device *dev);
-static struct net_device_stats *ether1_getstats(struct net_device *dev);
static void ether1_setmulticastlist(struct net_device *dev);
static void ether1_timeout(struct net_device *dev);
@@ -649,8 +648,6 @@ ether1_open (struct net_device *dev)
if (request_irq(dev->irq, ether1_interrupt, 0, "ether1", dev))
return -EAGAIN;
- memset (&priv(dev)->stats, 0, sizeof (struct net_device_stats));
-
if (ether1_init_for_open (dev)) {
free_irq (dev->irq, dev);
return -EAGAIN;
@@ -673,7 +670,7 @@ ether1_timeout(struct net_device *dev)
if (ether1_init_for_open (dev))
printk (KERN_ERR "%s: unable to restart interface\n", dev->name);
- priv(dev)->stats.tx_errors++;
+ dev->stats.tx_errors++;
netif_wake_queue(dev);
}
@@ -802,21 +799,21 @@ again:
while (nop.nop_status & STAT_COMPLETE) {
if (nop.nop_status & STAT_OK) {
- priv(dev)->stats.tx_packets ++;
- priv(dev)->stats.collisions += (nop.nop_status & STAT_COLLISIONS);
+ dev->stats.tx_packets++;
+ dev->stats.collisions += (nop.nop_status & STAT_COLLISIONS);
} else {
- priv(dev)->stats.tx_errors ++;
+ dev->stats.tx_errors++;
if (nop.nop_status & STAT_COLLAFTERTX)
- priv(dev)->stats.collisions ++;
+ dev->stats.collisions++;
if (nop.nop_status & STAT_NOCARRIER)
- priv(dev)->stats.tx_carrier_errors ++;
+ dev->stats.tx_carrier_errors++;
if (nop.nop_status & STAT_TXLOSTCTS)
printk (KERN_WARNING "%s: cts lost\n", dev->name);
if (nop.nop_status & STAT_TXSLOWDMA)
- priv(dev)->stats.tx_fifo_errors ++;
+ dev->stats.tx_fifo_errors++;
if (nop.nop_status & STAT_COLLEXCESSIVE)
- priv(dev)->stats.collisions += 16;
+ dev->stats.collisions += 16;
}
if (nop.nop_link == caddr) {
@@ -879,13 +876,13 @@ ether1_recv_done (struct net_device *dev)
skb->protocol = eth_type_trans (skb, dev);
netif_rx (skb);
- priv(dev)->stats.rx_packets ++;
+ dev->stats.rx_packets++;
} else
- priv(dev)->stats.rx_dropped ++;
+ dev->stats.rx_dropped++;
} else {
printk(KERN_WARNING "%s: %s\n", dev->name,
(rbd.rbd_status & RBD_EOF) ? "oversized packet" : "acnt not valid");
- priv(dev)->stats.rx_dropped ++;
+ dev->stats.rx_dropped++;
}
nexttail = ether1_readw(dev, priv(dev)->rx_tail, rfd_t, rfd_link, NORMALIRQS);
@@ -939,7 +936,7 @@ ether1_interrupt (int irq, void *dev_id)
printk (KERN_WARNING "%s: RU went not ready: RU suspended\n", dev->name);
ether1_writew(dev, SCB_CMDRXRESUME, SCB_ADDR, scb_t, scb_command, NORMALIRQS);
writeb(CTRL_CA, REG_CONTROL);
- priv(dev)->stats.rx_dropped ++; /* we suspended due to lack of buffer space */
+ dev->stats.rx_dropped++; /* we suspended due to lack of buffer space */
} else
printk(KERN_WARNING "%s: RU went not ready: %04X\n", dev->name,
ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS));
@@ -962,12 +959,6 @@ ether1_close (struct net_device *dev)
return 0;
}
-static struct net_device_stats *
-ether1_getstats (struct net_device *dev)
-{
- return &priv(dev)->stats;
-}
-
/*
* Set or clear the multicast filter for this adaptor.
* num_addrs == -1 Promiscuous mode, receive all packets.
@@ -994,7 +985,6 @@ static const struct net_device_ops ether1_netdev_ops = {
.ndo_open = ether1_open,
.ndo_stop = ether1_close,
.ndo_start_xmit = ether1_sendpacket,
- .ndo_get_stats = ether1_getstats,
.ndo_set_multicast_list = ether1_setmulticastlist,
.ndo_tx_timeout = ether1_timeout,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/arm/ether1.h b/drivers/net/arm/ether1.h
index c8a4b2389d85..3a5830ab3dc7 100644
--- a/drivers/net/arm/ether1.h
+++ b/drivers/net/arm/ether1.h
@@ -38,7 +38,6 @@
struct ether1_priv {
void __iomem *base;
- struct net_device_stats stats;
unsigned int tx_link;
unsigned int tx_head;
volatile unsigned int tx_tail;
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index 1361b7367c28..44a8746f4014 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -81,7 +81,6 @@ static int ether3_open (struct net_device *dev);
static int ether3_sendpacket (struct sk_buff *skb, struct net_device *dev);
static irqreturn_t ether3_interrupt (int irq, void *dev_id);
static int ether3_close (struct net_device *dev);
-static struct net_device_stats *ether3_getstats (struct net_device *dev);
static void ether3_setmulticastlist (struct net_device *dev);
static void ether3_timeout(struct net_device *dev);
@@ -323,8 +322,6 @@ ether3_init_for_open(struct net_device *dev)
{
int i;
- memset(&priv(dev)->stats, 0, sizeof(struct net_device_stats));
-
/* Reset the chip */
ether3_outw(CFG2_RESET, REG_CONFIG2);
udelay(4);
@@ -442,15 +439,6 @@ ether3_close(struct net_device *dev)
}
/*
- * Get the current statistics. This may be called with the card open or
- * closed.
- */
-static struct net_device_stats *ether3_getstats(struct net_device *dev)
-{
- return &priv(dev)->stats;
-}
-
-/*
* Set or clear promiscuous/multicast mode filter for this adaptor.
*
* We don't attempt any packet filtering. The card may have a SEEQ 8004
@@ -490,7 +478,7 @@ static void ether3_timeout(struct net_device *dev)
local_irq_restore(flags);
priv(dev)->regs.config2 |= CFG2_CTRLO;
- priv(dev)->stats.tx_errors += 1;
+ dev->stats.tx_errors += 1;
ether3_outw(priv(dev)->regs.config2, REG_CONFIG2);
priv(dev)->tx_head = priv(dev)->tx_tail = 0;
@@ -509,7 +497,7 @@ ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
if (priv(dev)->broken) {
dev_kfree_skb(skb);
- priv(dev)->stats.tx_dropped ++;
+ dev->stats.tx_dropped++;
netif_start_queue(dev);
return NETDEV_TX_OK;
}
@@ -673,7 +661,7 @@ if (next_ptr < RX_START || next_ptr >= RX_END) {
} else
goto dropping;
} else {
- struct net_device_stats *stats = &priv(dev)->stats;
+ struct net_device_stats *stats = &dev->stats;
ether3_outw(next_ptr >> 8, REG_RECVEND);
if (status & RXSTAT_OVERSIZE) stats->rx_over_errors ++;
if (status & RXSTAT_CRCERROR) stats->rx_crc_errors ++;
@@ -685,14 +673,14 @@ if (next_ptr < RX_START || next_ptr >= RX_END) {
while (-- maxcnt);
done:
- priv(dev)->stats.rx_packets += received;
+ dev->stats.rx_packets += received;
priv(dev)->rx_head = next_ptr;
/*
* If rx went off line, then that means that the buffer may be full. We
* have dropped at least one packet.
*/
if (!(ether3_inw(REG_STATUS) & STAT_RXON)) {
- priv(dev)->stats.rx_dropped ++;
+ dev->stats.rx_dropped++;
ether3_outw(next_ptr, REG_RECVPTR);
ether3_outw(priv(dev)->regs.command | CMD_RXON, REG_COMMAND);
}
@@ -710,7 +698,7 @@ dropping:{
last_warned = jiffies;
printk("%s: memory squeeze, dropping packet.\n", dev->name);
}
- priv(dev)->stats.rx_dropped ++;
+ dev->stats.rx_dropped++;
goto done;
}
}
@@ -743,13 +731,13 @@ static void ether3_tx(struct net_device *dev)
* Update errors
*/
if (!(status & (TXSTAT_BABBLED | TXSTAT_16COLLISIONS)))
- priv(dev)->stats.tx_packets++;
+ dev->stats.tx_packets++;
else {
- priv(dev)->stats.tx_errors ++;
+ dev->stats.tx_errors++;
if (status & TXSTAT_16COLLISIONS)
- priv(dev)->stats.collisions += 16;
+ dev->stats.collisions += 16;
if (status & TXSTAT_BABBLED)
- priv(dev)->stats.tx_fifo_errors ++;
+ dev->stats.tx_fifo_errors++;
}
tx_tail = (tx_tail + 1) & 15;
@@ -773,7 +761,6 @@ static const struct net_device_ops ether3_netdev_ops = {
.ndo_open = ether3_open,
.ndo_stop = ether3_close,
.ndo_start_xmit = ether3_sendpacket,
- .ndo_get_stats = ether3_getstats,
.ndo_set_multicast_list = ether3_setmulticastlist,
.ndo_tx_timeout = ether3_timeout,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/arm/ether3.h b/drivers/net/arm/ether3.h
index 1921a3a07da7..2db63b08bdf3 100644
--- a/drivers/net/arm/ether3.h
+++ b/drivers/net/arm/ether3.h
@@ -164,7 +164,6 @@ struct dev_priv {
unsigned char tx_head; /* buffer nr to insert next packet */
unsigned char tx_tail; /* buffer nr of transmitting packet */
unsigned int rx_head; /* address to fetch next packet from */
- struct net_device_stats stats;
struct timer_list timer;
int broken; /* 0 = ok, 1 = something went wrong */
};
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 4f1cc7164ad9..6028226a7270 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -241,7 +241,7 @@ static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
static spinlock_t mdio_lock;
static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
-struct mii_bus *mdio_bus;
+static struct mii_bus *mdio_bus;
static int ports_open;
static struct port *npe_port_tab[MAX_NPES];
static struct dma_pool *dma_pool;
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index 52abbbdf8a08..ef4115b897bf 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -559,7 +559,6 @@ struct atl1c_adapter {
struct napi_struct napi;
struct atl1c_hw hw;
struct atl1c_hw_stats hw_stats;
- struct net_device_stats net_stats;
struct mii_if_info mii; /* MII interface info */
u16 rx_buffer_len;
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index d8501f060957..919080b2c3a5 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -480,7 +480,7 @@ int atl1c_phy_reset(struct atl1c_hw *hw)
atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x929D);
}
if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b2
- || hw->nic_type == athr_l2c || hw->nic_type == athr_l2c) {
+ || hw->nic_type == athr_l2c) {
atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD);
}
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index c7b8ef507ebd..553230eb365c 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -1562,7 +1562,7 @@ static struct net_device_stats *atl1c_get_stats(struct net_device *netdev)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct atl1c_hw_stats *hw_stats = &adapter->hw_stats;
- struct net_device_stats *net_stats = &adapter->net_stats;
+ struct net_device_stats *net_stats = &netdev->stats;
atl1c_update_hw_stats(adapter);
net_stats->rx_packets = hw_stats->rx_ok;
@@ -1590,7 +1590,7 @@ static struct net_device_stats *atl1c_get_stats(struct net_device *netdev)
net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
net_stats->tx_window_errors = hw_stats->tx_late_col;
- return &adapter->net_stats;
+ return net_stats;
}
static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter)
@@ -1700,7 +1700,7 @@ static irqreturn_t atl1c_intr(int irq, void *data)
/* link event */
if (status & (ISR_GPHY | ISR_MANUAL)) {
- adapter->net_stats.tx_carrier_errors++;
+ netdev->stats.tx_carrier_errors++;
atl1c_link_chg_event(adapter);
break;
}
@@ -1719,7 +1719,7 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
* cannot figure out if the packet is fragmented or not,
* so we tell the KERNEL CHECKSUM_NONE
*/
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
}
static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid)
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 1acea5774e89..56ace3fbe40d 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -1331,7 +1331,7 @@ static inline void atl1e_rx_checksum(struct atl1e_adapter *adapter,
u16 pkt_flags;
u16 err_flags;
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
pkt_flags = prrs->pkt_flag;
err_flags = prrs->err_flag;
if (((pkt_flags & RRS_IS_IPV4) || (pkt_flags & RRS_IS_IPV6)) &&
@@ -2316,7 +2316,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);
init_timer(&adapter->phy_config_timer);
- adapter->phy_config_timer.function = &atl1e_phy_config;
+ adapter->phy_config_timer.function = atl1e_phy_config;
adapter->phy_config_timer.data = (unsigned long) adapter;
/* get user settings */
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 63b9ba0cc67e..e1e0171d6e62 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1805,7 +1805,7 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
* the higher layers and let it be sorted out there.
*/
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
@@ -3036,7 +3036,7 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
netif_carrier_off(netdev);
netif_stop_queue(netdev);
- setup_timer(&adapter->phy_config_timer, &atl1_phy_config,
+ setup_timer(&adapter->phy_config_timer, atl1_phy_config,
(unsigned long)adapter);
adapter->phy_timer_pending = false;
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index 8da87383fb39..29c0265ccc5d 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -51,10 +51,10 @@
#define ATL2_DRV_VERSION "2.2.3"
-static char atl2_driver_name[] = "atl2";
+static const char atl2_driver_name[] = "atl2";
static const char atl2_driver_string[] = "Atheros(R) L2 Ethernet Driver";
-static char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation.";
-static char atl2_driver_version[] = ATL2_DRV_VERSION;
+static const char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation.";
+static const char atl2_driver_version[] = ATL2_DRV_VERSION;
MODULE_AUTHOR("Atheros Corporation <xiong.huang@atheros.com>, Chris Snook <csnook@redhat.com>");
MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver");
@@ -1444,11 +1444,11 @@ static int __devinit atl2_probe(struct pci_dev *pdev,
atl2_check_options(adapter);
init_timer(&adapter->watchdog_timer);
- adapter->watchdog_timer.function = &atl2_watchdog;
+ adapter->watchdog_timer.function = atl2_watchdog;
adapter->watchdog_timer.data = (unsigned long) adapter;
init_timer(&adapter->phy_config_timer);
- adapter->phy_config_timer.function = &atl2_phy_config;
+ adapter->phy_config_timer.function = atl2_phy_config;
adapter->phy_config_timer.data = (unsigned long) adapter;
INIT_WORK(&adapter->reset_task, atl2_reset_task);
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index bd2f9d331dac..dfd96b20547f 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -445,7 +445,7 @@ static int net_open(struct net_device *dev)
init_timer(&lp->timer);
lp->timer.expires = jiffies + TIMED_CHECKER;
lp->timer.data = (unsigned long)dev;
- lp->timer.function = &atp_timed_checker; /* timer handler */
+ lp->timer.function = atp_timed_checker; /* timer handler */
add_timer(&lp->timer);
netif_start_queue(dev);
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 386d4feec652..43489f89c142 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -13,7 +13,7 @@
* converted to use linux-2.6.x's PHY framework
*
* Author: MontaVista Software, Inc.
- * ppopov@mvista.com or source@mvista.com
+ * ppopov@mvista.com or source@mvista.com
*
* ########################################################################
*
@@ -34,6 +34,8 @@
*
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/capability.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
@@ -56,11 +58,11 @@
#include <linux/crc32.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/cpu.h>
+#include <linux/io.h>
-#include <asm/cpu.h>
#include <asm/mipsregs.h>
#include <asm/irq.h>
-#include <asm/io.h>
#include <asm/processor.h>
#include <au1000.h>
@@ -104,14 +106,6 @@ MODULE_VERSION(DRV_VERSION);
* complete immediately.
*/
-/* These addresses are only used if yamon doesn't tell us what
- * the mac address is, and the mac address is not passed on the
- * command line.
- */
-static unsigned char au1000_mac_addr[6] __devinitdata = {
- 0x00, 0x50, 0xc2, 0x0c, 0x30, 0x00
-};
-
struct au1000_private *au_macs[NUM_ETH_INTERFACES];
/*
@@ -160,11 +154,11 @@ static void au1000_enable_mac(struct net_device *dev, int force_reset)
spin_lock_irqsave(&aup->lock, flags);
- if(force_reset || (!aup->mac_enabled)) {
- *aup->enable = MAC_EN_CLOCK_ENABLE;
+ if (force_reset || (!aup->mac_enabled)) {
+ writel(MAC_EN_CLOCK_ENABLE, &aup->enable);
au_sync_delay(2);
- *aup->enable = (MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
- | MAC_EN_CLOCK_ENABLE);
+ writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
+ | MAC_EN_CLOCK_ENABLE), &aup->enable);
au_sync_delay(2);
aup->mac_enabled = 1;
@@ -179,12 +173,12 @@ static void au1000_enable_mac(struct net_device *dev, int force_reset)
static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
{
struct au1000_private *aup = netdev_priv(dev);
- volatile u32 *const mii_control_reg = &aup->mac->mii_control;
- volatile u32 *const mii_data_reg = &aup->mac->mii_data;
+ u32 *const mii_control_reg = &aup->mac->mii_control;
+ u32 *const mii_data_reg = &aup->mac->mii_data;
u32 timedout = 20;
u32 mii_control;
- while (*mii_control_reg & MAC_MII_BUSY) {
+ while (readl(mii_control_reg) & MAC_MII_BUSY) {
mdelay(1);
if (--timedout == 0) {
netdev_err(dev, "read_MII busy timeout!!\n");
@@ -195,29 +189,29 @@ static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
mii_control = MAC_SET_MII_SELECT_REG(reg) |
MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_READ;
- *mii_control_reg = mii_control;
+ writel(mii_control, mii_control_reg);
timedout = 20;
- while (*mii_control_reg & MAC_MII_BUSY) {
+ while (readl(mii_control_reg) & MAC_MII_BUSY) {
mdelay(1);
if (--timedout == 0) {
netdev_err(dev, "mdio_read busy timeout!!\n");
return -1;
}
}
- return (int)*mii_data_reg;
+ return readl(mii_data_reg);
}
static void au1000_mdio_write(struct net_device *dev, int phy_addr,
int reg, u16 value)
{
struct au1000_private *aup = netdev_priv(dev);
- volatile u32 *const mii_control_reg = &aup->mac->mii_control;
- volatile u32 *const mii_data_reg = &aup->mac->mii_data;
+ u32 *const mii_control_reg = &aup->mac->mii_control;
+ u32 *const mii_data_reg = &aup->mac->mii_data;
u32 timedout = 20;
u32 mii_control;
- while (*mii_control_reg & MAC_MII_BUSY) {
+ while (readl(mii_control_reg) & MAC_MII_BUSY) {
mdelay(1);
if (--timedout == 0) {
netdev_err(dev, "mdio_write busy timeout!!\n");
@@ -228,18 +222,22 @@ static void au1000_mdio_write(struct net_device *dev, int phy_addr,
mii_control = MAC_SET_MII_SELECT_REG(reg) |
MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_WRITE;
- *mii_data_reg = value;
- *mii_control_reg = mii_control;
+ writel(value, mii_data_reg);
+ writel(mii_control, mii_control_reg);
}
static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{
/* WARNING: bus->phy_map[phy_addr].attached_dev == dev does
- * _NOT_ hold (e.g. when PHY is accessed through other MAC's MII bus) */
+ * _NOT_ hold (e.g. when PHY is accessed through other MAC's MII bus)
+ */
struct net_device *const dev = bus->priv;
- au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
- * mii_bus is enabled */
+ /* make sure the MAC associated with this
+ * mii_bus is enabled
+ */
+ au1000_enable_mac(dev, 0);
+
return au1000_mdio_read(dev, phy_addr, regnum);
}
@@ -248,8 +246,11 @@ static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
{
struct net_device *const dev = bus->priv;
- au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
- * mii_bus is enabled */
+ /* make sure the MAC associated with this
+ * mii_bus is enabled
+ */
+ au1000_enable_mac(dev, 0);
+
au1000_mdio_write(dev, phy_addr, regnum, value);
return 0;
}
@@ -258,28 +259,37 @@ static int au1000_mdiobus_reset(struct mii_bus *bus)
{
struct net_device *const dev = bus->priv;
- au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
- * mii_bus is enabled */
+ /* make sure the MAC associated with this
+ * mii_bus is enabled
+ */
+ au1000_enable_mac(dev, 0);
+
return 0;
}
static void au1000_hard_stop(struct net_device *dev)
{
struct au1000_private *aup = netdev_priv(dev);
+ u32 reg;
netif_dbg(aup, drv, dev, "hard stop\n");
- aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
+ reg = readl(&aup->mac->control);
+ reg &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
+ writel(reg, &aup->mac->control);
au_sync_delay(10);
}
static void au1000_enable_rx_tx(struct net_device *dev)
{
struct au1000_private *aup = netdev_priv(dev);
+ u32 reg;
netif_dbg(aup, hw, dev, "enable_rx_tx\n");
- aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
+ reg = readl(&aup->mac->control);
+ reg |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
+ writel(reg, &aup->mac->control);
au_sync_delay(10);
}
@@ -289,6 +299,7 @@ au1000_adjust_link(struct net_device *dev)
struct au1000_private *aup = netdev_priv(dev);
struct phy_device *phydev = aup->phy_dev;
unsigned long flags;
+ u32 reg;
int status_change = 0;
@@ -320,14 +331,15 @@ au1000_adjust_link(struct net_device *dev)
/* switching duplex mode requires to disable rx and tx! */
au1000_hard_stop(dev);
- if (DUPLEX_FULL == phydev->duplex)
- aup->mac->control = ((aup->mac->control
- | MAC_FULL_DUPLEX)
- & ~MAC_DISABLE_RX_OWN);
- else
- aup->mac->control = ((aup->mac->control
- & ~MAC_FULL_DUPLEX)
- | MAC_DISABLE_RX_OWN);
+ reg = readl(&aup->mac->control);
+ if (DUPLEX_FULL == phydev->duplex) {
+ reg |= MAC_FULL_DUPLEX;
+ reg &= ~MAC_DISABLE_RX_OWN;
+ } else {
+ reg &= ~MAC_FULL_DUPLEX;
+ reg |= MAC_DISABLE_RX_OWN;
+ }
+ writel(reg, &aup->mac->control);
au_sync_delay(1);
au1000_enable_rx_tx(dev);
@@ -361,10 +373,11 @@ au1000_adjust_link(struct net_device *dev)
}
}
-static int au1000_mii_probe (struct net_device *dev)
+static int au1000_mii_probe(struct net_device *dev)
{
struct au1000_private *const aup = netdev_priv(dev);
struct phy_device *phydev = NULL;
+ int phy_addr;
if (aup->phy_static_config) {
BUG_ON(aup->mac_id < 0 || aup->mac_id > 1);
@@ -374,42 +387,46 @@ static int au1000_mii_probe (struct net_device *dev)
else
netdev_info(dev, "using PHY-less setup\n");
return 0;
- } else {
- int phy_addr;
-
- /* find the first (lowest address) PHY on the current MAC's MII bus */
- for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
- if (aup->mii_bus->phy_map[phy_addr]) {
- phydev = aup->mii_bus->phy_map[phy_addr];
- if (!aup->phy_search_highest_addr)
- break; /* break out with first one found */
- }
-
- if (aup->phy1_search_mac0) {
- /* try harder to find a PHY */
- if (!phydev && (aup->mac_id == 1)) {
- /* no PHY found, maybe we have a dual PHY? */
- dev_info(&dev->dev, ": no PHY found on MAC1, "
- "let's see if it's attached to MAC0...\n");
-
- /* find the first (lowest address) non-attached PHY on
- * the MAC0 MII bus */
- for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
- struct phy_device *const tmp_phydev =
- aup->mii_bus->phy_map[phy_addr];
-
- if (aup->mac_id == 1)
- break;
-
- if (!tmp_phydev)
- continue; /* no PHY here... */
+ }
- if (tmp_phydev->attached_dev)
- continue; /* already claimed by MAC0 */
+ /* find the first (lowest address) PHY
+ * on the current MAC's MII bus
+ */
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
+ if (aup->mii_bus->phy_map[phy_addr]) {
+ phydev = aup->mii_bus->phy_map[phy_addr];
+ if (!aup->phy_search_highest_addr)
+ /* break out with first one found */
+ break;
+ }
- phydev = tmp_phydev;
- break; /* found it */
- }
+ if (aup->phy1_search_mac0) {
+ /* try harder to find a PHY */
+ if (!phydev && (aup->mac_id == 1)) {
+ /* no PHY found, maybe we have a dual PHY? */
+ dev_info(&dev->dev, ": no PHY found on MAC1, "
+ "let's see if it's attached to MAC0...\n");
+
+ /* find the first (lowest address) non-attached
+ * PHY on the MAC0 MII bus
+ */
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+ struct phy_device *const tmp_phydev =
+ aup->mii_bus->phy_map[phy_addr];
+
+ if (aup->mac_id == 1)
+ break;
+
+ /* no PHY here... */
+ if (!tmp_phydev)
+ continue;
+
+ /* already claimed by MAC0 */
+ if (tmp_phydev->attached_dev)
+ continue;
+
+ phydev = tmp_phydev;
+ break; /* found it */
}
}
}
@@ -460,20 +477,20 @@ static int au1000_mii_probe (struct net_device *dev)
* has the virtual and dma address of a buffer suitable for
* both, receive and transmit operations.
*/
-static db_dest_t *au1000_GetFreeDB(struct au1000_private *aup)
+static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup)
{
- db_dest_t *pDB;
+ struct db_dest *pDB;
pDB = aup->pDBfree;
- if (pDB) {
+ if (pDB)
aup->pDBfree = pDB->pnext;
- }
+
return pDB;
}
-void au1000_ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
+void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
{
- db_dest_t *pDBfree = aup->pDBfree;
+ struct db_dest *pDBfree = aup->pDBfree;
if (pDBfree)
pDBfree->pnext = pDB;
aup->pDBfree = pDB;
@@ -486,9 +503,9 @@ static void au1000_reset_mac_unlocked(struct net_device *dev)
au1000_hard_stop(dev);
- *aup->enable = MAC_EN_CLOCK_ENABLE;
+ writel(MAC_EN_CLOCK_ENABLE, &aup->enable);
au_sync_delay(2);
- *aup->enable = 0;
+ writel(0, &aup->enable);
au_sync_delay(2);
aup->tx_full = 0;
@@ -515,7 +532,7 @@ static void au1000_reset_mac(struct net_device *dev)
spin_lock_irqsave(&aup->lock, flags);
- au1000_reset_mac_unlocked (dev);
+ au1000_reset_mac_unlocked(dev);
spin_unlock_irqrestore(&aup->lock, flags);
}
@@ -532,11 +549,13 @@ au1000_setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
for (i = 0; i < NUM_RX_DMA; i++) {
aup->rx_dma_ring[i] =
- (volatile rx_dma_t *) (rx_base + sizeof(rx_dma_t)*i);
+ (struct rx_dma *)
+ (rx_base + sizeof(struct rx_dma)*i);
}
for (i = 0; i < NUM_TX_DMA; i++) {
aup->tx_dma_ring[i] =
- (volatile tx_dma_t *) (tx_base + sizeof(tx_dma_t)*i);
+ (struct tx_dma *)
+ (tx_base + sizeof(struct tx_dma)*i);
}
}
@@ -624,18 +643,21 @@ static int au1000_init(struct net_device *dev)
spin_lock_irqsave(&aup->lock, flags);
- aup->mac->control = 0;
+ writel(0, &aup->mac->control);
aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
aup->tx_tail = aup->tx_head;
aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;
- aup->mac->mac_addr_high = dev->dev_addr[5]<<8 | dev->dev_addr[4];
- aup->mac->mac_addr_low = dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
- dev->dev_addr[1]<<8 | dev->dev_addr[0];
+ writel(dev->dev_addr[5]<<8 | dev->dev_addr[4],
+ &aup->mac->mac_addr_high);
+ writel(dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
+ dev->dev_addr[1]<<8 | dev->dev_addr[0],
+ &aup->mac->mac_addr_low);
- for (i = 0; i < NUM_RX_DMA; i++) {
+
+ for (i = 0; i < NUM_RX_DMA; i++)
aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;
- }
+
au_sync();
control = MAC_RX_ENABLE | MAC_TX_ENABLE;
@@ -651,8 +673,8 @@ static int au1000_init(struct net_device *dev)
control |= MAC_FULL_DUPLEX;
}
- aup->mac->control = control;
- aup->mac->vlan1_tag = 0x8100; /* activate vlan support */
+ writel(control, &aup->mac->control);
+ writel(0x8100, &aup->mac->vlan1_tag); /* activate vlan support */
au_sync();
spin_unlock_irqrestore(&aup->lock, flags);
@@ -689,9 +711,9 @@ static int au1000_rx(struct net_device *dev)
{
struct au1000_private *aup = netdev_priv(dev);
struct sk_buff *skb;
- volatile rx_dma_t *prxd;
+ struct rx_dma *prxd;
u32 buff_stat, status;
- db_dest_t *pDB;
+ struct db_dest *pDB;
u32 frmlen;
netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head);
@@ -721,24 +743,26 @@ static int au1000_rx(struct net_device *dev)
netif_rx(skb); /* pass the packet to upper layers */
} else {
if (au1000_debug > 4) {
+ pr_err("rx_error(s):");
if (status & RX_MISSED_FRAME)
- printk("rx miss\n");
+ pr_cont(" miss");
if (status & RX_WDOG_TIMER)
- printk("rx wdog\n");
+ pr_cont(" wdog");
if (status & RX_RUNT)
- printk("rx runt\n");
+ pr_cont(" runt");
if (status & RX_OVERLEN)
- printk("rx overlen\n");
+ pr_cont(" overlen");
if (status & RX_COLL)
- printk("rx coll\n");
+ pr_cont(" coll");
if (status & RX_MII_ERROR)
- printk("rx mii error\n");
+ pr_cont(" mii error");
if (status & RX_CRC_ERROR)
- printk("rx crc error\n");
+ pr_cont(" crc error");
if (status & RX_LEN_ERROR)
- printk("rx len error\n");
+ pr_cont(" len error");
if (status & RX_U_CNTRL_FRAME)
- printk("rx u control frame\n");
+ pr_cont(" u control frame");
+ pr_cont("\n");
}
}
prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
@@ -761,7 +785,8 @@ static void au1000_update_tx_stats(struct net_device *dev, u32 status)
if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) {
if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
/* any other tx errors are only valid
- * in half duplex mode */
+ * in half duplex mode
+ */
ps->tx_errors++;
ps->tx_aborted_errors++;
}
@@ -782,7 +807,7 @@ static void au1000_update_tx_stats(struct net_device *dev, u32 status)
static void au1000_tx_ack(struct net_device *dev)
{
struct au1000_private *aup = netdev_priv(dev);
- volatile tx_dma_t *ptxd;
+ struct tx_dma *ptxd;
ptxd = aup->tx_dma_ring[aup->tx_tail];
@@ -862,7 +887,7 @@ static int au1000_close(struct net_device *dev)
spin_lock_irqsave(&aup->lock, flags);
- au1000_reset_mac_unlocked (dev);
+ au1000_reset_mac_unlocked(dev);
/* stop the device */
netif_stop_queue(dev);
@@ -881,9 +906,9 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
{
struct au1000_private *aup = netdev_priv(dev);
struct net_device_stats *ps = &dev->stats;
- volatile tx_dma_t *ptxd;
+ struct tx_dma *ptxd;
u32 buff_stat;
- db_dest_t *pDB;
+ struct db_dest *pDB;
int i;
netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n",
@@ -910,9 +935,9 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
pDB = aup->tx_db_inuse[aup->tx_head];
skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
if (skb->len < ETH_ZLEN) {
- for (i = skb->len; i < ETH_ZLEN; i++) {
+ for (i = skb->len; i < ETH_ZLEN; i++)
((char *)pDB->vaddr)[i] = 0;
- }
+
ptxd->len = ETH_ZLEN;
} else
ptxd->len = skb->len;
@@ -943,15 +968,16 @@ static void au1000_tx_timeout(struct net_device *dev)
static void au1000_multicast_list(struct net_device *dev)
{
struct au1000_private *aup = netdev_priv(dev);
+ u32 reg;
- netif_dbg(aup, drv, dev, "au1000_multicast_list: flags=%x\n", dev->flags);
-
+ netif_dbg(aup, drv, dev, "%s: flags=%x\n", __func__, dev->flags);
+ reg = readl(&aup->mac->control);
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
- aup->mac->control |= MAC_PROMISCUOUS;
+ reg |= MAC_PROMISCUOUS;
} else if ((dev->flags & IFF_ALLMULTI) ||
netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
- aup->mac->control |= MAC_PASS_ALL_MULTI;
- aup->mac->control &= ~MAC_PROMISCUOUS;
+ reg |= MAC_PASS_ALL_MULTI;
+ reg &= ~MAC_PROMISCUOUS;
netdev_info(dev, "Pass all multicast\n");
} else {
struct netdev_hw_addr *ha;
@@ -961,11 +987,12 @@ static void au1000_multicast_list(struct net_device *dev)
netdev_for_each_mc_addr(ha, dev)
set_bit(ether_crc(ETH_ALEN, ha->addr)>>26,
(long *)mc_filter);
- aup->mac->multi_hash_high = mc_filter[1];
- aup->mac->multi_hash_low = mc_filter[0];
- aup->mac->control &= ~MAC_PROMISCUOUS;
- aup->mac->control |= MAC_HASH_MODE;
+ writel(mc_filter[1], &aup->mac->multi_hash_high);
+ writel(mc_filter[0], &aup->mac->multi_hash_low);
+ reg &= ~MAC_PROMISCUOUS;
+ reg |= MAC_HASH_MODE;
}
+ writel(reg, &aup->mac->control);
}
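
The conversion above is the usual read-modify-write shape for MMIO accessors: read the control register once with readl(), update the cached value, then write it back once with writel(). The 6-bit hash bin comes from the top bits of the big-endian Ethernet CRC of the address. A self-contained sketch of the bin computation; ether_crc() below mirrors the classic kernel helper (polynomial 0x04c11db7, bytes consumed LSB-first), and the two filter words correspond to multi_hash_high/low in the hunk above:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's ether_crc() from linux/crc32.h. */
static uint32_t ether_crc(int length, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;

	while (--length >= 0) {
		uint8_t byte = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, byte >>= 1) {
			uint32_t msb = crc >> 31;

			crc <<= 1;
			if (msb ^ (byte & 1))
				crc ^= 0x04c11db7;
		}
	}
	return crc;
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t mc_filter[2] = { 0, 0 };
	/* top 6 CRC bits select one of 64 hash bins */
	unsigned bin = ether_crc(6, mac) >> 26;

	mc_filter[bin >> 5] |= 1u << (bin & 31);
	printf("bin=%u high=%08x low=%08x\n", bin, mc_filter[1], mc_filter[0]);
	return 0;
}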
static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -999,10 +1026,9 @@ static int __devinit au1000_probe(struct platform_device *pdev)
struct au1000_private *aup = NULL;
struct au1000_eth_platform_data *pd;
struct net_device *dev = NULL;
- db_dest_t *pDB, *pDBfree;
+ struct db_dest *pDB, *pDBfree;
int irq, i, err = 0;
struct resource *base, *macen;
- char ethaddr[6];
base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!base) {
@@ -1025,13 +1051,15 @@ static int __devinit au1000_probe(struct platform_device *pdev)
goto out;
}
- if (!request_mem_region(base->start, resource_size(base), pdev->name)) {
+ if (!request_mem_region(base->start, resource_size(base),
+ pdev->name)) {
dev_err(&pdev->dev, "failed to request memory region for base registers\n");
err = -ENXIO;
goto out;
}
- if (!request_mem_region(macen->start, resource_size(macen), pdev->name)) {
+ if (!request_mem_region(macen->start, resource_size(macen),
+ pdev->name)) {
dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n");
err = -ENXIO;
goto err_request;
@@ -1049,10 +1077,12 @@ static int __devinit au1000_probe(struct platform_device *pdev)
aup = netdev_priv(dev);
spin_lock_init(&aup->lock);
- aup->msg_enable = (au1000_debug < 4 ? AU1000_DEF_MSG_ENABLE : au1000_debug);
+ aup->msg_enable = (au1000_debug < 4 ?
+ AU1000_DEF_MSG_ENABLE : au1000_debug);
- /* Allocate the data buffers */
- /* Snooping works fine with eth on all au1xxx */
+ /* Allocate the data buffers
+ * Snooping works fine with eth on all au1xxx
+ */
aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
(NUM_TX_BUFFS + NUM_RX_BUFFS),
&aup->dma_addr, 0);
@@ -1063,15 +1093,17 @@ static int __devinit au1000_probe(struct platform_device *pdev)
}
/* aup->mac is the base address of the MAC's registers */
- aup->mac = (volatile mac_reg_t *)ioremap_nocache(base->start, resource_size(base));
+ aup->mac = (struct mac_reg *)
+ ioremap_nocache(base->start, resource_size(base));
if (!aup->mac) {
dev_err(&pdev->dev, "failed to ioremap MAC registers\n");
err = -ENXIO;
goto err_remap1;
}
- /* Setup some variables for quick register address access */
- aup->enable = (volatile u32 *)ioremap_nocache(macen->start, resource_size(macen));
+ /* Setup some variables for quick register address access */
+ aup->enable = (u32 *)ioremap_nocache(macen->start,
+ resource_size(macen));
if (!aup->enable) {
dev_err(&pdev->dev, "failed to ioremap MAC enable register\n");
err = -ENXIO;
@@ -1079,33 +1111,26 @@ static int __devinit au1000_probe(struct platform_device *pdev)
}
aup->mac_id = pdev->id;
- if (pdev->id == 0) {
- if (prom_get_ethernet_addr(ethaddr) == 0)
- memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
- else {
- netdev_info(dev, "No MAC address found\n");
- /* Use the hard coded MAC addresses */
- }
-
+ if (pdev->id == 0)
au1000_setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
- } else if (pdev->id == 1)
+ else if (pdev->id == 1)
au1000_setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
- /*
- * Assign to the Ethernet ports two consecutive MAC addresses
- * to match those that are printed on their stickers
- */
- memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
- dev->dev_addr[5] += pdev->id;
+ /* set a random MAC now in case platform_data doesn't provide one */
+ random_ether_addr(dev->dev_addr);
- *aup->enable = 0;
+ writel(0, &aup->enable);
aup->mac_enabled = 0;
pd = pdev->dev.platform_data;
if (!pd) {
- dev_info(&pdev->dev, "no platform_data passed, PHY search on MAC0\n");
+ dev_info(&pdev->dev, "no platform_data passed,"
+ " PHY search on MAC0\n");
aup->phy1_search_mac0 = 1;
} else {
+ if (is_valid_ether_addr(pd->mac))
+ memcpy(dev->dev_addr, pd->mac, 6);
+
aup->phy_static_config = pd->phy_static_config;
aup->phy_search_highest_addr = pd->phy_search_highest_addr;
aup->phy1_search_mac0 = pd->phy1_search_mac0;
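
The probe path now always starts from a random address and only overrides it when platform data supplies a valid one, so the interface comes up usable even without platform_data. A sketch of that policy; the helpers below are stand-ins for the etherdevice.h ones, assuming the usual validity test (not multicast, not all-zero):

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>

static int is_zero_ether_addr(const uint8_t *a)
{
	return !(a[0] | a[1] | a[2] | a[3] | a[4] | a[5]);
}

static int is_multicast_ether_addr(const uint8_t *a)
{
	return a[0] & 0x01;
}

static int is_valid_ether_addr(const uint8_t *a)
{
	return !is_multicast_ether_addr(a) && !is_zero_ether_addr(a);
}

static void random_ether_addr(uint8_t *a)
{
	int i;

	for (i = 0; i < 6; i++)
		a[i] = rand() & 0xff;
	a[0] &= 0xfe;	/* clear multicast bit */
	a[0] |= 0x02;	/* set locally administered bit */
}

int main(void)
{
	uint8_t dev_addr[6];
	const uint8_t pd_mac[6] = { 0x00, 0x50, 0xc2, 0x0c, 0x30, 0x00 };

	/* default to a random address, then let platform data override it */
	random_ether_addr(dev_addr);
	if (is_valid_ether_addr(pd_mac))
		memcpy(dev_addr, pd_mac, 6);

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n", dev_addr[0], dev_addr[1],
	       dev_addr[2], dev_addr[3], dev_addr[4], dev_addr[5]);
	return 0;
}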
@@ -1115,8 +1140,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
}
if (aup->phy_busid && aup->phy_busid > 0) {
- dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII"
- "bus not supported yet\n");
+ dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII bus not supported yet\n");
err = -ENODEV;
goto err_mdiobus_alloc;
}
@@ -1168,17 +1192,17 @@ static int __devinit au1000_probe(struct platform_device *pdev)
for (i = 0; i < NUM_RX_DMA; i++) {
pDB = au1000_GetFreeDB(aup);
- if (!pDB) {
+ if (!pDB)
goto err_out;
- }
+
aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
aup->rx_db_inuse[i] = pDB;
}
for (i = 0; i < NUM_TX_DMA; i++) {
pDB = au1000_GetFreeDB(aup);
- if (!pDB) {
+ if (!pDB)
goto err_out;
- }
+
aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
aup->tx_dma_ring[i]->len = 0;
aup->tx_db_inuse[i] = pDB;
@@ -1205,7 +1229,8 @@ static int __devinit au1000_probe(struct platform_device *pdev)
netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n",
(unsigned long)base->start, irq);
if (version_printed++ == 0)
- printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
+ pr_info("%s version %s %s\n",
+ DRV_NAME, DRV_VERSION, DRV_AUTHOR);
return 0;
@@ -1214,7 +1239,8 @@ err_out:
mdiobus_unregister(aup->mii_bus);
/* here we should have a valid dev plus aup-> register addresses
- * so we can reset the mac properly.*/
+ * so we can reset the mac properly.
+ */
au1000_reset_mac(dev);
for (i = 0; i < NUM_RX_DMA; i++) {
diff --git a/drivers/net/au1000_eth.h b/drivers/net/au1000_eth.h
index d06ec008fbf1..6229c774552c 100644
--- a/drivers/net/au1000_eth.h
+++ b/drivers/net/au1000_eth.h
@@ -44,34 +44,34 @@
* Data Buffer Descriptor. Data buffers must be aligned on a 32 byte
* boundary for both receive and transmit.
*/
-typedef struct db_dest {
+struct db_dest {
struct db_dest *pnext;
- volatile u32 *vaddr;
+ u32 *vaddr;
dma_addr_t dma_addr;
-} db_dest_t;
+};
/*
* The transmit and receive descriptors are memory
* mapped registers.
*/
-typedef struct tx_dma {
+struct tx_dma {
u32 status;
u32 buff_stat;
u32 len;
u32 pad;
-} tx_dma_t;
+};
-typedef struct rx_dma {
+struct rx_dma {
u32 status;
u32 buff_stat;
u32 pad[2];
-} rx_dma_t;
+};
/*
* MAC control registers, memory mapped.
*/
-typedef struct mac_reg {
+struct mac_reg {
u32 control;
u32 mac_addr_high;
u32 mac_addr_low;
@@ -82,16 +82,16 @@ typedef struct mac_reg {
u32 flow_control;
u32 vlan1_tag;
u32 vlan2_tag;
-} mac_reg_t;
+};
struct au1000_private {
- db_dest_t *pDBfree;
- db_dest_t db[NUM_RX_BUFFS+NUM_TX_BUFFS];
- volatile rx_dma_t *rx_dma_ring[NUM_RX_DMA];
- volatile tx_dma_t *tx_dma_ring[NUM_TX_DMA];
- db_dest_t *rx_db_inuse[NUM_RX_DMA];
- db_dest_t *tx_db_inuse[NUM_TX_DMA];
+ struct db_dest *pDBfree;
+ struct db_dest db[NUM_RX_BUFFS+NUM_TX_BUFFS];
+ struct rx_dma *rx_dma_ring[NUM_RX_DMA];
+ struct tx_dma *tx_dma_ring[NUM_TX_DMA];
+ struct db_dest *rx_db_inuse[NUM_RX_DMA];
+ struct db_dest *tx_db_inuse[NUM_TX_DMA];
u32 rx_head;
u32 tx_head;
u32 tx_tail;
@@ -99,7 +99,9 @@ struct au1000_private {
int mac_id;
- int mac_enabled; /* whether MAC is currently enabled and running (req. for mdio) */
+ int mac_enabled; /* whether MAC is currently enabled and running
+ * (req. for mdio)
+ */
int old_link; /* used by au1000_adjust_link */
int old_speed;
@@ -117,9 +119,11 @@ struct au1000_private {
int phy_busid;
int phy_irq;
- /* These variables are just for quick access to certain regs addresses. */
- volatile mac_reg_t *mac; /* mac registers */
- volatile u32 *enable; /* address of MAC Enable Register */
+ /* These variables are just for quick access
+ * to certain regs addresses.
+ */
+ struct mac_reg *mac; /* mac registers */
+ u32 *enable; /* address of MAC Enable Register */
u32 vaddr; /* virtual address of rx/tx buffers */
dma_addr_t dma_addr; /* dma address of rx/tx buffers */
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 37617abc1647..8e7c8a8e61c7 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -818,7 +818,7 @@ static int b44_rx(struct b44 *bp, int budget)
copy_skb->data, len);
skb = copy_skb;
}
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, bp->dev);
netif_receive_skb(skb);
received++;
@@ -848,6 +848,15 @@ static int b44_poll(struct napi_struct *napi, int budget)
b44_tx(bp);
/* spin_unlock(&bp->tx_lock); */
}
+ if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */
+ bp->istat &= ~ISTAT_RFO;
+ b44_disable_ints(bp);
+ ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
+ b44_init_rings(bp);
+ b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
+ netif_wake_queue(bp->dev);
+ }
+
spin_unlock_irqrestore(&bp->lock, flags);
work_done = 0;
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index 0d2c5da08937..ecfef240a303 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -293,22 +293,22 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
/* if the packet does not have start of packet _and_
* end of packet flag set, then just recycle it */
if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
- priv->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
continue;
}
/* recycle packet if it's marked as bad */
if (unlikely(len_stat & DMADESC_ERR_MASK)) {
- priv->stats.rx_errors++;
+ dev->stats.rx_errors++;
if (len_stat & DMADESC_OVSIZE_MASK)
- priv->stats.rx_length_errors++;
+ dev->stats.rx_length_errors++;
if (len_stat & DMADESC_CRC_MASK)
- priv->stats.rx_crc_errors++;
+ dev->stats.rx_crc_errors++;
if (len_stat & DMADESC_UNDER_MASK)
- priv->stats.rx_frame_errors++;
+ dev->stats.rx_frame_errors++;
if (len_stat & DMADESC_OV_MASK)
- priv->stats.rx_fifo_errors++;
+ dev->stats.rx_fifo_errors++;
continue;
}
@@ -324,7 +324,7 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
nskb = netdev_alloc_skb_ip_align(dev, len);
if (!nskb) {
/* forget packet, just rearm desc */
- priv->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
continue;
}
@@ -342,8 +342,8 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, dev);
- priv->stats.rx_packets++;
- priv->stats.rx_bytes += len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
netif_receive_skb(skb);
} while (--budget > 0);
@@ -403,7 +403,7 @@ static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
spin_unlock(&priv->tx_lock);
if (desc->len_stat & DMADESC_UNDER_MASK)
- priv->stats.tx_errors++;
+ dev->stats.tx_errors++;
dev_kfree_skb(skb);
released++;
@@ -563,8 +563,8 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!priv->tx_desc_count)
netif_stop_queue(dev);
- priv->stats.tx_bytes += skb->len;
- priv->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ dev->stats.tx_packets++;
ret = NETDEV_TX_OK;
out_unlock:
@@ -798,7 +798,7 @@ static int bcm_enet_open(struct net_device *dev)
snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
priv->mac_id ? "1" : "0", priv->phy_id);
- phydev = phy_connect(dev, phy_id, &bcm_enet_adjust_phy_link, 0,
+ phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, 0,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
@@ -1141,17 +1141,6 @@ static int bcm_enet_stop(struct net_device *dev)
}
/*
- * core request to return device rx/tx stats
- */
-static struct net_device_stats *bcm_enet_get_stats(struct net_device *dev)
-{
- struct bcm_enet_priv *priv;
-
- priv = netdev_priv(dev);
- return &priv->stats;
-}
-
-/*
* ethtool callbacks
*/
struct bcm_enet_stats {
@@ -1163,16 +1152,18 @@ struct bcm_enet_stats {
#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m), \
offsetof(struct bcm_enet_priv, m)
+#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m), \
+ offsetof(struct net_device_stats, m)
static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
- { "rx_packets", GEN_STAT(stats.rx_packets), -1 },
- { "tx_packets", GEN_STAT(stats.tx_packets), -1 },
- { "rx_bytes", GEN_STAT(stats.rx_bytes), -1 },
- { "tx_bytes", GEN_STAT(stats.tx_bytes), -1 },
- { "rx_errors", GEN_STAT(stats.rx_errors), -1 },
- { "tx_errors", GEN_STAT(stats.tx_errors), -1 },
- { "rx_dropped", GEN_STAT(stats.rx_dropped), -1 },
- { "tx_dropped", GEN_STAT(stats.tx_dropped), -1 },
+ { "rx_packets", DEV_STAT(rx_packets), -1 },
+ { "tx_packets", DEV_STAT(tx_packets), -1 },
+ { "rx_bytes", DEV_STAT(rx_bytes), -1 },
+ { "tx_bytes", DEV_STAT(tx_bytes), -1 },
+ { "rx_errors", DEV_STAT(rx_errors), -1 },
+ { "tx_errors", DEV_STAT(tx_errors), -1 },
+ { "rx_dropped", DEV_STAT(rx_dropped), -1 },
+ { "tx_dropped", DEV_STAT(tx_dropped), -1 },
{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
{ "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
@@ -1328,7 +1319,11 @@ static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
char *p;
s = &bcm_enet_gstrings_stats[i];
- p = (char *)priv + s->stat_offset;
+ if (s->mib_reg == -1)
+ p = (char *)&netdev->stats;
+ else
+ p = (char *)priv;
+ p += s->stat_offset;
data[i] = (s->sizeof_stat == sizeof(u64)) ?
*(u64 *)p : *(u32 *)p;
}
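
The DEV_STAT()/GEN_STAT() pair records each counter's sizeof() and offsetof() so a single loop can fetch every statistic; the hunk above only changes which base pointer the stored offset is applied to. A compact sketch of the same table-driven pattern (struct and field names are illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct dev_stats { uint64_t rx_packets, tx_packets; };
struct priv_stats { uint32_t mib_rx_crc_err; };

struct stat_desc {
	const char *name;
	size_t size;		/* sizeof() the field */
	size_t offset;		/* offsetof() within its base struct */
	int from_priv;		/* which base pointer to use */
};

#define DEV_STAT(f)  sizeof(((struct dev_stats *)0)->f), \
		     offsetof(struct dev_stats, f), 0
#define PRIV_STAT(f) sizeof(((struct priv_stats *)0)->f), \
		     offsetof(struct priv_stats, f), 1

static const struct stat_desc descs[] = {
	{ "rx_packets",	DEV_STAT(rx_packets) },
	{ "tx_packets",	DEV_STAT(tx_packets) },
	{ "rx_crc_err",	PRIV_STAT(mib_rx_crc_err) },
};

int main(void)
{
	struct dev_stats ds = { 10, 20 };
	struct priv_stats ps = { 3 };
	size_t i;

	for (i = 0; i < sizeof(descs) / sizeof(descs[0]); i++) {
		const struct stat_desc *s = &descs[i];
		const char *base = s->from_priv ? (const char *)&ps
						: (const char *)&ds;
		uint64_t v = (s->size == sizeof(uint64_t)) ?
			*(const uint64_t *)(base + s->offset) :
			*(const uint32_t *)(base + s->offset);

		printf("%-12s %llu\n", s->name, (unsigned long long)v);
	}
	return 0;
}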
@@ -1605,7 +1600,6 @@ static const struct net_device_ops bcm_enet_ops = {
.ndo_open = bcm_enet_open,
.ndo_stop = bcm_enet_stop,
.ndo_start_xmit = bcm_enet_start_xmit,
- .ndo_get_stats = bcm_enet_get_stats,
.ndo_set_mac_address = bcm_enet_set_mac_address,
.ndo_set_multicast_list = bcm_enet_set_multicast_list,
.ndo_do_ioctl = bcm_enet_ioctl,
diff --git a/drivers/net/bcm63xx_enet.h b/drivers/net/bcm63xx_enet.h
index bd3684d42d74..0e3048b788c2 100644
--- a/drivers/net/bcm63xx_enet.h
+++ b/drivers/net/bcm63xx_enet.h
@@ -274,7 +274,6 @@ struct bcm_enet_priv {
int pause_tx;
/* stats */
- struct net_device_stats stats;
struct bcm_enet_mib_counters mib;
/* after mib interrupt, mib registers update is done in this
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 99197bd54da5..4faf6961dcec 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -181,6 +181,7 @@ struct be_drvr_stats {
u64 be_rx_bytes_prev;
u64 be_rx_pkts;
u32 be_rx_rate;
+ u32 be_rx_mcast_pkt;
/* number of non ether type II frames dropped where
* frame len > length field of Mac Hdr */
u32 be_802_3_dropped_frames;
@@ -413,6 +414,20 @@ static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
adapter->is_virtfn = (data != 0xAA);
}
+static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
+{
+ u32 addr;
+
+ addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
+
+ mac[5] = (u8)(addr & 0xFF);
+ mac[4] = (u8)((addr >> 8) & 0xFF);
+ mac[3] = (u8)((addr >> 16) & 0xFF);
+ mac[2] = 0xC9;
+ mac[1] = 0x00;
+ mac[0] = 0x00;
+}
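
The helper hashes the PF MAC into the low three bytes of a fixed-prefix address; be_vf_eth_addr_config() further down in this patch then hands out seed, seed+1, and so on to the VFs. A sketch of the derivation, with a toy mixer standing in for the kernel's jhash():

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the kernel's jhash() over the 6-byte PF MAC;
 * only the "hash bytes down to a u32" shape matters here. */
static uint32_t toy_hash(const uint8_t *key, int len)
{
	uint32_t h = 0x9e3779b9;
	int i;

	for (i = 0; i < len; i++) {
		h ^= key[i];
		h *= 0x01000193;	/* FNV-style mix */
	}
	return h;
}

static void vf_eth_addr_generate(const uint8_t *pf_mac, uint8_t *mac)
{
	uint32_t addr = toy_hash(pf_mac, 6);

	mac[5] = (uint8_t)(addr & 0xFF);
	mac[4] = (uint8_t)((addr >> 8) & 0xFF);
	mac[3] = (uint8_t)((addr >> 16) & 0xFF);
	mac[2] = 0xC9;	/* fixed prefix, as in the patch */
	mac[1] = 0x00;
	mac[0] = 0x00;
}

int main(void)
{
	const uint8_t pf_mac[6] = { 0x00, 0x00, 0xc9, 0x12, 0x34, 0x56 };
	uint8_t mac[6];
	int vf;

	vf_eth_addr_generate(pf_mac, mac);
	for (vf = 0; vf < 4; vf++) {	/* one address per VF: seed + vf */
		printf("vf%d %02x:%02x:%02x:%02x:%02x:%02x\n", vf,
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		mac[5] += 1;	/* the driver increments the same way */
	}
	return 0;
}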
+
extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
u16 num_popped);
extern void be_link_status_update(struct be_adapter *adapter, bool link_up);
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 3d305494a606..34abcc9403d6 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -140,10 +140,8 @@ int be_process_mcc(struct be_adapter *adapter, int *status)
while ((compl = be_mcc_compl_get(adapter))) {
if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
/* Interpret flags as an async trailer */
- BUG_ON(!is_link_state_evt(compl->flags));
-
- /* Interpret compl as a async link evt */
- be_async_link_state_process(adapter,
+ if (is_link_state_evt(compl->flags))
+ be_async_link_state_process(adapter,
(struct be_async_event_link_state *) compl);
} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
*status = be_mcc_compl_process(adapter, compl);
@@ -207,7 +205,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
if (msecs > 4000) {
dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
- be_dump_ue(adapter);
+ be_detect_dump_ue(adapter);
return -1;
}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index bdc10a28cfda..ad1e6fac60c5 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -992,5 +992,5 @@ extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
extern int be_cmd_get_phy_info(struct be_adapter *adapter,
struct be_dma_mem *cmd);
extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
-extern void be_dump_ue(struct be_adapter *adapter);
+extern void be_detect_dump_ue(struct be_adapter *adapter);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index cd16243c7c36..d92063420c25 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -60,6 +60,7 @@ static const struct be_ethtool_stat et_stats[] = {
{DRVSTAT_INFO(be_rx_events)},
{DRVSTAT_INFO(be_tx_compl)},
{DRVSTAT_INFO(be_rx_compl)},
+ {DRVSTAT_INFO(be_rx_mcast_pkt)},
{DRVSTAT_INFO(be_ethrx_post_fail)},
{DRVSTAT_INFO(be_802_3_dropped_frames)},
{DRVSTAT_INFO(be_802_3_malformed_frames)},
@@ -90,6 +91,9 @@ static const struct be_ethtool_stat et_stats[] = {
{PORTSTAT_INFO(rx_non_rss_packets)},
{PORTSTAT_INFO(rx_ipv4_packets)},
{PORTSTAT_INFO(rx_ipv6_packets)},
+ {PORTSTAT_INFO(rx_switched_unicast_packets)},
+ {PORTSTAT_INFO(rx_switched_multicast_packets)},
+ {PORTSTAT_INFO(rx_switched_broadcast_packets)},
{PORTSTAT_INFO(tx_unicastframes)},
{PORTSTAT_INFO(tx_multicastframes)},
{PORTSTAT_INFO(tx_broadcastframes)},
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 5d38046402b2..a2ec5df0d733 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -167,8 +167,11 @@
#define FLASH_FCoE_BIOS_START_g3 (13631488)
#define FLASH_REDBOOT_START_g3 (262144)
-
-
+/************* Rx Packet Type Encoding **************/
+#define BE_UNICAST_PACKET 0
+#define BE_MULTICAST_PACKET 1
+#define BE_BROADCAST_PACKET 2
+#define BE_RSVD_PACKET 3
/*
* BE descriptors: host memory data structures whose formats
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 74e146f470c6..43a3a574e2e0 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -247,6 +247,7 @@ void netdev_stats_update(struct be_adapter *adapter)
dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
+ dev_stats->multicast = drvr_stats(adapter)->be_rx_mcast_pkt;
/* bad pkts received */
dev_stats->rx_errors = port_stats->rx_crc_errors +
@@ -294,7 +295,6 @@ void netdev_stats_update(struct be_adapter *adapter)
/* no space available in linux */
dev_stats->tx_dropped = 0;
- dev_stats->multicast = port_stats->rx_multicast_frames;
dev_stats->collisions = 0;
/* detailed tx_errors */
@@ -365,11 +365,6 @@ static void be_rx_eqd_update(struct be_adapter *adapter)
rx_eq->cur_eqd = eqd;
}
-static struct net_device_stats *be_get_stats(struct net_device *dev)
-{
- return &dev->stats;
-}
-
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
u64 rate = bytes;
@@ -848,7 +843,7 @@ static void be_rx_rate_update(struct be_adapter *adapter)
}
static void be_rx_stats_update(struct be_adapter *adapter,
- u32 pktsize, u16 numfrags)
+ u32 pktsize, u16 numfrags, u8 pkt_type)
{
struct be_drvr_stats *stats = drvr_stats(adapter);
@@ -856,6 +851,9 @@ static void be_rx_stats_update(struct be_adapter *adapter,
stats->be_rx_frags += numfrags;
stats->be_rx_bytes += pktsize;
stats->be_rx_pkts++;
+
+ if (pkt_type == BE_MULTICAST_PACKET)
+ stats->be_rx_mcast_pkt++;
}
static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
@@ -925,9 +923,11 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
u16 rxq_idx, i, j;
u32 pktsize, hdr_len, curr_frag_len, size;
u8 *start;
+ u8 pkt_type;
rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
+ pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
page_info = get_rx_page_info(adapter, rxq_idx);
@@ -993,7 +993,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
BUG_ON(j > MAX_SKB_FRAGS);
done:
- be_rx_stats_update(adapter, pktsize, num_rcvd);
+ be_rx_stats_update(adapter, pktsize, num_rcvd, pkt_type);
}
/* Process the RX completion indicated by rxcp when GRO is disabled */
@@ -1021,7 +1021,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
skb_fill_rx_data(adapter, skb, rxcp, num_rcvd);
if (do_pkt_csum(rxcp, adapter->rx_csum))
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1060,6 +1060,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
u16 i, rxq_idx = 0, vid, j;
u8 vtm;
+ u8 pkt_type;
num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
/* Is it a flush compl that has no data */
@@ -1070,6 +1071,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
+ pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
/* vlanf could be wrongly set in some cards.
* ignore if vtm is not set */
@@ -1125,7 +1127,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
}
- be_rx_stats_update(adapter, pkt_size, num_rcvd);
+ be_rx_stats_update(adapter, pkt_size, num_rcvd, pkt_type);
}
static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
@@ -1743,26 +1745,7 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
return 1;
}
-static inline bool be_detect_ue(struct be_adapter *adapter)
-{
- u32 online0 = 0, online1 = 0;
-
- pci_read_config_dword(adapter->pdev, PCICFG_ONLINE0, &online0);
-
- pci_read_config_dword(adapter->pdev, PCICFG_ONLINE1, &online1);
-
- if (!online0 || !online1) {
- adapter->ue_detected = true;
- dev_err(&adapter->pdev->dev,
- "UE Detected!! online0=%d online1=%d\n",
- online0, online1);
- return true;
- }
-
- return false;
-}
-
-void be_dump_ue(struct be_adapter *adapter)
+void be_detect_dump_ue(struct be_adapter *adapter)
{
u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
u32 i;
@@ -1779,6 +1762,11 @@ void be_dump_ue(struct be_adapter *adapter)
ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
+ if (ue_status_lo || ue_status_hi) {
+ adapter->ue_detected = true;
+ dev_err(&adapter->pdev->dev, "UE Detected!!\n");
+ }
+
if (ue_status_lo) {
for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
if (ue_status_lo & 1)
@@ -1814,10 +1802,8 @@ static void be_worker(struct work_struct *work)
adapter->rx_post_starved = false;
be_post_rx_frags(adapter);
}
- if (!adapter->ue_detected) {
- if (be_detect_ue(adapter))
- be_dump_ue(adapter);
- }
+ if (!adapter->ue_detected)
+ be_detect_dump_ue(adapter);
schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
@@ -2093,6 +2079,47 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
return status;
}
+/*
+ * Generate a seed MAC address from the PF MAC Address using jhash.
+ * MAC addresses for VFs are assigned incrementally starting from the seed.
+ * These addresses are programmed in the ASIC by the PF and the VF driver
+ * queries for the MAC address during its probe.
+ */
+static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
+{
+ u32 vf = 0;
+ int status;
+ u8 mac[ETH_ALEN];
+
+ be_vf_eth_addr_generate(adapter, mac);
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ status = be_cmd_pmac_add(adapter, mac,
+ adapter->vf_cfg[vf].vf_if_handle,
+ &adapter->vf_cfg[vf].vf_pmac_id);
+ if (status)
+ dev_err(&adapter->pdev->dev,
+ "Mac address add failed for VF %d\n", vf);
+ else
+ memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
+
+ mac[5] += 1;
+ }
+ return status;
+}
+
+static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
+{
+ u32 vf;
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
+ be_cmd_pmac_del(adapter,
+ adapter->vf_cfg[vf].vf_if_handle,
+ adapter->vf_cfg[vf].vf_pmac_id);
+ }
+}
+
static int be_setup(struct be_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -2152,10 +2179,20 @@ static int be_setup(struct be_adapter *adapter)
if (status != 0)
goto rx_qs_destroy;
+ if (be_physfn(adapter)) {
+ status = be_vf_eth_addr_config(adapter);
+ if (status)
+ goto mcc_q_destroy;
+ }
+
adapter->link_speed = -1;
return 0;
+mcc_q_destroy:
+ if (be_physfn(adapter))
+ be_vf_eth_addr_rem(adapter);
+ be_mcc_queues_destroy(adapter);
rx_qs_destroy:
be_rx_queues_destroy(adapter);
tx_qs_destroy:
@@ -2172,6 +2209,9 @@ do_none:
static int be_clear(struct be_adapter *adapter)
{
+ if (be_physfn(adapter))
+ be_vf_eth_addr_rem(adapter);
+
be_mcc_queues_destroy(adapter);
be_rx_queues_destroy(adapter);
be_tx_queues_destroy(adapter);
@@ -2399,7 +2439,6 @@ static struct net_device_ops be_netdev_ops = {
.ndo_open = be_open,
.ndo_stop = be_close,
.ndo_start_xmit = be_xmit,
- .ndo_get_stats = be_get_stats,
.ndo_set_rx_mode = be_set_multicast_list,
.ndo_set_mac_address = be_mac_addr_set,
.ndo_change_mtu = be_change_mtu,
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 012613fde3f4..7a0e4156fade 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -803,15 +803,14 @@ static void bfin_dump_hwtamp(char *s, ktime_t *hw, ktime_t *ts, struct timecompa
static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
{
struct bfin_mac_local *lp = netdev_priv(netdev);
- union skb_shared_tx *shtx = skb_tx(skb);
- if (shtx->hardware) {
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
int timeout_cnt = MAX_TIMEOUT_CNT;
/* When doing time stamping, keep the connection to the socket
* a while longer
*/
- shtx->in_progress = 1;
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
/*
* The timestamping is done at the EMAC module's MII/RMII interface
@@ -991,7 +990,6 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
struct bfin_mac_local *lp = netdev_priv(dev);
u16 *data;
u32 data_align = (unsigned long)(skb->data) & 0x3;
- union skb_shared_tx *shtx = skb_tx(skb);
current_tx_ptr->skb = skb;
@@ -1005,7 +1003,7 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
* of this field are the length of the packet payload in bytes and the higher
* 4 bits are the timestamping enable field.
*/
- if (shtx->hardware)
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
*data |= 0x1000;
current_tx_ptr->desc_a.start_addr = (u32)data;
@@ -1015,7 +1013,7 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
} else {
*((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
/* enable timestamping for the sent packet */
- if (shtx->hardware)
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
*((u16 *)(current_tx_ptr->packet)) |= 0x1000;
memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
skb->len);
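
All three bfin_mac hunks are the same mechanical change: the removed union skb_shared_tx bitfields (shtx->hardware, shtx->in_progress) become flag tests on skb_shinfo(skb)->tx_flags. A stripped-down sketch of the flag handling; the flag values below are stand-ins for the linux/skbuff.h definitions:

#include <stdint.h>
#include <stdio.h>

/* Stand-in flag bits; the real values come from linux/skbuff.h. */
#define SKBTX_HW_TSTAMP		(1 << 0)
#define SKBTX_IN_PROGRESS	(1 << 2)

struct toy_skb {
	uint8_t tx_flags;
};

static void start_xmit(struct toy_skb *skb)
{
	if (skb->tx_flags & SKBTX_HW_TSTAMP) {
		/* hardware will stamp this frame; keep the skb
		 * alive until the timestamp is delivered */
		skb->tx_flags |= SKBTX_IN_PROGRESS;
		printf("timestamping enabled for this frame\n");
	}
}

int main(void)
{
	struct toy_skb skb = { .tx_flags = SKBTX_HW_TSTAMP };

	start_xmit(&skb);
	printf("tx_flags=%#x\n", skb.tx_flags);
	return 0;
}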
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 959add2410bf..9322699bb31c 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -1233,15 +1233,8 @@ static void bmac_reset_and_enable(struct net_device *dev)
}
spin_unlock_irqrestore(&bp->lock, flags);
}
-static void bmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
- struct bmac_data *bp = netdev_priv(dev);
- strcpy(info->driver, "bmac");
- strcpy(info->bus_info, dev_name(&bp->mdev->ofdev.dev));
-}
static const struct ethtool_ops bmac_ethtool_ops = {
- .get_drvinfo = bmac_get_drvinfo,
.get_link = ethtool_op_get_link,
};
diff --git a/drivers/net/bna/Makefile b/drivers/net/bna/Makefile
new file mode 100644
index 000000000000..a5d604de7fea
--- /dev/null
+++ b/drivers/net/bna/Makefile
@@ -0,0 +1,11 @@
+#
+# Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+# All rights reserved.
+#
+
+obj-$(CONFIG_BNA) += bna.o
+
+bna-objs := bnad.o bnad_ethtool.o bna_ctrl.o bna_txrx.o
+bna-objs += bfa_ioc.o bfa_ioc_ct.o bfa_cee.o cna_fwimg.o
+
+EXTRA_CFLAGS := -Idrivers/net/bna
diff --git a/drivers/net/bna/bfa_cee.c b/drivers/net/bna/bfa_cee.c
new file mode 100644
index 000000000000..f7b789a3b217
--- /dev/null
+++ b/drivers/net/bna/bfa_cee.c
@@ -0,0 +1,291 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#include "bfa_defs_cna.h"
+#include "cna.h"
+#include "bfa_cee.h"
+#include "bfi_cna.h"
+#include "bfa_ioc.h"
+
+#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
+#define bfa_lpuid(__arg) bfa_ioc_portid(&(__arg)->ioc)
+
+static void bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg);
+static void bfa_cee_format_cee_cfg(void *buffer);
+
+static void
+bfa_cee_format_cee_cfg(void *buffer)
+{
+ struct bfa_cee_attr *cee_cfg = buffer;
+ bfa_cee_format_lldp_cfg(&cee_cfg->lldp_remote);
+}
+
+static void
+bfa_cee_stats_swap(struct bfa_cee_stats *stats)
+{
+ u32 *buffer = (u32 *)stats;
+ int i;
+
+ for (i = 0; i < (sizeof(struct bfa_cee_stats) / sizeof(u32));
+ i++) {
+ buffer[i] = ntohl(buffer[i]);
+ }
+}
+
+static void
+bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg)
+{
+ lldp_cfg->time_to_live =
+ ntohs(lldp_cfg->time_to_live);
+ lldp_cfg->enabled_system_cap =
+ ntohs(lldp_cfg->enabled_system_cap);
+}
+
+/**
+ * bfa_cee_attr_meminfo()
+ *
+ * @brief Returns the size of the DMA memory needed by CEE attributes
+ *
+ * @param[in] void
+ *
+ * @return Size of DMA region
+ */
+static u32
+bfa_cee_attr_meminfo(void)
+{
+ return roundup(sizeof(struct bfa_cee_attr), BFA_DMA_ALIGN_SZ);
+}
+/**
+ * bfa_cee_stats_meminfo()
+ *
+ * @brief Returns the size of the DMA memory needed by CEE stats
+ *
+ * @param[in] void
+ *
+ * @return Size of DMA region
+ */
+static u32
+bfa_cee_stats_meminfo(void)
+{
+ return roundup(sizeof(struct bfa_cee_stats), BFA_DMA_ALIGN_SZ);
+}
+
+/**
+ * bfa_cee_get_attr_isr()
+ *
+ * @brief CEE ISR for get-attributes responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ * status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status)
+{
+ cee->get_attr_status = status;
+ if (status == BFA_STATUS_OK) {
+ memcpy(cee->attr, cee->attr_dma.kva,
+ sizeof(struct bfa_cee_attr));
+ bfa_cee_format_cee_cfg(cee->attr);
+ }
+ cee->get_attr_pending = false;
+ if (cee->cbfn.get_attr_cbfn)
+ cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status);
+}
+
+/**
+ * bfa_cee_get_stats_isr()
+ *
+ * @brief CEE ISR for get-stats responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ * status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_get_stats_isr(struct bfa_cee *cee, enum bfa_status status)
+{
+ cee->get_stats_status = status;
+ if (status == BFA_STATUS_OK) {
+ memcpy(cee->stats, cee->stats_dma.kva,
+ sizeof(struct bfa_cee_stats));
+ bfa_cee_stats_swap(cee->stats);
+ }
+ cee->get_stats_pending = false;
+ if (cee->cbfn.get_stats_cbfn)
+ cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status);
+}
+
+/**
+ * bfa_cee_reset_stats_isr()
+ *
+ * @brief CEE ISR for reset-stats responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ * status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status)
+{
+ cee->reset_stats_status = status;
+ cee->reset_stats_pending = false;
+ if (cee->cbfn.reset_stats_cbfn)
+ cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
+}
+/**
+ * bfa_nw_cee_meminfo()
+ *
+ * @brief Returns the size of the DMA memory needed by CEE module
+ *
+ * @param[in] void
+ *
+ * @return Size of DMA region
+ */
+u32
+bfa_nw_cee_meminfo(void)
+{
+ return bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo();
+}
+
+/**
+ * bfa_nw_cee_mem_claim()
+ *
+ * @brief Initializes CEE DMA memory
+ *
+ * @param[in] cee CEE module pointer
+ * dma_kva Kernel Virtual Address of CEE DMA Memory
+ * dma_pa Physical Address of CEE DMA Memory
+ *
+ * @return void
+ */
+void
+bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
+{
+ cee->attr_dma.kva = dma_kva;
+ cee->attr_dma.pa = dma_pa;
+ cee->stats_dma.kva = dma_kva + bfa_cee_attr_meminfo();
+ cee->stats_dma.pa = dma_pa + bfa_cee_attr_meminfo();
+ cee->attr = (struct bfa_cee_attr *) dma_kva;
+ cee->stats = (struct bfa_cee_stats *)
+ (dma_kva + bfa_cee_attr_meminfo());
+}
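
bfa_nw_cee_mem_claim() carves one DMA allocation into an attributes region followed by a stats region, with each size rounded up by the *_meminfo() helpers so the second region stays BFA_DMA_ALIGN_SZ-aligned. The arithmetic, sketched with illustrative sizes:

#include <stdint.h>
#include <stdio.h>

#define DMA_ALIGN_SZ	256	/* illustrative; BFA_DMA_ALIGN_SZ in the driver */

/* round a size up to the alignment boundary, like the driver's roundup() */
static uint32_t align_up(uint32_t sz)
{
	return (sz + DMA_ALIGN_SZ - 1) & ~(DMA_ALIGN_SZ - 1);
}

int main(void)
{
	uint32_t attr_sz = align_up(113);	/* sizeof(struct bfa_cee_attr) */
	uint32_t stats_sz = align_up(72);	/* sizeof(struct bfa_cee_stats) */
	uint64_t dma_pa = 0x10000000;		/* one contiguous DMA region */

	/* attributes first, stats carved out right behind them */
	uint64_t attr_pa = dma_pa;
	uint64_t stats_pa = dma_pa + attr_sz;

	printf("total=%u attr@%#llx stats@%#llx\n", attr_sz + stats_sz,
	       (unsigned long long)attr_pa, (unsigned long long)stats_pa);
	return 0;
}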
+
+/**
+ * bfa_cee_isr()
+ *
+ * @brief Handles mailbox interrupts for the CEE module.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return void
+ */
+
+static void
+bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m)
+{
+ union bfi_cee_i2h_msg_u *msg;
+ struct bfi_cee_get_rsp *get_rsp;
+ struct bfa_cee *cee = (struct bfa_cee *) cbarg;
+ msg = (union bfi_cee_i2h_msg_u *) m;
+ get_rsp = (struct bfi_cee_get_rsp *) m;
+ switch (msg->mh.msg_id) {
+ case BFI_CEE_I2H_GET_CFG_RSP:
+ bfa_cee_get_attr_isr(cee, get_rsp->cmd_status);
+ break;
+ case BFI_CEE_I2H_GET_STATS_RSP:
+ bfa_cee_get_stats_isr(cee, get_rsp->cmd_status);
+ break;
+ case BFI_CEE_I2H_RESET_STATS_RSP:
+ bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status);
+ break;
+ default:
+ BUG_ON(1);
+ }
+}
+
+/**
+ * bfa_cee_hbfail()
+ *
+ * @brief CEE module heart-beat failure handler.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return void
+ */
+
+static void
+bfa_cee_hbfail(void *arg)
+{
+ struct bfa_cee *cee;
+ cee = (struct bfa_cee *) arg;
+
+ if (cee->get_attr_pending == true) {
+ cee->get_attr_status = BFA_STATUS_FAILED;
+ cee->get_attr_pending = false;
+ if (cee->cbfn.get_attr_cbfn) {
+ cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg,
+ BFA_STATUS_FAILED);
+ }
+ }
+ if (cee->get_stats_pending == true) {
+ cee->get_stats_status = BFA_STATUS_FAILED;
+ cee->get_stats_pending = false;
+ if (cee->cbfn.get_stats_cbfn) {
+ cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg,
+ BFA_STATUS_FAILED);
+ }
+ }
+ if (cee->reset_stats_pending == true) {
+ cee->reset_stats_status = BFA_STATUS_FAILED;
+ cee->reset_stats_pending = false;
+ if (cee->cbfn.reset_stats_cbfn) {
+ cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg,
+ BFA_STATUS_FAILED);
+ }
+ }
+}
+
+/**
+ * bfa_nw_cee_attach()
+ *
+ * @brief CEE module-attach API
+ *
+ * @param[in] cee - Pointer to the CEE module data structure
+ * ioc - Pointer to the ioc module data structure
+ * dev - Pointer to the device driver module data structure
+ * The device driver specific mbox ISR functions have
+ * this pointer as one of the parameters.
+ *
+ * @return void
+ */
+void
+bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
+ void *dev)
+{
+ BUG_ON(cee == NULL);
+ cee->dev = dev;
+ cee->ioc = ioc;
+
+ bfa_nw_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
+ bfa_ioc_hbfail_init(&cee->hbfail, bfa_cee_hbfail, cee);
+ bfa_nw_ioc_hbfail_register(cee->ioc, &cee->hbfail);
+}
diff --git a/drivers/net/bna/bfa_cee.h b/drivers/net/bna/bfa_cee.h
new file mode 100644
index 000000000000..20543d15b64f
--- /dev/null
+++ b/drivers/net/bna/bfa_cee.h
@@ -0,0 +1,64 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef __BFA_CEE_H__
+#define __BFA_CEE_H__
+
+#include "bfa_defs_cna.h"
+#include "bfa_ioc.h"
+
+typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, enum bfa_status status);
+typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, enum bfa_status status);
+typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, enum bfa_status status);
+typedef void (*bfa_cee_hbfail_cbfn_t) (void *dev, enum bfa_status status);
+
+struct bfa_cee_cbfn {
+ bfa_cee_get_attr_cbfn_t get_attr_cbfn;
+ void *get_attr_cbarg;
+ bfa_cee_get_stats_cbfn_t get_stats_cbfn;
+ void *get_stats_cbarg;
+ bfa_cee_reset_stats_cbfn_t reset_stats_cbfn;
+ void *reset_stats_cbarg;
+};
+
+struct bfa_cee {
+ void *dev;
+ bool get_attr_pending;
+ bool get_stats_pending;
+ bool reset_stats_pending;
+ enum bfa_status get_attr_status;
+ enum bfa_status get_stats_status;
+ enum bfa_status reset_stats_status;
+ struct bfa_cee_cbfn cbfn;
+ struct bfa_ioc_hbfail_notify hbfail;
+ struct bfa_cee_attr *attr;
+ struct bfa_cee_stats *stats;
+ struct bfa_dma attr_dma;
+ struct bfa_dma stats_dma;
+ struct bfa_ioc *ioc;
+ struct bfa_mbox_cmd get_cfg_mb;
+ struct bfa_mbox_cmd get_stats_mb;
+ struct bfa_mbox_cmd reset_stats_mb;
+};
+
+u32 bfa_nw_cee_meminfo(void);
+void bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva,
+ u64 dma_pa);
+void bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev);
+
+#endif /* __BFA_CEE_H__ */
diff --git a/drivers/net/bna/bfa_defs.h b/drivers/net/bna/bfa_defs.h
new file mode 100644
index 000000000000..29c1b8de2c2d
--- /dev/null
+++ b/drivers/net/bna/bfa_defs.h
@@ -0,0 +1,243 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef __BFA_DEFS_H__
+#define __BFA_DEFS_H__
+
+#include "cna.h"
+#include "bfa_defs_status.h"
+#include "bfa_defs_mfg_comm.h"
+
+#define BFA_STRING_32 32
+#define BFA_VERSION_LEN 64
+
+/**
+ * ---------------------- adapter definitions ------------
+ */
+
+/**
+ * BFA adapter level attributes.
+ */
+enum {
+ BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE),
+ /*!< adapter serial num length */
+ BFA_ADAPTER_MODEL_NAME_LEN = 16, /*!< model name length */
+ BFA_ADAPTER_MODEL_DESCR_LEN = 128, /*!< model description length */
+ BFA_ADAPTER_MFG_NAME_LEN = 8, /*!< manufacturer name length */
+ BFA_ADAPTER_SYM_NAME_LEN = 64, /*!< adapter symbolic name length */
+ BFA_ADAPTER_OS_TYPE_LEN = 64, /*!< adapter os type length */
+};
+
+struct bfa_adapter_attr {
+ char manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
+ char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
+ u32 card_type;
+ char model[BFA_ADAPTER_MODEL_NAME_LEN];
+ char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
+ u64 pwwn;
+ char node_symname[FC_SYMNAME_MAX];
+ char hw_ver[BFA_VERSION_LEN];
+ char fw_ver[BFA_VERSION_LEN];
+ char optrom_ver[BFA_VERSION_LEN];
+ char os_type[BFA_ADAPTER_OS_TYPE_LEN];
+ struct bfa_mfg_vpd vpd;
+ struct mac mac;
+
+ u8 nports;
+ u8 max_speed;
+ u8 prototype;
+ char asic_rev;
+
+ u8 pcie_gen;
+ u8 pcie_lanes_orig;
+ u8 pcie_lanes;
+ u8 cna_capable;
+
+ u8 is_mezz;
+ u8 trunk_capable;
+};
+
+/**
+ * ---------------------- IOC definitions ------------
+ */
+
+enum {
+ BFA_IOC_DRIVER_LEN = 16,
+ BFA_IOC_CHIP_REV_LEN = 8,
+};
+
+/**
+ * Driver and firmware versions.
+ */
+struct bfa_ioc_driver_attr {
+ char driver[BFA_IOC_DRIVER_LEN]; /*!< driver name */
+ char driver_ver[BFA_VERSION_LEN]; /*!< driver version */
+ char fw_ver[BFA_VERSION_LEN]; /*!< firmware version */
+ char bios_ver[BFA_VERSION_LEN]; /*!< bios version */
+ char efi_ver[BFA_VERSION_LEN]; /*!< EFI version */
+ char ob_ver[BFA_VERSION_LEN]; /*!< openboot version */
+};
+
+/**
+ * IOC PCI device attributes
+ */
+struct bfa_ioc_pci_attr {
+ u16 vendor_id; /*!< PCI vendor ID */
+ u16 device_id; /*!< PCI device ID */
+ u16 ssid; /*!< subsystem ID */
+ u16 ssvid; /*!< subsystem vendor ID */
+ u32 pcifn; /*!< PCI device function */
+ u32 rsvd; /* padding */
+ char chip_rev[BFA_IOC_CHIP_REV_LEN]; /*!< chip revision */
+};
+
+/**
+ * IOC states
+ */
+enum bfa_ioc_state {
+ BFA_IOC_RESET = 1, /*!< IOC is in reset state */
+ BFA_IOC_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */
+ BFA_IOC_HWINIT = 3, /*!< IOC h/w is being initialized */
+ BFA_IOC_GETATTR = 4, /*!< IOC is being configured */
+ BFA_IOC_OPERATIONAL = 5, /*!< IOC is operational */
+ BFA_IOC_INITFAIL = 6, /*!< IOC hardware failure */
+ BFA_IOC_HBFAIL = 7, /*!< IOC heart-beat failure */
+ BFA_IOC_DISABLING = 8, /*!< IOC is being disabled */
+ BFA_IOC_DISABLED = 9, /*!< IOC is disabled */
+ BFA_IOC_FWMISMATCH = 10, /*!< IOC f/w different from driver's */
+};
+
+/**
+ * IOC firmware stats
+ */
+struct bfa_fw_ioc_stats {
+ u32 enable_reqs;
+ u32 disable_reqs;
+ u32 get_attr_reqs;
+ u32 dbg_sync;
+ u32 dbg_dump;
+ u32 unknown_reqs;
+};
+
+/**
+ * IOC driver stats
+ */
+struct bfa_ioc_drv_stats {
+ u32 ioc_isrs;
+ u32 ioc_enables;
+ u32 ioc_disables;
+ u32 ioc_hbfails;
+ u32 ioc_boots;
+ u32 stats_tmos;
+ u32 hb_count;
+ u32 disable_reqs;
+ u32 enable_reqs;
+ u32 disable_replies;
+ u32 enable_replies;
+};
+
+/**
+ * IOC statistics
+ */
+struct bfa_ioc_stats {
+ struct bfa_ioc_drv_stats drv_stats; /*!< driver IOC stats */
+ struct bfa_fw_ioc_stats fw_stats; /*!< firmware IOC stats */
+};
+
+enum bfa_ioc_type {
+ BFA_IOC_TYPE_FC = 1,
+ BFA_IOC_TYPE_FCoE = 2,
+ BFA_IOC_TYPE_LL = 3,
+};
+
+/**
+ * IOC attributes returned in queries
+ */
+struct bfa_ioc_attr {
+ enum bfa_ioc_type ioc_type;
+ enum bfa_ioc_state state; /*!< IOC state */
+ struct bfa_adapter_attr adapter_attr; /*!< HBA attributes */
+ struct bfa_ioc_driver_attr driver_attr; /*!< driver attr */
+ struct bfa_ioc_pci_attr pci_attr;
+ u8 port_id; /*!< port number */
+ u8 rsvd[7]; /*!< 64bit align */
+};
+
+/**
+ * ---------------------- mfg definitions ------------
+ */
+
+/**
+ * Checksum size
+ */
+#define BFA_MFG_CHKSUM_SIZE 16
+
+#define BFA_MFG_PARTNUM_SIZE 14
+#define BFA_MFG_SUPPLIER_ID_SIZE 10
+#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20
+#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20
+#define BFA_MFG_SUPPLIER_REVISION_SIZE 4
+
+#pragma pack(1)
+
+/**
+ * @brief BFA adapter manufacturing block definition.
+ *
+ * All numerical fields are in big-endian format.
+ */
+struct bfa_mfg_block {
+ u8 version; /*!< manufacturing block version */
+ u8 mfg_sig[3]; /*!< characters 'M', 'F', 'G' */
+ u16 mfgsize; /*!< mfg block size */
+ u16 u16_chksum; /*!< old u16 checksum */
+ char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
+ char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
+ u8 mfg_day; /*!< manufacturing day */
+ u8 mfg_month; /*!< manufacturing month */
+ u16 mfg_year; /*!< manufacturing year */
+ u64 mfg_wwn; /*!< wwn base for this adapter */
+ u8 num_wwn; /*!< number of wwns assigned */
+ u8 mfg_speeds; /*!< speeds allowed for this adapter */
+ u8 rsv[2];
+ char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
+ char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
+ char supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
+ char supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
+ mac_t mfg_mac; /*!< mac address */
+ u8 num_mac; /*!< number of mac addresses */
+ u8 rsv2;
+ u32 mfg_type; /*!< card type */
+ u8 rsv3[108];
+ u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */
+};
+
+#pragma pack()
+
+/**
+ * ---------------------- pci definitions ------------
+ */
+
+#define bfa_asic_id_ct(devid) \
+ ((devid) == PCI_DEVICE_ID_BROCADE_CT || \
+ (devid) == PCI_DEVICE_ID_BROCADE_CT_FC)
+
+#endif /* __BFA_DEFS_H__ */
diff --git a/drivers/net/bna/bfa_defs_cna.h b/drivers/net/bna/bfa_defs_cna.h
new file mode 100644
index 000000000000..7e0a9187bdd5
--- /dev/null
+++ b/drivers/net/bna/bfa_defs_cna.h
@@ -0,0 +1,223 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef __BFA_DEFS_CNA_H__
+#define __BFA_DEFS_CNA_H__
+
+#include "bfa_defs.h"
+
+/**
+ * @brief
+ * FC physical port statistics.
+ */
+struct bfa_port_fc_stats {
+ u64 secs_reset; /*!< Seconds since stats were reset */
+ u64 tx_frames; /*!< Tx frames */
+ u64 tx_words; /*!< Tx words */
+ u64 tx_lip; /*!< Tx LIP */
+ u64 tx_nos; /*!< Tx NOS */
+ u64 tx_ols; /*!< Tx OLS */
+ u64 tx_lr; /*!< Tx LR */
+ u64 tx_lrr; /*!< Tx LRR */
+ u64 rx_frames; /*!< Rx frames */
+ u64 rx_words; /*!< Rx words */
+ u64 lip_count; /*!< Rx LIP */
+ u64 nos_count; /*!< Rx NOS */
+ u64 ols_count; /*!< Rx OLS */
+ u64 lr_count; /*!< Rx LR */
+ u64 lrr_count; /*!< Rx LRR */
+ u64 invalid_crcs; /*!< Rx CRC err frames */
+ u64 invalid_crc_gd_eof; /*!< Rx CRC err good EOF frames */
+ u64 undersized_frm; /*!< Rx undersized frames */
+ u64 oversized_frm; /*!< Rx oversized frames */
+ u64 bad_eof_frm; /*!< Rx frames with bad EOF */
+ u64 error_frames; /*!< Errored frames */
+ u64 dropped_frames; /*!< Dropped frames */
+ u64 link_failures; /*!< Link Failure (LF) count */
+ u64 loss_of_syncs; /*!< Loss of sync count */
+ u64 loss_of_signals; /*!< Loss of signal count */
+ u64 primseq_errs; /*!< Primitive sequence protocol err. */
+ u64 bad_os_count; /*!< Invalid ordered sets */
+ u64 err_enc_out; /*!< Encoding err nonframe_8b10b */
+ u64 err_enc; /*!< Encoding err frame_8b10b */
+};
+
+/**
+ * @brief
+ * Eth Physical Port statistics.
+ */
+struct bfa_port_eth_stats {
+ u64 secs_reset; /*!< Seconds since stats were reset */
+ u64 frame_64; /*!< Frames 64 bytes */
+ u64 frame_65_127; /*!< Frames 65-127 bytes */
+ u64 frame_128_255; /*!< Frames 128-255 bytes */
+ u64 frame_256_511; /*!< Frames 256-511 bytes */
+ u64 frame_512_1023; /*!< Frames 512-1023 bytes */
+ u64 frame_1024_1518; /*!< Frames 1024-1518 bytes */
+ u64 frame_1519_1522; /*!< Frames 1519-1522 bytes */
+ u64 tx_bytes; /*!< Tx bytes */
+ u64 tx_packets; /*!< Tx packets */
+ u64 tx_mcast_packets; /*!< Tx multicast packets */
+ u64 tx_bcast_packets; /*!< Tx broadcast packets */
+ u64 tx_control_frame; /*!< Tx control frame */
+ u64 tx_drop; /*!< Tx drops */
+ u64 tx_jabber; /*!< Tx jabber */
+ u64 tx_fcs_error; /*!< Tx FCS errors */
+ u64 tx_fragments; /*!< Tx fragments */
+ u64 rx_bytes; /*!< Rx bytes */
+ u64 rx_packets; /*!< Rx packets */
+ u64 rx_mcast_packets; /*!< Rx multicast packets */
+ u64 rx_bcast_packets; /*!< Rx broadcast packets */
+ u64 rx_control_frames; /*!< Rx control frames */
+ u64 rx_unknown_opcode; /*!< Rx unknown opcode */
+ u64 rx_drop; /*!< Rx drops */
+ u64 rx_jabber; /*!< Rx jabber */
+ u64 rx_fcs_error; /*!< Rx FCS errors */
+ u64 rx_alignment_error; /*!< Rx alignment errors */
+ u64 rx_frame_length_error; /*!< Rx frame len errors */
+ u64 rx_code_error; /*!< Rx code errors */
+ u64 rx_fragments; /*!< Rx fragments */
+ u64 rx_pause; /*!< Rx pause */
+ u64 rx_zero_pause; /*!< Rx zero pause */
+ u64 tx_pause; /*!< Tx pause */
+ u64 tx_zero_pause; /*!< Tx zero pause */
+ u64 rx_fcoe_pause; /*!< Rx FCoE pause */
+ u64 rx_fcoe_zero_pause; /*!< Rx FCoE zero pause */
+ u64 tx_fcoe_pause; /*!< Tx FCoE pause */
+ u64 tx_fcoe_zero_pause; /*!< Tx FCoE zero pause */
+};
+
+/**
+ * @brief
+ * Port statistics.
+ */
+union bfa_port_stats_u {
+ struct bfa_port_fc_stats fc;
+ struct bfa_port_eth_stats eth;
+};
+
+#pragma pack(1)
+
+#define BFA_CEE_LLDP_MAX_STRING_LEN (128)
+#define BFA_CEE_DCBX_MAX_PRIORITY (8)
+#define BFA_CEE_DCBX_MAX_PGID (8)
+
+#define BFA_CEE_LLDP_SYS_CAP_OTHER 0x0001
+#define BFA_CEE_LLDP_SYS_CAP_REPEATER 0x0002
+#define BFA_CEE_LLDP_SYS_CAP_MAC_BRIDGE 0x0004
+#define BFA_CEE_LLDP_SYS_CAP_WLAN_AP 0x0008
+#define BFA_CEE_LLDP_SYS_CAP_ROUTER 0x0010
+#define BFA_CEE_LLDP_SYS_CAP_TELEPHONE 0x0020
+#define BFA_CEE_LLDP_SYS_CAP_DOCSIS_CD 0x0040
+#define BFA_CEE_LLDP_SYS_CAP_STATION 0x0080
+#define BFA_CEE_LLDP_SYS_CAP_CVLAN 0x0100
+#define BFA_CEE_LLDP_SYS_CAP_SVLAN 0x0200
+#define BFA_CEE_LLDP_SYS_CAP_TPMR 0x0400
+
+/* LLDP string type */
+struct bfa_cee_lldp_str {
+ u8 sub_type;
+ u8 len;
+ u8 rsvd[2];
+ u8 value[BFA_CEE_LLDP_MAX_STRING_LEN];
+};
+
+/* LLDP parameters */
+struct bfa_cee_lldp_cfg {
+ struct bfa_cee_lldp_str chassis_id;
+ struct bfa_cee_lldp_str port_id;
+ struct bfa_cee_lldp_str port_desc;
+ struct bfa_cee_lldp_str sys_name;
+ struct bfa_cee_lldp_str sys_desc;
+ struct bfa_cee_lldp_str mgmt_addr;
+ u16 time_to_live;
+ u16 enabled_system_cap;
+};
+
+enum bfa_cee_dcbx_version {
+ DCBX_PROTOCOL_PRECEE = 1,
+ DCBX_PROTOCOL_CEE = 2,
+};
+
+enum bfa_cee_lls {
+ /* LLS is down because the TLV is not sent by the peer */
+ CEE_LLS_DOWN_NO_TLV = 0,
+ /* LLS is down as advertised by the peer */
+ CEE_LLS_DOWN = 1,
+ CEE_LLS_UP = 2,
+};
+
+/* CEE/DCBX parameters */
+struct bfa_cee_dcbx_cfg {
+ u8 pgid[BFA_CEE_DCBX_MAX_PRIORITY];
+ u8 pg_percentage[BFA_CEE_DCBX_MAX_PGID];
+ u8 pfc_primap; /* bitmap of priorities with PFC enabled */
+ u8 fcoe_primap; /* bitmap of priorities used for FCoE traffic */
+ u8 iscsi_primap; /* bitmap of priorities used for iSCSI traffic */
+ u8 dcbx_version; /* operating version: CEE or preCEE */
+ u8 lls_fcoe; /* FCoE Logical Link Status */
+ u8 lls_lan; /* LAN Logical Link Status */
+ u8 rsvd[2];
+};
+
+/* CEE status */
+/* This is tri-state for the benefit of the port list command */
+enum bfa_cee_status {
+ CEE_UP = 0,
+ CEE_PHY_UP = 1,
+ CEE_LOOPBACK = 2,
+ CEE_PHY_DOWN = 3,
+};
+
+/* CEE Query */
+struct bfa_cee_attr {
+ u8 cee_status;
+ u8 error_reason;
+ struct bfa_cee_lldp_cfg lldp_remote;
+ struct bfa_cee_dcbx_cfg dcbx_remote;
+ mac_t src_mac;
+ u8 link_speed;
+ u8 nw_priority;
+ u8 filler[2];
+};
+
+/* LLDP/DCBX/CEE Statistics */
+struct bfa_cee_stats {
+ u32 lldp_tx_frames; /*!< LLDP Tx Frames */
+ u32 lldp_rx_frames; /*!< LLDP Rx Frames */
+ u32 lldp_rx_frames_invalid; /*!< LLDP Rx Frames invalid */
+ u32 lldp_rx_frames_new; /*!< LLDP Rx Frames new */
+ u32 lldp_tlvs_unrecognized; /*!< LLDP Rx unrecognized TLVs */
+ u32 lldp_rx_shutdown_tlvs; /*!< LLDP Rx shutdown TLVs */
+ u32 lldp_info_aged_out; /*!< LLDP remote info aged out */
+ u32 dcbx_phylink_ups; /*!< DCBX phy link ups */
+ u32 dcbx_phylink_downs; /*!< DCBX phy link downs */
+ u32 dcbx_rx_tlvs; /*!< DCBX Rx TLVs */
+ u32 dcbx_rx_tlvs_invalid; /*!< DCBX Rx TLVs invalid */
+ u32 dcbx_control_tlv_error; /*!< DCBX control TLV errors */
+ u32 dcbx_feature_tlv_error; /*!< DCBX feature TLV errors */
+ u32 dcbx_cee_cfg_new; /*!< DCBX new CEE cfg rcvd */
+ u32 cee_status_down; /*!< CEE status down */
+ u32 cee_status_up; /*!< CEE status up */
+ u32 cee_hw_cfg_changed; /*!< CEE hw cfg changed */
+ u32 cee_rx_invalid_cfg; /*!< CEE invalid cfg */
+};
+
+#pragma pack()
+
+#endif /* __BFA_DEFS_CNA_H__ */
diff --git a/drivers/net/bna/bfa_defs_mfg_comm.h b/drivers/net/bna/bfa_defs_mfg_comm.h
new file mode 100644
index 000000000000..987978fcb3fe
--- /dev/null
+++ b/drivers/net/bna/bfa_defs_mfg_comm.h
@@ -0,0 +1,244 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BFA_DEFS_MFG_COMM_H__
+#define __BFA_DEFS_MFG_COMM_H__
+
+#include "cna.h"
+
+/**
+ * Manufacturing block version
+ */
+#define BFA_MFG_VERSION 2
+#define BFA_MFG_VERSION_UNINIT 0xFF
+
+/**
+ * Manufacturing block encrypted version
+ */
+#define BFA_MFG_ENC_VER 2
+
+/**
+ * Manufacturing block version 1 length
+ */
+#define BFA_MFG_VER1_LEN 128
+
+/**
+ * Manufacturing block header length
+ */
+#define BFA_MFG_HDR_LEN 4
+
+#define BFA_MFG_SERIALNUM_SIZE 11
+#define STRSZ(_n) (((_n) + 4) & ~3)
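+
+/*
+ * Illustrative note (not part of the original patch): STRSZ() rounds a
+ * string length up to a 4-byte multiple with room for a terminator,
+ * e.g. STRSZ(BFA_MFG_SERIALNUM_SIZE) == STRSZ(11) == 12.
+ */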
+
+/**
+ * Manufacturing card type
+ */
+enum {
+ BFA_MFG_TYPE_CB_MAX = 825, /*!< Crossbow card type max */
+ BFA_MFG_TYPE_FC8P2 = 825, /*!< 8G 2port FC card */
+ BFA_MFG_TYPE_FC8P1 = 815, /*!< 8G 1port FC card */
+ BFA_MFG_TYPE_FC4P2 = 425, /*!< 4G 2port FC card */
+ BFA_MFG_TYPE_FC4P1 = 415, /*!< 4G 1port FC card */
+ BFA_MFG_TYPE_CNA10P2 = 1020, /*!< 10G 2port CNA card */
+ BFA_MFG_TYPE_CNA10P1 = 1010, /*!< 10G 1port CNA card */
+ BFA_MFG_TYPE_JAYHAWK = 804, /*!< Jayhawk mezz card */
+ BFA_MFG_TYPE_WANCHESE = 1007, /*!< Wanchese mezz card */
+ BFA_MFG_TYPE_ASTRA = 807, /*!< Astra mezz card */
+ BFA_MFG_TYPE_LIGHTNING_P0 = 902, /*!< Lightning mezz card - old */
+ BFA_MFG_TYPE_LIGHTNING = 1741, /*!< Lightning mezz card */
+ BFA_MFG_TYPE_INVALID = 0, /*!< Invalid card type */
+};
+
+#pragma pack(1)
+
+/**
+ * Check if 1-port card
+ */
+#define bfa_mfg_is_1port(type) (( \
+ (type) == BFA_MFG_TYPE_FC8P1 || \
+ (type) == BFA_MFG_TYPE_FC4P1 || \
+ (type) == BFA_MFG_TYPE_CNA10P1))
+
+/**
+ * Check if Mezz card
+ */
+#define bfa_mfg_is_mezz(type) (( \
+ (type) == BFA_MFG_TYPE_JAYHAWK || \
+ (type) == BFA_MFG_TYPE_WANCHESE || \
+ (type) == BFA_MFG_TYPE_ASTRA || \
+ (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
+ (type) == BFA_MFG_TYPE_LIGHTNING))
+
+/**
+ * Check if card type valid
+ */
+#define bfa_mfg_is_card_type_valid(type) (( \
+ (type) == BFA_MFG_TYPE_FC8P2 || \
+ (type) == BFA_MFG_TYPE_FC8P1 || \
+ (type) == BFA_MFG_TYPE_FC4P2 || \
+ (type) == BFA_MFG_TYPE_FC4P1 || \
+ (type) == BFA_MFG_TYPE_CNA10P2 || \
+ (type) == BFA_MFG_TYPE_CNA10P1 || \
+ bfa_mfg_is_mezz(type)))
+
+/**
+ * Check if the card has old wwn/mac handling
+ */
+#define bfa_mfg_is_old_wwn_mac_model(type) (( \
+ (type) == BFA_MFG_TYPE_FC8P2 || \
+ (type) == BFA_MFG_TYPE_FC8P1 || \
+ (type) == BFA_MFG_TYPE_FC4P2 || \
+ (type) == BFA_MFG_TYPE_FC4P1 || \
+ (type) == BFA_MFG_TYPE_CNA10P2 || \
+ (type) == BFA_MFG_TYPE_CNA10P1 || \
+ (type) == BFA_MFG_TYPE_JAYHAWK || \
+ (type) == BFA_MFG_TYPE_WANCHESE))
+
+#define bfa_mfg_increment_wwn_mac(m, i) \
+do { \
+ u32 t = ((m)[0] << 16) | ((m)[1] << 8) | (m)[2]; \
+ t += (i); \
+ (m)[0] = (t >> 16) & 0xFF; \
+ (m)[1] = (t >> 8) & 0xFF; \
+ (m)[2] = t & 0xFF; \
+} while (0)
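+
+/*
+ * Worked example (illustrative only): with m[] == { 0x00, 0xFF, 0xFF }
+ * and i == 1, the three octets pack into t == 0x00FFFF, t becomes
+ * 0x010000, and m[] unpacks to { 0x01, 0x00, 0x00 }; the last three
+ * bytes of the WWN/MAC behave as a single 24-bit counter with carry
+ * across bytes.
+ */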
+
+#define bfa_mfg_adapter_prop_init_flash(card_type, prop) \
+do { \
+ switch ((card_type)) { \
+ case BFA_MFG_TYPE_FC8P2: \
+ case BFA_MFG_TYPE_JAYHAWK: \
+ case BFA_MFG_TYPE_ASTRA: \
+ (prop) = BFI_ADAPTER_SETP(NPORTS, 2) | \
+ BFI_ADAPTER_SETP(SPEED, 8); \
+ break; \
+ case BFA_MFG_TYPE_FC8P1: \
+ (prop) = BFI_ADAPTER_SETP(NPORTS, 1) | \
+ BFI_ADAPTER_SETP(SPEED, 8); \
+ break; \
+ case BFA_MFG_TYPE_FC4P2: \
+ (prop) = BFI_ADAPTER_SETP(NPORTS, 2) | \
+ BFI_ADAPTER_SETP(SPEED, 4); \
+ break; \
+ case BFA_MFG_TYPE_FC4P1: \
+ (prop) = BFI_ADAPTER_SETP(NPORTS, 1) | \
+ BFI_ADAPTER_SETP(SPEED, 4); \
+ break; \
+ case BFA_MFG_TYPE_CNA10P2: \
+ case BFA_MFG_TYPE_WANCHESE: \
+ case BFA_MFG_TYPE_LIGHTNING_P0: \
+ case BFA_MFG_TYPE_LIGHTNING: \
+ (prop) = BFI_ADAPTER_SETP(NPORTS, 2); \
+ (prop) |= BFI_ADAPTER_SETP(SPEED, 10); \
+ break; \
+ case BFA_MFG_TYPE_CNA10P1: \
+ (prop) = BFI_ADAPTER_SETP(NPORTS, 1); \
+ (prop) |= BFI_ADAPTER_SETP(SPEED, 10); \
+ break; \
+ default: \
+ (prop) = BFI_ADAPTER_UNSUPP; \
+ } \
+} while (0)
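+
+/*
+ * Usage sketch (hypothetical caller, not part of this patch):
+ *
+ *	u32 prop;
+ *
+ *	bfa_mfg_adapter_prop_init_flash(BFA_MFG_TYPE_FC8P2, prop);
+ *
+ * after which prop encodes NPORTS == 2 and SPEED == 8 via
+ * BFI_ADAPTER_SETP().
+ */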
+
+enum {
+ CB_GPIO_TTV = (1), /*!< TTV debug capable cards */
+ CB_GPIO_FC8P2 = (2), /*!< 8G 2port FC card */
+ CB_GPIO_FC8P1 = (3), /*!< 8G 1port FC card */
+ CB_GPIO_FC4P2 = (4), /*!< 4G 2port FC card */
+ CB_GPIO_FC4P1 = (5), /*!< 4G 1port FC card */
+ CB_GPIO_DFLY = (6), /*!< 8G 2port FC mezzanine card */
+ CB_GPIO_PROTO = (1 << 7) /*!< 8G 2port FC prototypes */
+};
+
+#define bfa_mfg_adapter_prop_init_gpio(gpio, card_type, prop) \
+do { \
+ if ((gpio) & CB_GPIO_PROTO) { \
+ (prop) |= BFI_ADAPTER_PROTO; \
+ (gpio) &= ~CB_GPIO_PROTO; \
+ } \
+ switch ((gpio)) { \
+ case CB_GPIO_TTV: \
+ (prop) |= BFI_ADAPTER_TTV; \
+ /* fall through */ \
+ case CB_GPIO_DFLY: \
+ case CB_GPIO_FC8P2: \
+ (prop) |= BFI_ADAPTER_SETP(NPORTS, 2); \
+ (prop) |= BFI_ADAPTER_SETP(SPEED, 8); \
+ (card_type) = BFA_MFG_TYPE_FC8P2; \
+ break; \
+ case CB_GPIO_FC8P1: \
+ (prop) |= BFI_ADAPTER_SETP(NPORTS, 1); \
+ (prop) |= BFI_ADAPTER_SETP(SPEED, 8); \
+ (card_type) = BFA_MFG_TYPE_FC8P1; \
+ break; \
+ case CB_GPIO_FC4P2: \
+ (prop) |= BFI_ADAPTER_SETP(NPORTS, 2); \
+ (prop) |= BFI_ADAPTER_SETP(SPEED, 4); \
+ (card_type) = BFA_MFG_TYPE_FC4P2; \
+ break; \
+ case CB_GPIO_FC4P1: \
+ (prop) |= BFI_ADAPTER_SETP(NPORTS, 1); \
+ (prop) |= BFI_ADAPTER_SETP(SPEED, 4); \
+ (card_type) = BFA_MFG_TYPE_FC4P1; \
+ break; \
+ default: \
+ (prop) |= BFI_ADAPTER_UNSUPP; \
+ (card_type) = BFA_MFG_TYPE_INVALID; \
+ } \
+} while (0)
+
+/**
+ * VPD data length
+ */
+#define BFA_MFG_VPD_LEN 512
+#define BFA_MFG_VPD_LEN_INVALID 0
+
+#define BFA_MFG_VPD_PCI_HDR_OFF 137
+#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /*!< version mask 3 bits */
+#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /*!< vendor mask 5 bits */
+
+/**
+ * VPD vendor tag
+ */
+enum {
+ BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */
+ BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */
+ BFA_MFG_VPD_HP = 2, /*!< vendor HP */
+ BFA_MFG_VPD_DELL = 3, /*!< vendor DELL */
+ BFA_MFG_VPD_PCI_IBM = 0x08, /*!< PCI VPD IBM */
+ BFA_MFG_VPD_PCI_HP = 0x10, /*!< PCI VPD HP */
+ BFA_MFG_VPD_PCI_DELL = 0x20, /*!< PCI VPD DELL */
+ BFA_MFG_VPD_PCI_BRCD = 0xf8, /*!< PCI VPD Brocade */
+};
+
+/**
+ * @brief BFA adapter flash vpd data definition.
+ *
+ * All numerical fields are in big-endian format.
+ */
+struct bfa_mfg_vpd {
+ u8 version; /*!< vpd data version */
+ u8 vpd_sig[3]; /*!< characters 'V', 'P', 'D' */
+ u8 chksum; /*!< u8 checksum */
+ u8 vendor; /*!< vendor */
+ u8 len; /*!< vpd data length excluding header */
+ u8 rsv;
+ u8 data[BFA_MFG_VPD_LEN]; /*!< vpd data */
+};
+
+#pragma pack()
+
+#endif /* __BFA_DEFS_MFG_COMM_H__ */
diff --git a/drivers/net/bna/bfa_defs_status.h b/drivers/net/bna/bfa_defs_status.h
new file mode 100644
index 000000000000..af951126375c
--- /dev/null
+++ b/drivers/net/bna/bfa_defs_status.h
@@ -0,0 +1,216 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BFA_DEFS_STATUS_H__
+#define __BFA_DEFS_STATUS_H__
+
+/**
+ * API status return values
+ *
+ * NOTE: The error msgs are auto generated from the comments. Only single
+ * line comments are supported.
+ */
+enum bfa_status {
+ BFA_STATUS_OK = 0,
+ BFA_STATUS_FAILED = 1,
+ BFA_STATUS_EINVAL = 2,
+ BFA_STATUS_ENOMEM = 3,
+ BFA_STATUS_ENOSYS = 4,
+ BFA_STATUS_ETIMER = 5,
+ BFA_STATUS_EPROTOCOL = 6,
+ BFA_STATUS_ENOFCPORTS = 7,
+ BFA_STATUS_NOFLASH = 8,
+ BFA_STATUS_BADFLASH = 9,
+ BFA_STATUS_SFP_UNSUPP = 10,
+ BFA_STATUS_UNKNOWN_VFID = 11,
+ BFA_STATUS_DATACORRUPTED = 12,
+ BFA_STATUS_DEVBUSY = 13,
+ BFA_STATUS_ABORTED = 14,
+ BFA_STATUS_NODEV = 15,
+ BFA_STATUS_HDMA_FAILED = 16,
+ BFA_STATUS_FLASH_BAD_LEN = 17,
+ BFA_STATUS_UNKNOWN_LWWN = 18,
+ BFA_STATUS_UNKNOWN_RWWN = 19,
+ BFA_STATUS_FCPT_LS_RJT = 20,
+ BFA_STATUS_VPORT_EXISTS = 21,
+ BFA_STATUS_VPORT_MAX = 22,
+ BFA_STATUS_UNSUPP_SPEED = 23,
+ BFA_STATUS_INVLD_DFSZ = 24,
+ BFA_STATUS_CNFG_FAILED = 25,
+ BFA_STATUS_CMD_NOTSUPP = 26,
+ BFA_STATUS_NO_ADAPTER = 27,
+ BFA_STATUS_LINKDOWN = 28,
+ BFA_STATUS_FABRIC_RJT = 29,
+ BFA_STATUS_UNKNOWN_VWWN = 30,
+ BFA_STATUS_NSLOGIN_FAILED = 31,
+ BFA_STATUS_NO_RPORTS = 32,
+ BFA_STATUS_NSQUERY_FAILED = 33,
+ BFA_STATUS_PORT_OFFLINE = 34,
+ BFA_STATUS_RPORT_OFFLINE = 35,
+ BFA_STATUS_TGTOPEN_FAILED = 36,
+ BFA_STATUS_BAD_LUNS = 37,
+ BFA_STATUS_IO_FAILURE = 38,
+ BFA_STATUS_NO_FABRIC = 39,
+ BFA_STATUS_EBADF = 40,
+ BFA_STATUS_EINTR = 41,
+ BFA_STATUS_EIO = 42,
+ BFA_STATUS_ENOTTY = 43,
+ BFA_STATUS_ENXIO = 44,
+ BFA_STATUS_EFOPEN = 45,
+ BFA_STATUS_VPORT_WWN_BP = 46,
+ BFA_STATUS_PORT_NOT_DISABLED = 47,
+ BFA_STATUS_BADFRMHDR = 48,
+ BFA_STATUS_BADFRMSZ = 49,
+ BFA_STATUS_MISSINGFRM = 50,
+ BFA_STATUS_LINKTIMEOUT = 51,
+ BFA_STATUS_NO_FCPIM_NEXUS = 52,
+ BFA_STATUS_CHECKSUM_FAIL = 53,
+ BFA_STATUS_GZME_FAILED = 54,
+ BFA_STATUS_SCSISTART_REQD = 55,
+ BFA_STATUS_IOC_FAILURE = 56,
+ BFA_STATUS_INVALID_WWN = 57,
+ BFA_STATUS_MISMATCH = 58,
+ BFA_STATUS_IOC_ENABLED = 59,
+ BFA_STATUS_ADAPTER_ENABLED = 60,
+ BFA_STATUS_IOC_NON_OP = 61,
+ BFA_STATUS_ADDR_MAP_FAILURE = 62,
+ BFA_STATUS_SAME_NAME = 63,
+ BFA_STATUS_PENDING = 64,
+ BFA_STATUS_8G_SPD = 65,
+ BFA_STATUS_4G_SPD = 66,
+ BFA_STATUS_AD_IS_ENABLE = 67,
+ BFA_STATUS_EINVAL_TOV = 68,
+ BFA_STATUS_EINVAL_QDEPTH = 69,
+ BFA_STATUS_VERSION_FAIL = 70,
+ BFA_STATUS_DIAG_BUSY = 71,
+ BFA_STATUS_BEACON_ON = 72,
+ BFA_STATUS_BEACON_OFF = 73,
+ BFA_STATUS_LBEACON_ON = 74,
+ BFA_STATUS_LBEACON_OFF = 75,
+ BFA_STATUS_PORT_NOT_INITED = 76,
+ BFA_STATUS_RPSC_ENABLED = 77,
+ BFA_STATUS_ENOFSAVE = 78,
+ BFA_STATUS_BAD_FILE = 79,
+ BFA_STATUS_RLIM_EN = 80,
+ BFA_STATUS_RLIM_DIS = 81,
+ BFA_STATUS_IOC_DISABLED = 82,
+ BFA_STATUS_ADAPTER_DISABLED = 83,
+ BFA_STATUS_BIOS_DISABLED = 84,
+ BFA_STATUS_AUTH_ENABLED = 85,
+ BFA_STATUS_AUTH_DISABLED = 86,
+ BFA_STATUS_ERROR_TRL_ENABLED = 87,
+ BFA_STATUS_ERROR_QOS_ENABLED = 88,
+ BFA_STATUS_NO_SFP_DEV = 89,
+ BFA_STATUS_MEMTEST_FAILED = 90,
+ BFA_STATUS_INVALID_DEVID = 91,
+ BFA_STATUS_QOS_ENABLED = 92,
+ BFA_STATUS_QOS_DISABLED = 93,
+ BFA_STATUS_INCORRECT_DRV_CONFIG = 94,
+ BFA_STATUS_REG_FAIL = 95,
+ BFA_STATUS_IM_INV_CODE = 96,
+ BFA_STATUS_IM_INV_VLAN = 97,
+ BFA_STATUS_IM_INV_ADAPT_NAME = 98,
+ BFA_STATUS_IM_LOW_RESOURCES = 99,
+ BFA_STATUS_IM_VLANID_IS_PVID = 100,
+ BFA_STATUS_IM_VLANID_EXISTS = 101,
+ BFA_STATUS_IM_FW_UPDATE_FAIL = 102,
+ BFA_STATUS_PORTLOG_ENABLED = 103,
+ BFA_STATUS_PORTLOG_DISABLED = 104,
+ BFA_STATUS_FILE_NOT_FOUND = 105,
+ BFA_STATUS_QOS_FC_ONLY = 106,
+ BFA_STATUS_RLIM_FC_ONLY = 107,
+ BFA_STATUS_CT_SPD = 108,
+ BFA_STATUS_LEDTEST_OP = 109,
+ BFA_STATUS_CEE_NOT_DN = 110,
+ BFA_STATUS_10G_SPD = 111,
+ BFA_STATUS_IM_INV_TEAM_NAME = 112,
+ BFA_STATUS_IM_DUP_TEAM_NAME = 113,
+ BFA_STATUS_IM_ADAPT_ALREADY_IN_TEAM = 114,
+ BFA_STATUS_IM_ADAPT_HAS_VLANS = 115,
+ BFA_STATUS_IM_PVID_MISMATCH = 116,
+ BFA_STATUS_IM_LINK_SPEED_MISMATCH = 117,
+ BFA_STATUS_IM_MTU_MISMATCH = 118,
+ BFA_STATUS_IM_RSS_MISMATCH = 119,
+ BFA_STATUS_IM_HDS_MISMATCH = 120,
+ BFA_STATUS_IM_OFFLOAD_MISMATCH = 121,
+ BFA_STATUS_IM_PORT_PARAMS = 122,
+ BFA_STATUS_IM_PORT_NOT_IN_TEAM = 123,
+ BFA_STATUS_IM_CANNOT_REM_PRI = 124,
+ BFA_STATUS_IM_MAX_PORTS_REACHED = 125,
+ BFA_STATUS_IM_LAST_PORT_DELETE = 126,
+ BFA_STATUS_IM_NO_DRIVER = 127,
+ BFA_STATUS_IM_MAX_VLANS_REACHED = 128,
+ BFA_STATUS_TOMCAT_SPD_NOT_ALLOWED = 129,
+ BFA_STATUS_NO_MINPORT_DRIVER = 130,
+ BFA_STATUS_CARD_TYPE_MISMATCH = 131,
+ BFA_STATUS_BAD_ASICBLK = 132,
+ BFA_STATUS_NO_DRIVER = 133,
+ BFA_STATUS_INVALID_MAC = 134,
+ BFA_STATUS_IM_NO_VLAN = 135,
+ BFA_STATUS_IM_ETH_LB_FAILED = 136,
+ BFA_STATUS_IM_PVID_REMOVE = 137,
+ BFA_STATUS_IM_PVID_EDIT = 138,
+ BFA_STATUS_CNA_NO_BOOT = 139,
+ BFA_STATUS_IM_PVID_NON_ZERO = 140,
+ BFA_STATUS_IM_INETCFG_LOCK_FAILED = 141,
+ BFA_STATUS_IM_GET_INETCFG_FAILED = 142,
+ BFA_STATUS_IM_NOT_BOUND = 143,
+ BFA_STATUS_INSUFFICIENT_PERMS = 144,
+ BFA_STATUS_IM_INV_VLAN_NAME = 145,
+ BFA_STATUS_CMD_NOTSUPP_CNA = 146,
+ BFA_STATUS_IM_PASSTHRU_EDIT = 147,
+ BFA_STATUS_IM_BIND_FAILED = 148,
+ BFA_STATUS_IM_UNBIND_FAILED = 149,
+ BFA_STATUS_IM_PORT_IN_TEAM = 150,
+ BFA_STATUS_IM_VLAN_NOT_FOUND = 151,
+ BFA_STATUS_IM_TEAM_NOT_FOUND = 152,
+ BFA_STATUS_IM_TEAM_CFG_NOT_ALLOWED = 153,
+ BFA_STATUS_PBC = 154,
+ BFA_STATUS_DEVID_MISSING = 155,
+ BFA_STATUS_BAD_FWCFG = 156,
+ BFA_STATUS_CREATE_FILE = 157,
+ BFA_STATUS_INVALID_VENDOR = 158,
+ BFA_STATUS_SFP_NOT_READY = 159,
+ BFA_STATUS_FLASH_UNINIT = 160,
+ BFA_STATUS_FLASH_EMPTY = 161,
+ BFA_STATUS_FLASH_CKFAIL = 162,
+ BFA_STATUS_TRUNK_UNSUPP = 163,
+ BFA_STATUS_TRUNK_ENABLED = 164,
+ BFA_STATUS_TRUNK_DISABLED = 165,
+ BFA_STATUS_TRUNK_ERROR_TRL_ENABLED = 166,
+ BFA_STATUS_BOOT_CODE_UPDATED = 167,
+ BFA_STATUS_BOOT_VERSION = 168,
+ BFA_STATUS_CARDTYPE_MISSING = 169,
+ BFA_STATUS_INVALID_CARDTYPE = 170,
+ BFA_STATUS_NO_TOPOLOGY_FOR_CNA = 171,
+ BFA_STATUS_IM_VLAN_OVER_TEAM_DELETE_FAILED = 172,
+ BFA_STATUS_ETHBOOT_ENABLED = 173,
+ BFA_STATUS_ETHBOOT_DISABLED = 174,
+ BFA_STATUS_IOPROFILE_OFF = 175,
+ BFA_STATUS_NO_PORT_INSTANCE = 176,
+ BFA_STATUS_BOOT_CODE_TIMEDOUT = 177,
+ BFA_STATUS_NO_VPORT_LOCK = 178,
+ BFA_STATUS_VPORT_NO_CNFG = 179,
+ BFA_STATUS_MAX_VAL
+};
+
+enum bfa_eproto_status {
+ BFA_EPROTO_BAD_ACCEPT = 0,
+ BFA_EPROTO_UNKNOWN_RSP = 1
+};
+
+#endif /* __BFA_DEFS_STATUS_H__ */
diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
new file mode 100644
index 000000000000..73493de98de5
--- /dev/null
+++ b/drivers/net/bna/bfa_ioc.c
@@ -0,0 +1,1738 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#include "bfa_ioc.h"
+#include "cna.h"
+#include "bfi.h"
+#include "bfi_ctreg.h"
+#include "bfa_defs.h"
+
+/**
+ * IOC local definitions
+ */
+
+#define bfa_ioc_timer_start(__ioc) \
+ mod_timer(&(__ioc)->ioc_timer, jiffies + \
+ msecs_to_jiffies(BFA_IOC_TOV))
+#define bfa_ioc_timer_stop(__ioc) del_timer(&(__ioc)->ioc_timer)
+
+#define bfa_ioc_recovery_timer_start(__ioc) \
+ mod_timer(&(__ioc)->ioc_timer, jiffies + \
+ msecs_to_jiffies(BFA_IOC_TOV_RECOVER))
+
+#define bfa_sem_timer_start(__ioc) \
+ mod_timer(&(__ioc)->sem_timer, jiffies + \
+ msecs_to_jiffies(BFA_IOC_HWSEM_TOV))
+#define bfa_sem_timer_stop(__ioc) del_timer(&(__ioc)->sem_timer)
+
+#define bfa_hb_timer_start(__ioc) \
+ mod_timer(&(__ioc)->hb_timer, jiffies + \
+ msecs_to_jiffies(BFA_IOC_HB_TOV))
+#define bfa_hb_timer_stop(__ioc) del_timer(&(__ioc)->hb_timer)
+
+/**
+ * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
+ */
+
+#define bfa_ioc_firmware_lock(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
+#define bfa_ioc_firmware_unlock(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
+#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
+#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
+#define bfa_ioc_notify_hbfail(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
+
+#define bfa_ioc_is_optrom(__ioc) \
+ (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
+
+#define bfa_ioc_mbox_cmd_pending(__ioc) \
+ (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
+ readl((__ioc)->ioc_regs.hfn_mbox_cmd))
+
+bool bfa_nw_auto_recover = true;
+
+/*
+ * forward declarations
+ */
+static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
+static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
+static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
+static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
+static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
+static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
+static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
+static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
+static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
+static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
+static void bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc);
+static void bfa_ioc_recover(struct bfa_ioc *ioc);
+static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
+static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
+static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
+static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
+ u32 boot_param);
+static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
+static u32 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr);
+static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
+ char *serial_num);
+static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
+ char *fw_ver);
+static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
+ char *chip_rev);
+static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
+ char *optrom_ver);
+static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
+ char *manufacturer);
+static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
+static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
+static mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc);
+
+/**
+ * IOC state machine events
+ */
+enum ioc_event {
+ IOC_E_ENABLE = 1, /*!< IOC enable request */
+ IOC_E_DISABLE = 2, /*!< IOC disable request */
+ IOC_E_TIMEOUT = 3, /*!< f/w response timeout */
+ IOC_E_FWREADY = 4, /*!< f/w initialization done */
+ IOC_E_FWRSP_GETATTR = 5, /*!< IOC get attribute response */
+ IOC_E_FWRSP_ENABLE = 6, /*!< enable f/w response */
+ IOC_E_FWRSP_DISABLE = 7, /*!< disable f/w response */
+ IOC_E_HBFAIL = 8, /*!< heartbeat failure */
+ IOC_E_HWERROR = 9, /*!< hardware error interrupt */
+ IOC_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */
+ IOC_E_DETACH = 11, /*!< driver detach cleanup */
+};
+
+bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
+
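+/*
+ * Note (illustrative): each bfa_fsm_state_decl() above forward-declares
+ * a state handler (e.g. bfa_ioc_sm_reset()) and its entry action
+ * (bfa_ioc_sm_reset_entry()); both are defined later in this file and
+ * referenced from the state table that follows.
+ */
+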
+static struct bfa_sm_table ioc_sm_table[] = {
+ {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
+ {BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
+ {BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
+ {BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
+ {BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
+ {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
+ {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
+ {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
+ {BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
+ {BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
+ {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
+ {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
+};
+
+/**
+ * Reset entry actions -- initialize state machine
+ */
+static void
+bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
+{
+ ioc->retry_count = 0;
+ ioc->auto_recover = bfa_nw_auto_recover;
+}
+
+/**
+ * Beginning state. IOC is in reset state.
+ */
+static void
+bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_ENABLE:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_disable_comp(ioc);
+ break;
+
+ case IOC_E_DETACH:
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/**
+ * Semaphore should be acquired for version check.
+ */
+static void
+bfa_ioc_sm_fwcheck_entry(struct bfa_ioc *ioc)
+{
+ bfa_ioc_hw_sem_get(ioc);
+}
+
+/**
+ * Awaiting h/w semaphore to continue with version check.
+ */
+static void
+bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_SEMLOCKED:
+ if (bfa_ioc_firmware_lock(ioc)) {
+ ioc->retry_count = 0;
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
+ } else {
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
+ }
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_disable_comp(ioc);
+ /* fall through */
+
+ case IOC_E_DETACH:
+ bfa_ioc_hw_sem_get_cancel(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ break;
+
+ case IOC_E_FWREADY:
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/**
+ * Notify enable completion callback and generate mismatch AEN.
+ */
+static void
+bfa_ioc_sm_mismatch_entry(struct bfa_ioc *ioc)
+{
+ /**
+ * Provide enable completion callback and AEN notification only once.
+ */
+ if (ioc->retry_count == 0)
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ ioc->retry_count++;
+ bfa_ioc_timer_start(ioc);
+}
+
+/**
+ * Awaiting firmware version match.
+ */
+static void
+bfa_ioc_sm_mismatch(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_TIMEOUT:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_disable_comp(ioc);
+ /* fall through */
+
+ case IOC_E_DETACH:
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ break;
+
+ case IOC_E_FWREADY:
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/**
+ * Request for semaphore.
+ */
+static void
+bfa_ioc_sm_semwait_entry(struct bfa_ioc *ioc)
+{
+ bfa_ioc_hw_sem_get(ioc);
+}
+
+/**
+ * Awaiting semaphore for h/w initialization.
+ */
+static void
+bfa_ioc_sm_semwait(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_SEMLOCKED:
+ ioc->retry_count = 0;
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_hw_sem_get_cancel(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_ioc_sm_hwinit_entry(struct bfa_ioc *ioc)
+{
+ bfa_ioc_timer_start(ioc);
+ bfa_ioc_reset(ioc, false);
+}
+
+/**
+ * @brief
+ * Hardware is being initialized. Interrupts are enabled.
+ * Holding hardware semaphore lock.
+ */
+static void
+bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_FWREADY:
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
+ break;
+
+ case IOC_E_HWERROR:
+ bfa_ioc_timer_stop(ioc);
+ /* fall through */
+
+ case IOC_E_TIMEOUT:
+ ioc->retry_count++;
+ if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
+ bfa_ioc_timer_start(ioc);
+ bfa_ioc_reset(ioc, true);
+ break;
+ }
+
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
+{
+ bfa_ioc_timer_start(ioc);
+ bfa_ioc_send_enable(ioc);
+}
+
+/**
+ * Host IOC function is being enabled, awaiting response from firmware.
+ * Semaphore is acquired.
+ */
+static void
+bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_FWRSP_ENABLE:
+ bfa_ioc_timer_stop(ioc);
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
+ break;
+
+ case IOC_E_HWERROR:
+ bfa_ioc_timer_stop(ioc);
+ /* fall through */
+
+ case IOC_E_TIMEOUT:
+ ioc->retry_count++;
+ if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
+ writel(BFI_IOC_UNINIT,
+ ioc->ioc_regs.ioc_fwstate);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
+ break;
+ }
+
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_timer_stop(ioc);
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ case IOC_E_FWREADY:
+ bfa_ioc_send_enable(ioc);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
+{
+ bfa_ioc_timer_start(ioc);
+ bfa_ioc_send_getattr(ioc);
+}
+
+/**
+ * @brief
+ * IOC configuration in progress. Timer is active.
+ */
+static void
+bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_FWRSP_GETATTR:
+ bfa_ioc_timer_stop(ioc);
+ bfa_ioc_check_attr_wwns(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
+ break;
+
+ case IOC_E_HWERROR:
+ bfa_ioc_timer_stop(ioc);
+ /* fall through */
+
+ case IOC_E_TIMEOUT:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
+{
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
+ bfa_ioc_hb_monitor(ioc);
+}
+
+static void
+bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_ENABLE:
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_hb_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+ break;
+
+ case IOC_E_HWERROR:
+ case IOC_E_FWREADY:
+ /**
+ * Hard error or IOC recovery by other function.
+ * Treat it same as heartbeat failure.
+ */
+ bfa_ioc_hb_stop(ioc);
+ /* !!! fall through !!! */
+
+ case IOC_E_HBFAIL:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
+{
+ bfa_ioc_timer_start(ioc);
+ bfa_ioc_send_disable(ioc);
+}
+
+/**
+ * IOC is being disabled
+ */
+static void
+bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_FWRSP_DISABLE:
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ case IOC_E_HWERROR:
+ bfa_ioc_timer_stop(ioc);
+ /*
+ * !!! fall through !!!
+ */
+
+ case IOC_E_TIMEOUT:
+ writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/**
+ * IOC disable completion entry.
+ */
+static void
+bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
+{
+ bfa_ioc_disable_comp(ioc);
+}
+
+static void
+bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_ENABLE:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
+ break;
+
+ case IOC_E_DISABLE:
+ ioc->cbfn->disable_cbfn(ioc->bfa);
+ break;
+
+ case IOC_E_FWREADY:
+ break;
+
+ case IOC_E_DETACH:
+ bfa_ioc_firmware_unlock(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_ioc_sm_initfail_entry(struct bfa_ioc *ioc)
+{
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ bfa_ioc_timer_start(ioc);
+}
+
+/**
+ * @brief
+ * Hardware initialization failed.
+ */
+static void
+bfa_ioc_sm_initfail(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_DISABLE:
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ case IOC_E_DETACH:
+ bfa_ioc_timer_stop(ioc);
+ bfa_ioc_firmware_unlock(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ break;
+
+ case IOC_E_TIMEOUT:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_ioc_sm_hbfail_entry(struct bfa_ioc *ioc)
+{
+ struct list_head *qe;
+ struct bfa_ioc_hbfail_notify *notify;
+
+ /**
+ * Mark IOC as failed in hardware and stop firmware.
+ */
+ bfa_ioc_lpu_stop(ioc);
+ writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+
+ /**
+ * Notify other functions on HB failure.
+ */
+ bfa_ioc_notify_hbfail(ioc);
+
+ /**
+ * Notify driver and common modules registered for notification.
+ */
+ ioc->cbfn->hbfail_cbfn(ioc->bfa);
+ list_for_each(qe, &ioc->hb_notify_q) {
+ notify = (struct bfa_ioc_hbfail_notify *) qe;
+ notify->cbfn(notify->cbarg);
+ }
+
+ /**
+ * Flush any queued up mailbox requests.
+ */
+ bfa_ioc_mbox_hbfail(ioc);
+
+ /**
+ * Trigger auto-recovery after a delay.
+ */
+ if (ioc->auto_recover)
+ mod_timer(&ioc->ioc_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_TOV_RECOVER));
+}
+
+/**
+ * @brief
+ * IOC heartbeat failure.
+ */
+static void
+bfa_ioc_sm_hbfail(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+
+ case IOC_E_ENABLE:
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ break;
+
+ case IOC_E_DISABLE:
+ if (ioc->auto_recover)
+ bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ case IOC_E_TIMEOUT:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
+ break;
+
+ case IOC_E_FWREADY:
+ /**
+ * Recovery is already initiated by other function.
+ */
+ break;
+
+ case IOC_E_HWERROR:
+ /*
+ * HB failure notification, ignore.
+ */
+ break;
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/**
+ * BFA IOC private functions
+ */
+
+static void
+bfa_ioc_disable_comp(struct bfa_ioc *ioc)
+{
+ struct list_head *qe;
+ struct bfa_ioc_hbfail_notify *notify;
+
+ ioc->cbfn->disable_cbfn(ioc->bfa);
+
+ /**
+ * Notify common modules registered for notification.
+ */
+ list_for_each(qe, &ioc->hb_notify_q) {
+ notify = (struct bfa_ioc_hbfail_notify *) qe;
+ notify->cbfn(notify->cbarg);
+ }
+}
+
+void
+bfa_nw_ioc_sem_timeout(void *ioc_arg)
+{
+ struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
+
+ bfa_ioc_hw_sem_get(ioc);
+}
+
+bool
+bfa_nw_ioc_sem_get(void __iomem *sem_reg)
+{
+ u32 r32;
+ int cnt = 0;
+#define BFA_SEM_SPINCNT 3000
+
+ r32 = readl(sem_reg);
+
+ while (r32 && (cnt < BFA_SEM_SPINCNT)) {
+ cnt++;
+ udelay(2);
+ r32 = readl(sem_reg);
+ }
+
+ if (r32 == 0)
+ return true;
+
+ BUG_ON(!(cnt < BFA_SEM_SPINCNT));
+ return false;
+}
+
+void
+bfa_nw_ioc_sem_release(void __iomem *sem_reg)
+{
+ writel(1, sem_reg);
+}
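+
+/*
+ * Usage sketch (illustrative): the two helpers above bracket access to
+ * a shared chip resource, as bfa_ioc_pll_init() does later in this
+ * file:
+ *
+ *	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
+ *	(touch the shared hardware resource)
+ *	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
+ */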
+
+static void
+bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
+{
+ u32 r32;
+
+ /**
+ * The first read of the semaphore register returns 0 (lock acquired);
+ * subsequent reads return 1. The semaphore is released by writing 1 to
+ * the register.
+ */
+ r32 = readl(ioc->ioc_regs.ioc_sem_reg);
+ if (r32 == 0) {
+ bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
+ return;
+ }
+
+ mod_timer(&ioc->sem_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
+}
+
+void
+bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
+{
+ writel(1, ioc->ioc_regs.ioc_sem_reg);
+}
+
+static void
+bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
+{
+ del_timer(&ioc->sem_timer);
+}
+
+/**
+ * @brief
+ * Initialize LPU local memory (aka secondary memory / SRAM)
+ */
+static void
+bfa_ioc_lmem_init(struct bfa_ioc *ioc)
+{
+ u32 pss_ctl;
+ int i;
+#define PSS_LMEM_INIT_TIME 10000
+
+ pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
+ pss_ctl &= ~__PSS_LMEM_RESET;
+ pss_ctl |= __PSS_LMEM_INIT_EN;
+
+ /*
+ * i2c workaround 12.5khz clock
+ */
+ pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
+ writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
+
+ /**
+ * wait for memory initialization to be complete
+ */
+ i = 0;
+ do {
+ pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
+ i++;
+ } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
+
+ /**
+ * If memory initialization is not successful, IOC timeout will catch
+ * such failures.
+ */
+ BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
+
+ pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
+ writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
+}
+
+static void
+bfa_ioc_lpu_start(struct bfa_ioc *ioc)
+{
+ u32 pss_ctl;
+
+ /**
+ * Take processor out of reset.
+ */
+ pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
+ pss_ctl &= ~__PSS_LPU0_RESET;
+
+ writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
+}
+
+static void
+bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
+{
+ u32 pss_ctl;
+
+ /**
+ * Put processors in reset.
+ */
+ pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
+ pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
+
+ writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
+}
+
+/**
+ * Fetch the running firmware's image header from IOC shared memory.
+ */
+void
+bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
+{
+ u32 pgnum, pgoff;
+ u32 loff = 0;
+ int i;
+ u32 *fwsig = (u32 *) fwhdr;
+
+ pgnum = bfa_ioc_smem_pgnum(ioc, loff);
+ pgoff = bfa_ioc_smem_pgoff(ioc, loff);
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+ for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
+ i++) {
+ fwsig[i] =
+ swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
+ loff += sizeof(u32);
+ }
+}
+
+/**
+ * Returns true if the given firmware header matches the driver's
+ * firmware image.
+ */
+bool
+bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
+{
+ struct bfi_ioc_image_hdr *drv_fwhdr;
+ int i;
+
+ drv_fwhdr = (struct bfi_ioc_image_hdr *)
+ bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
+
+ for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
+ if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * Return true if current running version is valid. Firmware signature and
+ * execution context (driver/bios) must match.
+ */
+static bool
+bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
+{
+ struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
+
+ /**
+ * If bios/efi boot (flash based) -- return true
+ */
+ if (bfa_ioc_is_optrom(ioc))
+ return true;
+
+ bfa_nw_ioc_fwver_get(ioc, &fwhdr);
+ drv_fwhdr = (struct bfi_ioc_image_hdr *)
+ bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
+
+ if (fwhdr.signature != drv_fwhdr->signature)
+ return false;
+
+ if (fwhdr.exec != drv_fwhdr->exec)
+ return false;
+
+ return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
+}
+
+/**
+ * Conditionally flush any pending message from firmware at start.
+ */
+static void
+bfa_ioc_msgflush(struct bfa_ioc *ioc)
+{
+ u32 r32;
+
+ r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
+ if (r32)
+ writel(1, ioc->ioc_regs.lpu_mbox_cmd);
+}
+
+/**
+ * @img ioc_init_logic.jpg
+ */
+static void
+bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
+{
+ enum bfi_ioc_state ioc_fwstate;
+ bool fwvalid;
+
+ ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+ if (force)
+ ioc_fwstate = BFI_IOC_UNINIT;
+
+ /**
+ * check if firmware is valid
+ */
+ fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
+ false : bfa_ioc_fwver_valid(ioc);
+
+ if (!fwvalid) {
+ bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+ return;
+ }
+
+ /**
+ * If hardware initialization is in progress (initiated by the other IOC),
+ * just wait for an initialization completion interrupt.
+ */
+ if (ioc_fwstate == BFI_IOC_INITING) {
+ ioc->cbfn->reset_cbfn(ioc->bfa);
+ return;
+ }
+
+ /**
+ * If IOC function is disabled and firmware version is same,
+ * just re-enable IOC.
+ *
+ * If option rom, IOC must not be in operational state. With
+ * convergence, IOC will be in operational state when 2nd driver
+ * is loaded.
+ */
+ if (ioc_fwstate == BFI_IOC_DISABLED ||
+ (!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
+ /**
+ * When using MSI-X any pending firmware ready event should
+ * be flushed. Otherwise MSI-X interrupts are not delivered.
+ */
+ bfa_ioc_msgflush(ioc);
+ ioc->cbfn->reset_cbfn(ioc->bfa);
+ bfa_fsm_send_event(ioc, IOC_E_FWREADY);
+ return;
+ }
+
+ /**
+ * Initialize the h/w for any other states.
+ */
+ bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+}
+
+void
+bfa_nw_ioc_timeout(void *ioc_arg)
+{
+ struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
+
+ bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
+}
+
+static void
+bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
+{
+ u32 *msgp = (u32 *) ioc_msg;
+ u32 i;
+
+ BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));
+
+ /*
+ * first write msg to mailbox registers
+ */
+ for (i = 0; i < len / sizeof(u32); i++)
+ writel(cpu_to_le32(msgp[i]),
+ ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
+
+ for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
+ writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
+
+ /*
+ * write 1 to mailbox CMD to trigger LPU event
+ */
+ writel(1, ioc->ioc_regs.hfn_mbox_cmd);
+ (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
+}
+
+static void
+bfa_ioc_send_enable(struct bfa_ioc *ioc)
+{
+ struct bfi_ioc_ctrl_req enable_req;
+ struct timeval tv;
+
+ bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
+ bfa_ioc_portid(ioc));
+ enable_req.ioc_class = ioc->ioc_mc;
+ do_gettimeofday(&tv);
+ enable_req.tv_sec = ntohl(tv.tv_sec);
+ bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
+}
+
+static void
+bfa_ioc_send_disable(struct bfa_ioc *ioc)
+{
+ struct bfi_ioc_ctrl_req disable_req;
+
+ bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
+ bfa_ioc_portid(ioc));
+ bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
+}
+
+static void
+bfa_ioc_send_getattr(struct bfa_ioc *ioc)
+{
+ struct bfi_ioc_getattr_req attr_req;
+
+ bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
+ bfa_ioc_portid(ioc));
+ bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
+ bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
+}
+
+void
+bfa_nw_ioc_hb_check(void *cbarg)
+{
+ struct bfa_ioc *ioc = cbarg;
+ u32 hb_count;
+
+ hb_count = readl(ioc->ioc_regs.heartbeat);
+ if (ioc->hb_count == hb_count) {
+ pr_crit("Firmware heartbeat failure at %d", hb_count);
+ bfa_ioc_recover(ioc);
+ return;
+ } else {
+ ioc->hb_count = hb_count;
+ }
+
+ bfa_ioc_mbox_poll(ioc);
+ mod_timer(&ioc->hb_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_HB_TOV));
+}
+
+static void
+bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
+{
+ ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
+ mod_timer(&ioc->hb_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_HB_TOV));
+}
+
+static void
+bfa_ioc_hb_stop(struct bfa_ioc *ioc)
+{
+ del_timer(&ioc->hb_timer);
+}
+
+/**
+ * @brief
+ * Initiate a full firmware download.
+ */
+static void
+bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
+ u32 boot_param)
+{
+ u32 *fwimg;
+ u32 pgnum, pgoff;
+ u32 loff = 0;
+ u32 chunkno = 0;
+ u32 i;
+
+ /**
+ * Initialize LMEM first before code download
+ */
+ bfa_ioc_lmem_init(ioc);
+
+ /**
+ * Flash based firmware boot
+ */
+ if (bfa_ioc_is_optrom(ioc))
+ boot_type = BFI_BOOT_TYPE_FLASH;
+ fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
+
+ pgnum = bfa_ioc_smem_pgnum(ioc, loff);
+ pgoff = bfa_ioc_smem_pgoff(ioc, loff);
+
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+ for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
+ if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
+ chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
+ fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
+ }
+
+ /**
+ * write smem
+ */
+ writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
+ ((ioc->ioc_regs.smem_page_start) + (loff)));
+
+ loff += sizeof(u32);
+
+ /**
+ * handle page offset wrap around
+ */
+ loff = PSS_SMEM_PGOFF(loff);
+ if (loff == 0) {
+ pgnum++;
+ writel(pgnum,
+ ioc->ioc_regs.host_page_num_fn);
+ }
+ }
+
+ writel(bfa_ioc_smem_pgnum(ioc, 0),
+ ioc->ioc_regs.host_page_num_fn);
+
+ /*
+ * Set boot type and boot param at the end.
+ */
+ writel(boot_type, ((ioc->ioc_regs.smem_page_start)
+ + (BFI_BOOT_TYPE_OFF)));
+ writel(boot_param, ((ioc->ioc_regs.smem_page_start)
+ + (BFI_BOOT_PARAM_OFF)));
+}
+
+static void
+bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
+{
+ bfa_ioc_hwinit(ioc, force);
+}
+
+/**
+ * @brief
+ * Update BFA configuration from firmware configuration.
+ */
+static void
+bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
+{
+ struct bfi_ioc_attr *attr = ioc->attr;
+
+ attr->adapter_prop = ntohl(attr->adapter_prop);
+ attr->card_type = ntohl(attr->card_type);
+ attr->maxfrsize = ntohs(attr->maxfrsize);
+
+ bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
+}
+
+/**
+ * Attach time initialization of mbox logic.
+ */
+static void
+bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
+{
+ struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+ int mc;
+
+ INIT_LIST_HEAD(&mod->cmd_q);
+ for (mc = 0; mc < BFI_MC_MAX; mc++) {
+ mod->mbhdlr[mc].cbfn = NULL;
+ mod->mbhdlr[mc].cbarg = ioc->bfa;
+ }
+}
+
+/**
+ * Mbox poll timer -- restarts any pending mailbox requests.
+ */
+static void
+bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
+{
+ struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+ struct bfa_mbox_cmd *cmd;
+ u32 stat;
+
+ /**
+ * If no command pending, do nothing
+ */
+ if (list_empty(&mod->cmd_q))
+ return;
+
+ /**
+ * If previous command is not yet fetched by firmware, do nothing
+ */
+ stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
+ if (stat)
+ return;
+
+ /**
+ * Enqueue command to firmware.
+ */
+ bfa_q_deq(&mod->cmd_q, &cmd);
+ bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
+}
+
+/**
+ * Cleanup any pending requests.
+ */
+static void
+bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
+{
+ struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+ struct bfa_mbox_cmd *cmd;
+
+ while (!list_empty(&mod->cmd_q))
+ bfa_q_deq(&mod->cmd_q, &cmd);
+}
+
+/**
+ * IOC public
+ */
+static enum bfa_status
+bfa_ioc_pll_init(struct bfa_ioc *ioc)
+{
+ /*
+ * Hold semaphore so that nobody can access the chip during init.
+ */
+ bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
+
+ bfa_ioc_pll_init_asic(ioc);
+
+ ioc->pllinit = true;
+ /*
+ * release semaphore.
+ */
+ bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
+
+ return BFA_STATUS_OK;
+}
+
+/**
+ * Interface used by diag module to do firmware boot with memory test
+ * as the entry vector.
+ */
+static void
+bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
+{
+ void __iomem *rb;
+
+ bfa_ioc_stats(ioc, ioc_boots);
+
+ if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
+ return;
+
+ /**
+ * Initialize IOC state of all functions on a chip reset.
+ */
+ rb = ioc->pcidev.pci_bar_kva;
+ if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
+ writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
+ writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
+ } else {
+ writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
+ writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
+ }
+
+ bfa_ioc_msgflush(ioc);
+ bfa_ioc_download_fw(ioc, boot_type, boot_param);
+
+ /**
+ * Enable interrupts just before starting LPU
+ */
+ ioc->cbfn->reset_cbfn(ioc->bfa);
+ bfa_ioc_lpu_start(ioc);
+}
+
+/**
+ * Enable/disable IOC failure auto recovery.
+ */
+void
+bfa_nw_ioc_auto_recover(bool auto_recover)
+{
+ bfa_nw_auto_recover = auto_recover;
+}
+
+bool
+bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
+{
+ return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
+}
+
+static void
+bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
+{
+ u32 *msgp = mbmsg;
+ u32 r32;
+ int i;
+
+ /**
+ * read the MBOX msg
+ */
+ for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
+ i++) {
+ r32 = readl(ioc->ioc_regs.lpu_mbox +
+ i * sizeof(u32));
+ msgp[i] = htonl(r32);
+ }
+
+ /**
+ * turn off mailbox interrupt by clearing mailbox status
+ */
+ writel(1, ioc->ioc_regs.lpu_mbox_cmd);
+ readl(ioc->ioc_regs.lpu_mbox_cmd);
+}
+
+static void
+bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
+{
+ union bfi_ioc_i2h_msg_u *msg;
+
+ msg = (union bfi_ioc_i2h_msg_u *) m;
+
+ bfa_ioc_stats(ioc, ioc_isrs);
+
+ switch (msg->mh.msg_id) {
+ case BFI_IOC_I2H_HBEAT:
+ break;
+
+ case BFI_IOC_I2H_READY_EVENT:
+ bfa_fsm_send_event(ioc, IOC_E_FWREADY);
+ break;
+
+ case BFI_IOC_I2H_ENABLE_REPLY:
+ bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
+ break;
+
+ case BFI_IOC_I2H_DISABLE_REPLY:
+ bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
+ break;
+
+ case BFI_IOC_I2H_GETATTR_REPLY:
+ bfa_ioc_getattr_reply(ioc);
+ break;
+
+ default:
+ BUG_ON(1);
+ }
+}
+
+/**
+ * IOC attach time initialization and setup.
+ *
+ * @param[in] ioc memory for IOC
+ * @param[in] bfa driver instance structure
+ */
+void
+bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
+{
+ ioc->bfa = bfa;
+ ioc->cbfn = cbfn;
+ ioc->fcmode = false;
+ ioc->pllinit = false;
+ ioc->dbg_fwsave_once = true;
+
+ bfa_ioc_mbox_attach(ioc);
+ INIT_LIST_HEAD(&ioc->hb_notify_q);
+
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+}
+
+/**
+ * Driver detach time IOC cleanup.
+ */
+void
+bfa_nw_ioc_detach(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(ioc, IOC_E_DETACH);
+}
+
+/**
+ * Setup IOC PCI properties.
+ *
+ * @param[in] pcidev PCI device information for this IOC
+ */
+void
+bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
+ enum bfi_mclass mc)
+{
+ ioc->ioc_mc = mc;
+ ioc->pcidev = *pcidev;
+ ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
+ ioc->cna = ioc->ctdev && !ioc->fcmode;
+
+ bfa_nw_ioc_set_ct_hwif(ioc);
+
+ bfa_ioc_map_port(ioc);
+ bfa_ioc_reg_init(ioc);
+}
+
+/**
+ * Initialize IOC dma memory
+ *
+ * @param[in] dm_kva kernel virtual address of IOC dma memory
+ * @param[in] dm_pa physical address of IOC dma memory
+ */
+void
+bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
+{
+ /**
+ * dma memory for firmware attribute
+ */
+ ioc->attr_dma.kva = dm_kva;
+ ioc->attr_dma.pa = dm_pa;
+ ioc->attr = (struct bfi_ioc_attr *) dm_kva;
+}
+
+/**
+ * Return size of dma memory required.
+ */
+u32
+bfa_nw_ioc_meminfo(void)
+{
+ return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
+}
+
+void
+bfa_nw_ioc_enable(struct bfa_ioc *ioc)
+{
+ bfa_ioc_stats(ioc, ioc_enables);
+ ioc->dbg_fwsave_once = true;
+
+ bfa_fsm_send_event(ioc, IOC_E_ENABLE);
+}
+
+void
+bfa_nw_ioc_disable(struct bfa_ioc *ioc)
+{
+ bfa_ioc_stats(ioc, ioc_disables);
+ bfa_fsm_send_event(ioc, IOC_E_DISABLE);
+}
+
+static u32
+bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
+{
+ return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
+}
+
+static u32
+bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr)
+{
+ return PSS_SMEM_PGOFF(fmaddr);
+}
+
+/**
+ * Register mailbox message handler function, to be called by common modules
+ */
+void
+bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
+ bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
+{
+ struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+
+ mod->mbhdlr[mc].cbfn = cbfn;
+ mod->mbhdlr[mc].cbarg = cbarg;
+}
+
+/**
+ * Queue a mailbox command request to firmware. The command is queued
+ * if the mailbox is busy; it is the caller's responsibility to
+ * serialize.
+ *
+ * @param[in]	ioc	IOC instance
+ * @param[in]	cmd	Mailbox command
+ */
+void
+bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
+{
+ struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+ u32 stat;
+
+ /**
+ * If a previous command is pending, queue new command
+ */
+ if (!list_empty(&mod->cmd_q)) {
+ list_add_tail(&cmd->qe, &mod->cmd_q);
+ return;
+ }
+
+ /**
+ * If mailbox is busy, queue command for poll timer
+ */
+ stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
+ if (stat) {
+ list_add_tail(&cmd->qe, &mod->cmd_q);
+ return;
+ }
+
+ /**
+ * mailbox is free -- queue command to firmware
+ */
+ bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
+}
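+
+/*
+ * Usage sketch (hypothetical caller): a request is built in the command
+ * buffer and handed to the mailbox module. The bfa_mbox_cmd is queued
+ * by pointer, so its memory must stay valid until the command is sent.
+ *
+ *	static struct bfa_mbox_cmd cmd;
+ *	struct bfi_ioc_ctrl_req *req = (struct bfi_ioc_ctrl_req *)cmd.msg;
+ *
+ *	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
+ *		bfa_ioc_portid(ioc));
+ *	bfa_nw_ioc_mbox_queue(ioc, &cmd);
+ */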
+
+/**
+ * Handle mailbox interrupts
+ */
+void
+bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
+{
+ struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+ struct bfi_mbmsg m;
+ int mc;
+
+ bfa_ioc_msgget(ioc, &m);
+
+ /**
+ * Treat IOC message class as special.
+ */
+ mc = m.mh.msg_class;
+ if (mc == BFI_MC_IOC) {
+ bfa_ioc_isr(ioc, &m);
+ return;
+ }
+
+ if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
+ return;
+
+ mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
+}
+
+void
+bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(ioc, IOC_E_HWERROR);
+}
+
+/**
+ * Add to IOC heartbeat failure notification queue. To be used by common
+ * modules such as cee, port, diag.
+ */
+void
+bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
+ struct bfa_ioc_hbfail_notify *notify)
+{
+ list_add_tail(&notify->qe, &ioc->hb_notify_q);
+}
+
+#define BFA_MFG_NAME "Brocade"
+static void
+bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
+ struct bfa_adapter_attr *ad_attr)
+{
+ struct bfi_ioc_attr *ioc_attr;
+
+ ioc_attr = ioc->attr;
+
+ bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
+ bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
+ bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
+ bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
+ memcpy(&ad_attr->vpd, &ioc_attr->vpd,
+ sizeof(struct bfa_mfg_vpd));
+
+ ad_attr->nports = bfa_ioc_get_nports(ioc);
+ ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
+
+ bfa_ioc_get_adapter_model(ioc, ad_attr->model);
+ /* For now, model descr uses same model string */
+ bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
+
+ ad_attr->card_type = ioc_attr->card_type;
+ ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
+
+ if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
+ ad_attr->prototype = 1;
+ else
+ ad_attr->prototype = 0;
+
+ ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
+ ad_attr->mac = bfa_nw_ioc_get_mac(ioc);
+
+ ad_attr->pcie_gen = ioc_attr->pcie_gen;
+ ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
+ ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
+ ad_attr->asic_rev = ioc_attr->asic_rev;
+
+ bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
+
+ ad_attr->cna_capable = ioc->cna;
+ ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
+}
+
+static enum bfa_ioc_type
+bfa_ioc_get_type(struct bfa_ioc *ioc)
+{
+ if (!ioc->ctdev || ioc->fcmode)
+ return BFA_IOC_TYPE_FC;
+ else if (ioc->ioc_mc == BFI_MC_IOCFC)
+ return BFA_IOC_TYPE_FCoE;
+ else if (ioc->ioc_mc == BFI_MC_LL)
+ return BFA_IOC_TYPE_LL;
+ else {
+ BUG_ON(!(ioc->ioc_mc == BFI_MC_LL));
+ return BFA_IOC_TYPE_LL;
+ }
+}
+
+static void
+bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
+{
+ memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
+ memcpy(serial_num,
+ (void *)ioc->attr->brcd_serialnum,
+ BFA_ADAPTER_SERIAL_NUM_LEN);
+}
+
+static void
+bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
+{
+ memset(fw_ver, 0, BFA_VERSION_LEN);
+ memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
+}
+
+static void
+bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
+{
+ BUG_ON(!(chip_rev));
+
+ memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
+
+ chip_rev[0] = 'R';
+ chip_rev[1] = 'e';
+ chip_rev[2] = 'v';
+ chip_rev[3] = '-';
+ chip_rev[4] = ioc->attr->asic_rev;
+ chip_rev[5] = '\0';
+}
+
+static void
+bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
+{
+ memset(optrom_ver, 0, BFA_VERSION_LEN);
+ memcpy(optrom_ver, ioc->attr->optrom_version,
+ BFA_VERSION_LEN);
+}
+
+static void
+bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
+{
+ memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
+ memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+}
+
+static void
+bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
+{
+ struct bfi_ioc_attr *ioc_attr;
+
+ BUG_ON(!(model));
+ memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
+
+ ioc_attr = ioc->attr;
+
+ /**
+ * model name
+ */
+ snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
+ BFA_MFG_NAME, ioc_attr->card_type);
+}
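+
+/*
+ * Example (illustrative): a card_type of 1020 (BFA_MFG_TYPE_CNA10P2)
+ * produces the model string "Brocade-1020".
+ */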
+
+static enum bfa_ioc_state
+bfa_ioc_get_state(struct bfa_ioc *ioc)
+{
+ return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
+}
+
+void
+bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
+{
+ memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
+
+ ioc_attr->state = bfa_ioc_get_state(ioc);
+ ioc_attr->port_id = ioc->port_id;
+
+ ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
+
+ bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
+
+ ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
+ ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
+ bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
+}
+
+/**
+ * WWN public
+ */
+static u64
+bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
+{
+ return ioc->attr->pwwn;
+}
+
+mac_t
+bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
+{
+ /*
+ * Currently mfg mac is used as FCoE enode mac (not configured by PBC)
+ */
+ if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
+ return bfa_ioc_get_mfg_mac(ioc);
+ else
+ return ioc->attr->mac;
+}
+
+static mac_t
+bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc)
+{
+ mac_t m;
+
+ m = ioc->attr->mfg_mac;
+ if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
+ m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
+ else
+ bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
+ bfa_ioc_pcifn(ioc));
+
+ return m;
+}
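+
+/*
+ * Worked example (illustrative): on a card that is not an old wwn/mac
+ * model, PCI function 1 receives the manufacturing MAC plus 1 in its
+ * low 24 bits (via bfa_mfg_increment_wwn_mac()); old models bump only
+ * the final octet, with no carry into the higher bytes.
+ */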
+
+/**
+ * Firmware failure detected. Start recovery actions.
+ */
+static void
+bfa_ioc_recover(struct bfa_ioc *ioc)
+{
+ bfa_ioc_stats(ioc, ioc_hbfails);
+ bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
+}
+
+static void
+bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
+{
+ if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
+ return;
+
+}
diff --git a/drivers/net/bna/bfa_ioc.h b/drivers/net/bna/bfa_ioc.h
new file mode 100644
index 000000000000..7f0719e17efc
--- /dev/null
+++ b/drivers/net/bna/bfa_ioc.h
@@ -0,0 +1,301 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef __BFA_IOC_H__
+#define __BFA_IOC_H__
+
+#include "bfa_sm.h"
+#include "bfi.h"
+#include "cna.h"
+
+#define BFA_IOC_TOV 3000 /* msecs */
+#define BFA_IOC_HWSEM_TOV 500 /* msecs */
+#define BFA_IOC_HB_TOV 500 /* msecs */
+#define BFA_IOC_HWINIT_MAX 2
+#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
+
+/**
+ * Generic Scatter Gather Element used by driver
+ */
+struct bfa_sge {
+ u32 sg_len;
+ void *sg_addr;
+};
+
+/**
+ * PCI device information required by IOC
+ */
+struct bfa_pcidev {
+ int pci_slot;
+ u8 pci_func;
+ u16 device_id;
+ void __iomem *pci_bar_kva;
+};
+
+/**
+ * Structure used to remember the DMA-able memory block's KVA and Physical
+ * Address
+ */
+struct bfa_dma {
+ void *kva; /* ! Kernel virtual address */
+ u64 pa; /* ! Physical address */
+};
+
+#define BFA_DMA_ALIGN_SZ 256
+
+/**
+ * smem size for Crossbow and Catapult
+ */
+#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */
+#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */
+
+/**
+ * @brief BFA dma address assignment macro
+ */
+#define bfa_dma_addr_set(dma_addr, pa) \
+ __bfa_dma_addr_set(&dma_addr, (u64)pa)
+
+static inline void
+__bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
+{
+ dma_addr->a32.addr_lo = (u32) pa;
+ dma_addr->a32.addr_hi = (u32) (upper_32_bits(pa));
+}
+
+/**
+ * @brief BFA dma address assignment macro. (big endian format)
+ */
+#define bfa_dma_be_addr_set(dma_addr, pa) \
+ __bfa_dma_be_addr_set(&dma_addr, (u64)pa)
+static inline void
+__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
+{
+ dma_addr->a32.addr_lo = (u32) htonl(pa);
+ dma_addr->a32.addr_hi = (u32) htonl(upper_32_bits(pa));
+}
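+
+/*
+ * Worked example (illustrative): for pa == 0x0000000112345678ULL,
+ * bfa_dma_be_addr_set() stores htonl(0x12345678) in addr_lo and
+ * htonl(0x00000001) in addr_hi, giving firmware a big-endian view of
+ * the 64-bit DMA address.
+ */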
+
+struct bfa_ioc_regs {
+ void __iomem *hfn_mbox_cmd;
+ void __iomem *hfn_mbox;
+ void __iomem *lpu_mbox_cmd;
+ void __iomem *lpu_mbox;
+ void __iomem *pss_ctl_reg;
+ void __iomem *pss_err_status_reg;
+ void __iomem *app_pll_fast_ctl_reg;
+ void __iomem *app_pll_slow_ctl_reg;
+ void __iomem *ioc_sem_reg;
+ void __iomem *ioc_usage_sem_reg;
+ void __iomem *ioc_init_sem_reg;
+ void __iomem *ioc_usage_reg;
+ void __iomem *host_page_num_fn;
+ void __iomem *heartbeat;
+ void __iomem *ioc_fwstate;
+ void __iomem *ll_halt;
+ void __iomem *err_set;
+ void __iomem *shirq_isr_next;
+ void __iomem *shirq_msk_next;
+ void __iomem *smem_page_start;
+ u32 smem_pg0;
+};
+
+/**
+ * IOC Mailbox structures
+ */
+struct bfa_mbox_cmd {
+ struct list_head qe;
+ u32 msg[BFI_IOC_MSGSZ];
+};
+
+/**
+ * IOC mailbox module
+ */
+typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg *m);
+struct bfa_ioc_mbox_mod {
+ struct list_head cmd_q; /*!< pending mbox queue */
+ int nmclass; /*!< number of handlers */
+ struct {
+ bfa_ioc_mbox_mcfunc_t cbfn; /*!< message handlers */
+ void *cbarg;
+ } mbhdlr[BFI_MC_MAX];
+};
+
+/**
+ * IOC callback function interfaces
+ */
+typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
+typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa);
+typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa);
+typedef void (*bfa_ioc_reset_cbfn_t)(void *bfa);
+struct bfa_ioc_cbfn {
+ bfa_ioc_enable_cbfn_t enable_cbfn;
+ bfa_ioc_disable_cbfn_t disable_cbfn;
+ bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
+ bfa_ioc_reset_cbfn_t reset_cbfn;
+};
+
+/**
+ * Heartbeat failure notification queue element.
+ */
+struct bfa_ioc_hbfail_notify {
+ struct list_head qe;
+ bfa_ioc_hbfail_cbfn_t cbfn;
+ void *cbarg;
+};
+
+/**
+ * Initialize a heartbeat failure notification structure
+ */
+#define bfa_ioc_hbfail_init(__notify, __cbfn, __cbarg) do { \
+ (__notify)->cbfn = (__cbfn); \
+ (__notify)->cbarg = (__cbarg); \
+} while (0)
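+
+/*
+ * Usage sketch (driver context `drv` is hypothetical): register for
+ * heartbeat-failure notification via the interfaces declared below.
+ *
+ *	static void my_hbfail_cb(void *arg)
+ *	{
+ *		// quiesce I/O, schedule adapter recovery
+ *	}
+ *
+ *	bfa_ioc_hbfail_init(&notify, my_hbfail_cb, drv);
+ *	bfa_nw_ioc_hbfail_register(ioc, &notify);
+ */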
+
+struct bfa_ioc {
+ bfa_fsm_t fsm;
+ struct bfa *bfa;
+ struct bfa_pcidev pcidev;
+ struct bfa_timer_mod *timer_mod;
+ struct timer_list ioc_timer;
+ struct timer_list sem_timer;
+ struct timer_list hb_timer;
+ u32 hb_count;
+ u32 retry_count;
+ struct list_head hb_notify_q;
+ void *dbg_fwsave;
+ int dbg_fwsave_len;
+ bool dbg_fwsave_once;
+ enum bfi_mclass ioc_mc;
+ struct bfa_ioc_regs ioc_regs;
+ struct bfa_ioc_drv_stats stats;
+ bool auto_recover;
+ bool fcmode;
+ bool ctdev;
+ bool cna;
+ bool pllinit;
+ bool stats_busy; /*!< outstanding stats */
+ u8 port_id;
+
+ struct bfa_dma attr_dma;
+ struct bfi_ioc_attr *attr;
+ struct bfa_ioc_cbfn *cbfn;
+ struct bfa_ioc_mbox_mod mbox_mod;
+ struct bfa_ioc_hwif *ioc_hwif;
+};
+
+struct bfa_ioc_hwif {
+ enum bfa_status (*ioc_pll_init) (void __iomem *rb, bool fcmode);
+ bool (*ioc_firmware_lock) (struct bfa_ioc *ioc);
+ void (*ioc_firmware_unlock) (struct bfa_ioc *ioc);
+ void (*ioc_reg_init) (struct bfa_ioc *ioc);
+ void (*ioc_map_port) (struct bfa_ioc *ioc);
+ void (*ioc_isr_mode_set) (struct bfa_ioc *ioc,
+ bool msix);
+ void (*ioc_notify_hbfail) (struct bfa_ioc *ioc);
+ void (*ioc_ownership_reset) (struct bfa_ioc *ioc);
+};
+
+#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
+#define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
+#define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva)
+#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
+#define bfa_ioc_fetch_stats(__ioc, __stats) \
+ (((__stats)->drv_stats) = (__ioc)->stats)
+#define bfa_ioc_clr_stats(__ioc) \
+ memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
+#define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize)
+#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit)
+#define bfa_ioc_speed_sup(__ioc) \
+ BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
+#define bfa_ioc_get_nports(__ioc) \
+ BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)
+
+#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++)
+#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
+#define BFA_IOC_FWIMG_TYPE(__ioc) \
+ (((__ioc)->ctdev) ? \
+ (((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) : \
+ BFI_IMAGE_CB_FC)
+#define BFA_IOC_FW_SMEM_SIZE(__ioc) \
+ (((__ioc)->ctdev) ? BFI_SMEM_CT_SIZE : BFI_SMEM_CB_SIZE)
+#define BFA_IOC_FLASH_CHUNK_NO(off) ((off) / BFI_FLASH_CHUNK_SZ_WORDS)
+#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) ((off) % BFI_FLASH_CHUNK_SZ_WORDS)
+#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) ((chunkno) * BFI_FLASH_CHUNK_SZ_WORDS)
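+
+/*
+ * Worked example of the chunk math above: BFI_FLASH_CHUNK_SZ_WORDS is
+ * 64 (256 bytes / 4), so word offset 150 lives in chunk 2, at word 22
+ * within that chunk, and the chunk starts at word address 128:
+ *
+ *	BFA_IOC_FLASH_CHUNK_NO(150)		-> 150 / 64 = 2
+ *	BFA_IOC_FLASH_OFFSET_IN_CHUNK(150)	-> 150 % 64 = 22
+ *	BFA_IOC_FLASH_CHUNK_ADDR(2)		->   2 * 64 = 128
+ */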
+
+/**
+ * IOC mailbox interface
+ */
+void bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd);
+void bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc);
+void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
+ bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
+
+/**
+ * IOC interfaces
+ */
+
+#define bfa_ioc_pll_init_asic(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
+ (__ioc)->fcmode))
+
+#define bfa_ioc_isr_mode_set(__ioc, __msix) \
+ ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
+#define bfa_ioc_ownership_reset(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
+
+void bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc);
+
+void bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa,
+ struct bfa_ioc_cbfn *cbfn);
+void bfa_nw_ioc_auto_recover(bool auto_recover);
+void bfa_nw_ioc_detach(struct bfa_ioc *ioc);
+void bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
+ enum bfi_mclass mc);
+u32 bfa_nw_ioc_meminfo(void);
+void bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa);
+void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
+void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
+
+void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
+bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc);
+
+void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
+void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
+ struct bfa_ioc_hbfail_notify *notify);
+bool bfa_nw_ioc_sem_get(void __iomem *sem_reg);
+void bfa_nw_ioc_sem_release(void __iomem *sem_reg);
+void bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc);
+void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc,
+ struct bfi_ioc_image_hdr *fwhdr);
+bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc,
+ struct bfi_ioc_image_hdr *fwhdr);
+mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc);
+
+/*
+ * Timeout APIs
+ */
+void bfa_nw_ioc_timeout(void *ioc);
+void bfa_nw_ioc_hb_check(void *ioc);
+void bfa_nw_ioc_sem_timeout(void *ioc);
+
+/*
+ * F/W Image Size & Chunk
+ */
+u32 *bfa_cb_image_get_chunk(int type, u32 off);
+u32 bfa_cb_image_get_size(int type);
+
+#endif /* __BFA_IOC_H__ */
diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c
new file mode 100644
index 000000000000..462857cbab9b
--- /dev/null
+++ b/drivers/net/bna/bfa_ioc_ct.c
@@ -0,0 +1,392 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#include "bfa_ioc.h"
+#include "cna.h"
+#include "bfi.h"
+#include "bfi_ctreg.h"
+#include "bfa_defs.h"
+
+/*
+ * forward declarations
+ */
+static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
+static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
+static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
+
+struct bfa_ioc_hwif nw_hwif_ct;
+
+/**
+ * Called from bfa_ioc_attach() to map asic specific calls.
+ */
+void
+bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
+{
+ nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
+ nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
+ nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
+ nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
+ nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
+ nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
+ nw_hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
+ nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+
+ ioc->ioc_hwif = &nw_hwif_ct;
+}
+
+/**
+ * Return true if firmware of current driver matches the running firmware.
+ */
+static bool
+bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
+{
+ enum bfi_ioc_state ioc_fwstate;
+ u32 usecnt;
+ struct bfi_ioc_image_hdr fwhdr;
+
+ /**
+ * Firmware match check is relevant only for CNA.
+ */
+ if (!ioc->cna)
+ return true;
+
+ /**
+	 * If BIOS boot (flash based) -- do not increment usage count
+ */
+ if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
+ BFA_IOC_FWIMG_MINSZ)
+ return true;
+
+ bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+ usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
+
+ /**
+ * If usage count is 0, always return TRUE.
+ */
+ if (usecnt == 0) {
+ writel(1, ioc->ioc_regs.ioc_usage_reg);
+ bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ return true;
+ }
+
+ ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+	/**
+	 * The use count cannot be non-zero while the chip is uninitialized.
+	 */
+	BUG_ON(ioc_fwstate == BFI_IOC_UNINIT);
+
+ /**
+ * Check if another driver with a different firmware is active
+ */
+ bfa_nw_ioc_fwver_get(ioc, &fwhdr);
+ if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
+ bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ return false;
+ }
+
+ /**
+ * Same firmware version. Increment the reference count.
+ */
+ usecnt++;
+ writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
+ bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ return true;
+}
+
+static void
+bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
+{
+ u32 usecnt;
+
+ /**
+ * Firmware lock is relevant only for CNA.
+ */
+ if (!ioc->cna)
+ return;
+
+ /**
+	 * If BIOS boot (flash based) -- do not decrement usage count
+ */
+ if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
+ BFA_IOC_FWIMG_MINSZ)
+ return;
+
+ /**
+ * decrement usage count
+ */
+ bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+ usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
+	BUG_ON(usecnt == 0);
+
+ usecnt--;
+ writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
+
+ bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+}
+
+/**
+ * Notify other functions on HB failure.
+ */
+static void
+bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc)
+{
+ if (ioc->cna) {
+ writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
+ /* Wait for halt to take effect */
+ readl(ioc->ioc_regs.ll_halt);
+ } else {
+ writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
+ readl(ioc->ioc_regs.err_set);
+ }
+}
+
+/**
+ * Host to LPU mailbox message addresses
+ */
+static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
+ { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
+ { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
+ { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
+ { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
+};
+
+/**
+ * Host <-> LPU mailbox command/status registers - port 0
+ */
+static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
+ { HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
+ { HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
+ { HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
+ { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
+};
+
+/**
+ * Host <-> LPU mailbox command/status registers - port 1
+ */
+static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
+ { HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
+ { HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
+ { HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
+ { HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
+};
+
+static void
+bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
+{
+ void __iomem *rb;
+ int pcifn = bfa_ioc_pcifn(ioc);
+
+ rb = bfa_ioc_bar0(ioc);
+
+ ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
+ ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
+ ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
+
+ if (ioc->port_id == 0) {
+ ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
+ ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+ ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
+ ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
+ ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
+ } else {
+ ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
+ ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
+ ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
+ ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
+ ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
+ }
+
+ /*
+ * PSS control registers
+ */
+ ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
+ ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
+ ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
+ ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);
+
+ /*
+ * IOC semaphore registers and serialization
+ */
+ ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
+ ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
+ ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
+ ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
+
+ /**
+ * sram memory access
+ */
+ ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
+ ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
+
+ /*
+	 * err set reg: used to notify heartbeat failure in fcmode
+ */
+ ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
+}
+
+/**
+ * Initialize IOC to port mapping.
+ */
+
+#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
+static void
+bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
+{
+ void __iomem *rb = ioc->pcidev.pci_bar_kva;
+ u32 r32;
+
+ /**
+ * For catapult, base port id on personality register and IOC type
+ */
+ r32 = readl(rb + FNC_PERS_REG);
+ r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
+ ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
+}
+
+/**
+ * Set interrupt mode for a function: INTX or MSIX
+ */
+static void
+bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
+{
+ void __iomem *rb = ioc->pcidev.pci_bar_kva;
+ u32 r32, mode;
+
+ r32 = readl(rb + FNC_PERS_REG);
+
+ mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
+ __F0_INTX_STATUS;
+
+ /**
+ * If already in desired mode, do not change anything
+ */
+ if (!msix && mode)
+ return;
+
+ if (msix)
+ mode = __F0_INTX_STATUS_MSIX;
+ else
+ mode = __F0_INTX_STATUS_INTA;
+
+ r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
+ r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
+
+ writel(r32, rb + FNC_PERS_REG);
+}
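+
+/*
+ * Sketch: each PCI function owns an 8-bit slice of FNC_PERS_REG (see
+ * FNC_PERS_FN_SHIFT above), so reading a function's interrupt-mode
+ * bits looks like this (the __F0_* field layout repeats per slice;
+ * the helper is illustrative only):
+ */
+static u32 example_read_intx_mode(void __iomem *rb, int pcifn)
+{
+	u32 r32 = readl(rb + FNC_PERS_REG);
+
+	return (r32 >> FNC_PERS_FN_SHIFT(pcifn)) & __F0_INTX_STATUS;
+}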
+
+/**
+ * Cleanup hw semaphore and usecnt registers
+ */
+static void
+bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
+{
+ if (ioc->cna) {
+ bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+ writel(0, ioc->ioc_regs.ioc_usage_reg);
+ bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ }
+
+ /*
+ * Read the hw sem reg to make sure that it is locked
+ * before we clear it. If it is not locked, writing 1
+ * will lock it instead of clearing it.
+ */
+ readl(ioc->ioc_regs.ioc_sem_reg);
+ bfa_nw_ioc_hw_sem_release(ioc);
+}
+
+static enum bfa_status
+bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode)
+{
+ u32 pll_sclk, pll_fclk, r32;
+
+ pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
+ __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
+ __APP_PLL_312_JITLMT0_1(3U) |
+ __APP_PLL_312_CNTLMT0_1(1U);
+ pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
+ __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
+ __APP_PLL_425_JITLMT0_1(3U) |
+ __APP_PLL_425_CNTLMT0_1(1U);
+ if (fcmode) {
+ writel(0, (rb + OP_MODE));
+ writel(__APP_EMS_CMLCKSEL |
+ __APP_EMS_REFCKBUFEN2 |
+ __APP_EMS_CHANNEL_SEL,
+ (rb + ETH_MAC_SER_REG));
+ } else {
+ writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
+ writel(__APP_EMS_REFCKBUFEN1,
+ (rb + ETH_MAC_SER_REG));
+ }
+ writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
+ writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
+ writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+ writel(pll_sclk |
+ __APP_PLL_312_LOGIC_SOFT_RESET,
+ rb + APP_PLL_312_CTL_REG);
+ writel(pll_fclk |
+ __APP_PLL_425_LOGIC_SOFT_RESET,
+ rb + APP_PLL_425_CTL_REG);
+ writel(pll_sclk |
+ __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE,
+ rb + APP_PLL_312_CTL_REG);
+ writel(pll_fclk |
+ __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE,
+ rb + APP_PLL_425_CTL_REG);
+ readl(rb + HOSTFN0_INT_MSK);
+ udelay(2000);
+ writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+ writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+ writel(pll_sclk |
+ __APP_PLL_312_ENABLE,
+ rb + APP_PLL_312_CTL_REG);
+ writel(pll_fclk |
+ __APP_PLL_425_ENABLE,
+ rb + APP_PLL_425_CTL_REG);
+ if (!fcmode) {
+ writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
+ writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
+ }
+ r32 = readl((rb + PSS_CTL_REG));
+ r32 &= ~__PSS_LMEM_RESET;
+ writel(r32, (rb + PSS_CTL_REG));
+ udelay(1000);
+ if (!fcmode) {
+ writel(0, (rb + PMM_1T_RESET_REG_P0));
+ writel(0, (rb + PMM_1T_RESET_REG_P1));
+ }
+
+ writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
+ udelay(1000);
+ r32 = readl((rb + MBIST_STAT_REG));
+ writel(0, (rb + MBIST_CTL_REG));
+ return BFA_STATUS_OK;
+}
diff --git a/drivers/net/bna/bfa_sm.h b/drivers/net/bna/bfa_sm.h
new file mode 100644
index 000000000000..1d3d975d6f68
--- /dev/null
+++ b/drivers/net/bna/bfa_sm.h
@@ -0,0 +1,88 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/**
+ * @file bfa_sm.h  State machine definitions
+ */
+
+#ifndef __BFA_SM_H__
+#define __BFA_SM_H__
+
+#include "cna.h"
+
+typedef void (*bfa_sm_t)(void *sm, int event);
+
+/**
+ * oc - object class eg. bfa_ioc
+ * st - state, eg. reset
+ * otype - object type, eg. struct bfa_ioc
+ * etype - event type, eg. enum ioc_event
+ */
+#define bfa_sm_state_decl(oc, st, otype, etype) \
+ static void oc ## _sm_ ## st(otype * fsm, etype event)
+
+#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state))
+#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event)))
+#define bfa_sm_get_state(_sm) ((_sm)->sm)
+#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
+
+/**
+ * For converting from state machine function to state encoding.
+ */
+struct bfa_sm_table {
+ bfa_sm_t sm; /*!< state machine function */
+ int state; /*!< state machine encoding */
+ char *name; /*!< state name for display */
+};
+#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
+
+/**
+ * State machine with entry actions.
+ */
+typedef void (*bfa_fsm_t)(void *fsm, int event);
+
+/**
+ * oc - object class eg. bfa_ioc
+ * st - state, eg. reset
+ * otype - object type, eg. struct bfa_ioc
+ * etype - event type, eg. enum ioc_event
+ */
+#define bfa_fsm_state_decl(oc, st, otype, etype) \
+ static void oc ## _sm_ ## st(otype * fsm, etype event); \
+ static void oc ## _sm_ ## st ## _entry(otype * fsm)
+
+#define bfa_fsm_set_state(_fsm, _state) do { \
+ (_fsm)->fsm = (bfa_fsm_t)(_state); \
+ _state ## _entry(_fsm); \
+} while (0)
+
+#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event)))
+#define bfa_fsm_get_state(_fsm) ((_fsm)->fsm)
+#define bfa_fsm_cmp_state(_fsm, _state) \
+ ((_fsm)->fsm == (bfa_fsm_t)(_state))
+
+static inline int
+bfa_sm_to_state(struct bfa_sm_table *smt, bfa_sm_t sm)
+{
+ int i = 0;
+
+ while (smt[i].sm && smt[i].sm != sm)
+ i++;
+ return smt[i].state;
+}
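+
+/*
+ * Usage sketch for the FSM macros and the state table (object, state
+ * and event names here are illustrative; the real state machines live
+ * in bfa_ioc.c):
+ *
+ *	bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
+ *
+ *	static void bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
+ *	{
+ *		// one-time entry action for the reset state
+ *	}
+ *
+ *	static void bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
+ *	{
+ *		// handle events; transition with bfa_fsm_set_state(ioc, ...)
+ *	}
+ *
+ *	bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);  // runs ..._reset_entry()
+ *	bfa_fsm_send_event(ioc, IOC_E_ENABLE);     // dispatch to current state
+ */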
+#endif
diff --git a/drivers/net/bna/bfa_wc.h b/drivers/net/bna/bfa_wc.h
new file mode 100644
index 000000000000..d0e4caee67b0
--- /dev/null
+++ b/drivers/net/bna/bfa_wc.h
@@ -0,0 +1,69 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/**
+ * @file bfa_wc.h Generic wait counter.
+ */
+
+#ifndef __BFA_WC_H__
+#define __BFA_WC_H__
+
+typedef void (*bfa_wc_resume_t) (void *cbarg);
+
+struct bfa_wc {
+ bfa_wc_resume_t wc_resume;
+ void *wc_cbarg;
+ int wc_count;
+};
+
+static inline void
+bfa_wc_up(struct bfa_wc *wc)
+{
+ wc->wc_count++;
+}
+
+static inline void
+bfa_wc_down(struct bfa_wc *wc)
+{
+ wc->wc_count--;
+ if (wc->wc_count == 0)
+ wc->wc_resume(wc->wc_cbarg);
+}
+
+/**
+ * Initialize a waiting counter.
+ */
+static inline void
+bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
+{
+ wc->wc_resume = wc_resume;
+ wc->wc_cbarg = wc_cbarg;
+ wc->wc_count = 0;
+ bfa_wc_up(wc);
+}
+
+/**
+ * Wait for counter to reach zero
+ */
+static inline void
+bfa_wc_wait(struct bfa_wc *wc)
+{
+ bfa_wc_down(wc);
+}
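+
+/*
+ * Usage sketch of the wait-counter pattern (driver object and async
+ * helper are hypothetical): start N parallel operations and resume
+ * once the last one completes.
+ *
+ *	bfa_wc_init(&wc, all_done_cb, drv);	// count = 1 (init's own ref)
+ *	for (i = 0; i < n; i++) {
+ *		bfa_wc_up(&wc);			// one ref per pending op
+ *		start_async_op(i, &wc);		// op calls bfa_wc_down() when done
+ *	}
+ *	bfa_wc_wait(&wc);	// drop init's ref; all_done_cb() fires at zero
+ */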
+
+#endif
diff --git a/drivers/net/bna/bfi.h b/drivers/net/bna/bfi.h
new file mode 100644
index 000000000000..a97396811050
--- /dev/null
+++ b/drivers/net/bna/bfi.h
@@ -0,0 +1,392 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef __BFI_H__
+#define __BFI_H__
+
+#include "bfa_defs.h"
+
+#pragma pack(1)
+
+/**
+ * BFI FW image type
+ */
+#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */
+#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
+enum {
+ BFI_IMAGE_CB_FC,
+ BFI_IMAGE_CT_FC,
+ BFI_IMAGE_CT_CNA,
+ BFI_IMAGE_MAX,
+};
+
+/**
+ * Msg header common to all msgs
+ */
+struct bfi_mhdr {
+ u8 msg_class; /*!< @ref enum bfi_mclass */
+	u8 msg_id;		/*!< msg opcode within the class */
+ union {
+ struct {
+ u8 rsvd;
+ u8 lpu_id; /*!< msg destination */
+ } h2i;
+ u16 i2htok; /*!< token in msgs to host */
+ } mtag;
+};
+
+#define bfi_h2i_set(_mh, _mc, _op, _lpuid) do { \
+ (_mh).msg_class = (_mc); \
+ (_mh).msg_id = (_op); \
+ (_mh).mtag.h2i.lpu_id = (_lpuid); \
+} while (0)
+
+#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \
+ (_mh).msg_class = (_mc); \
+ (_mh).msg_id = (_op); \
+ (_mh).mtag.i2htok = (_i2htok); \
+} while (0)
+
+/*
+ * Message opcodes: 0-127 to firmware, 128-255 to host
+ */
+#define BFI_I2H_OPCODE_BASE 128
+#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
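+
+/*
+ * Sketch: build a host-to-firmware header with the setter above. The
+ * message and opcode types appear later in this file; lpu_id 0 is
+ * illustrative. BFA_I2HM(1), for example, yields opcode 129.
+ *
+ *	struct bfi_ioc_ctrl_req req;
+ *
+ *	bfi_h2i_set(req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, 0);
+ */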
+
+/**
+ ****************************************************************************
+ *
+ * Scatter Gather Element and Page definition
+ *
+ ****************************************************************************
+ */
+
+#define BFI_SGE_INLINE 1
+#define BFI_SGE_INLINE_MAX (BFI_SGE_INLINE + 1)
+
+/**
+ * SG Flags
+ */
+enum {
+ BFI_SGE_DATA = 0, /*!< data address, not last */
+ BFI_SGE_DATA_CPL = 1, /*!< data addr, last in current page */
+ BFI_SGE_DATA_LAST = 3, /*!< data address, last */
+ BFI_SGE_LINK = 2, /*!< link address */
+ BFI_SGE_PGDLEN = 2, /*!< cumulative data length for page */
+};
+
+/**
+ * DMA addresses
+ */
+union bfi_addr_u {
+ struct {
+ u32 addr_lo;
+ u32 addr_hi;
+ } a32;
+};
+
+/**
+ * Scatter Gather Element
+ */
+struct bfi_sge {
+#ifdef __BIGENDIAN
+ u32 flags:2,
+ rsvd:2,
+ sg_len:28;
+#else
+ u32 sg_len:28,
+ rsvd:2,
+ flags:2;
+#endif
+ union bfi_addr_u sga;
+};
+
+/**
+ * Scatter Gather Page
+ */
+#define BFI_SGPG_DATA_SGES 7
+#define BFI_SGPG_SGES_MAX (BFI_SGPG_DATA_SGES + 1)
+#define BFI_SGPG_RSVD_WD_LEN 8
+struct bfi_sgpg {
+ struct bfi_sge sges[BFI_SGPG_SGES_MAX];
+ u32 rsvd[BFI_SGPG_RSVD_WD_LEN];
+};
+
+/*
+ * Large message structure - 128-byte messages
+ */
+#define BFI_LMSG_SZ 128
+#define BFI_LMSG_PL_WSZ \
+ ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4)
+
+struct bfi_msg {
+ struct bfi_mhdr mhdr;
+ u32 pl[BFI_LMSG_PL_WSZ];
+};
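+
+/*
+ * Sanity sketch: with #pragma pack(1), struct bfi_mhdr packs to 4
+ * bytes, so a 128-byte large message carries (128 - 4) / 4 = 31
+ * payload words. Something like:
+ *
+ *	BUILD_BUG_ON(sizeof(struct bfi_mhdr) != 4);
+ *	BUILD_BUG_ON(BFI_LMSG_PL_WSZ != 31);
+ */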
+
+/**
+ * Mailbox message structure
+ */
+#define BFI_MBMSG_SZ 7
+struct bfi_mbmsg {
+ struct bfi_mhdr mh;
+ u32 pl[BFI_MBMSG_SZ];
+};
+
+/**
+ * Message Classes
+ */
+enum bfi_mclass {
+ BFI_MC_IOC = 1, /*!< IO Controller (IOC) */
+ BFI_MC_DIAG = 2, /*!< Diagnostic Msgs */
+ BFI_MC_FLASH = 3, /*!< Flash message class */
+ BFI_MC_CEE = 4, /*!< CEE */
+ BFI_MC_FCPORT = 5, /*!< FC port */
+ BFI_MC_IOCFC = 6, /*!< FC - IO Controller (IOC) */
+ BFI_MC_LL = 7, /*!< Link Layer */
+ BFI_MC_UF = 8, /*!< Unsolicited frame receive */
+ BFI_MC_FCXP = 9, /*!< FC Transport */
+ BFI_MC_LPS = 10, /*!< lport fc login services */
+ BFI_MC_RPORT = 11, /*!< Remote port */
+ BFI_MC_ITNIM = 12, /*!< I-T nexus (Initiator mode) */
+ BFI_MC_IOIM_READ = 13, /*!< read IO (Initiator mode) */
+ BFI_MC_IOIM_WRITE = 14, /*!< write IO (Initiator mode) */
+ BFI_MC_IOIM_IO = 15, /*!< IO (Initiator mode) */
+ BFI_MC_IOIM = 16, /*!< IO (Initiator mode) */
+ BFI_MC_IOIM_IOCOM = 17, /*!< good IO completion */
+ BFI_MC_TSKIM = 18, /*!< Initiator Task management */
+ BFI_MC_SBOOT = 19, /*!< SAN boot services */
+ BFI_MC_IPFC = 20, /*!< IP over FC Msgs */
+ BFI_MC_PORT = 21, /*!< Physical port */
+ BFI_MC_SFP = 22, /*!< SFP module */
+ BFI_MC_MSGQ = 23, /*!< MSGQ */
+ BFI_MC_ENET = 24, /*!< ENET commands/responses */
+ BFI_MC_MAX = 32
+};
+
+#define BFI_IOC_MAX_CQS 4
+#define BFI_IOC_MAX_CQS_ASIC 8
+#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */
+
+#define BFI_BOOT_TYPE_OFF 8
+#define BFI_BOOT_PARAM_OFF 12
+
+#define BFI_BOOT_TYPE_NORMAL 0 /* param is device id */
+#define BFI_BOOT_TYPE_FLASH 1
+#define BFI_BOOT_TYPE_MEMTEST 2
+
+#define BFI_BOOT_MEMTEST_RES_ADDR 0x900
+#define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3
+
+/**
+ *----------------------------------------------------------------------
+ * IOC
+ *----------------------------------------------------------------------
+ */
+
+enum bfi_ioc_h2i_msgs {
+ BFI_IOC_H2I_ENABLE_REQ = 1,
+ BFI_IOC_H2I_DISABLE_REQ = 2,
+ BFI_IOC_H2I_GETATTR_REQ = 3,
+ BFI_IOC_H2I_DBG_SYNC = 4,
+ BFI_IOC_H2I_DBG_DUMP = 5,
+};
+
+enum bfi_ioc_i2h_msgs {
+ BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1),
+ BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2),
+ BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3),
+ BFI_IOC_I2H_READY_EVENT = BFA_I2HM(4),
+ BFI_IOC_I2H_HBEAT = BFA_I2HM(5),
+};
+
+/**
+ * BFI_IOC_H2I_GETATTR_REQ message
+ */
+struct bfi_ioc_getattr_req {
+ struct bfi_mhdr mh;
+ union bfi_addr_u attr_addr;
+};
+
+struct bfi_ioc_attr {
+ u64 mfg_pwwn; /*!< Mfg port wwn */
+ u64 mfg_nwwn; /*!< Mfg node wwn */
+ mac_t mfg_mac; /*!< Mfg mac */
+ u16 rsvd_a;
+ u64 pwwn;
+ u64 nwwn;
+ mac_t mac; /*!< PBC or Mfg mac */
+ u16 rsvd_b;
+ mac_t fcoe_mac;
+ u16 rsvd_c;
+ char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
+ u8 pcie_gen;
+ u8 pcie_lanes_orig;
+ u8 pcie_lanes;
+ u8 rx_bbcredit; /*!< receive buffer credits */
+ u32 adapter_prop; /*!< adapter properties */
+ u16 maxfrsize; /*!< max receive frame size */
+ char asic_rev;
+ u8 rsvd_d;
+ char fw_version[BFA_VERSION_LEN];
+ char optrom_version[BFA_VERSION_LEN];
+ struct bfa_mfg_vpd vpd;
+ u32 card_type; /*!< card type */
+};
+
+/**
+ * BFI_IOC_I2H_GETATTR_REPLY message
+ */
+struct bfi_ioc_getattr_reply {
+ struct bfi_mhdr mh; /*!< Common msg header */
+ u8 status; /*!< cfg reply status */
+ u8 rsvd[3];
+};
+
+/**
+ * Firmware memory page offsets
+ */
+#define BFI_IOC_SMEM_PG0_CB (0x40)
+#define BFI_IOC_SMEM_PG0_CT (0x180)
+
+/**
+ * Firmware statistic offset
+ */
+#define BFI_IOC_FWSTATS_OFF (0x6B40)
+#define BFI_IOC_FWSTATS_SZ (4096)
+
+/**
+ * Firmware trace offset
+ */
+#define BFI_IOC_TRC_OFF (0x4b00)
+#define BFI_IOC_TRC_ENTS 256
+
+#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
+#define BFI_IOC_MD5SUM_SZ 4
+struct bfi_ioc_image_hdr {
+ u32 signature; /*!< constant signature */
+ u32 rsvd_a;
+ u32 exec; /*!< exec vector */
+ u32 param; /*!< parameters */
+ u32 rsvd_b[4];
+ u32 md5sum[BFI_IOC_MD5SUM_SZ];
+};
+
+/**
+ * BFI_IOC_I2H_READY_EVENT message
+ */
+struct bfi_ioc_rdy_event {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u8 init_status; /*!< init event status */
+ u8 rsvd[3];
+};
+
+struct bfi_ioc_hbeat {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u32 hb_count; /*!< current heart beat count */
+};
+
+/**
+ * IOC hardware/firmware state
+ */
+enum bfi_ioc_state {
+ BFI_IOC_UNINIT = 0, /*!< not initialized */
+ BFI_IOC_INITING = 1, /*!< h/w is being initialized */
+ BFI_IOC_HWINIT = 2, /*!< h/w is initialized */
+ BFI_IOC_CFG = 3, /*!< IOC configuration in progress */
+ BFI_IOC_OP = 4, /*!< IOC is operational */
+ BFI_IOC_DISABLING = 5, /*!< IOC is being disabled */
+ BFI_IOC_DISABLED = 6, /*!< IOC is disabled */
+	BFI_IOC_CFG_DISABLED = 7, /*!< IOC is being disabled; transient */
+ BFI_IOC_FAIL = 8, /*!< IOC heart-beat failure */
+ BFI_IOC_MEMTEST = 9, /*!< IOC is doing memtest */
+};
+
+#define BFI_IOC_ENDIAN_SIG 0x12345678
+
+enum {
+ BFI_ADAPTER_TYPE_FC = 0x01, /*!< FC adapters */
+ BFI_ADAPTER_TYPE_MK = 0x0f0000, /*!< adapter type mask */
+ BFI_ADAPTER_TYPE_SH = 16, /*!< adapter type shift */
+ BFI_ADAPTER_NPORTS_MK = 0xff00, /*!< number of ports mask */
+ BFI_ADAPTER_NPORTS_SH = 8, /*!< number of ports shift */
+ BFI_ADAPTER_SPEED_MK = 0xff, /*!< adapter speed mask */
+ BFI_ADAPTER_SPEED_SH = 0, /*!< adapter speed shift */
+	BFI_ADAPTER_PROTO = 0x100000, /*!< prototype adapters */
+ BFI_ADAPTER_TTV = 0x200000, /*!< TTV debug capable */
+ BFI_ADAPTER_UNSUPP = 0x400000, /*!< unknown adapter type */
+};
+
+#define BFI_ADAPTER_GETP(__prop, __adap_prop) \
+ (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >> \
+ BFI_ADAPTER_ ## __prop ## _SH)
+#define BFI_ADAPTER_SETP(__prop, __val) \
+ ((__val) << BFI_ADAPTER_ ## __prop ## _SH)
+#define BFI_ADAPTER_IS_PROTO(__adap_type) \
+ ((__adap_type) & BFI_ADAPTER_PROTO)
+#define BFI_ADAPTER_IS_TTV(__adap_type) \
+ ((__adap_type) & BFI_ADAPTER_TTV)
+#define BFI_ADAPTER_IS_UNSUPP(__adap_type) \
+ ((__adap_type) & BFI_ADAPTER_UNSUPP)
+#define BFI_ADAPTER_IS_SPECIAL(__adap_type) \
+ ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
+ BFI_ADAPTER_UNSUPP))
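+
+/*
+ * Worked example for the accessors above: adapter_prop = 0x00010208
+ * decodes as type 0x1, 2 ports, speed 8 --
+ *
+ *	BFI_ADAPTER_GETP(TYPE,   0x00010208) == 0x1
+ *	BFI_ADAPTER_GETP(NPORTS, 0x00010208) == 2
+ *	BFI_ADAPTER_GETP(SPEED,  0x00010208) == 8
+ *
+ * (the encoded value is illustrative, not a real adapter id).
+ */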
+
+/**
+ * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
+ */
+struct bfi_ioc_ctrl_req {
+ struct bfi_mhdr mh;
+ u8 ioc_class;
+ u8 rsvd[3];
+ u32 tv_sec;
+};
+
+/**
+ * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
+ */
+struct bfi_ioc_ctrl_reply {
+ struct bfi_mhdr mh; /*!< Common msg header */
+ u8 status; /*!< enable/disable status */
+ u8 rsvd[3];
+};
+
+#define BFI_IOC_MSGSZ 8
+/**
+ * H2I Messages
+ */
+union bfi_ioc_h2i_msg_u {
+ struct bfi_mhdr mh;
+ struct bfi_ioc_ctrl_req enable_req;
+ struct bfi_ioc_ctrl_req disable_req;
+ struct bfi_ioc_getattr_req getattr_req;
+ u32 mboxmsg[BFI_IOC_MSGSZ];
+};
+
+/**
+ * I2H Messages
+ */
+union bfi_ioc_i2h_msg_u {
+ struct bfi_mhdr mh;
+ struct bfi_ioc_rdy_event rdy_event;
+ u32 mboxmsg[BFI_IOC_MSGSZ];
+};
+
+#pragma pack()
+
+#endif /* __BFI_H__ */
diff --git a/drivers/net/bna/bfi_cna.h b/drivers/net/bna/bfi_cna.h
new file mode 100644
index 000000000000..4eecabea397b
--- /dev/null
+++ b/drivers/net/bna/bfi_cna.h
@@ -0,0 +1,199 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BFI_CNA_H__
+#define __BFI_CNA_H__
+
+#include "bfi.h"
+#include "bfa_defs_cna.h"
+
+#pragma pack(1)
+
+enum bfi_port_h2i {
+ BFI_PORT_H2I_ENABLE_REQ = (1),
+ BFI_PORT_H2I_DISABLE_REQ = (2),
+ BFI_PORT_H2I_GET_STATS_REQ = (3),
+ BFI_PORT_H2I_CLEAR_STATS_REQ = (4),
+};
+
+enum bfi_port_i2h {
+ BFI_PORT_I2H_ENABLE_RSP = BFA_I2HM(1),
+ BFI_PORT_I2H_DISABLE_RSP = BFA_I2HM(2),
+ BFI_PORT_I2H_GET_STATS_RSP = BFA_I2HM(3),
+ BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4),
+};
+
+/**
+ * Generic REQ type
+ */
+struct bfi_port_generic_req {
+ struct bfi_mhdr mh; /*!< msg header */
+ u32 msgtag; /*!< msgtag for reply */
+ u32 rsvd;
+};
+
+/**
+ * Generic RSP type
+ */
+struct bfi_port_generic_rsp {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u8 status; /*!< port enable status */
+ u8 rsvd[3];
+ u32 msgtag; /*!< msgtag for reply */
+};
+
+/**
+ * @todo
+ * BFI_PORT_H2I_ENABLE_REQ
+ */
+
+/**
+ * @todo
+ * BFI_PORT_I2H_ENABLE_RSP
+ */
+
+/**
+ * BFI_PORT_H2I_DISABLE_REQ
+ */
+
+/**
+ * BFI_PORT_I2H_DISABLE_RSP
+ */
+
+/**
+ * BFI_PORT_H2I_GET_STATS_REQ
+ */
+struct bfi_port_get_stats_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+ union bfi_addr_u dma_addr;
+};
+
+/**
+ * BFI_PORT_I2H_GET_STATS_RSP
+ */
+
+/**
+ * BFI_PORT_H2I_CLEAR_STATS_REQ
+ */
+
+/**
+ * BFI_PORT_I2H_CLEAR_STATS_RSP
+ */
+
+union bfi_port_h2i_msg_u {
+ struct bfi_mhdr mh;
+ struct bfi_port_generic_req enable_req;
+ struct bfi_port_generic_req disable_req;
+ struct bfi_port_get_stats_req getstats_req;
+ struct bfi_port_generic_req clearstats_req;
+};
+
+union bfi_port_i2h_msg_u {
+ struct bfi_mhdr mh;
+ struct bfi_port_generic_rsp enable_rsp;
+ struct bfi_port_generic_rsp disable_rsp;
+ struct bfi_port_generic_rsp getstats_rsp;
+ struct bfi_port_generic_rsp clearstats_rsp;
+};
+
+/* @brief Mailbox commands from host to (DCBX/LLDP) firmware */
+enum bfi_cee_h2i_msgs {
+ BFI_CEE_H2I_GET_CFG_REQ = 1,
+ BFI_CEE_H2I_RESET_STATS = 2,
+ BFI_CEE_H2I_GET_STATS_REQ = 3,
+};
+
+/* @brief Mailbox reply and AEN messages from DCBX/LLDP firmware to host */
+enum bfi_cee_i2h_msgs {
+ BFI_CEE_I2H_GET_CFG_RSP = BFA_I2HM(1),
+ BFI_CEE_I2H_RESET_STATS_RSP = BFA_I2HM(2),
+ BFI_CEE_I2H_GET_STATS_RSP = BFA_I2HM(3),
+};
+
+/* Data structures */
+
+/*
+ * @brief H2I command structure for resetting the LLDP stats.
+ * BFI_CEE_H2I_RESET_STATS
+ */
+struct bfi_lldp_reset_stats {
+ struct bfi_mhdr mh;
+};
+
+/*
+ * @brief H2I command structure for resetting the CEE stats.
+ * BFI_CEE_H2I_RESET_STATS
+ */
+struct bfi_cee_reset_stats {
+ struct bfi_mhdr mh;
+};
+
+/*
+ * @brief get configuration command from host
+ * BFI_CEE_H2I_GET_CFG_REQ
+ */
+struct bfi_cee_get_req {
+ struct bfi_mhdr mh;
+ union bfi_addr_u dma_addr;
+};
+
+/*
+ * @brief reply message from firmware
+ * BFI_CEE_I2H_GET_CFG_RSP
+ */
+struct bfi_cee_get_rsp {
+ struct bfi_mhdr mh;
+ u8 cmd_status;
+ u8 rsvd[3];
+};
+
+/*
+ * @brief get statistics command from host
+ * BFI_CEE_H2I_GET_STATS_REQ
+ */
+struct bfi_cee_stats_req {
+ struct bfi_mhdr mh;
+ union bfi_addr_u dma_addr;
+};
+
+/*
+ * @brief reply message from firmware
+ * BFI_CEE_I2H_GET_STATS_RSP
+ */
+struct bfi_cee_stats_rsp {
+ struct bfi_mhdr mh;
+ u8 cmd_status;
+ u8 rsvd[3];
+};
+
+/* @brief mailbox command structures from host to firmware */
+union bfi_cee_h2i_msg_u {
+ struct bfi_mhdr mh;
+ struct bfi_cee_get_req get_req;
+ struct bfi_cee_stats_req stats_req;
+};
+
+/* @brief mailbox message structures from firmware to host */
+union bfi_cee_i2h_msg_u {
+ struct bfi_mhdr mh;
+ struct bfi_cee_get_rsp get_rsp;
+ struct bfi_cee_stats_rsp stats_rsp;
+};
+
+#pragma pack()
+
+#endif /* __BFI_CNA_H__ */
diff --git a/drivers/net/bna/bfi_ctreg.h b/drivers/net/bna/bfi_ctreg.h
new file mode 100644
index 000000000000..404ea351d4a1
--- /dev/null
+++ b/drivers/net/bna/bfi_ctreg.h
@@ -0,0 +1,637 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+/*
+ * bfi_ctreg.h catapult host block register definitions
+ *
+ * !!! Do not edit. Auto generated. !!!
+ */
+
+#ifndef __BFI_CTREG_H__
+#define __BFI_CTREG_H__
+
+#define HOSTFN0_LPU_MBOX0_0 0x00019200
+#define HOSTFN1_LPU_MBOX0_8 0x00019260
+#define LPU_HOSTFN0_MBOX0_0 0x00019280
+#define LPU_HOSTFN1_MBOX0_8 0x000192e0
+#define HOSTFN2_LPU_MBOX0_0 0x00019400
+#define HOSTFN3_LPU_MBOX0_8 0x00019460
+#define LPU_HOSTFN2_MBOX0_0 0x00019480
+#define LPU_HOSTFN3_MBOX0_8 0x000194e0
+#define HOSTFN0_INT_STATUS 0x00014000
+#define __HOSTFN0_HALT_OCCURRED 0x01000000
+#define __HOSTFN0_INT_STATUS_LVL_MK 0x00f00000
+#define __HOSTFN0_INT_STATUS_LVL_SH 20
+#define __HOSTFN0_INT_STATUS_LVL(_v) ((_v) << __HOSTFN0_INT_STATUS_LVL_SH)
+#define __HOSTFN0_INT_STATUS_P_MK 0x000f0000
+#define __HOSTFN0_INT_STATUS_P_SH 16
+#define __HOSTFN0_INT_STATUS_P(_v) ((_v) << __HOSTFN0_INT_STATUS_P_SH)
+#define __HOSTFN0_INT_STATUS_F 0x0000ffff
+#define HOSTFN0_INT_MSK 0x00014004
+#define HOST_PAGE_NUM_FN0 0x00014008
+#define __HOST_PAGE_NUM_FN 0x000001ff
+#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c
+#define __MSIX_ERR_INDEX_FN 0x000001ff
+#define HOSTFN1_INT_STATUS 0x00014100
+#define __HOSTFN1_HALT_OCCURRED 0x01000000
+#define __HOSTFN1_INT_STATUS_LVL_MK 0x00f00000
+#define __HOSTFN1_INT_STATUS_LVL_SH 20
+#define __HOSTFN1_INT_STATUS_LVL(_v) ((_v) << __HOSTFN1_INT_STATUS_LVL_SH)
+#define __HOSTFN1_INT_STATUS_P_MK 0x000f0000
+#define __HOSTFN1_INT_STATUS_P_SH 16
+#define __HOSTFN1_INT_STATUS_P(_v) ((_v) << __HOSTFN1_INT_STATUS_P_SH)
+#define __HOSTFN1_INT_STATUS_F 0x0000ffff
+#define HOSTFN1_INT_MSK 0x00014104
+#define HOST_PAGE_NUM_FN1 0x00014108
+#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c
+#define APP_PLL_425_CTL_REG 0x00014204
+#define __P_425_PLL_LOCK 0x80000000
+#define __APP_PLL_425_SRAM_USE_100MHZ 0x00100000
+#define __APP_PLL_425_RESET_TIMER_MK 0x000e0000
+#define __APP_PLL_425_RESET_TIMER_SH 17
+#define __APP_PLL_425_RESET_TIMER(_v) ((_v) << __APP_PLL_425_RESET_TIMER_SH)
+#define __APP_PLL_425_LOGIC_SOFT_RESET 0x00010000
+#define __APP_PLL_425_CNTLMT0_1_MK 0x0000c000
+#define __APP_PLL_425_CNTLMT0_1_SH 14
+#define __APP_PLL_425_CNTLMT0_1(_v) ((_v) << __APP_PLL_425_CNTLMT0_1_SH)
+#define __APP_PLL_425_JITLMT0_1_MK 0x00003000
+#define __APP_PLL_425_JITLMT0_1_SH 12
+#define __APP_PLL_425_JITLMT0_1(_v) ((_v) << __APP_PLL_425_JITLMT0_1_SH)
+#define __APP_PLL_425_HREF 0x00000800
+#define __APP_PLL_425_HDIV 0x00000400
+#define __APP_PLL_425_P0_1_MK 0x00000300
+#define __APP_PLL_425_P0_1_SH 8
+#define __APP_PLL_425_P0_1(_v) ((_v) << __APP_PLL_425_P0_1_SH)
+#define __APP_PLL_425_Z0_2_MK 0x000000e0
+#define __APP_PLL_425_Z0_2_SH 5
+#define __APP_PLL_425_Z0_2(_v) ((_v) << __APP_PLL_425_Z0_2_SH)
+#define __APP_PLL_425_RSEL200500 0x00000010
+#define __APP_PLL_425_ENARST 0x00000008
+#define __APP_PLL_425_BYPASS 0x00000004
+#define __APP_PLL_425_LRESETN 0x00000002
+#define __APP_PLL_425_ENABLE 0x00000001
+#define APP_PLL_312_CTL_REG 0x00014208
+#define __P_312_PLL_LOCK 0x80000000
+#define __ENABLE_MAC_AHB_1 0x00800000
+#define __ENABLE_MAC_AHB_0 0x00400000
+#define __ENABLE_MAC_1 0x00200000
+#define __ENABLE_MAC_0 0x00100000
+#define __APP_PLL_312_RESET_TIMER_MK 0x000e0000
+#define __APP_PLL_312_RESET_TIMER_SH 17
+#define __APP_PLL_312_RESET_TIMER(_v) ((_v) << __APP_PLL_312_RESET_TIMER_SH)
+#define __APP_PLL_312_LOGIC_SOFT_RESET 0x00010000
+#define __APP_PLL_312_CNTLMT0_1_MK 0x0000c000
+#define __APP_PLL_312_CNTLMT0_1_SH 14
+#define __APP_PLL_312_CNTLMT0_1(_v) ((_v) << __APP_PLL_312_CNTLMT0_1_SH)
+#define __APP_PLL_312_JITLMT0_1_MK 0x00003000
+#define __APP_PLL_312_JITLMT0_1_SH 12
+#define __APP_PLL_312_JITLMT0_1(_v) ((_v) << __APP_PLL_312_JITLMT0_1_SH)
+#define __APP_PLL_312_HREF 0x00000800
+#define __APP_PLL_312_HDIV 0x00000400
+#define __APP_PLL_312_P0_1_MK 0x00000300
+#define __APP_PLL_312_P0_1_SH 8
+#define __APP_PLL_312_P0_1(_v) ((_v) << __APP_PLL_312_P0_1_SH)
+#define __APP_PLL_312_Z0_2_MK 0x000000e0
+#define __APP_PLL_312_Z0_2_SH 5
+#define __APP_PLL_312_Z0_2(_v) ((_v) << __APP_PLL_312_Z0_2_SH)
+#define __APP_PLL_312_RSEL200500 0x00000010
+#define __APP_PLL_312_ENARST 0x00000008
+#define __APP_PLL_312_BYPASS 0x00000004
+#define __APP_PLL_312_LRESETN 0x00000002
+#define __APP_PLL_312_ENABLE 0x00000001
+#define MBIST_CTL_REG 0x00014220
+#define __EDRAM_BISTR_START 0x00000004
+#define __MBIST_RESET 0x00000002
+#define __MBIST_START 0x00000001
+#define MBIST_STAT_REG 0x00014224
+#define __EDRAM_BISTR_STATUS 0x00000008
+#define __EDRAM_BISTR_DONE 0x00000004
+#define __MEM_BIT_STATUS 0x00000002
+#define __MBIST_DONE 0x00000001
+#define HOST_SEM0_REG 0x00014230
+#define __HOST_SEMAPHORE 0x00000001
+#define HOST_SEM1_REG 0x00014234
+#define HOST_SEM2_REG 0x00014238
+#define HOST_SEM3_REG 0x0001423c
+#define HOST_SEM0_INFO_REG 0x00014240
+#define HOST_SEM1_INFO_REG 0x00014244
+#define HOST_SEM2_INFO_REG 0x00014248
+#define HOST_SEM3_INFO_REG 0x0001424c
+#define ETH_MAC_SER_REG 0x00014288
+#define __APP_EMS_CKBUFAMPIN 0x00000020
+#define __APP_EMS_REFCLKSEL 0x00000010
+#define __APP_EMS_CMLCKSEL 0x00000008
+#define __APP_EMS_REFCKBUFEN2 0x00000004
+#define __APP_EMS_REFCKBUFEN1 0x00000002
+#define __APP_EMS_CHANNEL_SEL 0x00000001
+#define HOSTFN2_INT_STATUS 0x00014300
+#define __HOSTFN2_HALT_OCCURRED 0x01000000
+#define __HOSTFN2_INT_STATUS_LVL_MK 0x00f00000
+#define __HOSTFN2_INT_STATUS_LVL_SH 20
+#define __HOSTFN2_INT_STATUS_LVL(_v) ((_v) << __HOSTFN2_INT_STATUS_LVL_SH)
+#define __HOSTFN2_INT_STATUS_P_MK 0x000f0000
+#define __HOSTFN2_INT_STATUS_P_SH 16
+#define __HOSTFN2_INT_STATUS_P(_v) ((_v) << __HOSTFN2_INT_STATUS_P_SH)
+#define __HOSTFN2_INT_STATUS_F 0x0000ffff
+#define HOSTFN2_INT_MSK 0x00014304
+#define HOST_PAGE_NUM_FN2 0x00014308
+#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c
+#define HOSTFN3_INT_STATUS 0x00014400
+#define __HALT_OCCURRED 0x01000000
+#define __HOSTFN3_INT_STATUS_LVL_MK 0x00f00000
+#define __HOSTFN3_INT_STATUS_LVL_SH 20
+#define __HOSTFN3_INT_STATUS_LVL(_v) ((_v) << __HOSTFN3_INT_STATUS_LVL_SH)
+#define __HOSTFN3_INT_STATUS_P_MK 0x000f0000
+#define __HOSTFN3_INT_STATUS_P_SH 16
+#define __HOSTFN3_INT_STATUS_P(_v) ((_v) << __HOSTFN3_INT_STATUS_P_SH)
+#define __HOSTFN3_INT_STATUS_F 0x0000ffff
+#define HOSTFN3_INT_MSK 0x00014404
+#define HOST_PAGE_NUM_FN3 0x00014408
+#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c
+#define FNC_ID_REG 0x00014600
+#define __FUNCTION_NUMBER 0x00000007
+#define FNC_PERS_REG 0x00014604
+#define __F3_FUNCTION_ACTIVE 0x80000000
+#define __F3_FUNCTION_MODE 0x40000000
+#define __F3_PORT_MAP_MK 0x30000000
+#define __F3_PORT_MAP_SH 28
+#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH)
+#define __F3_VM_MODE 0x08000000
+#define __F3_INTX_STATUS_MK 0x07000000
+#define __F3_INTX_STATUS_SH 24
+#define __F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH)
+#define __F2_FUNCTION_ACTIVE 0x00800000
+#define __F2_FUNCTION_MODE 0x00400000
+#define __F2_PORT_MAP_MK 0x00300000
+#define __F2_PORT_MAP_SH 20
+#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH)
+#define __F2_VM_MODE 0x00080000
+#define __F2_INTX_STATUS_MK 0x00070000
+#define __F2_INTX_STATUS_SH 16
+#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH)
+#define __F1_FUNCTION_ACTIVE 0x00008000
+#define __F1_FUNCTION_MODE 0x00004000
+#define __F1_PORT_MAP_MK 0x00003000
+#define __F1_PORT_MAP_SH 12
+#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH)
+#define __F1_VM_MODE 0x00000800
+#define __F1_INTX_STATUS_MK 0x00000700
+#define __F1_INTX_STATUS_SH 8
+#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH)
+#define __F0_FUNCTION_ACTIVE 0x00000080
+#define __F0_FUNCTION_MODE 0x00000040
+#define __F0_PORT_MAP_MK 0x00000030
+#define __F0_PORT_MAP_SH 4
+#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH)
+#define __F0_VM_MODE 0x00000008
+#define __F0_INTX_STATUS 0x00000007
+enum {
+ __F0_INTX_STATUS_MSIX = 0x0,
+ __F0_INTX_STATUS_INTA = 0x1,
+ __F0_INTX_STATUS_INTB = 0x2,
+ __F0_INTX_STATUS_INTC = 0x3,
+ __F0_INTX_STATUS_INTD = 0x4,
+};
+#define OP_MODE 0x0001460c
+#define __APP_ETH_CLK_LOWSPEED 0x00000004
+#define __GLOBAL_CORECLK_HALFSPEED 0x00000002
+#define __GLOBAL_FCOE_MODE 0x00000001
+#define HOST_SEM4_REG 0x00014610
+#define HOST_SEM5_REG 0x00014614
+#define HOST_SEM6_REG 0x00014618
+#define HOST_SEM7_REG 0x0001461c
+#define HOST_SEM4_INFO_REG 0x00014620
+#define HOST_SEM5_INFO_REG 0x00014624
+#define HOST_SEM6_INFO_REG 0x00014628
+#define HOST_SEM7_INFO_REG 0x0001462c
+#define HOSTFN0_LPU0_MBOX0_CMD_STAT 0x00019000
+#define __HOSTFN0_LPU0_MBOX0_INFO_MK 0xfffffffe
+#define __HOSTFN0_LPU0_MBOX0_INFO_SH 1
+#define __HOSTFN0_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU0_MBOX0_INFO_SH)
+#define __HOSTFN0_LPU0_MBOX0_CMD_STATUS 0x00000001
+#define HOSTFN0_LPU1_MBOX0_CMD_STAT 0x00019004
+#define __HOSTFN0_LPU1_MBOX0_INFO_MK 0xfffffffe
+#define __HOSTFN0_LPU1_MBOX0_INFO_SH 1
+#define __HOSTFN0_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU1_MBOX0_INFO_SH)
+#define __HOSTFN0_LPU1_MBOX0_CMD_STATUS 0x00000001
+#define LPU0_HOSTFN0_MBOX0_CMD_STAT 0x00019008
+#define __LPU0_HOSTFN0_MBOX0_INFO_MK 0xfffffffe
+#define __LPU0_HOSTFN0_MBOX0_INFO_SH 1
+#define __LPU0_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN0_MBOX0_INFO_SH)
+#define __LPU0_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
+#define LPU1_HOSTFN0_MBOX0_CMD_STAT 0x0001900c
+#define __LPU1_HOSTFN0_MBOX0_INFO_MK 0xfffffffe
+#define __LPU1_HOSTFN0_MBOX0_INFO_SH 1
+#define __LPU1_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN0_MBOX0_INFO_SH)
+#define __LPU1_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
+#define HOSTFN1_LPU0_MBOX0_CMD_STAT 0x00019010
+#define __HOSTFN1_LPU0_MBOX0_INFO_MK 0xfffffffe
+#define __HOSTFN1_LPU0_MBOX0_INFO_SH 1
+#define __HOSTFN1_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU0_MBOX0_INFO_SH)
+#define __HOSTFN1_LPU0_MBOX0_CMD_STATUS 0x00000001
+#define HOSTFN1_LPU1_MBOX0_CMD_STAT 0x00019014
+#define __HOSTFN1_LPU1_MBOX0_INFO_MK 0xfffffffe
+#define __HOSTFN1_LPU1_MBOX0_INFO_SH 1
+#define __HOSTFN1_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU1_MBOX0_INFO_SH)
+#define __HOSTFN1_LPU1_MBOX0_CMD_STATUS 0x00000001
+#define LPU0_HOSTFN1_MBOX0_CMD_STAT 0x00019018
+#define __LPU0_HOSTFN1_MBOX0_INFO_MK 0xfffffffe
+#define __LPU0_HOSTFN1_MBOX0_INFO_SH 1
+#define __LPU0_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN1_MBOX0_INFO_SH)
+#define __LPU0_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
+#define LPU1_HOSTFN1_MBOX0_CMD_STAT 0x0001901c
+#define __LPU1_HOSTFN1_MBOX0_INFO_MK 0xfffffffe
+#define __LPU1_HOSTFN1_MBOX0_INFO_SH 1
+#define __LPU1_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN1_MBOX0_INFO_SH)
+#define __LPU1_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
+#define HOSTFN2_LPU0_MBOX0_CMD_STAT 0x00019150
+#define __HOSTFN2_LPU0_MBOX0_INFO_MK 0xfffffffe
+#define __HOSTFN2_LPU0_MBOX0_INFO_SH 1
+#define __HOSTFN2_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU0_MBOX0_INFO_SH)
+#define __HOSTFN2_LPU0_MBOX0_CMD_STATUS 0x00000001
+#define HOSTFN2_LPU1_MBOX0_CMD_STAT 0x00019154
+#define __HOSTFN2_LPU1_MBOX0_INFO_MK 0xfffffffe
+#define __HOSTFN2_LPU1_MBOX0_INFO_SH 1
+#define __HOSTFN2_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU1_MBOX0_INFO_SH)
+#define __HOSTFN2_LPU1_MBOX0_CMD_STATUS 0x00000001
+#define LPU0_HOSTFN2_MBOX0_CMD_STAT 0x00019158
+#define __LPU0_HOSTFN2_MBOX0_INFO_MK 0xfffffffe
+#define __LPU0_HOSTFN2_MBOX0_INFO_SH 1
+#define __LPU0_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN2_MBOX0_INFO_SH)
+#define __LPU0_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
+#define LPU1_HOSTFN2_MBOX0_CMD_STAT 0x0001915c
+#define __LPU1_HOSTFN2_MBOX0_INFO_MK 0xfffffffe
+#define __LPU1_HOSTFN2_MBOX0_INFO_SH 1
+#define __LPU1_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN2_MBOX0_INFO_SH)
+#define __LPU1_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
+#define HOSTFN3_LPU0_MBOX0_CMD_STAT 0x00019160
+#define __HOSTFN3_LPU0_MBOX0_INFO_MK 0xfffffffe
+#define __HOSTFN3_LPU0_MBOX0_INFO_SH 1
+#define __HOSTFN3_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU0_MBOX0_INFO_SH)
+#define __HOSTFN3_LPU0_MBOX0_CMD_STATUS 0x00000001
+#define HOSTFN3_LPU1_MBOX0_CMD_STAT 0x00019164
+#define __HOSTFN3_LPU1_MBOX0_INFO_MK 0xfffffffe
+#define __HOSTFN3_LPU1_MBOX0_INFO_SH 1
+#define __HOSTFN3_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU1_MBOX0_INFO_SH)
+#define __HOSTFN3_LPU1_MBOX0_CMD_STATUS 0x00000001
+#define LPU0_HOSTFN3_MBOX0_CMD_STAT 0x00019168
+#define __LPU0_HOSTFN3_MBOX0_INFO_MK 0xfffffffe
+#define __LPU0_HOSTFN3_MBOX0_INFO_SH 1
+#define __LPU0_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN3_MBOX0_INFO_SH)
+#define __LPU0_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
+#define LPU1_HOSTFN3_MBOX0_CMD_STAT 0x0001916c
+#define __LPU1_HOSTFN3_MBOX0_INFO_MK 0xfffffffe
+#define __LPU1_HOSTFN3_MBOX0_INFO_SH 1
+#define __LPU1_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN3_MBOX0_INFO_SH)
+#define __LPU1_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
+#define FW_INIT_HALT_P0 0x000191ac
+#define __FW_INIT_HALT_P 0x00000001
+#define FW_INIT_HALT_P1 0x000191bc
+#define CPE_PI_PTR_Q0 0x00038000
+#define __CPE_PI_UNUSED_MK 0xffff0000
+#define __CPE_PI_UNUSED_SH 16
+#define __CPE_PI_UNUSED(_v) ((_v) << __CPE_PI_UNUSED_SH)
+#define __CPE_PI_PTR 0x0000ffff
+#define CPE_PI_PTR_Q1 0x00038040
+#define CPE_CI_PTR_Q0 0x00038004
+#define __CPE_CI_UNUSED_MK 0xffff0000
+#define __CPE_CI_UNUSED_SH 16
+#define __CPE_CI_UNUSED(_v) ((_v) << __CPE_CI_UNUSED_SH)
+#define __CPE_CI_PTR 0x0000ffff
+#define CPE_CI_PTR_Q1 0x00038044
+#define CPE_DEPTH_Q0 0x00038008
+#define __CPE_DEPTH_UNUSED_MK 0xf8000000
+#define __CPE_DEPTH_UNUSED_SH 27
+#define __CPE_DEPTH_UNUSED(_v) ((_v) << __CPE_DEPTH_UNUSED_SH)
+#define __CPE_MSIX_VEC_INDEX_MK 0x07ff0000
+#define __CPE_MSIX_VEC_INDEX_SH 16
+#define __CPE_MSIX_VEC_INDEX(_v) ((_v) << __CPE_MSIX_VEC_INDEX_SH)
+#define __CPE_DEPTH 0x0000ffff
+#define CPE_DEPTH_Q1 0x00038048
+#define CPE_QCTRL_Q0 0x0003800c
+#define __CPE_CTRL_UNUSED30_MK 0xfc000000
+#define __CPE_CTRL_UNUSED30_SH 26
+#define __CPE_CTRL_UNUSED30(_v) ((_v) << __CPE_CTRL_UNUSED30_SH)
+#define __CPE_FUNC_INT_CTRL_MK 0x03000000
+#define __CPE_FUNC_INT_CTRL_SH 24
+#define __CPE_FUNC_INT_CTRL(_v) ((_v) << __CPE_FUNC_INT_CTRL_SH)
+enum {
+ __CPE_FUNC_INT_CTRL_DISABLE = 0x0,
+ __CPE_FUNC_INT_CTRL_F2NF = 0x1,
+ __CPE_FUNC_INT_CTRL_3QUART = 0x2,
+ __CPE_FUNC_INT_CTRL_HALF = 0x3,
+};
+#define __CPE_CTRL_UNUSED20_MK 0x00f00000
+#define __CPE_CTRL_UNUSED20_SH 20
+#define __CPE_CTRL_UNUSED20(_v) ((_v) << __CPE_CTRL_UNUSED20_SH)
+#define __CPE_SCI_TH_MK 0x000f0000
+#define __CPE_SCI_TH_SH 16
+#define __CPE_SCI_TH(_v) ((_v) << __CPE_SCI_TH_SH)
+#define __CPE_CTRL_UNUSED10_MK 0x0000c000
+#define __CPE_CTRL_UNUSED10_SH 14
+#define __CPE_CTRL_UNUSED10(_v) ((_v) << __CPE_CTRL_UNUSED10_SH)
+#define __CPE_ACK_PENDING 0x00002000
+#define __CPE_CTRL_UNUSED40_MK 0x00001c00
+#define __CPE_CTRL_UNUSED40_SH 10
+#define __CPE_CTRL_UNUSED40(_v) ((_v) << __CPE_CTRL_UNUSED40_SH)
+#define __CPE_PCIEID_MK 0x00000300
+#define __CPE_PCIEID_SH 8
+#define __CPE_PCIEID(_v) ((_v) << __CPE_PCIEID_SH)
+#define __CPE_CTRL_UNUSED00_MK 0x000000fe
+#define __CPE_CTRL_UNUSED00_SH 1
+#define __CPE_CTRL_UNUSED00(_v) ((_v) << __CPE_CTRL_UNUSED00_SH)
+#define __CPE_ESIZE 0x00000001
+#define CPE_QCTRL_Q1 0x0003804c
+#define __CPE_CTRL_UNUSED31_MK 0xfc000000
+#define __CPE_CTRL_UNUSED31_SH 26
+#define __CPE_CTRL_UNUSED31(_v) ((_v) << __CPE_CTRL_UNUSED31_SH)
+#define __CPE_CTRL_UNUSED21_MK 0x00f00000
+#define __CPE_CTRL_UNUSED21_SH 20
+#define __CPE_CTRL_UNUSED21(_v) ((_v) << __CPE_CTRL_UNUSED21_SH)
+#define __CPE_CTRL_UNUSED11_MK 0x0000c000
+#define __CPE_CTRL_UNUSED11_SH 14
+#define __CPE_CTRL_UNUSED11(_v) ((_v) << __CPE_CTRL_UNUSED11_SH)
+#define __CPE_CTRL_UNUSED41_MK 0x00001c00
+#define __CPE_CTRL_UNUSED41_SH 10
+#define __CPE_CTRL_UNUSED41(_v) ((_v) << __CPE_CTRL_UNUSED41_SH)
+#define __CPE_CTRL_UNUSED01_MK 0x000000fe
+#define __CPE_CTRL_UNUSED01_SH 1
+#define __CPE_CTRL_UNUSED01(_v) ((_v) << __CPE_CTRL_UNUSED01_SH)
+#define RME_PI_PTR_Q0 0x00038020
+#define __LATENCY_TIME_STAMP_MK 0xffff0000
+#define __LATENCY_TIME_STAMP_SH 16
+#define __LATENCY_TIME_STAMP(_v) ((_v) << __LATENCY_TIME_STAMP_SH)
+#define __RME_PI_PTR 0x0000ffff
+#define RME_PI_PTR_Q1 0x00038060
+#define RME_CI_PTR_Q0 0x00038024
+#define __DELAY_TIME_STAMP_MK 0xffff0000
+#define __DELAY_TIME_STAMP_SH 16
+#define __DELAY_TIME_STAMP(_v) ((_v) << __DELAY_TIME_STAMP_SH)
+#define __RME_CI_PTR 0x0000ffff
+#define RME_CI_PTR_Q1 0x00038064
+#define RME_DEPTH_Q0 0x00038028
+#define __RME_DEPTH_UNUSED_MK 0xf8000000
+#define __RME_DEPTH_UNUSED_SH 27
+#define __RME_DEPTH_UNUSED(_v) ((_v) << __RME_DEPTH_UNUSED_SH)
+#define __RME_MSIX_VEC_INDEX_MK 0x07ff0000
+#define __RME_MSIX_VEC_INDEX_SH 16
+#define __RME_MSIX_VEC_INDEX(_v) ((_v) << __RME_MSIX_VEC_INDEX_SH)
+#define __RME_DEPTH 0x0000ffff
+#define RME_DEPTH_Q1 0x00038068
+#define RME_QCTRL_Q0 0x0003802c
+#define __RME_INT_LATENCY_TIMER_MK 0xff000000
+#define __RME_INT_LATENCY_TIMER_SH 24
+#define __RME_INT_LATENCY_TIMER(_v) ((_v) << __RME_INT_LATENCY_TIMER_SH)
+#define __RME_INT_DELAY_TIMER_MK 0x00ff0000
+#define __RME_INT_DELAY_TIMER_SH 16
+#define __RME_INT_DELAY_TIMER(_v) ((_v) << __RME_INT_DELAY_TIMER_SH)
+#define __RME_INT_DELAY_DISABLE 0x00008000
+#define __RME_DLY_DELAY_DISABLE 0x00004000
+#define __RME_ACK_PENDING 0x00002000
+#define __RME_FULL_INTERRUPT_DISABLE 0x00001000
+#define __RME_CTRL_UNUSED10_MK 0x00000c00
+#define __RME_CTRL_UNUSED10_SH 10
+#define __RME_CTRL_UNUSED10(_v) ((_v) << __RME_CTRL_UNUSED10_SH)
+#define __RME_PCIEID_MK 0x00000300
+#define __RME_PCIEID_SH 8
+#define __RME_PCIEID(_v) ((_v) << __RME_PCIEID_SH)
+#define __RME_CTRL_UNUSED00_MK 0x000000fe
+#define __RME_CTRL_UNUSED00_SH 1
+#define __RME_CTRL_UNUSED00(_v) ((_v) << __RME_CTRL_UNUSED00_SH)
+#define __RME_ESIZE 0x00000001
+#define RME_QCTRL_Q1 0x0003806c
+#define __RME_CTRL_UNUSED11_MK 0x00000c00
+#define __RME_CTRL_UNUSED11_SH 10
+#define __RME_CTRL_UNUSED11(_v) ((_v) << __RME_CTRL_UNUSED11_SH)
+#define __RME_CTRL_UNUSED01_MK 0x000000fe
+#define __RME_CTRL_UNUSED01_SH 1
+#define __RME_CTRL_UNUSED01(_v) ((_v) << __RME_CTRL_UNUSED01_SH)
+#define PSS_CTL_REG 0x00018800
+#define __PSS_I2C_CLK_DIV_MK 0x007f0000
+#define __PSS_I2C_CLK_DIV_SH 16
+#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH)
+#define __PSS_LMEM_INIT_DONE 0x00001000
+#define __PSS_LMEM_RESET 0x00000200
+#define __PSS_LMEM_INIT_EN 0x00000100
+#define __PSS_LPU1_RESET 0x00000002
+#define __PSS_LPU0_RESET 0x00000001
+#define PSS_ERR_STATUS_REG 0x00018810
+#define __PSS_LPU1_TCM_READ_ERR 0x00200000
+#define __PSS_LPU0_TCM_READ_ERR 0x00100000
+#define __PSS_LMEM5_CORR_ERR 0x00080000
+#define __PSS_LMEM4_CORR_ERR 0x00040000
+#define __PSS_LMEM3_CORR_ERR 0x00020000
+#define __PSS_LMEM2_CORR_ERR 0x00010000
+#define __PSS_LMEM1_CORR_ERR 0x00008000
+#define __PSS_LMEM0_CORR_ERR 0x00004000
+#define __PSS_LMEM5_UNCORR_ERR 0x00002000
+#define __PSS_LMEM4_UNCORR_ERR 0x00001000
+#define __PSS_LMEM3_UNCORR_ERR 0x00000800
+#define __PSS_LMEM2_UNCORR_ERR 0x00000400
+#define __PSS_LMEM1_UNCORR_ERR 0x00000200
+#define __PSS_LMEM0_UNCORR_ERR 0x00000100
+#define __PSS_BAL_PERR 0x00000080
+#define __PSS_DIP_IF_ERR 0x00000040
+#define __PSS_IOH_IF_ERR 0x00000020
+#define __PSS_TDS_IF_ERR 0x00000010
+#define __PSS_RDS_IF_ERR 0x00000008
+#define __PSS_SGM_IF_ERR 0x00000004
+#define __PSS_LPU1_RAM_ERR 0x00000002
+#define __PSS_LPU0_RAM_ERR 0x00000001
+#define ERR_SET_REG 0x00018818
+#define __PSS_ERR_STATUS_SET 0x003fffff
+#define PMM_1T_RESET_REG_P0 0x0002381c
+#define __PMM_1T_RESET_P 0x00000001
+#define PMM_1T_RESET_REG_P1 0x00023c1c
+#define HQM_QSET0_RXQ_DRBL_P0 0x00038000
+#define __RXQ0_ADD_VECTORS_P 0x80000000
+#define __RXQ0_STOP_P 0x40000000
+#define __RXQ0_PRD_PTR_P 0x0000ffff
+#define HQM_QSET1_RXQ_DRBL_P0 0x00038080
+#define __RXQ1_ADD_VECTORS_P 0x80000000
+#define __RXQ1_STOP_P 0x40000000
+#define __RXQ1_PRD_PTR_P 0x0000ffff
+#define HQM_QSET0_RXQ_DRBL_P1 0x0003c000
+#define HQM_QSET1_RXQ_DRBL_P1 0x0003c080
+#define HQM_QSET0_TXQ_DRBL_P0 0x00038020
+#define __TXQ0_ADD_VECTORS_P 0x80000000
+#define __TXQ0_STOP_P 0x40000000
+#define __TXQ0_PRD_PTR_P 0x0000ffff
+#define HQM_QSET1_TXQ_DRBL_P0 0x000380a0
+#define __TXQ1_ADD_VECTORS_P 0x80000000
+#define __TXQ1_STOP_P 0x40000000
+#define __TXQ1_PRD_PTR_P 0x0000ffff
+#define HQM_QSET0_TXQ_DRBL_P1 0x0003c020
+#define HQM_QSET1_TXQ_DRBL_P1 0x0003c0a0
+#define HQM_QSET0_IB_DRBL_1_P0 0x00038040
+#define __IB1_0_ACK_P 0x80000000
+#define __IB1_0_DISABLE_P 0x40000000
+#define __IB1_0_COALESCING_CFG_P_MK 0x00ff0000
+#define __IB1_0_COALESCING_CFG_P_SH 16
+#define __IB1_0_COALESCING_CFG_P(_v) ((_v) << __IB1_0_COALESCING_CFG_P_SH)
+#define __IB1_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff
+#define HQM_QSET1_IB_DRBL_1_P0 0x000380c0
+#define __IB1_1_ACK_P 0x80000000
+#define __IB1_1_DISABLE_P 0x40000000
+#define __IB1_1_COALESCING_CFG_P_MK 0x00ff0000
+#define __IB1_1_COALESCING_CFG_P_SH 16
+#define __IB1_1_COALESCING_CFG_P(_v) ((_v) << __IB1_1_COALESCING_CFG_P_SH)
+#define __IB1_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff
+#define HQM_QSET0_IB_DRBL_1_P1 0x0003c040
+#define HQM_QSET1_IB_DRBL_1_P1 0x0003c0c0
+#define HQM_QSET0_IB_DRBL_2_P0 0x00038060
+#define __IB2_0_ACK_P 0x80000000
+#define __IB2_0_DISABLE_P 0x40000000
+#define __IB2_0_COALESCING_CFG_P_MK 0x00ff0000
+#define __IB2_0_COALESCING_CFG_P_SH 16
+#define __IB2_0_COALESCING_CFG_P(_v) ((_v) << __IB2_0_COALESCING_CFG_P_SH)
+#define __IB2_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff
+#define HQM_QSET1_IB_DRBL_2_P0 0x000380e0
+#define __IB2_1_ACK_P 0x80000000
+#define __IB2_1_DISABLE_P 0x40000000
+#define __IB2_1_COALESCING_CFG_P_MK 0x00ff0000
+#define __IB2_1_COALESCING_CFG_P_SH 16
+#define __IB2_1_COALESCING_CFG_P(_v) ((_v) << __IB2_1_COALESCING_CFG_P_SH)
+#define __IB2_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff
+#define HQM_QSET0_IB_DRBL_2_P1 0x0003c060
+#define HQM_QSET1_IB_DRBL_2_P1 0x0003c0e0
+
+/*
+ * These definitions are either in error or missing from the spec. They are
+ * auto-generated from hard-coded values in regparse.pl.
+ */
+#define __EMPHPOST_AT_4G_MK_FIX 0x0000001c
+#define __EMPHPOST_AT_4G_SH_FIX 0x00000002
+#define __EMPHPRE_AT_4G_FIX 0x00000003
+#define __SFP_TXRATE_EN_FIX 0x00000100
+#define __SFP_RXRATE_EN_FIX 0x00000080
+
+/*
+ * These register definitions are auto-generated from hard-coded values
+ * in regparse.pl.
+ */
+
+/*
+ * These register mapping definitions are auto-generated from mapping tables
+ * in regparse.pl.
+ */
+#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG
+#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG
+#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
+#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
+#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
+
+#define CPE_DEPTH_Q(__n) \
+ (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))
+#define CPE_QCTRL_Q(__n) \
+ (CPE_QCTRL_Q0 + (__n) * (CPE_QCTRL_Q1 - CPE_QCTRL_Q0))
+#define CPE_PI_PTR_Q(__n) \
+ (CPE_PI_PTR_Q0 + (__n) * (CPE_PI_PTR_Q1 - CPE_PI_PTR_Q0))
+#define CPE_CI_PTR_Q(__n) \
+ (CPE_CI_PTR_Q0 + (__n) * (CPE_CI_PTR_Q1 - CPE_CI_PTR_Q0))
+#define RME_DEPTH_Q(__n) \
+ (RME_DEPTH_Q0 + (__n) * (RME_DEPTH_Q1 - RME_DEPTH_Q0))
+#define RME_QCTRL_Q(__n) \
+ (RME_QCTRL_Q0 + (__n) * (RME_QCTRL_Q1 - RME_QCTRL_Q0))
+#define RME_PI_PTR_Q(__n) \
+ (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0))
+#define RME_CI_PTR_Q(__n) \
+ (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0))
+#define HQM_QSET_RXQ_DRBL_P0(__n) (HQM_QSET0_RXQ_DRBL_P0 + (__n) \
+ * (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
+#define HQM_QSET_TXQ_DRBL_P0(__n) (HQM_QSET0_TXQ_DRBL_P0 + (__n) \
+ * (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
+#define HQM_QSET_IB_DRBL_1_P0(__n) (HQM_QSET0_IB_DRBL_1_P0 + (__n) \
+ * (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
+#define HQM_QSET_IB_DRBL_2_P0(__n) (HQM_QSET0_IB_DRBL_2_P0 + (__n) \
+ * (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
+#define HQM_QSET_RXQ_DRBL_P1(__n) (HQM_QSET0_RXQ_DRBL_P1 + (__n) \
+ * (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
+#define HQM_QSET_TXQ_DRBL_P1(__n) (HQM_QSET0_TXQ_DRBL_P1 + (__n) \
+ * (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
+#define HQM_QSET_IB_DRBL_1_P1(__n) (HQM_QSET0_IB_DRBL_1_P1 + (__n) \
+ * (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
+#define HQM_QSET_IB_DRBL_2_P1(__n) (HQM_QSET0_IB_DRBL_2_P1 + (__n) \
+ * (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
+
+#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
+#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
+#define CPE_Q_MASK(__q) ((__q) & 0x3)
+#define RME_Q_MASK(__q) ((__q) & 0x3)
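+
+/*
+ * Illustrative examples (derived from the values above, not from the
+ * spec): the per-queue macros compute a register address from the
+ * Q0/Q1 pair, so queues sit a fixed stride apart, e.g.
+ *	RME_DEPTH_Q(2) = 0x00038028 + 2 * 0x40 = 0x000380a8
+ * and the function/queue encoding packs four queues per function:
+ *	CPE_Q_NUM(1, 2) = (1 << 2) + 2 = 6, CPE_Q_MASK(6) = 2
+ */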
+
+/*
+ * PCI MSI-X vector defines
+ */
+enum {
+ BFA_MSIX_CPE_Q0 = 0,
+ BFA_MSIX_CPE_Q1 = 1,
+ BFA_MSIX_CPE_Q2 = 2,
+ BFA_MSIX_CPE_Q3 = 3,
+ BFA_MSIX_RME_Q0 = 4,
+ BFA_MSIX_RME_Q1 = 5,
+ BFA_MSIX_RME_Q2 = 6,
+ BFA_MSIX_RME_Q3 = 7,
+ BFA_MSIX_LPU_ERR = 8,
+ BFA_MSIX_CT_MAX = 9,
+};
+
+/*
+ * Corresponding host interrupt status bit-field defines
+ */
+#define __HFN_INT_CPE_Q0 0x00000001U
+#define __HFN_INT_CPE_Q1 0x00000002U
+#define __HFN_INT_CPE_Q2 0x00000004U
+#define __HFN_INT_CPE_Q3 0x00000008U
+#define __HFN_INT_CPE_Q4 0x00000010U
+#define __HFN_INT_CPE_Q5 0x00000020U
+#define __HFN_INT_CPE_Q6 0x00000040U
+#define __HFN_INT_CPE_Q7 0x00000080U
+#define __HFN_INT_RME_Q0 0x00000100U
+#define __HFN_INT_RME_Q1 0x00000200U
+#define __HFN_INT_RME_Q2 0x00000400U
+#define __HFN_INT_RME_Q3 0x00000800U
+#define __HFN_INT_RME_Q4 0x00001000U
+#define __HFN_INT_RME_Q5 0x00002000U
+#define __HFN_INT_RME_Q6 0x00004000U
+#define __HFN_INT_RME_Q7 0x00008000U
+#define __HFN_INT_ERR_EMC 0x00010000U
+#define __HFN_INT_ERR_LPU0 0x00020000U
+#define __HFN_INT_ERR_LPU1 0x00040000U
+#define __HFN_INT_ERR_PSS 0x00080000U
+#define __HFN_INT_MBOX_LPU0 0x00100000U
+#define __HFN_INT_MBOX_LPU1 0x00200000U
+#define __HFN_INT_MBOX1_LPU0 0x00400000U
+#define __HFN_INT_MBOX1_LPU1 0x00800000U
+#define __HFN_INT_LL_HALT 0x01000000U
+#define __HFN_INT_CPE_MASK 0x000000ffU
+#define __HFN_INT_RME_MASK 0x0000ff00U
+
+/*
+ * Catapult memory map.
+ */
+#define LL_PGN_HQM0 0x0096
+#define LL_PGN_HQM1 0x0097
+#define PSS_SMEM_PAGE_START 0x8000
+#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15))
+#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff)
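+
+/*
+ * Example (illustrative): smem addresses split into a 32KB page number
+ * and an offset within that page, e.g. for _ma = 0x12345:
+ *	PSS_SMEM_PGNUM(PSS_SMEM_PAGE_START, 0x12345) = 0x8000 + 2 = 0x8002
+ *	PSS_SMEM_PGOFF(0x12345) = 0x2345
+ */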
+
+/*
+ * End of Catapult memory map
+ */
+
+#endif /* __BFI_CTREG_H__ */
diff --git a/drivers/net/bna/bfi_ll.h b/drivers/net/bna/bfi_ll.h
new file mode 100644
index 000000000000..bee4d054066a
--- /dev/null
+++ b/drivers/net/bna/bfi_ll.h
@@ -0,0 +1,438 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BFI_LL_H__
+#define __BFI_LL_H__
+
+#include "bfi.h"
+
+#pragma pack(1)
+
+/**
+ * @brief
+ * "enums" for all LL mailbox messages other than IOC
+ */
+enum {
+ BFI_LL_H2I_MAC_UCAST_SET_REQ = 1,
+ BFI_LL_H2I_MAC_UCAST_ADD_REQ = 2,
+ BFI_LL_H2I_MAC_UCAST_DEL_REQ = 3,
+
+ BFI_LL_H2I_MAC_MCAST_ADD_REQ = 4,
+ BFI_LL_H2I_MAC_MCAST_DEL_REQ = 5,
+ BFI_LL_H2I_MAC_MCAST_FILTER_REQ = 6,
+ BFI_LL_H2I_MAC_MCAST_DEL_ALL_REQ = 7,
+
+ BFI_LL_H2I_PORT_ADMIN_REQ = 8,
+ BFI_LL_H2I_STATS_GET_REQ = 9,
+ BFI_LL_H2I_STATS_CLEAR_REQ = 10,
+
+ BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ = 11,
+ BFI_LL_H2I_RXF_DEFAULT_SET_REQ = 12,
+
+ BFI_LL_H2I_TXQ_STOP_REQ = 13,
+ BFI_LL_H2I_RXQ_STOP_REQ = 14,
+
+ BFI_LL_H2I_DIAG_LOOPBACK_REQ = 15,
+
+ BFI_LL_H2I_SET_PAUSE_REQ = 16,
+ BFI_LL_H2I_MTU_INFO_REQ = 17,
+
+ BFI_LL_H2I_RX_REQ = 18,
+};
+
+enum {
+ BFI_LL_I2H_MAC_UCAST_SET_RSP = BFA_I2HM(1),
+ BFI_LL_I2H_MAC_UCAST_ADD_RSP = BFA_I2HM(2),
+ BFI_LL_I2H_MAC_UCAST_DEL_RSP = BFA_I2HM(3),
+
+ BFI_LL_I2H_MAC_MCAST_ADD_RSP = BFA_I2HM(4),
+ BFI_LL_I2H_MAC_MCAST_DEL_RSP = BFA_I2HM(5),
+ BFI_LL_I2H_MAC_MCAST_FILTER_RSP = BFA_I2HM(6),
+ BFI_LL_I2H_MAC_MCAST_DEL_ALL_RSP = BFA_I2HM(7),
+
+ BFI_LL_I2H_PORT_ADMIN_RSP = BFA_I2HM(8),
+ BFI_LL_I2H_STATS_GET_RSP = BFA_I2HM(9),
+ BFI_LL_I2H_STATS_CLEAR_RSP = BFA_I2HM(10),
+
+ BFI_LL_I2H_RXF_PROMISCUOUS_SET_RSP = BFA_I2HM(11),
+ BFI_LL_I2H_RXF_DEFAULT_SET_RSP = BFA_I2HM(12),
+
+ BFI_LL_I2H_TXQ_STOP_RSP = BFA_I2HM(13),
+ BFI_LL_I2H_RXQ_STOP_RSP = BFA_I2HM(14),
+
+ BFI_LL_I2H_DIAG_LOOPBACK_RSP = BFA_I2HM(15),
+
+ BFI_LL_I2H_SET_PAUSE_RSP = BFA_I2HM(16),
+
+ BFI_LL_I2H_MTU_INFO_RSP = BFA_I2HM(17),
+ BFI_LL_I2H_RX_RSP = BFA_I2HM(18),
+
+ BFI_LL_I2H_LINK_DOWN_AEN = BFA_I2HM(19),
+ BFI_LL_I2H_LINK_UP_AEN = BFA_I2HM(20),
+
+ BFI_LL_I2H_PORT_ENABLE_AEN = BFA_I2HM(21),
+ BFI_LL_I2H_PORT_DISABLE_AEN = BFA_I2HM(22),
+};
+
+/**
+ * @brief bfi_ll_mac_addr_req is used by:
+ * BFI_LL_H2I_MAC_UCAST_SET_REQ
+ * BFI_LL_H2I_MAC_UCAST_ADD_REQ
+ * BFI_LL_H2I_MAC_UCAST_DEL_REQ
+ * BFI_LL_H2I_MAC_MCAST_ADD_REQ
+ * BFI_LL_H2I_MAC_MCAST_DEL_REQ
+ */
+struct bfi_ll_mac_addr_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u8 rxf_id;
+ u8 rsvd1[3];
+ mac_t mac_addr;
+ u8 rsvd2[2];
+};
+
+/**
+ * @brief bfi_ll_mcast_filter_req is used by:
+ * BFI_LL_H2I_MAC_MCAST_FILTER_REQ
+ */
+struct bfi_ll_mcast_filter_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u8 rxf_id;
+ u8 enable;
+ u8 rsvd[2];
+};
+
+/**
+ * @brief bfi_ll_mcast_del_all is used by:
+ * BFI_LL_H2I_MAC_MCAST_DEL_ALL_REQ
+ */
+struct bfi_ll_mcast_del_all_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u8 rxf_id;
+ u8 rsvd[3];
+};
+
+/**
+ * @brief bfi_ll_q_stop_req is used by:
+ * BFI_LL_H2I_TXQ_STOP_REQ
+ * BFI_LL_H2I_RXQ_STOP_REQ
+ */
+struct bfi_ll_q_stop_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+	u32 q_id_mask[2];	/*!< bit-mask for queue ids */
+};
+
+/**
+ * @brief bfi_ll_stats_req is used by:
+ *	BFI_LL_H2I_STATS_GET_REQ
+ *	BFI_LL_H2I_STATS_CLEAR_REQ
+ */
+struct bfi_ll_stats_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+	u16 stats_mask;	/*!< bit-mask for non-function statistics */
+	u8 rsvd[2];
+	u32 rxf_id_mask[2];	/*!< bit-mask for RxF statistics */
+	u32 txf_id_mask[2];	/*!< bit-mask for TxF statistics */
+	union bfi_addr_u host_buffer; /*!< where statistics are returned */
+};
+
+/**
+ * @brief defines for "stats_mask" above.
+ */
+#define BFI_LL_STATS_MAC	(1 << 0)	/*!< MAC Statistics */
+#define BFI_LL_STATS_BPC	(1 << 1)	/*!< Pause Stats from BPC */
+#define BFI_LL_STATS_RAD	(1 << 2)	/*!< Rx Admission Statistics */
+#define BFI_LL_STATS_RX_FC	(1 << 3)	/*!< Rx FC Stats from RxA */
+#define BFI_LL_STATS_TX_FC	(1 << 4)	/*!< Tx FC Stats from TxA */
+
+#define BFI_LL_STATS_ALL 0x1f
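+
+/*
+ * Example (illustrative): a request for only MAC and pause counters
+ * would set stats_mask = BFI_LL_STATS_MAC | BFI_LL_STATS_BPC (0x03);
+ * BFI_LL_STATS_ALL is simply all five bits above ORed together.
+ */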
+
+/**
+ * @brief bfi_ll_port_admin_req
+ */
+struct bfi_ll_port_admin_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u8 up;
+ u8 rsvd[3];
+};
+
+/**
+ * @brief bfi_ll_rxf_req is used by:
+ * BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ
+ * BFI_LL_H2I_RXF_DEFAULT_SET_REQ
+ */
+struct bfi_ll_rxf_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u8 rxf_id;
+ u8 enable;
+ u8 rsvd[2];
+};
+
+/**
+ * @brief bfi_ll_rxf_multi_req is used by:
+ * BFI_LL_H2I_RX_REQ
+ */
+struct bfi_ll_rxf_multi_req {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u32 rxf_id_mask[2];
+ u8 enable;
+ u8 rsvd[3];
+};
+
+/**
+ * @brief enum for Loopback opmodes
+ */
+enum {
+ BFI_LL_DIAG_LB_OPMODE_EXT = 0,
+ BFI_LL_DIAG_LB_OPMODE_CBL = 1,
+};
+
+/**
+ * @brief bfi_ll_set_pause_req is used by:
+ * BFI_LL_H2I_SET_PAUSE_REQ
+ */
+struct bfi_ll_set_pause_req {
+ struct bfi_mhdr mh;
+ u8 tx_pause; /* 1 = enable, 0 = disable */
+ u8 rx_pause; /* 1 = enable, 0 = disable */
+ u8 rsvd[2];
+};
+
+/**
+ * @brief bfi_ll_mtu_info_req is used by:
+ * BFI_LL_H2I_MTU_INFO_REQ
+ */
+struct bfi_ll_mtu_info_req {
+ struct bfi_mhdr mh;
+ u16 mtu;
+ u8 rsvd[2];
+};
+
+/**
+ * @brief
+ * Response header format, used by all responses and
+ * asynchronous notifications
+ */
+struct bfi_ll_rsp {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u8 error;
+ u8 rsvd[3];
+};
+
+/**
+ * @brief bfi_ll_aen is used by:
+ * BFI_LL_I2H_LINK_DOWN_AEN
+ * BFI_LL_I2H_LINK_UP_AEN
+ */
+struct bfi_ll_aen {
+ struct bfi_mhdr mh; /*!< common msg header */
+ u32 reason;
+ u8 cee_linkup;
+ u8 prio_map; /*!< LL priority bit-map */
+ u8 rsvd[2];
+};
+
+/**
+ * @brief
+ * The following error codes can be returned
+ * by the mbox commands
+ */
+enum {
+ BFI_LL_CMD_OK = 0,
+ BFI_LL_CMD_FAIL = 1,
+	BFI_LL_CMD_DUP_ENTRY = 2,	/*!< Duplicate entry in CAM */
+	BFI_LL_CMD_CAM_FULL = 3,	/*!< CAM is full */
+	BFI_LL_CMD_NOT_OWNER = 4,	/*!< Not permitted, because not owner */
+	BFI_LL_CMD_NOT_EXEC = 5,	/*!< Was not sent to f/w at all */
+	BFI_LL_CMD_WAITING = 6,		/*!< Waiting for completion (VMware) */
+	BFI_LL_CMD_PORT_DISABLED = 7,	/*!< port in disabled state */
+};
+
+/* Statistics */
+#define BFI_LL_TXF_ID_MAX 64
+#define BFI_LL_RXF_ID_MAX 64
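+
+/*
+ * The rxf_id_mask/txf_id_mask fields in bfi_ll_stats_req are u32[2]
+ * bit-maps over these 64 ids.  Sketch of setting the bit for an id:
+ *	mask[id >> 5] |= 1 << (id & 31);   e.g. id 40 -> word 1, bit 8
+ */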
+
+/* TxF Frame Statistics */
+struct bfi_ll_stats_txf {
+ u64 ucast_octets;
+ u64 ucast;
+ u64 ucast_vlan;
+
+ u64 mcast_octets;
+ u64 mcast;
+ u64 mcast_vlan;
+
+ u64 bcast_octets;
+ u64 bcast;
+ u64 bcast_vlan;
+
+ u64 errors;
+ u64 filter_vlan; /* frames filtered due to VLAN */
+ u64 filter_mac_sa; /* frames filtered due to SA check */
+};
+
+/* RxF Frame Statistics */
+struct bfi_ll_stats_rxf {
+ u64 ucast_octets;
+ u64 ucast;
+ u64 ucast_vlan;
+
+ u64 mcast_octets;
+ u64 mcast;
+ u64 mcast_vlan;
+
+ u64 bcast_octets;
+ u64 bcast;
+ u64 bcast_vlan;
+ u64 frame_drops;
+};
+
+/* FC Tx Frame Statistics */
+struct bfi_ll_stats_fc_tx {
+ u64 txf_ucast_octets;
+ u64 txf_ucast;
+ u64 txf_ucast_vlan;
+
+ u64 txf_mcast_octets;
+ u64 txf_mcast;
+ u64 txf_mcast_vlan;
+
+ u64 txf_bcast_octets;
+ u64 txf_bcast;
+ u64 txf_bcast_vlan;
+
+ u64 txf_parity_errors;
+ u64 txf_timeout;
+ u64 txf_fid_parity_errors;
+};
+
+/* FC Rx Frame Statistics */
+struct bfi_ll_stats_fc_rx {
+ u64 rxf_ucast_octets;
+ u64 rxf_ucast;
+ u64 rxf_ucast_vlan;
+
+ u64 rxf_mcast_octets;
+ u64 rxf_mcast;
+ u64 rxf_mcast_vlan;
+
+ u64 rxf_bcast_octets;
+ u64 rxf_bcast;
+ u64 rxf_bcast_vlan;
+};
+
+/* RAD Frame Statistics */
+struct bfi_ll_stats_rad {
+ u64 rx_frames;
+ u64 rx_octets;
+ u64 rx_vlan_frames;
+
+ u64 rx_ucast;
+ u64 rx_ucast_octets;
+ u64 rx_ucast_vlan;
+
+ u64 rx_mcast;
+ u64 rx_mcast_octets;
+ u64 rx_mcast_vlan;
+
+ u64 rx_bcast;
+ u64 rx_bcast_octets;
+ u64 rx_bcast_vlan;
+
+ u64 rx_drops;
+};
+
+/* BPC Pause Statistics (Tx and Rx) */
+struct bfi_ll_stats_bpc {
+ /* transmit stats */
+ u64 tx_pause[8];
+	u64 tx_zero_pause[8];	/*!< Pause cancellation */
+	u64 tx_first_pause[8];	/*!< Pause initiation rather than retention */
+
+ /* receive stats */
+ u64 rx_pause[8];
+	u64 rx_zero_pause[8];	/*!< Pause cancellation */
+	u64 rx_first_pause[8];	/*!< Pause initiation rather than retention */
+};
+
+/* MAC Statistics (both Rx and Tx) */
+struct bfi_ll_stats_mac {
+ u64 frame_64; /* both rx and tx counter */
+ u64 frame_65_127; /* both rx and tx counter */
+ u64 frame_128_255; /* both rx and tx counter */
+ u64 frame_256_511; /* both rx and tx counter */
+ u64 frame_512_1023; /* both rx and tx counter */
+ u64 frame_1024_1518; /* both rx and tx counter */
+ u64 frame_1519_1522; /* both rx and tx counter */
+
+ /* receive stats */
+ u64 rx_bytes;
+ u64 rx_packets;
+ u64 rx_fcs_error;
+ u64 rx_multicast;
+ u64 rx_broadcast;
+ u64 rx_control_frames;
+ u64 rx_pause;
+ u64 rx_unknown_opcode;
+ u64 rx_alignment_error;
+ u64 rx_frame_length_error;
+ u64 rx_code_error;
+ u64 rx_carrier_sense_error;
+ u64 rx_undersize;
+ u64 rx_oversize;
+ u64 rx_fragments;
+ u64 rx_jabber;
+ u64 rx_drop;
+
+ /* transmit stats */
+ u64 tx_bytes;
+ u64 tx_packets;
+ u64 tx_multicast;
+ u64 tx_broadcast;
+ u64 tx_pause;
+ u64 tx_deferral;
+ u64 tx_excessive_deferral;
+ u64 tx_single_collision;
+ u64 tx_muliple_collision;
+ u64 tx_late_collision;
+ u64 tx_excessive_collision;
+ u64 tx_total_collision;
+ u64 tx_pause_honored;
+ u64 tx_drop;
+ u64 tx_jabber;
+ u64 tx_fcs_error;
+ u64 tx_control_frame;
+ u64 tx_oversize;
+ u64 tx_undersize;
+ u64 tx_fragments;
+};
+
+/* Complete statistics */
+struct bfi_ll_stats {
+ struct bfi_ll_stats_mac mac_stats;
+ struct bfi_ll_stats_bpc bpc_stats;
+ struct bfi_ll_stats_rad rad_stats;
+ struct bfi_ll_stats_fc_rx fc_rx_stats;
+ struct bfi_ll_stats_fc_tx fc_tx_stats;
+ struct bfi_ll_stats_rxf rxf_stats[BFI_LL_RXF_ID_MAX];
+ struct bfi_ll_stats_txf txf_stats[BFI_LL_TXF_ID_MAX];
+};
+
+#pragma pack()
+
+#endif /* __BFI_LL_H__ */
diff --git a/drivers/net/bna/bna.h b/drivers/net/bna/bna.h
new file mode 100644
index 000000000000..6a2b3291c190
--- /dev/null
+++ b/drivers/net/bna/bna.h
@@ -0,0 +1,654 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef __BNA_H__
+#define __BNA_H__
+
+#include "bfa_wc.h"
+#include "bfa_ioc.h"
+#include "cna.h"
+#include "bfi_ll.h"
+#include "bna_types.h"
+
+extern u32 bna_dim_vector[][BNA_BIAS_T_MAX];
+extern u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
+
+/**
+ *
+ * Macros and constants
+ *
+ */
+
+#define BNA_IOC_TIMER_FREQ 200
+
+/* Log string size */
+#define BNA_MESSAGE_SIZE 256
+
+#define bna_device_timer(_dev) bfa_timer_beat(&((_dev)->timer_mod))
+
+/* MBOX API for PORT, TX, RX */
+#define bna_mbox_qe_fill(_qe, _cmd, _cmd_len, _cbfn, _cbarg) \
+do { \
+ memcpy(&((_qe)->cmd.msg[0]), (_cmd), (_cmd_len)); \
+ (_qe)->cbfn = (_cbfn); \
+ (_qe)->cbarg = (_cbarg); \
+} while (0)
+
+#define bna_is_small_rxq(rcb) ((rcb)->id == 1)
+
+#define BNA_MAC_IS_EQUAL(_mac1, _mac2) \
+ (!memcmp((_mac1), (_mac2), sizeof(mac_t)))
+
+#define BNA_POWER_OF_2(x) (((x) & ((x) - 1)) == 0)
+
+#define BNA_TO_POWER_OF_2(x) \
+do { \
+ int _shift = 0; \
+ while ((x) && (x) != 1) { \
+ (x) >>= 1; \
+ _shift++; \
+ } \
+ (x) <<= _shift; \
+} while (0)
+
+#define BNA_TO_POWER_OF_2_HIGH(x) \
+do { \
+ int n = 1; \
+ while (n < (x)) \
+ n <<= 1; \
+ (x) = n; \
+} while (0)
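+
+/*
+ * Worked example: BNA_TO_POWER_OF_2 rounds down, so x = 6 becomes 4;
+ * BNA_TO_POWER_OF_2_HIGH rounds up, so x = 6 becomes 8.  A value that
+ * is already a power of 2 (e.g. 4) is left unchanged by both.
+ */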
+
+/*
+ * input : _addr-> os dma addr in host endian format,
+ * output : _bna_dma_addr-> pointer to hw dma addr
+ */
+#define BNA_SET_DMA_ADDR(_addr, _bna_dma_addr) \
+do { \
+ u64 tmp_addr = \
+ cpu_to_be64((u64)(_addr)); \
+ (_bna_dma_addr)->msb = ((struct bna_dma_addr *)&tmp_addr)->msb; \
+ (_bna_dma_addr)->lsb = ((struct bna_dma_addr *)&tmp_addr)->lsb; \
+} while (0)
+
+/*
+ * input : _bna_dma_addr-> pointer to hw dma addr
+ * output : _addr-> os dma addr in host endian format
+ */
+#define BNA_GET_DMA_ADDR(_bna_dma_addr, _addr) \
+do { \
+ (_addr) = ((((u64)ntohl((_bna_dma_addr)->msb))) << 32) \
+ | ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff)); \
+} while (0)
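+
+/*
+ * Illustrative round trip (sketch; "pa" is a hypothetical dma_addr_t):
+ *	struct bna_dma_addr hw;
+ *	u64 back;
+ *	BNA_SET_DMA_ADDR(pa, &hw);	hw now holds pa in big endian
+ *	BNA_GET_DMA_ADDR(&hw, back);	back == pa again
+ */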
+
+#define containing_rec(addr, type, field) \
+ ((type *)((unsigned char *)(addr) - \
+ (unsigned char *)(&((type *)0)->field)))
+
+#define BNA_TXQ_WI_NEEDED(_vectors) (((_vectors) + 3) >> 2)
+
+/* TxQ element is 64 bytes */
+#define BNA_TXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 6)
+#define BNA_TXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 6)
+
+#define BNA_TXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
+{ \
+ unsigned int page_index; /* index within a page */ \
+ void *page_addr; \
+ page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1); \
+ (_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index); \
+ page_addr = (_qpt_ptr)[((_qe_idx) >> BNA_TXQ_PAGE_INDEX_MAX_SHIFT)];\
+ (_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \
+}
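+
+/*
+ * Example (assuming 4KB pages, i.e. 64 TxQ entries per page): for
+ * _qe_idx = 70 this yields page 1, entry 6 within that page, and
+ * _qe_ptr_range = 58 entries left before the page boundary.
+ */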
+
+/* RxQ element is 8 bytes */
+#define BNA_RXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 3)
+#define BNA_RXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 3)
+
+#define BNA_RXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
+{ \
+ unsigned int page_index; /* index within a page */ \
+ void *page_addr; \
+ page_index = (_qe_idx) & (BNA_RXQ_PAGE_INDEX_MAX - 1); \
+ (_qe_ptr_range) = (BNA_RXQ_PAGE_INDEX_MAX - page_index); \
+ page_addr = (_qpt_ptr)[((_qe_idx) >> \
+ BNA_RXQ_PAGE_INDEX_MAX_SHIFT)]; \
+ (_qe_ptr) = &((struct bna_rxq_entry *)(page_addr))[page_index]; \
+}
+
+/* CQ element is 16 bytes */
+#define BNA_CQ_PAGE_INDEX_MAX (PAGE_SIZE >> 4)
+#define BNA_CQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 4)
+
+#define BNA_CQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
+{ \
+ unsigned int page_index; /* index within a page */ \
+ void *page_addr; \
+ \
+ page_index = (_qe_idx) & (BNA_CQ_PAGE_INDEX_MAX - 1); \
+ (_qe_ptr_range) = (BNA_CQ_PAGE_INDEX_MAX - page_index); \
+ page_addr = (_qpt_ptr)[((_qe_idx) >> \
+ BNA_CQ_PAGE_INDEX_MAX_SHIFT)]; \
+ (_qe_ptr) = &((struct bna_cq_entry *)(page_addr))[page_index];\
+}
+
+#define BNA_QE_INDX_2_PTR(_cast, _qe_idx, _q_base) \
+ (&((_cast *)(_q_base))[(_qe_idx)])
+
+#define BNA_QE_INDX_RANGE(_qe_idx, _q_depth) ((_q_depth) - (_qe_idx))
+
+#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth) \
+ ((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))
+
+#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth) \
+ (((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))
+
+#define BNA_QE_FREE_CNT(_q_ptr, _q_depth) \
+ (((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) & \
+ ((_q_depth) - 1))
+
+#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth) \
+ ((((_q_ptr)->producer_index - (_q_ptr)->consumer_index)) & \
+	((_q_depth) - 1))
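+
+/*
+ * These helpers assume q_depth is a power of 2.  Worked example with
+ * depth 64, producer_index 5, consumer_index 60:
+ *	BNA_QE_IN_USE_CNT = (5 - 60) & 63 = 9
+ *	BNA_QE_FREE_CNT = (60 - 5 - 1) & 63 = 54
+ * free + in-use = 63, i.e. one slot is always kept empty so a full
+ * ring can be told apart from an empty one.
+ */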
+
+#define BNA_Q_GET_CI(_q_ptr) ((_q_ptr)->q.consumer_index)
+
+#define BNA_Q_GET_PI(_q_ptr) ((_q_ptr)->q.producer_index)
+
+#define BNA_Q_PI_ADD(_q_ptr, _num) \
+ (_q_ptr)->q.producer_index = \
+ (((_q_ptr)->q.producer_index + (_num)) & \
+ ((_q_ptr)->q.q_depth - 1))
+
+#define BNA_Q_CI_ADD(_q_ptr, _num) \
+ (_q_ptr)->q.consumer_index = \
+ (((_q_ptr)->q.consumer_index + (_num)) \
+ & ((_q_ptr)->q.q_depth - 1))
+
+#define BNA_Q_FREE_COUNT(_q_ptr) \
+ (BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))
+
+#define BNA_Q_IN_USE_COUNT(_q_ptr) \
+ (BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))
+
+/* These macros build the data portion of the TxQ/RxQ doorbell */
+#define BNA_DOORBELL_Q_PRD_IDX(_pi) (0x80000000 | (_pi))
+#define BNA_DOORBELL_Q_STOP (0x40000000)
+
+/* These macros build the data portion of the IB doorbell */
+#define BNA_DOORBELL_IB_INT_ACK(_timeout, _events) \
+ (0x80000000 | ((_timeout) << 16) | (_events))
+#define BNA_DOORBELL_IB_INT_DISABLE (0x40000000)
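+
+/*
+ * Example: acking 10 events with a coalescing timeout of 5 encodes as
+ *	BNA_DOORBELL_IB_INT_ACK(5, 10) = 0x80000000 | (5 << 16) | 10
+ *				       = 0x8005000a
+ */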
+
+/* Set the coalescing timer for the given ib */
+#define bna_ib_coalescing_timer_set(_i_dbell, _cls_timer) \
+	((_i_dbell)->doorbell_ack = BNA_DOORBELL_IB_INT_ACK((_cls_timer), 0))
+
+/* Acks 'events' # of events for a given ib */
+#define bna_ib_ack(_i_dbell, _events) \
+ (writel(((_i_dbell)->doorbell_ack | (_events)), \
+	(_i_dbell)->doorbell_addr))
+
+#define bna_txq_prod_indx_doorbell(_tcb) \
+ (writel(BNA_DOORBELL_Q_PRD_IDX((_tcb)->producer_index), \
+	(_tcb)->q_dbell))
+
+#define bna_rxq_prod_indx_doorbell(_rcb) \
+ (writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \
+	(_rcb)->q_dbell))
+
+#define BNA_LARGE_PKT_SIZE 1000
+
+#define BNA_UPDATE_PKT_CNT(_pkt, _len) \
+do { \
+ if ((_len) > BNA_LARGE_PKT_SIZE) { \
+ (_pkt)->large_pkt_cnt++; \
+ } else { \
+ (_pkt)->small_pkt_cnt++; \
+ } \
+} while (0)
+
+#define call_rxf_stop_cbfn(rxf, status)					\
+do {									\
+	if ((rxf)->stop_cbfn) {						\
+		(*(rxf)->stop_cbfn)((rxf)->stop_cbarg, (status));	\
+		(rxf)->stop_cbfn = NULL;				\
+		(rxf)->stop_cbarg = NULL;				\
+	}								\
+} while (0)
+
+#define call_rxf_start_cbfn(rxf, status)				\
+do {									\
+	if ((rxf)->start_cbfn) {					\
+		(*(rxf)->start_cbfn)((rxf)->start_cbarg, (status));	\
+		(rxf)->start_cbfn = NULL;				\
+		(rxf)->start_cbarg = NULL;				\
+	}								\
+} while (0)
+
+#define call_rxf_cam_fltr_cbfn(rxf, status)				\
+do {									\
+	if ((rxf)->cam_fltr_cbfn) {					\
+		(*(rxf)->cam_fltr_cbfn)((rxf)->cam_fltr_cbarg,		\
+			(rxf)->rx, (status));				\
+		(rxf)->cam_fltr_cbfn = NULL;				\
+		(rxf)->cam_fltr_cbarg = NULL;				\
+	}								\
+} while (0)
+
+#define call_rxf_pause_cbfn(rxf, status)				\
+do {									\
+	if ((rxf)->oper_state_cbfn) {					\
+		(*(rxf)->oper_state_cbfn)((rxf)->oper_state_cbarg,	\
+			(rxf)->rx, (status));				\
+		(rxf)->rxf_flags &= ~BNA_RXF_FL_OPERSTATE_CHANGED;	\
+		(rxf)->oper_state_cbfn = NULL;				\
+		(rxf)->oper_state_cbarg = NULL;				\
+	}								\
+} while (0)
+
+#define call_rxf_resume_cbfn(rxf, status) call_rxf_pause_cbfn(rxf, status)
+
+#define is_xxx_enable(mode, bitmask, xxx)				\
+	(((bitmask) & (xxx)) && ((mode) & (xxx)))
+
+#define is_xxx_disable(mode, bitmask, xxx)				\
+	(((bitmask) & (xxx)) && !((mode) & (xxx)))
+
+#define xxx_enable(mode, bitmask, xxx)					\
+do {									\
+	(bitmask) |= (xxx);						\
+	(mode) |= (xxx);						\
+} while (0)
+
+#define xxx_disable(mode, bitmask, xxx)					\
+do {									\
+	(bitmask) |= (xxx);						\
+	(mode) &= ~(xxx);						\
+} while (0)
+
+#define xxx_inactive(mode, bitmask, xxx)				\
+do {									\
+	(bitmask) &= ~(xxx);						\
+	(mode) &= ~(xxx);						\
+} while (0)
+
+#define is_promisc_enable(mode, bitmask) \
+ is_xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)
+
+#define is_promisc_disable(mode, bitmask) \
+ is_xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)
+
+#define promisc_enable(mode, bitmask) \
+ xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)
+
+#define promisc_disable(mode, bitmask) \
+ xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)
+
+#define promisc_inactive(mode, bitmask) \
+ xxx_inactive(mode, bitmask, BNA_RXMODE_PROMISC)
+
+#define is_default_enable(mode, bitmask) \
+ is_xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)
+
+#define is_default_disable(mode, bitmask) \
+ is_xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)
+
+#define default_enable(mode, bitmask) \
+ xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)
+
+#define default_disable(mode, bitmask) \
+ xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)
+
+#define default_inactive(mode, bitmask) \
+ xxx_inactive(mode, bitmask, BNA_RXMODE_DEFAULT)
+
+#define is_allmulti_enable(mode, bitmask) \
+ is_xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)
+
+#define is_allmulti_disable(mode, bitmask) \
+ is_xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)
+
+#define allmulti_enable(mode, bitmask) \
+ xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)
+
+#define allmulti_disable(mode, bitmask) \
+ xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)
+
+#define allmulti_inactive(mode, bitmask) \
+ xxx_inactive(mode, bitmask, BNA_RXMODE_ALLMULTI)
+
+#define GET_RXQS(rxp, q0, q1)	do {					\
+	switch ((rxp)->type) {						\
+	case BNA_RXP_SINGLE:						\
+		(q0) = (rxp)->rxq.single.only;				\
+		(q1) = NULL;						\
+		break;							\
+	case BNA_RXP_SLR:						\
+		(q0) = (rxp)->rxq.slr.large;				\
+		(q1) = (rxp)->rxq.slr.small;				\
+		break;							\
+	case BNA_RXP_HDS:						\
+		(q0) = (rxp)->rxq.hds.data;				\
+		(q1) = (rxp)->rxq.hds.hdr;				\
+		break;							\
+	}								\
+} while (0)
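+
+/*
+ * Usage sketch: given a struct bna_rxp *rxp,
+ *	struct bna_rxq *q0, *q1;
+ *	GET_RXQS(rxp, q0, q1);
+ * leaves (q0, q1) as (only, NULL), (large, small) or (data, hdr)
+ * depending on whether the path is SINGLE, SLR or HDS.
+ */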
+
+/**
+ *
+ * Function prototypes
+ *
+ */
+
+/**
+ * BNA
+ */
+
+/* Internal APIs */
+void bna_adv_res_req(struct bna_res_info *res_info);
+
+/* APIs for BNAD */
+void bna_res_req(struct bna_res_info *res_info);
+void bna_init(struct bna *bna, struct bnad *bnad,
+ struct bfa_pcidev *pcidev,
+ struct bna_res_info *res_info);
+void bna_uninit(struct bna *bna);
+void bna_stats_get(struct bna *bna);
+void bna_stats_clr(struct bna *bna);
+void bna_get_perm_mac(struct bna *bna, u8 *mac);
+
+/* APIs for Rx */
+int bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size);
+
+/* APIs for RxF */
+struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod);
+void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod,
+ struct bna_mac *mac);
+struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod);
+void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod,
+ struct bna_mac *mac);
+struct bna_rit_segment *
+bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size);
+void bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
+ struct bna_rit_segment *seg);
+
+/**
+ * DEVICE
+ */
+
+/* Internal APIs */
+void bna_adv_device_init(struct bna_device *device, struct bna *bna,
+ struct bna_res_info *res_info);
+
+/* APIs for BNA */
+void bna_device_init(struct bna_device *device, struct bna *bna,
+ struct bna_res_info *res_info);
+void bna_device_uninit(struct bna_device *device);
+void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);
+int bna_device_status_get(struct bna_device *device);
+int bna_device_state_get(struct bna_device *device);
+
+/* APIs for BNAD */
+void bna_device_enable(struct bna_device *device);
+void bna_device_disable(struct bna_device *device,
+ enum bna_cleanup_type type);
+
+/**
+ * MBOX
+ */
+
+/* APIs for DEVICE */
+void bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna);
+void bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod);
+void bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod);
+void bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod);
+
+/* APIs for PORT, TX, RX */
+void bna_mbox_handler(struct bna *bna, u32 intr_status);
+void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe);
+
+/**
+ * PORT
+ */
+
+/* APIs for BNA */
+void bna_port_init(struct bna_port *port, struct bna *bna);
+void bna_port_uninit(struct bna_port *port);
+int bna_port_state_get(struct bna_port *port);
+int bna_llport_state_get(struct bna_llport *llport);
+
+/* APIs for DEVICE */
+void bna_port_start(struct bna_port *port);
+void bna_port_stop(struct bna_port *port);
+void bna_port_fail(struct bna_port *port);
+
+/* API for RX */
+int bna_port_mtu_get(struct bna_port *port);
+void bna_llport_admin_up(struct bna_llport *llport);
+void bna_llport_admin_down(struct bna_llport *llport);
+
+/* API for BNAD */
+void bna_port_enable(struct bna_port *port);
+void bna_port_disable(struct bna_port *port, enum bna_cleanup_type type,
+ void (*cbfn)(void *, enum bna_cb_status));
+void bna_port_pause_config(struct bna_port *port,
+ struct bna_pause_config *pause_config,
+ void (*cbfn)(struct bnad *, enum bna_cb_status));
+void bna_port_mtu_set(struct bna_port *port, int mtu,
+ void (*cbfn)(struct bnad *, enum bna_cb_status));
+void bna_port_mac_get(struct bna_port *port, mac_t *mac);
+void bna_port_type_set(struct bna_port *port, enum bna_port_type type);
+void bna_port_linkcbfn_set(struct bna_port *port,
+ void (*linkcbfn)(struct bnad *,
+ enum bna_link_status));
+void bna_port_admin_up(struct bna_port *port);
+void bna_port_admin_down(struct bna_port *port);
+
+/* Callbacks for TX, RX */
+void bna_port_cb_tx_stopped(struct bna_port *port,
+ enum bna_cb_status status);
+void bna_port_cb_rx_stopped(struct bna_port *port,
+ enum bna_cb_status status);
+
+/* Callbacks for MBOX */
+void bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
+ int status);
+void bna_port_cb_link_down(struct bna_port *port, int status);
+
+/**
+ * IB
+ */
+
+/* APIs for BNA */
+void bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
+ struct bna_res_info *res_info);
+void bna_ib_mod_uninit(struct bna_ib_mod *ib_mod);
+
+/* APIs for TX, RX */
+struct bna_ib *bna_ib_get(struct bna_ib_mod *ib_mod,
+ enum bna_intr_type intr_type, int vector);
+void bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib);
+int bna_ib_reserve_idx(struct bna_ib *ib);
+void bna_ib_release_idx(struct bna_ib *ib, int idx);
+int bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config);
+void bna_ib_start(struct bna_ib *ib);
+void bna_ib_stop(struct bna_ib *ib);
+void bna_ib_fail(struct bna_ib *ib);
+void bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo);
+
+/**
+ * TX MODULE AND TX
+ */
+
+/* Internal APIs */
+void bna_tx_prio_changed(struct bna_tx *tx, int prio);
+
+/* APIs for BNA */
+void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
+ struct bna_res_info *res_info);
+void bna_tx_mod_uninit(struct bna_tx_mod *tx_mod);
+int bna_tx_state_get(struct bna_tx *tx);
+
+/* APIs for PORT */
+void bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
+void bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
+void bna_tx_mod_fail(struct bna_tx_mod *tx_mod);
+void bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio);
+void bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link);
+
+/* APIs for BNAD */
+void bna_tx_res_req(int num_txq, int txq_depth,
+ struct bna_res_info *res_info);
+struct bna_tx *bna_tx_create(struct bna *bna, struct bnad *bnad,
+ struct bna_tx_config *tx_cfg,
+ struct bna_tx_event_cbfn *tx_cbfn,
+ struct bna_res_info *res_info, void *priv);
+void bna_tx_destroy(struct bna_tx *tx);
+void bna_tx_enable(struct bna_tx *tx);
+void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
+ void (*cbfn)(void *, struct bna_tx *,
+ enum bna_cb_status));
+enum bna_cb_status
+bna_tx_prio_set(struct bna_tx *tx, int prio,
+ void (*cbfn)(struct bnad *, struct bna_tx *,
+ enum bna_cb_status));
+void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);
+
+/**
+ * RX MODULE, RX, RXF
+ */
+
+/* Internal APIs */
+void rxf_cb_cam_fltr_mbox_cmd(void *arg, int status);
+void rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
+ const struct bna_mac *mac_addr);
+void __rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status);
+void bna_rxf_adv_init(struct bna_rxf *rxf,
+ struct bna_rx *rx,
+ struct bna_rx_config *q_config);
+int rxf_process_packet_filter_ucast(struct bna_rxf *rxf);
+int rxf_process_packet_filter_promisc(struct bna_rxf *rxf);
+int rxf_process_packet_filter_default(struct bna_rxf *rxf);
+int rxf_process_packet_filter_allmulti(struct bna_rxf *rxf);
+int rxf_clear_packet_filter_ucast(struct bna_rxf *rxf);
+int rxf_clear_packet_filter_promisc(struct bna_rxf *rxf);
+int rxf_clear_packet_filter_default(struct bna_rxf *rxf);
+int rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf);
+void rxf_reset_packet_filter_ucast(struct bna_rxf *rxf);
+void rxf_reset_packet_filter_promisc(struct bna_rxf *rxf);
+void rxf_reset_packet_filter_default(struct bna_rxf *rxf);
+void rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf);
+
+/* APIs for BNA */
+void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
+ struct bna_res_info *res_info);
+void bna_rx_mod_uninit(struct bna_rx_mod *rx_mod);
+int bna_rx_state_get(struct bna_rx *rx);
+int bna_rxf_state_get(struct bna_rxf *rxf);
+
+/* APIs for PORT */
+void bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
+void bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
+void bna_rx_mod_fail(struct bna_rx_mod *rx_mod);
+
+/* APIs for BNAD */
+void bna_rx_res_req(struct bna_rx_config *rx_config,
+ struct bna_res_info *res_info);
+struct bna_rx *bna_rx_create(struct bna *bna, struct bnad *bnad,
+ struct bna_rx_config *rx_cfg,
+ struct bna_rx_event_cbfn *rx_cbfn,
+ struct bna_res_info *res_info, void *priv);
+void bna_rx_destroy(struct bna_rx *rx);
+void bna_rx_enable(struct bna_rx *rx);
+void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
+ void (*cbfn)(void *, struct bna_rx *,
+ enum bna_cb_status));
+void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
+void bna_rx_dim_reconfig(struct bna *bna, u32 vector[][BNA_BIAS_T_MAX]);
+void bna_rx_dim_update(struct bna_ccb *ccb);
+enum bna_cb_status
+bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+enum bna_cb_status
+bna_rx_ucast_add(struct bna_rx *rx, u8 *ucmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+enum bna_cb_status
+bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+enum bna_cb_status
+bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+enum bna_cb_status
+bna_rx_mcast_del(struct bna_rx *rx, u8 *mcmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+enum bna_cb_status
+bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+void bna_rx_mcast_delall(struct bna_rx *rx,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+enum bna_cb_status
+bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
+ enum bna_rxmode bitmask,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
+void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
+void bna_rx_vlanfilter_enable(struct bna_rx *rx);
+void bna_rx_vlanfilter_disable(struct bna_rx *rx);
+void bna_rx_rss_enable(struct bna_rx *rx);
+void bna_rx_rss_disable(struct bna_rx *rx);
+void bna_rx_rss_reconfig(struct bna_rx *rx, struct bna_rxf_rss *rss_config);
+void bna_rx_rss_rit_set(struct bna_rx *rx, unsigned int *vectors,
+ int nvectors);
+void bna_rx_hds_enable(struct bna_rx *rx, struct bna_rxf_hds *hds_config,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+void bna_rx_hds_disable(struct bna_rx *rx,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+void bna_rx_receive_pause(struct bna_rx *rx,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+void bna_rx_receive_resume(struct bna_rx *rx,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status));
+
+/* RxF APIs for RX */
+void bna_rxf_start(struct bna_rxf *rxf);
+void bna_rxf_stop(struct bna_rxf *rxf);
+void bna_rxf_fail(struct bna_rxf *rxf);
+void bna_rxf_init(struct bna_rxf *rxf, struct bna_rx *rx,
+ struct bna_rx_config *q_config);
+void bna_rxf_uninit(struct bna_rxf *rxf);
+
+/* Callback from RXF to RX */
+void bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status);
+void bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status);
+
+/**
+ * BNAD
+ */
+
+/* Callbacks for BNA */
+void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
+ struct bna_stats *stats);
+void bnad_cb_stats_clr(struct bnad *bnad);
+
+/* Callbacks for DEVICE */
+void bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status);
+void bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status);
+void bnad_cb_device_enable_mbox_intr(struct bnad *bnad);
+void bnad_cb_device_disable_mbox_intr(struct bnad *bnad);
+
+/* Callbacks for port */
+void bnad_cb_port_link_status(struct bnad *bnad,
+ enum bna_link_status status);
+
+#endif /* __BNA_H__ */
diff --git a/drivers/net/bna/bna_ctrl.c b/drivers/net/bna/bna_ctrl.c
new file mode 100644
index 000000000000..f3034d6bda58
--- /dev/null
+++ b/drivers/net/bna/bna_ctrl.c
@@ -0,0 +1,3624 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#include "bna.h"
+#include "bfa_sm.h"
+#include "bfa_wc.h"
+
+/**
+ * MBOX
+ */
+static int
+bna_is_aen(u8 msg_id)
+{
+ return (msg_id == BFI_LL_I2H_LINK_DOWN_AEN ||
+ msg_id == BFI_LL_I2H_LINK_UP_AEN);
+}
+
+static void
+bna_mbox_aen_callback(struct bna *bna, struct bfi_mbmsg *msg)
+{
+ struct bfi_ll_aen *aen = (struct bfi_ll_aen *)(msg);
+
+ switch (aen->mh.msg_id) {
+ case BFI_LL_I2H_LINK_UP_AEN:
+ bna_port_cb_link_up(&bna->port, aen, aen->reason);
+ break;
+ case BFI_LL_I2H_LINK_DOWN_AEN:
+ bna_port_cb_link_down(&bna->port, aen->reason);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+bna_ll_isr(void *llarg, struct bfi_mbmsg *msg)
+{
+ struct bna *bna = (struct bna *)(llarg);
+ struct bfi_ll_rsp *mb_rsp = (struct bfi_ll_rsp *)(msg);
+ struct bfi_mhdr *cmd_h, *rsp_h;
+ struct bna_mbox_qe *mb_qe = NULL;
+ int to_post = 0;
+ u8 aen = 0;
+ char message[BNA_MESSAGE_SIZE];
+
+ aen = bna_is_aen(mb_rsp->mh.msg_id);
+
+ if (!aen) {
+ mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
+ cmd_h = (struct bfi_mhdr *)(&mb_qe->cmd.msg[0]);
+ rsp_h = (struct bfi_mhdr *)(&mb_rsp->mh);
+
+ if ((BFA_I2HM(cmd_h->msg_id) == rsp_h->msg_id) &&
+ (cmd_h->mtag.i2htok == rsp_h->mtag.i2htok)) {
+ /* Remove the request from posted_q, update state */
+ list_del(&mb_qe->qe);
+ bna->mbox_mod.msg_pending--;
+ if (list_empty(&bna->mbox_mod.posted_q))
+ bna->mbox_mod.state = BNA_MBOX_FREE;
+ else
+ to_post = 1;
+
+ /* Dispatch the cbfn */
+ if (mb_qe->cbfn)
+ mb_qe->cbfn(mb_qe->cbarg, mb_rsp->error);
+
+ /* Post the next entry, if needed */
+ if (to_post) {
+ mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
+ bfa_nw_ioc_mbox_queue(&bna->device.ioc,
+ &mb_qe->cmd);
+ }
+ } else {
+ snprintf(message, BNA_MESSAGE_SIZE,
+ "No matching rsp for [%d:%d:%d]\n",
+ mb_rsp->mh.msg_class, mb_rsp->mh.msg_id,
+ mb_rsp->mh.mtag.i2htok);
+ pr_info("%s", message);
+ }
+	} else {
+		bna_mbox_aen_callback(bna, msg);
+	}
+}
+
+void
+bna_err_handler(struct bna *bna, u32 intr_status)
+{
+ u32 init_halt;
+
+ if (intr_status & __HALT_STATUS_BITS) {
+ init_halt = readl(bna->device.ioc.ioc_regs.ll_halt);
+ init_halt &= ~__FW_INIT_HALT_P;
+ writel(init_halt, bna->device.ioc.ioc_regs.ll_halt);
+ }
+
+ bfa_nw_ioc_error_isr(&bna->device.ioc);
+}
+
+void
+bna_mbox_handler(struct bna *bna, u32 intr_status)
+{
+ if (BNA_IS_ERR_INTR(intr_status)) {
+ bna_err_handler(bna, intr_status);
+ return;
+ }
+ if (BNA_IS_MBOX_INTR(intr_status))
+ bfa_nw_ioc_mbox_isr(&bna->device.ioc);
+}
+
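+/*
+ * Only one command is outstanding at a time: if the mailbox is FREE
+ * the request is queued to the IOC right away and the state moves to
+ * POSTED; otherwise it just waits on posted_q and bna_ll_isr() posts
+ * it once the response to the current command arrives.
+ */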
+void
+bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe)
+{
+ struct bfi_mhdr *mh;
+
+ mh = (struct bfi_mhdr *)(&mbox_qe->cmd.msg[0]);
+
+ mh->mtag.i2htok = htons(bna->mbox_mod.msg_ctr);
+ bna->mbox_mod.msg_ctr++;
+ bna->mbox_mod.msg_pending++;
+ if (bna->mbox_mod.state == BNA_MBOX_FREE) {
+ list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
+ bfa_nw_ioc_mbox_queue(&bna->device.ioc, &mbox_qe->cmd);
+ bna->mbox_mod.state = BNA_MBOX_POSTED;
+ } else {
+ list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
+ }
+}
+
+void
+bna_mbox_flush_q(struct bna *bna, struct list_head *q)
+{
+ struct bna_mbox_qe *mb_qe = NULL;
+ struct list_head *mb_q;
+ void (*cbfn)(void *arg, int status);
+ void *cbarg;
+
+ mb_q = &bna->mbox_mod.posted_q;
+
+ while (!list_empty(mb_q)) {
+ bfa_q_deq(mb_q, &mb_qe);
+ cbfn = mb_qe->cbfn;
+ cbarg = mb_qe->cbarg;
+ bfa_q_qe_init(mb_qe);
+ bna->mbox_mod.msg_pending--;
+
+ if (cbfn)
+ cbfn(cbarg, BNA_CB_NOT_EXEC);
+ }
+
+ bna->mbox_mod.state = BNA_MBOX_FREE;
+}
+
+void
+bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod)
+{
+}
+
+void
+bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod)
+{
+ bna_mbox_flush_q(mbox_mod->bna, &mbox_mod->posted_q);
+}
+
+void
+bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
+{
+ bfa_nw_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
+ mbox_mod->state = BNA_MBOX_FREE;
+ mbox_mod->msg_ctr = mbox_mod->msg_pending = 0;
+ INIT_LIST_HEAD(&mbox_mod->posted_q);
+ mbox_mod->bna = bna;
+}
+
+void
+bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod)
+{
+ mbox_mod->bna = NULL;
+}
+
+/**
+ * LLPORT
+ */
+#define call_llport_stop_cbfn(llport, status)\
+do {\
+ if ((llport)->stop_cbfn)\
+ (llport)->stop_cbfn(&(llport)->bna->port, status);\
+ (llport)->stop_cbfn = NULL;\
+} while (0)
+
+static void bna_fw_llport_up(struct bna_llport *llport);
+static void bna_fw_cb_llport_up(void *arg, int status);
+static void bna_fw_llport_down(struct bna_llport *llport);
+static void bna_fw_cb_llport_down(void *arg, int status);
+static void bna_llport_start(struct bna_llport *llport);
+static void bna_llport_stop(struct bna_llport *llport);
+static void bna_llport_fail(struct bna_llport *llport);
+
+enum bna_llport_event {
+ LLPORT_E_START = 1,
+ LLPORT_E_STOP = 2,
+ LLPORT_E_FAIL = 3,
+ LLPORT_E_UP = 4,
+ LLPORT_E_DOWN = 5,
+ LLPORT_E_FWRESP_UP = 6,
+ LLPORT_E_FWRESP_DOWN = 7
+};
+
+enum bna_llport_state {
+ BNA_LLPORT_STOPPED = 1,
+ BNA_LLPORT_DOWN = 2,
+ BNA_LLPORT_UP_RESP_WAIT = 3,
+ BNA_LLPORT_DOWN_RESP_WAIT = 4,
+ BNA_LLPORT_UP = 5,
+ BNA_LLPORT_LAST_RESP_WAIT = 6
+};
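+
+/*
+ * Typical flow: stopped -E_START-> down -E_UP-> up_resp_wait
+ * -E_FWRESP_UP-> up.  The *_resp_wait states park the FSM while a
+ * firmware admin up/down request is outstanding on the mailbox.
+ */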
+
+bfa_fsm_state_decl(bna_llport, stopped, struct bna_llport,
+ enum bna_llport_event);
+bfa_fsm_state_decl(bna_llport, down, struct bna_llport,
+ enum bna_llport_event);
+bfa_fsm_state_decl(bna_llport, up_resp_wait, struct bna_llport,
+ enum bna_llport_event);
+bfa_fsm_state_decl(bna_llport, down_resp_wait, struct bna_llport,
+ enum bna_llport_event);
+bfa_fsm_state_decl(bna_llport, up, struct bna_llport,
+ enum bna_llport_event);
+bfa_fsm_state_decl(bna_llport, last_resp_wait, struct bna_llport,
+ enum bna_llport_event);
+
+static struct bfa_sm_table llport_sm_table[] = {
+ {BFA_SM(bna_llport_sm_stopped), BNA_LLPORT_STOPPED},
+ {BFA_SM(bna_llport_sm_down), BNA_LLPORT_DOWN},
+ {BFA_SM(bna_llport_sm_up_resp_wait), BNA_LLPORT_UP_RESP_WAIT},
+ {BFA_SM(bna_llport_sm_down_resp_wait), BNA_LLPORT_DOWN_RESP_WAIT},
+ {BFA_SM(bna_llport_sm_up), BNA_LLPORT_UP},
+ {BFA_SM(bna_llport_sm_last_resp_wait), BNA_LLPORT_LAST_RESP_WAIT}
+};
+
+static void
+bna_llport_sm_stopped_entry(struct bna_llport *llport)
+{
+ llport->bna->port.link_cbfn((llport)->bna->bnad, BNA_LINK_DOWN);
+ call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
+}
+
+static void
+bna_llport_sm_stopped(struct bna_llport *llport,
+ enum bna_llport_event event)
+{
+ switch (event) {
+ case LLPORT_E_START:
+ bfa_fsm_set_state(llport, bna_llport_sm_down);
+ break;
+
+ case LLPORT_E_STOP:
+ call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
+ break;
+
+ case LLPORT_E_FAIL:
+ break;
+
+ case LLPORT_E_DOWN:
+ /* This event is received due to Rx objects failing */
+ /* No-op */
+ break;
+
+ case LLPORT_E_FWRESP_UP:
+ case LLPORT_E_FWRESP_DOWN:
+ /**
+ * These events are received due to flushing of mbox when
+ * device fails
+ */
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(llport->bna, event);
+ }
+}
+
+static void
+bna_llport_sm_down_entry(struct bna_llport *llport)
+{
+ bnad_cb_port_link_status((llport)->bna->bnad, BNA_LINK_DOWN);
+}
+
+static void
+bna_llport_sm_down(struct bna_llport *llport,
+ enum bna_llport_event event)
+{
+ switch (event) {
+ case LLPORT_E_STOP:
+ bfa_fsm_set_state(llport, bna_llport_sm_stopped);
+ break;
+
+ case LLPORT_E_FAIL:
+ bfa_fsm_set_state(llport, bna_llport_sm_stopped);
+ break;
+
+ case LLPORT_E_UP:
+ bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
+ bna_fw_llport_up(llport);
+ break;
+
+ default:
+ bfa_sm_fault(llport->bna, event);
+ }
+}
+
+static void
+bna_llport_sm_up_resp_wait_entry(struct bna_llport *llport)
+{
+ /**
	 * NOTE: Do not call bna_fw_llport_up() here. It would post a second
	 * mbox request while one is still outstanding, because of the
	 * down_resp_wait -> up_resp_wait transition on event LLPORT_E_UP.
+ */
+}
+
+static void
+bna_llport_sm_up_resp_wait(struct bna_llport *llport,
+ enum bna_llport_event event)
+{
+ switch (event) {
+ case LLPORT_E_STOP:
+ bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
+ break;
+
+ case LLPORT_E_FAIL:
+ bfa_fsm_set_state(llport, bna_llport_sm_stopped);
+ break;
+
+ case LLPORT_E_DOWN:
+ bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
+ break;
+
+ case LLPORT_E_FWRESP_UP:
+ bfa_fsm_set_state(llport, bna_llport_sm_up);
+ break;
+
+ case LLPORT_E_FWRESP_DOWN:
+ /* down_resp_wait -> up_resp_wait transition on LLPORT_E_UP */
+ bna_fw_llport_up(llport);
+ break;
+
+ default:
+ bfa_sm_fault(llport->bna, event);
+ }
+}
+
+static void
+bna_llport_sm_down_resp_wait_entry(struct bna_llport *llport)
+{
+ /**
+	 * NOTE: Do not call bna_fw_llport_down() here. It would post a second
+	 * mbox request while one is still outstanding, because of the
+	 * up_resp_wait -> down_resp_wait transition on event LLPORT_E_DOWN.
+ */
+}
+
+static void
+bna_llport_sm_down_resp_wait(struct bna_llport *llport,
+ enum bna_llport_event event)
+{
+ switch (event) {
+ case LLPORT_E_STOP:
+ bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
+ break;
+
+ case LLPORT_E_FAIL:
+ bfa_fsm_set_state(llport, bna_llport_sm_stopped);
+ break;
+
+ case LLPORT_E_UP:
+ bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
+ break;
+
+ case LLPORT_E_FWRESP_UP:
+ /* up_resp_wait->down_resp_wait transition on LLPORT_E_DOWN */
+ bna_fw_llport_down(llport);
+ break;
+
+ case LLPORT_E_FWRESP_DOWN:
+ bfa_fsm_set_state(llport, bna_llport_sm_down);
+ break;
+
+ default:
+ bfa_sm_fault(llport->bna, event);
+ }
+}
+
+static void
+bna_llport_sm_up_entry(struct bna_llport *llport)
+{
+}
+
+static void
+bna_llport_sm_up(struct bna_llport *llport,
+ enum bna_llport_event event)
+{
+ switch (event) {
+ case LLPORT_E_STOP:
+ bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
+ bna_fw_llport_down(llport);
+ break;
+
+ case LLPORT_E_FAIL:
+ bfa_fsm_set_state(llport, bna_llport_sm_stopped);
+ break;
+
+ case LLPORT_E_DOWN:
+ bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
+ bna_fw_llport_down(llport);
+ break;
+
+ default:
+ bfa_sm_fault(llport->bna, event);
+ }
+}
+
+static void
+bna_llport_sm_last_resp_wait_entry(struct bna_llport *llport)
+{
+}
+
+static void
+bna_llport_sm_last_resp_wait(struct bna_llport *llport,
+ enum bna_llport_event event)
+{
+ switch (event) {
+ case LLPORT_E_FAIL:
+ bfa_fsm_set_state(llport, bna_llport_sm_stopped);
+ break;
+
+ case LLPORT_E_DOWN:
+ /**
+ * This event is received due to Rx objects stopping in
+ * parallel to llport
+ */
+ /* No-op */
+ break;
+
+ case LLPORT_E_FWRESP_UP:
+		/* up_resp_wait->last_resp_wait transition on LLPORT_E_STOP */
+ bna_fw_llport_down(llport);
+ break;
+
+ case LLPORT_E_FWRESP_DOWN:
+ bfa_fsm_set_state(llport, bna_llport_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(llport->bna, event);
+ }
+}
+
+static void
+bna_fw_llport_admin_up(struct bna_llport *llport)
+{
+ struct bfi_ll_port_admin_req ll_req;
+
+ memset(&ll_req, 0, sizeof(ll_req));
+ ll_req.mh.msg_class = BFI_MC_LL;
+ ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
+ ll_req.mh.mtag.h2i.lpu_id = 0;
+
+ ll_req.up = BNA_STATUS_T_ENABLED;
+
+ bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_fw_cb_llport_up, llport);
+
+ bna_mbox_send(llport->bna, &llport->mbox_qe);
+}
+
+static void
+bna_fw_llport_up(struct bna_llport *llport)
+{
+ if (llport->type == BNA_PORT_T_REGULAR)
+ bna_fw_llport_admin_up(llport);
+}
+
+static void
+bna_fw_cb_llport_up(void *arg, int status)
+{
+ struct bna_llport *llport = (struct bna_llport *)arg;
+
+ bfa_q_qe_init(&llport->mbox_qe.qe);
+ bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP);
+}
+
+static void
+bna_fw_llport_admin_down(struct bna_llport *llport)
+{
+ struct bfi_ll_port_admin_req ll_req;
+
+ memset(&ll_req, 0, sizeof(ll_req));
+ ll_req.mh.msg_class = BFI_MC_LL;
+ ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
+ ll_req.mh.mtag.h2i.lpu_id = 0;
+
+ ll_req.up = BNA_STATUS_T_DISABLED;
+
+ bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_fw_cb_llport_down, llport);
+
+ bna_mbox_send(llport->bna, &llport->mbox_qe);
+}
+
+static void
+bna_fw_llport_down(struct bna_llport *llport)
+{
+ if (llport->type == BNA_PORT_T_REGULAR)
+ bna_fw_llport_admin_down(llport);
+}
+
+static void
+bna_fw_cb_llport_down(void *arg, int status)
+{
+ struct bna_llport *llport = (struct bna_llport *)arg;
+
+ bfa_q_qe_init(&llport->mbox_qe.qe);
+ bfa_fsm_send_event(llport, LLPORT_E_FWRESP_DOWN);
+}
+
+void
+bna_port_cb_llport_stopped(struct bna_port *port,
+ enum bna_cb_status status)
+{
+ bfa_wc_down(&port->chld_stop_wc);
+}
+
+static void
+bna_llport_init(struct bna_llport *llport, struct bna *bna)
+{
+ llport->flags |= BNA_LLPORT_F_ENABLED;
+ llport->type = BNA_PORT_T_REGULAR;
+ llport->bna = bna;
+
+ llport->link_status = BNA_LINK_DOWN;
+
+ llport->admin_up_count = 0;
+
+ llport->stop_cbfn = NULL;
+
+ bfa_q_qe_init(&llport->mbox_qe.qe);
+
+ bfa_fsm_set_state(llport, bna_llport_sm_stopped);
+}
+
+static void
+bna_llport_uninit(struct bna_llport *llport)
+{
+ llport->flags &= ~BNA_LLPORT_F_ENABLED;
+
+ llport->bna = NULL;
+}
+
+static void
+bna_llport_start(struct bna_llport *llport)
+{
+ bfa_fsm_send_event(llport, LLPORT_E_START);
+}
+
+static void
+bna_llport_stop(struct bna_llport *llport)
+{
+ llport->stop_cbfn = bna_port_cb_llport_stopped;
+
+ bfa_fsm_send_event(llport, LLPORT_E_STOP);
+}
+
+static void
+bna_llport_fail(struct bna_llport *llport)
+{
+ bfa_fsm_send_event(llport, LLPORT_E_FAIL);
+}
+
+int
+bna_llport_state_get(struct bna_llport *llport)
+{
+ return bfa_sm_to_state(llport_sm_table, llport->fsm);
+}
+
+void
+bna_llport_admin_up(struct bna_llport *llport)
+{
+ llport->admin_up_count++;
+
+ if (llport->admin_up_count == 1) {
+ llport->flags |= BNA_LLPORT_F_RX_ENABLED;
+ if (llport->flags & BNA_LLPORT_F_ENABLED)
+ bfa_fsm_send_event(llport, LLPORT_E_UP);
+ }
+}
+
+void
+bna_llport_admin_down(struct bna_llport *llport)
+{
+ llport->admin_up_count--;
+
+ if (llport->admin_up_count == 0) {
+ llport->flags &= ~BNA_LLPORT_F_RX_ENABLED;
+ if (llport->flags & BNA_LLPORT_F_ENABLED)
+ bfa_fsm_send_event(llport, LLPORT_E_DOWN);
+ }
+}
+
+/**
+ * PORT
+ */
+#define bna_port_chld_start(port)\
+do {\
+ enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
+ BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
+ enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
+ BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
+ bna_llport_start(&(port)->llport);\
+ bna_tx_mod_start(&(port)->bna->tx_mod, tx_type);\
+ bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
+} while (0)
+
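+/*
+ * Three bfa_wc_up() calls, one per child (llport, tx_mod, rx_mod);
+ * each child's stopped callback drops the count again, as
+ * bna_port_cb_llport_stopped() does with bfa_wc_down().
+ */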
+#define bna_port_chld_stop(port)\
+do {\
+ enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
+ BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
+ enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
+ BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
+ bfa_wc_up(&(port)->chld_stop_wc);\
+ bfa_wc_up(&(port)->chld_stop_wc);\
+ bfa_wc_up(&(port)->chld_stop_wc);\
+ bna_llport_stop(&(port)->llport);\
+ bna_tx_mod_stop(&(port)->bna->tx_mod, tx_type);\
+ bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
+} while (0)
+
+#define bna_port_chld_fail(port)\
+do {\
+ bna_llport_fail(&(port)->llport);\
+ bna_tx_mod_fail(&(port)->bna->tx_mod);\
+ bna_rx_mod_fail(&(port)->bna->rx_mod);\
+} while (0)
+
+#define bna_port_rx_start(port)\
+do {\
+ enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
+ BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
+ bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
+} while (0)
+
+#define bna_port_rx_stop(port)\
+do {\
+ enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
+ BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
+ bfa_wc_up(&(port)->chld_stop_wc);\
+ bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
+} while (0)
+
+#define call_port_stop_cbfn(port, status)\
+do {\
+ if ((port)->stop_cbfn)\
+ (port)->stop_cbfn((port)->stop_cbarg, status);\
+ (port)->stop_cbfn = NULL;\
+ (port)->stop_cbarg = NULL;\
+} while (0)
+
+#define call_port_pause_cbfn(port, status)\
+do {\
+ if ((port)->pause_cbfn)\
+ (port)->pause_cbfn((port)->bna->bnad, status);\
+ (port)->pause_cbfn = NULL;\
+} while (0)
+
+#define call_port_mtu_cbfn(port, status)\
+do {\
+ if ((port)->mtu_cbfn)\
+ (port)->mtu_cbfn((port)->bna->bnad, status);\
+ (port)->mtu_cbfn = NULL;\
+} while (0)
+
+static void bna_fw_pause_set(struct bna_port *port);
+static void bna_fw_cb_pause_set(void *arg, int status);
+static void bna_fw_mtu_set(struct bna_port *port);
+static void bna_fw_cb_mtu_set(void *arg, int status);
+
+enum bna_port_event {
+ PORT_E_START = 1,
+ PORT_E_STOP = 2,
+ PORT_E_FAIL = 3,
+ PORT_E_PAUSE_CFG = 4,
+ PORT_E_MTU_CFG = 5,
+ PORT_E_CHLD_STOPPED = 6,
+ PORT_E_FWRESP_PAUSE = 7,
+ PORT_E_FWRESP_MTU = 8
+};
+
+enum bna_port_state {
+ BNA_PORT_STOPPED = 1,
+ BNA_PORT_MTU_INIT_WAIT = 2,
+ BNA_PORT_PAUSE_INIT_WAIT = 3,
+ BNA_PORT_LAST_RESP_WAIT = 4,
+ BNA_PORT_STARTED = 5,
+ BNA_PORT_PAUSE_CFG_WAIT = 6,
+ BNA_PORT_RX_STOP_WAIT = 7,
+ BNA_PORT_MTU_CFG_WAIT = 8,
+ BNA_PORT_CHLD_STOP_WAIT = 9
+};
+
+bfa_fsm_state_decl(bna_port, stopped, struct bna_port,
+ enum bna_port_event);
+bfa_fsm_state_decl(bna_port, mtu_init_wait, struct bna_port,
+ enum bna_port_event);
+bfa_fsm_state_decl(bna_port, pause_init_wait, struct bna_port,
+ enum bna_port_event);
+bfa_fsm_state_decl(bna_port, last_resp_wait, struct bna_port,
+ enum bna_port_event);
+bfa_fsm_state_decl(bna_port, started, struct bna_port,
+ enum bna_port_event);
+bfa_fsm_state_decl(bna_port, pause_cfg_wait, struct bna_port,
+ enum bna_port_event);
+bfa_fsm_state_decl(bna_port, rx_stop_wait, struct bna_port,
+ enum bna_port_event);
+bfa_fsm_state_decl(bna_port, mtu_cfg_wait, struct bna_port,
+ enum bna_port_event);
+bfa_fsm_state_decl(bna_port, chld_stop_wait, struct bna_port,
+ enum bna_port_event);
+
+static struct bfa_sm_table port_sm_table[] = {
+ {BFA_SM(bna_port_sm_stopped), BNA_PORT_STOPPED},
+ {BFA_SM(bna_port_sm_mtu_init_wait), BNA_PORT_MTU_INIT_WAIT},
+ {BFA_SM(bna_port_sm_pause_init_wait), BNA_PORT_PAUSE_INIT_WAIT},
+ {BFA_SM(bna_port_sm_last_resp_wait), BNA_PORT_LAST_RESP_WAIT},
+ {BFA_SM(bna_port_sm_started), BNA_PORT_STARTED},
+ {BFA_SM(bna_port_sm_pause_cfg_wait), BNA_PORT_PAUSE_CFG_WAIT},
+ {BFA_SM(bna_port_sm_rx_stop_wait), BNA_PORT_RX_STOP_WAIT},
+ {BFA_SM(bna_port_sm_mtu_cfg_wait), BNA_PORT_MTU_CFG_WAIT},
+ {BFA_SM(bna_port_sm_chld_stop_wait), BNA_PORT_CHLD_STOP_WAIT}
+};
+
+static void
+bna_port_sm_stopped_entry(struct bna_port *port)
+{
+ call_port_pause_cbfn(port, BNA_CB_SUCCESS);
+ call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
+ call_port_stop_cbfn(port, BNA_CB_SUCCESS);
+}
+
+static void
+bna_port_sm_stopped(struct bna_port *port, enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_START:
+ bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
+ break;
+
+ case PORT_E_STOP:
+ call_port_stop_cbfn(port, BNA_CB_SUCCESS);
+ break;
+
+ case PORT_E_FAIL:
+ /* No-op */
+ break;
+
+ case PORT_E_PAUSE_CFG:
+ call_port_pause_cbfn(port, BNA_CB_SUCCESS);
+ break;
+
+ case PORT_E_MTU_CFG:
+ call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
+ break;
+
+ case PORT_E_CHLD_STOPPED:
+ /**
+ * This event is received due to LLPort, Tx and Rx objects
+ * failing
+ */
+ /* No-op */
+ break;
+
+ case PORT_E_FWRESP_PAUSE:
+ case PORT_E_FWRESP_MTU:
+ /**
+ * These events are received due to flushing of mbox when
+ * device fails
+ */
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
+static void
+bna_port_sm_mtu_init_wait_entry(struct bna_port *port)
+{
+ bna_fw_mtu_set(port);
+}
+
+static void
+bna_port_sm_mtu_init_wait(struct bna_port *port, enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_STOP:
+ bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
+ break;
+
+ case PORT_E_FAIL:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ break;
+
+ case PORT_E_PAUSE_CFG:
+ /* No-op */
+ break;
+
+ case PORT_E_MTU_CFG:
+ port->flags |= BNA_PORT_F_MTU_CHANGED;
+ break;
+
+ case PORT_E_FWRESP_MTU:
+ if (port->flags & BNA_PORT_F_MTU_CHANGED) {
+ port->flags &= ~BNA_PORT_F_MTU_CHANGED;
+ bna_fw_mtu_set(port);
+ } else {
+ bfa_fsm_set_state(port, bna_port_sm_pause_init_wait);
+ }
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
+static void
+bna_port_sm_pause_init_wait_entry(struct bna_port *port)
+{
+ bna_fw_pause_set(port);
+}
+
+static void
+bna_port_sm_pause_init_wait(struct bna_port *port,
+ enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_STOP:
+ bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
+ break;
+
+ case PORT_E_FAIL:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ break;
+
+ case PORT_E_PAUSE_CFG:
+ port->flags |= BNA_PORT_F_PAUSE_CHANGED;
+ break;
+
+ case PORT_E_MTU_CFG:
+ port->flags |= BNA_PORT_F_MTU_CHANGED;
+ break;
+
+ case PORT_E_FWRESP_PAUSE:
+ if (port->flags & BNA_PORT_F_PAUSE_CHANGED) {
+ port->flags &= ~BNA_PORT_F_PAUSE_CHANGED;
+ bna_fw_pause_set(port);
+ } else if (port->flags & BNA_PORT_F_MTU_CHANGED) {
+ port->flags &= ~BNA_PORT_F_MTU_CHANGED;
+ bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
+ } else {
+ bfa_fsm_set_state(port, bna_port_sm_started);
+ bna_port_chld_start(port);
+ }
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
+static void
+bna_port_sm_last_resp_wait_entry(struct bna_port *port)
+{
+}
+
+static void
+bna_port_sm_last_resp_wait(struct bna_port *port,
+ enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_FAIL:
+ case PORT_E_FWRESP_PAUSE:
+ case PORT_E_FWRESP_MTU:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
+static void
+bna_port_sm_started_entry(struct bna_port *port)
+{
+ /**
+ * NOTE: Do not call bna_port_chld_start() here, since it will be
+ * inadvertently called during pause_cfg_wait->started transition
+ * as well
+ */
+ call_port_pause_cbfn(port, BNA_CB_SUCCESS);
+ call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
+}
+
+static void
+bna_port_sm_started(struct bna_port *port,
+ enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_STOP:
+ bfa_fsm_set_state(port, bna_port_sm_chld_stop_wait);
+ break;
+
+ case PORT_E_FAIL:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ bna_port_chld_fail(port);
+ break;
+
+ case PORT_E_PAUSE_CFG:
+ bfa_fsm_set_state(port, bna_port_sm_pause_cfg_wait);
+ break;
+
+ case PORT_E_MTU_CFG:
+ bfa_fsm_set_state(port, bna_port_sm_rx_stop_wait);
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
+static void
+bna_port_sm_pause_cfg_wait_entry(struct bna_port *port)
+{
+ bna_fw_pause_set(port);
+}
+
+static void
+bna_port_sm_pause_cfg_wait(struct bna_port *port,
+ enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_FAIL:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ bna_port_chld_fail(port);
+ break;
+
+ case PORT_E_FWRESP_PAUSE:
+ bfa_fsm_set_state(port, bna_port_sm_started);
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
+static void
+bna_port_sm_rx_stop_wait_entry(struct bna_port *port)
+{
+ bna_port_rx_stop(port);
+}
+
+static void
+bna_port_sm_rx_stop_wait(struct bna_port *port,
+ enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_FAIL:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ bna_port_chld_fail(port);
+ break;
+
+ case PORT_E_CHLD_STOPPED:
+ bfa_fsm_set_state(port, bna_port_sm_mtu_cfg_wait);
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
+static void
+bna_port_sm_mtu_cfg_wait_entry(struct bna_port *port)
+{
+ bna_fw_mtu_set(port);
+}
+
+static void
+bna_port_sm_mtu_cfg_wait(struct bna_port *port, enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_FAIL:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ bna_port_chld_fail(port);
+ break;
+
+ case PORT_E_FWRESP_MTU:
+ bfa_fsm_set_state(port, bna_port_sm_started);
+ bna_port_rx_start(port);
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
+static void
+bna_port_sm_chld_stop_wait_entry(struct bna_port *port)
+{
+ bna_port_chld_stop(port);
+}
+
+static void
+bna_port_sm_chld_stop_wait(struct bna_port *port,
+ enum bna_port_event event)
+{
+ switch (event) {
+ case PORT_E_FAIL:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ bna_port_chld_fail(port);
+ break;
+
+ case PORT_E_CHLD_STOPPED:
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(port->bna, event);
+ }
+}
+
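+/*
+ * Firmware request helpers: each one builds a BFI mailbox message,
+ * queues it with a completion callback, and the callback re-initializes
+ * the queue element and feeds the firmware response back into the FSM
+ * as a PORT_E_FWRESP_* event.
+ */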
+static void
+bna_fw_pause_set(struct bna_port *port)
+{
+ struct bfi_ll_set_pause_req ll_req;
+
+ memset(&ll_req, 0, sizeof(ll_req));
+ ll_req.mh.msg_class = BFI_MC_LL;
+ ll_req.mh.msg_id = BFI_LL_H2I_SET_PAUSE_REQ;
+ ll_req.mh.mtag.h2i.lpu_id = 0;
+
+ ll_req.tx_pause = port->pause_config.tx_pause;
+ ll_req.rx_pause = port->pause_config.rx_pause;
+
+ bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_fw_cb_pause_set, port);
+
+ bna_mbox_send(port->bna, &port->mbox_qe);
+}
+
+static void
+bna_fw_cb_pause_set(void *arg, int status)
+{
+ struct bna_port *port = (struct bna_port *)arg;
+
+ bfa_q_qe_init(&port->mbox_qe.qe);
+ bfa_fsm_send_event(port, PORT_E_FWRESP_PAUSE);
+}
+
+static void
+bna_fw_mtu_set(struct bna_port *port)
+{
+ struct bfi_ll_mtu_info_req ll_req;
+
+ bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_MTU_INFO_REQ, 0);
+ ll_req.mtu = htons((u16)port->mtu);
+
+ bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_fw_cb_mtu_set, port);
+ bna_mbox_send(port->bna, &port->mbox_qe);
+}
+
+static void
+bna_fw_cb_mtu_set(void *arg, int status)
+{
+ struct bna_port *port = (struct bna_port *)arg;
+
+ bfa_q_qe_init(&port->mbox_qe.qe);
+ bfa_fsm_send_event(port, PORT_E_FWRESP_MTU);
+}
+
+static void
+bna_port_cb_chld_stopped(void *arg)
+{
+ struct bna_port *port = (struct bna_port *)arg;
+
+ bfa_fsm_send_event(port, PORT_E_CHLD_STOPPED);
+}
+
+void
+bna_port_init(struct bna_port *port, struct bna *bna)
+{
+ port->bna = bna;
+ port->flags = 0;
+ port->mtu = 0;
+ port->type = BNA_PORT_T_REGULAR;
+
+ port->link_cbfn = bnad_cb_port_link_status;
+
+ port->chld_stop_wc.wc_resume = bna_port_cb_chld_stopped;
+ port->chld_stop_wc.wc_cbarg = port;
+ port->chld_stop_wc.wc_count = 0;
+
+ port->stop_cbfn = NULL;
+ port->stop_cbarg = NULL;
+
+ port->pause_cbfn = NULL;
+
+ port->mtu_cbfn = NULL;
+
+ bfa_q_qe_init(&port->mbox_qe.qe);
+
+ bfa_fsm_set_state(port, bna_port_sm_stopped);
+
+ bna_llport_init(&port->llport, bna);
+}
+
+void
+bna_port_uninit(struct bna_port *port)
+{
+ bna_llport_uninit(&port->llport);
+
+ port->flags = 0;
+
+ port->bna = NULL;
+}
+
+int
+bna_port_state_get(struct bna_port *port)
+{
+ return bfa_sm_to_state(port_sm_table, port->fsm);
+}
+
+void
+bna_port_start(struct bna_port *port)
+{
+ port->flags |= BNA_PORT_F_DEVICE_READY;
+ if (port->flags & BNA_PORT_F_ENABLED)
+ bfa_fsm_send_event(port, PORT_E_START);
+}
+
+void
+bna_port_stop(struct bna_port *port)
+{
+ port->stop_cbfn = bna_device_cb_port_stopped;
+ port->stop_cbarg = &port->bna->device;
+
+ port->flags &= ~BNA_PORT_F_DEVICE_READY;
+ bfa_fsm_send_event(port, PORT_E_STOP);
+}
+
+void
+bna_port_fail(struct bna_port *port)
+{
+ port->flags &= ~BNA_PORT_F_DEVICE_READY;
+ bfa_fsm_send_event(port, PORT_E_FAIL);
+}
+
+void
+bna_port_cb_tx_stopped(struct bna_port *port, enum bna_cb_status status)
+{
+ bfa_wc_down(&port->chld_stop_wc);
+}
+
+void
+bna_port_cb_rx_stopped(struct bna_port *port, enum bna_cb_status status)
+{
+ bfa_wc_down(&port->chld_stop_wc);
+}
+
+void
+bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
+ int status)
+{
+ int i;
+ u8 prio_map;
+
+ port->llport.link_status = BNA_LINK_UP;
+ if (aen->cee_linkup)
+ port->llport.link_status = BNA_CEE_UP;
+
+ /* Compute the priority */
+ prio_map = aen->prio_map;
+ if (prio_map) {
+ for (i = 0; i < 8; i++) {
+ if ((prio_map >> i) & 0x1)
+ break;
+ }
+ port->priority = i;
+ } else
+ port->priority = 0;
+
+ /* Dispatch events */
+ bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
+ bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
+ port->link_cbfn(port->bna->bnad, port->llport.link_status);
+}
+
+void
+bna_port_cb_link_down(struct bna_port *port, int status)
+{
+ port->llport.link_status = BNA_LINK_DOWN;
+
+ /* Dispatch events */
+ bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
+ port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
+}
+
+int
+bna_port_mtu_get(struct bna_port *port)
+{
+ return port->mtu;
+}
+
+void
+bna_port_enable(struct bna_port *port)
+{
+ if (port->fsm != (bfa_sm_t)bna_port_sm_stopped)
+ return;
+
+ port->flags |= BNA_PORT_F_ENABLED;
+
+ if (port->flags & BNA_PORT_F_DEVICE_READY)
+ bfa_fsm_send_event(port, PORT_E_START);
+}
+
+void
+bna_port_disable(struct bna_port *port, enum bna_cleanup_type type,
+ void (*cbfn)(void *, enum bna_cb_status))
+{
+ if (type == BNA_SOFT_CLEANUP) {
+ (*cbfn)(port->bna->bnad, BNA_CB_SUCCESS);
+ return;
+ }
+
+ port->stop_cbfn = cbfn;
+ port->stop_cbarg = port->bna->bnad;
+
+ port->flags &= ~BNA_PORT_F_ENABLED;
+
+ bfa_fsm_send_event(port, PORT_E_STOP);
+}
+
+void
+bna_port_pause_config(struct bna_port *port,
+ struct bna_pause_config *pause_config,
+ void (*cbfn)(struct bnad *, enum bna_cb_status))
+{
+ port->pause_config = *pause_config;
+
+ port->pause_cbfn = cbfn;
+
+ bfa_fsm_send_event(port, PORT_E_PAUSE_CFG);
+}
+
+void
+bna_port_mtu_set(struct bna_port *port, int mtu,
+ void (*cbfn)(struct bnad *, enum bna_cb_status))
+{
+ port->mtu = mtu;
+
+ port->mtu_cbfn = cbfn;
+
+ bfa_fsm_send_event(port, PORT_E_MTU_CFG);
+}
+
+void
+bna_port_mac_get(struct bna_port *port, mac_t *mac)
+{
+ *mac = bfa_nw_ioc_get_mac(&port->bna->device.ioc);
+}
+
+/**
+ * Should be called only when port is disabled
+ */
+void
+bna_port_type_set(struct bna_port *port, enum bna_port_type type)
+{
+ port->type = type;
+ port->llport.type = type;
+}
+
+/**
+ * Should be called only when port is disabled
+ */
+void
+bna_port_linkcbfn_set(struct bna_port *port,
+ void (*linkcbfn)(struct bnad *, enum bna_link_status))
+{
+ port->link_cbfn = linkcbfn;
+}
+
+void
+bna_port_admin_up(struct bna_port *port)
+{
+ struct bna_llport *llport = &port->llport;
+
+ if (llport->flags & BNA_LLPORT_F_ENABLED)
+ return;
+
+ llport->flags |= BNA_LLPORT_F_ENABLED;
+
+ if (llport->flags & BNA_LLPORT_F_RX_ENABLED)
+ bfa_fsm_send_event(llport, LLPORT_E_UP);
+}
+
+void
+bna_port_admin_down(struct bna_port *port)
+{
+ struct bna_llport *llport = &port->llport;
+
+ if (!(llport->flags & BNA_LLPORT_F_ENABLED))
+ return;
+
+ llport->flags &= ~BNA_LLPORT_F_ENABLED;
+
+ if (llport->flags & BNA_LLPORT_F_RX_ENABLED)
+ bfa_fsm_send_event(llport, LLPORT_E_DOWN);
+}
+
+/**
+ * DEVICE
+ */
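+/*
+ * enable_mbox_intr() reads (and discards) the current interrupt status,
+ * notifies bnad, and then unmasks the mailbox interrupt;
+ * disable_mbox_intr() undoes this in reverse order.
+ */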
+#define enable_mbox_intr(_device)\
+do {\
+ u32 intr_status;\
+ bna_intr_status_get((_device)->bna, intr_status);\
+ bnad_cb_device_enable_mbox_intr((_device)->bna->bnad);\
+ bna_mbox_intr_enable((_device)->bna);\
+} while (0)
+
+#define disable_mbox_intr(_device)\
+do {\
+ bna_mbox_intr_disable((_device)->bna);\
+ bnad_cb_device_disable_mbox_intr((_device)->bna->bnad);\
+} while (0)
+
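+/* Per-PCI-function register offsets, indexed by PCI function number */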
+const struct bna_chip_regs_offset reg_offset[] =
+{{HOST_PAGE_NUM_FN0, HOSTFN0_INT_STATUS,
+ HOSTFN0_INT_MASK, HOST_MSIX_ERR_INDEX_FN0},
+{HOST_PAGE_NUM_FN1, HOSTFN1_INT_STATUS,
+ HOSTFN1_INT_MASK, HOST_MSIX_ERR_INDEX_FN1},
+{HOST_PAGE_NUM_FN2, HOSTFN2_INT_STATUS,
+ HOSTFN2_INT_MASK, HOST_MSIX_ERR_INDEX_FN2},
+{HOST_PAGE_NUM_FN3, HOSTFN3_INT_STATUS,
+ HOSTFN3_INT_MASK, HOST_MSIX_ERR_INDEX_FN3},
+};
+
+enum bna_device_event {
+ DEVICE_E_ENABLE = 1,
+ DEVICE_E_DISABLE = 2,
+ DEVICE_E_IOC_READY = 3,
+ DEVICE_E_IOC_FAILED = 4,
+ DEVICE_E_IOC_DISABLED = 5,
+ DEVICE_E_IOC_RESET = 6,
+ DEVICE_E_PORT_STOPPED = 7,
+};
+
+enum bna_device_state {
+ BNA_DEVICE_STOPPED = 1,
+ BNA_DEVICE_IOC_READY_WAIT = 2,
+ BNA_DEVICE_READY = 3,
+ BNA_DEVICE_PORT_STOP_WAIT = 4,
+ BNA_DEVICE_IOC_DISABLE_WAIT = 5,
+ BNA_DEVICE_FAILED = 6
+};
+
+bfa_fsm_state_decl(bna_device, stopped, struct bna_device,
+ enum bna_device_event);
+bfa_fsm_state_decl(bna_device, ioc_ready_wait, struct bna_device,
+ enum bna_device_event);
+bfa_fsm_state_decl(bna_device, ready, struct bna_device,
+ enum bna_device_event);
+bfa_fsm_state_decl(bna_device, port_stop_wait, struct bna_device,
+ enum bna_device_event);
+bfa_fsm_state_decl(bna_device, ioc_disable_wait, struct bna_device,
+ enum bna_device_event);
+bfa_fsm_state_decl(bna_device, failed, struct bna_device,
+ enum bna_device_event);
+
+static struct bfa_sm_table device_sm_table[] = {
+ {BFA_SM(bna_device_sm_stopped), BNA_DEVICE_STOPPED},
+ {BFA_SM(bna_device_sm_ioc_ready_wait), BNA_DEVICE_IOC_READY_WAIT},
+ {BFA_SM(bna_device_sm_ready), BNA_DEVICE_READY},
+ {BFA_SM(bna_device_sm_port_stop_wait), BNA_DEVICE_PORT_STOP_WAIT},
+ {BFA_SM(bna_device_sm_ioc_disable_wait), BNA_DEVICE_IOC_DISABLE_WAIT},
+ {BFA_SM(bna_device_sm_failed), BNA_DEVICE_FAILED},
+};
+
+static void
+bna_device_sm_stopped_entry(struct bna_device *device)
+{
+ if (device->stop_cbfn)
+ device->stop_cbfn(device->stop_cbarg, BNA_CB_SUCCESS);
+
+ device->stop_cbfn = NULL;
+ device->stop_cbarg = NULL;
+}
+
+static void
+bna_device_sm_stopped(struct bna_device *device,
+ enum bna_device_event event)
+{
+ switch (event) {
+ case DEVICE_E_ENABLE:
+ if (device->intr_type == BNA_INTR_T_MSIX)
+ bna_mbox_msix_idx_set(device);
+ bfa_nw_ioc_enable(&device->ioc);
+ bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
+ break;
+
+ case DEVICE_E_DISABLE:
+ bfa_fsm_set_state(device, bna_device_sm_stopped);
+ break;
+
+ case DEVICE_E_IOC_RESET:
+ enable_mbox_intr(device);
+ break;
+
+ case DEVICE_E_IOC_FAILED:
+ bfa_fsm_set_state(device, bna_device_sm_failed);
+ break;
+
+ default:
+ bfa_sm_fault(device->bna, event);
+ }
+}
+
+static void
+bna_device_sm_ioc_ready_wait_entry(struct bna_device *device)
+{
+ /**
+ * Do not call bfa_ioc_enable() here. It must be called in the
+ * previous state due to failed -> ioc_ready_wait transition.
+ */
+}
+
+static void
+bna_device_sm_ioc_ready_wait(struct bna_device *device,
+ enum bna_device_event event)
+{
+ switch (event) {
+ case DEVICE_E_DISABLE:
+ if (device->ready_cbfn)
+ device->ready_cbfn(device->ready_cbarg,
+ BNA_CB_INTERRUPT);
+ device->ready_cbfn = NULL;
+ device->ready_cbarg = NULL;
+ bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
+ break;
+
+ case DEVICE_E_IOC_READY:
+ bfa_fsm_set_state(device, bna_device_sm_ready);
+ break;
+
+ case DEVICE_E_IOC_FAILED:
+ bfa_fsm_set_state(device, bna_device_sm_failed);
+ break;
+
+ case DEVICE_E_IOC_RESET:
+ enable_mbox_intr(device);
+ break;
+
+ default:
+ bfa_sm_fault(device->bna, event);
+ }
+}
+
+static void
+bna_device_sm_ready_entry(struct bna_device *device)
+{
+ bna_mbox_mod_start(&device->bna->mbox_mod);
+ bna_port_start(&device->bna->port);
+
+ if (device->ready_cbfn)
+ device->ready_cbfn(device->ready_cbarg,
+ BNA_CB_SUCCESS);
+ device->ready_cbfn = NULL;
+ device->ready_cbarg = NULL;
+}
+
+static void
+bna_device_sm_ready(struct bna_device *device, enum bna_device_event event)
+{
+ switch (event) {
+ case DEVICE_E_DISABLE:
+ bfa_fsm_set_state(device, bna_device_sm_port_stop_wait);
+ break;
+
+ case DEVICE_E_IOC_FAILED:
+ bfa_fsm_set_state(device, bna_device_sm_failed);
+ break;
+
+ default:
+ bfa_sm_fault(device->bna, event);
+ }
+}
+
+static void
+bna_device_sm_port_stop_wait_entry(struct bna_device *device)
+{
+ bna_port_stop(&device->bna->port);
+}
+
+static void
+bna_device_sm_port_stop_wait(struct bna_device *device,
+ enum bna_device_event event)
+{
+ switch (event) {
+ case DEVICE_E_PORT_STOPPED:
+ bna_mbox_mod_stop(&device->bna->mbox_mod);
+ bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
+ break;
+
+ case DEVICE_E_IOC_FAILED:
+ disable_mbox_intr(device);
+ bna_port_fail(&device->bna->port);
+ break;
+
+ default:
+ bfa_sm_fault(device->bna, event);
+ }
+}
+
+static void
+bna_device_sm_ioc_disable_wait_entry(struct bna_device *device)
+{
+ bfa_nw_ioc_disable(&device->ioc);
+}
+
+static void
+bna_device_sm_ioc_disable_wait(struct bna_device *device,
+ enum bna_device_event event)
+{
+ switch (event) {
+ case DEVICE_E_IOC_DISABLED:
+ disable_mbox_intr(device);
+ bfa_fsm_set_state(device, bna_device_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(device->bna, event);
+ }
+}
+
+static void
+bna_device_sm_failed_entry(struct bna_device *device)
+{
+ disable_mbox_intr(device);
+ bna_port_fail(&device->bna->port);
+ bna_mbox_mod_stop(&device->bna->mbox_mod);
+
+ if (device->ready_cbfn)
+ device->ready_cbfn(device->ready_cbarg,
+ BNA_CB_FAIL);
+ device->ready_cbfn = NULL;
+ device->ready_cbarg = NULL;
+}
+
+static void
+bna_device_sm_failed(struct bna_device *device,
+ enum bna_device_event event)
+{
+ switch (event) {
+ case DEVICE_E_DISABLE:
+ bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
+ break;
+
+ case DEVICE_E_IOC_RESET:
+ enable_mbox_intr(device);
+ bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
+ break;
+
+ default:
+ bfa_sm_fault(device->bna, event);
+ }
+}
+
+/* IOC callback functions */
+
+static void
+bna_device_cb_iocll_ready(void *dev, enum bfa_status error)
+{
+ struct bna_device *device = (struct bna_device *)dev;
+
+ if (error)
+ bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
+ else
+ bfa_fsm_send_event(device, DEVICE_E_IOC_READY);
+}
+
+static void
+bna_device_cb_iocll_disabled(void *dev)
+{
+ struct bna_device *device = (struct bna_device *)dev;
+
+ bfa_fsm_send_event(device, DEVICE_E_IOC_DISABLED);
+}
+
+static void
+bna_device_cb_iocll_failed(void *dev)
+{
+ struct bna_device *device = (struct bna_device *)dev;
+
+ bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
+}
+
+static void
+bna_device_cb_iocll_reset(void *dev)
+{
+ struct bna_device *device = (struct bna_device *)dev;
+
+ bfa_fsm_send_event(device, DEVICE_E_IOC_RESET);
+}
+
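+/*
+ * Positional initializer: the order below must match the member order
+ * of struct bfa_ioc_cbfn (ready, disabled, failed and reset
+ * notifications, in that order).
+ */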
+static struct bfa_ioc_cbfn bfa_iocll_cbfn = {
+ bna_device_cb_iocll_ready,
+ bna_device_cb_iocll_disabled,
+ bna_device_cb_iocll_failed,
+ bna_device_cb_iocll_reset
+};
+
+void
+bna_device_init(struct bna_device *device, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ u64 dma;
+
+ device->bna = bna;
+
+ /**
+ * Attach IOC and claim:
+ * 1. DMA memory for IOC attributes
+ * 2. Kernel memory for FW trace
+ */
+ bfa_nw_ioc_attach(&device->ioc, device, &bfa_iocll_cbfn);
+ bfa_nw_ioc_pci_init(&device->ioc, &bna->pcidev, BFI_MC_LL);
+
+ BNA_GET_DMA_ADDR(
+ &res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
+ bfa_nw_ioc_mem_claim(&device->ioc,
+ res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva,
+ dma);
+
+ bna_adv_device_init(device, bna, res_info);
+ /*
+ * Initialize mbox_mod only after IOC, so that mbox handler
+ * registration goes through
+ */
+ device->intr_type =
+ res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type;
+ device->vector =
+ res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.idl[0].vector;
+ bna_mbox_mod_init(&bna->mbox_mod, bna);
+
+ device->ready_cbfn = device->stop_cbfn = NULL;
+ device->ready_cbarg = device->stop_cbarg = NULL;
+
+ bfa_fsm_set_state(device, bna_device_sm_stopped);
+}
+
+void
+bna_device_uninit(struct bna_device *device)
+{
+ bna_mbox_mod_uninit(&device->bna->mbox_mod);
+
+ bfa_nw_ioc_detach(&device->ioc);
+
+ device->bna = NULL;
+}
+
+void
+bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
+{
+ struct bna_device *device = (struct bna_device *)arg;
+
+ bfa_fsm_send_event(device, DEVICE_E_PORT_STOPPED);
+}
+
+int
+bna_device_status_get(struct bna_device *device)
+{
+ return (device->fsm == (bfa_fsm_t)bna_device_sm_ready);
+}
+
+void
+bna_device_enable(struct bna_device *device)
+{
+ if (device->fsm != (bfa_fsm_t)bna_device_sm_stopped) {
+ bnad_cb_device_enabled(device->bna->bnad, BNA_CB_BUSY);
+ return;
+ }
+
+ device->ready_cbfn = bnad_cb_device_enabled;
+ device->ready_cbarg = device->bna->bnad;
+
+ bfa_fsm_send_event(device, DEVICE_E_ENABLE);
+}
+
+void
+bna_device_disable(struct bna_device *device, enum bna_cleanup_type type)
+{
+ if (type == BNA_SOFT_CLEANUP) {
+ bnad_cb_device_disabled(device->bna->bnad, BNA_CB_SUCCESS);
+ return;
+ }
+
+ device->stop_cbfn = bnad_cb_device_disabled;
+ device->stop_cbarg = device->bna->bnad;
+
+ bfa_fsm_send_event(device, DEVICE_E_DISABLE);
+}
+
+int
+bna_device_state_get(struct bna_device *device)
+{
+ return bfa_sm_to_state(device_sm_table, device->fsm);
+}
+
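+/*
+ * Interrupt coalescing timeouts for dynamic interrupt moderation,
+ * indexed by [load][bias]; bna_rx_dim_update() derives load from the
+ * observed packet rate and bias from the small/large packet ratio.
+ */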
+u32 bna_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
+ {12, 20},
+ {10, 18},
+ {8, 16},
+ {6, 12},
+ {4, 8},
+ {3, 6},
+ {2, 4},
+ {1, 2},
+};
+
+u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
+ {12, 12},
+ {6, 10},
+ {5, 10},
+ {4, 8},
+ {3, 6},
+ {3, 6},
+ {2, 4},
+ {1, 2},
+};
+
+/* device */
+void
+bna_adv_device_init(struct bna_device *device, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ u8 *kva;
+ u64 dma;
+
+ device->bna = bna;
+
+ kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
+
+ /**
+ * Attach common modules (Diag, SFP, CEE, Port) and claim respective
+ * DMA memory.
+ */
+ BNA_GET_DMA_ADDR(
+ &res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
+ kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
+
+ bfa_nw_cee_attach(&bna->cee, &device->ioc, bna);
+ bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
+ kva += bfa_nw_cee_meminfo();
+ dma += bfa_nw_cee_meminfo();
+}
+
+/* utils */
+
+void
+bna_adv_res_req(struct bna_res_info *res_info)
+{
+ /* DMA memory for COMMON_MODULE */
+ res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
+ res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
+ bfa_nw_cee_meminfo(), PAGE_SIZE);
+
+	/* Virtual memory for retrieving fw_trc */
+ res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0;
+ res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0;
+
+	/* DMA memory for retrieving stats */
+ res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
+ ALIGN(BFI_HW_STATS_SIZE, PAGE_SIZE);
+
+ /* Virtual memory for soft stats */
+ res_info[BNA_RES_MEM_T_SWSTATS].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.len =
+ sizeof(struct bna_sw_stats);
+}
+
+static void
+bna_sw_stats_get(struct bna *bna, struct bna_sw_stats *sw_stats)
+{
+ struct bna_tx *tx;
+ struct bna_txq *txq;
+ struct bna_rx *rx;
+ struct bna_rxp *rxp;
+ struct list_head *qe;
+ struct list_head *txq_qe;
+ struct list_head *rxp_qe;
+ struct list_head *mac_qe;
+ int i;
+
+ sw_stats->device_state = bna_device_state_get(&bna->device);
+ sw_stats->port_state = bna_port_state_get(&bna->port);
+ sw_stats->port_flags = bna->port.flags;
+ sw_stats->llport_state = bna_llport_state_get(&bna->port.llport);
+ sw_stats->priority = bna->port.priority;
+
+ i = 0;
+ list_for_each(qe, &bna->tx_mod.tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ sw_stats->tx_stats[i].tx_state = bna_tx_state_get(tx);
+ sw_stats->tx_stats[i].tx_flags = tx->flags;
+
+ sw_stats->tx_stats[i].num_txqs = 0;
+ sw_stats->tx_stats[i].txq_bmap[0] = 0;
+ sw_stats->tx_stats[i].txq_bmap[1] = 0;
+ list_for_each(txq_qe, &tx->txq_q) {
+ txq = (struct bna_txq *)txq_qe;
+ if (txq->txq_id < 32)
+ sw_stats->tx_stats[i].txq_bmap[0] |=
+ ((u32)1 << txq->txq_id);
+ else
+ sw_stats->tx_stats[i].txq_bmap[1] |=
+				((u32)1 << (txq->txq_id - 32));
+ sw_stats->tx_stats[i].num_txqs++;
+ }
+
+ sw_stats->tx_stats[i].txf_id = tx->txf.txf_id;
+
+ i++;
+ }
+ sw_stats->num_active_tx = i;
+
+ i = 0;
+ list_for_each(qe, &bna->rx_mod.rx_active_q) {
+ rx = (struct bna_rx *)qe;
+ sw_stats->rx_stats[i].rx_state = bna_rx_state_get(rx);
+ sw_stats->rx_stats[i].rx_flags = rx->rx_flags;
+
+ sw_stats->rx_stats[i].num_rxps = 0;
+ sw_stats->rx_stats[i].num_rxqs = 0;
+ sw_stats->rx_stats[i].rxq_bmap[0] = 0;
+ sw_stats->rx_stats[i].rxq_bmap[1] = 0;
+ sw_stats->rx_stats[i].cq_bmap[0] = 0;
+ sw_stats->rx_stats[i].cq_bmap[1] = 0;
+ list_for_each(rxp_qe, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)rxp_qe;
+
+ sw_stats->rx_stats[i].num_rxqs += 1;
+
+ if (rxp->type == BNA_RXP_SINGLE) {
+ if (rxp->rxq.single.only->rxq_id < 32) {
+ sw_stats->rx_stats[i].rxq_bmap[0] |=
+ ((u32)1 <<
+ rxp->rxq.single.only->rxq_id);
+ } else {
+ sw_stats->rx_stats[i].rxq_bmap[1] |=
+ ((u32)1 <<
+ (rxp->rxq.single.only->rxq_id - 32));
+ }
+ } else {
+ if (rxp->rxq.slr.large->rxq_id < 32) {
+ sw_stats->rx_stats[i].rxq_bmap[0] |=
+ ((u32)1 <<
+ rxp->rxq.slr.large->rxq_id);
+ } else {
+ sw_stats->rx_stats[i].rxq_bmap[1] |=
+ ((u32)1 <<
+ (rxp->rxq.slr.large->rxq_id - 32));
+ }
+
+ if (rxp->rxq.slr.small->rxq_id < 32) {
+ sw_stats->rx_stats[i].rxq_bmap[0] |=
+ ((u32)1 <<
+ rxp->rxq.slr.small->rxq_id);
+ } else {
+ sw_stats->rx_stats[i].rxq_bmap[1] |=
+ ((u32)1 <<
+ (rxp->rxq.slr.small->rxq_id - 32));
+ }
+ sw_stats->rx_stats[i].num_rxqs += 1;
+ }
+
+			if (rxp->cq.cq_id < 32)
+				sw_stats->rx_stats[i].cq_bmap[0] |=
+					((u32)1 << rxp->cq.cq_id);
+			else
+				sw_stats->rx_stats[i].cq_bmap[1] |=
+					((u32)1 << (rxp->cq.cq_id - 32));
+
+ sw_stats->rx_stats[i].num_rxps++;
+ }
+
+ sw_stats->rx_stats[i].rxf_id = rx->rxf.rxf_id;
+ sw_stats->rx_stats[i].rxf_state = bna_rxf_state_get(&rx->rxf);
+ sw_stats->rx_stats[i].rxf_oper_state = rx->rxf.rxf_oper_state;
+
+ sw_stats->rx_stats[i].num_active_ucast = 0;
+ if (rx->rxf.ucast_active_mac)
+ sw_stats->rx_stats[i].num_active_ucast++;
+ list_for_each(mac_qe, &rx->rxf.ucast_active_q)
+ sw_stats->rx_stats[i].num_active_ucast++;
+
+ sw_stats->rx_stats[i].num_active_mcast = 0;
+ list_for_each(mac_qe, &rx->rxf.mcast_active_q)
+ sw_stats->rx_stats[i].num_active_mcast++;
+
+ sw_stats->rx_stats[i].rxmode_active = rx->rxf.rxmode_active;
+ sw_stats->rx_stats[i].vlan_filter_status =
+ rx->rxf.vlan_filter_status;
+ memcpy(sw_stats->rx_stats[i].vlan_filter_table,
+ rx->rxf.vlan_filter_table,
+ sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32));
+
+ sw_stats->rx_stats[i].rss_status = rx->rxf.rss_status;
+ sw_stats->rx_stats[i].hds_status = rx->rxf.hds_status;
+
+ i++;
+ }
+ sw_stats->num_active_rx = i;
+}
+
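+/*
+ * The firmware DMAs the statistics block with the per-function TxF/RxF
+ * stats of only the active functions packed back to back at the end.
+ * The callback below byte-swaps every 64-bit word and then unpacks the
+ * packed function stats into their per-ID slots, walking backwards so
+ * the in-place unpacking does not overwrite entries that are still
+ * packed.
+ */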
+static void
+bna_fw_cb_stats_get(void *arg, int status)
+{
+ struct bna *bna = (struct bna *)arg;
+ u64 *p_stats;
+ int i, count;
+ int rxf_count, txf_count;
+ u64 rxf_bmap, txf_bmap;
+
+ bfa_q_qe_init(&bna->mbox_qe.qe);
+
+ if (status == 0) {
+ p_stats = (u64 *)bna->stats.hw_stats;
+ count = sizeof(struct bfi_ll_stats) / sizeof(u64);
+ for (i = 0; i < count; i++)
+ p_stats[i] = cpu_to_be64(p_stats[i]);
+
+ rxf_count = 0;
+ rxf_bmap = (u64)bna->stats.rxf_bmap[0] |
+ ((u64)bna->stats.rxf_bmap[1] << 32);
+ for (i = 0; i < BFI_LL_RXF_ID_MAX; i++)
+ if (rxf_bmap & ((u64)1 << i))
+ rxf_count++;
+
+ txf_count = 0;
+ txf_bmap = (u64)bna->stats.txf_bmap[0] |
+ ((u64)bna->stats.txf_bmap[1] << 32);
+ for (i = 0; i < BFI_LL_TXF_ID_MAX; i++)
+ if (txf_bmap & ((u64)1 << i))
+ txf_count++;
+
+ p_stats = (u64 *)&bna->stats.hw_stats->rxf_stats[0] +
+ ((rxf_count * sizeof(struct bfi_ll_stats_rxf) +
+ txf_count * sizeof(struct bfi_ll_stats_txf))/
+ sizeof(u64));
+
+ /* Populate the TXF stats from the firmware DMAed copy */
+ for (i = (BFI_LL_TXF_ID_MAX - 1); i >= 0; i--)
+ if (txf_bmap & ((u64)1 << i)) {
+ p_stats -= sizeof(struct bfi_ll_stats_txf)/
+ sizeof(u64);
+ memcpy(&bna->stats.hw_stats->txf_stats[i],
+ p_stats,
+ sizeof(struct bfi_ll_stats_txf));
+ }
+
+ /* Populate the RXF stats from the firmware DMAed copy */
+ for (i = (BFI_LL_RXF_ID_MAX - 1); i >= 0; i--)
+ if (rxf_bmap & ((u64)1 << i)) {
+ p_stats -= sizeof(struct bfi_ll_stats_rxf)/
+ sizeof(u64);
+ memcpy(&bna->stats.hw_stats->rxf_stats[i],
+ p_stats,
+ sizeof(struct bfi_ll_stats_rxf));
+ }
+
+ bna_sw_stats_get(bna, bna->stats.sw_stats);
+ bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
+ } else
+ bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
+}
+
+static void
+bna_fw_stats_get(struct bna *bna)
+{
+ struct bfi_ll_stats_req ll_req;
+
+ bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_GET_REQ, 0);
+ ll_req.stats_mask = htons(BFI_LL_STATS_ALL);
+
+ ll_req.rxf_id_mask[0] = htonl(bna->rx_mod.rxf_bmap[0]);
+ ll_req.rxf_id_mask[1] = htonl(bna->rx_mod.rxf_bmap[1]);
+ ll_req.txf_id_mask[0] = htonl(bna->tx_mod.txf_bmap[0]);
+ ll_req.txf_id_mask[1] = htonl(bna->tx_mod.txf_bmap[1]);
+
+ ll_req.host_buffer.a32.addr_hi = bna->hw_stats_dma.msb;
+ ll_req.host_buffer.a32.addr_lo = bna->hw_stats_dma.lsb;
+
+ bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_fw_cb_stats_get, bna);
+ bna_mbox_send(bna, &bna->mbox_qe);
+
+ bna->stats.rxf_bmap[0] = bna->rx_mod.rxf_bmap[0];
+ bna->stats.rxf_bmap[1] = bna->rx_mod.rxf_bmap[1];
+ bna->stats.txf_bmap[0] = bna->tx_mod.txf_bmap[0];
+ bna->stats.txf_bmap[1] = bna->tx_mod.txf_bmap[1];
+}
+
+static void
+bna_fw_cb_stats_clr(void *arg, int status)
+{
+ struct bna *bna = (struct bna *)arg;
+
+ bfa_q_qe_init(&bna->mbox_qe.qe);
+
+ memset(bna->stats.sw_stats, 0, sizeof(struct bna_sw_stats));
+ memset(bna->stats.hw_stats, 0, sizeof(struct bfi_ll_stats));
+
+ bnad_cb_stats_clr(bna->bnad);
+}
+
+static void
+bna_fw_stats_clr(struct bna *bna)
+{
+ struct bfi_ll_stats_req ll_req;
+
+ bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
+ ll_req.stats_mask = htons(BFI_LL_STATS_ALL);
+ ll_req.rxf_id_mask[0] = htonl(0xffffffff);
+ ll_req.rxf_id_mask[1] = htonl(0xffffffff);
+ ll_req.txf_id_mask[0] = htonl(0xffffffff);
+ ll_req.txf_id_mask[1] = htonl(0xffffffff);
+
+ bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_fw_cb_stats_clr, bna);
+ bna_mbox_send(bna, &bna->mbox_qe);
+}
+
+void
+bna_stats_get(struct bna *bna)
+{
+ if (bna_device_status_get(&bna->device))
+ bna_fw_stats_get(bna);
+ else
+ bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
+}
+
+void
+bna_stats_clr(struct bna *bna)
+{
+ if (bna_device_status_get(&bna->device))
+ bna_fw_stats_clr(bna);
+ else {
+		memset(bna->stats.sw_stats, 0,
+ sizeof(struct bna_sw_stats));
+ memset(bna->stats.hw_stats, 0,
+ sizeof(struct bfi_ll_stats));
+ bnad_cb_stats_clr(bna->bnad);
+ }
+}
+
+/* IB */
+void
+bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
+{
+ ib->ib_config.coalescing_timeo = coalescing_timeo;
+
+ if (ib->start_count)
+ ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
+ (u32)ib->ib_config.coalescing_timeo, 0);
+}
+
+/* RxF */
+void
+bna_rxf_adv_init(struct bna_rxf *rxf,
+ struct bna_rx *rx,
+ struct bna_rx_config *q_config)
+{
+ switch (q_config->rxp_type) {
+ case BNA_RXP_SINGLE:
+ /* No-op */
+ break;
+ case BNA_RXP_SLR:
+ rxf->ctrl_flags |= BNA_RXF_CF_SM_LG_RXQ;
+ break;
+ case BNA_RXP_HDS:
+ rxf->hds_cfg.hdr_type = q_config->hds_config.hdr_type;
+ rxf->hds_cfg.header_size =
+ q_config->hds_config.header_size;
+ rxf->forced_offset = 0;
+ break;
+ default:
+ break;
+ }
+
+ if (q_config->rss_status == BNA_STATUS_T_ENABLED) {
+ rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
+ rxf->rss_cfg.hash_type = q_config->rss_config.hash_type;
+ rxf->rss_cfg.hash_mask = q_config->rss_config.hash_mask;
+ memcpy(&rxf->rss_cfg.toeplitz_hash_key[0],
+ &q_config->rss_config.toeplitz_hash_key[0],
+ sizeof(rxf->rss_cfg.toeplitz_hash_key));
+ }
+}
+
+static void
+rxf_fltr_mbox_cmd(struct bna_rxf *rxf, u8 cmd, enum bna_status status)
+{
+ struct bfi_ll_rxf_req req;
+
+ bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);
+
+ req.rxf_id = rxf->rxf_id;
+ req.enable = status;
+
+ bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
+ rxf_cb_cam_fltr_mbox_cmd, rxf);
+
+ bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
+}
+
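+/*
+ * Set (or clear) BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE in the Rx function
+ * database entry of every other RxF, so that their filtering is
+ * redirected to the default function (see
+ * rxf_process_packet_filter_default() below).
+ */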
+void
+__rxf_default_function_config(struct bna_rxf *rxf, enum bna_status status)
+{
+ struct bna_rx_fndb_ram *rx_fndb_ram;
+ u32 ctrl_flags;
+ int i;
+
+ rx_fndb_ram = (struct bna_rx_fndb_ram *)
+ BNA_GET_MEM_BASE_ADDR(rxf->rx->bna->pcidev.pci_bar_kva,
+ RX_FNDB_RAM_BASE_OFFSET);
+
+ for (i = 0; i < BFI_MAX_RXF; i++) {
+ if (status == BNA_STATUS_T_ENABLED) {
+ if (i == rxf->rxf_id)
+ continue;
+
+ ctrl_flags =
+ readl(&rx_fndb_ram[i].control_flags);
+ ctrl_flags |= BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
+ writel(ctrl_flags,
+ &rx_fndb_ram[i].control_flags);
+ } else {
+ ctrl_flags =
+ readl(&rx_fndb_ram[i].control_flags);
+ ctrl_flags &= ~BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
+ writel(ctrl_flags,
+ &rx_fndb_ram[i].control_flags);
+ }
+ }
+}
+
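+/*
+ * The rxf_process_* and rxf_clear_* helpers below apply at most one
+ * pending change per invocation, returning 1 when a mailbox command was
+ * issued and 0 when there is nothing left to do.
+ */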
+int
+rxf_process_packet_filter_ucast(struct bna_rxf *rxf)
+{
+ struct bna_mac *mac = NULL;
+ struct list_head *qe;
+
+ /* Add additional MAC entries */
+ if (!list_empty(&rxf->ucast_pending_add_q)) {
+ bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_ADD_REQ, mac);
+ list_add_tail(&mac->qe, &rxf->ucast_active_q);
+ return 1;
+ }
+
+	/* Delete MAC addresses previously added */
+ if (!list_empty(&rxf->ucast_pending_del_q)) {
+ bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+rxf_process_packet_filter_promisc(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+
+ /* Enable/disable promiscuous mode */
+ if (is_promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move promisc configuration from pending -> active */
+ promisc_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active |= BNA_RXMODE_PROMISC;
+
+ /* Disable VLAN filter to allow all VLANs */
+ __rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED);
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
+ BNA_STATUS_T_ENABLED);
+ return 1;
+ } else if (is_promisc_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move promisc configuration from pending -> active */
+ promisc_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
+ bna->rxf_promisc_id = BFI_MAX_RXF;
+
+ /* Revert VLAN filter */
+ __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+rxf_process_packet_filter_default(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+
+ /* Enable/disable default mode */
+ if (is_default_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move default configuration from pending -> active */
+ default_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active |= BNA_RXMODE_DEFAULT;
+
+ /* Disable VLAN filter to allow all VLANs */
+ __rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED);
+ /* Redirect all other RxF vlan filtering to this one */
+ __rxf_default_function_config(rxf, BNA_STATUS_T_ENABLED);
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
+ BNA_STATUS_T_ENABLED);
+ return 1;
+ } else if (is_default_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move default configuration from pending -> active */
+ default_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
+ bna->rxf_default_id = BFI_MAX_RXF;
+
+ /* Revert VLAN filter */
+ __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
+ /* Stop RxF vlan filter table redirection */
+ __rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+rxf_process_packet_filter_allmulti(struct bna_rxf *rxf)
+{
+ /* Enable/disable allmulti mode */
+ if (is_allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move allmulti configuration from pending -> active */
+ allmulti_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
+
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
+ BNA_STATUS_T_ENABLED);
+ return 1;
+ } else if (is_allmulti_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move allmulti configuration from pending -> active */
+ allmulti_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
+
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+rxf_clear_packet_filter_ucast(struct bna_rxf *rxf)
+{
+ struct bna_mac *mac = NULL;
+ struct list_head *qe;
+
+ /* 1. delete pending ucast entries */
+ if (!list_empty(&rxf->ucast_pending_del_q)) {
+ bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ return 1;
+ }
+
+ /* 2. clear active ucast entries; move them to pending_add_q */
+ if (!list_empty(&rxf->ucast_active_q)) {
+ bfa_q_deq(&rxf->ucast_active_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
+ list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+rxf_clear_packet_filter_promisc(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+
+ /* 6. Execute pending promisc mode disable command */
+ if (is_promisc_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move promisc configuration from pending -> active */
+ promisc_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
+ bna->rxf_promisc_id = BFI_MAX_RXF;
+
+ /* Revert VLAN filter */
+ __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ /* 7. Clear active promisc mode; move it to pending enable */
+ if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
+ /* move promisc configuration from active -> pending */
+ promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
+
+ /* Revert VLAN filter */
+ __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+rxf_clear_packet_filter_default(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+
+ /* 8. Execute pending default mode disable command */
+ if (is_default_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move default configuration from pending -> active */
+ default_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
+ bna->rxf_default_id = BFI_MAX_RXF;
+
+ /* Revert VLAN filter */
+ __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
+ /* Stop RxF vlan filter table redirection */
+ __rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ /* 9. Clear active default mode; move it to pending enable */
+ if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
+ /* move default configuration from active -> pending */
+ default_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
+
+ /* Revert VLAN filter */
+ __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
+ /* Stop RxF vlan filter table redirection */
+ __rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf)
+{
+ /* 10. Execute pending allmulti mode disable command */
+ if (is_allmulti_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* move allmulti configuration from pending -> active */
+ allmulti_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ /* 11. Clear active allmulti mode; move it to pending enable */
+ if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
+ /* move allmulti configuration from active -> pending */
+ allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
+ rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
+ BNA_STATUS_T_DISABLED);
+ return 1;
+ }
+
+ return 0;
+}
+
+void
+rxf_reset_packet_filter_ucast(struct bna_rxf *rxf)
+{
+ struct list_head *qe;
+ struct bna_mac *mac;
+
+ /* 1. Move active ucast entries to pending_add_q */
+ while (!list_empty(&rxf->ucast_active_q)) {
+ bfa_q_deq(&rxf->ucast_active_q, &qe);
+ bfa_q_qe_init(qe);
+ list_add_tail(qe, &rxf->ucast_pending_add_q);
+ }
+
+ /* 2. Throw away delete pending ucast entries */
+ while (!list_empty(&rxf->ucast_pending_del_q)) {
+ bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ }
+}
+
+void
+rxf_reset_packet_filter_promisc(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+
+ /* 6. Clear pending promisc mode disable */
+ if (is_promisc_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ promisc_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
+ bna->rxf_promisc_id = BFI_MAX_RXF;
+ }
+
+ /* 7. Move promisc mode config from active -> pending */
+ if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
+ promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
+ }
+}
+
+void
+rxf_reset_packet_filter_default(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+
+ /* 8. Clear pending default mode disable */
+ if (is_default_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ default_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
+ bna->rxf_default_id = BFI_MAX_RXF;
+ }
+
+ /* 9. Move default mode config from active -> pending */
+ if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
+ default_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
+ }
+}
+
+void
+rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf)
+{
+ /* 10. Clear pending allmulti mode disable */
+ if (is_allmulti_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ allmulti_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
+ }
+
+ /* 11. Move allmulti mode config from active -> pending */
+ if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
+ allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
+ }
+}
+
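+/*
+ * Rx mode (promisc/default/allmulti) changes are two-phase: the
+ * rxf_*_enable() and rxf_*_disable() helpers below only record the
+ * request in the pending bitmask and report whether a h/w update is
+ * needed; rxf_process_packet_filter_*() later moves pending -> active
+ * and issues the mailbox command.
+ */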
+/**
+ * Should only be called by bna_rxf_mode_set.
+ * Helps decide whether h/w configuration is needed.
+ * Returns:
+ * 0 = no h/w change
+ * 1 = need h/w change
+ */
+int
+rxf_promisc_enable(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+ int ret = 0;
+
+	/* There cannot be any pending disable command */
+
+ /* Do nothing if pending enable or already enabled */
+ if (is_promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask) ||
+ (rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
+ /* Schedule enable */
+ } else {
+ /* Promisc mode should not be active in the system */
+ promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ bna->rxf_promisc_id = rxf->rxf_id;
+ ret = 1;
+ }
+
+ return ret;
+}
+
+/**
+ * Should only be called by bna_rxf_mode_set.
+ * Helps decide whether h/w configuration is needed.
+ * Returns:
+ * 0 = no h/w change
+ * 1 = need h/w change
+ */
+int
+rxf_promisc_disable(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+ int ret = 0;
+
+	/* There cannot be any pending disable */
+
+	/* Turn off pending enable command, if any */
+ if (is_promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* Promisc mode should not be active */
+ /* system promisc state should be pending */
+ promisc_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ /* Remove the promisc state from the system */
+ bna->rxf_promisc_id = BFI_MAX_RXF;
+
+ /* Schedule disable */
+ } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
+ /* Promisc mode should be active in the system */
+ promisc_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ ret = 1;
+
+ /* Do nothing if already disabled */
+ } else {
+ }
+
+ return ret;
+}
+
+/**
+ * Should only be called by bna_rxf_mode_set.
+ * Helps decide whether h/w configuration is needed.
+ * Returns:
+ * 0 = no h/w change
+ * 1 = need h/w change
+ */
+int
+rxf_default_enable(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+ int ret = 0;
+
+	/* There cannot be any pending disable command */
+
+ /* Do nothing if pending enable or already enabled */
+ if (is_default_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask) ||
+ (rxf->rxmode_active & BNA_RXMODE_DEFAULT)) {
+ /* Schedule enable */
+ } else {
+ /* Default mode should not be active in the system */
+ default_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ bna->rxf_default_id = rxf->rxf_id;
+ ret = 1;
+ }
+
+ return ret;
+}
+
+/**
+ * Should only be called by bna_rxf_mode_set.
+ * Helps decide whether h/w configuration is needed.
+ * Returns:
+ * 0 = no h/w change
+ * 1 = need h/w change
+ */
+int
+rxf_default_disable(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+ int ret = 0;
+
+	/* There cannot be any pending disable */
+
+	/* Turn off pending enable command, if any */
+ if (is_default_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+		/* Default mode should not be active */
+ /* system default state should be pending */
+ default_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ /* Remove the default state from the system */
+ bna->rxf_default_id = BFI_MAX_RXF;
+
+ /* Schedule disable */
+ } else if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
+ /* Default mode should be active in the system */
+ default_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ ret = 1;
+
+ /* Do nothing if already disabled */
+ } else {
+ }
+
+ return ret;
+}
+
+/**
+ * Should only be called by bna_rxf_mode_set.
+ * Helps decide whether h/w configuration is needed.
+ * Returns:
+ * 0 = no h/w change
+ * 1 = need h/w change
+ */
+int
+rxf_allmulti_enable(struct bna_rxf *rxf)
+{
+ int ret = 0;
+
+	/* There cannot be any pending disable command */
+
+ /* Do nothing if pending enable or already enabled */
+ if (is_allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask) ||
+ (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
+ /* Schedule enable */
+ } else {
+ allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+/**
+ * Should only be called by bna_rxf_mode_set.
+ * Helps decide whether h/w configuration is needed.
+ * Returns:
+ * 0 = no h/w change
+ * 1 = need h/w change
+ */
+int
+rxf_allmulti_disable(struct bna_rxf *rxf)
+{
+ int ret = 0;
+
+	/* There cannot be any pending disable */
+
+	/* Turn off pending enable command, if any */
+ if (is_allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* Allmulti mode should not be active */
+ allmulti_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+
+ /* Schedule disable */
+ } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
+ allmulti_disable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+/* RxF <- bnad */
+void
+bna_rx_mcast_delall(struct bna_rx *rx,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ struct list_head *qe;
+ struct bna_mac *mac;
+ int need_hw_config = 0;
+
+ /* Purge all entries from pending_add_q */
+ while (!list_empty(&rxf->mcast_pending_add_q)) {
+ bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ }
+
+ /* Schedule all entries in active_q for deletion */
+ while (!list_empty(&rxf->mcast_active_q)) {
+ bfa_q_deq(&rxf->mcast_active_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
+ need_hw_config = 1;
+ }
+
+ if (need_hw_config) {
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+ return;
+ }
+
+ if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+}
+
+/* RxF <- Rx */
+void
+bna_rx_receive_resume(struct bna_rx *rx,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED) {
+ rxf->oper_state_cbfn = cbfn;
+ rxf->oper_state_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_RESUME);
+ } else if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+}
+
+void
+bna_rx_receive_pause(struct bna_rx *rx,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_RUNNING) {
+ rxf->oper_state_cbfn = cbfn;
+ rxf->oper_state_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_PAUSE);
+ } else if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+}
+
+/* RxF <- bnad */
+enum bna_cb_status
+bna_rx_ucast_add(struct bna_rx *rx, u8 *addr,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ struct list_head *qe;
+ struct bna_mac *mac;
+
+ /* Check if already added */
+ list_for_each(qe, &rxf->ucast_active_q) {
+ mac = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
+ if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+ return BNA_CB_SUCCESS;
+ }
+ }
+
+ /* Check if pending addition */
+ list_for_each(qe, &rxf->ucast_pending_add_q) {
+ mac = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
+ if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+ return BNA_CB_SUCCESS;
+ }
+ }
+
+ mac = bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
+ if (mac == NULL)
+ return BNA_CB_UCAST_CAM_FULL;
+ bfa_q_qe_init(&mac->qe);
+ memcpy(mac->addr, addr, ETH_ALEN);
+ list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
+
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+
+ return BNA_CB_SUCCESS;
+}
+
+/* RxF <- bnad */
+enum bna_cb_status
+bna_rx_ucast_del(struct bna_rx *rx, u8 *addr,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ struct list_head *qe;
+ struct bna_mac *mac;
+
+ list_for_each(qe, &rxf->ucast_pending_add_q) {
+ mac = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
+ list_del(qe);
+ bfa_q_qe_init(qe);
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+ return BNA_CB_SUCCESS;
+ }
+ }
+
+ list_for_each(qe, &rxf->ucast_active_q) {
+ mac = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
+ list_del(qe);
+ bfa_q_qe_init(qe);
+ list_add_tail(qe, &rxf->ucast_pending_del_q);
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+ return BNA_CB_SUCCESS;
+ }
+ }
+
+ return BNA_CB_INVALID_MAC;
+}
+
+/* RxF <- bnad */
+enum bna_cb_status
+bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
+ enum bna_rxmode bitmask,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ int need_hw_config = 0;
+
+ /* Error checks */
+
+ if (is_promisc_enable(new_mode, bitmask)) {
+ /* If promisc mode is already enabled elsewhere in the system */
+ if ((rx->bna->rxf_promisc_id != BFI_MAX_RXF) &&
+ (rx->bna->rxf_promisc_id != rxf->rxf_id))
+ goto err_return;
+
+ /* If default mode is already enabled in the system */
+ if (rx->bna->rxf_default_id != BFI_MAX_RXF)
+ goto err_return;
+
+ /* Trying to enable promiscuous and default mode together */
+ if (is_default_enable(new_mode, bitmask))
+ goto err_return;
+ }
+
+ if (is_default_enable(new_mode, bitmask)) {
+ /* If default mode is already enabled elsewhere in the system */
+ if ((rx->bna->rxf_default_id != BFI_MAX_RXF) &&
+ (rx->bna->rxf_default_id != rxf->rxf_id)) {
+ goto err_return;
+ }
+
+ /* If promiscuous mode is already enabled in the system */
+ if (rx->bna->rxf_promisc_id != BFI_MAX_RXF)
+ goto err_return;
+ }
+
+ /* Process the commands */
+
+ if (is_promisc_enable(new_mode, bitmask)) {
+ if (rxf_promisc_enable(rxf))
+ need_hw_config = 1;
+ } else if (is_promisc_disable(new_mode, bitmask)) {
+ if (rxf_promisc_disable(rxf))
+ need_hw_config = 1;
+ }
+
+ if (is_default_enable(new_mode, bitmask)) {
+ if (rxf_default_enable(rxf))
+ need_hw_config = 1;
+ } else if (is_default_disable(new_mode, bitmask)) {
+ if (rxf_default_disable(rxf))
+ need_hw_config = 1;
+ }
+
+ if (is_allmulti_enable(new_mode, bitmask)) {
+ if (rxf_allmulti_enable(rxf))
+ need_hw_config = 1;
+ } else if (is_allmulti_disable(new_mode, bitmask)) {
+ if (rxf_allmulti_disable(rxf))
+ need_hw_config = 1;
+ }
+
+ /* Trigger h/w if needed */
+
+ if (need_hw_config) {
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+ } else if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+
+ return BNA_CB_SUCCESS;
+
+err_return:
+ return BNA_CB_FAIL;
+}
+
+/* RxF <- bnad */
+void
+bna_rx_rss_enable(struct bna_rx *rx)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
+ rxf->rss_status = BNA_STATUS_T_ENABLED;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+}
+
+/* RxF <- bnad */
+void
+bna_rx_rss_disable(struct bna_rx *rx)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
+ rxf->rss_status = BNA_STATUS_T_DISABLED;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+}
+
+/* RxF <- bnad */
+void
+bna_rx_rss_reconfig(struct bna_rx *rx, struct bna_rxf_rss *rss_config)
+{
+	struct bna_rxf *rxf = &rx->rxf;
+
+	rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
+ rxf->rss_status = BNA_STATUS_T_ENABLED;
+ rxf->rss_cfg = *rss_config;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+}
+
+/* RxF <- bnad */
+void
+bna_rx_vlanfilter_enable(struct bna_rx *rx)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
+ rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
+ rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+ }
+}
+
+/* RxF <- bnad */
+void
+bna_rx_vlanfilter_disable(struct bna_rx *rx)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
+ rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
+ rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+ }
+}
+
+/* Rx */
+
+struct bna_rxp *
+bna_rx_get_rxp(struct bna_rx *rx, int vector)
+{
+ struct bna_rxp *rxp;
+ struct list_head *qe;
+
+ list_for_each(qe, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe;
+ if (rxp->vector == vector)
+ return rxp;
+ }
+ return NULL;
+}
+
+/*
+ * bna_rx_rss_rit_set()
+ * Sets the Q ids for the specified MSI-X vectors in the RIT.
+ * The maximum RIT size supported is 64, which is also the maximum
+ * size of the vectors array.
+ */
+
+void
+bna_rx_rss_rit_set(struct bna_rx *rx, unsigned int *vectors, int nvectors)
+{
+ int i;
+ struct bna_rxp *rxp;
+ struct bna_rxq *q0 = NULL, *q1 = NULL;
+ struct bna *bna;
+ struct bna_rxf *rxf;
+
+ /* Build the RIT contents for this RX */
+ bna = rx->bna;
+
+ rxf = &rx->rxf;
+ for (i = 0; i < nvectors; i++) {
+ rxp = bna_rx_get_rxp(rx, vectors[i]);
+
+ GET_RXQS(rxp, q0, q1);
+ rxf->rit_segment->rit[i].large_rxq_id = q0->rxq_id;
+ rxf->rit_segment->rit[i].small_rxq_id = (q1 ? q1->rxq_id : 0);
+ }
+
+ rxf->rit_segment->rit_size = nvectors;
+
+ /* Subsequent call to enable/reconfig RSS will update the RIT in h/w */
+}
+
+/* Rx <- bnad */
+void
+bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
+{
+ struct bna_rxp *rxp;
+ struct list_head *qe;
+
+ list_for_each(qe, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe;
+ rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
+ bna_ib_coalescing_timeo_set(rxp->cq.ib, coalescing_timeo);
+ }
+}
+
+/* Rx <- bnad */
+void
+bna_rx_dim_reconfig(struct bna *bna, u32 vector[][BNA_BIAS_T_MAX])
+{
+ int i, j;
+
+ for (i = 0; i < BNA_LOAD_T_MAX; i++)
+ for (j = 0; j < BNA_BIAS_T_MAX; j++)
+ bna->rx_mod.dim_vector[i][j] = vector[i][j];
+}
+
+/* Rx <- bnad */
+void
+bna_rx_dim_update(struct bna_ccb *ccb)
+{
+ struct bna *bna = ccb->cq->rx->bna;
+ u32 load, bias;
+ u32 pkt_rt, small_rt, large_rt;
+ u8 coalescing_timeo;
+
+ if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
+ (ccb->pkt_rate.large_pkt_cnt == 0))
+ return;
+
+ /* Arrive at preconfigured coalescing timeo value based on pkt rate */
+
+ small_rt = ccb->pkt_rate.small_pkt_cnt;
+ large_rt = ccb->pkt_rate.large_pkt_cnt;
+
+ pkt_rt = small_rt + large_rt;
+
+ if (pkt_rt < BNA_PKT_RATE_10K)
+ load = BNA_LOAD_T_LOW_4;
+ else if (pkt_rt < BNA_PKT_RATE_20K)
+ load = BNA_LOAD_T_LOW_3;
+ else if (pkt_rt < BNA_PKT_RATE_30K)
+ load = BNA_LOAD_T_LOW_2;
+ else if (pkt_rt < BNA_PKT_RATE_40K)
+ load = BNA_LOAD_T_LOW_1;
+ else if (pkt_rt < BNA_PKT_RATE_50K)
+ load = BNA_LOAD_T_HIGH_1;
+ else if (pkt_rt < BNA_PKT_RATE_60K)
+ load = BNA_LOAD_T_HIGH_2;
+ else if (pkt_rt < BNA_PKT_RATE_80K)
+ load = BNA_LOAD_T_HIGH_3;
+ else
+ load = BNA_LOAD_T_HIGH_4;
+
+ if (small_rt > (large_rt << 1))
+ bias = 0;
+ else
+ bias = 1;
+
+ ccb->pkt_rate.small_pkt_cnt = 0;
+ ccb->pkt_rate.large_pkt_cnt = 0;
+
+ coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
+ ccb->rx_coalescing_timeo = coalescing_timeo;
+
+ /* Set it to IB */
+ bna_ib_coalescing_timeo_set(ccb->cq->ib, coalescing_timeo);
+}
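+
+/*
+ * Worked example (sketch): if a sampling period sees 18K small and
+ * 7K large packets, pkt_rt = 25K, which falls in BNA_LOAD_T_LOW_2;
+ * small_rt (18K) exceeds twice large_rt (14K), so bias = 0 and the
+ * new timeout is rx_mod.dim_vector[BNA_LOAD_T_LOW_2][0].
+ */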
+
+/* Tx */
+/* TX <- bnad */
+enum bna_cb_status
+bna_tx_prio_set(struct bna_tx *tx, int prio,
+ void (*cbfn)(struct bnad *, struct bna_tx *,
+ enum bna_cb_status))
+{
+	if (tx->flags & BNA_TX_F_PRIO_LOCK)
+		return BNA_CB_FAIL;
+
+	tx->prio_change_cbfn = cbfn;
+	bna_tx_prio_changed(tx, prio);
+
+ return BNA_CB_SUCCESS;
+}
+
+/* TX <- bnad */
+void
+bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ bna_ib_coalescing_timeo_set(txq->ib, coalescing_timeo);
+ }
+}
+
+/*
+ * Private data
+ */
+
+struct bna_ritseg_pool_cfg {
+ u32 pool_size;
+ u32 pool_entry_size;
+};
+init_ritseg_pool(ritseg_pool_cfg);
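+
+/*
+ * For the normal (non-BIOS) build, init_ritseg_pool() (defined in
+ * bna_hw.h) expands the line above to:
+ *
+ *	static struct bna_ritseg_pool_cfg ritseg_pool_cfg[2] = {
+ *		{ 192, 1 },	(192 segments of 1 RIT entry each)
+ *		{ 1, 64 }	(1 segment of 64 RIT entries, for RSS)
+ *	};
+ */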
+
+/*
+ * Private functions
+ */
+static void
+bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ int i;
+
+ ucam_mod->ucmac = (struct bna_mac *)
+ res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
+
+ INIT_LIST_HEAD(&ucam_mod->free_q);
+ for (i = 0; i < BFI_MAX_UCMAC; i++) {
+ bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
+ list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
+ }
+
+ ucam_mod->bna = bna;
+}
+
+static void
+bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
+{
+ struct list_head *qe;
+ int i = 0;
+
+ list_for_each(qe, &ucam_mod->free_q)
+ i++;
+
+ ucam_mod->bna = NULL;
+}
+
+static void
+bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ int i;
+
+ mcam_mod->mcmac = (struct bna_mac *)
+ res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
+
+ INIT_LIST_HEAD(&mcam_mod->free_q);
+ for (i = 0; i < BFI_MAX_MCMAC; i++) {
+ bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
+ list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
+ }
+
+ mcam_mod->bna = bna;
+}
+
+static void
+bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
+{
+ struct list_head *qe;
+ int i = 0;
+
+ list_for_each(qe, &mcam_mod->free_q)
+ i++;
+
+ mcam_mod->bna = NULL;
+}
+
+static void
+bna_rit_mod_init(struct bna_rit_mod *rit_mod,
+ struct bna_res_info *res_info)
+{
+ int i;
+ int j;
+ int count;
+ int offset;
+
+ rit_mod->rit = (struct bna_rit_entry *)
+ res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mdl[0].kva;
+ rit_mod->rit_segment = (struct bna_rit_segment *)
+ res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mdl[0].kva;
+
+ count = 0;
+ offset = 0;
+ for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
+ INIT_LIST_HEAD(&rit_mod->rit_seg_pool[i]);
+ for (j = 0; j < ritseg_pool_cfg[i].pool_size; j++) {
+ bfa_q_qe_init(&rit_mod->rit_segment[count].qe);
+ rit_mod->rit_segment[count].max_rit_size =
+ ritseg_pool_cfg[i].pool_entry_size;
+ rit_mod->rit_segment[count].rit_offset = offset;
+ rit_mod->rit_segment[count].rit =
+ &rit_mod->rit[offset];
+ list_add_tail(&rit_mod->rit_segment[count].qe,
+ &rit_mod->rit_seg_pool[i]);
+ count++;
+ offset += ritseg_pool_cfg[i].pool_entry_size;
+ }
+ }
+}
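+
+/*
+ * With the default pools this carves the 256-entry RIT into 192
+ * single-entry segments (offsets 0-191) followed by one 64-entry
+ * RSS segment at offset 192, for BFI_RIT_TOTAL_SEGS (193) segments
+ * in all.
+ */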
+
+static void
+bna_rit_mod_uninit(struct bna_rit_mod *rit_mod)
+{
+ struct bna_rit_segment *rit_segment;
+ struct list_head *qe;
+ int i;
+ int j;
+
+ for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
+ j = 0;
+ list_for_each(qe, &rit_mod->rit_seg_pool[i]) {
+ rit_segment = (struct bna_rit_segment *)qe;
+ j++;
+ }
+ }
+}
+
+/*
+ * Public functions
+ */
+
+/* Called during probe(), before calling bna_init() */
+void
+bna_res_req(struct bna_res_info *res_info)
+{
+ bna_adv_res_req(res_info);
+
+ /* DMA memory for retrieving IOC attributes */
+ res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
+ res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
+ ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);
+
+ /* DMA memory for index segment of an IB */
+ res_info[BNA_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
+ res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.len =
+ BFI_IBIDX_SIZE * BFI_IBIDX_MAX_SEGSIZE;
+ res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.num = BFI_MAX_IB;
+
+ /* Virtual memory for IB objects - stored by IB module */
+ res_info[BNA_RES_MEM_T_IB_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.len =
+ BFI_MAX_IB * sizeof(struct bna_ib);
+
+ /* Virtual memory for intr objects - stored by IB module */
+ res_info[BNA_RES_MEM_T_INTR_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.len =
+ BFI_MAX_IB * sizeof(struct bna_intr);
+
+ /* Virtual memory for idx_seg objects - stored by IB module */
+ res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.len =
+ BFI_IBIDX_TOTAL_SEGS * sizeof(struct bna_ibidx_seg);
+
+ /* Virtual memory for Tx objects - stored by Tx module */
+ res_info[BNA_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
+ BFI_MAX_TXQ * sizeof(struct bna_tx);
+
+ /* Virtual memory for TxQ - stored by Tx module */
+ res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
+ BFI_MAX_TXQ * sizeof(struct bna_txq);
+
+ /* Virtual memory for Rx objects - stored by Rx module */
+ res_info[BNA_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
+ BFI_MAX_RXQ * sizeof(struct bna_rx);
+
+ /* Virtual memory for RxPath - stored by Rx module */
+ res_info[BNA_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
+ BFI_MAX_RXQ * sizeof(struct bna_rxp);
+
+ /* Virtual memory for RxQ - stored by Rx module */
+ res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
+ BFI_MAX_RXQ * sizeof(struct bna_rxq);
+
+ /* Virtual memory for Unicast MAC address - stored by ucam module */
+ res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
+ BFI_MAX_UCMAC * sizeof(struct bna_mac);
+
+ /* Virtual memory for Multicast MAC address - stored by mcam module */
+ res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
+ BFI_MAX_MCMAC * sizeof(struct bna_mac);
+
+ /* Virtual memory for RIT entries */
+ res_info[BNA_RES_MEM_T_RIT_ENTRY].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.len =
+ BFI_MAX_RIT_SIZE * sizeof(struct bna_rit_entry);
+
+ /* Virtual memory for RIT segment table */
+ res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_type = BNA_RES_T_MEM;
+ res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mem_type =
+ BNA_MEM_T_KVA;
+ res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.len =
+ BFI_RIT_TOTAL_SEGS * sizeof(struct bna_rit_segment);
+
+ /* Interrupt resource for mailbox interrupt */
+ res_info[BNA_RES_INTR_T_MBOX].res_type = BNA_RES_T_INTR;
+ res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type =
+ BNA_INTR_T_MSIX;
+ res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.num = 1;
+}
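+
+/*
+ * Probe-time sequence (sketch; bnad_res_alloc() is a stand-in name
+ * for whatever allocator the caller uses to satisfy res_info[]):
+ *
+ *	bna_res_req(res_info);
+ *	error = bnad_res_alloc(bnad, res_info);
+ *	if (!error)
+ *		bna_init(bna, bnad, &pcidev, res_info);
+ */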
+
+/* Called during probe() */
+void
+bna_init(struct bna *bna, struct bnad *bnad, struct bfa_pcidev *pcidev,
+ struct bna_res_info *res_info)
+{
+ bna->bnad = bnad;
+ bna->pcidev = *pcidev;
+
+ bna->stats.hw_stats = (struct bfi_ll_stats *)
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
+ bna->hw_stats_dma.msb =
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
+ bna->hw_stats_dma.lsb =
+ res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;
+ bna->stats.sw_stats = (struct bna_sw_stats *)
+ res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mdl[0].kva;
+
+ bna->regs.page_addr = bna->pcidev.pci_bar_kva +
+ reg_offset[bna->pcidev.pci_func].page_addr;
+ bna->regs.fn_int_status = bna->pcidev.pci_bar_kva +
+ reg_offset[bna->pcidev.pci_func].fn_int_status;
+ bna->regs.fn_int_mask = bna->pcidev.pci_bar_kva +
+ reg_offset[bna->pcidev.pci_func].fn_int_mask;
+
+ if (bna->pcidev.pci_func < 3)
+ bna->port_num = 0;
+ else
+ bna->port_num = 1;
+
+ /* Also initializes diag, cee, sfp, phy_port and mbox_mod */
+ bna_device_init(&bna->device, bna, res_info);
+
+ bna_port_init(&bna->port, bna);
+
+ bna_tx_mod_init(&bna->tx_mod, bna, res_info);
+
+ bna_rx_mod_init(&bna->rx_mod, bna, res_info);
+
+ bna_ib_mod_init(&bna->ib_mod, bna, res_info);
+
+ bna_rit_mod_init(&bna->rit_mod, res_info);
+
+ bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);
+
+ bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);
+
+ bna->rxf_default_id = BFI_MAX_RXF;
+ bna->rxf_promisc_id = BFI_MAX_RXF;
+
+ /* Mbox q element for posting stat request to f/w */
+ bfa_q_qe_init(&bna->mbox_qe.qe);
+}
+
+void
+bna_uninit(struct bna *bna)
+{
+ bna_mcam_mod_uninit(&bna->mcam_mod);
+
+ bna_ucam_mod_uninit(&bna->ucam_mod);
+
+ bna_rit_mod_uninit(&bna->rit_mod);
+
+ bna_ib_mod_uninit(&bna->ib_mod);
+
+ bna_rx_mod_uninit(&bna->rx_mod);
+
+ bna_tx_mod_uninit(&bna->tx_mod);
+
+ bna_port_uninit(&bna->port);
+
+ bna_device_uninit(&bna->device);
+
+ bna->bnad = NULL;
+}
+
+struct bna_mac *
+bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
+{
+ struct list_head *qe;
+
+ if (list_empty(&ucam_mod->free_q))
+ return NULL;
+
+ bfa_q_deq(&ucam_mod->free_q, &qe);
+
+ return (struct bna_mac *)qe;
+}
+
+void
+bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
+{
+ list_add_tail(&mac->qe, &ucam_mod->free_q);
+}
+
+struct bna_mac *
+bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
+{
+ struct list_head *qe;
+
+ if (list_empty(&mcam_mod->free_q))
+ return NULL;
+
+ bfa_q_deq(&mcam_mod->free_q, &qe);
+
+ return (struct bna_mac *)qe;
+}
+
+void
+bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
+{
+ list_add_tail(&mac->qe, &mcam_mod->free_q);
+}
+
+/**
+ * Note: This should be called in the same locking context as the call to
+ * bna_rit_mod_seg_get()
+ */
+int
+bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size)
+{
+ int i;
+
+ /* Select the pool for seg_size */
+ for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
+ if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
+ break;
+ }
+
+ if (i == BFI_RIT_SEG_TOTAL_POOLS)
+ return 0;
+
+ if (list_empty(&rit_mod->rit_seg_pool[i]))
+ return 0;
+
+ return 1;
+}
+
+struct bna_rit_segment *
+bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size)
+{
+ struct bna_rit_segment *seg;
+ struct list_head *qe;
+ int i;
+
+ /* Select the pool for seg_size */
+ for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
+ if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
+ break;
+ }
+
+ if (i == BFI_RIT_SEG_TOTAL_POOLS)
+ return NULL;
+
+ if (list_empty(&rit_mod->rit_seg_pool[i]))
+ return NULL;
+
+ bfa_q_deq(&rit_mod->rit_seg_pool[i], &qe);
+ seg = (struct bna_rit_segment *)qe;
+ bfa_q_qe_init(&seg->qe);
+ seg->rit_size = seg_size;
+
+ return seg;
+}
+
+void
+bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
+ struct bna_rit_segment *seg)
+{
+ int i;
+
+ /* Select the pool for seg->max_rit_size */
+ for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
+ if (seg->max_rit_size == ritseg_pool_cfg[i].pool_entry_size)
+ break;
+ }
+
+ seg->rit_size = 0;
+ list_add_tail(&seg->qe, &rit_mod->rit_seg_pool[i]);
+}
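+
+/*
+ * Usage sketch (illustrative, under the caller's lock):
+ *
+ *	if (bna_rit_mod_can_satisfy(rit_mod, BFI_RSS_RIT_SIZE)) {
+ *		seg = bna_rit_mod_seg_get(rit_mod, BFI_RSS_RIT_SIZE);
+ *		... program seg->rit[0 .. seg->rit_size - 1] ...
+ *		bna_rit_mod_seg_put(rit_mod, seg);
+ *	}
+ */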
diff --git a/drivers/net/bna/bna_hw.h b/drivers/net/bna/bna_hw.h
new file mode 100644
index 000000000000..67eb376c5c7e
--- /dev/null
+++ b/drivers/net/bna/bna_hw.h
@@ -0,0 +1,1491 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * File for interrupt macros and functions
+ */
+
+#ifndef __BNA_HW_H__
+#define __BNA_HW_H__
+
+#include "bfi_ctreg.h"
+
+/**
+ *
+ * SW imposed limits
+ *
+ */
+
+#ifndef BNA_BIOS_BUILD
+
+#define BFI_MAX_TXQ 64
+#define BFI_MAX_RXQ 64
+#define BFI_MAX_RXF 64
+#define BFI_MAX_IB 128
+#define BFI_MAX_RIT_SIZE 256
+#define BFI_RSS_RIT_SIZE 64
+#define BFI_NONRSS_RIT_SIZE 1
+#define BFI_MAX_UCMAC 256
+#define BFI_MAX_MCMAC 512
+#define BFI_IBIDX_SIZE 4
+#define BFI_MAX_VLAN 4095
+
+/**
+ * There are 3 free IB index pools:
+ *	pool1: 116 segments of 1 index each
+ *	pool2: 2 segments of 2 indexes each
+ *	pool8: 1 segment of 8 indexes
+ */
+#define BFI_IBIDX_POOL1_SIZE 116
+#define BFI_IBIDX_POOL1_ENTRY_SIZE 1
+#define BFI_IBIDX_POOL2_SIZE 2
+#define BFI_IBIDX_POOL2_ENTRY_SIZE 2
+#define BFI_IBIDX_POOL8_SIZE 1
+#define BFI_IBIDX_POOL8_ENTRY_SIZE 8
+#define BFI_IBIDX_TOTAL_POOLS 3
+#define BFI_IBIDX_TOTAL_SEGS 119 /* (POOL1 + POOL2 + POOL8)_SIZE */
+#define BFI_IBIDX_MAX_SEGSIZE 8
+#define init_ibidx_pool(name) \
+static struct bna_ibidx_pool name[BFI_IBIDX_TOTAL_POOLS] = \
+{ \
+ { BFI_IBIDX_POOL1_SIZE, BFI_IBIDX_POOL1_ENTRY_SIZE }, \
+ { BFI_IBIDX_POOL2_SIZE, BFI_IBIDX_POOL2_ENTRY_SIZE }, \
+ { BFI_IBIDX_POOL8_SIZE, BFI_IBIDX_POOL8_ENTRY_SIZE } \
+}
+
+/**
+ * There are 2 free RIT segment pools:
+ *	Pool1: 192 segments of 1 RIT entry each
+ *	PoolRSS: 1 segment of 64 RIT entries
+ */
+#define BFI_RIT_SEG_POOL1_SIZE 192
+#define BFI_RIT_SEG_POOL1_ENTRY_SIZE 1
+#define BFI_RIT_SEG_POOLRSS_SIZE 1
+#define BFI_RIT_SEG_POOLRSS_ENTRY_SIZE 64
+#define BFI_RIT_SEG_TOTAL_POOLS 2
+#define BFI_RIT_TOTAL_SEGS 193 /* POOL1_SIZE + POOLRSS_SIZE */
+#define init_ritseg_pool(name) \
+static struct bna_ritseg_pool_cfg name[BFI_RIT_SEG_TOTAL_POOLS] = \
+{ \
+ { BFI_RIT_SEG_POOL1_SIZE, BFI_RIT_SEG_POOL1_ENTRY_SIZE }, \
+ { BFI_RIT_SEG_POOLRSS_SIZE, BFI_RIT_SEG_POOLRSS_ENTRY_SIZE } \
+}
+
+#else /* BNA_BIOS_BUILD */
+
+#define BFI_MAX_TXQ 1
+#define BFI_MAX_RXQ 1
+#define BFI_MAX_RXF 1
+#define BFI_MAX_IB 2
+#define BFI_MAX_RIT_SIZE 2
+#define BFI_RSS_RIT_SIZE 64
+#define BFI_NONRSS_RIT_SIZE 1
+#define BFI_MAX_UCMAC 1
+#define BFI_MAX_MCMAC 8
+#define BFI_IBIDX_SIZE 4
+#define BFI_MAX_VLAN 4095
+/* There is one free pool: 2 segments of 1 index each */
+#define BFI_IBIDX_POOL1_SIZE 2
+#define BFI_IBIDX_POOL1_ENTRY_SIZE 1
+#define BFI_IBIDX_TOTAL_POOLS 1
+#define BFI_IBIDX_TOTAL_SEGS 2 /* POOL1_SIZE */
+#define BFI_IBIDX_MAX_SEGSIZE 1
+#define init_ibidx_pool(name) \
+static struct bna_ibidx_pool name[BFI_IBIDX_TOTAL_POOLS] = \
+{ \
+ { BFI_IBIDX_POOL1_SIZE, BFI_IBIDX_POOL1_ENTRY_SIZE } \
+}
+
+#define BFI_RIT_SEG_POOL1_SIZE 1
+#define BFI_RIT_SEG_POOL1_ENTRY_SIZE 1
+#define BFI_RIT_SEG_TOTAL_POOLS 1
+#define BFI_RIT_TOTAL_SEGS 1 /* POOL1_SIZE */
+#define init_ritseg_pool(name) \
+static struct bna_ritseg_pool_cfg name[BFI_RIT_SEG_TOTAL_POOLS] = \
+{ \
+ { BFI_RIT_SEG_POOL1_SIZE, BFI_RIT_SEG_POOL1_ENTRY_SIZE } \
+}
+
+#endif /* BNA_BIOS_BUILD */
+
+#define BFI_RSS_HASH_KEY_LEN 10
+
+#define BFI_COALESCING_TIMER_UNIT 5 /* 5us */
+#define BFI_MAX_COALESCING_TIMEO 0xFF /* in 5us units */
+#define BFI_MAX_INTERPKT_COUNT 0xFF
+#define BFI_MAX_INTERPKT_TIMEO 0xF /* in 0.5us units */
+#define BFI_TX_COALESCING_TIMEO 20 /* 20 * 5 = 100us */
+#define BFI_TX_INTERPKT_COUNT 32
+#define BFI_RX_COALESCING_TIMEO 12 /* 12 * 5 = 60us */
+#define BFI_RX_INTERPKT_COUNT 6 /* Pkt Cnt = 6 */
+#define BFI_RX_INTERPKT_TIMEO 3 /* 3 * 0.5 = 1.5us */
+
+#define BFI_TXQ_WI_SIZE 64 /* bytes */
+#define BFI_RXQ_WI_SIZE 8 /* bytes */
+#define BFI_CQ_WI_SIZE 16 /* bytes */
+#define BFI_TX_MAX_WRR_QUOTA 0xFFF
+
+#define BFI_TX_MAX_VECTORS_PER_WI 4
+#define BFI_TX_MAX_VECTORS_PER_PKT 0xFF
+#define BFI_TX_MAX_DATA_PER_VECTOR 0xFFFF
+#define BFI_TX_MAX_DATA_PER_PKT 0xFFFFFF
+
+/* Small Q buffer size */
+#define BFI_SMALL_RXBUF_SIZE 128
+
+/* Defined separately since BFA_FLASH_DMA_BUF_SZ is in bfa_flash.c */
+#define BFI_FLASH_DMA_BUF_SZ 0x010000 /* 64K DMA */
+#define BFI_HW_STATS_SIZE 0x4000 /* 16K DMA */
+
+/**
+ *
+ * HW register offsets, macros
+ *
+ */
+
+/* DMA Block Register Host Window Start Address */
+#define DMA_BLK_REG_ADDR 0x00013000
+
+/* DMA Block Internal Registers */
+#define DMA_CTRL_REG0 (DMA_BLK_REG_ADDR + 0x000)
+#define DMA_CTRL_REG1 (DMA_BLK_REG_ADDR + 0x004)
+#define DMA_ERR_INT_STATUS (DMA_BLK_REG_ADDR + 0x008)
+#define DMA_ERR_INT_ENABLE (DMA_BLK_REG_ADDR + 0x00c)
+#define DMA_ERR_INT_STATUS_SET (DMA_BLK_REG_ADDR + 0x010)
+
+/* APP Block Register Address Offset from BAR0 */
+#define APP_BLK_REG_ADDR 0x00014000
+
+/* Host Function Interrupt Mask Registers */
+#define HOSTFN0_INT_MASK (APP_BLK_REG_ADDR + 0x004)
+#define HOSTFN1_INT_MASK (APP_BLK_REG_ADDR + 0x104)
+#define HOSTFN2_INT_MASK (APP_BLK_REG_ADDR + 0x304)
+#define HOSTFN3_INT_MASK (APP_BLK_REG_ADDR + 0x404)
+
+/**
+ * Host Function PCIe Error Registers
+ * Duplicates "Correctable" & "Uncorrectable"
+ * registers in PCIe Config space.
+ */
+#define FN0_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x014)
+#define FN1_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x114)
+#define FN2_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x314)
+#define FN3_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x414)
+
+/* Host Function Error Type Status Registers */
+#define FN0_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x018)
+#define FN1_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x118)
+#define FN2_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x318)
+#define FN3_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x418)
+
+/* Host Function Error Type Mask Registers */
+#define FN0_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x01c)
+#define FN1_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x11c)
+#define FN2_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x31c)
+#define FN3_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x41c)
+
+/* Catapult Host Semaphore Status Registers (App block) */
+#define HOST_SEM_STS0_REG (APP_BLK_REG_ADDR + 0x630)
+#define HOST_SEM_STS1_REG (APP_BLK_REG_ADDR + 0x634)
+#define HOST_SEM_STS2_REG (APP_BLK_REG_ADDR + 0x638)
+#define HOST_SEM_STS3_REG (APP_BLK_REG_ADDR + 0x63c)
+#define HOST_SEM_STS4_REG (APP_BLK_REG_ADDR + 0x640)
+#define HOST_SEM_STS5_REG (APP_BLK_REG_ADDR + 0x644)
+#define HOST_SEM_STS6_REG (APP_BLK_REG_ADDR + 0x648)
+#define HOST_SEM_STS7_REG (APP_BLK_REG_ADDR + 0x64c)
+
+/* PCIe Misc Register */
+#define PCIE_MISC_REG (APP_BLK_REG_ADDR + 0x200)
+
+/* Temp Sensor Control Registers */
+#define TEMPSENSE_CNTL_REG (APP_BLK_REG_ADDR + 0x250)
+#define TEMPSENSE_STAT_REG (APP_BLK_REG_ADDR + 0x254)
+
+/* APP Block local error registers */
+#define APP_LOCAL_ERR_STAT (APP_BLK_REG_ADDR + 0x258)
+#define APP_LOCAL_ERR_MSK (APP_BLK_REG_ADDR + 0x25c)
+
+/* PCIe Link Error registers */
+#define PCIE_LNK_ERR_STAT (APP_BLK_REG_ADDR + 0x260)
+#define PCIE_LNK_ERR_MSK (APP_BLK_REG_ADDR + 0x264)
+
+/**
+ * FCoE/FIP Ethertype Register
+ * 31:16 -- Chip wide value for FIP type
+ * 15:0 -- Chip wide value for FCoE type
+ */
+#define FCOE_FIP_ETH_TYPE (APP_BLK_REG_ADDR + 0x280)
+
+/**
+ * Reserved Ethertype Register
+ * 31:16 -- Reserved
+ * 15:0 -- Other ethertype
+ */
+#define RESV_ETH_TYPE (APP_BLK_REG_ADDR + 0x284)
+
+/**
+ * Host Command Status Registers
+ * Each set consists of 3 registers :
+ * clear, set, cmd
+ * 16 such register sets in all
+ * See catapult_spec.pdf for detailed functionality
+ * Put each type in a single macro accessed by _num ?
+ */
+#define HOST_CMDSTS0_CLR_REG (APP_BLK_REG_ADDR + 0x500)
+#define HOST_CMDSTS0_SET_REG (APP_BLK_REG_ADDR + 0x504)
+#define HOST_CMDSTS0_REG (APP_BLK_REG_ADDR + 0x508)
+#define HOST_CMDSTS1_CLR_REG (APP_BLK_REG_ADDR + 0x510)
+#define HOST_CMDSTS1_SET_REG (APP_BLK_REG_ADDR + 0x514)
+#define HOST_CMDSTS1_REG (APP_BLK_REG_ADDR + 0x518)
+#define HOST_CMDSTS2_CLR_REG (APP_BLK_REG_ADDR + 0x520)
+#define HOST_CMDSTS2_SET_REG (APP_BLK_REG_ADDR + 0x524)
+#define HOST_CMDSTS2_REG (APP_BLK_REG_ADDR + 0x528)
+#define HOST_CMDSTS3_CLR_REG (APP_BLK_REG_ADDR + 0x530)
+#define HOST_CMDSTS3_SET_REG (APP_BLK_REG_ADDR + 0x534)
+#define HOST_CMDSTS3_REG (APP_BLK_REG_ADDR + 0x538)
+#define HOST_CMDSTS4_CLR_REG (APP_BLK_REG_ADDR + 0x540)
+#define HOST_CMDSTS4_SET_REG (APP_BLK_REG_ADDR + 0x544)
+#define HOST_CMDSTS4_REG (APP_BLK_REG_ADDR + 0x548)
+#define HOST_CMDSTS5_CLR_REG (APP_BLK_REG_ADDR + 0x550)
+#define HOST_CMDSTS5_SET_REG (APP_BLK_REG_ADDR + 0x554)
+#define HOST_CMDSTS5_REG (APP_BLK_REG_ADDR + 0x558)
+#define HOST_CMDSTS6_CLR_REG (APP_BLK_REG_ADDR + 0x560)
+#define HOST_CMDSTS6_SET_REG (APP_BLK_REG_ADDR + 0x564)
+#define HOST_CMDSTS6_REG (APP_BLK_REG_ADDR + 0x568)
+#define HOST_CMDSTS7_CLR_REG (APP_BLK_REG_ADDR + 0x570)
+#define HOST_CMDSTS7_SET_REG (APP_BLK_REG_ADDR + 0x574)
+#define HOST_CMDSTS7_REG (APP_BLK_REG_ADDR + 0x578)
+#define HOST_CMDSTS8_CLR_REG (APP_BLK_REG_ADDR + 0x580)
+#define HOST_CMDSTS8_SET_REG (APP_BLK_REG_ADDR + 0x584)
+#define HOST_CMDSTS8_REG (APP_BLK_REG_ADDR + 0x588)
+#define HOST_CMDSTS9_CLR_REG (APP_BLK_REG_ADDR + 0x590)
+#define HOST_CMDSTS9_SET_REG (APP_BLK_REG_ADDR + 0x594)
+#define HOST_CMDSTS9_REG (APP_BLK_REG_ADDR + 0x598)
+#define HOST_CMDSTS10_CLR_REG (APP_BLK_REG_ADDR + 0x5A0)
+#define HOST_CMDSTS10_SET_REG (APP_BLK_REG_ADDR + 0x5A4)
+#define HOST_CMDSTS10_REG (APP_BLK_REG_ADDR + 0x5A8)
+#define HOST_CMDSTS11_CLR_REG (APP_BLK_REG_ADDR + 0x5B0)
+#define HOST_CMDSTS11_SET_REG (APP_BLK_REG_ADDR + 0x5B4)
+#define HOST_CMDSTS11_REG (APP_BLK_REG_ADDR + 0x5B8)
+#define HOST_CMDSTS12_CLR_REG (APP_BLK_REG_ADDR + 0x5C0)
+#define HOST_CMDSTS12_SET_REG (APP_BLK_REG_ADDR + 0x5C4)
+#define HOST_CMDSTS12_REG (APP_BLK_REG_ADDR + 0x5C8)
+#define HOST_CMDSTS13_CLR_REG (APP_BLK_REG_ADDR + 0x5D0)
+#define HOST_CMDSTS13_SET_REG (APP_BLK_REG_ADDR + 0x5D4)
+#define HOST_CMDSTS13_REG (APP_BLK_REG_ADDR + 0x5D8)
+#define HOST_CMDSTS14_CLR_REG (APP_BLK_REG_ADDR + 0x5E0)
+#define HOST_CMDSTS14_SET_REG (APP_BLK_REG_ADDR + 0x5E4)
+#define HOST_CMDSTS14_REG (APP_BLK_REG_ADDR + 0x5E8)
+#define HOST_CMDSTS15_CLR_REG (APP_BLK_REG_ADDR + 0x5F0)
+#define HOST_CMDSTS15_SET_REG (APP_BLK_REG_ADDR + 0x5F4)
+#define HOST_CMDSTS15_REG (APP_BLK_REG_ADDR + 0x5F8)
+
+/**
+ * LPU0 Block Register Address Offset from BAR0
+ * Range 0x18000 - 0x18033
+ */
+#define LPU0_BLK_REG_ADDR 0x00018000
+
+/**
+ * LPU0 Registers
+ * Should they be used directly from the host,
+ * except for diagnostics?
+ * CTL_REG : Control register
+ * CMD_REG : Triggers exec. of cmd. in
+ * Mailbox memory
+ */
+#define LPU0_MBOX_CTL_REG (LPU0_BLK_REG_ADDR + 0x000)
+#define LPU0_MBOX_CMD_REG (LPU0_BLK_REG_ADDR + 0x004)
+#define LPU0_MBOX_LINK_0REG (LPU0_BLK_REG_ADDR + 0x008)
+#define LPU1_MBOX_LINK_0REG (LPU0_BLK_REG_ADDR + 0x00c)
+#define LPU0_MBOX_STATUS_0REG (LPU0_BLK_REG_ADDR + 0x010)
+#define LPU1_MBOX_STATUS_0REG (LPU0_BLK_REG_ADDR + 0x014)
+#define LPU0_ERR_STATUS_REG (LPU0_BLK_REG_ADDR + 0x018)
+#define LPU0_ERR_SET_REG (LPU0_BLK_REG_ADDR + 0x020)
+
+/**
+ * LPU1 Block Register Address Offset from BAR0
+ * Range 0x18400 - 0x18433
+ */
+#define LPU1_BLK_REG_ADDR 0x00018400
+
+/**
+ * LPU1 Registers
+ * Same as LPU0 registers above
+ */
+#define LPU1_MBOX_CTL_REG (LPU1_BLK_REG_ADDR + 0x000)
+#define LPU1_MBOX_CMD_REG (LPU1_BLK_REG_ADDR + 0x004)
+#define LPU0_MBOX_LINK_1REG (LPU1_BLK_REG_ADDR + 0x008)
+#define LPU1_MBOX_LINK_1REG (LPU1_BLK_REG_ADDR + 0x00c)
+#define LPU0_MBOX_STATUS_1REG (LPU1_BLK_REG_ADDR + 0x010)
+#define LPU1_MBOX_STATUS_1REG (LPU1_BLK_REG_ADDR + 0x014)
+#define LPU1_ERR_STATUS_REG (LPU1_BLK_REG_ADDR + 0x018)
+#define LPU1_ERR_SET_REG (LPU1_BLK_REG_ADDR + 0x020)
+
+/**
+ * PSS Block Register Address Offset from BAR0
+ * Range 0x18800 - 0x188DB
+ */
+#define PSS_BLK_REG_ADDR 0x00018800
+
+/**
+ * PSS Registers
+ * For details, see catapult_spec.pdf
+ * ERR_STATUS_REG : Indicates error in PSS module
+ * RAM_ERR_STATUS_REG : Indicates RAM module that detected error
+ */
+#define ERR_STATUS_SET (PSS_BLK_REG_ADDR + 0x018)
+#define PSS_RAM_ERR_STATUS_REG (PSS_BLK_REG_ADDR + 0x01C)
+
+/**
+ * PSS Semaphore Lock Registers, total 16
+ * The first read when unlocked returns 0
+ * and atomically sets the register to 1.
+ * Subsequent reads return 1.
+ * To unlock, write 0 to the register.
+ * Range : 0x20 to 0x5c
+ */
+#define PSS_SEM_LOCK_REG(_num) \
+ (PSS_BLK_REG_ADDR + 0x020 + ((_num) << 2))
+
+/**
+ * PSS Semaphore Status Registers,
+ * corresponding to the lock registers above
+ */
+#define PSS_SEM_STATUS_REG(_num) \
+ (PSS_BLK_REG_ADDR + 0x060 + ((_num) << 2))
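+
+/*
+ * Lock/unlock sketch built on the semantics above (illustrative;
+ * bar0 is the ioremapped BAR0 base):
+ *
+ *	if (readl(bar0 + PSS_SEM_LOCK_REG(n)) == 0) {
+ *		... semaphore n is now held ...
+ *		writel(0, bar0 + PSS_SEM_LOCK_REG(n));
+ *	}
+ */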
+
+/**
+ * Catapult CPQ Registers
+ * Defines for Mailbox Registers
+ * Used to send mailbox commands from the host
+ * to firmware. The data part is written to the
+ * MBox memory; the registers are used to indicate
+ * that a command is resident in memory.
+ *
+ * Note : LPU0<->LPU1 mailboxes are not listed here
+ */
+#define CPQ_BLK_REG_ADDR 0x00019000
+
+#define HOSTFN0_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x130)
+#define HOSTFN0_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x134)
+#define LPU0_HOSTFN0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x138)
+#define LPU1_HOSTFN0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x13C)
+
+#define HOSTFN1_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x140)
+#define HOSTFN1_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x144)
+#define LPU0_HOSTFN1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x148)
+#define LPU1_HOSTFN1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x14C)
+
+#define HOSTFN2_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x170)
+#define HOSTFN2_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x174)
+#define LPU0_HOSTFN2_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x178)
+#define LPU1_HOSTFN2_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x17C)
+
+#define HOSTFN3_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x180)
+#define HOSTFN3_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x184)
+#define LPU0_HOSTFN3_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x188)
+#define LPU1_HOSTFN3_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x18C)
+
+/* Host Function Force Parity Error Registers */
+#define HOSTFN0_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x120)
+#define HOSTFN1_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x124)
+#define HOSTFN2_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x128)
+#define HOSTFN3_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x12C)
+
+/* LL Port[0|1] Halt Mask Registers */
+#define LL_HALT_MSK_P0 (CPQ_BLK_REG_ADDR + 0x1A0)
+#define LL_HALT_MSK_P1 (CPQ_BLK_REG_ADDR + 0x1B0)
+
+/* LL Port[0|1] Error Mask Registers */
+#define LL_ERR_MSK_P0 (CPQ_BLK_REG_ADDR + 0x1D0)
+#define LL_ERR_MSK_P1 (CPQ_BLK_REG_ADDR + 0x1D4)
+
+/* EMC FLI (Flash Controller) Block Register Address Offset from BAR0 */
+#define FLI_BLK_REG_ADDR 0x0001D000
+
+/* EMC FLI Registers */
+#define FLI_CMD_REG (FLI_BLK_REG_ADDR + 0x000)
+#define FLI_ADDR_REG (FLI_BLK_REG_ADDR + 0x004)
+#define FLI_CTL_REG (FLI_BLK_REG_ADDR + 0x008)
+#define FLI_WRDATA_REG (FLI_BLK_REG_ADDR + 0x00C)
+#define FLI_RDDATA_REG (FLI_BLK_REG_ADDR + 0x010)
+#define FLI_DEV_STATUS_REG (FLI_BLK_REG_ADDR + 0x014)
+#define FLI_SIG_WD_REG (FLI_BLK_REG_ADDR + 0x018)
+
+/**
+ * RO register
+ * 31:16 -- Vendor Id
+ * 15:0 -- Device Id
+ */
+#define FLI_DEV_VENDOR_REG (FLI_BLK_REG_ADDR + 0x01C)
+#define FLI_ERR_STATUS_REG (FLI_BLK_REG_ADDR + 0x020)
+
+/**
+ * RAD (RxAdm) Block Register Address Offset from BAR0
+ * RAD0 Range : 0x20000 - 0x203FF
+ * RAD1 Range : 0x20400 - 0x207FF
+ */
+#define RAD0_BLK_REG_ADDR 0x00020000
+#define RAD1_BLK_REG_ADDR 0x00020400
+
+/* RAD0 Registers */
+#define RAD0_CTL_REG (RAD0_BLK_REG_ADDR + 0x000)
+#define RAD0_PE_PARM_REG (RAD0_BLK_REG_ADDR + 0x004)
+#define RAD0_BCN_REG (RAD0_BLK_REG_ADDR + 0x008)
+
+/* Default function ID register */
+#define RAD0_DEFAULT_REG (RAD0_BLK_REG_ADDR + 0x00C)
+
+/* Default promiscuous ID register */
+#define RAD0_PROMISC_REG (RAD0_BLK_REG_ADDR + 0x010)
+
+#define RAD0_BCNQ_REG (RAD0_BLK_REG_ADDR + 0x014)
+
+/*
+ * This register selects 1 of 8 PM Q's using
+ * VLAN pri, for non-BCN packets without a VLAN tag
+ */
+#define RAD0_DEFAULTQ_REG (RAD0_BLK_REG_ADDR + 0x018)
+
+#define RAD0_ERR_STS (RAD0_BLK_REG_ADDR + 0x01C)
+#define RAD0_SET_ERR_STS (RAD0_BLK_REG_ADDR + 0x020)
+#define RAD0_ERR_INT_EN (RAD0_BLK_REG_ADDR + 0x024)
+#define RAD0_FIRST_ERR (RAD0_BLK_REG_ADDR + 0x028)
+#define RAD0_FORCE_ERR (RAD0_BLK_REG_ADDR + 0x02C)
+
+#define RAD0_IF_RCVD (RAD0_BLK_REG_ADDR + 0x030)
+#define RAD0_IF_RCVD_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x034)
+#define RAD0_IF_RCVD_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x038)
+#define RAD0_IF_RCVD_VLAN (RAD0_BLK_REG_ADDR + 0x03C)
+#define RAD0_IF_RCVD_UCAST (RAD0_BLK_REG_ADDR + 0x040)
+#define RAD0_IF_RCVD_UCAST_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x044)
+#define RAD0_IF_RCVD_UCAST_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x048)
+#define RAD0_IF_RCVD_UCAST_VLAN (RAD0_BLK_REG_ADDR + 0x04C)
+#define RAD0_IF_RCVD_MCAST (RAD0_BLK_REG_ADDR + 0x050)
+#define RAD0_IF_RCVD_MCAST_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x054)
+#define RAD0_IF_RCVD_MCAST_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x058)
+#define RAD0_IF_RCVD_MCAST_VLAN (RAD0_BLK_REG_ADDR + 0x05C)
+#define RAD0_IF_RCVD_BCAST (RAD0_BLK_REG_ADDR + 0x060)
+#define RAD0_IF_RCVD_BCAST_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x064)
+#define RAD0_IF_RCVD_BCAST_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x068)
+#define RAD0_IF_RCVD_BCAST_VLAN (RAD0_BLK_REG_ADDR + 0x06C)
+#define RAD0_DROPPED_FRAMES (RAD0_BLK_REG_ADDR + 0x070)
+
+#define RAD0_MAC_MAN_1H (RAD0_BLK_REG_ADDR + 0x080)
+#define RAD0_MAC_MAN_1L (RAD0_BLK_REG_ADDR + 0x084)
+#define RAD0_MAC_MAN_2H (RAD0_BLK_REG_ADDR + 0x088)
+#define RAD0_MAC_MAN_2L (RAD0_BLK_REG_ADDR + 0x08C)
+#define RAD0_MAC_MAN_3H (RAD0_BLK_REG_ADDR + 0x090)
+#define RAD0_MAC_MAN_3L (RAD0_BLK_REG_ADDR + 0x094)
+#define RAD0_MAC_MAN_4H (RAD0_BLK_REG_ADDR + 0x098)
+#define RAD0_MAC_MAN_4L (RAD0_BLK_REG_ADDR + 0x09C)
+
+#define RAD0_LAST4_IP (RAD0_BLK_REG_ADDR + 0x100)
+
+/* RAD1 Registers */
+#define RAD1_CTL_REG (RAD1_BLK_REG_ADDR + 0x000)
+#define RAD1_PE_PARM_REG (RAD1_BLK_REG_ADDR + 0x004)
+#define RAD1_BCN_REG (RAD1_BLK_REG_ADDR + 0x008)
+
+/* Default function ID register */
+#define RAD1_DEFAULT_REG (RAD1_BLK_REG_ADDR + 0x00C)
+
+/* Promiscuous function ID register */
+#define RAD1_PROMISC_REG (RAD1_BLK_REG_ADDR + 0x010)
+
+#define RAD1_BCNQ_REG (RAD1_BLK_REG_ADDR + 0x014)
+
+/*
+ * This register selects 1 of 8 PM Q's using
+ * VLAN pri, for non-BCN packets without a VLAN tag
+ */
+#define RAD1_DEFAULTQ_REG (RAD1_BLK_REG_ADDR + 0x018)
+
+#define RAD1_ERR_STS (RAD1_BLK_REG_ADDR + 0x01C)
+#define RAD1_SET_ERR_STS (RAD1_BLK_REG_ADDR + 0x020)
+#define RAD1_ERR_INT_EN (RAD1_BLK_REG_ADDR + 0x024)
+
+/**
+ * TXA Block Register Address Offset from BAR0
+ * TXA0 Range : 0x21000 - 0x213FF
+ * TXA1 Range : 0x21400 - 0x217FF
+ */
+#define TXA0_BLK_REG_ADDR 0x00021000
+#define TXA1_BLK_REG_ADDR 0x00021400
+
+/* TXA Registers */
+#define TXA0_CTRL_REG (TXA0_BLK_REG_ADDR + 0x000)
+#define TXA1_CTRL_REG (TXA1_BLK_REG_ADDR + 0x000)
+
+/**
+ * TSO Sequence # Registers (RO)
+ * Total 8 (for 8 queues)
+ * Holds the last seq.# for TSO frames
+ * See catapult_spec.pdf for more details
+ */
+#define TXA0_TSO_TCP_SEQ_REG(_num) \
+ (TXA0_BLK_REG_ADDR + 0x020 + ((_num) << 2))
+
+#define TXA1_TSO_TCP_SEQ_REG(_num) \
+ (TXA1_BLK_REG_ADDR + 0x020 + ((_num) << 2))
+
+/**
+ * TSO IP ID # Registers (RO)
+ * Total 8 (for 8 queues)
+ * Holds the last IP ID for TSO frames
+ * See catapult_spec.pdf for more details
+ */
+#define TXA0_TSO_IP_INFO_REG(_num) \
+ (TXA0_BLK_REG_ADDR + 0x040 + ((_num) << 2))
+
+#define TXA1_TSO_IP_INFO_REG(_num) \
+ (TXA1_BLK_REG_ADDR + 0x040 + ((_num) << 2))
+
+/**
+ * RXA Block Register Address Offset from BAR0
+ * RXA0 Range : 0x21800 - 0x21BFF
+ * RXA1 Range : 0x21C00 - 0x21FFF
+ */
+#define RXA0_BLK_REG_ADDR 0x00021800
+#define RXA1_BLK_REG_ADDR 0x00021C00
+
+/* RXA Registers */
+#define RXA0_CTL_REG (RXA0_BLK_REG_ADDR + 0x040)
+#define RXA1_CTL_REG (RXA1_BLK_REG_ADDR + 0x040)
+
+/**
+ * PPLB Block Register Address Offset from BAR0
+ * PPLB0 Range : 0x22000 - 0x223FF
+ * PPLB1 Range : 0x22400 - 0x227FF
+ */
+#define PLB0_BLK_REG_ADDR 0x00022000
+#define PLB1_BLK_REG_ADDR 0x00022400
+
+/**
+ * PLB Registers
+ * Holds RL timer used time stamps in RLT tagged frames
+ */
+#define PLB0_ECM_TIMER_REG (PLB0_BLK_REG_ADDR + 0x05C)
+#define PLB1_ECM_TIMER_REG (PLB1_BLK_REG_ADDR + 0x05C)
+
+/* Controls the rate-limiter on each of the priority class */
+#define PLB0_RL_CTL (PLB0_BLK_REG_ADDR + 0x060)
+#define PLB1_RL_CTL (PLB1_BLK_REG_ADDR + 0x060)
+
+/**
+ * Max byte register, total 8, 0-7
+ * see catapult_spec.pdf for details
+ */
+#define PLB0_RL_MAX_BC(_num) \
+ (PLB0_BLK_REG_ADDR + 0x064 + ((_num) << 2))
+#define PLB1_RL_MAX_BC(_num) \
+ (PLB1_BLK_REG_ADDR + 0x064 + ((_num) << 2))
+
+/**
+ * RL Time Unit Register for priority 0-7
+ * 4 bits per priority
+ * (2^rl_unit)*1us is the actual time period
+ */
+#define PLB0_RL_TU_PRIO (PLB0_BLK_REG_ADDR + 0x084)
+#define PLB1_RL_TU_PRIO (PLB1_BLK_REG_ADDR + 0x084)
+
+/**
+ * RL byte count register,
+ * bytes transmitted in (rl_unit*1)us time period
+ * 1 per priority, 8 in all, 0-7.
+ */
+#define PLB0_RL_BYTE_CNT(_num) \
+ (PLB0_BLK_REG_ADDR + 0x088 + ((_num) << 2))
+#define PLB1_RL_BYTE_CNT(_num) \
+ (PLB1_BLK_REG_ADDR + 0x088 + ((_num) << 2))
+
+/**
+ * RL Min factor register
+ * 2 bits per priority,
+ * 4 factors possible: 1, 0.5, 0.25, 0
+ * 2'b00 - 0; 2'b01 - 0.25; 2'b10 - 0.5; 2'b11 - 1
+ */
+#define PLB0_RL_MIN_REG (PLB0_BLK_REG_ADDR + 0x0A8)
+#define PLB1_RL_MIN_REG (PLB1_BLK_REG_ADDR + 0x0A8)
+
+/**
+ * RL Max factor register
+ * 2 bits per priority,
+ * 4 factors possible: 1, 0.5, 0.25, 0
+ * 2'b00 - 0; 2'b01 - 0.25; 2'b10 - 0.5; 2'b11 - 1
+ */
+#define PLB0_RL_MAX_REG (PLB0_BLK_REG_ADDR + 0x0AC)
+#define PLB1_RL_MAX_REG (PLB1_BLK_REG_ADDR + 0x0AC)
+
+/* MAC SERDES Address Paging register */
+#define PLB0_EMS_ADD_REG (PLB0_BLK_REG_ADDR + 0xD0)
+#define PLB1_EMS_ADD_REG (PLB1_BLK_REG_ADDR + 0xD0)
+
+/* LL EMS Registers */
+#define LL_EMS0_BLK_REG_ADDR 0x00026800
+#define LL_EMS1_BLK_REG_ADDR 0x00026C00
+
+/**
+ * BPC Block Register Address Offset from BAR0
+ * BPC0 Range : 0x23000 - 0x233FF
+ * BPC1 Range : 0x23400 - 0x237FF
+ */
+#define BPC0_BLK_REG_ADDR 0x00023000
+#define BPC1_BLK_REG_ADDR 0x00023400
+
+/**
+ * PMM Block Register Address Offset from BAR0
+ * PMM0 Range : 0x23800 - 0x23BFF
+ * PMM1 Range : 0x23C00 - 0x23FFF
+ */
+#define PMM0_BLK_REG_ADDR 0x00023800
+#define PMM1_BLK_REG_ADDR 0x00023C00
+
+/**
+ * HQM Block Register Address Offset from BAR0
+ * HQM0 Range : 0x24000 - 0x243FF
+ * HQM1 Range : 0x24400 - 0x247FF
+ */
+#define HQM0_BLK_REG_ADDR 0x00024000
+#define HQM1_BLK_REG_ADDR 0x00024400
+
+/**
+ * HQM Control Register
+ * Controls some aspects of IB
+ * See catapult_spec.pdf for details
+ */
+#define HQM0_CTL_REG (HQM0_BLK_REG_ADDR + 0x000)
+#define HQM1_CTL_REG (HQM1_BLK_REG_ADDR + 0x000)
+
+/**
+ * HQM Stop Q Semaphore Registers.
+ * Only one Queue resource can be stopped at
+ * any given time. This register controls access
+ * to the single stop Q resource.
+ * See catapult_spec.pdf for details
+ */
+#define HQM0_RXQ_STOP_SEM (HQM0_BLK_REG_ADDR + 0x028)
+#define HQM0_TXQ_STOP_SEM (HQM0_BLK_REG_ADDR + 0x02C)
+#define HQM1_RXQ_STOP_SEM (HQM1_BLK_REG_ADDR + 0x028)
+#define HQM1_TXQ_STOP_SEM (HQM1_BLK_REG_ADDR + 0x02C)
+
+/**
+ * LUT Block Register Address Offset from BAR0
+ * LUT0 Range : 0x25800 - 0x25BFF
+ * LUT1 Range : 0x25C00 - 0x25FFF
+ */
+#define LUT0_BLK_REG_ADDR 0x00025800
+#define LUT1_BLK_REG_ADDR 0x00025C00
+
+/**
+ * LUT Registers
+ * See catapult_spec.pdf for details
+ */
+#define LUT0_ERR_STS (LUT0_BLK_REG_ADDR + 0x000)
+#define LUT1_ERR_STS (LUT1_BLK_REG_ADDR + 0x000)
+#define LUT0_SET_ERR_STS (LUT0_BLK_REG_ADDR + 0x004)
+#define LUT1_SET_ERR_STS (LUT1_BLK_REG_ADDR + 0x004)
+
+/**
+ * TRC (Debug/Trace) Register Offset from BAR0
+ * Range : 0x26000 -- 0x263FFF
+ */
+#define TRC_BLK_REG_ADDR 0x00026000
+
+/**
+ * TRC Registers
+ * See catapult_spec.pdf for details of each
+ */
+#define TRC_CTL_REG (TRC_BLK_REG_ADDR + 0x000)
+#define TRC_MODS_REG (TRC_BLK_REG_ADDR + 0x004)
+#define TRC_TRGC_REG (TRC_BLK_REG_ADDR + 0x008)
+#define TRC_CNT1_REG (TRC_BLK_REG_ADDR + 0x010)
+#define TRC_CNT2_REG (TRC_BLK_REG_ADDR + 0x014)
+#define TRC_NXTS_REG (TRC_BLK_REG_ADDR + 0x018)
+#define TRC_DIRR_REG (TRC_BLK_REG_ADDR + 0x01C)
+
+/**
+ * TRC Trigger match filters, total 10
+ * Determines the trigger condition
+ */
+#define TRC_TRGM_REG(_num) \
+ (TRC_BLK_REG_ADDR + 0x040 + ((_num) << 2))
+
+/**
+ * TRC Next State filters, total 10
+ * Determines the next state conditions
+ */
+#define TRC_NXTM_REG(_num) \
+ (TRC_BLK_REG_ADDR + 0x080 + ((_num) << 2))
+
+/**
+ * TRC Store Match filters, total 10
+ * Determines the store conditions
+ */
+#define TRC_STRM_REG(_num) \
+ (TRC_BLK_REG_ADDR + 0x0C0 + ((_num) << 2))
+
+/* DOORBELLS ACCESS */
+
+/**
+ * Catapult doorbells
+ * Each doorbell-queue set has
+ * 1 RxQ, 1 TxQ, 2 IBs in that order
+ * Each entry is 32 bytes, even though only 1 word
+ * is used. In the non-VM case each doorbell-q set is
+ * separated by 128 bytes; in the VM case it is separated
+ * by 4K bytes.
+ * Non VM case Range : 0x38000 - 0x39FFF
+ * VM case Range : 0x100000 - 0x11FFFF
+ * The range applies to both HQMs
+ */
+#define HQM_DOORBELL_BLK_BASE_ADDR 0x00038000
+#define HQM_DOORBELL_VM_BLK_BASE_ADDR 0x00100000
+
+/* MEMORY ACCESS */
+
+/**
+ * Catapult H/W Block Memory Access Address
+ * To the host a memory space of 32K (page) is visible
+ * at a time. The address range is from 0x08000 to 0x0FFFF
+ */
+#define HW_BLK_HOST_MEM_ADDR 0x08000
+
+/**
+ * Catapult LUT Memory Access Page Numbers
+ * Range : LUT0 0xa0-0xa1
+ * LUT1 0xa2-0xa3
+ */
+#define LUT0_MEM_BLK_BASE_PG_NUM 0x000000A0
+#define LUT1_MEM_BLK_BASE_PG_NUM 0x000000A2
+
+/**
+ * Catapult RxFn Database Memory Block Base Offset
+ *
+ * The Rx function database exists in LUT block.
+ * In PCIe space this is accessible as a 256x32
+ * bit block. Each entry in this database is 4
+ * (4 byte) words. Max. entries is 64.
+ * Address of an entry corresponding to a function
+ * = base_addr + (function_no. * 16)
+ */
+#define RX_FNDB_RAM_BASE_OFFSET 0x0000B400
+
+/**
+ * Catapult TxFn Database Memory Block Base Offset Address
+ *
+ * The Tx function database exists in LUT block.
+ * In PCIe space this is accessible as a 64x32
+ * bit block. Each entry in this database is 1
+ * (4 byte) word. Max. entries is 64.
+ * Address of an entry corresponding to a function
+ * = base_addr + (function_no. * 4)
+ */
+#define TX_FNDB_RAM_BASE_OFFSET 0x0000B800
+
+/**
+ * Catapult Unicast CAM Base Offset Address
+ *
+ * Exists in LUT memory space.
+ * Shared by both the LL & FCoE driver.
+ * Size is 256x48 bits; mapped to PCIe space
+ * 512x32 bit blocks. For each address, bits
+ * are written in the order : [47:32] and then
+ * [31:0].
+ */
+#define UCAST_CAM_BASE_OFFSET 0x0000A800
+
+/**
+ * Catapult Unicast RAM Base Offset Address
+ *
+ * Exists in LUT memory space.
+ * Shared by both the LL & FCoE driver.
+ * Size is 256x9 bits.
+ */
+#define UCAST_RAM_BASE_OFFSET 0x0000B000
+
+/**
+ * Catapult Multicast CAM Base Offset Address
+ *
+ * Exists in LUT memory space.
+ * Shared by both the LL & FCoE driver.
+ * Size is 256x48 bits; mapped to PCIe space
+ * 512x32 bit blocks. For each address, bits
+ * are written in the order : [47:32] and then
+ * [31:0].
+ */
+#define MCAST_CAM_BASE_OFFSET 0x0000A000
+
+/**
+ * Catapult VLAN RAM Base Offset Address
+ *
+ * Exists in LUT memory space.
+ * Size is 4096x66 bits; mapped to PCIe space as
+ * 8192x32 bit blocks.
+ * All the 4K entries are within the address range
+ * 0x0000 to 0x8000, so in the first LUT page.
+ */
+#define VLAN_RAM_BASE_OFFSET 0x00000000
+
+/**
+ * Catapult Tx Stats RAM Base Offset Address
+ *
+ * Exists in LUT memory space.
+ * Size is 1024x33 bits;
+ * Each Tx function has 64 bytes of space
+ */
+#define TX_STATS_RAM_BASE_OFFSET 0x00009000
+
+/**
+ * Catapult Rx Stats RAM Base Offset Address
+ *
+ * Exists in LUT memory space.
+ * Size is 1024x33 bits;
+ * Each Rx function has 64 bytes of space
+ */
+#define RX_STATS_RAM_BASE_OFFSET 0x00008000
+
+/* Catapult RXA Memory Access Page Numbers */
+#define RXA0_MEM_BLK_BASE_PG_NUM 0x0000008C
+#define RXA1_MEM_BLK_BASE_PG_NUM 0x0000008D
+
+/**
+ * Catapult Multicast Vector Table Base Offset Address
+ *
+ * Exists in RxA memory space.
+ * Organized as 512x65 bit block.
+ * However, 16 bytes are allocated for each entry (power of 2).
+ * Total size 512*16 bytes.
+ * There are two logical divisions, 256 entries each :
+ * a) Entries 0x00 to 0xff (256) -- Approx. MVT
+ * Offset 0x000 to 0xFFF
+ * b) Entries 0x100 to 0x1ff (256) -- Exact MVT
+ * Offsets 0x1000 to 0x1FFF
+ */
+#define MCAST_APPROX_MVT_BASE_OFFSET 0x00000000
+#define MCAST_EXACT_MVT_BASE_OFFSET 0x00001000
+
+/**
+ * Catapult RxQ Translate Table (RIT) Base Offset Address
+ *
+ * Exists in RxA memory space
+ * Total no. of entries 64
+ * Each entry is 1 (4 byte) word.
+ * 31:12 -- Reserved
+ * 11:0 -- Two 6 bit RxQ Ids
+ */
+#define FUNCTION_TO_RXQ_TRANSLATE 0x00002000
+
+/* Catapult RxAdm (RAD) Memory Access Page Numbers */
+#define RAD0_MEM_BLK_BASE_PG_NUM 0x00000086
+#define RAD1_MEM_BLK_BASE_PG_NUM 0x00000087
+
+/**
+ * Catapult RSS Table Base Offset Address
+ *
+ * Exists in RAD memory space.
+ * Each entry is 352 bits, but aligned on a
+ * 64 byte (512 bit) boundary. Accessed as
+ * 4 byte words, the whole entry can be
+ * broken into 11 word accesses.
+ */
+#define RSS_TABLE_BASE_OFFSET 0x00000800
+
+/**
+ * Catapult CPQ Block Page Number
+ * This value is written to the page number registers
+ * to access the memory associated with the mailboxes.
+ */
+#define CPQ_BLK_PG_NUM 0x00000005
+
+/**
+ * Clarification :
+ * LL functions are 2 & 3; can HostFn0/HostFn1
+ * <-> LPU0/LPU1 memories be used ?
+ */
+/**
+ * Catapult HostFn0/HostFn1 to LPU0/LPU1 Mbox memory
+ * Per catapult_spec.pdf, the offset of the mbox
+ * memory is in the register space at an offset of 0x200
+ */
+#define CPQ_BLK_REG_MBOX_ADDR (CPQ_BLK_REG_ADDR + 0x200)
+
+#define HOSTFN_LPU_MBOX (CPQ_BLK_REG_MBOX_ADDR + 0x000)
+
+/* Catapult LPU0/LPU1 to HostFn0/HostFn1 Mbox memory */
+#define LPU_HOSTFN_MBOX (CPQ_BLK_REG_MBOX_ADDR + 0x080)
+
+/**
+ * Catapult HQM Block Page Number
+ * This is written to the page number register for
+ * the appropriate function to access the memory
+ * associated with HQM
+ */
+#define HQM0_BLK_PG_NUM 0x00000096
+#define HQM1_BLK_PG_NUM 0x00000097
+
+/**
+ * Note that TxQ and RxQ entries are interlaced
+ * in the HQM memory, i.e. RXQ0, TXQ0, RXQ1, TXQ1, etc.
+ */
+
+#define HQM_RXTX_Q_RAM_BASE_OFFSET 0x00004000
+
+/**
+ * CQ Memory
+ * Exists in HQM Memory space
+ * Each entry is 16 (4 byte) words of which
+ * only 12 words are used for configuration
+ * Total 64 entries per HQM memory space
+ */
+#define HQM_CQ_RAM_BASE_OFFSET 0x00006000
+
+/**
+ * Interrupt Block (IB) Memory
+ * Exists in HQM Memory space
+ * Each entry is 8 (4 byte) words of which
+ * only 5 words are used for configuration
+ * Total 128 entries per HQM memory space
+ */
+#define HQM_IB_RAM_BASE_OFFSET 0x00001000
+
+/**
+ * Index Table (IT) Memory
+ * Exists in HQM Memory space
+ * Each entry is 1 (4 byte) word which
+ * is used for configuration
+ * Total 128 entries per HQM memory space
+ */
+#define HQM_INDX_TBL_RAM_BASE_OFFSET 0x00002000
+
+/**
+ * PSS Block Memory Page Number
+ * This is written to the appropriate page number
+ * register to access the CPU memory.
+ * Also known as the PSS secondary memory (SMEM).
+ * Range : 0x180 to 0x1CF
+ * See catapult_spec.pdf for details
+ */
+#define PSS_BLK_PG_NUM 0x00000180
+
+/**
+ * Offsets of different instances of PSS SMEM
+ * 2.5M of contiguous 1T memory space : 2 blocks
+ * of 1M each (32 pages each, page=32KB) and 4 smaller
+ * blocks of 128K each (4 pages each, page=32KB)
+ * PSS_LMEM_INST0 is used for firmware download
+ */
+#define PSS_LMEM_INST0 0x00000000
+#define PSS_LMEM_INST1 0x00100000
+#define PSS_LMEM_INST2 0x00200000
+#define PSS_LMEM_INST3 0x00220000
+#define PSS_LMEM_INST4 0x00240000
+#define PSS_LMEM_INST5 0x00260000
+
+#define BNA_PCI_REG_CT_ADDRSZ (0x40000)
+
+#define BNA_GET_PAGE_NUM(_base_page, _offset) \
+ ((_base_page) + ((_offset) >> 15))
+
+#define BNA_GET_PAGE_OFFSET(_offset) \
+ ((_offset) & 0x7fff)
+
+#define BNA_GET_MEM_BASE_ADDR(_bar0, _base_offset) \
+ ((_bar0) + HW_BLK_HOST_MEM_ADDR \
+ + BNA_GET_PAGE_OFFSET((_base_offset)))
+
+#define BNA_GET_VLAN_MEM_ENTRY_ADDR(_bar0, _fn_id, _vlan_id)\
+ (_bar0 + (HW_BLK_HOST_MEM_ADDR) \
+ + (BNA_GET_PAGE_OFFSET(VLAN_RAM_BASE_OFFSET)) \
+ + (((_fn_id) & 0x3f) << 9) \
+ + (((_vlan_id) & 0xfe0) >> 3))
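+
+/*
+ * Worked example (sketch): RX_FNDB_RAM_BASE_OFFSET (0xB400) with the
+ * LUT0 base page gives BNA_GET_PAGE_NUM(0xA0, 0xB400) = 0xA1 and
+ * BNA_GET_PAGE_OFFSET(0xB400) = 0x3400; once page 0xA1 is written to
+ * the page-address register the entry appears at
+ * BNA_GET_MEM_BASE_ADDR(bar0, 0xB400) = bar0 + 0x8000 + 0x3400.
+ */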
+
+/**
+ *
+ * Interrupt related bits, flags and macros
+ *
+ */
+
+#define __LPU02HOST_MBOX0_STATUS_BITS 0x00100000
+#define __LPU12HOST_MBOX0_STATUS_BITS 0x00200000
+#define __LPU02HOST_MBOX1_STATUS_BITS 0x00400000
+#define __LPU12HOST_MBOX1_STATUS_BITS 0x00800000
+
+#define __LPU02HOST_MBOX0_MASK_BITS 0x00100000
+#define __LPU12HOST_MBOX0_MASK_BITS 0x00200000
+#define __LPU02HOST_MBOX1_MASK_BITS 0x00400000
+#define __LPU12HOST_MBOX1_MASK_BITS 0x00800000
+
+#define __LPU2HOST_MBOX_MASK_BITS \
+ (__LPU02HOST_MBOX0_MASK_BITS | __LPU02HOST_MBOX1_MASK_BITS | \
+ __LPU12HOST_MBOX0_MASK_BITS | __LPU12HOST_MBOX1_MASK_BITS)
+
+#define __LPU2HOST_IB_STATUS_BITS 0x0000ffff
+
+#define BNA_IS_LPU0_MBOX_INTR(_intr_status) \
+ ((_intr_status) & (__LPU02HOST_MBOX0_STATUS_BITS | \
+ __LPU02HOST_MBOX1_STATUS_BITS))
+
+#define BNA_IS_LPU1_MBOX_INTR(_intr_status) \
+ ((_intr_status) & (__LPU12HOST_MBOX0_STATUS_BITS | \
+ __LPU12HOST_MBOX1_STATUS_BITS))
+
+#define BNA_IS_MBOX_INTR(_intr_status) \
+ ((_intr_status) & \
+ (__LPU02HOST_MBOX0_STATUS_BITS | \
+ __LPU02HOST_MBOX1_STATUS_BITS | \
+ __LPU12HOST_MBOX0_STATUS_BITS | \
+ __LPU12HOST_MBOX1_STATUS_BITS))
+
+#define __EMC_ERROR_STATUS_BITS 0x00010000
+#define __LPU0_ERROR_STATUS_BITS 0x00020000
+#define __LPU1_ERROR_STATUS_BITS 0x00040000
+#define __PSS_ERROR_STATUS_BITS 0x00080000
+
+#define __HALT_STATUS_BITS 0x01000000
+
+#define __EMC_ERROR_MASK_BITS 0x00010000
+#define __LPU0_ERROR_MASK_BITS 0x00020000
+#define __LPU1_ERROR_MASK_BITS 0x00040000
+#define __PSS_ERROR_MASK_BITS 0x00080000
+
+#define __HALT_MASK_BITS 0x01000000
+
+#define __ERROR_MASK_BITS \
+ (__EMC_ERROR_MASK_BITS | __LPU0_ERROR_MASK_BITS | \
+ __LPU1_ERROR_MASK_BITS | __PSS_ERROR_MASK_BITS | \
+ __HALT_MASK_BITS)
+
+#define BNA_IS_ERR_INTR(_intr_status) \
+ ((_intr_status) & \
+ (__EMC_ERROR_STATUS_BITS | \
+ __LPU0_ERROR_STATUS_BITS | \
+ __LPU1_ERROR_STATUS_BITS | \
+ __PSS_ERROR_STATUS_BITS | \
+ __HALT_STATUS_BITS))
+
+#define BNA_IS_MBOX_ERR_INTR(_intr_status) \
+ (BNA_IS_MBOX_INTR((_intr_status)) | \
+ BNA_IS_ERR_INTR((_intr_status)))
+
+#define BNA_IS_INTX_DATA_INTR(_intr_status) \
+ ((_intr_status) & __LPU2HOST_IB_STATUS_BITS)
+
+#define BNA_INTR_STATUS_MBOX_CLR(_intr_status) \
+do { \
+ (_intr_status) &= ~(__LPU02HOST_MBOX0_STATUS_BITS | \
+ __LPU02HOST_MBOX1_STATUS_BITS | \
+ __LPU12HOST_MBOX0_STATUS_BITS | \
+ __LPU12HOST_MBOX1_STATUS_BITS); \
+} while (0)
+
+#define BNA_INTR_STATUS_ERR_CLR(_intr_status) \
+do { \
+ (_intr_status) &= ~(__EMC_ERROR_STATUS_BITS | \
+ __LPU0_ERROR_STATUS_BITS | \
+ __LPU1_ERROR_STATUS_BITS | \
+ __PSS_ERROR_STATUS_BITS | \
+ __HALT_STATUS_BITS); \
+} while (0)
+
+#define bna_intx_disable(_bna, _cur_mask) \
+{ \
+ (_cur_mask) = readl((_bna)->regs.fn_int_mask);\
+ writel(0xffffffff, (_bna)->regs.fn_int_mask);\
+}
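+
+/*
+ * Save/restore pattern (sketch):
+ *
+ *	u32 mask;
+ *
+ *	bna_intx_disable(bna, mask);	saves the old mask, masks all
+ *	... critical section ...
+ *	bna_intx_enable(bna, mask);	restores the saved mask
+ */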
+
+#define bna_intx_enable(bna, new_mask) \
+ writel((new_mask), (bna)->regs.fn_int_mask)
+
+#define bna_mbox_intr_disable(bna) \
+ writel((readl((bna)->regs.fn_int_mask) | \
+ (__LPU2HOST_MBOX_MASK_BITS | __ERROR_MASK_BITS)), \
+ (bna)->regs.fn_int_mask)
+
+#define bna_mbox_intr_enable(bna) \
+ writel((readl((bna)->regs.fn_int_mask) & \
+ ~(__LPU2HOST_MBOX_MASK_BITS | __ERROR_MASK_BITS)), \
+ (bna)->regs.fn_int_mask)
+
+#define bna_intr_status_get(_bna, _status) \
+{ \
+ (_status) = readl((_bna)->regs.fn_int_status); \
+ if ((_status)) { \
+ writel((_status) & ~(__LPU02HOST_MBOX0_STATUS_BITS |\
+ __LPU02HOST_MBOX1_STATUS_BITS |\
+ __LPU12HOST_MBOX0_STATUS_BITS |\
+ __LPU12HOST_MBOX1_STATUS_BITS), \
+ (_bna)->regs.fn_int_status);\
+ } \
+}
+
+#define bna_intr_status_get_no_clr(_bna, _status) \
+ (_status) = readl((_bna)->regs.fn_int_status)
+
+#define bna_intr_mask_get(bna, mask) \
+ (*mask) = readl((bna)->regs.fn_int_mask)
+
+#define bna_intr_ack(bna, intr_bmap) \
+ writel((intr_bmap), (bna)->regs.fn_int_status)
+
+#define bna_ib_intx_disable(bna, ib_id) \
+ writel(readl((bna)->regs.fn_int_mask) | \
+ (1 << (ib_id)), \
+ (bna)->regs.fn_int_mask)
+
+#define bna_ib_intx_enable(bna, ib_id) \
+ writel(readl((bna)->regs.fn_int_mask) & \
+ ~(1 << (ib_id)), \
+ (bna)->regs.fn_int_mask)
+
+#define bna_mbox_msix_idx_set(_device) \
+do {\
+ writel(((_device)->vector & 0x000001FF), \
+ (_device)->bna->pcidev.pci_bar_kva + \
+ reg_offset[(_device)->bna->pcidev.pci_func].msix_idx);\
+} while (0)
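+
+/*
+ * INTx ISR dispatch sketch using the macros above (illustrative):
+ *
+ *	u32 intr_status;
+ *
+ *	bna_intr_status_get(bna, intr_status);
+ *	if (BNA_IS_MBOX_ERR_INTR(intr_status))
+ *		... service mailbox/error events ...
+ *	if (BNA_IS_INTX_DATA_INTR(intr_status))
+ *		... schedule NAPI for the IBs set in bits 15:0 ...
+ */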
+
+/**
+ *
+ * TxQ, RxQ, CQ related bits, offsets, macros
+ *
+ */
+
+#define BNA_Q_IDLE_STATE 0x00008001
+
+#define BNA_GET_DOORBELL_BASE_ADDR(_bar0) \
+ ((_bar0) + HQM_DOORBELL_BLK_BASE_ADDR)
+
+#define BNA_GET_DOORBELL_ENTRY_OFFSET(_entry) \
+ ((HQM_DOORBELL_BLK_BASE_ADDR) \
+ + (_entry << 7))
+
+#define BNA_DOORBELL_IB_INT_ACK(_timeout, _events) \
+ (0x80000000 | ((_timeout) << 16) | (_events))
+
+#define BNA_DOORBELL_IB_INT_DISABLE (0x40000000)
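+
+/*
+ * Example (sketch): the doorbell set for entry 3 sits at
+ * BNA_GET_DOORBELL_ENTRY_OFFSET(3) = 0x38000 + (3 << 7) = 0x38180,
+ * and BNA_DOORBELL_IB_INT_ACK(12, 5) = 0x800C0005 acks 5 events with
+ * a 12-unit (12 * 5 = 60us) coalescing timeout.
+ */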
+
+/* TxQ Entry Opcodes */
+#define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
+#define BNA_TXQ_WI_SEND_LSO (0x403) /* Multi-Frame Transmission */
+#define BNA_TXQ_WI_EXTENSION (0x104) /* Extension WI */
+
+/* TxQ Entry Control Flags */
+#define BNA_TXQ_WI_CF_FCOE_CRC (1 << 8)
+#define BNA_TXQ_WI_CF_IPID_MODE (1 << 5)
+#define BNA_TXQ_WI_CF_INS_PRIO (1 << 4)
+#define BNA_TXQ_WI_CF_INS_VLAN (1 << 3)
+#define BNA_TXQ_WI_CF_UDP_CKSUM (1 << 2)
+#define BNA_TXQ_WI_CF_TCP_CKSUM (1 << 1)
+#define BNA_TXQ_WI_CF_IP_CKSUM (1 << 0)
+
+#define BNA_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
+ (((_hdr_size) << 10) | ((_offset) & 0x3FF))
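+
+/*
+ * Example (sketch): BNA_TXQ_WI_L4_HDR_N_OFFSET(5, 34) packs the header
+ * size into bits 31:10 and the offset into bits 9:0:
+ * (5 << 10) | 34 = 0x1422.
+ */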
+
+/*
+ * Completion Q defines
+ */
+/* CQ Entry Flags */
+#define BNA_CQ_EF_MAC_ERROR (1 << 0)
+#define BNA_CQ_EF_FCS_ERROR (1 << 1)
+#define BNA_CQ_EF_TOO_LONG (1 << 2)
+#define BNA_CQ_EF_FC_CRC_OK (1 << 3)
+
+#define BNA_CQ_EF_RSVD1 (1 << 4)
+#define BNA_CQ_EF_L4_CKSUM_OK (1 << 5)
+#define BNA_CQ_EF_L3_CKSUM_OK (1 << 6)
+#define BNA_CQ_EF_HDS_HEADER (1 << 7)
+
+#define BNA_CQ_EF_UDP (1 << 8)
+#define BNA_CQ_EF_TCP (1 << 9)
+#define BNA_CQ_EF_IP_OPTIONS (1 << 10)
+#define BNA_CQ_EF_IPV6 (1 << 11)
+
+#define BNA_CQ_EF_IPV4 (1 << 12)
+#define BNA_CQ_EF_VLAN (1 << 13)
+#define BNA_CQ_EF_RSS (1 << 14)
+#define BNA_CQ_EF_RSVD2 (1 << 15)
+
+#define BNA_CQ_EF_MCAST_MATCH (1 << 16)
+#define BNA_CQ_EF_MCAST (1 << 17)
+#define BNA_CQ_EF_BCAST (1 << 18)
+#define BNA_CQ_EF_REMOTE (1 << 19)
+
+#define BNA_CQ_EF_LOCAL (1 << 20)
+
+/**
+ *
+ * Data structures
+ *
+ */
+
+enum txf_flags {
+ BFI_TXF_CF_ENABLE = 1 << 0,
+ BFI_TXF_CF_VLAN_FILTER = 1 << 8,
+ BFI_TXF_CF_VLAN_ADMIT = 1 << 9,
+ BFI_TXF_CF_VLAN_INSERT = 1 << 10,
+ BFI_TXF_CF_RSVD1 = 1 << 11,
+ BFI_TXF_CF_MAC_SA_CHECK = 1 << 12,
+ BFI_TXF_CF_VLAN_WI_BASED = 1 << 13,
+ BFI_TXF_CF_VSWITCH_MCAST = 1 << 14,
+ BFI_TXF_CF_VSWITCH_UCAST = 1 << 15,
+ BFI_TXF_CF_RSVD2 = 0x7F << 1
+};
+
+enum ib_flags {
+ BFI_IB_CF_MASTER_ENABLE = (1 << 0),
+ BFI_IB_CF_MSIX_MODE = (1 << 1),
+ BFI_IB_CF_COALESCING_MODE = (1 << 2),
+ BFI_IB_CF_INTER_PKT_ENABLE = (1 << 3),
+ BFI_IB_CF_INT_ENABLE = (1 << 4),
+ BFI_IB_CF_INTER_PKT_DMA = (1 << 5),
+ BFI_IB_CF_ACK_PENDING = (1 << 6),
+ BFI_IB_CF_RESERVED1 = (1 << 7)
+};
+
+enum rss_hash_type {
+ BFI_RSS_T_V4_TCP = (1 << 11),
+ BFI_RSS_T_V4_IP = (1 << 10),
+ BFI_RSS_T_V6_TCP = (1 << 9),
+ BFI_RSS_T_V6_IP = (1 << 8)
+};
+enum hds_header_type {
+ BNA_HDS_T_V4_TCP = (1 << 11),
+ BNA_HDS_T_V4_UDP = (1 << 10),
+ BNA_HDS_T_V6_TCP = (1 << 9),
+ BNA_HDS_T_V6_UDP = (1 << 8),
+ BNA_HDS_FORCED = (1 << 7),
+};
+enum rxf_flags {
+ BNA_RXF_CF_SM_LG_RXQ = (1 << 15),
+ BNA_RXF_CF_DEFAULT_VLAN = (1 << 14),
+ BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE = (1 << 13),
+ BNA_RXF_CF_VLAN_STRIP = (1 << 12),
+ BNA_RXF_CF_RSS_ENABLE = (1 << 8)
+};
+struct bna_chip_regs_offset {
+ u32 page_addr;
+ u32 fn_int_status;
+ u32 fn_int_mask;
+ u32 msix_idx;
+};
+extern const struct bna_chip_regs_offset reg_offset[];
+
+struct bna_chip_regs {
+ void __iomem *page_addr;
+ void __iomem *fn_int_status;
+ void __iomem *fn_int_mask;
+};
+
+struct bna_txq_mem {
+ u32 pg_tbl_addr_lo;
+ u32 pg_tbl_addr_hi;
+ u32 cur_q_entry_lo;
+ u32 cur_q_entry_hi;
+ u32 reserved1;
+ u32 reserved2;
+ u32 pg_cnt_n_prd_ptr; /* 31:16->total page count */
+ /* 15:0 ->producer pointer (index?) */
+ u32 entry_n_pg_size; /* 31:16->entry size */
+ /* 15:0 ->page size */
+ u32 int_blk_n_cns_ptr; /* 31:24->Int Blk Id; */
+ /* 23:16->Int Blk Offset */
+ /* 15:0 ->consumer pointer(index?) */
+ u32 cns_ptr2_n_q_state; /* 31:16->cons. ptr 2; 15:0-> Q state */
+ u32 nxt_qid_n_fid_n_pri; /* 17:10->next */
+ /* QId;9:3->FID;2:0->Priority */
+ u32 wvc_n_cquota_n_rquota; /* 31:24->WI Vector Count; */
+ /* 23:12->Cfg Quota; */
+ /* 11:0 ->Run Quota */
+ u32 reserved3[4];
+};
+
+struct bna_rxq_mem {
+ u32 pg_tbl_addr_lo;
+ u32 pg_tbl_addr_hi;
+ u32 cur_q_entry_lo;
+ u32 cur_q_entry_hi;
+ u32 reserved1;
+ u32 reserved2;
+ u32 pg_cnt_n_prd_ptr; /* 31:16->total page count */
+ /* 15:0 ->producer pointer (index?) */
+ u32 entry_n_pg_size; /* 31:16->entry size */
+ /* 15:0 ->page size */
+ u32 sg_n_cq_n_cns_ptr; /* 31:28->reserved; 27:24->sg count */
+ /* 23:16->CQ; */
+ /* 15:0->consumer pointer(index?) */
+ u32 buf_sz_n_q_state; /* 31:16->buffer size; 15:0-> Q state */
+ u32 next_qid; /* 17:10->next QId */
+ u32 reserved3;
+ u32 reserved4[4];
+};
+
+struct bna_rxtx_q_mem {
+ struct bna_rxq_mem rxq;
+ struct bna_txq_mem txq;
+};
+
+struct bna_cq_mem {
+ u32 pg_tbl_addr_lo;
+ u32 pg_tbl_addr_hi;
+ u32 cur_q_entry_lo;
+ u32 cur_q_entry_hi;
+
+ u32 reserved1;
+ u32 reserved2;
+ u32 pg_cnt_n_prd_ptr; /* 31:16->total page count */
+ /* 15:0 ->producer pointer (index?) */
+ u32 entry_n_pg_size; /* 31:16->entry size */
+ /* 15:0 ->page size */
+ u32 int_blk_n_cns_ptr; /* 31:24->Int Blk Id; */
+ /* 23:16->Int Blk Offset */
+ /* 15:0 ->consumer pointer(index?) */
+ u32 q_state; /* 31:16->reserved; 15:0-> Q state */
+ u32 reserved3[2];
+ u32 reserved4[4];
+};
+
+struct bna_ib_blk_mem {
+ u32 host_addr_lo;
+ u32 host_addr_hi;
+ u32 clsc_n_ctrl_n_msix; /* 31:24->coalescing; */
+ /* 23:16->coalescing cfg; */
+ /* 15:8 ->control; */
+ /* 7:0 ->msix; */
+ u32 ipkt_n_ent_n_idxof;
+ u32 ipkt_cnt_cfg_n_unacked;
+
+ u32 reserved[3];
+};
+
+struct bna_idx_tbl_mem {
+ u32 idx; /* !< 31:16->res;15:0->idx; */
+};
+
+struct bna_doorbell_qset {
+ u32 rxq[0x20 >> 2];
+ u32 txq[0x20 >> 2];
+ u32 ib0[0x20 >> 2];
+ u32 ib1[0x20 >> 2];
+};
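+/*
+ * Each doorbell area is 0x20 bytes (eight u32s); a qset groups the RxQ,
+ * TxQ and two IB doorbell areas. bna_ib_mod_init() in bna_txrx.c maps
+ * IB i to qset[i >> 1] and uses (i & 1) to pick between ib0 and ib1.
+ */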
+
+struct bna_rx_fndb_ram {
+ u32 rss_prop;
+ u32 size_routing_props;
+ u32 rit_hds_mcastq;
+ u32 control_flags;
+};
+
+struct bna_tx_fndb_ram {
+ u32 vlan_n_ctrl_flags;
+};
+
+/**
+ * @brief
+ * Structure which maps to RxFn Indirection Table (RIT)
+ * Size : 1 word
+ * See catapult_spec.pdf, RxA for details
+ */
+struct bna_rit_mem {
+ u32 rxq_ids; /* !< 31:12->res;11:0->two 6 bit RxQ Ids */
+};
+
+/**
+ * @brief
+ * Structure which maps to RSS Table entry
+ * Size : 16 words
+ * See catapult_spec.pdf, RAD for details
+ */
+struct bna_rss_mem {
+ /*
+ * 31:12-> res
+ * 11:8 -> protocol type
+ * 7:0 -> hash index
+ */
+ u32 type_n_hash;
+ u32 hash_key[10]; /* !< 40 byte Toeplitz hash key */
+ u32 reserved[5];
+};
+
+/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */
+struct bna_dma_addr {
+ u32 msb;
+ u32 lsb;
+};
+
+struct bna_txq_wi_vector {
+ u16 reserved;
+ u16 length; /* Only 14 LSB are valid */
+ struct bna_dma_addr host_addr; /* Tx-Buf DMA addr */
+};
+
+typedef u16 bna_txq_wi_opcode_t;
+
+typedef u16 bna_txq_wi_ctrl_flag_t;
+
+/**
+ * TxQ Entry Structure
+ *
+ * BEWARE: Load values into this structure with the correct endianness.
+ */
+struct bna_txq_entry {
+ union {
+ struct {
+ u8 reserved;
+ u8 num_vectors; /* number of vectors present */
+ bna_txq_wi_opcode_t opcode; /* Either */
+ /* BNA_TXQ_WI_SEND or */
+ /* BNA_TXQ_WI_SEND_LSO */
+ bna_txq_wi_ctrl_flag_t flags; /* OR of all the flags */
+ u16 l4_hdr_size_n_offset;
+ u16 vlan_tag;
+ u16 lso_mss; /* Only 14 LSB are valid */
+ u32 frame_length; /* Only 24 LSB are valid */
+ } wi;
+
+ struct {
+ u16 reserved;
+ bna_txq_wi_opcode_t opcode; /* Must be */
+ /* BNA_TXQ_WI_EXTENSION */
+ u32 reserved2[3]; /* Place holder for */
+ /* removed vector (12 bytes) */
+ } wi_ext;
+ } hdr;
+ struct bna_txq_wi_vector vector[4];
+};
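+/* Shorthands: entry->wi_hdr / entry->wi_ext_hdr for entry->hdr.wi /
+ * entry->hdr.wi_ext
+ */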
+#define wi_hdr hdr.wi
+#define wi_ext_hdr hdr.wi_ext
+
+/* RxQ Entry Structure */
+struct bna_rxq_entry { /* Rx-Buffer */
+ struct bna_dma_addr host_addr; /* Rx-Buffer DMA address */
+};
+
+typedef u32 bna_cq_e_flag_t;
+
+/* CQ Entry Structure */
+struct bna_cq_entry {
+ bna_cq_e_flag_t flags;
+ u16 vlan_tag;
+ u16 length;
+ u32 rss_hash;
+ u8 valid;
+ u8 reserved1;
+ u8 reserved2;
+ u8 rxq_id;
+};
+
+#endif /* __BNA_HW_H__ */
diff --git a/drivers/net/bna/bna_txrx.c b/drivers/net/bna/bna_txrx.c
new file mode 100644
index 000000000000..890846d55502
--- /dev/null
+++ b/drivers/net/bna/bna_txrx.c
@@ -0,0 +1,4209 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#include "bna.h"
+#include "bfa_sm.h"
+#include "bfi.h"
+
+/**
+ * IB
+ */
+#define bna_ib_find_free_ibidx(_mask, _pos)\
+do {\
+ (_pos) = 0;\
+ while (((_pos) < (BFI_IBIDX_MAX_SEGSIZE)) &&\
+ ((1 << (_pos)) & (_mask)))\
+ (_pos)++;\
+} while (0)
+
+#define bna_ib_count_ibidx(_mask, _count)\
+do {\
+ int pos = 0;\
+ (_count) = 0;\
+ while (pos < (BFI_IBIDX_MAX_SEGSIZE)) {\
+ if ((1 << pos) & (_mask))\
+ (_count) = pos + 1;\
+ pos++;\
+ } \
+} while (0)
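+/*
+ * Note: _count ends up as (highest set bit position + 1), i.e. the
+ * number of index slots spanned by the mask, not the number of set bits.
+ */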
+
+#define bna_ib_select_segpool(_count, _q_idx)\
+do {\
+ int i;\
+ (_q_idx) = -1;\
+ for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {\
+ if ((_count <= ibidx_pool[i].pool_entry_size)) {\
+ (_q_idx) = i;\
+ break;\
+ } \
+ } \
+} while (0)
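+/*
+ * Picks the first pool whose entry size can hold _count indexes (pools
+ * are assumed ordered by increasing entry size); _q_idx is left at -1
+ * when none fits.
+ */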
+
+struct bna_ibidx_pool {
+ int pool_size;
+ int pool_entry_size;
+};
+init_ibidx_pool(ibidx_pool);
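+/*
+ * init_ibidx_pool() above is assumed to expand to the static
+ * initializer of the ibidx_pool[] { pool_size, pool_entry_size } table
+ * consumed by the segment allocator in this file.
+ */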
+
+static struct bna_intr *
+bna_intr_get(struct bna_ib_mod *ib_mod, enum bna_intr_type intr_type,
+ int vector)
+{
+ struct bna_intr *intr;
+ struct list_head *qe;
+
+ list_for_each(qe, &ib_mod->intr_active_q) {
+ intr = (struct bna_intr *)qe;
+
+ if ((intr->intr_type == intr_type) &&
+ (intr->vector == vector)) {
+ intr->ref_count++;
+ return intr;
+ }
+ }
+
+ if (list_empty(&ib_mod->intr_free_q))
+ return NULL;
+
+ bfa_q_deq(&ib_mod->intr_free_q, &intr);
+ bfa_q_qe_init(&intr->qe);
+
+ intr->ref_count = 1;
+ intr->intr_type = intr_type;
+ intr->vector = vector;
+
+ list_add_tail(&intr->qe, &ib_mod->intr_active_q);
+
+ return intr;
+}
+
+static void
+bna_intr_put(struct bna_ib_mod *ib_mod,
+ struct bna_intr *intr)
+{
+ intr->ref_count--;
+
+ if (intr->ref_count == 0) {
+ intr->ib = NULL;
+ list_del(&intr->qe);
+ bfa_q_qe_init(&intr->qe);
+ list_add_tail(&intr->qe, &ib_mod->intr_free_q);
+ }
+}
+
+void
+bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ int i;
+ int j;
+ int count;
+ u8 offset;
+ struct bna_doorbell_qset *qset;
+ unsigned long off;
+
+ ib_mod->bna = bna;
+
+ ib_mod->ib = (struct bna_ib *)
+ res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mdl[0].kva;
+ ib_mod->intr = (struct bna_intr *)
+ res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mdl[0].kva;
+ ib_mod->idx_seg = (struct bna_ibidx_seg *)
+ res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mdl[0].kva;
+
+ INIT_LIST_HEAD(&ib_mod->ib_free_q);
+ INIT_LIST_HEAD(&ib_mod->intr_free_q);
+ INIT_LIST_HEAD(&ib_mod->intr_active_q);
+
+ for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++)
+ INIT_LIST_HEAD(&ib_mod->ibidx_seg_pool[i]);
+
+ for (i = 0; i < BFI_MAX_IB; i++) {
+ ib_mod->ib[i].ib_id = i;
+
+ ib_mod->ib[i].ib_seg_host_addr_kva =
+ res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
+ ib_mod->ib[i].ib_seg_host_addr.lsb =
+ res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
+ ib_mod->ib[i].ib_seg_host_addr.msb =
+ res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
+
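+ /*
+ * offsetof()-style idiom: the NULL qset pointer is used only to
+ * compute the byte offset of this IB's doorbell within the
+ * doorbell block; nothing is dereferenced.
+ */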
+ qset = (struct bna_doorbell_qset *)0;
+ off = (unsigned long)(&qset[i >> 1].ib0[(i & 0x1)
+ * (0x20 >> 2)]);
+ ib_mod->ib[i].door_bell.doorbell_addr = off +
+ BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
+
+ bfa_q_qe_init(&ib_mod->ib[i].qe);
+ list_add_tail(&ib_mod->ib[i].qe, &ib_mod->ib_free_q);
+
+ bfa_q_qe_init(&ib_mod->intr[i].qe);
+ list_add_tail(&ib_mod->intr[i].qe, &ib_mod->intr_free_q);
+ }
+
+ count = 0;
+ offset = 0;
+ for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
+ for (j = 0; j < ibidx_pool[i].pool_size; j++) {
+ bfa_q_qe_init(&ib_mod->idx_seg[count]);
+ ib_mod->idx_seg[count].ib_seg_size =
+ ibidx_pool[i].pool_entry_size;
+ ib_mod->idx_seg[count].ib_idx_tbl_offset = offset;
+ list_add_tail(&ib_mod->idx_seg[count].qe,
+ &ib_mod->ibidx_seg_pool[i]);
+ count++;
+ offset += ibidx_pool[i].pool_entry_size;
+ }
+ }
+}
+
+void
+bna_ib_mod_uninit(struct bna_ib_mod *ib_mod)
+{
+ int i;
+ int j;
+ struct list_head *qe;
+
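+ /*
+ * The counting loops below have no functional effect; they look
+ * like remnants of removed sanity checks on the free lists.
+ */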
+ i = 0;
+ list_for_each(qe, &ib_mod->ib_free_q)
+ i++;
+
+ i = 0;
+ list_for_each(qe, &ib_mod->intr_free_q)
+ i++;
+
+ for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
+ j = 0;
+ list_for_each(qe, &ib_mod->ibidx_seg_pool[i])
+ j++;
+ }
+
+ ib_mod->bna = NULL;
+}
+
+struct bna_ib *
+bna_ib_get(struct bna_ib_mod *ib_mod,
+ enum bna_intr_type intr_type,
+ int vector)
+{
+ struct bna_ib *ib;
+ struct bna_intr *intr;
+
+ if (intr_type == BNA_INTR_T_INTX)
+ vector = (1 << vector);
+
+ intr = bna_intr_get(ib_mod, intr_type, vector);
+ if (intr == NULL)
+ return NULL;
+
+ if (intr->ib) {
+ if (intr->ib->ref_count == BFI_IBIDX_MAX_SEGSIZE) {
+ bna_intr_put(ib_mod, intr);
+ return NULL;
+ }
+ intr->ib->ref_count++;
+ return intr->ib;
+ }
+
+ if (list_empty(&ib_mod->ib_free_q)) {
+ bna_intr_put(ib_mod, intr);
+ return NULL;
+ }
+
+ bfa_q_deq(&ib_mod->ib_free_q, &ib);
+ bfa_q_qe_init(&ib->qe);
+
+ ib->ref_count = 1;
+ ib->start_count = 0;
+ ib->idx_mask = 0;
+
+ ib->intr = intr;
+ ib->idx_seg = NULL;
+ intr->ib = ib;
+
+ ib->bna = ib_mod->bna;
+
+ return ib;
+}
+
+void
+bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
+{
+ bna_intr_put(ib_mod, ib->intr);
+
+ ib->ref_count--;
+
+ if (ib->ref_count == 0) {
+ ib->intr = NULL;
+ ib->bna = NULL;
+ list_add_tail(&ib->qe, &ib_mod->ib_free_q);
+ }
+}
+
+/*
+ * Returns index offset - starting from 0. May move the IB to a larger
+ * index segment from the pools when the current segment is full; this
+ * is only allowed while the IB is not started.
+ */
+int
+bna_ib_reserve_idx(struct bna_ib *ib)
+{
+ struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
+ struct bna_ibidx_seg *idx_seg;
+ int idx;
+ int num_idx;
+ int q_idx;
+
+ /* Find the first free index position */
+ bna_ib_find_free_ibidx(ib->idx_mask, idx);
+ if (idx == BFI_IBIDX_MAX_SEGSIZE)
+ return -1;
+
+ /*
+ * Calculate the total number of indexes held by this IB,
+ * including the index newly reserved above.
+ */
+ bna_ib_count_ibidx((ib->idx_mask | (1 << idx)), num_idx);
+
+ /* See if there is a free space in the index segment held by this IB */
+ if (ib->idx_seg && (num_idx <= ib->idx_seg->ib_seg_size)) {
+ ib->idx_mask |= (1 << idx);
+ return idx;
+ }
+
+ if (ib->start_count)
+ return -1;
+
+ /* Allocate a new segment */
+ bna_ib_select_segpool(num_idx, q_idx);
+ while (1) {
+ if (q_idx == BFI_IBIDX_TOTAL_POOLS)
+ return -1;
+ if (!list_empty(&ib_mod->ibidx_seg_pool[q_idx]))
+ break;
+ q_idx++;
+ }
+ bfa_q_deq(&ib_mod->ibidx_seg_pool[q_idx], &idx_seg);
+ bfa_q_qe_init(&idx_seg->qe);
+
+ /* Free the old segment */
+ if (ib->idx_seg) {
+ bna_ib_select_segpool(ib->idx_seg->ib_seg_size, q_idx);
+ list_add_tail(&ib->idx_seg->qe, &ib_mod->ibidx_seg_pool[q_idx]);
+ }
+
+ ib->idx_seg = idx_seg;
+
+ ib->idx_mask |= (1 << idx);
+
+ return idx;
+}
+
+void
+bna_ib_release_idx(struct bna_ib *ib, int idx)
+{
+ struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
+ struct bna_ibidx_seg *idx_seg;
+ int num_idx;
+ int cur_q_idx;
+ int new_q_idx;
+
+ ib->idx_mask &= ~(1 << idx);
+
+ if (ib->start_count)
+ return;
+
+ bna_ib_count_ibidx(ib->idx_mask, num_idx);
+
+ /*
+ * Free the segment, if there are no more indexes in the segment
+ * held by this IB
+ */
+ if (!num_idx) {
+ bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
+ list_add_tail(&ib->idx_seg->qe,
+ &ib_mod->ibidx_seg_pool[cur_q_idx]);
+ ib->idx_seg = NULL;
+ return;
+ }
+
+ /* See if we can move to a smaller segment */
+ bna_ib_select_segpool(num_idx, new_q_idx);
+ bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
+ while (new_q_idx < cur_q_idx) {
+ if (!list_empty(&ib_mod->ibidx_seg_pool[new_q_idx]))
+ break;
+ new_q_idx++;
+ }
+ if (new_q_idx < cur_q_idx) {
+ /* Select the new smaller segment */
+ bfa_q_deq(&ib_mod->ibidx_seg_pool[new_q_idx], &idx_seg);
+ bfa_q_qe_init(&idx_seg->qe);
+ /* Free the old segment */
+ list_add_tail(&ib->idx_seg->qe,
+ &ib_mod->ibidx_seg_pool[cur_q_idx]);
+ ib->idx_seg = idx_seg;
+ }
+}
+
+int
+bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
+{
+ if (ib->start_count)
+ return -1;
+
+ ib->ib_config.coalescing_timeo = ib_config->coalescing_timeo;
+ ib->ib_config.interpkt_timeo = ib_config->interpkt_timeo;
+ ib->ib_config.interpkt_count = ib_config->interpkt_count;
+ ib->ib_config.ctrl_flags = ib_config->ctrl_flags;
+
+ ib->ib_config.ctrl_flags |= BFI_IB_CF_MASTER_ENABLE;
+ if (ib->intr->intr_type == BNA_INTR_T_MSIX)
+ ib->ib_config.ctrl_flags |= BFI_IB_CF_MSIX_MODE;
+
+ return 0;
+}
+
+void
+bna_ib_start(struct bna_ib *ib)
+{
+ struct bna_ib_blk_mem ib_cfg;
+ struct bna_ib_blk_mem *ib_mem;
+ u32 pg_num;
+ u32 intx_mask;
+ int i;
+ void __iomem *base_addr;
+ unsigned long off;
+
+ ib->start_count++;
+
+ if (ib->start_count > 1)
+ return;
+
+ ib_cfg.host_addr_lo = (u32)(ib->ib_seg_host_addr.lsb);
+ ib_cfg.host_addr_hi = (u32)(ib->ib_seg_host_addr.msb);
+
+ ib_cfg.clsc_n_ctrl_n_msix = (((u32)
+ ib->ib_config.coalescing_timeo << 16) |
+ ((u32)ib->ib_config.ctrl_flags << 8) |
+ (ib->intr->vector));
+ ib_cfg.ipkt_n_ent_n_idxof =
+ ((u32)
+ (ib->ib_config.interpkt_timeo & 0xf) << 16) |
+ ((u32)ib->idx_seg->ib_seg_size << 8) |
+ (ib->idx_seg->ib_idx_tbl_offset);
+ ib_cfg.ipkt_cnt_cfg_n_unacked = ((u32)
+ ib->ib_config.interpkt_count << 24);
+
+ pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
+ HQM_IB_RAM_BASE_OFFSET);
+ writel(pg_num, ib->bna->regs.page_addr);
+
+ base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
+ HQM_IB_RAM_BASE_OFFSET);
+
+ ib_mem = (struct bna_ib_blk_mem *)0;
+ off = (unsigned long)&ib_mem[ib->ib_id].host_addr_lo;
+ writel(htonl(ib_cfg.host_addr_lo), base_addr + off);
+
+ off = (unsigned long)&ib_mem[ib->ib_id].host_addr_hi;
+ writel(htonl(ib_cfg.host_addr_hi), base_addr + off);
+
+ off = (unsigned long)&ib_mem[ib->ib_id].clsc_n_ctrl_n_msix;
+ writel(ib_cfg.clsc_n_ctrl_n_msix, base_addr + off);
+
+ off = (unsigned long)&ib_mem[ib->ib_id].ipkt_n_ent_n_idxof;
+ writel(ib_cfg.ipkt_n_ent_n_idxof, base_addr + off);
+
+ off = (unsigned long)&ib_mem[ib->ib_id].ipkt_cnt_cfg_n_unacked;
+ writel(ib_cfg.ipkt_cnt_cfg_n_unacked, base_addr + off);
+
+ ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
+ (u32)ib->ib_config.coalescing_timeo, 0);
+
+ pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
+ HQM_INDX_TBL_RAM_BASE_OFFSET);
+ writel(pg_num, ib->bna->regs.page_addr);
+
+ base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
+ HQM_INDX_TBL_RAM_BASE_OFFSET);
+ for (i = 0; i < ib->idx_seg->ib_seg_size; i++) {
+ off = (unsigned long)
+ ((ib->idx_seg->ib_idx_tbl_offset + i) * BFI_IBIDX_SIZE);
+ writel(0, base_addr + off);
+ }
+
+ if (ib->intr->intr_type == BNA_INTR_T_INTX) {
+ bna_intx_disable(ib->bna, intx_mask);
+ intx_mask &= ~(ib->intr->vector);
+ bna_intx_enable(ib->bna, intx_mask);
+ }
+}
+
+void
+bna_ib_stop(struct bna_ib *ib)
+{
+ u32 intx_mask;
+
+ ib->start_count--;
+
+ if (ib->start_count == 0) {
+ writel(BNA_DOORBELL_IB_INT_DISABLE,
+ ib->door_bell.doorbell_addr);
+ if (ib->intr->intr_type == BNA_INTR_T_INTX) {
+ bna_intx_disable(ib->bna, intx_mask);
+ intx_mask |= (ib->intr->vector);
+ bna_intx_enable(ib->bna, intx_mask);
+ }
+ }
+}
+
+void
+bna_ib_fail(struct bna_ib *ib)
+{
+ ib->start_count = 0;
+}
+
+/**
+ * RXF
+ */
+static void rxf_enable(struct bna_rxf *rxf);
+static void rxf_disable(struct bna_rxf *rxf);
+static void __rxf_config_set(struct bna_rxf *rxf);
+static void __rxf_rit_set(struct bna_rxf *rxf);
+static void __bna_rxf_stat_clr(struct bna_rxf *rxf);
+static int rxf_process_packet_filter(struct bna_rxf *rxf);
+static int rxf_clear_packet_filter(struct bna_rxf *rxf);
+static void rxf_reset_packet_filter(struct bna_rxf *rxf);
+static void rxf_cb_enabled(void *arg, int status);
+static void rxf_cb_disabled(void *arg, int status);
+static void bna_rxf_cb_stats_cleared(void *arg, int status);
+static void __rxf_enable(struct bna_rxf *rxf);
+static void __rxf_disable(struct bna_rxf *rxf);
+
+bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, start_wait, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, cam_fltr_mod_wait, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, cam_fltr_clr_wait, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, stop_wait, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, pause_wait, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, resume_wait, struct bna_rxf,
+ enum bna_rxf_event);
+bfa_fsm_state_decl(bna_rxf, stat_clr_wait, struct bna_rxf,
+ enum bna_rxf_event);
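+/*
+ * bfa_fsm_state_decl() (bfa_sm.h) declares a bna_rxf_sm_<state>_entry()
+ * action and a bna_rxf_sm_<state>() event handler per state; their
+ * definitions follow the state table below.
+ */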
+
+static struct bfa_sm_table rxf_sm_table[] = {
+ {BFA_SM(bna_rxf_sm_stopped), BNA_RXF_STOPPED},
+ {BFA_SM(bna_rxf_sm_start_wait), BNA_RXF_START_WAIT},
+ {BFA_SM(bna_rxf_sm_cam_fltr_mod_wait), BNA_RXF_CAM_FLTR_MOD_WAIT},
+ {BFA_SM(bna_rxf_sm_started), BNA_RXF_STARTED},
+ {BFA_SM(bna_rxf_sm_cam_fltr_clr_wait), BNA_RXF_CAM_FLTR_CLR_WAIT},
+ {BFA_SM(bna_rxf_sm_stop_wait), BNA_RXF_STOP_WAIT},
+ {BFA_SM(bna_rxf_sm_pause_wait), BNA_RXF_PAUSE_WAIT},
+ {BFA_SM(bna_rxf_sm_resume_wait), BNA_RXF_RESUME_WAIT},
+ {BFA_SM(bna_rxf_sm_stat_clr_wait), BNA_RXF_STAT_CLR_WAIT}
+};
+
+static void
+bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
+{
+ call_rxf_stop_cbfn(rxf, BNA_CB_SUCCESS);
+}
+
+static void
+bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_START:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_start_wait);
+ break;
+
+ case RXF_E_STOP:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_FAIL:
+ /* No-op */
+ break;
+
+ case RXF_E_CAM_FLTR_MOD:
+ call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
+ break;
+
+ case RXF_E_STARTED:
+ case RXF_E_STOPPED:
+ case RXF_E_CAM_FLTR_RESP:
+ /**
+ * These events are received due to flushing of mbox
+ * when device fails
+ */
+ /* No-op */
+ break;
+
+ case RXF_E_PAUSE:
+ rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
+ call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
+ break;
+
+ case RXF_E_RESUME:
+ rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
+ call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
+ break;
+
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+bna_rxf_sm_start_wait_entry(struct bna_rxf *rxf)
+{
+ __rxf_config_set(rxf);
+ __rxf_rit_set(rxf);
+ rxf_enable(rxf);
+}
+
+static void
+bna_rxf_sm_start_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_STOP:
+ /**
+ * STOP originates from bnad. When this happens,
+ * the FSM cannot be waiting for a filter update
+ */
+ call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
+ break;
+
+ case RXF_E_FAIL:
+ call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
+ call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_CAM_FLTR_MOD:
+ /* No-op */
+ break;
+
+ case RXF_E_STARTED:
+ /**
+ * Force rxf_process_packet_filter() to go through
+ * the initial config
+ */
+ if ((rxf->ucast_active_mac != NULL) &&
+ (rxf->ucast_pending_set == 0))
+ rxf->ucast_pending_set = 1;
+
+ if (rxf->rss_status == BNA_STATUS_T_ENABLED)
+ rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
+
+ rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
+
+ bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
+ break;
+
+ case RXF_E_PAUSE:
+ case RXF_E_RESUME:
+ rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
+ break;
+
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+bna_rxf_sm_cam_fltr_mod_wait_entry(struct bna_rxf *rxf)
+{
+ if (!rxf_process_packet_filter(rxf)) {
+ /* No more pending CAM entries to update */
+ bfa_fsm_set_state(rxf, bna_rxf_sm_started);
+ }
+}
+
+static void
+bna_rxf_sm_cam_fltr_mod_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_STOP:
+ /**
+ * STOP originates from bnad. When this happens,
+ * the FSM cannot be waiting for a filter update
+ */
+ call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
+ break;
+
+ case RXF_E_FAIL:
+ rxf_reset_packet_filter(rxf);
+ call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
+ call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_CAM_FLTR_MOD:
+ /* No-op */
+ break;
+
+ case RXF_E_CAM_FLTR_RESP:
+ if (!rxf_process_packet_filter(rxf)) {
+ /* No more pending CAM entries to update */
+ call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_started);
+ }
+ break;
+
+ case RXF_E_PAUSE:
+ case RXF_E_RESUME:
+ rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
+ break;
+
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+bna_rxf_sm_started_entry(struct bna_rxf *rxf)
+{
+ call_rxf_start_cbfn(rxf, BNA_CB_SUCCESS);
+
+ if (rxf->rxf_flags & BNA_RXF_FL_OPERSTATE_CHANGED) {
+ if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
+ bfa_fsm_send_event(rxf, RXF_E_PAUSE);
+ else
+ bfa_fsm_send_event(rxf, RXF_E_RESUME);
+ }
+}
+
+static void
+bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_STOP:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
+ /* Hack to get the FSM to start clearing CAM entries */
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
+ break;
+
+ case RXF_E_FAIL:
+ rxf_reset_packet_filter(rxf);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_CAM_FLTR_MOD:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
+ break;
+
+ case RXF_E_PAUSE:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_pause_wait);
+ break;
+
+ case RXF_E_RESUME:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_resume_wait);
+ break;
+
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+bna_rxf_sm_cam_fltr_clr_wait_entry(struct bna_rxf *rxf)
+{
+ /**
+ * Note: Do not add rxf_clear_packet_filter here.
+ * It will overstep the mbox when this transition happens:
+ * cam_fltr_mod_wait -> cam_fltr_clr_wait on RXF_E_STOP event
+ */
+}
+
+static void
+bna_rxf_sm_cam_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_FAIL:
+ /**
+ * FSM was in the process of stopping, initiated by
+ * bnad. When this happens, no one can be waiting for
+ * start or filter update
+ */
+ rxf_reset_packet_filter(rxf);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_CAM_FLTR_RESP:
+ if (!rxf_clear_packet_filter(rxf)) {
+ /* No more pending CAM entries to clear */
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
+ rxf_disable(rxf);
+ }
+ break;
+
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+bna_rxf_sm_stop_wait_entry(struct bna_rxf *rxf)
+{
+ /**
+ * NOTE: Do not add rxf_disable here.
+ * It will overstep the mbox when this transition happens:
+ * start_wait -> stop_wait on RXF_E_STOP event
+ */
+}
+
+static void
+bna_rxf_sm_stop_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_FAIL:
+ /**
+ * FSM was in the process of stopping, initiated by
+ * bnad. When this happens, no one can be waiting for
+ * start or filter update
+ */
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_STARTED:
+ /**
+ * This event is received due to abrupt transition from
+ * bna_rxf_sm_start_wait state on receiving
+ * RXF_E_STOP event
+ */
+ rxf_disable(rxf);
+ break;
+
+ case RXF_E_STOPPED:
+ /**
+ * FSM was in the process of stopping, initiated by
+ * bnad. When this happens, no one can be waiting for
+ * start or filter update
+ */
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stat_clr_wait);
+ break;
+
+ case RXF_E_PAUSE:
+ rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
+ break;
+
+ case RXF_E_RESUME:
+ rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
+ break;
+
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+bna_rxf_sm_pause_wait_entry(struct bna_rxf *rxf)
+{
+ rxf->rxf_flags &=
+ ~(BNA_RXF_FL_OPERSTATE_CHANGED | BNA_RXF_FL_RXF_ENABLED);
+ __rxf_disable(rxf);
+}
+
+static void
+bna_rxf_sm_pause_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_FAIL:
+ /**
+ * FSM was in the process of disabling rxf, initiated by
+ * bnad.
+ */
+ call_rxf_pause_cbfn(rxf, BNA_CB_FAIL);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_STOPPED:
+ rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
+ call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_started);
+ break;
+
+ /*
+ * Since PAUSE/RESUME can only be sent by bnad, we don't expect
+ * any other event during these states
+ */
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+bna_rxf_sm_resume_wait_entry(struct bna_rxf *rxf)
+{
+ rxf->rxf_flags &= ~(BNA_RXF_FL_OPERSTATE_CHANGED);
+ rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
+ __rxf_enable(rxf);
+}
+
+static void
+bna_rxf_sm_resume_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_FAIL:
+ /**
+ * FSM was in the process of disabling rxf, initiated by
+ * bnad.
+ */
+ call_rxf_resume_cbfn(rxf, BNA_CB_FAIL);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ case RXF_E_STARTED:
+ rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
+ call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_started);
+ break;
+
+ /*
+ * Since PAUSE/RESUME can only be sent by bnad, we don't expect
+ * any other event during these states
+ */
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+bna_rxf_sm_stat_clr_wait_entry(struct bna_rxf *rxf)
+{
+ __bna_rxf_stat_clr(rxf);
+}
+
+static void
+bna_rxf_sm_stat_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
+{
+ switch (event) {
+ case RXF_E_FAIL:
+ case RXF_E_STAT_CLEARED:
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(rxf->rx->bna, event);
+ }
+}
+
+static void
+__rxf_enable(struct bna_rxf *rxf)
+{
+ struct bfi_ll_rxf_multi_req ll_req;
+ u32 bm[2] = {0, 0};
+
+ if (rxf->rxf_id < 32)
+ bm[0] = 1 << rxf->rxf_id;
+ else
+ bm[1] = 1 << (rxf->rxf_id - 32);
+
+ bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
+ ll_req.rxf_id_mask[0] = htonl(bm[0]);
+ ll_req.rxf_id_mask[1] = htonl(bm[1]);
+ ll_req.enable = 1;
+
+ bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
+ rxf_cb_enabled, rxf);
+
+ bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
+}
+
+static void
+__rxf_disable(struct bna_rxf *rxf)
+{
+ struct bfi_ll_rxf_multi_req ll_req;
+ u32 bm[2] = {0, 0};
+
+ if (rxf->rxf_id < 32)
+ bm[0] = 1 << rxf->rxf_id;
+ else
+ bm[1] = 1 << (rxf->rxf_id - 32);
+
+ bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
+ ll_req.rxf_id_mask[0] = htonl(bm[0]);
+ ll_req.rxf_id_mask[1] = htonl(bm[1]);
+ ll_req.enable = 0;
+
+ bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
+ rxf_cb_disabled, rxf);
+
+ bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
+}
+
+static void
+__rxf_config_set(struct bna_rxf *rxf)
+{
+ u32 i;
+ struct bna_rss_mem *rss_mem;
+ struct bna_rx_fndb_ram *rx_fndb_ram;
+ struct bna *bna = rxf->rx->bna;
+ void __iomem *base_addr;
+ unsigned long off;
+
+ base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
+ RSS_TABLE_BASE_OFFSET);
+
+ rss_mem = (struct bna_rss_mem *)0;
+
+ /* Configure RSS if required */
+ if (rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE) {
+ /* configure RSS Table */
+ writel(BNA_GET_PAGE_NUM(RAD0_MEM_BLK_BASE_PG_NUM +
+ bna->port_num, RSS_TABLE_BASE_OFFSET),
+ bna->regs.page_addr);
+
+ /* temporarily disable RSS, while hash value is written */
+ off = (unsigned long)&rss_mem[0].type_n_hash;
+ writel(0, base_addr + off);
+
+ for (i = 0; i < BFI_RSS_HASH_KEY_LEN; i++) {
+ off = (unsigned long)
+ &rss_mem[0].hash_key[(BFI_RSS_HASH_KEY_LEN - 1) - i];
+ writel(htonl(rxf->rss_cfg.toeplitz_hash_key[i]),
+ base_addr + off);
+ }
+
+ off = (unsigned long)&rss_mem[0].type_n_hash;
+ writel(rxf->rss_cfg.hash_type | rxf->rss_cfg.hash_mask,
+ base_addr + off);
+ }
+
+ /* Configure RxF */
+ writel(BNA_GET_PAGE_NUM(
+ LUT0_MEM_BLK_BASE_PG_NUM + (bna->port_num * 2),
+ RX_FNDB_RAM_BASE_OFFSET),
+ bna->regs.page_addr);
+
+ base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
+ RX_FNDB_RAM_BASE_OFFSET);
+
+ rx_fndb_ram = (struct bna_rx_fndb_ram *)0;
+
+ /* We always use RSS table 0 */
+ off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rss_prop;
+ writel(rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE,
+ base_addr + off);
+
+ /* small/large buffer enable/disable */
+ off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].size_routing_props;
+ writel((rxf->ctrl_flags & BNA_RXF_CF_SM_LG_RXQ) | 0x80,
+ base_addr + off);
+
+ /* RIT offset, HDS forced offset, multicast RxQ Id */
+ off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rit_hds_mcastq;
+ writel((rxf->rit_segment->rit_offset << 16) |
+ (rxf->forced_offset << 8) |
+ (rxf->hds_cfg.hdr_type & BNA_HDS_FORCED) | rxf->mcast_rxq_id,
+ base_addr + off);
+
+ /*
+ * default vlan tag, default function enable, strip vlan bytes,
+ * HDS type, header size
+ */
+
+ off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].control_flags;
+ writel(((u32)rxf->default_vlan_tag << 16) |
+ (rxf->ctrl_flags &
+ (BNA_RXF_CF_DEFAULT_VLAN |
+ BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE |
+ BNA_RXF_CF_VLAN_STRIP)) |
+ (rxf->hds_cfg.hdr_type & ~BNA_HDS_FORCED) |
+ rxf->hds_cfg.header_size,
+ base_addr + off);
+}
+
+void
+__rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status)
+{
+ struct bna *bna = rxf->rx->bna;
+ int i;
+
+ writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
+ (bna->port_num * 2), VLAN_RAM_BASE_OFFSET),
+ bna->regs.page_addr);
+
+ if (status == BNA_STATUS_T_ENABLED) {
+ /* enable VLAN filtering on this function */
+ for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
+ writel(rxf->vlan_filter_table[i],
+ BNA_GET_VLAN_MEM_ENTRY_ADDR
+ (bna->pcidev.pci_bar_kva, rxf->rxf_id,
+ i * 32));
+ }
+ } else {
+ /* disable VLAN filtering on this function */
+ for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
+ writel(0xffffffff,
+ BNA_GET_VLAN_MEM_ENTRY_ADDR
+ (bna->pcidev.pci_bar_kva, rxf->rxf_id,
+ i * 32));
+ }
+ }
+}
+
+static void
+__rxf_rit_set(struct bna_rxf *rxf)
+{
+ struct bna *bna = rxf->rx->bna;
+ struct bna_rit_mem *rit_mem;
+ int i;
+ void __iomem *base_addr;
+ unsigned long off;
+
+ base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
+ FUNCTION_TO_RXQ_TRANSLATE);
+
+ rit_mem = (struct bna_rit_mem *)0;
+
+ writel(BNA_GET_PAGE_NUM(RXA0_MEM_BLK_BASE_PG_NUM + bna->port_num,
+ FUNCTION_TO_RXQ_TRANSLATE),
+ bna->regs.page_addr);
+
+ for (i = 0; i < rxf->rit_segment->rit_size; i++) {
+ off = (unsigned long)&rit_mem[i + rxf->rit_segment->rit_offset];
+ writel(rxf->rit_segment->rit[i].large_rxq_id << 6 |
+ rxf->rit_segment->rit[i].small_rxq_id,
+ base_addr + off);
+ }
+}
+
+static void
+__bna_rxf_stat_clr(struct bna_rxf *rxf)
+{
+ struct bfi_ll_stats_req ll_req;
+ u32 bm[2] = {0, 0};
+
+ if (rxf->rxf_id < 32)
+ bm[0] = 1 << rxf->rxf_id;
+ else
+ bm[1] = 1 << (rxf->rxf_id - 32);
+
+ bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
+ ll_req.stats_mask = 0;
+ ll_req.txf_id_mask[0] = 0;
+ ll_req.txf_id_mask[1] = 0;
+
+ ll_req.rxf_id_mask[0] = htonl(bm[0]);
+ ll_req.rxf_id_mask[1] = htonl(bm[1]);
+
+ bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_rxf_cb_stats_cleared, rxf);
+ bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
+}
+
+static void
+rxf_enable(struct bna_rxf *rxf)
+{
+ if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
+ bfa_fsm_send_event(rxf, RXF_E_STARTED);
+ else {
+ rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
+ __rxf_enable(rxf);
+ }
+}
+
+static void
+rxf_cb_enabled(void *arg, int status)
+{
+ struct bna_rxf *rxf = (struct bna_rxf *)arg;
+
+ bfa_q_qe_init(&rxf->mbox_qe.qe);
+ bfa_fsm_send_event(rxf, RXF_E_STARTED);
+}
+
+static void
+rxf_disable(struct bna_rxf *rxf)
+{
+ if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
+ bfa_fsm_send_event(rxf, RXF_E_STOPPED);
+ else {
+ rxf->rxf_flags &= ~BNA_RXF_FL_RXF_ENABLED;
+ __rxf_disable(rxf);
+ }
+}
+
+static void
+rxf_cb_disabled(void *arg, int status)
+{
+ struct bna_rxf *rxf = (struct bna_rxf *)arg;
+
+ bfa_q_qe_init(&rxf->mbox_qe.qe);
+ bfa_fsm_send_event(rxf, RXF_E_STOPPED);
+}
+
+void
+rxf_cb_cam_fltr_mbox_cmd(void *arg, int status)
+{
+ struct bna_rxf *rxf = (struct bna_rxf *)arg;
+
+ bfa_q_qe_init(&rxf->mbox_qe.qe);
+
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
+}
+
+static void
+bna_rxf_cb_stats_cleared(void *arg, int status)
+{
+ struct bna_rxf *rxf = (struct bna_rxf *)arg;
+
+ bfa_q_qe_init(&rxf->mbox_qe.qe);
+ bfa_fsm_send_event(rxf, RXF_E_STAT_CLEARED);
+}
+
+void
+rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
+ const struct bna_mac *mac_addr)
+{
+ struct bfi_ll_mac_addr_req req;
+
+ bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);
+
+ req.rxf_id = rxf->rxf_id;
+ memcpy(&req.mac_addr, (void *)&mac_addr->addr, ETH_ALEN);
+
+ bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
+ rxf_cb_cam_fltr_mbox_cmd, rxf);
+
+ bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
+}
+
+static int
+rxf_process_packet_filter_mcast(struct bna_rxf *rxf)
+{
+ struct bna_mac *mac = NULL;
+ struct list_head *qe;
+
+ /* Add multicast entries */
+ if (!list_empty(&rxf->mcast_pending_add_q)) {
+ bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_ADD_REQ, mac);
+ list_add_tail(&mac->qe, &rxf->mcast_active_q);
+ return 1;
+ }
+
+ /* Delete multicast entries previously added */
+ if (!list_empty(&rxf->mcast_pending_del_q)) {
+ bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+rxf_process_packet_filter_vlan(struct bna_rxf *rxf)
+{
+ /* Apply the VLAN filter */
+ if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) {
+ rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING;
+ if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC) &&
+ !(rxf->rxmode_active & BNA_RXMODE_DEFAULT))
+ __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
+ }
+
+ /* Apply RSS configuration */
+ if (rxf->rxf_flags & BNA_RXF_FL_RSS_CONFIG_PENDING) {
+ rxf->rxf_flags &= ~BNA_RXF_FL_RSS_CONFIG_PENDING;
+ if (rxf->rss_status == BNA_STATUS_T_DISABLED) {
+ /* RSS is being disabled */
+ rxf->ctrl_flags &= ~BNA_RXF_CF_RSS_ENABLE;
+ __rxf_rit_set(rxf);
+ __rxf_config_set(rxf);
+ } else {
+ /* RSS is being enabled or reconfigured */
+ rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
+ __rxf_rit_set(rxf);
+ __rxf_config_set(rxf);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Processes pending ucast, mcast entry addition/deletion and issues mailbox
+ * command. Also processes pending filter configuration - promiscuous mode,
+ * default mode, allmulti mode - and issues mailbox command or directly applies
+ * to h/w
+ */
+static int
+rxf_process_packet_filter(struct bna_rxf *rxf)
+{
+ /* Set the default MAC first */
+ if (rxf->ucast_pending_set > 0) {
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_SET_REQ,
+ rxf->ucast_active_mac);
+ rxf->ucast_pending_set--;
+ return 1;
+ }
+
+ if (rxf_process_packet_filter_ucast(rxf))
+ return 1;
+
+ if (rxf_process_packet_filter_mcast(rxf))
+ return 1;
+
+ if (rxf_process_packet_filter_promisc(rxf))
+ return 1;
+
+ if (rxf_process_packet_filter_default(rxf))
+ return 1;
+
+ if (rxf_process_packet_filter_allmulti(rxf))
+ return 1;
+
+ if (rxf_process_packet_filter_vlan(rxf))
+ return 1;
+
+ return 0;
+}
+
+static int
+rxf_clear_packet_filter_mcast(struct bna_rxf *rxf)
+{
+ struct bna_mac *mac = NULL;
+ struct list_head *qe;
+
+ /* 3. delete pending mcast entries */
+ if (!list_empty(&rxf->mcast_pending_del_q)) {
+ bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ return 1;
+ }
+
+ /* 4. clear active mcast entries; move them to pending_add_q */
+ if (!list_empty(&rxf->mcast_active_q)) {
+ bfa_q_deq(&rxf->mcast_active_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
+ list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * In the rxf stop path, processes pending ucast/mcast delete queue and issues
+ * the mailbox command. Moves the active ucast/mcast entries to pending add q,
+ * so that they are added to CAM again in the rxf start path. Moves the current
+ * filter settings - promiscuous, default, allmulti - to pending filter
+ * configuration
+ */
+static int
+rxf_clear_packet_filter(struct bna_rxf *rxf)
+{
+ if (rxf_clear_packet_filter_ucast(rxf))
+ return 1;
+
+ if (rxf_clear_packet_filter_mcast(rxf))
+ return 1;
+
+ /* 5. clear active default MAC in the CAM */
+ if (rxf->ucast_pending_set > 0)
+ rxf->ucast_pending_set = 0;
+
+ if (rxf_clear_packet_filter_promisc(rxf))
+ return 1;
+
+ if (rxf_clear_packet_filter_default(rxf))
+ return 1;
+
+ if (rxf_clear_packet_filter_allmulti(rxf))
+ return 1;
+
+ return 0;
+}
+
+static void
+rxf_reset_packet_filter_mcast(struct bna_rxf *rxf)
+{
+ struct list_head *qe;
+ struct bna_mac *mac;
+
+ /* 3. Move active mcast entries to pending_add_q */
+ while (!list_empty(&rxf->mcast_active_q)) {
+ bfa_q_deq(&rxf->mcast_active_q, &qe);
+ bfa_q_qe_init(qe);
+ list_add_tail(qe, &rxf->mcast_pending_add_q);
+ }
+
+ /* 4. Throw away mcast entries pending deletion */
+ while (!list_empty(&rxf->mcast_pending_del_q)) {
+ bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
+ bfa_q_qe_init(qe);
+ mac = (struct bna_mac *)qe;
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ }
+}
+
+/**
+ * In the rxf fail path, throws away the ucast/mcast entries pending for
+ * deletion, moves all active ucast/mcast entries to pending queue so that
+ * they are added back to CAM in the rxf start path. Also moves the current
+ * filter configuration to pending filter configuration.
+ */
+static void
+rxf_reset_packet_filter(struct bna_rxf *rxf)
+{
+ rxf_reset_packet_filter_ucast(rxf);
+
+ rxf_reset_packet_filter_mcast(rxf);
+
+ /* 5. Turn off ucast set flag */
+ rxf->ucast_pending_set = 0;
+
+ rxf_reset_packet_filter_promisc(rxf);
+
+ rxf_reset_packet_filter_default(rxf);
+
+ rxf_reset_packet_filter_allmulti(rxf);
+}
+
+void
+bna_rxf_init(struct bna_rxf *rxf,
+ struct bna_rx *rx,
+ struct bna_rx_config *q_config)
+{
+ struct list_head *qe;
+ struct bna_rxp *rxp;
+
+ /* rxf_id is initialized during rx_mod init */
+ rxf->rx = rx;
+
+ INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
+ INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
+ rxf->ucast_pending_set = 0;
+ INIT_LIST_HEAD(&rxf->ucast_active_q);
+ rxf->ucast_active_mac = NULL;
+
+ INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
+ INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
+ INIT_LIST_HEAD(&rxf->mcast_active_q);
+
+ bfa_q_qe_init(&rxf->mbox_qe.qe);
+
+ if (q_config->vlan_strip_status == BNA_STATUS_T_ENABLED)
+ rxf->ctrl_flags |= BNA_RXF_CF_VLAN_STRIP;
+
+ rxf->rxf_oper_state = (q_config->paused) ?
+ BNA_RXF_OPER_STATE_PAUSED : BNA_RXF_OPER_STATE_RUNNING;
+
+ bna_rxf_adv_init(rxf, rx, q_config);
+
+ rxf->rit_segment = bna_rit_mod_seg_get(&rxf->rx->bna->rit_mod,
+ q_config->num_paths);
+
+ list_for_each(qe, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe;
+ if (q_config->rxp_type == BNA_RXP_SINGLE)
+ rxf->mcast_rxq_id = rxp->rxq.single.only->rxq_id;
+ else
+ rxf->mcast_rxq_id = rxp->rxq.slr.large->rxq_id;
+ break;
+ }
+
+ rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
+ memset(rxf->vlan_filter_table, 0,
+ (sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32)));
+
+ bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+}
+
+void
+bna_rxf_uninit(struct bna_rxf *rxf)
+{
+ struct bna_mac *mac;
+
+ bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment);
+ rxf->rit_segment = NULL;
+
+ rxf->ucast_pending_set = 0;
+
+ while (!list_empty(&rxf->ucast_pending_add_q)) {
+ bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
+ bfa_q_qe_init(&mac->qe);
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+ }
+
+ if (rxf->ucast_active_mac) {
+ bfa_q_qe_init(&rxf->ucast_active_mac->qe);
+ bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
+ rxf->ucast_active_mac);
+ rxf->ucast_active_mac = NULL;
+ }
+
+ while (!list_empty(&rxf->mcast_pending_add_q)) {
+ bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
+ bfa_q_qe_init(&mac->qe);
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ }
+
+ rxf->rx = NULL;
+}
+
+void
+bna_rxf_start(struct bna_rxf *rxf)
+{
+ rxf->start_cbfn = bna_rx_cb_rxf_started;
+ rxf->start_cbarg = rxf->rx;
+ rxf->rxf_flags &= ~BNA_RXF_FL_FAILED;
+ bfa_fsm_send_event(rxf, RXF_E_START);
+}
+
+void
+bna_rxf_stop(struct bna_rxf *rxf)
+{
+ rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
+ rxf->stop_cbarg = rxf->rx;
+ bfa_fsm_send_event(rxf, RXF_E_STOP);
+}
+
+void
+bna_rxf_fail(struct bna_rxf *rxf)
+{
+ rxf->rxf_flags |= BNA_RXF_FL_FAILED;
+ bfa_fsm_send_event(rxf, RXF_E_FAIL);
+}
+
+int
+bna_rxf_state_get(struct bna_rxf *rxf)
+{
+ return bfa_sm_to_state(rxf_sm_table, rxf->fsm);
+}
+
+enum bna_cb_status
+bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+
+ if (rxf->ucast_active_mac == NULL) {
+ rxf->ucast_active_mac =
+ bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
+ if (rxf->ucast_active_mac == NULL)
+ return BNA_CB_UCAST_CAM_FULL;
+ bfa_q_qe_init(&rxf->ucast_active_mac->qe);
+ }
+
+ memcpy(rxf->ucast_active_mac->addr, ucmac, ETH_ALEN);
+ rxf->ucast_pending_set++;
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+
+ return BNA_CB_SUCCESS;
+}
+
+enum bna_cb_status
+bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ struct list_head *qe;
+ struct bna_mac *mac;
+
+ /* Check if already added */
+ list_for_each(qe, &rxf->mcast_active_q) {
+ mac = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
+ if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+ return BNA_CB_SUCCESS;
+ }
+ }
+
+ /* Check if pending addition */
+ list_for_each(qe, &rxf->mcast_pending_add_q) {
+ mac = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
+ if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+ return BNA_CB_SUCCESS;
+ }
+ }
+
+ mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
+ if (mac == NULL)
+ return BNA_CB_MCAST_LIST_FULL;
+ bfa_q_qe_init(&mac->qe);
+ memcpy(mac->addr, addr, ETH_ALEN);
+ list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
+
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+
+ return BNA_CB_SUCCESS;
+}
+
+enum bna_cb_status
+bna_rx_mcast_del(struct bna_rx *rx, u8 *addr,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ struct list_head *qe;
+ struct bna_mac *mac;
+
+ list_for_each(qe, &rxf->mcast_pending_add_q) {
+ mac = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
+ list_del(qe);
+ bfa_q_qe_init(qe);
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+ return BNA_CB_SUCCESS;
+ }
+ }
+
+ list_for_each(qe, &rxf->mcast_active_q) {
+ mac = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
+ list_del(qe);
+ bfa_q_qe_init(qe);
+ list_add_tail(qe, &rxf->mcast_pending_del_q);
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+ return BNA_CB_SUCCESS;
+ }
+ }
+
+ return BNA_CB_INVALID_MAC;
+}
+
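+/*
+ * Replaces the multicast list wholesale: allocate CAM entries for the
+ * new list, skip addresses already active or pending addition, schedule
+ * deletion of active entries absent from the new list, and kick the
+ * FSM only if the h/w actually needs reprogramming.
+ */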
+enum bna_cb_status
+bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
+ void (*cbfn)(struct bnad *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ struct list_head list_head;
+ struct list_head *qe;
+ u8 *mcaddr;
+ struct bna_mac *mac;
+ struct bna_mac *mac1;
+ int skip;
+ int delete;
+ int need_hw_config = 0;
+ int i;
+
+ /* Allocate nodes */
+ INIT_LIST_HEAD(&list_head);
+ for (i = 0, mcaddr = mclist; i < count; i++) {
+ mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
+ if (mac == NULL)
+ goto err_return;
+ bfa_q_qe_init(&mac->qe);
+ memcpy(mac->addr, mcaddr, ETH_ALEN);
+ list_add_tail(&mac->qe, &list_head);
+
+ mcaddr += ETH_ALEN;
+ }
+
+ /* Schedule for addition */
+ while (!list_empty(&list_head)) {
+ bfa_q_deq(&list_head, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+
+ skip = 0;
+
+ /* Skip if already added */
+ list_for_each(qe, &rxf->mcast_active_q) {
+ mac1 = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
+ mac);
+ skip = 1;
+ break;
+ }
+ }
+
+ if (skip)
+ continue;
+
+ /* Skip if pending addition */
+ list_for_each(qe, &rxf->mcast_pending_add_q) {
+ mac1 = (struct bna_mac *)qe;
+ if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
+ mac);
+ skip = 1;
+ break;
+ }
+ }
+
+ if (skip)
+ continue;
+
+ need_hw_config = 1;
+ list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
+ }
+
+ /**
+ * Delete the entries that are in the pending_add_q but not
+ * in the new list
+ */
+ while (!list_empty(&rxf->mcast_pending_add_q)) {
+ bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
+ if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
+ delete = 0;
+ break;
+ }
+ mcaddr += ETH_ALEN;
+ }
+ if (delete)
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ else
+ list_add_tail(&mac->qe, &list_head);
+ }
+ while (!list_empty(&list_head)) {
+ bfa_q_deq(&list_head, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
+ }
+
+ /**
+ * Schedule entries for deletion that are in the active_q but not
+ * in the new list
+ */
+ while (!list_empty(&rxf->mcast_active_q)) {
+ bfa_q_deq(&rxf->mcast_active_q, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
+ if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
+ delete = 0;
+ break;
+ }
+ mcaddr += ETH_ALEN;
+ }
+ if (delete) {
+ list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
+ need_hw_config = 1;
+ } else {
+ list_add_tail(&mac->qe, &list_head);
+ }
+ }
+ while (!list_empty(&list_head)) {
+ bfa_q_deq(&list_head, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ list_add_tail(&mac->qe, &rxf->mcast_active_q);
+ }
+
+ if (need_hw_config) {
+ rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbarg = rx->bna->bnad;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+ } else if (cbfn)
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+
+ return BNA_CB_SUCCESS;
+
+err_return:
+ while (!list_empty(&list_head)) {
+ bfa_q_deq(&list_head, &qe);
+ mac = (struct bna_mac *)qe;
+ bfa_q_qe_init(&mac->qe);
+ bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ }
+
+ return BNA_CB_MCAST_LIST_FULL;
+}
+
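+/*
+ * The VLAN filter table packs 32 VLAN IDs per u32 word:
+ * word = vlan_id >> 5, bit = vlan_id & 0x1F.
+ */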
+void
+bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ int index = (vlan_id >> 5);
+ int bit = (1 << (vlan_id & 0x1F));
+
+ rxf->vlan_filter_table[index] |= bit;
+ if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
+ rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+ }
+}
+
+void
+bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
+{
+ struct bna_rxf *rxf = &rx->rxf;
+ int index = (vlan_id >> 5);
+ int bit = (1 << (vlan_id & 0x1F));
+
+ rxf->vlan_filter_table[index] &= ~bit;
+ if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
+ rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
+ bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+ }
+}
+
+/**
+ * RX
+ */
+#define RXQ_RCB_INIT(q, rxp, qdepth, bna, _id, unmapq_mem) do { \
+ struct bna_doorbell_qset *_qset; \
+ unsigned long off; \
+ (q)->rcb->producer_index = (q)->rcb->consumer_index = 0; \
+ (q)->rcb->q_depth = (qdepth); \
+ (q)->rcb->unmap_q = unmapq_mem; \
+ (q)->rcb->rxq = (q); \
+ (q)->rcb->cq = &(rxp)->cq; \
+ (q)->rcb->bnad = (bna)->bnad; \
+ _qset = (struct bna_doorbell_qset *)0; \
+ off = (unsigned long)&_qset[(q)->rxq_id].rxq[0]; \
+ (q)->rcb->q_dbell = off + \
+ BNA_GET_DOORBELL_BASE_ADDR((bna)->pcidev.pci_bar_kva); \
+ (q)->rcb->id = _id; \
+} while (0)
+
+#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
+ (qcfg)->num_paths : ((qcfg)->num_paths * 2))
+
+#define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\
+ (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
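+/* i.e. DIV_ROUND_UP(size, PAGE_SIZE): full pages plus one more for any
+ * remainder
+ */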
+
+#define call_rx_stop_callback(rx, status) \
+ if ((rx)->stop_cbfn) { \
+ (*(rx)->stop_cbfn)((rx)->stop_cbarg, rx, (status)); \
+ (rx)->stop_cbfn = NULL; \
+ (rx)->stop_cbarg = NULL; \
+ }
+
+/*
+ * Since rx_enable is a synchronous callback, no start_cbfn is required.
+ * Instead, we'll call bnad_rx_post(rxp) so that bnad can post the buffers
+ * for each rxpath.
+ */
+
+#define call_rx_disable_cbfn(rx, status) \
+ if ((rx)->disable_cbfn) { \
+ (*(rx)->disable_cbfn)((rx)->disable_cbarg, \
+ status); \
+ (rx)->disable_cbfn = NULL; \
+ (rx)->disable_cbarg = NULL; \
+ } \
+
+#define rxqs_reqd(type, num_rxqs) \
+ (((type) == BNA_RXP_SINGLE) ? (num_rxqs) : ((num_rxqs) * 2))
+
+#define rx_ib_fail(rx) \
+do { \
+ struct bna_rxp *rxp; \
+ struct list_head *qe; \
+ list_for_each(qe, &(rx)->rxp_q) { \
+ rxp = (struct bna_rxp *)qe; \
+ bna_ib_fail(rxp->cq.ib); \
+ } \
+} while (0)
+
+static void __bna_multi_rxq_stop(struct bna_rxp *, u32 *);
+static void __bna_rxq_start(struct bna_rxq *rxq);
+static void __bna_cq_start(struct bna_cq *cq);
+static void bna_rit_create(struct bna_rx *rx);
+static void bna_rx_cb_multi_rxq_stopped(void *arg, int status);
+static void bna_rx_cb_rxq_stopped_all(void *arg);
+
+bfa_fsm_state_decl(bna_rx, stopped,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, rxf_start_wait,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, started,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
+ struct bna_rx, enum bna_rx_event);
+bfa_fsm_state_decl(bna_rx, rxq_stop_wait,
+ struct bna_rx, enum bna_rx_event);
+
+static struct bfa_sm_table rx_sm_table[] = {
+ {BFA_SM(bna_rx_sm_stopped), BNA_RX_STOPPED},
+ {BFA_SM(bna_rx_sm_rxf_start_wait), BNA_RX_RXF_START_WAIT},
+ {BFA_SM(bna_rx_sm_started), BNA_RX_STARTED},
+ {BFA_SM(bna_rx_sm_rxf_stop_wait), BNA_RX_RXF_STOP_WAIT},
+ {BFA_SM(bna_rx_sm_rxq_stop_wait), BNA_RX_RXQ_STOP_WAIT},
+};
+
+static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
+{
+ struct bna_rxp *rxp;
+ struct list_head *qe_rxp;
+
+ list_for_each(qe_rxp, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe_rxp;
+ rx->rx_cleanup_cbfn(rx->bna->bnad, rxp->cq.ccb);
+ }
+
+ call_rx_stop_callback(rx, BNA_CB_SUCCESS);
+}
+
+static void bna_rx_sm_stopped(struct bna_rx *rx,
+ enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_START:
+ bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
+ break;
+ case RX_E_STOP:
+ call_rx_stop_callback(rx, BNA_CB_SUCCESS);
+ break;
+ case RX_E_FAIL:
+ /* no-op */
+ break;
+ default:
+ bfa_sm_fault(rx->bna, event);
+ break;
+ }
+}
+
+static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
+{
+ struct bna_rxp *rxp;
+ struct list_head *qe_rxp;
+ struct bna_rxq *q0 = NULL, *q1 = NULL;
+
+ /* Setup the RIT */
+ bna_rit_create(rx);
+
+ list_for_each(qe_rxp, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe_rxp;
+ bna_ib_start(rxp->cq.ib);
+ GET_RXQS(rxp, q0, q1);
+ q0->buffer_size = bna_port_mtu_get(&rx->bna->port);
+ __bna_rxq_start(q0);
+ rx->rx_post_cbfn(rx->bna->bnad, q0->rcb);
+ if (q1) {
+ __bna_rxq_start(q1);
+ rx->rx_post_cbfn(rx->bna->bnad, q1->rcb);
+ }
+ __bna_cq_start(&rxp->cq);
+ }
+
+ bna_rxf_start(&rx->rxf);
+}
+
+static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
+ enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_STOP:
+ bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
+ break;
+ case RX_E_FAIL:
+ bfa_fsm_set_state(rx, bna_rx_sm_stopped);
+ rx_ib_fail(rx);
+ bna_rxf_fail(&rx->rxf);
+ break;
+ case RX_E_RXF_STARTED:
+ bfa_fsm_set_state(rx, bna_rx_sm_started);
+ break;
+ default:
+ bfa_sm_fault(rx->bna, event);
+ break;
+ }
+}
+
+void
+bna_rx_sm_started_entry(struct bna_rx *rx)
+{
+ struct bna_rxp *rxp;
+ struct list_head *qe_rxp;
+
+ /* Start IB */
+ list_for_each(qe_rxp, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe_rxp;
+ bna_ib_ack(&rxp->cq.ib->door_bell, 0);
+ }
+
+ bna_llport_admin_up(&rx->bna->port.llport);
+}
+
+void
+bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_FAIL:
+ bna_llport_admin_down(&rx->bna->port.llport);
+ bfa_fsm_set_state(rx, bna_rx_sm_stopped);
+ rx_ib_fail(rx);
+ bna_rxf_fail(&rx->rxf);
+ break;
+ case RX_E_STOP:
+ bna_llport_admin_down(&rx->bna->port.llport);
+ bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
+ break;
+ default:
+ bfa_sm_fault(rx->bna, event);
+ break;
+ }
+}
+
+void
+bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
+{
+ bna_rxf_stop(&rx->rxf);
+}
+
+void
+bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
+{
+ switch (event) {
+ case RX_E_RXF_STOPPED:
+ bfa_fsm_set_state(rx, bna_rx_sm_rxq_stop_wait);
+ break;
+ case RX_E_RXF_STARTED:
+ /**
+ * RxF was in the process of starting up when
+ * RXF_E_STOP was issued. Ignore this event
+ */
+ break;
+ case RX_E_FAIL:
+ bfa_fsm_set_state(rx, bna_rx_sm_stopped);
+ rx_ib_fail(rx);
+ bna_rxf_fail(&rx->rxf);
+ break;
+ default:
+ bfa_sm_fault(rx->bna, event);
+ break;
+ }
+}
+
+void
+bna_rx_sm_rxq_stop_wait_entry(struct bna_rx *rx)
+{
+ struct bna_rxp *rxp = NULL;
+ struct bna_rxq *q0 = NULL;
+ struct bna_rxq *q1 = NULL;
+ struct list_head *qe;
+ u32 rxq_mask[2] = {0, 0};
+
+ /* Only one call to multi-rxq-stop for all RXPs in this RX */
+ bfa_wc_up(&rx->rxq_stop_wc);
+ list_for_each(qe, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe;
+ GET_RXQS(rxp, q0, q1);
+ if (q0->rxq_id < 32)
+ rxq_mask[0] |= ((u32)1 << q0->rxq_id);
+ else
+ rxq_mask[1] |= ((u32)1 << (q0->rxq_id - 32));
+ if (q1) {
+ if (q1->rxq_id < 32)
+ rxq_mask[0] |= ((u32)1 << q1->rxq_id);
+ else
+ rxq_mask[1] |= ((u32)
+ 1 << (q1->rxq_id - 32));
+ }
+ }
+
+ __bna_multi_rxq_stop(rxp, rxq_mask);
+}
+
+void
+bna_rx_sm_rxq_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
+{
+ struct bna_rxp *rxp = NULL;
+ struct list_head *qe;
+
+ switch (event) {
+ case RX_E_RXQ_STOPPED:
+ list_for_each(qe, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe;
+ bna_ib_stop(rxp->cq.ib);
+ }
+ /* Fall through */
+ case RX_E_FAIL:
+ bfa_fsm_set_state(rx, bna_rx_sm_stopped);
+ break;
+ default:
+ bfa_sm_fault(rx->bna, event);
+ break;
+ }
+}
+
+void
+__bna_multi_rxq_stop(struct bna_rxp *rxp, u32 *rxq_id_mask)
+{
+ struct bfi_ll_q_stop_req ll_req;
+
+ bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RXQ_STOP_REQ, 0);
+ ll_req.q_id_mask[0] = htonl(rxq_id_mask[0]);
+ ll_req.q_id_mask[1] = htonl(rxq_id_mask[1]);
+ bna_mbox_qe_fill(&rxp->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_rx_cb_multi_rxq_stopped, rxp);
+ bna_mbox_send(rxp->rx->bna, &rxp->mbox_qe);
+}
+
+void
+__bna_rxq_start(struct bna_rxq *rxq)
+{
+ struct bna_rxtx_q_mem *q_mem;
+ struct bna_rxq_mem rxq_cfg, *rxq_mem;
+ struct bna_dma_addr cur_q_addr;
+ struct bna_qpt *qpt;
+ u32 pg_num;
+ struct bna *bna = rxq->rx->bna;
+ void __iomem *base_addr;
+ unsigned long off;
+
+ qpt = &rxq->qpt;
+ cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
+
+ rxq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
+ rxq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
+ rxq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
+ rxq_cfg.cur_q_entry_hi = cur_q_addr.msb;
+
+ rxq_cfg.pg_cnt_n_prd_ptr = ((u32)qpt->page_count << 16) | 0x0;
+ rxq_cfg.entry_n_pg_size = ((u32)(BFI_RXQ_WI_SIZE >> 2) << 16) |
+ (qpt->page_size >> 2);
+ rxq_cfg.sg_n_cq_n_cns_ptr =
+ ((u32)(rxq->rxp->cq.cq_id & 0xff) << 16) | 0x0;
+ rxq_cfg.buf_sz_n_q_state = ((u32)rxq->buffer_size << 16) |
+ BNA_Q_IDLE_STATE;
+ rxq_cfg.next_qid = 0x0 | (0x3 << 8);
+
+ /* Write the page number register */
+ pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
+ HQM_RXTX_Q_RAM_BASE_OFFSET);
+ writel(pg_num, bna->regs.page_addr);
+
+ /* Write to h/w */
+ base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
+ HQM_RXTX_Q_RAM_BASE_OFFSET);
+
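+ /*
+ * Index off a NULL base pointer so that &q_mem[rxq_id].rxq below
+ * yields this RxQ's offset within the queue RAM window.
+ */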
+ q_mem = (struct bna_rxtx_q_mem *)0;
+ rxq_mem = &q_mem[rxq->rxq_id].rxq;
+
+ off = (unsigned long)&rxq_mem->pg_tbl_addr_lo;
+ writel(htonl(rxq_cfg.pg_tbl_addr_lo), base_addr + off);
+
+ off = (unsigned long)&rxq_mem->pg_tbl_addr_hi;
+ writel(htonl(rxq_cfg.pg_tbl_addr_hi), base_addr + off);
+
+ off = (unsigned long)&rxq_mem->cur_q_entry_lo;
+ writel(htonl(rxq_cfg.cur_q_entry_lo), base_addr + off);
+
+ off = (unsigned long)&rxq_mem->cur_q_entry_hi;
+ writel(htonl(rxq_cfg.cur_q_entry_hi), base_addr + off);
+
+ off = (unsigned long)&rxq_mem->pg_cnt_n_prd_ptr;
+ writel(rxq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
+
+ off = (unsigned long)&rxq_mem->entry_n_pg_size;
+ writel(rxq_cfg.entry_n_pg_size, base_addr + off);
+
+ off = (unsigned long)&rxq_mem->sg_n_cq_n_cns_ptr;
+ writel(rxq_cfg.sg_n_cq_n_cns_ptr, base_addr + off);
+
+ off = (unsigned long)&rxq_mem->buf_sz_n_q_state;
+ writel(rxq_cfg.buf_sz_n_q_state, base_addr + off);
+
+ off = (unsigned long)&rxq_mem->next_qid;
+ writel(rxq_cfg.next_qid, base_addr + off);
+
+ rxq->rcb->producer_index = 0;
+ rxq->rcb->consumer_index = 0;
+}
+
+void
+__bna_cq_start(struct bna_cq *cq)
+{
+ struct bna_cq_mem cq_cfg, *cq_mem;
+ const struct bna_qpt *qpt;
+ struct bna_dma_addr cur_q_addr;
+ u32 pg_num;
+ struct bna *bna = cq->rx->bna;
+ void __iomem *base_addr;
+ unsigned long off;
+
+ qpt = &cq->qpt;
+ cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
+
+ /*
+ * Fill out structure, to be subsequently written
+ * to hardware
+ */
+ cq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
+ cq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
+ cq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
+ cq_cfg.cur_q_entry_hi = cur_q_addr.msb;
+
+ cq_cfg.pg_cnt_n_prd_ptr = (qpt->page_count << 16) | 0x0;
+ cq_cfg.entry_n_pg_size =
+ ((u32)(BFI_CQ_WI_SIZE >> 2) << 16) | (qpt->page_size >> 2);
+ cq_cfg.int_blk_n_cns_ptr = ((((u32)cq->ib_seg_offset) << 24) |
+ ((u32)(cq->ib->ib_id & 0xff) << 16) | 0x0);
+ cq_cfg.q_state = BNA_Q_IDLE_STATE;
+
+ /* Write the page number register */
+ pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
+ HQM_CQ_RAM_BASE_OFFSET);
+
+ writel(pg_num, bna->regs.page_addr);
+
+ /* H/W write */
+ base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
+ HQM_CQ_RAM_BASE_OFFSET);
+
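+ /* NULL-based array again: &cq_mem[cq_id].<field> is a RAM offset */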
+ cq_mem = (struct bna_cq_mem *)0;
+
+ off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_lo;
+ writel(htonl(cq_cfg.pg_tbl_addr_lo), base_addr + off);
+
+ off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_hi;
+ writel(htonl(cq_cfg.pg_tbl_addr_hi), base_addr + off);
+
+ off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_lo;
+ writel(htonl(cq_cfg.cur_q_entry_lo), base_addr + off);
+
+ off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_hi;
+ writel(htonl(cq_cfg.cur_q_entry_hi), base_addr + off);
+
+ off = (unsigned long)&cq_mem[cq->cq_id].pg_cnt_n_prd_ptr;
+ writel(cq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
+
+ off = (unsigned long)&cq_mem[cq->cq_id].entry_n_pg_size;
+ writel(cq_cfg.entry_n_pg_size, base_addr + off);
+
+ off = (unsigned long)&cq_mem[cq->cq_id].int_blk_n_cns_ptr;
+ writel(cq_cfg.int_blk_n_cns_ptr, base_addr + off);
+
+ off = (unsigned long)&cq_mem[cq->cq_id].q_state;
+ writel(cq_cfg.q_state, base_addr + off);
+
+ cq->ccb->producer_index = 0;
+ *(cq->ccb->hw_producer_index) = 0;
+}
+
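+/*
+ * Fill this Rx's RIT segment: one entry per rx-path, pairing the
+ * large (data) RxQ id with the small/header RxQ id.
+ */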
+void
+bna_rit_create(struct bna_rx *rx)
+{
+ struct list_head *qe_rxp;
+ struct bna *bna;
+ struct bna_rxp *rxp;
+ struct bna_rxq *q0 = NULL;
+ struct bna_rxq *q1 = NULL;
+ int offset;
+
+ bna = rx->bna;
+
+ offset = 0;
+ list_for_each(qe_rxp, &rx->rxp_q) {
+ rxp = (struct bna_rxp *)qe_rxp;
+ GET_RXQS(rxp, q0, q1);
+ rx->rxf.rit_segment->rit[offset].large_rxq_id = q0->rxq_id;
+ rx->rxf.rit_segment->rit[offset].small_rxq_id =
+ (q1 ? q1->rxq_id : 0);
+ offset++;
+ }
+}
+
+int
+_rx_can_satisfy(struct bna_rx_mod *rx_mod,
+ struct bna_rx_config *rx_cfg)
+{
+ if ((rx_mod->rx_free_count == 0) ||
+ (rx_mod->rxp_free_count == 0) ||
+ (rx_mod->rxq_free_count == 0))
+ return 0;
+
+ if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
+ if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
+ (rx_mod->rxq_free_count < rx_cfg->num_paths))
+ return 0;
+ } else {
+ if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
+ (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
+ return 0;
+ }
+
+ if (!bna_rit_mod_can_satisfy(&rx_mod->bna->rit_mod, rx_cfg->num_paths))
+ return 0;
+
+ return 1;
+}
+
+struct bna_rxq *
+_get_free_rxq(struct bna_rx_mod *rx_mod)
+{
+ struct bna_rxq *rxq = NULL;
+ struct list_head *qe = NULL;
+
+ bfa_q_deq(&rx_mod->rxq_free_q, &qe);
+ if (qe) {
+ rx_mod->rxq_free_count--;
+ rxq = (struct bna_rxq *)qe;
+ }
+ return rxq;
+}
+
+void
+_put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
+{
+ bfa_q_qe_init(&rxq->qe);
+ list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
+ rx_mod->rxq_free_count++;
+}
+
+struct bna_rxp *
+_get_free_rxp(struct bna_rx_mod *rx_mod)
+{
+ struct list_head *qe = NULL;
+ struct bna_rxp *rxp = NULL;
+
+ bfa_q_deq(&rx_mod->rxp_free_q, &qe);
+ if (qe) {
+ rx_mod->rxp_free_count--;
+
+ rxp = (struct bna_rxp *)qe;
+ }
+
+ return rxp;
+}
+
+void
+_put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
+{
+ bfa_q_qe_init(&rxp->qe);
+ list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
+ rx_mod->rxp_free_count++;
+}
+
+struct bna_rx *
+_get_free_rx(struct bna_rx_mod *rx_mod)
+{
+ struct list_head *qe = NULL;
+ struct bna_rx *rx = NULL;
+
+ bfa_q_deq(&rx_mod->rx_free_q, &qe);
+ if (qe) {
+ rx_mod->rx_free_count--;
+
+ rx = (struct bna_rx *)qe;
+ bfa_q_qe_init(qe);
+ list_add_tail(&rx->qe, &rx_mod->rx_active_q);
+ }
+
+ return rx;
+}
+
+void
+_put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
+{
+ bfa_q_qe_init(&rx->qe);
+ list_add_tail(&rx->qe, &rx_mod->rx_free_q);
+ rx_mod->rx_free_count++;
+}
+
+void
+_rx_init(struct bna_rx *rx, struct bna *bna)
+{
+ rx->bna = bna;
+ rx->rx_flags = 0;
+
+ INIT_LIST_HEAD(&rx->rxp_q);
+
+ rx->rxq_stop_wc.wc_resume = bna_rx_cb_rxq_stopped_all;
+ rx->rxq_stop_wc.wc_cbarg = rx;
+ rx->rxq_stop_wc.wc_count = 0;
+
+ rx->stop_cbfn = NULL;
+ rx->stop_cbarg = NULL;
+}
+
+void
+_rxp_add_rxqs(struct bna_rxp *rxp,
+ struct bna_rxq *q0,
+ struct bna_rxq *q1)
+{
+ switch (rxp->type) {
+ case BNA_RXP_SINGLE:
+ rxp->rxq.single.only = q0;
+ rxp->rxq.single.reserved = NULL;
+ break;
+ case BNA_RXP_SLR:
+ rxp->rxq.slr.large = q0;
+ rxp->rxq.slr.small = q1;
+ break;
+ case BNA_RXP_HDS:
+ rxp->rxq.hds.data = q0;
+ rxp->rxq.hds.hdr = q1;
+ break;
+ default:
+ break;
+ }
+}
+
+void
+_rxq_qpt_init(struct bna_rxq *rxq,
+ struct bna_rxp *rxp,
+ u32 page_count,
+ u32 page_size,
+ struct bna_mem_descr *qpt_mem,
+ struct bna_mem_descr *swqpt_mem,
+ struct bna_mem_descr *page_mem)
+{
+ int i;
+
+ rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
+ rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
+ rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
+ rxq->qpt.page_count = page_count;
+ rxq->qpt.page_size = page_size;
+
+ rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
+
+ for (i = 0; i < rxq->qpt.page_count; i++) {
+ rxq->rcb->sw_qpt[i] = page_mem[i].kva;
+ ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
+ page_mem[i].dma.lsb;
+ ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
+ page_mem[i].dma.msb;
+ }
+}
+
+void
+_rxp_cqpt_setup(struct bna_rxp *rxp,
+ u32 page_count,
+ u32 page_size,
+ struct bna_mem_descr *qpt_mem,
+ struct bna_mem_descr *swqpt_mem,
+ struct bna_mem_descr *page_mem)
+{
+ int i;
+
+ rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
+ rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
+ rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
+ rxp->cq.qpt.page_count = page_count;
+ rxp->cq.qpt.page_size = page_size;
+
+ rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
+
+ for (i = 0; i < rxp->cq.qpt.page_count; i++) {
+ rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva;
+
+ ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
+ page_mem[i].dma.lsb;
+ ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
+ page_mem[i].dma.msb;
+ }
+}
+
+void
+_rx_add_rxp(struct bna_rx *rx, struct bna_rxp *rxp)
+{
+ list_add_tail(&rxp->qe, &rx->rxp_q);
+}
+
+void
+_init_rxmod_queues(struct bna_rx_mod *rx_mod)
+{
+ INIT_LIST_HEAD(&rx_mod->rx_free_q);
+ INIT_LIST_HEAD(&rx_mod->rxq_free_q);
+ INIT_LIST_HEAD(&rx_mod->rxp_free_q);
+ INIT_LIST_HEAD(&rx_mod->rx_active_q);
+
+ rx_mod->rx_free_count = 0;
+ rx_mod->rxq_free_count = 0;
+ rx_mod->rxp_free_count = 0;
+}
+
+void
+_rx_ctor(struct bna_rx *rx, int id)
+{
+ bfa_q_qe_init(&rx->qe);
+ INIT_LIST_HEAD(&rx->rxp_q);
+ rx->bna = NULL;
+
+ rx->rxf.rxf_id = id;
+
+ /* FIXME: mbox_qe ctor()?? */
+ bfa_q_qe_init(&rx->mbox_qe.qe);
+
+ rx->stop_cbfn = NULL;
+ rx->stop_cbarg = NULL;
+}
+
+void
+bna_rx_cb_multi_rxq_stopped(void *arg, int status)
+{
+ struct bna_rxp *rxp = (struct bna_rxp *)arg;
+
+ bfa_wc_down(&rxp->rx->rxq_stop_wc);
+}
+
+void
+bna_rx_cb_rxq_stopped_all(void *arg)
+{
+ struct bna_rx *rx = (struct bna_rx *)arg;
+
+ bfa_fsm_send_event(rx, RX_E_RXQ_STOPPED);
+}
+
+void
+bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
+ enum bna_cb_status status)
+{
+ struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
+
+ bfa_wc_down(&rx_mod->rx_stop_wc);
+}
+
+void
+bna_rx_mod_cb_rx_stopped_all(void *arg)
+{
+ struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
+
+ if (rx_mod->stop_cbfn)
+ rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
+ rx_mod->stop_cbfn = NULL;
+}
+
+void
+bna_rx_start(struct bna_rx *rx)
+{
+ rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
+ if (rx->rx_flags & BNA_RX_F_ENABLE)
+ bfa_fsm_send_event(rx, RX_E_START);
+}
+
+void
+bna_rx_stop(struct bna_rx *rx)
+{
+ rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
+ if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
+ bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx, BNA_CB_SUCCESS);
+ else {
+ rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
+ rx->stop_cbarg = &rx->bna->rx_mod;
+ bfa_fsm_send_event(rx, RX_E_STOP);
+ }
+}
+
+void
+bna_rx_fail(struct bna_rx *rx)
+{
+ /* Indicate port is not enabled, and failed */
+ rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
+ rx->rx_flags |= BNA_RX_F_PORT_FAILED;
+ bfa_fsm_send_event(rx, RX_E_FAIL);
+}
+
+void
+bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)
+{
+ bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
+ if (rx->rxf.rxf_id < 32)
+ rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
+ else
+ rx->bna->rx_mod.rxf_bmap[1] |=
+ ((u32)1 << (rx->rxf.rxf_id - 32));
+}
+
+void
+bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)
+{
+ bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
+ if (rx->rxf.rxf_id < 32)
+ rx->bna->rx_mod.rxf_bmap[0] &= ~((u32)1 << rx->rxf.rxf_id);
+ else
+ rx->bna->rx_mod.rxf_bmap[1] &=
+ ~((u32)1 << (rx->rxf.rxf_id - 32));
+}
+
+void
+bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
+{
+ struct bna_rx *rx;
+ struct list_head *qe;
+
+ rx_mod->flags |= BNA_RX_MOD_F_PORT_STARTED;
+ if (type == BNA_RX_T_LOOPBACK)
+ rx_mod->flags |= BNA_RX_MOD_F_PORT_LOOPBACK;
+
+ list_for_each(qe, &rx_mod->rx_active_q) {
+ rx = (struct bna_rx *)qe;
+ if (rx->type == type)
+ bna_rx_start(rx);
+ }
+}
+
+void
+bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
+{
+ struct bna_rx *rx;
+ struct list_head *qe;
+
+ rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
+ rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;
+
+ rx_mod->stop_cbfn = bna_port_cb_rx_stopped;
+
+ /**
+ * Before calling bna_rx_stop(), increment rx_stop_wc as many times
+ * as we are going to call bna_rx_stop
+ */
+ list_for_each(qe, &rx_mod->rx_active_q) {
+ rx = (struct bna_rx *)qe;
+ if (rx->type == type)
+ bfa_wc_up(&rx_mod->rx_stop_wc);
+ }
+
+ if (rx_mod->rx_stop_wc.wc_count == 0) {
+ rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
+ rx_mod->stop_cbfn = NULL;
+ return;
+ }
+
+ list_for_each(qe, &rx_mod->rx_active_q) {
+ rx = (struct bna_rx *)qe;
+ if (rx->type == type)
+ bna_rx_stop(rx);
+ }
+}
+
+void
+bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
+{
+ struct bna_rx *rx;
+ struct list_head *qe;
+
+ rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
+ rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;
+
+ list_for_each(qe, &rx_mod->rx_active_q) {
+ rx = (struct bna_rx *)qe;
+ bna_rx_fail(rx);
+ }
+}
+
+void
+bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ int index;
+ struct bna_rx *rx_ptr;
+ struct bna_rxp *rxp_ptr;
+ struct bna_rxq *rxq_ptr;
+
+ rx_mod->bna = bna;
+ rx_mod->flags = 0;
+
+ rx_mod->rx = (struct bna_rx *)
+ res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
+ rx_mod->rxp = (struct bna_rxp *)
+ res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
+ rx_mod->rxq = (struct bna_rxq *)
+ res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
+
+ /* Initialize the queues */
+ _init_rxmod_queues(rx_mod);
+
+ /* Build RX queues */
+ for (index = 0; index < BFI_MAX_RXQ; index++) {
+ rx_ptr = &rx_mod->rx[index];
+ _rx_ctor(rx_ptr, index);
+ list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
+ rx_mod->rx_free_count++;
+ }
+
+ /* build RX-path queue */
+ for (index = 0; index < BFI_MAX_RXQ; index++) {
+ rxp_ptr = &rx_mod->rxp[index];
+ rxp_ptr->cq.cq_id = index;
+ bfa_q_qe_init(&rxp_ptr->qe);
+ list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
+ rx_mod->rxp_free_count++;
+ }
+
+ /* build RXQ queue */
+ for (index = 0; index < BFI_MAX_RXQ; index++) {
+ rxq_ptr = &rx_mod->rxq[index];
+ rxq_ptr->rxq_id = index;
+
+ bfa_q_qe_init(&rxq_ptr->qe);
+ list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
+ rx_mod->rxq_free_count++;
+ }
+
+ rx_mod->rx_stop_wc.wc_resume = bna_rx_mod_cb_rx_stopped_all;
+ rx_mod->rx_stop_wc.wc_cbarg = rx_mod;
+ rx_mod->rx_stop_wc.wc_count = 0;
+}
+
+void
+bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
+{
+ struct list_head *qe;
+ int i;
+
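+ /*
+ * Walk the free lists; the counts below are unused (presumably
+ * left over from sanity-check assertions).
+ */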
+ i = 0;
+ list_for_each(qe, &rx_mod->rx_free_q)
+ i++;
+
+ i = 0;
+ list_for_each(qe, &rx_mod->rxp_free_q)
+ i++;
+
+ i = 0;
+ list_for_each(qe, &rx_mod->rxq_free_q)
+ i++;
+
+ rx_mod->bna = NULL;
+}
+
+int
+bna_rx_state_get(struct bna_rx *rx)
+{
+ return bfa_sm_to_state(rx_sm_table, rx->fsm);
+}
+
+void
+bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
+{
+ u32 cq_size, hq_size, dq_size;
+ u32 cpage_count, hpage_count, dpage_count;
+ struct bna_mem_info *mem_info;
+ u32 cq_depth;
+ u32 hq_depth;
+ u32 dq_depth;
+
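+ /*
+ * The CQ must hold a completion for every buffer posted on the
+ * data and header queues combined, hence the summed depth.
+ */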
+ dq_depth = q_cfg->q_depth;
+ hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
+ cq_depth = dq_depth + hq_depth;
+
+ BNA_TO_POWER_OF_2_HIGH(cq_depth);
+ cq_size = cq_depth * BFI_CQ_WI_SIZE;
+ cq_size = ALIGN(cq_size, PAGE_SIZE);
+ cpage_count = SIZE_TO_PAGES(cq_size);
+
+ BNA_TO_POWER_OF_2_HIGH(dq_depth);
+ dq_size = dq_depth * BFI_RXQ_WI_SIZE;
+ dq_size = ALIGN(dq_size, PAGE_SIZE);
+ dpage_count = SIZE_TO_PAGES(dq_size);
+
+ if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
+ BNA_TO_POWER_OF_2_HIGH(hq_depth);
+ hq_size = hq_depth * BFI_RXQ_WI_SIZE;
+ hq_size = ALIGN(hq_size, PAGE_SIZE);
+ hpage_count = SIZE_TO_PAGES(hq_size);
+ } else {
+ hpage_count = 0;
+ }
+
+ /* CCB structures */
+ res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = sizeof(struct bna_ccb);
+ mem_info->num = q_cfg->num_paths;
+
+ /* RCB structures */
+ res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = sizeof(struct bna_rcb);
+ mem_info->num = BNA_GET_RXQS(q_cfg);
+
+ /* Completion QPT */
+ res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
+ mem_info->num = q_cfg->num_paths;
+
+ /* Completion s/w QPT */
+ res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = cpage_count * sizeof(void *);
+ mem_info->num = q_cfg->num_paths;
+
+ /* Completion QPT pages */
+ res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = PAGE_SIZE;
+ mem_info->num = cpage_count * q_cfg->num_paths;
+
+ /* Data QPTs */
+ res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
+ mem_info->num = q_cfg->num_paths;
+
+ /* Data s/w QPTs */
+ res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = dpage_count * sizeof(void *);
+ mem_info->num = q_cfg->num_paths;
+
+ /* Data QPT pages */
+ res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = PAGE_SIZE;
+ mem_info->num = dpage_count * q_cfg->num_paths;
+
+ /* Hdr QPTs */
+ res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
+ mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
+
+ /* Hdr s/w QPTs */
+ res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = hpage_count * sizeof(void *);
+ mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
+
+ /* Hdr QPT pages */
+ res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = (hpage_count ? PAGE_SIZE : 0);
+ mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0);
+
+ /* RX Interrupts */
+ res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
+ res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
+ res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
+}
+
+struct bna_rx *
+bna_rx_create(struct bna *bna, struct bnad *bnad,
+ struct bna_rx_config *rx_cfg,
+ struct bna_rx_event_cbfn *rx_cbfn,
+ struct bna_res_info *res_info,
+ void *priv)
+{
+ struct bna_rx_mod *rx_mod = &bna->rx_mod;
+ struct bna_rx *rx;
+ struct bna_rxp *rxp;
+ struct bna_rxq *q0;
+ struct bna_rxq *q1;
+ struct bna_intr_info *intr_info;
+ u32 page_count;
+ struct bna_mem_descr *ccb_mem;
+ struct bna_mem_descr *rcb_mem;
+ struct bna_mem_descr *unmapq_mem;
+ struct bna_mem_descr *cqpt_mem;
+ struct bna_mem_descr *cswqpt_mem;
+ struct bna_mem_descr *cpage_mem;
+ struct bna_mem_descr *hqpt_mem; /* Header/Small Q qpt */
+ struct bna_mem_descr *dqpt_mem; /* Data/Large Q qpt */
+ struct bna_mem_descr *hsqpt_mem; /* s/w qpt for hdr */
+ struct bna_mem_descr *dsqpt_mem; /* s/w qpt for data */
+ struct bna_mem_descr *hpage_mem; /* hdr page mem */
+ struct bna_mem_descr *dpage_mem; /* data page mem */
+ int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0, ret;
+ int dpage_count, hpage_count, rcb_idx;
+ struct bna_ib_config ibcfg;
+
+ /* Fail if we don't have enough RXPs and RXQs */
+ if (!_rx_can_satisfy(rx_mod, rx_cfg))
+ return NULL;
+
+ /* Initialize resource pointers */
+ intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
+ ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
+ rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
+ unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
+ cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
+ cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
+ cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
+ hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
+ dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
+ hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
+ dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
+ hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
+ dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
+
+ /* Compute q depth & page count */
+ page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num /
+ rx_cfg->num_paths;
+
+ dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num /
+ rx_cfg->num_paths;
+
+ hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num /
+ rx_cfg->num_paths;
+ /* Get RX pointer */
+ rx = _get_free_rx(rx_mod);
+ _rx_init(rx, bna);
+ rx->priv = priv;
+ rx->type = rx_cfg->rx_type;
+
+ rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
+ rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
+ rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
+ rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
+ /* Following callbacks are mandatory */
+ rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
+ rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
+
+ if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_STARTED) {
+ switch (rx->type) {
+ case BNA_RX_T_REGULAR:
+ if (!(rx->bna->rx_mod.flags &
+ BNA_RX_MOD_F_PORT_LOOPBACK))
+ rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
+ break;
+ case BNA_RX_T_LOOPBACK:
+ if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_LOOPBACK)
+ rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
+ break;
+ }
+ }
+
+ for (i = 0, rcb_idx = 0; i < rx_cfg->num_paths; i++) {
+ rxp = _get_free_rxp(rx_mod);
+ rxp->type = rx_cfg->rxp_type;
+ rxp->rx = rx;
+ rxp->cq.rx = rx;
+
+ /* Get required RXQs, and queue them to rx-path */
+ q0 = _get_free_rxq(rx_mod);
+ if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
+ q1 = NULL;
+ else
+ q1 = _get_free_rxq(rx_mod);
+
+ /* Initialize IB */
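+ /*
+ * A single vector means all rx-paths share one interrupt;
+ * otherwise each path gets its own MSI-X vector.
+ */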
+ if (1 == intr_info->num) {
+ rxp->cq.ib = bna_ib_get(&bna->ib_mod,
+ intr_info->intr_type,
+ intr_info->idl[0].vector);
+ rxp->vector = intr_info->idl[0].vector;
+ } else {
+ rxp->cq.ib = bna_ib_get(&bna->ib_mod,
+ intr_info->intr_type,
+ intr_info->idl[i].vector);
+
+ /* Map the MSI-x vector used for this RXP */
+ rxp->vector = intr_info->idl[i].vector;
+ }
+
+ rxp->cq.ib_seg_offset = bna_ib_reserve_idx(rxp->cq.ib);
+
+ ibcfg.coalescing_timeo = BFI_RX_COALESCING_TIMEO;
+ ibcfg.interpkt_count = BFI_RX_INTERPKT_COUNT;
+ ibcfg.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
+ ibcfg.ctrl_flags = BFI_IB_CF_INT_ENABLE;
+
+ ret = bna_ib_config(rxp->cq.ib, &ibcfg);
+
+ /* Link rxqs to rxp */
+ _rxp_add_rxqs(rxp, q0, q1);
+
+ /* Link rxp to rx */
+ _rx_add_rxp(rx, rxp);
+
+ q0->rx = rx;
+ q0->rxp = rxp;
+
+ /* Initialize RCB for the large / data q */
+ q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
+ RXQ_RCB_INIT(q0, rxp, rx_cfg->q_depth, bna, 0,
+ (void *)unmapq_mem[rcb_idx].kva);
+ rcb_idx++;
+ q0->rx_packets = q0->rx_bytes = 0;
+ q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
+
+ /* Initialize RXQs */
+ _rxq_qpt_init(q0, rxp, dpage_count, PAGE_SIZE,
+ &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]);
+ q0->rcb->page_idx = dpage_idx;
+ q0->rcb->page_count = dpage_count;
+ dpage_idx += dpage_count;
+
+ /* Call bnad to complete rcb setup */
+ if (rx->rcb_setup_cbfn)
+ rx->rcb_setup_cbfn(bnad, q0->rcb);
+
+ if (q1) {
+ q1->rx = rx;
+ q1->rxp = rxp;
+
+ q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
+ RXQ_RCB_INIT(q1, rxp, rx_cfg->q_depth, bna, 1,
+ (void *)unmapq_mem[rcb_idx].kva);
+ rcb_idx++;
+ q1->buffer_size = rx_cfg->small_buff_size;
+ q1->rx_packets = q1->rx_bytes = 0;
+ q1->rx_packets_with_error =
+ q1->rxbuf_alloc_failed = 0;
+
+ _rxq_qpt_init(q1, rxp, hpage_count, PAGE_SIZE,
+ &hqpt_mem[i], &hsqpt_mem[i],
+ &hpage_mem[hpage_idx]);
+ q1->rcb->page_idx = hpage_idx;
+ q1->rcb->page_count = hpage_count;
+ hpage_idx += hpage_count;
+
+ /* Call bnad to complete rcb setup */
+ if (rx->rcb_setup_cbfn)
+ rx->rcb_setup_cbfn(bnad, q1->rcb);
+ }
+ /* Setup RXP::CQ */
+ rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
+ _rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
+ &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]);
+ rxp->cq.ccb->page_idx = cpage_idx;
+ rxp->cq.ccb->page_count = page_count;
+ cpage_idx += page_count;
+
+ rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
+ rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
+
+ rxp->cq.ccb->producer_index = 0;
+ rxp->cq.ccb->q_depth = rx_cfg->q_depth +
+ ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
+ 0 : rx_cfg->q_depth);
+ rxp->cq.ccb->i_dbell = &rxp->cq.ib->door_bell;
+ rxp->cq.ccb->rcb[0] = q0->rcb;
+ if (q1)
+ rxp->cq.ccb->rcb[1] = q1->rcb;
+ rxp->cq.ccb->cq = &rxp->cq;
+ rxp->cq.ccb->bnad = bna->bnad;
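+ /*
+ * hw_producer_index points into the IB's host-resident segment,
+ * which the adapter updates as completions arrive.
+ */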
+ rxp->cq.ccb->hw_producer_index =
+ ((volatile u32 *)rxp->cq.ib->ib_seg_host_addr_kva +
+ (rxp->cq.ib_seg_offset * BFI_IBIDX_SIZE));
+ *(rxp->cq.ccb->hw_producer_index) = 0;
+ rxp->cq.ccb->intr_type = intr_info->intr_type;
+ rxp->cq.ccb->intr_vector = (intr_info->num == 1) ?
+ intr_info->idl[0].vector :
+ intr_info->idl[i].vector;
+ rxp->cq.ccb->rx_coalescing_timeo =
+ rxp->cq.ib->ib_config.coalescing_timeo;
+ rxp->cq.ccb->id = i;
+
+ /* Call bnad to complete CCB setup */
+ if (rx->ccb_setup_cbfn)
+ rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
+
+ } /* for each rx-path */
+
+ bna_rxf_init(&rx->rxf, rx, rx_cfg);
+
+ bfa_fsm_set_state(rx, bna_rx_sm_stopped);
+
+ return rx;
+}
+
+void
+bna_rx_destroy(struct bna_rx *rx)
+{
+ struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
+ struct bna_ib_mod *ib_mod = &rx->bna->ib_mod;
+ struct bna_rxq *q0 = NULL;
+ struct bna_rxq *q1 = NULL;
+ struct bna_rxp *rxp;
+ struct list_head *qe;
+
+ bna_rxf_uninit(&rx->rxf);
+
+ while (!list_empty(&rx->rxp_q)) {
+ bfa_q_deq(&rx->rxp_q, &rxp);
+ GET_RXQS(rxp, q0, q1);
+ /* Callback to bnad for destroying RCB */
+ if (rx->rcb_destroy_cbfn)
+ rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
+ q0->rcb = NULL;
+ q0->rxp = NULL;
+ q0->rx = NULL;
+ _put_free_rxq(rx_mod, q0);
+ if (q1) {
+ /* Callback to bnad for destroying RCB */
+ if (rx->rcb_destroy_cbfn)
+ rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
+ q1->rcb = NULL;
+ q1->rxp = NULL;
+ q1->rx = NULL;
+ _put_free_rxq(rx_mod, q1);
+ }
+ rxp->rxq.slr.large = NULL;
+ rxp->rxq.slr.small = NULL;
+ if (rxp->cq.ib) {
+ if (rxp->cq.ib_seg_offset != 0xff)
+ bna_ib_release_idx(rxp->cq.ib,
+ rxp->cq.ib_seg_offset);
+ bna_ib_put(ib_mod, rxp->cq.ib);
+ rxp->cq.ib = NULL;
+ }
+ /* Callback to bnad for destroying CCB */
+ if (rx->ccb_destroy_cbfn)
+ rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
+ rxp->cq.ccb = NULL;
+ rxp->rx = NULL;
+ _put_free_rxp(rx_mod, rxp);
+ }
+
+ list_for_each(qe, &rx_mod->rx_active_q) {
+ if (qe == &rx->qe) {
+ list_del(&rx->qe);
+ bfa_q_qe_init(&rx->qe);
+ break;
+ }
+ }
+
+ rx->bna = NULL;
+ rx->priv = NULL;
+ _put_free_rx(rx_mod, rx);
+}
+
+void
+bna_rx_enable(struct bna_rx *rx)
+{
+ if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
+ return;
+
+ rx->rx_flags |= BNA_RX_F_ENABLE;
+ if (rx->rx_flags & BNA_RX_F_PORT_ENABLED)
+ bfa_fsm_send_event(rx, RX_E_START);
+}
+
+void
+bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
+ void (*cbfn)(void *, struct bna_rx *,
+ enum bna_cb_status))
+{
+ if (type == BNA_SOFT_CLEANUP) {
+ /* h/w should not be accessed. Treat as if we're stopped */
+ (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+ } else {
+ rx->stop_cbfn = cbfn;
+ rx->stop_cbarg = rx->bna->bnad;
+
+ rx->rx_flags &= ~BNA_RX_F_ENABLE;
+
+ bfa_fsm_send_event(rx, RX_E_STOP);
+ }
+}
+
+/**
+ * TX
+ */
+#define call_tx_stop_cbfn(tx, status)\
+do {\
+ if ((tx)->stop_cbfn)\
+ (tx)->stop_cbfn((tx)->stop_cbarg, (tx), status);\
+ (tx)->stop_cbfn = NULL;\
+ (tx)->stop_cbarg = NULL;\
+} while (0)
+
+#define call_tx_prio_change_cbfn(tx, status)\
+do {\
+ if ((tx)->prio_change_cbfn)\
+ (tx)->prio_change_cbfn((tx)->bna->bnad, (tx), status);\
+ (tx)->prio_change_cbfn = NULL;\
+} while (0)
+
+static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx,
+ enum bna_cb_status status);
+static void bna_tx_cb_txq_stopped(void *arg, int status);
+static void bna_tx_cb_stats_cleared(void *arg, int status);
+static void __bna_tx_stop(struct bna_tx *tx);
+static void __bna_tx_start(struct bna_tx *tx);
+static void __bna_txf_stat_clr(struct bna_tx *tx);
+
+enum bna_tx_event {
+ TX_E_START = 1,
+ TX_E_STOP = 2,
+ TX_E_FAIL = 3,
+ TX_E_TXQ_STOPPED = 4,
+ TX_E_PRIO_CHANGE = 5,
+ TX_E_STAT_CLEARED = 6,
+};
+
+enum bna_tx_state {
+ BNA_TX_STOPPED = 1,
+ BNA_TX_STARTED = 2,
+ BNA_TX_TXQ_STOP_WAIT = 3,
+ BNA_TX_PRIO_STOP_WAIT = 4,
+ BNA_TX_STAT_CLR_WAIT = 5,
+};
+
+bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx,
+ enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, started, struct bna_tx,
+ enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, txq_stop_wait, struct bna_tx,
+ enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
+ enum bna_tx_event);
+bfa_fsm_state_decl(bna_tx, stat_clr_wait, struct bna_tx,
+ enum bna_tx_event);
+
+static struct bfa_sm_table tx_sm_table[] = {
+ {BFA_SM(bna_tx_sm_stopped), BNA_TX_STOPPED},
+ {BFA_SM(bna_tx_sm_started), BNA_TX_STARTED},
+ {BFA_SM(bna_tx_sm_txq_stop_wait), BNA_TX_TXQ_STOP_WAIT},
+ {BFA_SM(bna_tx_sm_prio_stop_wait), BNA_TX_PRIO_STOP_WAIT},
+ {BFA_SM(bna_tx_sm_stat_clr_wait), BNA_TX_STAT_CLR_WAIT},
+};
+
+static void
+bna_tx_sm_stopped_entry(struct bna_tx *tx)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ (tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
+ }
+
+ call_tx_stop_cbfn(tx, BNA_CB_SUCCESS);
+}
+
+static void
+bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
+{
+ switch (event) {
+ case TX_E_START:
+ bfa_fsm_set_state(tx, bna_tx_sm_started);
+ break;
+
+ case TX_E_STOP:
+ bfa_fsm_set_state(tx, bna_tx_sm_stopped);
+ break;
+
+ case TX_E_FAIL:
+ /* No-op */
+ break;
+
+ case TX_E_PRIO_CHANGE:
+ call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
+ break;
+
+ case TX_E_TXQ_STOPPED:
+ /**
+ * This event is received due to flushing of the mbox
+ * when the device fails
+ */
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(tx->bna, event);
+ }
+}
+
+static void
+bna_tx_sm_started_entry(struct bna_tx *tx)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ __bna_tx_start(tx);
+
+ /* Start IB */
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ bna_ib_ack(&txq->ib->door_bell, 0);
+ }
+}
+
+static void
+bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ switch (event) {
+ case TX_E_STOP:
+ bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
+ __bna_tx_stop(tx);
+ break;
+
+ case TX_E_FAIL:
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ bna_ib_fail(txq->ib);
+ (tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
+ }
+ bfa_fsm_set_state(tx, bna_tx_sm_stopped);
+ break;
+
+ case TX_E_PRIO_CHANGE:
+ bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
+ break;
+
+ default:
+ bfa_sm_fault(tx->bna, event);
+ }
+}
+
+static void
+bna_tx_sm_txq_stop_wait_entry(struct bna_tx *tx)
+{
+}
+
+static void
+bna_tx_sm_txq_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ switch (event) {
+ case TX_E_FAIL:
+ bfa_fsm_set_state(tx, bna_tx_sm_stopped);
+ break;
+
+ case TX_E_TXQ_STOPPED:
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ bna_ib_stop(txq->ib);
+ }
+ bfa_fsm_set_state(tx, bna_tx_sm_stat_clr_wait);
+ break;
+
+ case TX_E_PRIO_CHANGE:
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(tx->bna, event);
+ }
+}
+
+static void
+bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
+{
+ __bna_tx_stop(tx);
+}
+
+static void
+bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ switch (event) {
+ case TX_E_STOP:
+ bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
+ break;
+
+ case TX_E_FAIL:
+ call_tx_prio_change_cbfn(tx, BNA_CB_FAIL);
+ bfa_fsm_set_state(tx, bna_tx_sm_stopped);
+ break;
+
+ case TX_E_TXQ_STOPPED:
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ bna_ib_stop(txq->ib);
+ (tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
+ }
+ call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
+ bfa_fsm_set_state(tx, bna_tx_sm_started);
+ break;
+
+ case TX_E_PRIO_CHANGE:
+ /* No-op */
+ break;
+
+ default:
+ bfa_sm_fault(tx->bna, event);
+ }
+}
+
+static void
+bna_tx_sm_stat_clr_wait_entry(struct bna_tx *tx)
+{
+ __bna_txf_stat_clr(tx);
+}
+
+static void
+bna_tx_sm_stat_clr_wait(struct bna_tx *tx, enum bna_tx_event event)
+{
+ switch (event) {
+ case TX_E_FAIL:
+ case TX_E_STAT_CLEARED:
+ bfa_fsm_set_state(tx, bna_tx_sm_stopped);
+ break;
+
+ default:
+ bfa_sm_fault(tx->bna, event);
+ }
+}
+
+static void
+__bna_txq_start(struct bna_tx *tx, struct bna_txq *txq)
+{
+ struct bna_rxtx_q_mem *q_mem;
+ struct bna_txq_mem txq_cfg;
+ struct bna_txq_mem *txq_mem;
+ struct bna_dma_addr cur_q_addr;
+ u32 pg_num;
+ void __iomem *base_addr;
+ unsigned long off;
+
+ /* Fill out structure, to be subsequently written to hardware */
+ txq_cfg.pg_tbl_addr_lo = txq->qpt.hw_qpt_ptr.lsb;
+ txq_cfg.pg_tbl_addr_hi = txq->qpt.hw_qpt_ptr.msb;
+ cur_q_addr = *((struct bna_dma_addr *)(txq->qpt.kv_qpt_ptr));
+ txq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
+ txq_cfg.cur_q_entry_hi = cur_q_addr.msb;
+
+ txq_cfg.pg_cnt_n_prd_ptr = (txq->qpt.page_count << 16) | 0x0;
+
+ txq_cfg.entry_n_pg_size = ((u32)(BFI_TXQ_WI_SIZE >> 2) << 16) |
+ (txq->qpt.page_size >> 2);
+ txq_cfg.int_blk_n_cns_ptr = ((((u32)txq->ib_seg_offset) << 24) |
+ ((u32)(txq->ib->ib_id & 0xff) << 16) | 0x0);
+
+ txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE;
+ txq_cfg.nxt_qid_n_fid_n_pri = (((tx->txf.txf_id & 0x3f) << 3) |
+ (txq->priority & 0x3));
+ txq_cfg.wvc_n_cquota_n_rquota =
+ ((((u32)BFI_TX_MAX_WRR_QUOTA & 0xfff) << 12) |
+ (BFI_TX_MAX_WRR_QUOTA & 0xfff));
+
+ /* Setup the page and write to H/W */
+
+ pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + tx->bna->port_num,
+ HQM_RXTX_Q_RAM_BASE_OFFSET);
+ writel(pg_num, tx->bna->regs.page_addr);
+
+ base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
+ HQM_RXTX_Q_RAM_BASE_OFFSET);
+ q_mem = (struct bna_rxtx_q_mem *)0;
+ txq_mem = &q_mem[txq->txq_id].txq;
+
+ /*
+ * The following four writes are a hack because the H/W needs
+ * to read these DMA addresses as little endian
+ */
+
+ off = (unsigned long)&txq_mem->pg_tbl_addr_lo;
+ writel(htonl(txq_cfg.pg_tbl_addr_lo), base_addr + off);
+
+ off = (unsigned long)&txq_mem->pg_tbl_addr_hi;
+ writel(htonl(txq_cfg.pg_tbl_addr_hi), base_addr + off);
+
+ off = (unsigned long)&txq_mem->cur_q_entry_lo;
+ writel(htonl(txq_cfg.cur_q_entry_lo), base_addr + off);
+
+ off = (unsigned long)&txq_mem->cur_q_entry_hi;
+ writel(htonl(txq_cfg.cur_q_entry_hi), base_addr + off);
+
+ off = (unsigned long)&txq_mem->pg_cnt_n_prd_ptr;
+ writel(txq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
+
+ off = (unsigned long)&txq_mem->entry_n_pg_size;
+ writel(txq_cfg.entry_n_pg_size, base_addr + off);
+
+ off = (unsigned long)&txq_mem->int_blk_n_cns_ptr;
+ writel(txq_cfg.int_blk_n_cns_ptr, base_addr + off);
+
+ off = (unsigned long)&txq_mem->cns_ptr2_n_q_state;
+ writel(txq_cfg.cns_ptr2_n_q_state, base_addr + off);
+
+ off = (unsigned long)&txq_mem->nxt_qid_n_fid_n_pri;
+ writel(txq_cfg.nxt_qid_n_fid_n_pri, base_addr + off);
+
+ off = (unsigned long)&txq_mem->wvc_n_cquota_n_rquota;
+ writel(txq_cfg.wvc_n_cquota_n_rquota, base_addr + off);
+
+ txq->tcb->producer_index = 0;
+ txq->tcb->consumer_index = 0;
+ *(txq->tcb->hw_consumer_index) = 0;
+}
+
+static void
+__bna_txq_stop(struct bna_tx *tx, struct bna_txq *txq)
+{
+ struct bfi_ll_q_stop_req ll_req;
+ u32 bit_mask[2] = {0, 0};
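+
+ /* TxQ ids can exceed 31, so the stop mask spans two 32-bit words */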
+ if (txq->txq_id < 32)
+ bit_mask[0] = (u32)1 << txq->txq_id;
+ else
+ bit_mask[1] = (u32)1 << (txq->txq_id - 32);
+
+ memset(&ll_req, 0, sizeof(ll_req));
+ ll_req.mh.msg_class = BFI_MC_LL;
+ ll_req.mh.msg_id = BFI_LL_H2I_TXQ_STOP_REQ;
+ ll_req.mh.mtag.h2i.lpu_id = 0;
+ ll_req.q_id_mask[0] = htonl(bit_mask[0]);
+ ll_req.q_id_mask[1] = htonl(bit_mask[1]);
+
+ bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_tx_cb_txq_stopped, tx);
+
+ bna_mbox_send(tx->bna, &tx->mbox_qe);
+}
+
+static void
+__bna_txf_start(struct bna_tx *tx)
+{
+ struct bna_tx_fndb_ram *tx_fndb;
+ struct bna_txf *txf = &tx->txf;
+ void __iomem *base_addr;
+ unsigned long off;
+
+ writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
+ (tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET),
+ tx->bna->regs.page_addr);
+
+ base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
+ TX_FNDB_RAM_BASE_OFFSET);
+
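+ /* NULL-based array: offset of this TxF's entry in the function DB RAM */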
+ tx_fndb = (struct bna_tx_fndb_ram *)0;
+ off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;
+
+ writel(((u32)txf->vlan << 16) | txf->ctrl_flags,
+ base_addr + off);
+
+ if (tx->txf.txf_id < 32)
+ tx->bna->tx_mod.txf_bmap[0] |= ((u32)1 << tx->txf.txf_id);
+ else
+ tx->bna->tx_mod.txf_bmap[1] |=
+ ((u32)1 << (tx->txf.txf_id - 32));
+}
+
+static void
+__bna_txf_stop(struct bna_tx *tx)
+{
+ struct bna_tx_fndb_ram *tx_fndb;
+ u32 page_num;
+ u32 ctl_flags;
+ struct bna_txf *txf = &tx->txf;
+ void __iomem *base_addr;
+ unsigned long off;
+
+ /* retrieve the running txf_flags & turn off enable bit */
+ page_num = BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
+ (tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET);
+ writel(page_num, tx->bna->regs.page_addr);
+
+ base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
+ TX_FNDB_RAM_BASE_OFFSET);
+ tx_fndb = (struct bna_tx_fndb_ram *)0;
+ off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;
+
+ ctl_flags = readl(base_addr + off);
+ ctl_flags &= ~BFI_TXF_CF_ENABLE;
+
+ writel(ctl_flags, base_addr + off);
+
+ if (tx->txf.txf_id < 32)
+ tx->bna->tx_mod.txf_bmap[0] &= ~((u32)1 << tx->txf.txf_id);
+ else
+ tx->bna->tx_mod.txf_bmap[1] &=
+ ~((u32)1 << (tx->txf.txf_id - 32));
+}
+
+static void
+__bna_txf_stat_clr(struct bna_tx *tx)
+{
+ struct bfi_ll_stats_req ll_req;
+ u32 txf_bmap[2] = {0, 0};
+ if (tx->txf.txf_id < 32)
+ txf_bmap[0] = ((u32)1 << tx->txf.txf_id);
+ else
+ txf_bmap[1] = ((u32)1 << (tx->txf.txf_id - 32));
+ bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
+ ll_req.stats_mask = 0;
+ ll_req.rxf_id_mask[0] = 0;
+ ll_req.rxf_id_mask[1] = 0;
+ ll_req.txf_id_mask[0] = htonl(txf_bmap[0]);
+ ll_req.txf_id_mask[1] = htonl(txf_bmap[1]);
+
+ bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
+ bna_tx_cb_stats_cleared, tx);
+ bna_mbox_send(tx->bna, &tx->mbox_qe);
+}
+
+static void
+__bna_tx_start(struct bna_tx *tx)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ bna_ib_start(txq->ib);
+ __bna_txq_start(tx, txq);
+ }
+
+ __bna_txf_start(tx);
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ txq->tcb->priority = txq->priority;
+ (tx->tx_resume_cbfn)(tx->bna->bnad, txq->tcb);
+ }
+}
+
+static void
+__bna_tx_stop(struct bna_tx *tx)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ (tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
+ }
+
+ __bna_txf_stop(tx);
+
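+ /*
+ * Raise the wait counter once per TxQ before issuing any stop,
+ * so the all-stopped callback cannot fire early.
+ */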
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ bfa_wc_up(&tx->txq_stop_wc);
+ }
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ __bna_txq_stop(tx, txq);
+ }
+}
+
+static void
+bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
+ struct bna_mem_descr *qpt_mem,
+ struct bna_mem_descr *swqpt_mem,
+ struct bna_mem_descr *page_mem)
+{
+ int i;
+
+ txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
+ txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
+ txq->qpt.kv_qpt_ptr = qpt_mem->kva;
+ txq->qpt.page_count = page_count;
+ txq->qpt.page_size = page_size;
+
+ txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
+
+ for (i = 0; i < page_count; i++) {
+ txq->tcb->sw_qpt[i] = page_mem[i].kva;
+
+ ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
+ page_mem[i].dma.lsb;
+ ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
+ page_mem[i].dma.msb;
+ }
+}
+
+static void
+bna_tx_free(struct bna_tx *tx)
+{
+ struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
+ struct bna_txq *txq;
+ struct bna_ib_mod *ib_mod = &tx->bna->ib_mod;
+ struct list_head *qe;
+
+ while (!list_empty(&tx->txq_q)) {
+ bfa_q_deq(&tx->txq_q, &txq);
+ bfa_q_qe_init(&txq->qe);
+ if (txq->ib) {
+ if (txq->ib_seg_offset != -1)
+ bna_ib_release_idx(txq->ib,
+ txq->ib_seg_offset);
+ bna_ib_put(ib_mod, txq->ib);
+ txq->ib = NULL;
+ }
+ txq->tcb = NULL;
+ txq->tx = NULL;
+ list_add_tail(&txq->qe, &tx_mod->txq_free_q);
+ }
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ if (qe == &tx->qe) {
+ list_del(&tx->qe);
+ bfa_q_qe_init(&tx->qe);
+ break;
+ }
+ }
+
+ tx->bna = NULL;
+ tx->priv = NULL;
+ list_add_tail(&tx->qe, &tx_mod->tx_free_q);
+}
+
+static void
+bna_tx_cb_txq_stopped(void *arg, int status)
+{
+ struct bna_tx *tx = (struct bna_tx *)arg;
+
+ bfa_q_qe_init(&tx->mbox_qe.qe);
+ bfa_wc_down(&tx->txq_stop_wc);
+}
+
+static void
+bna_tx_cb_txq_stopped_all(void *arg)
+{
+ struct bna_tx *tx = (struct bna_tx *)arg;
+
+ bfa_fsm_send_event(tx, TX_E_TXQ_STOPPED);
+}
+
+static void
+bna_tx_cb_stats_cleared(void *arg, int status)
+{
+ struct bna_tx *tx = (struct bna_tx *)arg;
+
+ bfa_q_qe_init(&tx->mbox_qe.qe);
+
+ bfa_fsm_send_event(tx, TX_E_STAT_CLEARED);
+}
+
+static void
+bna_tx_start(struct bna_tx *tx)
+{
+ tx->flags |= BNA_TX_F_PORT_STARTED;
+ if (tx->flags & BNA_TX_F_ENABLED)
+ bfa_fsm_send_event(tx, TX_E_START);
+}
+
+static void
+bna_tx_stop(struct bna_tx *tx)
+{
+ tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
+ tx->stop_cbarg = &tx->bna->tx_mod;
+
+ tx->flags &= ~BNA_TX_F_PORT_STARTED;
+ bfa_fsm_send_event(tx, TX_E_STOP);
+}
+
+static void
+bna_tx_fail(struct bna_tx *tx)
+{
+ tx->flags &= ~BNA_TX_F_PORT_STARTED;
+ bfa_fsm_send_event(tx, TX_E_FAIL);
+}
+
+void
+bna_tx_prio_changed(struct bna_tx *tx, int prio)
+{
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ txq->priority = prio;
+ }
+
+ bfa_fsm_send_event(tx, TX_E_PRIO_CHANGE);
+}
+
+static void
+bna_tx_cee_link_status(struct bna_tx *tx, int cee_link)
+{
+ if (cee_link)
+ tx->flags |= BNA_TX_F_PRIO_LOCK;
+ else
+ tx->flags &= ~BNA_TX_F_PRIO_LOCK;
+}
+
+static void
+bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx,
+ enum bna_cb_status status)
+{
+ struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
+
+ bfa_wc_down(&tx_mod->tx_stop_wc);
+}
+
+static void
+bna_tx_mod_cb_tx_stopped_all(void *arg)
+{
+ struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
+
+ if (tx_mod->stop_cbfn)
+ tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
+ tx_mod->stop_cbfn = NULL;
+}
+
+void
+bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
+{
+ u32 q_size;
+ u32 page_count;
+ struct bna_mem_info *mem_info;
+
+ res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = sizeof(struct bna_tcb);
+ mem_info->num = num_txq;
+
+ q_size = txq_depth * BFI_TXQ_WI_SIZE;
+ q_size = ALIGN(q_size, PAGE_SIZE);
+ page_count = q_size >> PAGE_SHIFT;
+
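+ /* One QPT entry (a DMA address) is needed per page of the ring */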
+ res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = page_count * sizeof(struct bna_dma_addr);
+ mem_info->num = num_txq;
+
+ res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_KVA;
+ mem_info->len = page_count * sizeof(void *);
+ mem_info->num = num_txq;
+
+ res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
+ mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
+ mem_info->mem_type = BNA_MEM_T_DMA;
+ mem_info->len = PAGE_SIZE;
+ mem_info->num = num_txq * page_count;
+
+ res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
+ res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
+ BNA_INTR_T_MSIX;
+ res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
+}
+
+struct bna_tx *
+bna_tx_create(struct bna *bna, struct bnad *bnad,
+ struct bna_tx_config *tx_cfg,
+ struct bna_tx_event_cbfn *tx_cbfn,
+ struct bna_res_info *res_info, void *priv)
+{
+ struct bna_intr_info *intr_info;
+ struct bna_tx_mod *tx_mod = &bna->tx_mod;
+ struct bna_tx *tx;
+ struct bna_txq *txq;
+ struct list_head *qe;
+ struct bna_ib_mod *ib_mod = &bna->ib_mod;
+ struct bna_doorbell_qset *qset;
+ struct bna_ib_config ib_config;
+ int page_count;
+ int page_size;
+ int page_idx;
+ int i;
+ unsigned long off;
+
+ intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
+ page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) /
+ tx_cfg->num_txq;
+ page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len;
+
+ /**
+ * Get resources
+ */
+
+ if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
+ return NULL;
+
+ /* Tx */
+
+ if (list_empty(&tx_mod->tx_free_q))
+ return NULL;
+ bfa_q_deq(&tx_mod->tx_free_q, &tx);
+ bfa_q_qe_init(&tx->qe);
+
+ /* TxQs */
+
+ INIT_LIST_HEAD(&tx->txq_q);
+ for (i = 0; i < tx_cfg->num_txq; i++) {
+ if (list_empty(&tx_mod->txq_free_q))
+ goto err_return;
+
+ bfa_q_deq(&tx_mod->txq_free_q, &txq);
+ bfa_q_qe_init(&txq->qe);
+ list_add_tail(&txq->qe, &tx->txq_q);
+ txq->ib = NULL;
+ txq->ib_seg_offset = -1;
+ txq->tx = tx;
+ }
+
+ /* IBs */
+ i = 0;
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+
+ if (intr_info->num == 1)
+ txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
+ intr_info->idl[0].vector);
+ else
+ txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
+ intr_info->idl[i].vector);
+
+ if (txq->ib == NULL)
+ goto err_return;
+
+ txq->ib_seg_offset = bna_ib_reserve_idx(txq->ib);
+ if (txq->ib_seg_offset == -1)
+ goto err_return;
+
+ i++;
+ }
+
+ /*
+ * Initialize
+ */
+
+ /* Tx */
+
+ tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
+ tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
+ /* Following callbacks are mandatory */
+ tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
+ tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
+ tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
+
+ list_add_tail(&tx->qe, &tx_mod->tx_active_q);
+ tx->bna = bna;
+ tx->priv = priv;
+ tx->txq_stop_wc.wc_resume = bna_tx_cb_txq_stopped_all;
+ tx->txq_stop_wc.wc_cbarg = tx;
+ tx->txq_stop_wc.wc_count = 0;
+
+ tx->type = tx_cfg->tx_type;
+
+ tx->flags = 0;
+ if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_STARTED) {
+ switch (tx->type) {
+ case BNA_TX_T_REGULAR:
+ if (!(tx->bna->tx_mod.flags &
+ BNA_TX_MOD_F_PORT_LOOPBACK))
+ tx->flags |= BNA_TX_F_PORT_STARTED;
+ break;
+ case BNA_TX_T_LOOPBACK:
+ if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_LOOPBACK)
+ tx->flags |= BNA_TX_F_PORT_STARTED;
+ break;
+ }
+ }
+ if (tx->bna->tx_mod.cee_link)
+ tx->flags |= BNA_TX_F_PRIO_LOCK;
+
+ /* TxQ */
+
+ i = 0;
+ page_idx = 0;
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ txq->priority = tx_mod->priority;
+ txq->tcb = (struct bna_tcb *)
+ res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
+ txq->tx_packets = 0;
+ txq->tx_bytes = 0;
+
+ /* IB */
+
+ ib_config.coalescing_timeo = BFI_TX_COALESCING_TIMEO;
+ ib_config.interpkt_timeo = 0; /* Not used */
+ ib_config.interpkt_count = BFI_TX_INTERPKT_COUNT;
+ ib_config.ctrl_flags = (BFI_IB_CF_INTER_PKT_DMA |
+ BFI_IB_CF_INT_ENABLE |
+ BFI_IB_CF_COALESCING_MODE);
+ bna_ib_config(txq->ib, &ib_config);
+
+ /* TCB */
+
+ txq->tcb->producer_index = 0;
+ txq->tcb->consumer_index = 0;
+ txq->tcb->hw_consumer_index = (volatile u32 *)
+ ((volatile u8 *)txq->ib->ib_seg_host_addr_kva +
+ (txq->ib_seg_offset * BFI_IBIDX_SIZE));
+ *(txq->tcb->hw_consumer_index) = 0;
+ txq->tcb->q_depth = tx_cfg->txq_depth;
+ txq->tcb->unmap_q = (void *)
+ res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
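+ /* NULL-based qset array: compute this TxQ's doorbell offset */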
+ qset = (struct bna_doorbell_qset *)0;
+ off = (unsigned long)&qset[txq->txq_id].txq[0];
+ txq->tcb->q_dbell = off +
+ BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
+ txq->tcb->i_dbell = &txq->ib->door_bell;
+ txq->tcb->intr_type = intr_info->intr_type;
+ txq->tcb->intr_vector = (intr_info->num == 1) ?
+ intr_info->idl[0].vector :
+ intr_info->idl[i].vector;
+ txq->tcb->txq = txq;
+ txq->tcb->bnad = bnad;
+ txq->tcb->id = i;
+
+ /* QPT, SWQPT, Pages */
+ bna_txq_qpt_setup(txq, page_count, page_size,
+ &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
+ &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
+ &res_info[BNA_TX_RES_MEM_T_PAGE].
+ res_u.mem_info.mdl[page_idx]);
+ txq->tcb->page_idx = page_idx;
+ txq->tcb->page_count = page_count;
+ page_idx += page_count;
+
+ /* Callback to bnad for setting up TCB */
+ if (tx->tcb_setup_cbfn)
+ (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
+
+ i++;
+ }
+
+ /* TxF */
+
+ tx->txf.ctrl_flags = BFI_TXF_CF_ENABLE | BFI_TXF_CF_VLAN_WI_BASED;
+ tx->txf.vlan = 0;
+
+ /* Mbox element */
+ bfa_q_qe_init(&tx->mbox_qe.qe);
+
+ bfa_fsm_set_state(tx, bna_tx_sm_stopped);
+
+ return tx;
+
+err_return:
+ bna_tx_free(tx);
+ return NULL;
+}
+
+void
+bna_tx_destroy(struct bna_tx *tx)
+{
+ /* Callback to bnad for destroying TCB */
+ if (tx->tcb_destroy_cbfn) {
+ struct bna_txq *txq;
+ struct list_head *qe;
+
+ list_for_each(qe, &tx->txq_q) {
+ txq = (struct bna_txq *)qe;
+ (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
+ }
+ }
+
+ bna_tx_free(tx);
+}
+
+void
+bna_tx_enable(struct bna_tx *tx)
+{
+ if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
+ return;
+
+ tx->flags |= BNA_TX_F_ENABLED;
+
+ if (tx->flags & BNA_TX_F_PORT_STARTED)
+ bfa_fsm_send_event(tx, TX_E_START);
+}
+
+void
+bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
+ void (*cbfn)(void *, struct bna_tx *, enum bna_cb_status))
+{
+ if (type == BNA_SOFT_CLEANUP) {
+ (*cbfn)(tx->bna->bnad, tx, BNA_CB_SUCCESS);
+ return;
+ }
+
+ tx->stop_cbfn = cbfn;
+ tx->stop_cbarg = tx->bna->bnad;
+
+ tx->flags &= ~BNA_TX_F_ENABLED;
+
+ bfa_fsm_send_event(tx, TX_E_STOP);
+}
+
+int
+bna_tx_state_get(struct bna_tx *tx)
+{
+ return bfa_sm_to_state(tx_sm_table, tx->fsm);
+}
+
+void
+bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
+ struct bna_res_info *res_info)
+{
+ int i;
+
+ tx_mod->bna = bna;
+ tx_mod->flags = 0;
+
+ tx_mod->tx = (struct bna_tx *)
+ res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
+ tx_mod->txq = (struct bna_txq *)
+ res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
+
+ INIT_LIST_HEAD(&tx_mod->tx_free_q);
+ INIT_LIST_HEAD(&tx_mod->tx_active_q);
+
+ INIT_LIST_HEAD(&tx_mod->txq_free_q);
+
+ for (i = 0; i < BFI_MAX_TXQ; i++) {
+ tx_mod->tx[i].txf.txf_id = i;
+ bfa_q_qe_init(&tx_mod->tx[i].qe);
+ list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
+
+ tx_mod->txq[i].txq_id = i;
+ bfa_q_qe_init(&tx_mod->txq[i].qe);
+ list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
+ }
+
+ tx_mod->tx_stop_wc.wc_resume = bna_tx_mod_cb_tx_stopped_all;
+ tx_mod->tx_stop_wc.wc_cbarg = tx_mod;
+ tx_mod->tx_stop_wc.wc_count = 0;
+}
+
+void
+bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
+{
+ struct list_head *qe;
+ int i;
+
+ i = 0;
+ list_for_each(qe, &tx_mod->tx_free_q)
+ i++;
+
+ i = 0;
+ list_for_each(qe, &tx_mod->txq_free_q)
+ i++;
+
+ tx_mod->bna = NULL;
+}
+
+void
+bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
+{
+ struct bna_tx *tx;
+ struct list_head *qe;
+
+ tx_mod->flags |= BNA_TX_MOD_F_PORT_STARTED;
+ if (type == BNA_TX_T_LOOPBACK)
+ tx_mod->flags |= BNA_TX_MOD_F_PORT_LOOPBACK;
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ if (tx->type == type)
+ bna_tx_start(tx);
+ }
+}
+
+void
+bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
+{
+ struct bna_tx *tx;
+ struct list_head *qe;
+
+ tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
+ tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;
+
+ tx_mod->stop_cbfn = bna_port_cb_tx_stopped;
+
+ /**
+ * Before calling bna_tx_stop(), increment tx_stop_wc as many times
+ * as we are going to call bna_tx_stop
+ */
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ if (tx->type == type)
+ bfa_wc_up(&tx_mod->tx_stop_wc);
+ }
+
+ if (tx_mod->tx_stop_wc.wc_count == 0) {
+ tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
+ tx_mod->stop_cbfn = NULL;
+ return;
+ }
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ if (tx->type == type)
+ bna_tx_stop(tx);
+ }
+}
+
+void
+bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
+{
+ struct bna_tx *tx;
+ struct list_head *qe;
+
+ tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
+ tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ bna_tx_fail(tx);
+ }
+}
+
+void
+bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio)
+{
+ struct bna_tx *tx;
+ struct list_head *qe;
+
+ if (prio != tx_mod->priority) {
+ tx_mod->priority = prio;
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ bna_tx_prio_changed(tx, prio);
+ }
+ }
+}
+
+void
+bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link)
+{
+ struct bna_tx *tx;
+ struct list_head *qe;
+
+ tx_mod->cee_link = cee_link;
+
+ list_for_each(qe, &tx_mod->tx_active_q) {
+ tx = (struct bna_tx *)qe;
+ bna_tx_cee_link_status(tx, cee_link);
+ }
+}
diff --git a/drivers/net/bna/bna_types.h b/drivers/net/bna/bna_types.h
new file mode 100644
index 000000000000..6877310f6ef4
--- /dev/null
+++ b/drivers/net/bna/bna_types.h
@@ -0,0 +1,1128 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BNA_TYPES_H__
+#define __BNA_TYPES_H__
+
+#include "cna.h"
+#include "bna_hw.h"
+#include "bfa_cee.h"
+
+/**
+ *
+ * Forward declarations
+ *
+ */
+
+struct bna_txq;
+struct bna_tx;
+struct bna_rxq;
+struct bna_cq;
+struct bna_rx;
+struct bna_rxf;
+struct bna_port;
+struct bna;
+struct bnad;
+
+/**
+ *
+ * Enums, primitive data types
+ *
+ */
+
+enum bna_status {
+ BNA_STATUS_T_DISABLED = 0,
+ BNA_STATUS_T_ENABLED = 1
+};
+
+enum bna_cleanup_type {
+ BNA_HARD_CLEANUP = 0,
+ BNA_SOFT_CLEANUP = 1
+};
+
+enum bna_cb_status {
+ BNA_CB_SUCCESS = 0,
+ BNA_CB_FAIL = 1,
+ BNA_CB_INTERRUPT = 2,
+ BNA_CB_BUSY = 3,
+ BNA_CB_INVALID_MAC = 4,
+ BNA_CB_MCAST_LIST_FULL = 5,
+ BNA_CB_UCAST_CAM_FULL = 6,
+ BNA_CB_WAITING = 7,
+ BNA_CB_NOT_EXEC = 8
+};
+
+enum bna_res_type {
+ BNA_RES_T_MEM = 1,
+ BNA_RES_T_INTR = 2
+};
+
+enum bna_mem_type {
+ BNA_MEM_T_KVA = 1,
+ BNA_MEM_T_DMA = 2
+};
+
+enum bna_intr_type {
+ BNA_INTR_T_INTX = 1,
+ BNA_INTR_T_MSIX = 2
+};
+
+enum bna_res_req_type {
+ BNA_RES_MEM_T_COM = 0,
+ BNA_RES_MEM_T_ATTR = 1,
+ BNA_RES_MEM_T_FWTRC = 2,
+ BNA_RES_MEM_T_STATS = 3,
+ BNA_RES_MEM_T_SWSTATS = 4,
+ BNA_RES_MEM_T_IBIDX = 5,
+ BNA_RES_MEM_T_IB_ARRAY = 6,
+ BNA_RES_MEM_T_INTR_ARRAY = 7,
+ BNA_RES_MEM_T_IDXSEG_ARRAY = 8,
+ BNA_RES_MEM_T_TX_ARRAY = 9,
+ BNA_RES_MEM_T_TXQ_ARRAY = 10,
+ BNA_RES_MEM_T_RX_ARRAY = 11,
+ BNA_RES_MEM_T_RXP_ARRAY = 12,
+ BNA_RES_MEM_T_RXQ_ARRAY = 13,
+ BNA_RES_MEM_T_UCMAC_ARRAY = 14,
+ BNA_RES_MEM_T_MCMAC_ARRAY = 15,
+ BNA_RES_MEM_T_RIT_ENTRY = 16,
+ BNA_RES_MEM_T_RIT_SEGMENT = 17,
+ BNA_RES_INTR_T_MBOX = 18,
+ BNA_RES_T_MAX
+};
+
+enum bna_tx_res_req_type {
+ BNA_TX_RES_MEM_T_TCB = 0,
+ BNA_TX_RES_MEM_T_UNMAPQ = 1,
+ BNA_TX_RES_MEM_T_QPT = 2,
+ BNA_TX_RES_MEM_T_SWQPT = 3,
+ BNA_TX_RES_MEM_T_PAGE = 4,
+ BNA_TX_RES_INTR_T_TXCMPL = 5,
+ BNA_TX_RES_T_MAX,
+};
+
+enum bna_rx_mem_type {
+ BNA_RX_RES_MEM_T_CCB = 0, /* CQ context */
+ BNA_RX_RES_MEM_T_RCB = 1, /* RxQ context */
+ BNA_RX_RES_MEM_T_UNMAPQ = 2, /* UnmapQ for RxQs */
+ BNA_RX_RES_MEM_T_CQPT = 3, /* CQ QPT */
+ BNA_RX_RES_MEM_T_CSWQPT = 4, /* S/W QPT */
+ BNA_RX_RES_MEM_T_CQPT_PAGE = 5, /* CQPT page */
+ BNA_RX_RES_MEM_T_HQPT = 6, /* RX header QPT */
+ BNA_RX_RES_MEM_T_DQPT = 7, /* RX data QPT */
+ BNA_RX_RES_MEM_T_HSWQPT = 8, /* RX header s/w QPT */
+ BNA_RX_RES_MEM_T_DSWQPT = 9, /* RX data s/w QPT */
+ BNA_RX_RES_MEM_T_DPAGE = 10, /* RX data pages */
+ BNA_RX_RES_MEM_T_HPAGE = 11, /* RX header pages */
+ BNA_RX_RES_T_INTR = 12, /* Rx interrupts */
+ BNA_RX_RES_T_MAX = 13
+};
+
+enum bna_mbox_state {
+ BNA_MBOX_FREE = 0,
+ BNA_MBOX_POSTED = 1
+};
+
+enum bna_tx_type {
+ BNA_TX_T_REGULAR = 0,
+ BNA_TX_T_LOOPBACK = 1,
+};
+
+enum bna_tx_flags {
+ BNA_TX_F_PORT_STARTED = 1,
+ BNA_TX_F_ENABLED = 2,
+ BNA_TX_F_PRIO_LOCK = 4,
+};
+
+enum bna_tx_mod_flags {
+ BNA_TX_MOD_F_PORT_STARTED = 1,
+ BNA_TX_MOD_F_PORT_LOOPBACK = 2,
+};
+
+enum bna_rx_type {
+ BNA_RX_T_REGULAR = 0,
+ BNA_RX_T_LOOPBACK = 1,
+};
+
+enum bna_rxp_type {
+ BNA_RXP_SINGLE = 1,
+ BNA_RXP_SLR = 2,
+ BNA_RXP_HDS = 3
+};
+
+enum bna_rxmode {
+ BNA_RXMODE_PROMISC = 1,
+ BNA_RXMODE_DEFAULT = 2,
+ BNA_RXMODE_ALLMULTI = 4
+};
+
+enum bna_rx_event {
+ RX_E_START = 1,
+ RX_E_STOP = 2,
+ RX_E_FAIL = 3,
+ RX_E_RXF_STARTED = 4,
+ RX_E_RXF_STOPPED = 5,
+ RX_E_RXQ_STOPPED = 6,
+};
+
+enum bna_rx_state {
+ BNA_RX_STOPPED = 1,
+ BNA_RX_RXF_START_WAIT = 2,
+ BNA_RX_STARTED = 3,
+ BNA_RX_RXF_STOP_WAIT = 4,
+ BNA_RX_RXQ_STOP_WAIT = 5,
+};
+
+enum bna_rx_flags {
+ BNA_RX_F_ENABLE = 0x01, /* bnad enabled rxf */
+ BNA_RX_F_PORT_ENABLED = 0x02, /* Port object is enabled */
+ BNA_RX_F_PORT_FAILED = 0x04, /* Port in failed state */
+};
+
+enum bna_rx_mod_flags {
+ BNA_RX_MOD_F_PORT_STARTED = 1,
+ BNA_RX_MOD_F_PORT_LOOPBACK = 2,
+};
+
+enum bna_rxf_oper_state {
+ BNA_RXF_OPER_STATE_RUNNING = 0x01, /* rxf operational */
+ BNA_RXF_OPER_STATE_PAUSED = 0x02, /* rxf in PAUSED state */
+};
+
+enum bna_rxf_flags {
+ BNA_RXF_FL_STOP_PENDING = 0x01,
+ BNA_RXF_FL_FAILED = 0x02,
+ BNA_RXF_FL_RSS_CONFIG_PENDING = 0x04,
+ BNA_RXF_FL_OPERSTATE_CHANGED = 0x08,
+ BNA_RXF_FL_RXF_ENABLED = 0x10,
+ BNA_RXF_FL_VLAN_CONFIG_PENDING = 0x20,
+};
+
+enum bna_rxf_event {
+ RXF_E_START = 1,
+ RXF_E_STOP = 2,
+ RXF_E_FAIL = 3,
+ RXF_E_CAM_FLTR_MOD = 4,
+ RXF_E_STARTED = 5,
+ RXF_E_STOPPED = 6,
+ RXF_E_CAM_FLTR_RESP = 7,
+ RXF_E_PAUSE = 8,
+ RXF_E_RESUME = 9,
+ RXF_E_STAT_CLEARED = 10,
+};
+
+enum bna_rxf_state {
+ BNA_RXF_STOPPED = 1,
+ BNA_RXF_START_WAIT = 2,
+ BNA_RXF_CAM_FLTR_MOD_WAIT = 3,
+ BNA_RXF_STARTED = 4,
+ BNA_RXF_CAM_FLTR_CLR_WAIT = 5,
+ BNA_RXF_STOP_WAIT = 6,
+ BNA_RXF_PAUSE_WAIT = 7,
+ BNA_RXF_RESUME_WAIT = 8,
+ BNA_RXF_STAT_CLR_WAIT = 9,
+};
+
+enum bna_port_type {
+ BNA_PORT_T_REGULAR = 0,
+ BNA_PORT_T_LOOPBACK_INTERNAL = 1,
+ BNA_PORT_T_LOOPBACK_EXTERNAL = 2,
+};
+
+enum bna_link_status {
+ BNA_LINK_DOWN = 0,
+ BNA_LINK_UP = 1,
+ BNA_CEE_UP = 2
+};
+
+enum bna_llport_flags {
+ BNA_LLPORT_F_ENABLED = 1,
+ BNA_LLPORT_F_RX_ENABLED = 2
+};
+
+enum bna_port_flags {
+ BNA_PORT_F_DEVICE_READY = 1,
+ BNA_PORT_F_ENABLED = 2,
+ BNA_PORT_F_PAUSE_CHANGED = 4,
+ BNA_PORT_F_MTU_CHANGED = 8
+};
+
+enum bna_pkt_rates {
+ BNA_PKT_RATE_10K = 10000,
+ BNA_PKT_RATE_20K = 20000,
+ BNA_PKT_RATE_30K = 30000,
+ BNA_PKT_RATE_40K = 40000,
+ BNA_PKT_RATE_50K = 50000,
+ BNA_PKT_RATE_60K = 60000,
+ BNA_PKT_RATE_70K = 70000,
+ BNA_PKT_RATE_80K = 80000,
+};
+
+enum bna_dim_load_types {
+ BNA_LOAD_T_HIGH_4 = 0, /* 80K <= r */
+ BNA_LOAD_T_HIGH_3 = 1, /* 60K <= r < 80K */
+ BNA_LOAD_T_HIGH_2 = 2, /* 50K <= r < 60K */
+ BNA_LOAD_T_HIGH_1 = 3, /* 40K <= r < 50K */
+ BNA_LOAD_T_LOW_1 = 4, /* 30K <= r < 40K */
+ BNA_LOAD_T_LOW_2 = 5, /* 20K <= r < 30K */
+ BNA_LOAD_T_LOW_3 = 6, /* 10K <= r < 20K */
+ BNA_LOAD_T_LOW_4 = 7, /* r < 10K */
+ BNA_LOAD_T_MAX = 8
+};
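+/*
+ * Worked example: a sampled rate of r = 45000 pkts satisfies
+ * 40K <= r < 50K and therefore classifies as BNA_LOAD_T_HIGH_1;
+ * anything below 10K falls through to BNA_LOAD_T_LOW_4.
+ */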
+
+enum bna_dim_bias_types {
+ BNA_BIAS_T_SMALL = 0, /* small pkts > (large pkts * 2) */
+ BNA_BIAS_T_LARGE = 1, /* Not BNA_BIAS_T_SMALL */
+ BNA_BIAS_T_MAX = 2
+};
+
+struct bna_mac {
+ /* This should be the first one */
+ struct list_head qe;
+ u8 addr[ETH_ALEN];
+};
+
+struct bna_mem_descr {
+ u32 len;
+ void *kva;
+ struct bna_dma_addr dma;
+};
+
+struct bna_mem_info {
+ enum bna_mem_type mem_type;
+ u32 len;
+ u32 num;
+ u32 align_sz; /* 0/1 = no alignment */
+ struct bna_mem_descr *mdl;
+ void *cookie; /* For bnad to unmap dma later */
+};
+
+struct bna_intr_descr {
+ int vector;
+};
+
+struct bna_intr_info {
+ enum bna_intr_type intr_type;
+ int num;
+ struct bna_intr_descr *idl;
+};
+
+union bna_res_u {
+ struct bna_mem_info mem_info;
+ struct bna_intr_info intr_info;
+};
+
+struct bna_res_info {
+ enum bna_res_type res_type;
+ union bna_res_u res_u;
+};
+
+/* HW QPT */
+struct bna_qpt {
+ struct bna_dma_addr hw_qpt_ptr;
+ void *kv_qpt_ptr;
+ u32 page_count;
+ u32 page_size;
+};
+
+/**
+ *
+ * Device
+ *
+ */
+
+struct bna_device {
+ bfa_fsm_t fsm;
+ struct bfa_ioc ioc;
+
+ enum bna_intr_type intr_type;
+ int vector;
+
+ void (*ready_cbfn)(struct bnad *bnad, enum bna_cb_status status);
+ struct bnad *ready_cbarg;
+
+ void (*stop_cbfn)(struct bnad *bnad, enum bna_cb_status status);
+ struct bnad *stop_cbarg;
+
+ struct bna *bna;
+};
+
+/**
+ *
+ * Mail box
+ *
+ */
+
+struct bna_mbox_qe {
+ /* This should be the first one */
+ struct list_head qe;
+
+ struct bfa_mbox_cmd cmd;
+ u32 cmd_len;
+ /* Callback for port, tx, rx, rxf */
+ void (*cbfn)(void *arg, int status);
+ void *cbarg;
+};
+
+struct bna_mbox_mod {
+ enum bna_mbox_state state;
+ struct list_head posted_q;
+ u32 msg_pending;
+ u32 msg_ctr;
+ struct bna *bna;
+};
+
+/**
+ *
+ * Port
+ *
+ */
+
+/* Pause configuration */
+struct bna_pause_config {
+ enum bna_status tx_pause;
+ enum bna_status rx_pause;
+};
+
+struct bna_llport {
+ bfa_fsm_t fsm;
+ enum bna_llport_flags flags;
+
+ enum bna_port_type type;
+
+ enum bna_link_status link_status;
+
+ int admin_up_count;
+
+ void (*stop_cbfn)(struct bna_port *, enum bna_cb_status);
+
+ struct bna_mbox_qe mbox_qe;
+
+ struct bna *bna;
+};
+
+struct bna_port {
+ bfa_fsm_t fsm;
+ enum bna_port_flags flags;
+
+ enum bna_port_type type;
+
+ struct bna_llport llport;
+
+ struct bna_pause_config pause_config;
+ u8 priority;
+ int mtu;
+
+ /* Callback for bna_port_disable(), port_stop() */
+ void (*stop_cbfn)(void *, enum bna_cb_status);
+ void *stop_cbarg;
+
+ /* Callback for bna_port_pause_config() */
+ void (*pause_cbfn)(struct bnad *, enum bna_cb_status);
+
+ /* Callback for bna_port_mtu_set() */
+ void (*mtu_cbfn)(struct bnad *, enum bna_cb_status);
+
+ void (*link_cbfn)(struct bnad *, enum bna_link_status);
+
+ struct bfa_wc chld_stop_wc;
+
+ struct bna_mbox_qe mbox_qe;
+
+ struct bna *bna;
+};
+
+/**
+ *
+ * Interrupt Block
+ *
+ */
+
+/* IB index segment structure */
+struct bna_ibidx_seg {
+ /* This should be the first one */
+ struct list_head qe;
+
+ u8 ib_seg_size;
+ u8 ib_idx_tbl_offset;
+};
+
+/* Interrupt structure */
+struct bna_intr {
+ /* This should be the first one */
+ struct list_head qe;
+ int ref_count;
+
+ enum bna_intr_type intr_type;
+ int vector;
+
+ struct bna_ib *ib;
+};
+
+/* Doorbell structure */
+struct bna_ib_dbell {
+	void __iomem *doorbell_addr;
+ u32 doorbell_ack;
+};
+
+/* Interrupt timer configuration */
+struct bna_ib_config {
+ u8 coalescing_timeo; /* Unit is 5usec. */
+
+ int interpkt_count;
+ int interpkt_timeo;
+
+ enum ib_flags ctrl_flags;
+};
+
+/* IB structure */
+struct bna_ib {
+ /* This should be the first one */
+ struct list_head qe;
+
+ int ib_id;
+
+ int ref_count;
+ int start_count;
+
+ struct bna_dma_addr ib_seg_host_addr;
+ void *ib_seg_host_addr_kva;
+ u32 idx_mask; /* Size >= BNA_IBIDX_MAX_SEGSIZE */
+
+ struct bna_ibidx_seg *idx_seg;
+
+ struct bna_ib_dbell door_bell;
+
+ struct bna_intr *intr;
+
+ struct bna_ib_config ib_config;
+
+ struct bna *bna;
+};
+
+/* IB module - keeps track of IBs and interrupts */
+struct bna_ib_mod {
+ struct bna_ib *ib; /* BFI_MAX_IB entries */
+ struct bna_intr *intr; /* BFI_MAX_IB entries */
+ struct bna_ibidx_seg *idx_seg; /* BNA_IBIDX_TOTAL_SEGS */
+
+ struct list_head ib_free_q;
+
+ struct list_head ibidx_seg_pool[BFI_IBIDX_TOTAL_POOLS];
+
+ struct list_head intr_free_q;
+ struct list_head intr_active_q;
+
+ struct bna *bna;
+};
+
+/**
+ *
+ * Tx object
+ *
+ */
+
+/* Tx datapath control structure */
+#define BNA_Q_NAME_SIZE 16
+struct bna_tcb {
+ /* Fast path */
+ void **sw_qpt;
+ void *unmap_q;
+ u32 producer_index;
+ u32 consumer_index;
+ volatile u32 *hw_consumer_index;
+ u32 q_depth;
+	void __iomem *q_dbell;
+ struct bna_ib_dbell *i_dbell;
+ int page_idx;
+ int page_count;
+ /* Control path */
+ struct bna_txq *txq;
+ struct bnad *bnad;
+ enum bna_intr_type intr_type;
+ int intr_vector;
+ u8 priority; /* Current priority */
+ unsigned long flags; /* Used by bnad as required */
+ int id;
+ char name[BNA_Q_NAME_SIZE];
+};
+
+/* TxQ QPT and configuration */
+struct bna_txq {
+ /* This should be the first one */
+ struct list_head qe;
+
+ int txq_id;
+
+ u8 priority;
+
+ struct bna_qpt qpt;
+ struct bna_tcb *tcb;
+ struct bna_ib *ib;
+ int ib_seg_offset;
+
+ struct bna_tx *tx;
+
+ u64 tx_packets;
+ u64 tx_bytes;
+};
+
+/* TxF structure (hardware Tx Function) */
+struct bna_txf {
+ int txf_id;
+ enum txf_flags ctrl_flags;
+ u16 vlan;
+};
+
+/* Tx object */
+struct bna_tx {
+ /* This should be the first one */
+ struct list_head qe;
+
+ bfa_fsm_t fsm;
+ enum bna_tx_flags flags;
+
+ enum bna_tx_type type;
+
+ struct list_head txq_q;
+ struct bna_txf txf;
+
+ /* Tx event handlers */
+ void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tx_stall_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tx_resume_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tcb *);
+
+ /* callback for bna_tx_disable(), bna_tx_stop() */
+ void (*stop_cbfn)(void *arg, struct bna_tx *tx,
+ enum bna_cb_status status);
+ void *stop_cbarg;
+
+ /* callback for bna_tx_prio_set() */
+ void (*prio_change_cbfn)(struct bnad *bnad, struct bna_tx *tx,
+ enum bna_cb_status status);
+
+ struct bfa_wc txq_stop_wc;
+
+ struct bna_mbox_qe mbox_qe;
+
+ struct bna *bna;
+ void *priv; /* bnad's cookie */
+};
+
+struct bna_tx_config {
+ int num_txq;
+ int txq_depth;
+ enum bna_tx_type tx_type;
+};
+
+struct bna_tx_event_cbfn {
+ /* Optional */
+ void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
+ /* Mandatory */
+ void (*tx_stall_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tx_resume_cbfn)(struct bnad *, struct bna_tcb *);
+ void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tcb *);
+};
+
+/* Tx module - keeps track of free, active tx objects */
+struct bna_tx_mod {
+ struct bna_tx *tx; /* BFI_MAX_TXQ entries */
+ struct bna_txq *txq; /* BFI_MAX_TXQ entries */
+
+ struct list_head tx_free_q;
+ struct list_head tx_active_q;
+
+ struct list_head txq_free_q;
+
+ /* callback for bna_tx_mod_stop() */
+ void (*stop_cbfn)(struct bna_port *port,
+ enum bna_cb_status status);
+
+ struct bfa_wc tx_stop_wc;
+
+ enum bna_tx_mod_flags flags;
+
+ int priority;
+ int cee_link;
+
+ u32 txf_bmap[2];
+
+ struct bna *bna;
+};
+
+/**
+ *
+ * Receive Indirection Table
+ *
+ */
+
+/* One row of RIT table */
+struct bna_rit_entry {
+ u8 large_rxq_id; /* used for either large or data buffers */
+ u8 small_rxq_id; /* used for either small or header buffers */
+};
+
+/* RIT segment */
+struct bna_rit_segment {
+ struct list_head qe;
+
+ u32 rit_offset;
+ u32 rit_size;
+ /**
+ * max_rit_size: Varies per RIT segment depending on how RIT is
+ * partitioned
+ */
+ u32 max_rit_size;
+
+ struct bna_rit_entry *rit;
+};
+
+struct bna_rit_mod {
+ struct bna_rit_entry *rit;
+ struct bna_rit_segment *rit_segment;
+
+ struct list_head rit_seg_pool[BFI_RIT_SEG_TOTAL_POOLS];
+};
+
+/**
+ *
+ * Rx object
+ *
+ */
+
+/* Rx datapath control structure */
+struct bna_rcb {
+ /* Fast path */
+ void **sw_qpt;
+ void *unmap_q;
+ u32 producer_index;
+ u32 consumer_index;
+ u32 q_depth;
+	void __iomem *q_dbell;
+ int page_idx;
+ int page_count;
+ /* Control path */
+ struct bna_rxq *rxq;
+ struct bna_cq *cq;
+ struct bnad *bnad;
+ unsigned long flags;
+ int id;
+};
+
+/* RxQ structure - QPT, configuration */
+struct bna_rxq {
+ struct list_head qe;
+ int rxq_id;
+
+ int buffer_size;
+ int q_depth;
+
+ struct bna_qpt qpt;
+ struct bna_rcb *rcb;
+
+ struct bna_rxp *rxp;
+ struct bna_rx *rx;
+
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_packets_with_error;
+ u64 rxbuf_alloc_failed;
+};
+
+/* RxQ pair */
+union bna_rxq_u {
+ struct {
+ struct bna_rxq *hdr;
+ struct bna_rxq *data;
+ } hds;
+ struct {
+ struct bna_rxq *small;
+ struct bna_rxq *large;
+ } slr;
+ struct {
+ struct bna_rxq *only;
+ struct bna_rxq *reserved;
+ } single;
+};
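+/*
+ * Note (inferred from the names above): the member in use follows
+ * bna_rxp_type - BNA_RXP_SINGLE reads .single, BNA_RXP_SLR
+ * (small/large) reads .slr, and BNA_RXP_HDS (header/data split)
+ * reads .hds.
+ */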
+
+/* Packet rate for Dynamic Interrupt Moderation */
+struct bna_pkt_rate {
+ u32 small_pkt_cnt;
+ u32 large_pkt_cnt;
+};
+
+/* Completion control structure */
+struct bna_ccb {
+ /* Fast path */
+ void **sw_qpt;
+ u32 producer_index;
+ volatile u32 *hw_producer_index;
+ u32 q_depth;
+ struct bna_ib_dbell *i_dbell;
+ struct bna_rcb *rcb[2];
+ void *ctrl; /* For bnad */
+ struct bna_pkt_rate pkt_rate;
+ int page_idx;
+ int page_count;
+
+ /* Control path */
+ struct bna_cq *cq;
+ struct bnad *bnad;
+ enum bna_intr_type intr_type;
+ int intr_vector;
+ u8 rx_coalescing_timeo; /* For NAPI */
+ int id;
+ char name[BNA_Q_NAME_SIZE];
+};
+
+/* CQ QPT, configuration */
+struct bna_cq {
+ int cq_id;
+
+ struct bna_qpt qpt;
+ struct bna_ccb *ccb;
+
+ struct bna_ib *ib;
+ u8 ib_seg_offset;
+
+ struct bna_rx *rx;
+};
+
+struct bna_rss_config {
+ enum rss_hash_type hash_type;
+ u8 hash_mask;
+ u32 toeplitz_hash_key[BFI_RSS_HASH_KEY_LEN];
+};
+
+struct bna_hds_config {
+ enum hds_header_type hdr_type;
+ int header_size;
+};
+
+/* This structure is used during RX creation */
+struct bna_rx_config {
+ enum bna_rx_type rx_type;
+ int num_paths;
+ enum bna_rxp_type rxp_type;
+ int paused;
+ int q_depth;
+ /*
+ * Small/Large (or Header/Data) buffer size to be configured
+ * for SLR and HDS queue type. Large buffer size comes from
+ * port->mtu.
+ */
+ int small_buff_size;
+
+ enum bna_status rss_status;
+ struct bna_rss_config rss_config;
+
+ enum bna_status hds_status;
+ struct bna_hds_config hds_config;
+
+ enum bna_status vlan_strip_status;
+};
+
+/* Rx Path structure - one per MSIX vector/CPU */
+struct bna_rxp {
+ /* This should be the first one */
+ struct list_head qe;
+
+ enum bna_rxp_type type;
+ union bna_rxq_u rxq;
+ struct bna_cq cq;
+
+ struct bna_rx *rx;
+
+ /* MSI-x vector number for configuring RSS */
+ int vector;
+
+ struct bna_mbox_qe mbox_qe;
+};
+
+/* HDS configuration structure */
+struct bna_rxf_hds {
+ enum hds_header_type hdr_type;
+ int header_size;
+};
+
+/* RSS configuration structure */
+struct bna_rxf_rss {
+ enum rss_hash_type hash_type;
+ u8 hash_mask;
+ u32 toeplitz_hash_key[BFI_RSS_HASH_KEY_LEN];
+};
+
+/* RxF structure (hardware Rx Function) */
+struct bna_rxf {
+ bfa_fsm_t fsm;
+ int rxf_id;
+ enum rxf_flags ctrl_flags;
+ u16 default_vlan_tag;
+ enum bna_rxf_oper_state rxf_oper_state;
+ enum bna_status hds_status;
+ struct bna_rxf_hds hds_cfg;
+ enum bna_status rss_status;
+ struct bna_rxf_rss rss_cfg;
+ struct bna_rit_segment *rit_segment;
+ struct bna_rx *rx;
+ u32 forced_offset;
+ struct bna_mbox_qe mbox_qe;
+ int mcast_rxq_id;
+
+ /* callback for bna_rxf_start() */
+ void (*start_cbfn) (struct bna_rx *rx, enum bna_cb_status status);
+ struct bna_rx *start_cbarg;
+
+ /* callback for bna_rxf_stop() */
+ void (*stop_cbfn) (struct bna_rx *rx, enum bna_cb_status status);
+ struct bna_rx *stop_cbarg;
+
+ /* callback for bna_rxf_receive_enable() / bna_rxf_receive_disable() */
+ void (*oper_state_cbfn) (struct bnad *bnad, struct bna_rx *rx,
+ enum bna_cb_status status);
+ struct bnad *oper_state_cbarg;
+
+ /**
+ * callback for:
+ * bna_rxf_ucast_set()
+ * bna_rxf_{ucast/mcast}_add(),
+ * bna_rxf_{ucast/mcast}_del(),
+ * bna_rxf_mode_set()
+ */
+ void (*cam_fltr_cbfn)(struct bnad *bnad, struct bna_rx *rx,
+ enum bna_cb_status status);
+ struct bnad *cam_fltr_cbarg;
+
+ enum bna_rxf_flags rxf_flags;
+
+ /* List of unicast addresses yet to be applied to h/w */
+ struct list_head ucast_pending_add_q;
+ struct list_head ucast_pending_del_q;
+ int ucast_pending_set;
+ /* ucast addresses applied to the h/w */
+ struct list_head ucast_active_q;
+ struct bna_mac *ucast_active_mac;
+
+ /* List of multicast addresses yet to be applied to h/w */
+ struct list_head mcast_pending_add_q;
+ struct list_head mcast_pending_del_q;
+ /* multicast addresses applied to the h/w */
+ struct list_head mcast_active_q;
+
+ /* Rx modes yet to be applied to h/w */
+ enum bna_rxmode rxmode_pending;
+ enum bna_rxmode rxmode_pending_bitmask;
+ /* Rx modes applied to h/w */
+ enum bna_rxmode rxmode_active;
+
+ enum bna_status vlan_filter_status;
+ u32 vlan_filter_table[(BFI_MAX_VLAN + 1) / 32];
+};
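+/*
+ * Sketch: vlan_filter_table is an ordinary bitmap over VLAN ids, so
+ * id v maps to word v / 32, bit v % 32 - e.g. VLAN 100 is word 3,
+ * bit 4.
+ */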
+
+/* Rx object */
+struct bna_rx {
+ /* This should be the first one */
+ struct list_head qe;
+
+ bfa_fsm_t fsm;
+
+ enum bna_rx_type type;
+
+ /* list-head for RX path objects */
+ struct list_head rxp_q;
+
+ struct bna_rxf rxf;
+
+ enum bna_rx_flags rx_flags;
+
+ struct bna_mbox_qe mbox_qe;
+
+ struct bfa_wc rxq_stop_wc;
+
+ /* Rx event handlers */
+ void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
+ void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
+ void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
+ void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
+ void (*rx_cleanup_cbfn)(struct bnad *, struct bna_ccb *);
+ void (*rx_post_cbfn)(struct bnad *, struct bna_rcb *);
+
+ /* callback for bna_rx_disable(), bna_rx_stop() */
+ void (*stop_cbfn)(void *arg, struct bna_rx *rx,
+ enum bna_cb_status status);
+ void *stop_cbarg;
+
+ struct bna *bna;
+ void *priv; /* bnad's cookie */
+};
+
+struct bna_rx_event_cbfn {
+ /* Optional */
+ void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
+ void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
+ void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
+ void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
+ /* Mandatory */
+ void (*rx_cleanup_cbfn)(struct bnad *, struct bna_ccb *);
+ void (*rx_post_cbfn)(struct bnad *, struct bna_rcb *);
+};
+
+/* Rx module - keeps track of free, active rx objects */
+struct bna_rx_mod {
+ struct bna *bna; /* back pointer to parent */
+ struct bna_rx *rx; /* BFI_MAX_RXQ entries */
+ struct bna_rxp *rxp; /* BFI_MAX_RXQ entries */
+ struct bna_rxq *rxq; /* BFI_MAX_RXQ entries */
+
+ struct list_head rx_free_q;
+ struct list_head rx_active_q;
+ int rx_free_count;
+
+ struct list_head rxp_free_q;
+ int rxp_free_count;
+
+ struct list_head rxq_free_q;
+ int rxq_free_count;
+
+ enum bna_rx_mod_flags flags;
+
+ /* callback for bna_rx_mod_stop() */
+ void (*stop_cbfn)(struct bna_port *port,
+ enum bna_cb_status status);
+
+ struct bfa_wc rx_stop_wc;
+ u32 dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX];
+ u32 rxf_bmap[2];
+};
+
+/**
+ *
+ * CAM
+ *
+ */
+
+struct bna_ucam_mod {
+ struct bna_mac *ucmac; /* BFI_MAX_UCMAC entries */
+ struct list_head free_q;
+
+ struct bna *bna;
+};
+
+struct bna_mcam_mod {
+ struct bna_mac *mcmac; /* BFI_MAX_MCMAC entries */
+ struct list_head free_q;
+
+ struct bna *bna;
+};
+
+/**
+ *
+ * Statistics
+ *
+ */
+
+struct bna_tx_stats {
+ int tx_state;
+ int tx_flags;
+ int num_txqs;
+ u32 txq_bmap[2];
+ int txf_id;
+};
+
+struct bna_rx_stats {
+ int rx_state;
+ int rx_flags;
+ int num_rxps;
+ int num_rxqs;
+ u32 rxq_bmap[2];
+ u32 cq_bmap[2];
+ int rxf_id;
+ int rxf_state;
+ int rxf_oper_state;
+ int num_active_ucast;
+ int num_active_mcast;
+ int rxmode_active;
+ int vlan_filter_status;
+ u32 vlan_filter_table[(BFI_MAX_VLAN + 1) / 32];
+ int rss_status;
+ int hds_status;
+};
+
+struct bna_sw_stats {
+ int device_state;
+ int port_state;
+ int port_flags;
+ int llport_state;
+ int priority;
+ int num_active_tx;
+ int num_active_rx;
+ struct bna_tx_stats tx_stats[BFI_MAX_TXQ];
+ struct bna_rx_stats rx_stats[BFI_MAX_RXQ];
+};
+
+struct bna_stats {
+ u32 txf_bmap[2];
+ u32 rxf_bmap[2];
+ struct bfi_ll_stats *hw_stats;
+ struct bna_sw_stats *sw_stats;
+};
+
+/**
+ *
+ * BNA
+ *
+ */
+
+struct bna {
+ struct bfa_pcidev pcidev;
+
+ int port_num;
+
+ struct bna_chip_regs regs;
+
+ struct bna_dma_addr hw_stats_dma;
+ struct bna_stats stats;
+
+ struct bna_device device;
+ struct bfa_cee cee;
+
+ struct bna_mbox_mod mbox_mod;
+
+ struct bna_port port;
+
+ struct bna_tx_mod tx_mod;
+
+ struct bna_rx_mod rx_mod;
+
+ struct bna_ib_mod ib_mod;
+
+ struct bna_ucam_mod ucam_mod;
+ struct bna_mcam_mod mcam_mod;
+
+ struct bna_rit_mod rit_mod;
+
+ int rxf_default_id;
+ int rxf_promisc_id;
+
+ struct bna_mbox_qe mbox_qe;
+
+ struct bnad *bnad;
+};
+
+#endif /* __BNA_TYPES_H__ */
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
new file mode 100644
index 000000000000..e380c0e88f4f
--- /dev/null
+++ b/drivers/net/bna/bnad.c
@@ -0,0 +1,3267 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/in.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+
+#include "bnad.h"
+#include "bna.h"
+#include "cna.h"
+
+DEFINE_MUTEX(bnad_fwimg_mutex);
+
+/*
+ * Module params
+ */
+static uint bnad_msix_disable;
+module_param(bnad_msix_disable, uint, 0444);
+MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
+
+static uint bnad_ioc_auto_recover = 1;
+module_param(bnad_ioc_auto_recover, uint, 0444);
+MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
+
+/*
+ * Global variables
+ */
+u32 bnad_rxqs_per_cq = 2;
+
+const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+/*
+ * Local MACROS
+ */
+#define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
+
+#define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
+
+#define BNAD_GET_MBOX_IRQ(_bnad) \
+ (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
+ ((_bnad)->msix_table[(_bnad)->msix_num - 1].vector) : \
+ ((_bnad)->pcidev->irq))
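+/*
+ * Note: in MSIX mode the mailbox is pinned to the last vector
+ * (msix_num - 1); bnad_txrx_irq_alloc() below lays out Tx vectors
+ * first and Rx vectors after them, leaving the tail for the mbox.
+ */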
+
+#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth) \
+do { \
+ (_res_info)->res_type = BNA_RES_T_MEM; \
+ (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
+ (_res_info)->res_u.mem_info.num = (_num); \
+ (_res_info)->res_u.mem_info.len = \
+ sizeof(struct bnad_unmap_q) + \
+ (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
+} while (0)
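+/*
+ * Usage sketch (illustrative; the actual call sites may differ):
+ *
+ *	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
+ *				 bnad->num_txq_per_tx, BNAD_TX_UNMAPQ_DEPTH);
+ *
+ * requests _num kernel-VA buffers, each sized for a bnad_unmap_q
+ * header plus (_depth - 1) trailing bnad_skb_unmap slots.
+ */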
+
+/*
+ * Reinitialize completions in CQ, once Rx is taken down
+ */
+static void
+bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
+{
+ struct bna_cq_entry *cmpl, *next_cmpl;
+ unsigned int wi_range, wis = 0, ccb_prod = 0;
+ int i;
+
+ BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
+ wi_range);
+
+ for (i = 0; i < ccb->q_depth; i++) {
+ wis++;
+ if (likely(--wi_range))
+ next_cmpl = cmpl + 1;
+ else {
+ BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
+ wis = 0;
+ BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
+ next_cmpl, wi_range);
+ }
+ cmpl->valid = 0;
+ cmpl = next_cmpl;
+ }
+}
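+/*
+ * Note on the page walk above (assuming BNA_QE_INDX_ADD advances an
+ * index modulo q_depth): completions are touched through the s/w QPT
+ * one page at a time; when wi_range hits zero, ccb_prod is bumped by
+ * the accumulated work-item count and the next page is re-fetched.
+ */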
+
+/*
+ * Frees all pending Tx Bufs
+ * At this point no activity is expected on the Q,
+ * so DMA unmap & freeing is fine.
+ */
+static void
+bnad_free_all_txbufs(struct bnad *bnad,
+ struct bna_tcb *tcb)
+{
+ u16 unmap_cons;
+ struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+ struct bnad_skb_unmap *unmap_array;
+ struct sk_buff *skb = NULL;
+ int i;
+
+ unmap_array = unmap_q->unmap_array;
+
+ unmap_cons = 0;
+ while (unmap_cons < unmap_q->q_depth) {
+ skb = unmap_array[unmap_cons].skb;
+ if (!skb) {
+ unmap_cons++;
+ continue;
+ }
+ unmap_array[unmap_cons].skb = NULL;
+
+ pci_unmap_single(bnad->pcidev,
+ pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_addr), skb_headlen(skb),
+ PCI_DMA_TODEVICE);
+
+ pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+ unmap_cons++;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ pci_unmap_page(bnad->pcidev,
+ pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_addr),
+ skb_shinfo(skb)->frags[i].size,
+ PCI_DMA_TODEVICE);
+ pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+ 0);
+ unmap_cons++;
+ }
+ dev_kfree_skb_any(skb);
+ }
+}
+
+/* Data Path Handlers */
+
+/*
+ * bnad_free_txbufs : Frees the Tx bufs on Tx completion
+ * Can be called in a) Interrupt context
+ * b) Sending context
+ * c) Tasklet context
+ */
+static u32
+bnad_free_txbufs(struct bnad *bnad,
+ struct bna_tcb *tcb)
+{
+ u32 sent_packets = 0, sent_bytes = 0;
+ u16 wis, unmap_cons, updated_hw_cons;
+ struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+ struct bnad_skb_unmap *unmap_array;
+ struct sk_buff *skb;
+ int i;
+
+ /*
+	 * Just return if TX is stopped. This check is useful when
+	 * bnad_free_txbufs() runs from a tasklet that was scheduled
+	 * before bnad_cb_tx_cleanup() cleared the BNAD_RF_TX_STARTED
+	 * bit, but that actually executes after the cleanup has run.
+ */
+ if (!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
+ return 0;
+
+ updated_hw_cons = *(tcb->hw_consumer_index);
+
+ wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
+ updated_hw_cons, tcb->q_depth);
+
+	BUG_ON(wis > BNA_QE_IN_USE_CNT(tcb, tcb->q_depth));
+
+ unmap_array = unmap_q->unmap_array;
+ unmap_cons = unmap_q->consumer_index;
+
+ prefetch(&unmap_array[unmap_cons + 1]);
+ while (wis) {
+ skb = unmap_array[unmap_cons].skb;
+
+ unmap_array[unmap_cons].skb = NULL;
+
+ sent_packets++;
+ sent_bytes += skb->len;
+ wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
+
+ pci_unmap_single(bnad->pcidev,
+ pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_addr), skb_headlen(skb),
+ PCI_DMA_TODEVICE);
+ pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+ BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
+
+ prefetch(&unmap_array[unmap_cons + 1]);
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ prefetch(&unmap_array[unmap_cons + 1]);
+
+ pci_unmap_page(bnad->pcidev,
+ pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_addr),
+ skb_shinfo(skb)->frags[i].size,
+ PCI_DMA_TODEVICE);
+ pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+ 0);
+ BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
+ }
+ dev_kfree_skb_any(skb);
+ }
+
+ /* Update consumer pointers. */
+ tcb->consumer_index = updated_hw_cons;
+ unmap_q->consumer_index = unmap_cons;
+
+ tcb->txq->tx_packets += sent_packets;
+ tcb->txq->tx_bytes += sent_bytes;
+
+ return sent_packets;
+}
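+/*
+ * Note: the loop above decrements wis by BNA_TXQ_WI_NEEDED(1 + nr_frags)
+ * per skb, i.e. by the number of work items the send path consumed for
+ * that packet, so the walk stops exactly at the hardware consumer index.
+ */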
+
+/*
+ * Tx free tasklet: frees buffers for all the tcb's in all the Tx's.
+ * Scheduled from the sending context, so that the fat Tx lock is not
+ * held there for too long.
+ */
+static void
+bnad_tx_free_tasklet(unsigned long bnad_ptr)
+{
+ struct bnad *bnad = (struct bnad *)bnad_ptr;
+ struct bna_tcb *tcb;
+ u32 acked;
+ int i, j;
+
+ for (i = 0; i < bnad->num_tx; i++) {
+ for (j = 0; j < bnad->num_txq_per_tx; j++) {
+ tcb = bnad->tx_info[i].tcb[j];
+ if (!tcb)
+ continue;
+ if (((u16) (*tcb->hw_consumer_index) !=
+ tcb->consumer_index) &&
+ (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
+ &tcb->flags))) {
+ acked = bnad_free_txbufs(bnad, tcb);
+ bna_ib_ack(tcb->i_dbell, acked);
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+ }
+ }
+ }
+}
+
+static u32
+bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
+{
+ struct net_device *netdev = bnad->netdev;
+ u32 sent;
+
+ if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
+ return 0;
+
+ sent = bnad_free_txbufs(bnad, tcb);
+ if (sent) {
+ if (netif_queue_stopped(netdev) &&
+ netif_carrier_ok(netdev) &&
+ BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
+ BNAD_NETIF_WAKE_THRESHOLD) {
+ netif_wake_queue(netdev);
+ BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+ }
+ bna_ib_ack(tcb->i_dbell, sent);
+ } else
+ bna_ib_ack(tcb->i_dbell, 0);
+
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+
+ return sent;
+}
+
+/* MSIX Tx Completion Handler */
+static irqreturn_t
+bnad_msix_tx(int irq, void *data)
+{
+ struct bna_tcb *tcb = (struct bna_tcb *)data;
+ struct bnad *bnad = tcb->bnad;
+
+ bnad_tx(bnad, tcb);
+
+ return IRQ_HANDLED;
+}
+
+static void
+bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
+{
+ struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+
+ rcb->producer_index = 0;
+ rcb->consumer_index = 0;
+
+ unmap_q->producer_index = 0;
+ unmap_q->consumer_index = 0;
+}
+
+static void
+bnad_free_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
+{
+ struct bnad_unmap_q *unmap_q;
+ struct sk_buff *skb;
+
+ unmap_q = rcb->unmap_q;
+ while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
+ skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+		BUG_ON(!skb);
+ unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+ pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
+ unmap_array[unmap_q->consumer_index],
+ dma_addr), rcb->rxq->buffer_size +
+ NET_IP_ALIGN, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(skb);
+ BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+ BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
+ }
+
+ bnad_reset_rcb(bnad, rcb);
+}
+
+static void
+bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
+{
+ u16 to_alloc, alloced, unmap_prod, wi_range;
+ struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+ struct bnad_skb_unmap *unmap_array;
+ struct bna_rxq_entry *rxent;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+
+ alloced = 0;
+ to_alloc =
+ BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
+
+ unmap_array = unmap_q->unmap_array;
+ unmap_prod = unmap_q->producer_index;
+
+ BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
+
+ while (to_alloc--) {
+ if (!wi_range) {
+ BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
+ wi_range);
+ }
+ skb = alloc_skb(rcb->rxq->buffer_size + NET_IP_ALIGN,
+ GFP_ATOMIC);
+ if (unlikely(!skb)) {
+ BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
+ goto finishing;
+ }
+ skb->dev = bnad->netdev;
+ skb_reserve(skb, NET_IP_ALIGN);
+ unmap_array[unmap_prod].skb = skb;
+ dma_addr = pci_map_single(bnad->pcidev, skb->data,
+ rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
+ pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
+ dma_addr);
+ BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
+ BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+
+ rxent++;
+ wi_range--;
+ alloced++;
+ }
+
+finishing:
+ if (likely(alloced)) {
+ unmap_q->producer_index = unmap_prod;
+ rcb->producer_index = unmap_prod;
+ smp_mb();
+ bna_rxq_prod_indx_doorbell(rcb);
+ }
+}
+
+/*
+ * Locking is required in the enable path because it is called
+ * from NAPI poll context, where bna_lock is not held, unlike
+ * in the IRQ context.
+ */
+static void
+bnad_enable_txrx_irqs(struct bnad *bnad)
+{
+ struct bna_tcb *tcb;
+ struct bna_ccb *ccb;
+ int i, j;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ for (i = 0; i < bnad->num_tx; i++) {
+ for (j = 0; j < bnad->num_txq_per_tx; j++) {
+ tcb = bnad->tx_info[i].tcb[j];
+ bna_ib_coalescing_timer_set(tcb->i_dbell,
+ tcb->txq->ib->ib_config.coalescing_timeo);
+ bna_ib_ack(tcb->i_dbell, 0);
+ }
+ }
+
+ for (i = 0; i < bnad->num_rx; i++) {
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ ccb = bnad->rx_info[i].rx_ctrl[j].ccb;
+ bnad_enable_rx_irq_unsafe(ccb);
+ }
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+static inline void
+bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
+{
+ struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+
+ if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
+ if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
+ >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
+ bnad_alloc_n_post_rxbufs(bnad, rcb);
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
+ }
+}
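+/*
+ * Sketch: the shift acts as a cheap threshold - with
+ * BNAD_RXQ_REFILL_THRESHOLD_SHIFT == n, the refill only runs once at
+ * least 2^n entries are free, batching allocations instead of posting
+ * per packet.
+ */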
+
+static u32
+bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
+{
+ struct bna_cq_entry *cmpl, *next_cmpl;
+ struct bna_rcb *rcb = NULL;
+ unsigned int wi_range, packets = 0, wis = 0;
+ struct bnad_unmap_q *unmap_q;
+ struct sk_buff *skb;
+ u32 flags;
+ u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
+ struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
+
+ prefetch(bnad->netdev);
+ BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
+ wi_range);
+	BUG_ON(wi_range > ccb->q_depth);
+ while (cmpl->valid && packets < budget) {
+ packets++;
+ BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
+
+ if (qid0 == cmpl->rxq_id)
+ rcb = ccb->rcb[0];
+ else
+ rcb = ccb->rcb[1];
+
+ unmap_q = rcb->unmap_q;
+
+ skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+		BUG_ON(!skb);
+ unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+ pci_unmap_single(bnad->pcidev,
+ pci_unmap_addr(&unmap_q->
+ unmap_array[unmap_q->
+ consumer_index],
+ dma_addr),
+ rcb->rxq->buffer_size,
+ PCI_DMA_FROMDEVICE);
+ BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+
+		/* XXX: could this be more efficient? Revisit for performance. */
+ BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
+
+ wis++;
+ if (likely(--wi_range))
+ next_cmpl = cmpl + 1;
+ else {
+ BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
+ wis = 0;
+ BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
+ next_cmpl, wi_range);
+			BUG_ON(wi_range > ccb->q_depth);
+ }
+ prefetch(next_cmpl);
+
+ flags = ntohl(cmpl->flags);
+ if (unlikely
+ (flags &
+ (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
+ BNA_CQ_EF_TOO_LONG))) {
+ dev_kfree_skb_any(skb);
+ rcb->rxq->rx_packets_with_error++;
+ goto next;
+ }
+
+ skb_put(skb, ntohs(cmpl->length));
+ if (likely
+ (bnad->rx_csum &&
+ (((flags & BNA_CQ_EF_IPV4) &&
+ (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
+ (flags & BNA_CQ_EF_IPV6)) &&
+ (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
+ (flags & BNA_CQ_EF_L4_CKSUM_OK)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+
+ rcb->rxq->rx_packets++;
+ rcb->rxq->rx_bytes += skb->len;
+ skb->protocol = eth_type_trans(skb, bnad->netdev);
+
+ if (bnad->vlan_grp && (flags & BNA_CQ_EF_VLAN)) {
+ struct bnad_rx_ctrl *rx_ctrl =
+ (struct bnad_rx_ctrl *)ccb->ctrl;
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+ vlan_gro_receive(&rx_ctrl->napi, bnad->vlan_grp,
+ ntohs(cmpl->vlan_tag), skb);
+ else
+ vlan_hwaccel_receive_skb(skb,
+ bnad->vlan_grp,
+ ntohs(cmpl->vlan_tag));
+
+ } else { /* Not VLAN tagged/stripped */
+ struct bnad_rx_ctrl *rx_ctrl =
+ (struct bnad_rx_ctrl *)ccb->ctrl;
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+ napi_gro_receive(&rx_ctrl->napi, skb);
+ else
+ netif_receive_skb(skb);
+ }
+
+next:
+ cmpl->valid = 0;
+ cmpl = next_cmpl;
+ }
+
+ BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
+
+ if (likely(ccb)) {
+ bna_ib_ack(ccb->i_dbell, packets);
+ bnad_refill_rxq(bnad, ccb->rcb[0]);
+ if (ccb->rcb[1])
+ bnad_refill_rxq(bnad, ccb->rcb[1]);
+ } else
+ bna_ib_ack(ccb->i_dbell, 0);
+
+ return packets;
+}
+
+static void
+bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
+{
+ bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
+ bna_ib_ack(ccb->i_dbell, 0);
+}
+
+static void
+bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
+{
+ spin_lock_irq(&bnad->bna_lock); /* Because of polling context */
+ bnad_enable_rx_irq_unsafe(ccb);
+ spin_unlock_irq(&bnad->bna_lock);
+}
+
+static void
+bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
+{
+ struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
+ if (likely(napi_schedule_prep((&rx_ctrl->napi)))) {
+ bnad_disable_rx_irq(bnad, ccb);
+ __napi_schedule((&rx_ctrl->napi));
+ }
+ BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
+}
+
+/* MSIX Rx Path Handler */
+static irqreturn_t
+bnad_msix_rx(int irq, void *data)
+{
+ struct bna_ccb *ccb = (struct bna_ccb *)data;
+ struct bnad *bnad = ccb->bnad;
+
+ bnad_netif_rx_schedule_poll(bnad, ccb);
+
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handlers */
+
+/* Mbox Interrupt Handlers */
+static irqreturn_t
+bnad_msix_mbox_handler(int irq, void *data)
+{
+ u32 intr_status;
+ unsigned long flags;
+ struct net_device *netdev = data;
+ struct bnad *bnad;
+
+ bnad = netdev_priv(netdev);
+
+ /* BNA_ISR_GET(bnad); Inc Ref count */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ bna_intr_status_get(&bnad->bna, intr_status);
+
+ if (BNA_IS_MBOX_ERR_INTR(intr_status))
+ bna_mbox_handler(&bnad->bna, intr_status);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* BNAD_ISR_PUT(bnad); Dec Ref count */
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+bnad_isr(int irq, void *data)
+{
+ int i, j;
+ u32 intr_status;
+ unsigned long flags;
+ struct net_device *netdev = data;
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bnad_rx_info *rx_info;
+ struct bnad_rx_ctrl *rx_ctrl;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ bna_intr_status_get(&bnad->bna, intr_status);
+ if (!intr_status) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ return IRQ_NONE;
+ }
+
+ if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
+ bna_mbox_handler(&bnad->bna, intr_status);
+ if (!BNA_IS_INTX_DATA_INTR(intr_status)) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ goto done;
+ }
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Process data interrupts */
+ for (i = 0; i < bnad->num_rx; i++) {
+ rx_info = &bnad->rx_info[i];
+ if (!rx_info->rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ rx_ctrl = &rx_info->rx_ctrl[j];
+ if (rx_ctrl->ccb)
+ bnad_netif_rx_schedule_poll(bnad,
+ rx_ctrl->ccb);
+ }
+ }
+done:
+ return IRQ_HANDLED;
+}
+
+/*
+ * Called in interrupt / callback context
+ * with bna_lock held, so cfg_flags access is OK
+ */
+static void
+bnad_enable_mbox_irq(struct bnad *bnad)
+{
+ int irq = BNAD_GET_MBOX_IRQ(bnad);
+
+ if (!(bnad->cfg_flags & BNAD_CF_MSIX))
+ return;
+
+ if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
+ enable_irq(irq);
+ BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
+}
+
+/*
+ * Called with bnad->bna_lock held because of
+ * bnad->cfg_flags access.
+ */
+void
+bnad_disable_mbox_irq(struct bnad *bnad)
+{
+ int irq = BNAD_GET_MBOX_IRQ(bnad);
+
+ if (!(bnad->cfg_flags & BNAD_CF_MSIX))
+ return;
+
+ if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
+ disable_irq_nosync(irq);
+ BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
+}
+
+/* Control Path Handlers */
+
+/* Callbacks */
+void
+bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
+{
+ bnad_enable_mbox_irq(bnad);
+}
+
+void
+bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
+{
+ bnad_disable_mbox_irq(bnad);
+}
+
+void
+bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
+{
+ complete(&bnad->bnad_completions.ioc_comp);
+ bnad->bnad_completions.ioc_comp_status = status;
+}
+
+void
+bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
+{
+ complete(&bnad->bnad_completions.ioc_comp);
+ bnad->bnad_completions.ioc_comp_status = status;
+}
+
+static void
+bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
+{
+ struct bnad *bnad = (struct bnad *)arg;
+
+ complete(&bnad->bnad_completions.port_comp);
+
+ netif_carrier_off(bnad->netdev);
+}
+
+void
+bnad_cb_port_link_status(struct bnad *bnad,
+ enum bna_link_status link_status)
+{
+	bool link_up = false;
+
+ link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
+
+ if (link_status == BNA_CEE_UP) {
+ set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
+ BNAD_UPDATE_CTR(bnad, cee_up);
+ } else
+ clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
+
+ if (link_up) {
+ if (!netif_carrier_ok(bnad->netdev)) {
+ pr_warn("bna: %s link up\n",
+ bnad->netdev->name);
+ netif_carrier_on(bnad->netdev);
+ BNAD_UPDATE_CTR(bnad, link_toggle);
+ if (test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) {
+ /* Force an immediate Transmit Schedule */
+ pr_info("bna: %s TX_STARTED\n",
+ bnad->netdev->name);
+ netif_wake_queue(bnad->netdev);
+ BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+ } else {
+ netif_stop_queue(bnad->netdev);
+ BNAD_UPDATE_CTR(bnad, netif_queue_stop);
+ }
+ }
+ } else {
+ if (netif_carrier_ok(bnad->netdev)) {
+ pr_warn("bna: %s link down\n",
+ bnad->netdev->name);
+ netif_carrier_off(bnad->netdev);
+ BNAD_UPDATE_CTR(bnad, link_toggle);
+ }
+ }
+}
+
+static void
+bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
+ enum bna_cb_status status)
+{
+ struct bnad *bnad = (struct bnad *)arg;
+
+ complete(&bnad->bnad_completions.tx_comp);
+}
+
+static void
+bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
+{
+ struct bnad_tx_info *tx_info =
+ (struct bnad_tx_info *)tcb->txq->tx->priv;
+ struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+
+ tx_info->tcb[tcb->id] = tcb;
+ unmap_q->producer_index = 0;
+ unmap_q->consumer_index = 0;
+ unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
+}
+
+static void
+bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
+{
+ struct bnad_tx_info *tx_info =
+ (struct bnad_tx_info *)tcb->txq->tx->priv;
+
+ tx_info->tcb[tcb->id] = NULL;
+}
+
+static void
+bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
+{
+ struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+
+ unmap_q->producer_index = 0;
+ unmap_q->consumer_index = 0;
+ unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
+}
+
+static void
+bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
+{
+ struct bnad_rx_info *rx_info =
+ (struct bnad_rx_info *)ccb->cq->rx->priv;
+
+ rx_info->rx_ctrl[ccb->id].ccb = ccb;
+ ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
+}
+
+static void
+bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
+{
+ struct bnad_rx_info *rx_info =
+ (struct bnad_rx_info *)ccb->cq->rx->priv;
+
+ rx_info->rx_ctrl[ccb->id].ccb = NULL;
+}
+
+static void
+bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
+{
+ struct bnad_tx_info *tx_info =
+ (struct bnad_tx_info *)tcb->txq->tx->priv;
+
+ if (tx_info != &bnad->tx_info[0])
+ return;
+
+ clear_bit(BNAD_RF_TX_STARTED, &bnad->run_flags);
+ netif_stop_queue(bnad->netdev);
+ pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
+}
+
+static void
+bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
+{
+ if (test_and_set_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
+ return;
+
+ if (netif_carrier_ok(bnad->netdev)) {
+ pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
+ netif_wake_queue(bnad->netdev);
+ BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+ }
+}
+
+static void
+bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
+{
+ struct bnad_unmap_q *unmap_q;
+
+ if (!tcb || (!tcb->unmap_q))
+ return;
+
+ unmap_q = tcb->unmap_q;
+ if (!unmap_q->unmap_array)
+ return;
+
+ if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
+ return;
+
+ bnad_free_all_txbufs(bnad, tcb);
+
+ unmap_q->producer_index = 0;
+ unmap_q->consumer_index = 0;
+
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+}
+
+static void
+bnad_cb_rx_cleanup(struct bnad *bnad,
+ struct bna_ccb *ccb)
+{
+ bnad_cq_cmpl_init(bnad, ccb);
+
+ bnad_free_rxbufs(bnad, ccb->rcb[0]);
+ clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
+
+ if (ccb->rcb[1]) {
+ bnad_free_rxbufs(bnad, ccb->rcb[1]);
+ clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
+ }
+}
+
+static void
+bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
+{
+ struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+
+ set_bit(BNAD_RXQ_STARTED, &rcb->flags);
+
+	/*
+	 * Now allocate & post buffers for this RCB.
+	 * Note: allocation happens in callback context.
+	 */
+ if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
+ if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
+ >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
+ bnad_alloc_n_post_rxbufs(bnad, rcb);
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
+ }
+}
+
+static void
+bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
+ enum bna_cb_status status)
+{
+ struct bnad *bnad = (struct bnad *)arg;
+
+ complete(&bnad->bnad_completions.rx_comp);
+}
+
+static void
+bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
+ enum bna_cb_status status)
+{
+ bnad->bnad_completions.mcast_comp_status = status;
+ complete(&bnad->bnad_completions.mcast_comp);
+}
+
+void
+bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
+ struct bna_stats *stats)
+{
+ if (status == BNA_CB_SUCCESS)
+ BNAD_UPDATE_CTR(bnad, hw_stats_updates);
+
+ if (!netif_running(bnad->netdev) ||
+ !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
+ return;
+
+ mod_timer(&bnad->stats_timer,
+ jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
+}
+
+void
+bnad_cb_stats_clr(struct bnad *bnad)
+{
+}
+
+/* Resource allocation, free functions */
+
+static void
+bnad_mem_free(struct bnad *bnad,
+ struct bna_mem_info *mem_info)
+{
+ int i;
+ dma_addr_t dma_pa;
+
+ if (mem_info->mdl == NULL)
+ return;
+
+ for (i = 0; i < mem_info->num; i++) {
+ if (mem_info->mdl[i].kva != NULL) {
+ if (mem_info->mem_type == BNA_MEM_T_DMA) {
+ BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
+ dma_pa);
+ pci_free_consistent(bnad->pcidev,
+ mem_info->mdl[i].len,
+ mem_info->mdl[i].kva, dma_pa);
+ } else
+ kfree(mem_info->mdl[i].kva);
+ }
+ }
+ kfree(mem_info->mdl);
+ mem_info->mdl = NULL;
+}
+
+static int
+bnad_mem_alloc(struct bnad *bnad,
+ struct bna_mem_info *mem_info)
+{
+ int i;
+ dma_addr_t dma_pa;
+
+ if ((mem_info->num == 0) || (mem_info->len == 0)) {
+ mem_info->mdl = NULL;
+ return 0;
+ }
+
+ mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
+ GFP_KERNEL);
+ if (mem_info->mdl == NULL)
+ return -ENOMEM;
+
+ if (mem_info->mem_type == BNA_MEM_T_DMA) {
+ for (i = 0; i < mem_info->num; i++) {
+ mem_info->mdl[i].len = mem_info->len;
+ mem_info->mdl[i].kva =
+ pci_alloc_consistent(bnad->pcidev,
+ mem_info->len, &dma_pa);
+
+ if (mem_info->mdl[i].kva == NULL)
+ goto err_return;
+
+ BNA_SET_DMA_ADDR(dma_pa,
+ &(mem_info->mdl[i].dma));
+ }
+ } else {
+ for (i = 0; i < mem_info->num; i++) {
+ mem_info->mdl[i].len = mem_info->len;
+ mem_info->mdl[i].kva = kzalloc(mem_info->len,
+ GFP_KERNEL);
+ if (mem_info->mdl[i].kva == NULL)
+ goto err_return;
+ }
+ }
+
+ return 0;
+
+err_return:
+ bnad_mem_free(bnad, mem_info);
+ return -ENOMEM;
+}
+
+/* Free IRQ for Mailbox */
+static void
+bnad_mbox_irq_free(struct bnad *bnad,
+ struct bna_intr_info *intr_info)
+{
+ int irq;
+ unsigned long flags;
+
+ if (intr_info->idl == NULL)
+ return;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ bnad_disable_mbox_irq(bnad);
+
+ irq = BNAD_GET_MBOX_IRQ(bnad);
+ free_irq(irq, bnad->netdev);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ kfree(intr_info->idl);
+}
+
+/*
+ * Allocates the IRQ for the mailbox, but keeps it disabled.
+ * It will be enabled once we get the mbox-enable callback
+ * from bna.
+ */
+static int
+bnad_mbox_irq_alloc(struct bnad *bnad,
+ struct bna_intr_info *intr_info)
+{
+	int err;
+	unsigned long irq_flags, flags;	/* keep request_irq() flags apart from the irqsave state */
+	u32 irq;
+	irq_handler_t irq_handler;
+
+ /* Mbox should use only 1 vector */
+
+ intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
+ if (!intr_info->idl)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (bnad->cfg_flags & BNAD_CF_MSIX) {
+ irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
+ irq = bnad->msix_table[bnad->msix_num - 1].vector;
+		irq_flags = 0;
+ intr_info->intr_type = BNA_INTR_T_MSIX;
+ intr_info->idl[0].vector = bnad->msix_num - 1;
+ } else {
+ irq_handler = (irq_handler_t)bnad_isr;
+ irq = bnad->pcidev->irq;
+		irq_flags = IRQF_SHARED;
+ intr_info->intr_type = BNA_INTR_T_INTX;
+ /* intr_info->idl.vector = 0 ? */
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
+
+	err = request_irq(irq, irq_handler, irq_flags,
+			  bnad->mbox_irq_name, bnad->netdev);
+ if (err) {
+ kfree(intr_info->idl);
+ intr_info->idl = NULL;
+ return err;
+ }
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bnad_disable_mbox_irq(bnad);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ return 0;
+}
+
+static void
+bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
+{
+ kfree(intr_info->idl);
+ intr_info->idl = NULL;
+}
+
+/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
+static int
+bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
+ uint txrx_id, struct bna_intr_info *intr_info)
+{
+ int i, vector_start = 0;
+ u32 cfg_flags;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ cfg_flags = bnad->cfg_flags;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ if (cfg_flags & BNAD_CF_MSIX) {
+ intr_info->intr_type = BNA_INTR_T_MSIX;
+ intr_info->idl = kcalloc(intr_info->num,
+ sizeof(struct bna_intr_descr),
+ GFP_KERNEL);
+ if (!intr_info->idl)
+ return -ENOMEM;
+
+ switch (src) {
+ case BNAD_INTR_TX:
+ vector_start = txrx_id;
+ break;
+
+ case BNAD_INTR_RX:
+ vector_start = bnad->num_tx * bnad->num_txq_per_tx +
+ txrx_id;
+ break;
+
+ default:
+ BUG();
+ }
+
+ for (i = 0; i < intr_info->num; i++)
+ intr_info->idl[i].vector = vector_start + i;
+ } else {
+ intr_info->intr_type = BNA_INTR_T_INTX;
+ intr_info->num = 1;
+ intr_info->idl = kcalloc(intr_info->num,
+ sizeof(struct bna_intr_descr),
+ GFP_KERNEL);
+ if (!intr_info->idl)
+ return -ENOMEM;
+
+ switch (src) {
+ case BNAD_INTR_TX:
+ intr_info->idl[0].vector = 0x1; /* Bit mask : Tx IB */
+ break;
+
+ case BNAD_INTR_RX:
+ intr_info->idl[0].vector = 0x2; /* Bit mask : Rx IB */
+ break;
+ }
+ }
+ return 0;
+}
+
+/**
+ * NOTE: Should be called for MSIX only
+ * Unregisters Tx MSIX vector(s) from the kernel
+ */
+static void
+bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
+ int num_txqs)
+{
+ int i;
+ int vector_num;
+
+ for (i = 0; i < num_txqs; i++) {
+ if (tx_info->tcb[i] == NULL)
+ continue;
+
+ vector_num = tx_info->tcb[i]->intr_vector;
+ free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
+ }
+}
+
+/**
+ * NOTE: Should be called for MSIX only
+ * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
+ */
+static int
+bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
+ uint tx_id, int num_txqs)
+{
+ int i;
+ int err;
+ int vector_num;
+
+ for (i = 0; i < num_txqs; i++) {
+ vector_num = tx_info->tcb[i]->intr_vector;
+ sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
+ tx_id + tx_info->tcb[i]->id);
+ err = request_irq(bnad->msix_table[vector_num].vector,
+ (irq_handler_t)bnad_msix_tx, 0,
+ tx_info->tcb[i]->name,
+ tx_info->tcb[i]);
+ if (err)
+ goto err_return;
+ }
+
+ return 0;
+
+err_return:
+	/* request_irq() failed at index i, so i vectors were registered */
+	if (i > 0)
+		bnad_tx_msix_unregister(bnad, tx_info, i);
+ return -1;
+}
+
+/**
+ * NOTE: Should be called for MSIX only
+ * Unregisters Rx MSIX vector(s) from the kernel
+ */
+static void
+bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
+ int num_rxps)
+{
+ int i;
+ int vector_num;
+
+ for (i = 0; i < num_rxps; i++) {
+ if (rx_info->rx_ctrl[i].ccb == NULL)
+ continue;
+
+ vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
+ free_irq(bnad->msix_table[vector_num].vector,
+ rx_info->rx_ctrl[i].ccb);
+ }
+}
+
+/**
+ * NOTE: Should be called for MSIX only
+ * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
+ */
+static int
+bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
+ uint rx_id, int num_rxps)
+{
+ int i;
+ int err;
+ int vector_num;
+
+ for (i = 0; i < num_rxps; i++) {
+ vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
+ sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
+ bnad->netdev->name,
+ rx_id + rx_info->rx_ctrl[i].ccb->id);
+ err = request_irq(bnad->msix_table[vector_num].vector,
+ (irq_handler_t)bnad_msix_rx, 0,
+ rx_info->rx_ctrl[i].ccb->name,
+ rx_info->rx_ctrl[i].ccb);
+ if (err)
+ goto err_return;
+ }
+
+ return 0;
+
+err_return:
+	/* request_irq() failed at index i, so i vectors were registered */
+	if (i > 0)
+		bnad_rx_msix_unregister(bnad, rx_info, i);
+ return -1;
+}
+
+/* Free Tx object Resources */
+static void
+bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
+{
+ int i;
+
+ for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
+ if (res_info[i].res_type == BNA_RES_T_MEM)
+ bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
+ else if (res_info[i].res_type == BNA_RES_T_INTR)
+ bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
+ }
+}
+
+/* Allocates memory and interrupt resources for Tx object */
+static int
+bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
+ uint tx_id)
+{
+ int i, err = 0;
+
+ for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
+ if (res_info[i].res_type == BNA_RES_T_MEM)
+ err = bnad_mem_alloc(bnad,
+ &res_info[i].res_u.mem_info);
+ else if (res_info[i].res_type == BNA_RES_T_INTR)
+ err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
+ &res_info[i].res_u.intr_info);
+ if (err)
+ goto err_return;
+ }
+ return 0;
+
+err_return:
+ bnad_tx_res_free(bnad, res_info);
+ return err;
+}
+
+/* Free Rx object Resources */
+static void
+bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
+{
+ int i;
+
+ for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
+ if (res_info[i].res_type == BNA_RES_T_MEM)
+ bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
+ else if (res_info[i].res_type == BNA_RES_T_INTR)
+ bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
+ }
+}
+
+/* Allocates memory and interrupt resources for Rx object */
+static int
+bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
+ uint rx_id)
+{
+ int i, err = 0;
+
+ /* All memory needs to be allocated before setup_ccbs */
+ for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
+ if (res_info[i].res_type == BNA_RES_T_MEM)
+ err = bnad_mem_alloc(bnad,
+ &res_info[i].res_u.mem_info);
+ else if (res_info[i].res_type == BNA_RES_T_INTR)
+ err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
+ &res_info[i].res_u.intr_info);
+ if (err)
+ goto err_return;
+ }
+ return 0;
+
+err_return:
+ bnad_rx_res_free(bnad, res_info);
+ return err;
+}
+
+/* Timer callbacks */
+/* a) IOC timer */
+static void
+bnad_ioc_timeout(unsigned long data)
+{
+ struct bnad *bnad = (struct bnad *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+static void
+bnad_ioc_hb_check(unsigned long data)
+{
+ struct bnad *bnad = (struct bnad *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+static void
+bnad_ioc_sem_timeout(unsigned long data)
+{
+ struct bnad *bnad = (struct bnad *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bfa_nw_ioc_sem_timeout((void *) &bnad->bna.device.ioc);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+/*
+ * All timer routines use bnad->bna_lock to protect against
+ * the following race, which may occur in case of no locking:
+ * Time CPU m CPU n
+ * 0 1 = test_bit
+ * 1 clear_bit
+ * 2 del_timer_sync
+ * 3 mod_timer
+ */
+
+/* b) Dynamic Interrupt Moderation Timer */
+static void
+bnad_dim_timeout(unsigned long data)
+{
+ struct bnad *bnad = (struct bnad *)data;
+ struct bnad_rx_info *rx_info;
+ struct bnad_rx_ctrl *rx_ctrl;
+ int i, j;
+ unsigned long flags;
+
+ if (!netif_carrier_ok(bnad->netdev))
+ return;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ for (i = 0; i < bnad->num_rx; i++) {
+ rx_info = &bnad->rx_info[i];
+ if (!rx_info->rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ rx_ctrl = &rx_info->rx_ctrl[j];
+ if (!rx_ctrl->ccb)
+ continue;
+ bna_rx_dim_update(rx_ctrl->ccb);
+ }
+ }
+
+	/* Check for BNAD_RF_DIM_TIMER_RUNNING, does not eliminate a race */
+ if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
+ mod_timer(&bnad->dim_timer,
+ jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+/* c) Statistics Timer */
+static void
+bnad_stats_timeout(unsigned long data)
+{
+ struct bnad *bnad = (struct bnad *)data;
+ unsigned long flags;
+
+ if (!netif_running(bnad->netdev) ||
+ !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
+ return;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_stats_get(&bnad->bna);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+/*
+ * Set up timer for DIM
+ * Called with bnad->bna_lock held
+ */
+void
+bnad_dim_timer_start(struct bnad *bnad)
+{
+ if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
+ !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
+ setup_timer(&bnad->dim_timer, bnad_dim_timeout,
+ (unsigned long)bnad);
+ set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
+ mod_timer(&bnad->dim_timer,
+ jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
+ }
+}
+
+/*
+ * Set up timer for statistics
+ * Called with mutex_lock(&bnad->conf_mutex) held
+ */
+static void
+bnad_stats_timer_start(struct bnad *bnad)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
+ setup_timer(&bnad->stats_timer, bnad_stats_timeout,
+ (unsigned long)bnad);
+ mod_timer(&bnad->stats_timer,
+ jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+}
+
+/*
+ * Stops the stats timer
+ * Called with mutex_lock(&bnad->conf_mutex) held
+ */
+static void
+bnad_stats_timer_stop(struct bnad *bnad)
+{
+ int to_del = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
+ to_del = 1;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (to_del)
+ del_timer_sync(&bnad->stats_timer);
+}
+
+/* Utilities */
+
+static void
+bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
+{
+ int i = 1; /* Index 0 has broadcast address */
+ struct netdev_hw_addr *mc_addr;
+
+ netdev_for_each_mc_addr(mc_addr, netdev) {
+ memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
+ ETH_ALEN);
+ i++;
+ }
+}
+
+static int
+bnad_napi_poll_rx(struct napi_struct *napi, int budget)
+{
+ struct bnad_rx_ctrl *rx_ctrl =
+ container_of(napi, struct bnad_rx_ctrl, napi);
+ struct bna_ccb *ccb;
+ struct bnad *bnad;
+ int rcvd = 0;
+
+ ccb = rx_ctrl->ccb;
+
+ bnad = ccb->bnad;
+
+ if (!netif_carrier_ok(bnad->netdev))
+ goto poll_exit;
+
+ rcvd = bnad_poll_cq(bnad, ccb, budget);
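+ /*
+ * NAPI contract: if the whole budget was consumed, return without
+ * napi_complete() so the core schedules another poll.
+ */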
+ if (rcvd == budget)
+ return rcvd;
+
+poll_exit:
+ napi_complete(napi);
+
+ BNAD_UPDATE_CTR(bnad, netif_rx_complete);
+
+ bnad_enable_rx_irq(bnad, ccb);
+ return rcvd;
+}
+
+static int
+bnad_napi_poll_txrx(struct napi_struct *napi, int budget)
+{
+ struct bnad_rx_ctrl *rx_ctrl =
+ container_of(napi, struct bnad_rx_ctrl, napi);
+ struct bna_ccb *ccb;
+ struct bnad *bnad;
+ int rcvd = 0;
+ int i, j;
+
+ ccb = rx_ctrl->ccb;
+
+ bnad = ccb->bnad;
+
+ if (!netif_carrier_ok(bnad->netdev))
+ goto poll_exit;
+
+ /* Handle Tx Completions, if any */
+ for (i = 0; i < bnad->num_tx; i++) {
+ for (j = 0; j < bnad->num_txq_per_tx; j++)
+ bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
+ }
+
+ /* Handle Rx Completions */
+ rcvd = bnad_poll_cq(bnad, ccb, budget);
+ if (rcvd == budget)
+ return rcvd;
+poll_exit:
+ napi_complete(napi);
+
+ BNAD_UPDATE_CTR(bnad, netif_rx_complete);
+
+ bnad_enable_txrx_irqs(bnad);
+ return rcvd;
+}
+
+static void
+bnad_napi_enable(struct bnad *bnad, u32 rx_id)
+{
+ int (*napi_poll) (struct napi_struct *, int);
+ struct bnad_rx_ctrl *rx_ctrl;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (bnad->cfg_flags & BNAD_CF_MSIX)
+ napi_poll = bnad_napi_poll_rx;
+ else
+ napi_poll = bnad_napi_poll_txrx;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Initialize & enable NAPI */
+ for (i = 0; i < bnad->num_rxp_per_rx; i++) {
+ rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
+ netif_napi_add(bnad->netdev, &rx_ctrl->napi,
+ napi_poll, 64);
+ napi_enable(&rx_ctrl->napi);
+ }
+}
+
+static void
+bnad_napi_disable(struct bnad *bnad, u32 rx_id)
+{
+ int i;
+
+ /* First disable and then clean up */
+ for (i = 0; i < bnad->num_rxp_per_rx; i++) {
+ napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
+ netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
+ }
+}
+
+/* Should be called with conf_lock held */
+void
+bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
+{
+ struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
+ struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
+ unsigned long flags;
+
+ if (!tx_info->tx)
+ return;
+
+ init_completion(&bnad->bnad_completions.tx_comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&bnad->bnad_completions.tx_comp);
+
+ if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
+ bnad_tx_msix_unregister(bnad, tx_info,
+ bnad->num_txq_per_tx);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_tx_destroy(tx_info->tx);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ tx_info->tx = NULL;
+
+ if (tx_id == 0)
+ tasklet_kill(&bnad->tx_free_tasklet);
+
+ bnad_tx_res_free(bnad, res_info);
+}
+
+/* Should be called with conf_lock held */
+int
+bnad_setup_tx(struct bnad *bnad, uint tx_id)
+{
+ int err;
+ struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
+ struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
+ struct bna_intr_info *intr_info =
+ &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
+ struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
+ struct bna_tx_event_cbfn tx_cbfn;
+ struct bna_tx *tx;
+ unsigned long flags;
+
+ /* Initialize the Tx object configuration */
+ tx_config->num_txq = bnad->num_txq_per_tx;
+ tx_config->txq_depth = bnad->txq_depth;
+ tx_config->tx_type = BNA_TX_T_REGULAR;
+
+ /* Initialize the tx event handlers */
+ tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
+ tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
+ tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
+ tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
+ tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
+
+ /* Get BNA's resource requirement for one tx object */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_tx_res_req(bnad->num_txq_per_tx,
+ bnad->txq_depth, res_info);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Fill Unmap Q memory requirements */
+ BNAD_FILL_UNMAPQ_MEM_REQ(
+ &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
+ bnad->num_txq_per_tx,
+ BNAD_TX_UNMAPQ_DEPTH);
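+ /*
+ * The unmap Q shadows the TxQ: one entry per posted vector holds
+ * the skb pointer and DMA address needed to unmap the buffer at
+ * completion time.
+ */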
+
+ /* Allocate resources */
+ err = bnad_tx_res_alloc(bnad, res_info, tx_id);
+ if (err)
+ return err;
+
+ /* Ask BNA to create one Tx object, supplying required resources */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
+ tx_info);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (!tx)
+ goto err_return;
+ tx_info->tx = tx;
+
+ /* Register ISR for the Tx object */
+ if (intr_info->intr_type == BNA_INTR_T_MSIX) {
+ err = bnad_tx_msix_register(bnad, tx_info,
+ tx_id, bnad->num_txq_per_tx);
+ if (err)
+ goto err_return;
+ }
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_tx_enable(tx);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ return 0;
+
+err_return:
+ bnad_tx_res_free(bnad, res_info);
+ return err;
+}
+
+/* Setup the rx config for bna_rx_create */
+/* bnad decides the configuration */
+static void
+bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
+{
+ rx_config->rx_type = BNA_RX_T_REGULAR;
+ rx_config->num_paths = bnad->num_rxp_per_rx;
+
+ if (bnad->num_rxp_per_rx > 1) {
+ rx_config->rss_status = BNA_STATUS_T_ENABLED;
+ rx_config->rss_config.hash_type =
+ (BFI_RSS_T_V4_TCP |
+ BFI_RSS_T_V6_TCP |
+ BFI_RSS_T_V4_IP |
+ BFI_RSS_T_V6_IP);
+ rx_config->rss_config.hash_mask =
+ bnad->num_rxp_per_rx - 1;
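+ /*
+ * e.g. with 4 Rx paths hash_mask = 3, spreading flows
+ * evenly across the 4 CQs.
+ */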
+ get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
+ sizeof(rx_config->rss_config.toeplitz_hash_key));
+ } else {
+ rx_config->rss_status = BNA_STATUS_T_DISABLED;
+ memset(&rx_config->rss_config, 0,
+ sizeof(rx_config->rss_config));
+ }
+ rx_config->rxp_type = BNA_RXP_SLR;
+ rx_config->q_depth = bnad->rxq_depth;
+
+ rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
+
+ rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
+}
+
+/* Called with mutex_lock(&bnad->conf_mutex) held */
+void
+bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
+{
+ struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
+ struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
+ struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
+ unsigned long flags;
+ int dim_timer_del = 0;
+
+ if (!rx_info->rx)
+ return;
+
+ if (rx_id == 0) {
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ dim_timer_del = bnad_dim_timer_running(bnad);
+ if (dim_timer_del)
+ clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (dim_timer_del)
+ del_timer_sync(&bnad->dim_timer);
+ }
+
+ bnad_napi_disable(bnad, rx_id);
+
+ init_completion(&bnad->bnad_completions.rx_comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&bnad->bnad_completions.rx_comp);
+
+ if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
+ bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_rx_destroy(rx_info->rx);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ rx_info->rx = NULL;
+
+ bnad_rx_res_free(bnad, res_info);
+}
+
+/* Called with mutex_lock(&bnad->conf_mutex) held */
+int
+bnad_setup_rx(struct bnad *bnad, uint rx_id)
+{
+ int err;
+ struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
+ struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
+ struct bna_intr_info *intr_info =
+ &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
+ struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
+ struct bna_rx_event_cbfn rx_cbfn;
+ struct bna_rx *rx;
+ unsigned long flags;
+
+ /* Initialize the Rx object configuration */
+ bnad_init_rx_config(bnad, rx_config);
+
+ /* Initialize the Rx event handlers */
+ rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
+ rx_cbfn.rcb_destroy_cbfn = NULL;
+ rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
+ rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
+ rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
+ rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
+
+ /* Get BNA's resource requirement for one Rx object */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_rx_res_req(rx_config, res_info);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Fill Unmap Q memory requirements */
+ BNAD_FILL_UNMAPQ_MEM_REQ(
+ &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
+ rx_config->num_paths +
+ ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
+ rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
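+ /*
+ * Non-SINGLE rxp types use two RxQs per path (small and large
+ * buffers), hence the doubled path count above.
+ */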
+
+ /* Allocate resource */
+ err = bnad_rx_res_alloc(bnad, res_info, rx_id);
+ if (err)
+ return err;
+
+ /* Ask BNA to create one Rx object, supplying required resources */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
+ rx_info);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (!rx)
+ goto err_return;
+ rx_info->rx = rx;
+
+ /* Register ISR for the Rx object */
+ if (intr_info->intr_type == BNA_INTR_T_MSIX) {
+ err = bnad_rx_msix_register(bnad, rx_info, rx_id,
+ rx_config->num_paths);
+ if (err)
+ goto err_return;
+ }
+
+ /* Enable NAPI */
+ bnad_napi_enable(bnad, rx_id);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (rx_id == 0) {
+ /* Set up Dynamic Interrupt Moderation Vector */
+ if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
+ bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
+
+ /* Enable VLAN filtering only on the default Rx */
+ bna_rx_vlanfilter_enable(rx);
+
+ /* Start the DIM timer */
+ bnad_dim_timer_start(bnad);
+ }
+
+ bna_rx_enable(rx);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ return 0;
+
+err_return:
+ bnad_cleanup_rx(bnad, rx_id);
+ return err;
+}
+
+/* Called with conf_lock & bnad->bna_lock held */
+void
+bnad_tx_coalescing_timeo_set(struct bnad *bnad)
+{
+ struct bnad_tx_info *tx_info;
+
+ tx_info = &bnad->tx_info[0];
+ if (!tx_info->tx)
+ return;
+
+ bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
+}
+
+/* Called with conf_lock & bnad->bna_lock held */
+void
+bnad_rx_coalescing_timeo_set(struct bnad *bnad)
+{
+ struct bnad_rx_info *rx_info;
+ int i;
+
+ for (i = 0; i < bnad->num_rx; i++) {
+ rx_info = &bnad->rx_info[i];
+ if (!rx_info->rx)
+ continue;
+ bna_rx_coalescing_timeo_set(rx_info->rx,
+ bnad->rx_coalescing_timeo);
+ }
+}
+
+/*
+ * Called with bnad->bna_lock held
+ */
+static int
+bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
+{
+ int ret;
+
+ if (!is_valid_ether_addr(mac_addr))
+ return -EADDRNOTAVAIL;
+
+ /* If datapath is down, pretend everything went through */
+ if (!bnad->rx_info[0].rx)
+ return 0;
+
+ ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
+ if (ret != BNA_CB_SUCCESS)
+ return -EADDRNOTAVAIL;
+
+ return 0;
+}
+
+/* Should be called with conf_lock held */
+static int
+bnad_enable_default_bcast(struct bnad *bnad)
+{
+ struct bnad_rx_info *rx_info = &bnad->rx_info[0];
+ int ret;
+ unsigned long flags;
+
+ init_completion(&bnad->bnad_completions.mcast_comp);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
+ bnad_cb_rx_mcast_add);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ if (ret == BNA_CB_SUCCESS)
+ wait_for_completion(&bnad->bnad_completions.mcast_comp);
+ else
+ return -ENODEV;
+
+ if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
+ return -ENODEV;
+
+ return 0;
+}
+
+/* Statistics utilities */
+void
+bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
+{
+ int i, j;
+
+ for (i = 0; i < bnad->num_rx; i++) {
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ if (bnad->rx_info[i].rx_ctrl[j].ccb) {
+ stats->rx_packets += bnad->rx_info[i].
+ rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
+ stats->rx_bytes += bnad->rx_info[i].
+ rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
+ if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->
+ rcb[1]->rxq) {
+ stats->rx_packets +=
+ bnad->rx_info[i].rx_ctrl[j].
+ ccb->rcb[1]->rxq->rx_packets;
+ stats->rx_bytes +=
+ bnad->rx_info[i].rx_ctrl[j].
+ ccb->rcb[1]->rxq->rx_bytes;
+ }
+ }
+ }
+ }
+ for (i = 0; i < bnad->num_tx; i++) {
+ for (j = 0; j < bnad->num_txq_per_tx; j++) {
+ if (bnad->tx_info[i].tcb[j]) {
+ stats->tx_packets +=
+ bnad->tx_info[i].tcb[j]->txq->tx_packets;
+ stats->tx_bytes +=
+ bnad->tx_info[i].tcb[j]->txq->tx_bytes;
+ }
+ }
+ }
+}
+
+/*
+ * Must be called with the bna_lock held.
+ */
+void
+bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
+{
+ struct bfi_ll_stats_mac *mac_stats;
+ u64 bmap;
+ int i;
+
+ mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
+ stats->rx_errors =
+ mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
+ mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
+ mac_stats->rx_undersize;
+ stats->tx_errors = mac_stats->tx_fcs_error +
+ mac_stats->tx_undersize;
+ stats->rx_dropped = mac_stats->rx_drop;
+ stats->tx_dropped = mac_stats->tx_drop;
+ stats->multicast = mac_stats->rx_multicast;
+ stats->collisions = mac_stats->tx_total_collision;
+
+ stats->rx_length_errors = mac_stats->rx_frame_length_error;
+
+ /* receive ring buffer overflow ?? */
+
+ stats->rx_crc_errors = mac_stats->rx_fcs_error;
+ stats->rx_frame_errors = mac_stats->rx_alignment_error;
+ /* recv'r fifo overrun */
+ bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
+ ((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
+ for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
+ if (bmap & 1) {
+ stats->rx_fifo_errors +=
+ bnad->stats.bna_stats->
+ hw_stats->rxf_stats[i].frame_drops;
+ break;
+ }
+ bmap >>= 1;
+ }
+}
+
+static void
+bnad_mbox_irq_sync(struct bnad *bnad)
+{
+ u32 irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (bnad->cfg_flags & BNAD_CF_MSIX)
+ irq = bnad->msix_table[bnad->msix_num - 1].vector;
+ else
+ irq = bnad->pcidev->irq;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ synchronize_irq(irq);
+}
+
+/* Utility used by bnad_start_xmit, for doing TSO */
+static int
+bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
+{
+ int err;
+
+ /* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 have been defined since 2.6.18. */
+ BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
+ skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err) {
+ BNAD_UPDATE_CTR(bnad, tso_err);
+ return err;
+ }
+ }
+
+ /*
+ * For TSO, the TCP checksum field is seeded with pseudo-header sum
+ * excluding the length field.
+ */
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+
+ /*
+ * Zero these so stale values cannot leak into the checksum
+ * computation; hardware rewrites tot_len and check for each
+ * segment.
+ */
+ iph->tot_len = 0;
+ iph->check = 0;
+
+ tcp_hdr(skb)->check =
+ ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
+ IPPROTO_TCP, 0);
+ BNAD_UPDATE_CTR(bnad, tso4);
+ } else {
+ struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+ BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
+ ipv6h->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
+ IPPROTO_TCP, 0);
+ BNAD_UPDATE_CTR(bnad, tso6);
+ }
+
+ return 0;
+}
+
+/*
+ * Initialize Q numbers depending on Rx Paths
+ * Called with bnad->bna_lock held, because of cfg_flags
+ * access.
+ */
+static void
+bnad_q_num_init(struct bnad *bnad)
+{
+ int rxps;
+
+ rxps = min((uint)num_online_cpus(),
+ (uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));
+
+ if (!(bnad->cfg_flags & BNAD_CF_MSIX))
+ rxps = 1; /* INTx */
+
+ bnad->num_rx = 1;
+ bnad->num_tx = 1;
+ bnad->num_rxp_per_rx = rxps;
+ bnad->num_txq_per_tx = BNAD_TXQ_NUM;
+}
+
+/*
+ * Adjusts the Q numbers, given a number of msix vectors
+ * Give preference to RSS as opposed to Tx priority Queues,
+ * in such a case, just use 1 Tx Q
+ * Called with bnad->bna_lock held because of cfg_flags access
+ */
+static void
+bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
+{
+ bnad->num_txq_per_tx = 1;
+ if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
+ bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
+ (bnad->cfg_flags & BNAD_CF_MSIX)) {
+ bnad->num_rxp_per_rx = msix_vectors -
+ (bnad->num_tx * bnad->num_txq_per_tx) -
+ BNAD_MAILBOX_MSIX_VECTORS;
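+ /*
+ * e.g. 8 vectors, 1 TxQ and 1 mailbox vector leave
+ * 8 - 1 - 1 = 6 Rx paths.
+ */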
+ } else
+ bnad->num_rxp_per_rx = 1;
+}
+
+static void
+bnad_set_netdev_perm_addr(struct bnad *bnad)
+{
+ struct net_device *netdev = bnad->netdev;
+
+ memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
+ if (is_zero_ether_addr(netdev->dev_addr))
+ memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
+}
+
+/* Enable / disable device */
+static void
+bnad_device_disable(struct bnad *bnad)
+{
+ unsigned long flags;
+
+ init_completion(&bnad->bnad_completions.ioc_comp);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ wait_for_completion(&bnad->bnad_completions.ioc_comp);
+}
+
+static int
+bnad_device_enable(struct bnad *bnad)
+{
+ int err = 0;
+ unsigned long flags;
+
+ init_completion(&bnad->bnad_completions.ioc_comp);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_device_enable(&bnad->bna.device);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ wait_for_completion(&bnad->bnad_completions.ioc_comp);
+
+ if (bnad->bnad_completions.ioc_comp_status)
+ err = bnad->bnad_completions.ioc_comp_status;
+
+ return err;
+}
+
+/* Free BNA resources */
+static void
+bnad_res_free(struct bnad *bnad)
+{
+ int i;
+ struct bna_res_info *res_info = &bnad->res_info[0];
+
+ for (i = 0; i < BNA_RES_T_MAX; i++) {
+ if (res_info[i].res_type == BNA_RES_T_MEM)
+ bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
+ else
+ bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info);
+ }
+}
+
+/* Allocates memory and interrupt resources for BNA */
+static int
+bnad_res_alloc(struct bnad *bnad)
+{
+ int i, err;
+ struct bna_res_info *res_info = &bnad->res_info[0];
+
+ for (i = 0; i < BNA_RES_T_MAX; i++) {
+ if (res_info[i].res_type == BNA_RES_T_MEM)
+ err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
+ else
+ err = bnad_mbox_irq_alloc(bnad,
+ &res_info[i].res_u.intr_info);
+ if (err)
+ goto err_return;
+ }
+ return 0;
+
+err_return:
+ bnad_res_free(bnad);
+ return err;
+}
+
+/* Interrupt enable / disable */
+static void
+bnad_enable_msix(struct bnad *bnad)
+{
+ int i, ret;
+ u32 tot_msix_num;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ if (bnad->msix_table)
+ return;
+
+ tot_msix_num = bnad->msix_num + bnad->msix_diag_num;
+
+ bnad->msix_table =
+ kcalloc(tot_msix_num, sizeof(struct msix_entry), GFP_KERNEL);
+
+ if (!bnad->msix_table)
+ goto intx_mode;
+
+ for (i = 0; i < tot_msix_num; i++)
+ bnad->msix_table[i].entry = i;
+
+ ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, tot_msix_num);
+ if (ret > 0) {
+ /* Not enough MSI-X vectors. */
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ /* ret > 0: the number of vectors the device can support */
+ bnad_q_num_adjust(bnad, ret);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
+ + (bnad->num_rx
+ * bnad->num_rxp_per_rx) +
+ BNAD_MAILBOX_MSIX_VECTORS;
+ tot_msix_num = bnad->msix_num + bnad->msix_diag_num;
+
+ /* Try once more with adjusted numbers */
+ /* If this fails, fall back to INTx */
+ ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
+ tot_msix_num);
+ if (ret)
+ goto intx_mode;
+
+ } else if (ret < 0)
+ goto intx_mode;
+ return;
+
+intx_mode:
+
+ kfree(bnad->msix_table);
+ bnad->msix_table = NULL;
+ bnad->msix_num = 0;
+ bnad->msix_diag_num = 0;
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bnad->cfg_flags &= ~BNAD_CF_MSIX;
+ bnad_q_num_init(bnad);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+static void
+bnad_disable_msix(struct bnad *bnad)
+{
+ u32 cfg_flags;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ cfg_flags = bnad->cfg_flags;
+ if (bnad->cfg_flags & BNAD_CF_MSIX)
+ bnad->cfg_flags &= ~BNAD_CF_MSIX;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ if (cfg_flags & BNAD_CF_MSIX) {
+ pci_disable_msix(bnad->pcidev);
+ kfree(bnad->msix_table);
+ bnad->msix_table = NULL;
+ }
+}
+
+/* Netdev entry points */
+static int
+bnad_open(struct net_device *netdev)
+{
+ int err;
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bna_pause_config pause_config;
+ int mtu;
+ unsigned long flags;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ /* Tx */
+ err = bnad_setup_tx(bnad, 0);
+ if (err)
+ goto err_return;
+
+ /* Rx */
+ err = bnad_setup_rx(bnad, 0);
+ if (err)
+ goto cleanup_tx;
+
+ /* Port */
+ pause_config.tx_pause = 0;
+ pause_config.rx_pause = 0;
+
+ mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
+ bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
+ bna_port_enable(&bnad->bna.port);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Enable broadcast */
+ bnad_enable_default_bcast(bnad);
+
+ /* Set the UCAST address */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Start the stats timer */
+ bnad_stats_timer_start(bnad);
+
+ mutex_unlock(&bnad->conf_mutex);
+
+ return 0;
+
+cleanup_tx:
+ bnad_cleanup_tx(bnad, 0);
+
+err_return:
+ mutex_unlock(&bnad->conf_mutex);
+ return err;
+}
+
+static int
+bnad_stop(struct net_device *netdev)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ /* Stop the stats timer */
+ bnad_stats_timer_stop(bnad);
+
+ init_completion(&bnad->bnad_completions.port_comp);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP,
+ bnad_cb_port_disabled);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ wait_for_completion(&bnad->bnad_completions.port_comp);
+
+ bnad_cleanup_tx(bnad, 0);
+ bnad_cleanup_rx(bnad, 0);
+
+ /* Synchronize mailbox IRQ */
+ bnad_mbox_irq_sync(bnad);
+
+ mutex_unlock(&bnad->conf_mutex);
+
+ return 0;
+}
+
+/* TX */
+/*
+ * bnad_start_xmit : Netdev entry point for Transmit
+ * Called under lock held by net_device
+ */
+static netdev_tx_t
+bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+
+ u16 txq_prod, vlan_tag = 0;
+ u32 unmap_prod, wis, wis_used, wi_range;
+ u32 vectors, vect_id, i, acked;
+ u32 tx_id;
+ int err;
+
+ struct bnad_tx_info *tx_info;
+ struct bna_tcb *tcb;
+ struct bnad_unmap_q *unmap_q;
+ dma_addr_t dma_addr;
+ struct bna_txq_entry *txqent;
+ bna_txq_wi_ctrl_flag_t flags;
+
+ if (unlikely
+ (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /*
+ * Takes care of the Tx that is scheduled between clearing the flag
+ * and the netif_stop_queue() call.
+ */
+ if (unlikely(!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ tx_id = 0;
+
+ tx_info = &bnad->tx_info[tx_id];
+ tcb = tx_info->tcb[tx_id];
+ unmap_q = tcb->unmap_q;
+
+ vectors = 1 + skb_shinfo(skb)->nr_frags;
+ if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
+ acked = 0;
+ if (unlikely
+ (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
+ vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+ if ((u16) (*tcb->hw_consumer_index) !=
+ tcb->consumer_index &&
+ !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
+ acked = bnad_free_txbufs(bnad, tcb);
+ bna_ib_ack(tcb->i_dbell, acked);
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+ } else {
+ netif_stop_queue(netdev);
+ BNAD_UPDATE_CTR(bnad, netif_queue_stop);
+ }
+
+ smp_mb();
+ /*
+ * Check again to deal with race condition between
+ * netif_stop_queue here, and netif_wake_queue in
+ * interrupt handler which is not inside netif tx lock.
+ */
+ if (likely
+ (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
+ vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
+ BNAD_UPDATE_CTR(bnad, netif_queue_stop);
+ return NETDEV_TX_BUSY;
+ } else {
+ netif_wake_queue(netdev);
+ BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+ }
+ }
+
+ unmap_prod = unmap_q->producer_index;
+ wis_used = 1;
+ vect_id = 0;
+ flags = 0;
+
+ txq_prod = tcb->producer_index;
+ BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
+ BUG_ON(!(wi_range <= tcb->q_depth));
+ txqent->hdr.wi.reserved = 0;
+ txqent->hdr.wi.num_vectors = vectors;
+ txqent->hdr.wi.opcode =
+ htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
+ BNA_TXQ_WI_SEND));
+
+ if (bnad->vlan_grp && vlan_tx_tag_present(skb)) {
+ vlan_tag = (u16) vlan_tx_tag_get(skb);
+ flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
+ }
+ if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
+ vlan_tag =
+ (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
+ flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
+ }
+
+ txqent->hdr.wi.vlan_tag = htons(vlan_tag);
+
+ if (skb_is_gso(skb)) {
+ err = bnad_tso_prepare(bnad, skb);
+ if (err) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
+ flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
+ txqent->hdr.wi.l4_hdr_size_n_offset =
+ htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+ (tcp_hdrlen(skb) >> 2,
+ skb_transport_offset(skb)));
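+ /*
+ * The work item carries the TCP header size in 32-bit words
+ * (hence the >> 2) plus its offset from the start of the frame.
+ */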
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 proto = 0;
+
+ txqent->hdr.wi.lso_mss = 0;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ proto = ip_hdr(skb)->protocol;
+ else if (skb->protocol == htons(ETH_P_IPV6)) {
+ /* nexthdr may not be TCP immediately. */
+ proto = ipv6_hdr(skb)->nexthdr;
+ }
+ if (proto == IPPROTO_TCP) {
+ flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
+ txqent->hdr.wi.l4_hdr_size_n_offset =
+ htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+ (0, skb_transport_offset(skb)));
+
+ BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
+
+ BUG_ON(!(skb_headlen(skb) >=
+ skb_transport_offset(skb) + tcp_hdrlen(skb)));
+
+ } else if (proto == IPPROTO_UDP) {
+ flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
+ txqent->hdr.wi.l4_hdr_size_n_offset =
+ htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+ (0, skb_transport_offset(skb)));
+
+ BNAD_UPDATE_CTR(bnad, udpcsum_offload);
+
+ BUG_ON(!(skb_headlen(skb) >=
+ skb_transport_offset(skb) +
+ sizeof(struct udphdr)));
+ } else {
+ err = skb_checksum_help(skb);
+ BNAD_UPDATE_CTR(bnad, csum_help);
+ if (err) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, csum_help_err);
+ return NETDEV_TX_OK;
+ }
+ }
+ } else {
+ txqent->hdr.wi.lso_mss = 0;
+ txqent->hdr.wi.l4_hdr_size_n_offset = 0;
+ }
+
+ txqent->hdr.wi.flags = htons(flags);
+
+ txqent->hdr.wi.frame_length = htonl(skb->len);
+
+ unmap_q->unmap_array[unmap_prod].skb = skb;
+ BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
+ txqent->vector[vect_id].length = htons(skb_headlen(skb));
+ dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
+ PCI_DMA_TODEVICE);
+ pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+ dma_addr);
+
+ BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+ BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ u32 size = frag->size;
+
+ if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
+ vect_id = 0;
+ if (--wi_range)
+ txqent++;
+ else {
+ BNA_QE_INDX_ADD(txq_prod, wis_used,
+ tcb->q_depth);
+ wis_used = 0;
+ BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
+ txqent, wi_range);
+ BUG_ON(!(wi_range <= tcb->q_depth));
+ }
+ wis_used++;
+ txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
+ }
+
+ BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
+ txqent->vector[vect_id].length = htons(size);
+ dma_addr =
+ pci_map_page(bnad->pcidev, frag->page,
+ frag->page_offset, size,
+ PCI_DMA_TODEVICE);
+ pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+ dma_addr);
+ BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+ BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+ }
+
+ unmap_q->producer_index = unmap_prod;
+ BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
+ tcb->producer_index = txq_prod;
+
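+ /*
+ * Make the work-item writes visible before the doorbell hands
+ * the new producer index to hardware.
+ */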
+ smp_mb();
+ bna_txq_prod_indx_doorbell(tcb);
+
+ if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
+ tasklet_schedule(&bnad->tx_free_tasklet);
+
+ return NETDEV_TX_OK;
+}
+
+/*
+ * Uses spin_lock to synchronize reading of stats structures, which
+ * are written by BNA under the same lock.
+ */
+static struct rtnl_link_stats64 *
+bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ bnad_netdev_qstats_fill(bnad, stats);
+ bnad_netdev_hwstats_fill(bnad, stats);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ return stats;
+}
+
+static void
+bnad_set_rx_mode(struct net_device *netdev)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ u32 new_mask, valid_mask;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ new_mask = valid_mask = 0;
+
+ if (netdev->flags & IFF_PROMISC) {
+ if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
+ new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
+ valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
+ bnad->cfg_flags |= BNAD_CF_PROMISC;
+ }
+ } else {
+ if (bnad->cfg_flags & BNAD_CF_PROMISC) {
+ new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
+ valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
+ bnad->cfg_flags &= ~BNAD_CF_PROMISC;
+ }
+ }
+
+ if (netdev->flags & IFF_ALLMULTI) {
+ if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
+ new_mask |= BNA_RXMODE_ALLMULTI;
+ valid_mask |= BNA_RXMODE_ALLMULTI;
+ bnad->cfg_flags |= BNAD_CF_ALLMULTI;
+ }
+ } else {
+ if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
+ new_mask &= ~BNA_RXMODE_ALLMULTI;
+ valid_mask |= BNA_RXMODE_ALLMULTI;
+ bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
+ }
+ }
+
+ bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
+
+ if (!netdev_mc_empty(netdev)) {
+ u8 *mcaddr_list;
+ int mc_count = netdev_mc_count(netdev);
+
+ /* Index 0 holds the broadcast address */
+ mcaddr_list =
+ kzalloc((mc_count + 1) * ETH_ALEN,
+ GFP_ATOMIC);
+ if (!mcaddr_list)
+ goto unlock;
+
+ memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
+
+ /* Copy rest of the MC addresses */
+ bnad_netdev_mc_list_get(netdev, mcaddr_list);
+
+ bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
+ mcaddr_list, NULL);
+
+ /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
+ kfree(mcaddr_list);
+ }
+unlock:
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+/*
+ * bna_lock is used to sync writes to netdev->dev_addr;
+ * conf_lock cannot be used since this call may be made
+ * in a non-blocking context.
+ */
+static int
+bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
+{
+ int err;
+ struct bnad *bnad = netdev_priv(netdev);
+ struct sockaddr *sa = (struct sockaddr *)mac_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
+
+ if (!err)
+ memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ return err;
+}
+
+static int
+bnad_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ int mtu, err = 0;
+ unsigned long flags;
+
+ struct bnad *bnad = netdev_priv(netdev);
+
+ if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
+ return -EINVAL;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ netdev->mtu = new_mtu;
+
+ mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN;
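+ /* e.g. new_mtu = 1500 gives a 1518-byte frame: 14 + 1500 + 4 */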
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ mutex_unlock(&bnad->conf_mutex);
+ return err;
+}
+
+static void
+bnad_vlan_rx_register(struct net_device *netdev,
+ struct vlan_group *vlan_grp)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+
+ mutex_lock(&bnad->conf_mutex);
+ bnad->vlan_grp = vlan_grp;
+ mutex_unlock(&bnad->conf_mutex);
+}
+
+static void
+bnad_vlan_rx_add_vid(struct net_device *netdev,
+ unsigned short vid)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+
+ if (!bnad->rx_info[0].rx)
+ return;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ mutex_unlock(&bnad->conf_mutex);
+}
+
+static void
+bnad_vlan_rx_kill_vid(struct net_device *netdev,
+ unsigned short vid)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+
+ if (!bnad->rx_info[0].rx)
+ return;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ mutex_unlock(&bnad->conf_mutex);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void
+bnad_netpoll(struct net_device *netdev)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bnad_rx_info *rx_info;
+ struct bnad_rx_ctrl *rx_ctrl;
+ u32 curr_mask;
+ int i, j;
+
+ if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
+ bna_intx_disable(&bnad->bna, curr_mask);
+ bnad_isr(bnad->pcidev->irq, netdev);
+ bna_intx_enable(&bnad->bna, curr_mask);
+ } else {
+ for (i = 0; i < bnad->num_rx; i++) {
+ rx_info = &bnad->rx_info[i];
+ if (!rx_info->rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ rx_ctrl = &rx_info->rx_ctrl[j];
+ if (rx_ctrl->ccb) {
+ bnad_disable_rx_irq(bnad,
+ rx_ctrl->ccb);
+ bnad_netif_rx_schedule_poll(bnad,
+ rx_ctrl->ccb);
+ }
+ }
+ }
+ }
+}
+#endif
+
+static const struct net_device_ops bnad_netdev_ops = {
+ .ndo_open = bnad_open,
+ .ndo_stop = bnad_stop,
+ .ndo_start_xmit = bnad_start_xmit,
+ .ndo_get_stats64 = bnad_get_stats64,
+ .ndo_set_rx_mode = bnad_set_rx_mode,
+ .ndo_set_multicast_list = bnad_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = bnad_set_mac_address,
+ .ndo_change_mtu = bnad_change_mtu,
+ .ndo_vlan_rx_register = bnad_vlan_rx_register,
+ .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = bnad_netpoll
+#endif
+};
+
+static void
+bnad_netdev_init(struct bnad *bnad, bool using_dac)
+{
+ struct net_device *netdev = bnad->netdev;
+
+ netdev->features |= NETIF_F_IPV6_CSUM;
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+
+ netdev->features |= NETIF_F_GRO;
+ pr_warn("bna: GRO enabled, using kernel stack GRO\n");
+
+ netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+
+ if (using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ netdev->features |=
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+
+ netdev->vlan_features = netdev->features;
+ netdev->mem_start = bnad->mmio_start;
+ netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
+
+ netdev->netdev_ops = &bnad_netdev_ops;
+ bnad_set_ethtool_ops(netdev);
+}
+
+/*
+ * 1. Initialize the bnad structure
+ * 2. Setup netdev pointer in pci_dev
+ * 3. Initialize Tx free tasklet
+ * 4. Initialize no. of TxQ & CQs & MSIX vectors
+ */
+static int
+bnad_init(struct bnad *bnad,
+ struct pci_dev *pdev, struct net_device *netdev)
+{
+ unsigned long flags;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ pci_set_drvdata(pdev, netdev);
+
+ bnad->netdev = netdev;
+ bnad->pcidev = pdev;
+ bnad->mmio_start = pci_resource_start(pdev, 0);
+ bnad->mmio_len = pci_resource_len(pdev, 0);
+ bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
+ if (!bnad->bar0) {
+ dev_err(&pdev->dev, "ioremap for bar0 failed\n");
+ pci_set_drvdata(pdev, NULL);
+ return -ENOMEM;
+ }
+ pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
+ (unsigned long long) bnad->mmio_len);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (!bnad_msix_disable)
+ bnad->cfg_flags = BNAD_CF_MSIX;
+
+ bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
+
+ bnad_q_num_init(bnad);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
+ (bnad->num_rx * bnad->num_rxp_per_rx) +
+ BNAD_MAILBOX_MSIX_VECTORS;
+ bnad->msix_diag_num = 2; /* 1 for Tx, 1 for Rx */
+
+ bnad->txq_depth = BNAD_TXQ_DEPTH;
+ bnad->rxq_depth = BNAD_RXQ_DEPTH;
+ bnad->rx_csum = true;
+
+ bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
+ bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
+
+ tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
+ (unsigned long)bnad);
+
+ return 0;
+}
+
+/*
+ * Must be called after bnad_pci_uninit()
+ * so that iounmap() and pci_set_drvdata(NULL)
+ * happen only after PCI uninitialization.
+ */
+static void
+bnad_uninit(struct bnad *bnad)
+{
+ if (bnad->bar0)
+ iounmap(bnad->bar0);
+ pci_set_drvdata(bnad->pcidev, NULL);
+}
+
+/*
+ * Initialize locks
+ * a) Per-device mutex used for serializing configuration
+ * changes from OS interface
+ * b) spin lock used to protect bna state machine
+ */
+static void
+bnad_lock_init(struct bnad *bnad)
+{
+ spin_lock_init(&bnad->bna_lock);
+ mutex_init(&bnad->conf_mutex);
+}
+
+static void
+bnad_lock_uninit(struct bnad *bnad)
+{
+ mutex_destroy(&bnad->conf_mutex);
+}
+
+/* PCI Initialization */
+static int
+bnad_pci_init(struct bnad *bnad,
+ struct pci_dev *pdev, bool *using_dac)
+{
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+ err = pci_request_regions(pdev, BNAD_NAME);
+ if (err)
+ goto disable_device;
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ *using_dac = 1;
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ err = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32));
+ if (err)
+ goto release_regions;
+ }
+ *using_dac = 0;
+ }
+ pci_set_master(pdev);
+ return 0;
+
+release_regions:
+ pci_release_regions(pdev);
+disable_device:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void
+bnad_pci_uninit(struct pci_dev *pdev)
+{
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static int __devinit
+bnad_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pcidev_id)
+{
+ bool using_dac;
+ int err;
+ struct bnad *bnad;
+ struct bna *bna;
+ struct net_device *netdev;
+ struct bfa_pcidev pcidev_info;
+ unsigned long flags;
+
+ pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
+ pdev, pcidev_id, PCI_FUNC(pdev->devfn));
+
+ mutex_lock(&bnad_fwimg_mutex);
+ if (!cna_get_firmware_buf(pdev)) {
+ mutex_unlock(&bnad_fwimg_mutex);
+ pr_warn("Failed to load Firmware Image!\n");
+ return -ENODEV;
+ }
+ mutex_unlock(&bnad_fwimg_mutex);
+
+ /*
+ * Allocates sizeof(struct net_device) + sizeof(struct bnad);
+ * bnad = netdev_priv(netdev)
+ */
+ netdev = alloc_etherdev(sizeof(struct bnad));
+ if (!netdev) {
+ dev_err(&pdev->dev, "alloc_etherdev failed\n");
+ err = -ENOMEM;
+ return err;
+ }
+ bnad = netdev_priv(netdev);
+
+ /*
+ * PCI initialization
+ * Output : using_dac = 1 for 64 bit DMA
+ * = 0 for 32 bit DMA
+ */
+ err = bnad_pci_init(bnad, pdev, &using_dac);
+ if (err)
+ goto free_netdev;
+
+ bnad_lock_init(bnad);
+ /*
+ * Initialize bnad structure
+ * Setup relation between pci_dev & netdev
+ * Init Tx free tasklet
+ */
+ err = bnad_init(bnad, pdev, netdev);
+ if (err)
+ goto pci_uninit;
+ /* Initialize netdev structure, set up ethtool ops */
+ bnad_netdev_init(bnad, using_dac);
+
+ bnad_enable_msix(bnad);
+
+ /* Get resource requirement form bna */
+ bna_res_req(&bnad->res_info[0]);
+
+ /* Allocate resources from bna */
+ err = bnad_res_alloc(bnad);
+ if (err)
+ goto free_netdev;
+
+ bna = &bnad->bna;
+
+ /* Setup pcidev_info for bna_init() */
+ pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
+ pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
+ pcidev_info.device_id = bnad->pcidev->device;
+ pcidev_info.pci_bar_kva = bnad->bar0;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ bnad->stats.bna_stats = &bna->stats;
+
+ /* Set up timers */
+ setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout,
+ ((unsigned long)bnad));
+ setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
+ ((unsigned long)bnad));
+ setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_ioc_sem_timeout,
+ ((unsigned long)bnad));
+
+ /* Now start the timer before calling IOC */
+ mod_timer(&bnad->bna.device.ioc.ioc_timer,
+ jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
+
+ /*
+ * Start the chip
+ * Don't care even if err != 0, bna state machine will
+ * deal with it
+ */
+ err = bnad_device_enable(bnad);
+
+ /* Get the burnt-in mac */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_port_mac_get(&bna->port, &bnad->perm_addr);
+ bnad_set_netdev_perm_addr(bnad);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ mutex_unlock(&bnad->conf_mutex);
+
+ /*
+ * Make sure the link appears down to the stack
+ */
+ netif_carrier_off(netdev);
+
+ /* Finally, register with the net_device layer */
+ err = register_netdev(netdev);
+ if (err) {
+ pr_err("BNA : Registering with netdev failed\n");
+ goto disable_device;
+ }
+
+ return 0;
+
+disable_device:
+ mutex_lock(&bnad->conf_mutex);
+ bnad_device_disable(bnad);
+ del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
+ del_timer_sync(&bnad->bna.device.ioc.sem_timer);
+ del_timer_sync(&bnad->bna.device.ioc.hb_timer);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_uninit(bna);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ mutex_unlock(&bnad->conf_mutex);
+
+ bnad_res_free(bnad);
+ bnad_disable_msix(bnad);
+pci_uninit:
+ bnad_pci_uninit(pdev);
+ bnad_lock_uninit(bnad);
+ bnad_uninit(bnad);
+free_netdev:
+ free_netdev(netdev);
+ return err;
+}
+
+static void __devexit
+bnad_pci_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct bnad *bnad;
+ struct bna *bna;
+ unsigned long flags;
+
+ if (!netdev)
+ return;
+
+ pr_info("%s bnad_pci_remove\n", netdev->name);
+ bnad = netdev_priv(netdev);
+ bna = &bnad->bna;
+
+ unregister_netdev(netdev);
+
+ mutex_lock(&bnad->conf_mutex);
+ bnad_device_disable(bnad);
+ del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
+ del_timer_sync(&bnad->bna.device.ioc.sem_timer);
+ del_timer_sync(&bnad->bna.device.ioc.hb_timer);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_uninit(bna);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ mutex_unlock(&bnad->conf_mutex);
+
+ bnad_res_free(bnad);
+ bnad_disable_msix(bnad);
+ bnad_pci_uninit(pdev);
+ bnad_lock_uninit(bnad);
+ bnad_uninit(bnad);
+ free_netdev(netdev);
+}
+
+const struct pci_device_id bnad_pci_id_table[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
+ PCI_DEVICE_ID_BROCADE_CT),
+ .class = PCI_CLASS_NETWORK_ETHERNET << 8,
+ .class_mask = 0xffff00
+ }, {0, }
+};
+
+MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
+
+static struct pci_driver bnad_pci_driver = {
+ .name = BNAD_NAME,
+ .id_table = bnad_pci_id_table,
+ .probe = bnad_pci_probe,
+ .remove = __devexit_p(bnad_pci_remove),
+};
+
+static int __init
+bnad_module_init(void)
+{
+ int err;
+
+ pr_info("Brocade 10G Ethernet driver\n");
+
+ bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
+
+ err = pci_register_driver(&bnad_pci_driver);
+ if (err < 0) {
+ pr_err("bna : PCI registration failed in module init "
+ "(%d)\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit
+bnad_module_exit(void)
+{
+ pci_unregister_driver(&bnad_pci_driver);
+
+ if (bfi_fw)
+ release_firmware(bfi_fw);
+}
+
+module_init(bnad_module_init);
+module_exit(bnad_module_exit);
+
+MODULE_AUTHOR("Brocade");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
+MODULE_VERSION(BNAD_VERSION);
+MODULE_FIRMWARE(CNA_FW_FILE_CT);
diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
new file mode 100644
index 000000000000..ee377888b905
--- /dev/null
+++ b/drivers/net/bna/bnad.h
@@ -0,0 +1,333 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#ifndef __BNAD_H__
+#define __BNAD_H__
+
+#include <linux/rtnetlink.h>
+#include <linux/workqueue.h>
+#include <linux/ipv6.h>
+#include <linux/etherdevice.h>
+#include <linux/mutex.h>
+#include <linux/firmware.h>
+
+/* Fix for IA64 */
+#include <asm/checksum.h>
+#include <net/ip6_checksum.h>
+
+#include <net/ip.h>
+#include <net/tcp.h>
+
+#include "bna.h"
+
+#define BNAD_TXQ_DEPTH 2048
+#define BNAD_RXQ_DEPTH 2048
+
+#define BNAD_MAX_TXS 1
+#define BNAD_MAX_TXQ_PER_TX 8 /* 8 priority queues */
+#define BNAD_TXQ_NUM 1
+
+#define BNAD_MAX_RXS 1
+#define BNAD_MAX_RXPS_PER_RX 16
+
+/*
+ * Control structure pointed to by ccb->ctrl, which
+ * determines the NAPI / LRO behavior of the CCB.
+ * There is a 1:1 correspondence between ccb & ctrl.
+ */
+struct bnad_rx_ctrl {
+ struct bna_ccb *ccb;
+ struct napi_struct napi;
+};
+
+#define BNAD_RXMODE_PROMISC_DEFAULT BNA_RXMODE_PROMISC
+
+#define BNAD_GET_TX_ID(_skb) (0)
+
+/*
+ * GLOBAL #defines (CONSTANTS)
+ */
+#define BNAD_NAME "bna"
+#define BNAD_NAME_LEN 64
+
+#define BNAD_VERSION "2.3.2.0"
+
+#define BNAD_MAILBOX_MSIX_VECTORS 1
+
+#define BNAD_STATS_TIMER_FREQ 1000 /* in msecs */
+#define BNAD_DIM_TIMER_FREQ 1000 /* in msecs */
+
+#define BNAD_MAX_Q_DEPTH 0x10000
+#define BNAD_MIN_Q_DEPTH 0x200
+
+#define BNAD_JUMBO_MTU 9000
+
+#define BNAD_NETIF_WAKE_THRESHOLD 8
+
+#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT 3
+
+/* Bit positions for tcb->flags */
+#define BNAD_TXQ_FREE_SENT 0
+
+/* Bit positions for rcb->flags */
+#define BNAD_RXQ_REFILL 0
+#define BNAD_RXQ_STARTED 1
+
+/*
+ * DATA STRUCTURES
+ */
+
+/* enums */
+enum bnad_intr_source {
+ BNAD_INTR_TX = 1,
+ BNAD_INTR_RX = 2
+};
+
+enum bnad_link_state {
+ BNAD_LS_DOWN = 0,
+ BNAD_LS_UP = 1
+};
+
+struct bnad_completion {
+ struct completion ioc_comp;
+ struct completion ucast_comp;
+ struct completion mcast_comp;
+ struct completion tx_comp;
+ struct completion rx_comp;
+ struct completion stats_comp;
+ struct completion port_comp;
+
+ u8 ioc_comp_status;
+ u8 ucast_comp_status;
+ u8 mcast_comp_status;
+ u8 tx_comp_status;
+ u8 rx_comp_status;
+ u8 stats_comp_status;
+ u8 port_comp_status;
+};
+
+/* Tx Rx Control Stats */
+struct bnad_drv_stats {
+ u64 netif_queue_stop;
+ u64 netif_queue_wakeup;
+ u64 tso4;
+ u64 tso6;
+ u64 tso_err;
+ u64 tcpcsum_offload;
+ u64 udpcsum_offload;
+ u64 csum_help;
+ u64 csum_help_err;
+
+ u64 hw_stats_updates;
+ u64 netif_rx_schedule;
+ u64 netif_rx_complete;
+ u64 netif_rx_dropped;
+
+ u64 link_toggle;
+ u64 cee_up;
+
+ u64 rxp_info_alloc_failed;
+ u64 mbox_intr_disabled;
+ u64 mbox_intr_enabled;
+ u64 tx_unmap_q_alloc_failed;
+ u64 rx_unmap_q_alloc_failed;
+
+ u64 rxbuf_alloc_failed;
+};
+
+/* Complete driver stats */
+struct bnad_stats {
+ struct bnad_drv_stats drv_stats;
+ struct bna_stats *bna_stats;
+};
+
+/* Tx / Rx Resources */
+struct bnad_tx_res_info {
+ struct bna_res_info res_info[BNA_TX_RES_T_MAX];
+};
+
+struct bnad_rx_res_info {
+ struct bna_res_info res_info[BNA_RX_RES_T_MAX];
+};
+
+struct bnad_tx_info {
+ struct bna_tx *tx; /* 1:1 between tx_info & tx */
+ struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX];
+} ____cacheline_aligned;
+
+struct bnad_rx_info {
+ struct bna_rx *rx; /* 1:1 between rx_info & rx */
+
+ struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXPS_PER_RX];
+} ____cacheline_aligned;
+
+/* Unmap queues for Tx / Rx cleanup */
+struct bnad_skb_unmap {
+ struct sk_buff *skb;
+ DECLARE_PCI_UNMAP_ADDR(dma_addr)
+};
+
+struct bnad_unmap_q {
+ u32 producer_index;
+ u32 consumer_index;
+ u32 q_depth;
+ /* This should be the last one */
+ struct bnad_skb_unmap unmap_array[1];
+};
+
+/* Bit mask values for bnad->cfg_flags */
+#define BNAD_CF_DIM_ENABLED 0x01 /* DIM */
+#define BNAD_CF_PROMISC 0x02
+#define BNAD_CF_ALLMULTI 0x04
+#define BNAD_CF_MSIX 0x08 /* If in MSIx mode */
+
+/* Defines for run_flags bit-mask */
+/* Set, tested & cleared using xxx_bit() functions */
+/* Values indicate bit positions */
+#define BNAD_RF_CEE_RUNNING 1
+#define BNAD_RF_HW_ERROR 2
+#define BNAD_RF_MBOX_IRQ_DISABLED 3
+#define BNAD_RF_TX_STARTED 4
+#define BNAD_RF_RX_STARTED 5
+#define BNAD_RF_DIM_TIMER_RUNNING 6
+#define BNAD_RF_STATS_TIMER_RUNNING 7
+
+struct bnad {
+ struct net_device *netdev;
+
+ /* Data path */
+ struct bnad_tx_info tx_info[BNAD_MAX_TXS];
+ struct bnad_rx_info rx_info[BNAD_MAX_RXS];
+
+ struct vlan_group *vlan_grp;
+ /*
+ * These queue numbers are global only because they are used
+ * to calculate the number of MSI-X vectors. The exact number
+ * of queues is per Tx/Rx object.
+ */
+ u32 num_tx;
+ u32 num_rx;
+ u32 num_txq_per_tx;
+ u32 num_rxp_per_rx;
+
+ u32 txq_depth;
+ u32 rxq_depth;
+
+ u8 tx_coalescing_timeo;
+ u8 rx_coalescing_timeo;
+
+ struct bna_rx_config rx_config[BNAD_MAX_RXS];
+ struct bna_tx_config tx_config[BNAD_MAX_TXS];
+
+ u32 rx_csum;
+
+ void __iomem *bar0; /* BAR0 address */
+
+ struct bna bna;
+
+ u32 cfg_flags;
+ unsigned long run_flags;
+
+ struct pci_dev *pcidev;
+ u64 mmio_start;
+ u64 mmio_len;
+
+ u32 msix_num;
+ u32 msix_diag_num;
+ struct msix_entry *msix_table;
+
+ struct mutex conf_mutex;
+ spinlock_t bna_lock ____cacheline_aligned;
+
+ /* Timers */
+ struct timer_list ioc_timer;
+ struct timer_list dim_timer;
+ struct timer_list stats_timer;
+
+ /* Control path resources, memory & irq */
+ struct bna_res_info res_info[BNA_RES_T_MAX];
+ struct bnad_tx_res_info tx_res_info[BNAD_MAX_TXS];
+ struct bnad_rx_res_info rx_res_info[BNAD_MAX_RXS];
+
+ struct bnad_completion bnad_completions;
+
+ /* Burnt in MAC address */
+ mac_t perm_addr;
+
+ struct tasklet_struct tx_free_tasklet;
+
+ /* Statistics */
+ struct bnad_stats stats;
+
+ struct bnad_diag *diag;
+
+ char adapter_name[BNAD_NAME_LEN];
+ char port_name[BNAD_NAME_LEN];
+ char mbox_irq_name[BNAD_NAME_LEN];
+};
+
+/*
+ * EXTERN VARIABLES
+ */
+extern struct firmware *bfi_fw;
+extern u32 bnad_rxqs_per_cq;
+
+/*
+ * EXTERN PROTOTYPES
+ */
+extern u32 *cna_get_firmware_buf(struct pci_dev *pdev);
+/* Netdev entry point prototypes */
+extern void bnad_set_ethtool_ops(struct net_device *netdev);
+
+/* Configuration & setup */
+extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
+extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
+
+extern int bnad_setup_rx(struct bnad *bnad, uint rx_id);
+extern int bnad_setup_tx(struct bnad *bnad, uint tx_id);
+extern void bnad_cleanup_tx(struct bnad *bnad, uint tx_id);
+extern void bnad_cleanup_rx(struct bnad *bnad, uint rx_id);
+
+/* Timer start/stop protos */
+extern void bnad_dim_timer_start(struct bnad *bnad);
+
+/* Statistics */
+extern void bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats);
+extern void bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats);
+
+/*
+ * MACROS
+ */
+/* To set & get the stats counters */
+#define BNAD_UPDATE_CTR(_bnad, _ctr) \
+ (((_bnad)->stats.drv_stats._ctr)++)
+
+#define BNAD_GET_CTR(_bnad, _ctr) ((_bnad)->stats.drv_stats._ctr)
+
+#define bnad_enable_rx_irq_unsafe(_ccb) \
+{ \
+ bna_ib_coalescing_timer_set((_ccb)->i_dbell, \
+ (_ccb)->rx_coalescing_timeo); \
+ bna_ib_ack((_ccb)->i_dbell, 0); \
+}
+
+#define bnad_dim_timer_running(_bnad) \
+ (((_bnad)->cfg_flags & BNAD_CF_DIM_ENABLED) && \
+ (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &((_bnad)->run_flags))))
+
+#endif /* __BNAD_H__ */
diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c
new file mode 100644
index 000000000000..11fa2ea842c1
--- /dev/null
+++ b/drivers/net/bna/bnad_ethtool.c
@@ -0,0 +1,1277 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#include "cna.h"
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/rtnetlink.h>
+
+#include "bna.h"
+
+#include "bnad.h"
+
+#define BNAD_NUM_TXF_COUNTERS 12
+#define BNAD_NUM_RXF_COUNTERS 10
+#define BNAD_NUM_CQ_COUNTERS 3
+#define BNAD_NUM_RXQ_COUNTERS 6
+#define BNAD_NUM_TXQ_COUNTERS 5
+
+#define BNAD_ETHTOOL_STATS_NUM \
+ (sizeof(struct rtnl_link_stats64) / sizeof(u64) + \
+ sizeof(struct bnad_drv_stats) / sizeof(u64) + \
+ offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64))
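+
+/*
+ * Total = rtnl_link_stats64 counters + driver counters + the fixed
+ * (pre-rxf) portion of the firmware stats; the string table below
+ * must stay in step with this count.
+ */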
+
+static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
+ "rx_packets",
+ "tx_packets",
+ "rx_bytes",
+ "tx_bytes",
+ "rx_errors",
+ "tx_errors",
+ "rx_dropped",
+ "tx_dropped",
+ "multicast",
+ "collisions",
+
+ "rx_length_errors",
+ "rx_over_errors",
+ "rx_crc_errors",
+ "rx_frame_errors",
+ "rx_fifo_errors",
+ "rx_missed_errors",
+
+ "tx_aborted_errors",
+ "tx_carrier_errors",
+ "tx_fifo_errors",
+ "tx_heartbeat_errors",
+ "tx_window_errors",
+
+ "rx_compressed",
+ "tx_compressed",
+
+ "netif_queue_stop",
+ "netif_queue_wakeup",
+ "tso4",
+ "tso6",
+ "tso_err",
+ "tcpcsum_offload",
+ "udpcsum_offload",
+ "csum_help",
+ "csum_help_err",
+ "hw_stats_updates",
+ "netif_rx_schedule",
+ "netif_rx_complete",
+ "netif_rx_dropped",
+
+ "link_toggle",
+ "cee_up",
+
+ "rxp_info_alloc_failed",
+ "mbox_intr_disabled",
+ "mbox_intr_enabled",
+ "tx_unmap_q_alloc_failed",
+ "rx_unmap_q_alloc_failed",
+ "rxbuf_alloc_failed",
+
+ "mac_frame_64",
+ "mac_frame_65_127",
+ "mac_frame_128_255",
+ "mac_frame_256_511",
+ "mac_frame_512_1023",
+ "mac_frame_1024_1518",
+ "mac_frame_1518_1522",
+ "mac_rx_bytes",
+ "mac_rx_packets",
+ "mac_rx_fcs_error",
+ "mac_rx_multicast",
+ "mac_rx_broadcast",
+ "mac_rx_control_frames",
+ "mac_rx_pause",
+ "mac_rx_unknown_opcode",
+ "mac_rx_alignment_error",
+ "mac_rx_frame_length_error",
+ "mac_rx_code_error",
+ "mac_rx_carrier_sense_error",
+ "mac_rx_undersize",
+ "mac_rx_oversize",
+ "mac_rx_fragments",
+ "mac_rx_jabber",
+ "mac_rx_drop",
+
+ "mac_tx_bytes",
+ "mac_tx_packets",
+ "mac_tx_multicast",
+ "mac_tx_broadcast",
+ "mac_tx_pause",
+ "mac_tx_deferral",
+ "mac_tx_excessive_deferral",
+ "mac_tx_single_collision",
+ "mac_tx_muliple_collision",
+ "mac_tx_late_collision",
+ "mac_tx_excessive_collision",
+ "mac_tx_total_collision",
+ "mac_tx_pause_honored",
+ "mac_tx_drop",
+ "mac_tx_jabber",
+ "mac_tx_fcs_error",
+ "mac_tx_control_frame",
+ "mac_tx_oversize",
+ "mac_tx_undersize",
+ "mac_tx_fragments",
+
+ "bpc_tx_pause_0",
+ "bpc_tx_pause_1",
+ "bpc_tx_pause_2",
+ "bpc_tx_pause_3",
+ "bpc_tx_pause_4",
+ "bpc_tx_pause_5",
+ "bpc_tx_pause_6",
+ "bpc_tx_pause_7",
+ "bpc_tx_zero_pause_0",
+ "bpc_tx_zero_pause_1",
+ "bpc_tx_zero_pause_2",
+ "bpc_tx_zero_pause_3",
+ "bpc_tx_zero_pause_4",
+ "bpc_tx_zero_pause_5",
+ "bpc_tx_zero_pause_6",
+ "bpc_tx_zero_pause_7",
+ "bpc_tx_first_pause_0",
+ "bpc_tx_first_pause_1",
+ "bpc_tx_first_pause_2",
+ "bpc_tx_first_pause_3",
+ "bpc_tx_first_pause_4",
+ "bpc_tx_first_pause_5",
+ "bpc_tx_first_pause_6",
+ "bpc_tx_first_pause_7",
+
+ "bpc_rx_pause_0",
+ "bpc_rx_pause_1",
+ "bpc_rx_pause_2",
+ "bpc_rx_pause_3",
+ "bpc_rx_pause_4",
+ "bpc_rx_pause_5",
+ "bpc_rx_pause_6",
+ "bpc_rx_pause_7",
+ "bpc_rx_zero_pause_0",
+ "bpc_rx_zero_pause_1",
+ "bpc_rx_zero_pause_2",
+ "bpc_rx_zero_pause_3",
+ "bpc_rx_zero_pause_4",
+ "bpc_rx_zero_pause_5",
+ "bpc_rx_zero_pause_6",
+ "bpc_rx_zero_pause_7",
+ "bpc_rx_first_pause_0",
+ "bpc_rx_first_pause_1",
+ "bpc_rx_first_pause_2",
+ "bpc_rx_first_pause_3",
+ "bpc_rx_first_pause_4",
+ "bpc_rx_first_pause_5",
+ "bpc_rx_first_pause_6",
+ "bpc_rx_first_pause_7",
+
+ "rad_rx_frames",
+ "rad_rx_octets",
+ "rad_rx_vlan_frames",
+ "rad_rx_ucast",
+ "rad_rx_ucast_octets",
+ "rad_rx_ucast_vlan",
+ "rad_rx_mcast",
+ "rad_rx_mcast_octets",
+ "rad_rx_mcast_vlan",
+ "rad_rx_bcast",
+ "rad_rx_bcast_octets",
+ "rad_rx_bcast_vlan",
+ "rad_rx_drops",
+
+ "fc_rx_ucast_octets",
+ "fc_rx_ucast",
+ "fc_rx_ucast_vlan",
+ "fc_rx_mcast_octets",
+ "fc_rx_mcast",
+ "fc_rx_mcast_vlan",
+ "fc_rx_bcast_octets",
+ "fc_rx_bcast",
+ "fc_rx_bcast_vlan",
+
+ "fc_tx_ucast_octets",
+ "fc_tx_ucast",
+ "fc_tx_ucast_vlan",
+ "fc_tx_mcast_octets",
+ "fc_tx_mcast",
+ "fc_tx_mcast_vlan",
+ "fc_tx_bcast_octets",
+ "fc_tx_bcast",
+ "fc_tx_bcast_vlan",
+ "fc_tx_parity_errors",
+ "fc_tx_timeout",
+ "fc_tx_fid_parity_errors",
+};
+
+static int
+bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+ cmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE;
+ cmd->advertising = ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE;
+ cmd->autoneg = AUTONEG_DISABLE;
+ cmd->port = PORT_FIBRE;
+ cmd->phy_address = 0;
+
+ if (netif_carrier_ok(netdev)) {
+ cmd->speed = SPEED_10000;
+ cmd->duplex = DUPLEX_FULL;
+ } else {
+ cmd->speed = -1;
+ cmd->duplex = -1;
+ }
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+
+ return 0;
+}
+
+static int
+bnad_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+ /* Only the 10G full-duplex setting is supported */
+ if (cmd->autoneg == AUTONEG_ENABLE)
+ return -EOPNOTSUPP;
+
+ if ((cmd->speed == SPEED_10000) && (cmd->duplex == DUPLEX_FULL))
+ return 0;
+
+ return -EOPNOTSUPP;
+}
+
+static void
+bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bfa_ioc_attr *ioc_attr;
+ unsigned long flags;
+
+ strcpy(drvinfo->driver, BNAD_NAME);
+ strcpy(drvinfo->version, BNAD_VERSION);
+
+ ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
+ if (ioc_attr) {
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
+ sizeof(drvinfo->fw_version) - 1);
+ kfree(ioc_attr);
+ }
+
+ strncpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN);
+}
+
+static int
+get_regs(struct bnad *bnad, u32 *regs)
+{
+ int num = 0, i;
+ u32 reg_addr;
+ unsigned long flags;
+
+#define BNAD_GET_REG(addr) \
+do { \
+ if (regs) \
+ regs[num++] = readl(bnad->bar0 + (addr)); \
+ else \
+ num++; \
+} while (0)
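+/* With a NULL 'regs' the macro only counts, so get_regs() doubles as
+ * the length calculation used by bnad_get_regs_len(). */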
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ /* DMA Block Internal Registers */
+ BNAD_GET_REG(DMA_CTRL_REG0);
+ BNAD_GET_REG(DMA_CTRL_REG1);
+ BNAD_GET_REG(DMA_ERR_INT_STATUS);
+ BNAD_GET_REG(DMA_ERR_INT_ENABLE);
+ BNAD_GET_REG(DMA_ERR_INT_STATUS_SET);
+
+ /* APP Block Register Address Offset from BAR0 */
+ BNAD_GET_REG(HOSTFN0_INT_STATUS);
+ BNAD_GET_REG(HOSTFN0_INT_MASK);
+ BNAD_GET_REG(HOST_PAGE_NUM_FN0);
+ BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN0);
+ BNAD_GET_REG(FN0_PCIE_ERR_REG);
+ BNAD_GET_REG(FN0_ERR_TYPE_STATUS_REG);
+ BNAD_GET_REG(FN0_ERR_TYPE_MSK_STATUS_REG);
+
+ BNAD_GET_REG(HOSTFN1_INT_STATUS);
+ BNAD_GET_REG(HOSTFN1_INT_MASK);
+ BNAD_GET_REG(HOST_PAGE_NUM_FN1);
+ BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN1);
+ BNAD_GET_REG(FN1_PCIE_ERR_REG);
+ BNAD_GET_REG(FN1_ERR_TYPE_STATUS_REG);
+ BNAD_GET_REG(FN1_ERR_TYPE_MSK_STATUS_REG);
+
+ BNAD_GET_REG(PCIE_MISC_REG);
+
+ BNAD_GET_REG(HOST_SEM0_REG);
+ BNAD_GET_REG(HOST_SEM1_REG);
+ BNAD_GET_REG(HOST_SEM2_REG);
+ BNAD_GET_REG(HOST_SEM3_REG);
+ BNAD_GET_REG(HOST_SEM0_INFO_REG);
+ BNAD_GET_REG(HOST_SEM1_INFO_REG);
+ BNAD_GET_REG(HOST_SEM2_INFO_REG);
+ BNAD_GET_REG(HOST_SEM3_INFO_REG);
+
+ BNAD_GET_REG(TEMPSENSE_CNTL_REG);
+ BNAD_GET_REG(TEMPSENSE_STAT_REG);
+
+ BNAD_GET_REG(APP_LOCAL_ERR_STAT);
+ BNAD_GET_REG(APP_LOCAL_ERR_MSK);
+
+ BNAD_GET_REG(PCIE_LNK_ERR_STAT);
+ BNAD_GET_REG(PCIE_LNK_ERR_MSK);
+
+ BNAD_GET_REG(FCOE_FIP_ETH_TYPE);
+ BNAD_GET_REG(RESV_ETH_TYPE);
+
+ BNAD_GET_REG(HOSTFN2_INT_STATUS);
+ BNAD_GET_REG(HOSTFN2_INT_MASK);
+ BNAD_GET_REG(HOST_PAGE_NUM_FN2);
+ BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN2);
+ BNAD_GET_REG(FN2_PCIE_ERR_REG);
+ BNAD_GET_REG(FN2_ERR_TYPE_STATUS_REG);
+ BNAD_GET_REG(FN2_ERR_TYPE_MSK_STATUS_REG);
+
+ BNAD_GET_REG(HOSTFN3_INT_STATUS);
+ BNAD_GET_REG(HOSTFN3_INT_MASK);
+ BNAD_GET_REG(HOST_PAGE_NUM_FN3);
+ BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN3);
+ BNAD_GET_REG(FN3_PCIE_ERR_REG);
+ BNAD_GET_REG(FN3_ERR_TYPE_STATUS_REG);
+ BNAD_GET_REG(FN3_ERR_TYPE_MSK_STATUS_REG);
+
+ /* Host Command Status Registers */
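+ /* Three status words are read per 0x10-byte stride, 16 strides in all */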
+ reg_addr = HOST_CMDSTS0_CLR_REG;
+ for (i = 0; i < 16; i++) {
+ BNAD_GET_REG(reg_addr);
+ BNAD_GET_REG(reg_addr + 4);
+ BNAD_GET_REG(reg_addr + 8);
+ reg_addr += 0x10;
+ }
+
+ /* Function ID register */
+ BNAD_GET_REG(FNC_ID_REG);
+
+ /* Function personality register */
+ BNAD_GET_REG(FNC_PERS_REG);
+
+ /* Operation mode register */
+ BNAD_GET_REG(OP_MODE);
+
+ /* LPU0 Registers */
+ BNAD_GET_REG(LPU0_MBOX_CTL_REG);
+ BNAD_GET_REG(LPU0_MBOX_CMD_REG);
+ BNAD_GET_REG(LPU0_MBOX_LINK_0REG);
+ BNAD_GET_REG(LPU1_MBOX_LINK_0REG);
+ BNAD_GET_REG(LPU0_MBOX_STATUS_0REG);
+ BNAD_GET_REG(LPU1_MBOX_STATUS_0REG);
+ BNAD_GET_REG(LPU0_ERR_STATUS_REG);
+ BNAD_GET_REG(LPU0_ERR_SET_REG);
+
+ /* LPU1 Registers */
+ BNAD_GET_REG(LPU1_MBOX_CTL_REG);
+ BNAD_GET_REG(LPU1_MBOX_CMD_REG);
+ BNAD_GET_REG(LPU0_MBOX_LINK_1REG);
+ BNAD_GET_REG(LPU1_MBOX_LINK_1REG);
+ BNAD_GET_REG(LPU0_MBOX_STATUS_1REG);
+ BNAD_GET_REG(LPU1_MBOX_STATUS_1REG);
+ BNAD_GET_REG(LPU1_ERR_STATUS_REG);
+ BNAD_GET_REG(LPU1_ERR_SET_REG);
+
+ /* PSS Registers */
+ BNAD_GET_REG(PSS_CTL_REG);
+ BNAD_GET_REG(PSS_ERR_STATUS_REG);
+ BNAD_GET_REG(ERR_STATUS_SET);
+ BNAD_GET_REG(PSS_RAM_ERR_STATUS_REG);
+
+ /* Catapult CPQ Registers */
+ BNAD_GET_REG(HOSTFN0_LPU0_MBOX0_CMD_STAT);
+ BNAD_GET_REG(HOSTFN0_LPU1_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN0_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN0_MBOX0_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN0_LPU0_MBOX1_CMD_STAT);
+ BNAD_GET_REG(HOSTFN0_LPU1_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN0_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN0_MBOX1_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN1_LPU0_MBOX0_CMD_STAT);
+ BNAD_GET_REG(HOSTFN1_LPU1_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN1_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN1_MBOX0_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN1_LPU0_MBOX1_CMD_STAT);
+ BNAD_GET_REG(HOSTFN1_LPU1_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN1_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN1_MBOX1_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN2_LPU0_MBOX0_CMD_STAT);
+ BNAD_GET_REG(HOSTFN2_LPU1_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN2_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN2_MBOX0_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN2_LPU0_MBOX1_CMD_STAT);
+ BNAD_GET_REG(HOSTFN2_LPU1_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN2_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN2_MBOX1_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN3_LPU0_MBOX0_CMD_STAT);
+ BNAD_GET_REG(HOSTFN3_LPU1_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN3_MBOX0_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN3_MBOX0_CMD_STAT);
+
+ BNAD_GET_REG(HOSTFN3_LPU0_MBOX1_CMD_STAT);
+ BNAD_GET_REG(HOSTFN3_LPU1_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU0_HOSTFN3_MBOX1_CMD_STAT);
+ BNAD_GET_REG(LPU1_HOSTFN3_MBOX1_CMD_STAT);
+
+ /* Host Function Force Parity Error Registers */
+ BNAD_GET_REG(HOSTFN0_LPU_FORCE_PERR);
+ BNAD_GET_REG(HOSTFN1_LPU_FORCE_PERR);
+ BNAD_GET_REG(HOSTFN2_LPU_FORCE_PERR);
+ BNAD_GET_REG(HOSTFN3_LPU_FORCE_PERR);
+
+ /* LL Port[0|1] Halt Mask Registers */
+ BNAD_GET_REG(LL_HALT_MSK_P0);
+ BNAD_GET_REG(LL_HALT_MSK_P1);
+
+ /* LL Port[0|1] Error Mask Registers */
+ BNAD_GET_REG(LL_ERR_MSK_P0);
+ BNAD_GET_REG(LL_ERR_MSK_P1);
+
+ /* EMC FLI Registers */
+ BNAD_GET_REG(FLI_CMD_REG);
+ BNAD_GET_REG(FLI_ADDR_REG);
+ BNAD_GET_REG(FLI_CTL_REG);
+ BNAD_GET_REG(FLI_WRDATA_REG);
+ BNAD_GET_REG(FLI_RDDATA_REG);
+ BNAD_GET_REG(FLI_DEV_STATUS_REG);
+ BNAD_GET_REG(FLI_SIG_WD_REG);
+
+ BNAD_GET_REG(FLI_DEV_VENDOR_REG);
+ BNAD_GET_REG(FLI_ERR_STATUS_REG);
+
+ /* RxAdm 0 Registers */
+ BNAD_GET_REG(RAD0_CTL_REG);
+ BNAD_GET_REG(RAD0_PE_PARM_REG);
+ BNAD_GET_REG(RAD0_BCN_REG);
+ BNAD_GET_REG(RAD0_DEFAULT_REG);
+ BNAD_GET_REG(RAD0_PROMISC_REG);
+ BNAD_GET_REG(RAD0_BCNQ_REG);
+ BNAD_GET_REG(RAD0_DEFAULTQ_REG);
+
+ BNAD_GET_REG(RAD0_ERR_STS);
+ BNAD_GET_REG(RAD0_SET_ERR_STS);
+ BNAD_GET_REG(RAD0_ERR_INT_EN);
+ BNAD_GET_REG(RAD0_FIRST_ERR);
+ BNAD_GET_REG(RAD0_FORCE_ERR);
+
+ BNAD_GET_REG(RAD0_MAC_MAN_1H);
+ BNAD_GET_REG(RAD0_MAC_MAN_1L);
+ BNAD_GET_REG(RAD0_MAC_MAN_2H);
+ BNAD_GET_REG(RAD0_MAC_MAN_2L);
+ BNAD_GET_REG(RAD0_MAC_MAN_3H);
+ BNAD_GET_REG(RAD0_MAC_MAN_3L);
+ BNAD_GET_REG(RAD0_MAC_MAN_4H);
+ BNAD_GET_REG(RAD0_MAC_MAN_4L);
+
+ BNAD_GET_REG(RAD0_LAST4_IP);
+
+ /* RxAdm 1 Registers */
+ BNAD_GET_REG(RAD1_CTL_REG);
+ BNAD_GET_REG(RAD1_PE_PARM_REG);
+ BNAD_GET_REG(RAD1_BCN_REG);
+ BNAD_GET_REG(RAD1_DEFAULT_REG);
+ BNAD_GET_REG(RAD1_PROMISC_REG);
+ BNAD_GET_REG(RAD1_BCNQ_REG);
+ BNAD_GET_REG(RAD1_DEFAULTQ_REG);
+
+ BNAD_GET_REG(RAD1_ERR_STS);
+ BNAD_GET_REG(RAD1_SET_ERR_STS);
+ BNAD_GET_REG(RAD1_ERR_INT_EN);
+
+ /* TxA0 Registers */
+ BNAD_GET_REG(TXA0_CTRL_REG);
+ /* TxA0 TSO Sequence # Registers (RO) */
+ for (i = 0; i < 8; i++) {
+ BNAD_GET_REG(TXA0_TSO_TCP_SEQ_REG(i));
+ BNAD_GET_REG(TXA0_TSO_IP_INFO_REG(i));
+ }
+
+ /* TxA1 Registers */
+ BNAD_GET_REG(TXA1_CTRL_REG);
+ /* TxA1 TSO Sequence # Registers (RO) */
+ for (i = 0; i < 8; i++) {
+ BNAD_GET_REG(TXA1_TSO_TCP_SEQ_REG(i));
+ BNAD_GET_REG(TXA1_TSO_IP_INFO_REG(i));
+ }
+
+ /* RxA Registers */
+ BNAD_GET_REG(RXA0_CTL_REG);
+ BNAD_GET_REG(RXA1_CTL_REG);
+
+ /* PLB0 Registers */
+ BNAD_GET_REG(PLB0_ECM_TIMER_REG);
+ BNAD_GET_REG(PLB0_RL_CTL);
+ for (i = 0; i < 8; i++)
+ BNAD_GET_REG(PLB0_RL_MAX_BC(i));
+ BNAD_GET_REG(PLB0_RL_TU_PRIO);
+ for (i = 0; i < 8; i++)
+ BNAD_GET_REG(PLB0_RL_BYTE_CNT(i));
+ BNAD_GET_REG(PLB0_RL_MIN_REG);
+ BNAD_GET_REG(PLB0_RL_MAX_REG);
+ BNAD_GET_REG(PLB0_EMS_ADD_REG);
+
+ /* PLB1 Registers */
+ BNAD_GET_REG(PLB1_ECM_TIMER_REG);
+ BNAD_GET_REG(PLB1_RL_CTL);
+ for (i = 0; i < 8; i++)
+ BNAD_GET_REG(PLB1_RL_MAX_BC(i));
+ BNAD_GET_REG(PLB1_RL_TU_PRIO);
+ for (i = 0; i < 8; i++)
+ BNAD_GET_REG(PLB1_RL_BYTE_CNT(i));
+ BNAD_GET_REG(PLB1_RL_MIN_REG);
+ BNAD_GET_REG(PLB1_RL_MAX_REG);
+ BNAD_GET_REG(PLB1_EMS_ADD_REG);
+
+ /* HQM Control Register */
+ BNAD_GET_REG(HQM0_CTL_REG);
+ BNAD_GET_REG(HQM0_RXQ_STOP_SEM);
+ BNAD_GET_REG(HQM0_TXQ_STOP_SEM);
+ BNAD_GET_REG(HQM1_CTL_REG);
+ BNAD_GET_REG(HQM1_RXQ_STOP_SEM);
+ BNAD_GET_REG(HQM1_TXQ_STOP_SEM);
+
+ /* LUT Registers */
+ BNAD_GET_REG(LUT0_ERR_STS);
+ BNAD_GET_REG(LUT0_SET_ERR_STS);
+ BNAD_GET_REG(LUT1_ERR_STS);
+ BNAD_GET_REG(LUT1_SET_ERR_STS);
+
+ /* TRC Registers */
+ BNAD_GET_REG(TRC_CTL_REG);
+ BNAD_GET_REG(TRC_MODS_REG);
+ BNAD_GET_REG(TRC_TRGC_REG);
+ BNAD_GET_REG(TRC_CNT1_REG);
+ BNAD_GET_REG(TRC_CNT2_REG);
+ BNAD_GET_REG(TRC_NXTS_REG);
+ BNAD_GET_REG(TRC_DIRR_REG);
+ for (i = 0; i < 10; i++)
+ BNAD_GET_REG(TRC_TRGM_REG(i));
+ for (i = 0; i < 10; i++)
+ BNAD_GET_REG(TRC_NXTM_REG(i));
+ for (i = 0; i < 10; i++)
+ BNAD_GET_REG(TRC_STRM_REG(i));
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+#undef BNAD_GET_REG
+ return num;
+}
+
+static int
+bnad_get_regs_len(struct net_device *netdev)
+{
+ return get_regs(netdev_priv(netdev), NULL) * sizeof(u32);
+}
+
+static void
+bnad_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
+{
+ memset(buf, 0, bnad_get_regs_len(netdev));
+ get_regs(netdev_priv(netdev), buf);
+}
+
+static void
+bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo)
+{
+ wolinfo->supported = 0;
+ wolinfo->wolopts = 0;
+}
+
+static int
+bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+
+ /* bna_lock is needed to read cfg_flags */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ coalesce->use_adaptive_rx_coalesce =
+ (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false;
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo *
+ BFI_COALESCING_TIMER_UNIT;
+ coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo *
+ BFI_COALESCING_TIMER_UNIT;
+ coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT;
+
+ return 0;
+}
+
+static int
+bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
+ int dim_timer_del = 0;
+
+ if (coalesce->rx_coalesce_usecs == 0 ||
+ coalesce->rx_coalesce_usecs >
+ BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
+ return -EINVAL;
+
+ if (coalesce->tx_coalesce_usecs == 0 ||
+ coalesce->tx_coalesce_usecs >
+ BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
+ return -EINVAL;
+
+ mutex_lock(&bnad->conf_mutex);
+ /*
+ * No need to store rx_coalesce_usecs here: whenever DIM is
+ * disabled we can fetch it from the stack again.
+ */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (coalesce->use_adaptive_rx_coalesce) {
+ if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) {
+ bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
+ bnad_dim_timer_start(bnad);
+ }
+ } else {
+ if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
+ bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
+ dim_timer_del = bnad_dim_timer_running(bnad);
+ if (dim_timer_del) {
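+ /*
+ * Drop bna_lock across del_timer_sync(): it can sleep, and
+ * the DIM timer callback takes the same lock.
+ */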
+ clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
+ &bnad->run_flags);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ del_timer_sync(&bnad->dim_timer);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ }
+ bnad_rx_coalescing_timeo_set(bnad);
+ }
+ }
+ if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs /
+ BFI_COALESCING_TIMER_UNIT) {
+ bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs /
+ BFI_COALESCING_TIMER_UNIT;
+ bnad_tx_coalescing_timeo_set(bnad);
+ }
+
+ if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs /
+ BFI_COALESCING_TIMER_UNIT) {
+ bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs /
+ BFI_COALESCING_TIMER_UNIT;
+
+ if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED))
+ bnad_rx_coalescing_timeo_set(bnad);
+
+ }
+
+ /* Add Tx Inter-pkt DMA count? */
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+}
+
+static void
+bnad_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ringparam)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+
+ ringparam->rx_max_pending = BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq;
+ ringparam->rx_mini_max_pending = 0;
+ ringparam->rx_jumbo_max_pending = 0;
+ ringparam->tx_max_pending = BNAD_MAX_Q_DEPTH;
+
+ ringparam->rx_pending = bnad->rxq_depth;
+ ringparam->rx_mini_pending = 0;
+ ringparam->rx_jumbo_pending = 0;
+ ringparam->tx_pending = bnad->txq_depth;
+}
+
+static int
+bnad_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ringparam)
+{
+ int i, current_err, err = 0;
+ struct bnad *bnad = netdev_priv(netdev);
+
+ mutex_lock(&bnad->conf_mutex);
+ if (ringparam->rx_pending == bnad->rxq_depth &&
+ ringparam->tx_pending == bnad->txq_depth) {
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+ }
+
+ if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
+ ringparam->rx_pending > BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq ||
+ !BNA_POWER_OF_2(ringparam->rx_pending)) {
+ mutex_unlock(&bnad->conf_mutex);
+ return -EINVAL;
+ }
+ if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
+ ringparam->tx_pending > BNAD_MAX_Q_DEPTH ||
+ !BNA_POWER_OF_2(ringparam->tx_pending)) {
+ mutex_unlock(&bnad->conf_mutex);
+ return -EINVAL;
+ }
+
+ if (ringparam->rx_pending != bnad->rxq_depth) {
+ bnad->rxq_depth = ringparam->rx_pending;
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ bnad_cleanup_rx(bnad, i);
+ current_err = bnad_setup_rx(bnad, i);
+ if (current_err && !err)
+ err = current_err;
+ }
+ }
+ if (ringparam->tx_pending != bnad->txq_depth) {
+ bnad->txq_depth = ringparam->tx_pending;
+ for (i = 0; i < bnad->num_tx; i++) {
+ if (!bnad->tx_info[i].tx)
+ continue;
+ bnad_cleanup_tx(bnad, i);
+ current_err = bnad_setup_tx(bnad, i);
+ if (current_err && !err)
+ err = current_err;
+ }
+ }
+
+ mutex_unlock(&bnad->conf_mutex);
+ return err;
+}
+
+static void
+bnad_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+
+ pauseparam->autoneg = 0;
+ pauseparam->rx_pause = bnad->bna.port.pause_config.rx_pause;
+ pauseparam->tx_pause = bnad->bna.port.pause_config.tx_pause;
+}
+
+static int
+bnad_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bna_pause_config pause_config;
+ unsigned long flags;
+
+ if (pauseparam->autoneg == AUTONEG_ENABLE)
+ return -EINVAL;
+
+ mutex_lock(&bnad->conf_mutex);
+ if (pauseparam->rx_pause != bnad->bna.port.pause_config.rx_pause ||
+ pauseparam->tx_pause != bnad->bna.port.pause_config.tx_pause) {
+ pause_config.rx_pause = pauseparam->rx_pause;
+ pause_config.tx_pause = pauseparam->tx_pause;
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ }
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+}
+
+static u32
+bnad_get_rx_csum(struct net_device *netdev)
+{
+ u32 rx_csum;
+ struct bnad *bnad = netdev_priv(netdev);
+
+ rx_csum = bnad->rx_csum;
+ return rx_csum;
+}
+
+static int
+bnad_set_rx_csum(struct net_device *netdev, u32 rx_csum)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+
+ mutex_lock(&bnad->conf_mutex);
+ bnad->rx_csum = rx_csum;
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+}
+
+static int
+bnad_set_tx_csum(struct net_device *netdev, u32 tx_csum)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+
+ mutex_lock(&bnad->conf_mutex);
+ if (tx_csum) {
+ netdev->features |= NETIF_F_IP_CSUM;
+ netdev->features |= NETIF_F_IPV6_CSUM;
+ } else {
+ netdev->features &= ~NETIF_F_IP_CSUM;
+ netdev->features &= ~NETIF_F_IPV6_CSUM;
+ }
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+}
+
+static int
+bnad_set_tso(struct net_device *netdev, u32 tso)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+
+ mutex_lock(&bnad->conf_mutex);
+ if (tso) {
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+ } else {
+ netdev->features &= ~NETIF_F_TSO;
+ netdev->features &= ~NETIF_F_TSO6;
+ }
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+}
+
+static void
+bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ int i, j, q_num;
+ u64 bmap;
+
+ mutex_lock(&bnad->conf_mutex);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
+ BUG_ON(strlen(bnad_net_stats_strings[i]) >=
+ ETH_GSTRING_LEN);
+ memcpy(string, bnad_net_stats_strings[i],
+ ETH_GSTRING_LEN);
+ string += ETH_GSTRING_LEN;
+ }
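+ /*
+ * txf_bmap[] holds two 32-bit words; fold them into a single
+ * 64-bit mask and emit one block of strings per set bit.
+ */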
+ bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
+ ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
+ for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
+ if (bmap & 1) {
+ sprintf(string, "txf%d_ucast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_ucast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_ucast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_mcast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_mcast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_mcast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_bcast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_bcast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_bcast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_errors", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_filter_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txf%d_filter_mac_sa", i);
+ string += ETH_GSTRING_LEN;
+ }
+ bmap >>= 1;
+ }
+
+ bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
+ ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
+ for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
+ if (bmap & 1) {
+ sprintf(string, "rxf%d_ucast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_ucast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_ucast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_mcast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_mcast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_mcast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_bcast_octets", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_bcast", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_bcast_vlan", i);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxf%d_frame_drops", i);
+ string += ETH_GSTRING_LEN;
+ }
+ bmap >>= 1;
+ }
+
+ q_num = 0;
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ sprintf(string, "cq%d_producer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "cq%d_consumer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "cq%d_hw_producer_index",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ q_num++;
+ }
+ }
+
+ q_num = 0;
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+ sprintf(string, "rxq%d_packets", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_bytes", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_packets_with_error",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_allocbuf_failed", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_producer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_consumer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ q_num++;
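+ /*
+ * A second RXQ (rcb[1]) exists when the CQ drives a pair
+ * of receive queues; emit a second set of rxq strings
+ * for it.
+ */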
+ if (bnad->rx_info[i].rx_ctrl[j].ccb &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->
+ rcb[1] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->
+ rcb[1]->rxq) {
+ sprintf(string, "rxq%d_packets", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_bytes", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string,
+ "rxq%d_packets_with_error", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_allocbuf_failed",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_producer_index",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_consumer_index",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ q_num++;
+ }
+ }
+ }
+
+ q_num = 0;
+ for (i = 0; i < bnad->num_tx; i++) {
+ if (!bnad->tx_info[i].tx)
+ continue;
+ for (j = 0; j < bnad->num_txq_per_tx; j++) {
+ sprintf(string, "txq%d_packets", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txq%d_bytes", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txq%d_producer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txq%d_consumer_index", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "txq%d_hw_consumer_index",
+ q_num);
+ string += ETH_GSTRING_LEN;
+ q_num++;
+ }
+ }
+
+ break;
+
+ default:
+ break;
+ }
+
+ mutex_unlock(&bnad->conf_mutex);
+}
+
+static int
+bnad_get_stats_count_locked(struct net_device *netdev)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ int i, j, count, rxf_active_num = 0, txf_active_num = 0;
+ u64 bmap;
+
+ bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
+ ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
+ for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
+ if (bmap & 1)
+ txf_active_num++;
+ bmap >>= 1;
+ }
+ bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
+ ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
+ for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
+ if (bmap & 1)
+ rxf_active_num++;
+ bmap >>= 1;
+ }
+ count = BNAD_ETHTOOL_STATS_NUM +
+ txf_active_num * BNAD_NUM_TXF_COUNTERS +
+ rxf_active_num * BNAD_NUM_RXF_COUNTERS;
+
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS;
+ count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++)
+ if (bnad->rx_info[i].rx_ctrl[j].ccb &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
+ count += BNAD_NUM_RXQ_COUNTERS;
+ }
+
+ for (i = 0; i < bnad->num_tx; i++) {
+ if (!bnad->tx_info[i].tx)
+ continue;
+ count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS;
+ }
+ return count;
+}
+
+static int
+bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
+{
+ int i, j;
+ struct bna_rcb *rcb = NULL;
+ struct bna_tcb *tcb = NULL;
+
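+ /*
+ * The fill order must mirror bnad_get_strings(): per-CQ indices
+ * first, then per-RXQ counters (both queues of a pair), then
+ * per-TXQ counters.
+ */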
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++)
+ if (bnad->rx_info[i].rx_ctrl[j].ccb &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) {
+ buf[bi++] = bnad->rx_info[i].rx_ctrl[j].
+ ccb->producer_index;
+ buf[bi++] = 0; /* ccb->consumer_index */
+ buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j].
+ ccb->hw_producer_index);
+ }
+ }
+ for (i = 0; i < bnad->num_rx; i++) {
+ if (!bnad->rx_info[i].rx)
+ continue;
+ for (j = 0; j < bnad->num_rxp_per_rx; j++)
+ if (bnad->rx_info[i].rx_ctrl[j].ccb) {
+ if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->
+ rcb[0]->rxq) {
+ rcb = bnad->rx_info[i].rx_ctrl[j].
+ ccb->rcb[0];
+ buf[bi++] = rcb->rxq->rx_packets;
+ buf[bi++] = rcb->rxq->rx_bytes;
+ buf[bi++] = rcb->rxq->
+ rx_packets_with_error;
+ buf[bi++] = rcb->rxq->
+ rxbuf_alloc_failed;
+ buf[bi++] = rcb->producer_index;
+ buf[bi++] = rcb->consumer_index;
+ }
+ if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
+ bnad->rx_info[i].rx_ctrl[j].ccb->
+ rcb[1]->rxq) {
+ rcb = bnad->rx_info[i].rx_ctrl[j].
+ ccb->rcb[1];
+ buf[bi++] = rcb->rxq->rx_packets;
+ buf[bi++] = rcb->rxq->rx_bytes;
+ buf[bi++] = rcb->rxq->
+ rx_packets_with_error;
+ buf[bi++] = rcb->rxq->
+ rxbuf_alloc_failed;
+ buf[bi++] = rcb->producer_index;
+ buf[bi++] = rcb->consumer_index;
+ }
+ }
+ }
+
+ for (i = 0; i < bnad->num_tx; i++) {
+ if (!bnad->tx_info[i].tx)
+ continue;
+ for (j = 0; j < bnad->num_txq_per_tx; j++)
+ if (bnad->tx_info[i].tcb[j] &&
+ bnad->tx_info[i].tcb[j]->txq) {
+ tcb = bnad->tx_info[i].tcb[j];
+ buf[bi++] = tcb->txq->tx_packets;
+ buf[bi++] = tcb->txq->tx_bytes;
+ buf[bi++] = tcb->producer_index;
+ buf[bi++] = tcb->consumer_index;
+ buf[bi++] = *(tcb->hw_consumer_index);
+ }
+ }
+
+ return bi;
+}
+
+static void
+bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
+ u64 *buf)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ int i, j, bi;
+ unsigned long flags;
+ struct rtnl_link_stats64 *net_stats64;
+ u64 *stats64;
+ u64 bmap;
+
+ mutex_lock(&bnad->conf_mutex);
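+ /*
+ * If the queue configuration changed after get_sset_count(), the
+ * buffer the stack allocated no longer matches; bail out rather
+ * than overrun it.
+ */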
+ if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
+ mutex_unlock(&bnad->conf_mutex);
+ return;
+ }
+
+ /*
+ * Take bna_lock to synchronize reads of bna_stats, which is
+ * written under the same lock.
+ */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bi = 0;
+ memset(buf, 0, stats->n_stats * sizeof(u64));
+
+ net_stats64 = (struct rtnl_link_stats64 *)buf;
+ bnad_netdev_qstats_fill(bnad, net_stats64);
+ bnad_netdev_hwstats_fill(bnad, net_stats64);
+
+ bi = sizeof(*net_stats64) / sizeof(u64);
+
+ /* Fill driver stats into ethtool buffers */
+ stats64 = (u64 *)&bnad->stats.drv_stats;
+ for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
+ buf[bi++] = stats64[i];
+
+ /* Fill hardware stats excluding the rxf/txf into ethtool bufs */
+ stats64 = (u64 *) bnad->stats.bna_stats->hw_stats;
+ for (i = 0;
+ i < offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64);
+ i++)
+ buf[bi++] = stats64[i];
+
+ /* Fill txf stats into ethtool buffers */
+ bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
+ ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
+ for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
+ if (bmap & 1) {
+ stats64 = (u64 *)&bnad->stats.bna_stats->
+ hw_stats->txf_stats[i];
+ for (j = 0; j < sizeof(struct bfi_ll_stats_txf) /
+ sizeof(u64); j++)
+ buf[bi++] = stats64[j];
+ }
+ bmap >>= 1;
+ }
+
+ /* Fill rxf stats into ethtool buffers */
+ bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
+ ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
+ for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
+ if (bmap & 1) {
+ stats64 = (u64 *)&bnad->stats.bna_stats->
+ hw_stats->rxf_stats[i];
+ for (j = 0; j < sizeof(struct bfi_ll_stats_rxf) /
+ sizeof(u64); j++)
+ buf[bi++] = stats64[j];
+ }
+ bmap >>= 1;
+ }
+
+ /* Fill per Q stats into ethtool buffers */
+ bi = bnad_per_q_stats_fill(bnad, buf, bi);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ mutex_unlock(&bnad->conf_mutex);
+}
+
+static int
+bnad_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return bnad_get_stats_count_locked(netdev);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct ethtool_ops bnad_ethtool_ops = {
+ .get_settings = bnad_get_settings,
+ .set_settings = bnad_set_settings,
+ .get_drvinfo = bnad_get_drvinfo,
+ .get_regs_len = bnad_get_regs_len,
+ .get_regs = bnad_get_regs,
+ .get_wol = bnad_get_wol,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = bnad_get_coalesce,
+ .set_coalesce = bnad_set_coalesce,
+ .get_ringparam = bnad_get_ringparam,
+ .set_ringparam = bnad_set_ringparam,
+ .get_pauseparam = bnad_get_pauseparam,
+ .set_pauseparam = bnad_set_pauseparam,
+ .get_rx_csum = bnad_get_rx_csum,
+ .set_rx_csum = bnad_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = bnad_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = bnad_set_tso,
+ .get_strings = bnad_get_strings,
+ .get_ethtool_stats = bnad_get_ethtool_stats,
+ .get_sset_count = bnad_get_sset_count
+};
+
+void
+bnad_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops);
+}
diff --git a/drivers/net/bna/cna.h b/drivers/net/bna/cna.h
new file mode 100644
index 000000000000..bbd39dc65972
--- /dev/null
+++ b/drivers/net/bna/cna.h
@@ -0,0 +1,81 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2006-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#ifndef __CNA_H__
+#define __CNA_H__
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <asm/page.h>
+#include <asm/io.h>
+#include <asm/string.h>
+
+#include <linux/list.h>
+
+#define bfa_sm_fault(__mod, __event) do { \
+ pr_err("SM Assertion failure: %s: %d: event = %d", __FILE__, __LINE__, \
+ __event); \
+} while (0)
+
+extern char bfa_version[];
+
+#define CNA_FW_FILE_CT "ctfw_cna.bin"
+#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
+
+#pragma pack(1)
+
+#define MAC_ADDRLEN (6)
+typedef struct mac { u8 mac[MAC_ADDRLEN]; } mac_t;
+
+#pragma pack()
+
+#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
+#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
+#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev)
+
+/*
+ * bfa_q_qe_init - to initialize a queue element
+ */
+#define bfa_q_qe_init(_qe) { \
+ bfa_q_next(_qe) = (struct list_head *) NULL; \
+ bfa_q_prev(_qe) = (struct list_head *) NULL; \
+}
+
+/*
+ * bfa_q_deq - dequeue an element from head of the queue
+ */
+#define bfa_q_deq(_q, _qe) { \
+ if (!list_empty(_q)) { \
+ (*((struct list_head **) (_qe))) = bfa_q_next(_q); \
+ bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \
+ (struct list_head *) (_q); \
+ bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe)); \
+ bfa_q_qe_init(*((struct list_head **) _qe)); \
+ } else { \
+ *((struct list_head **) (_qe)) = (struct list_head *) NULL; \
+ } \
+}
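+
+/*
+ * Minimal usage sketch (the queue and handler names below are
+ * hypothetical):
+ *
+ *   struct list_head *qe;
+ *
+ *   bfa_q_deq(&mod->req_q, &qe);
+ *   if (qe)
+ *           process_qe(qe);
+ */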
+
+#endif /* __CNA_H__ */
diff --git a/drivers/net/bna/cna_fwimg.c b/drivers/net/bna/cna_fwimg.c
new file mode 100644
index 000000000000..0bd1d3790a27
--- /dev/null
+++ b/drivers/net/bna/cna_fwimg.c
@@ -0,0 +1,64 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#include <linux/firmware.h>
+#include "cna.h"
+
+const struct firmware *bfi_fw;
+static u32 *bfi_image_ct_cna;
+static u32 bfi_image_ct_cna_size;
+
+u32 *
+cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
+ u32 *bfi_image_size, char *fw_name)
+{
+ const struct firmware *fw;
+
+ if (request_firmware(&fw, fw_name, &pdev->dev)) {
+ pr_alert("Can't locate firmware %s\n", fw_name);
+ goto error;
+ }
+
+ *bfi_image = (u32 *)fw->data;
+ *bfi_image_size = fw->size / sizeof(u32);
+ bfi_fw = fw;
+
+ return *bfi_image;
+error:
+ return NULL;
+}
+
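+/* Read and cache the CT firmware image on first use. */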
+u32 *
+cna_get_firmware_buf(struct pci_dev *pdev)
+{
+ if (bfi_image_ct_cna_size == 0)
+ cna_read_firmware(pdev, &bfi_image_ct_cna,
+ &bfi_image_ct_cna_size, CNA_FW_FILE_CT);
+ return bfi_image_ct_cna;
+}
+
+u32 *
+bfa_cb_image_get_chunk(int type, u32 off)
+{
+ return (u32 *)(bfi_image_ct_cna + off);
+}
+
+u32
+bfa_cb_image_get_size(int type)
+{
+ return bfi_image_ct_cna_size;
+}
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index e6a803f1c507..4ff76e38e788 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -49,6 +49,7 @@
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
+#include <linux/aer.h>
#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
@@ -3217,7 +3218,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
}
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
if (bp->rx_csum &&
(status & (L2_FHDR_STATUS_TCP_SEGMENT |
L2_FHDR_STATUS_UDP_DATAGRAM))) {
@@ -7890,6 +7891,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
int rc, i, j;
u32 reg;
u64 dma_mask, persist_dma_mask;
+ int err;
SET_NETDEV_DEV(dev, &pdev->dev);
bp = netdev_priv(dev);
@@ -7925,6 +7927,14 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
goto err_out_disable;
}
+ /* AER (Advanced Error Reporting) hooks */
+ err = pci_enable_pcie_error_reporting(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
+ "0x%x\n", err);
+ /* non-fatal, continue */
+ }
+
pci_set_master(pdev);
pci_save_state(pdev);
@@ -8246,6 +8256,7 @@ err_out_unmap:
}
err_out_release:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
err_out_disable:
@@ -8436,6 +8447,9 @@ bnx2_remove_one(struct pci_dev *pdev)
kfree(bp->temp_stats_blk);
free_netdev(dev);
+
+ pci_disable_pcie_error_reporting(pdev);
+
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
@@ -8527,25 +8541,35 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2 *bp = netdev_priv(dev);
+ pci_ers_result_t result;
+ int err;
rtnl_lock();
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev,
"Cannot re-enable PCI device after reset\n");
- rtnl_unlock();
- return PCI_ERS_RESULT_DISCONNECT;
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ if (netif_running(dev)) {
+ bnx2_set_power_state(bp, PCI_D0);
+ bnx2_init_nic(bp, 1);
+ }
+ result = PCI_ERS_RESULT_RECOVERED;
}
- pci_set_master(pdev);
- pci_restore_state(pdev);
- pci_save_state(pdev);
+ rtnl_unlock();
- if (netif_running(dev)) {
- bnx2_set_power_state(bp, PCI_D0);
- bnx2_init_nic(bp, 1);
+ err = pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
+ err); /* non-fatal, continue */
}
- rtnl_unlock();
- return PCI_ERS_RESULT_RECOVERED;
+ return result;
}
/**
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 53af9c93e75c..64329c5fbdea 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -20,8 +20,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.52.53-3"
-#define DRV_MODULE_RELDATE "2010/18/04"
+#define DRV_MODULE_VERSION "1.52.53-7"
+#define DRV_MODULE_RELDATE "2010/09/12"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
@@ -369,6 +369,7 @@ struct bnx2x_fastpath {
#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS)
#define MAX_RX_BD (NUM_RX_BD - 1)
#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
+#define MIN_RX_AVAIL 128
#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \
(MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
#define RX_BD(x) ((x) & MAX_RX_BD)
@@ -566,13 +567,13 @@ struct bnx2x_common {
struct bnx2x_port {
u32 pmf;
- u32 link_config;
+ u32 link_config[LINK_CONFIG_SIZE];
- u32 supported;
+ u32 supported[LINK_CONFIG_SIZE];
/* link settings - missing defines */
#define SUPPORTED_2500baseX_Full (1 << 15)
- u32 advertising;
+ u32 advertising[LINK_CONFIG_SIZE];
/* link settings - missing defines */
#define ADVERTISED_2500baseX_Full (1 << 15)
@@ -931,7 +932,7 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
-u32 bnx2x_fw_command(struct bnx2x *bp, u32 command);
+u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
u32 addr, u32 len);
@@ -939,7 +940,7 @@ void bnx2x_calc_fc_adv(struct bnx2x *bp);
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
u32 data_hi, u32 data_lo, int common);
void bnx2x_update_coalesce(struct bnx2x *bp);
-
+int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
int wait)
{
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 02bf710629a3..efc7be4aefb5 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -20,6 +20,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
+#include <linux/firmware.h>
#include "bnx2x_cmn.h"
#ifdef BCM_VLAN
@@ -622,7 +623,7 @@ reuse_rx:
/* Set Toeplitz hash for a none-LRO skb */
bnx2x_set_skb_rxhash(bp, cqe, skb);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
if (bp->rx_csum) {
if (likely(BNX2X_RX_CSUM_OK(cqe)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -780,6 +781,10 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
ETH_MAX_AGGREGATION_QUEUES_E1H;
u16 ring_prod, cqe_ring_prod;
int i, j;
+ int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
+ MAX_RX_AVAIL/bp->num_queues;
+
+ rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
DP(NETIF_MSG_IFUP,
@@ -882,7 +887,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
/* Allocate BDs and initialize BD ring */
fp->rx_comp_cons = 0;
cqe_ring_prod = ring_prod = 0;
- for (i = 0; i < bp->rx_ring_size; i++) {
+ for (i = 0; i < rx_ring_size; i++) {
if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
BNX2X_ERR("was only able to allocate "
"%d rx skbs on queue[%d]\n", i, j);
@@ -1206,12 +1211,27 @@ static int bnx2x_set_num_queues(struct bnx2x *bp)
return rc;
}
+static void bnx2x_release_firmware(struct bnx2x *bp)
+{
+ kfree(bp->init_ops_offsets);
+ kfree(bp->init_ops);
+ kfree(bp->init_data);
+ release_firmware(bp->firmware);
+}
+
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
u32 load_code;
int i, rc;
+ /* Set init arrays */
+ rc = bnx2x_init_firmware(bp);
+ if (rc) {
+ BNX2X_ERR("Error loading firmware\n");
+ return rc;
+ }
+
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
return -EPERM;
@@ -1267,7 +1287,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
common blocks should be initialized, otherwise - not
*/
if (!BP_NOMCP(bp)) {
- load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
+ load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
if (!load_code) {
BNX2X_ERR("MCP response failure, aborting\n");
rc = -EBUSY;
@@ -1306,9 +1326,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
rc = bnx2x_init_hw(bp, load_code);
if (rc) {
BNX2X_ERR("HW init failed, aborting\n");
- bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
- bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
- bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
goto load_error2;
}
@@ -1323,7 +1343,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Send LOAD_DONE command to MCP */
if (!BP_NOMCP(bp)) {
- load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
+ load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
if (!load_code) {
BNX2X_ERR("MCP response failure, aborting\n");
rc = -EBUSY;
@@ -1427,6 +1447,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
#endif
bnx2x_inc_load_cnt(bp);
+ bnx2x_release_firmware(bp);
+
return 0;
#ifdef BCM_CNIC
@@ -1437,8 +1459,8 @@ load_error4:
load_error3:
bnx2x_int_disable_sync(bp, 1);
if (!BP_NOMCP(bp)) {
- bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
- bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
}
bp->port.pmf = 0;
/* Free SKBs, SGEs, TPA pool and driver internals */
@@ -1454,6 +1476,8 @@ load_error1:
netif_napi_del(&bnx2x_fp(bp, i, napi));
bnx2x_free_mem(bp);
+ bnx2x_release_firmware(bp);
+
return rc;
}
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index d1979b1a7ed2..d1e6a8c977d1 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -49,10 +49,11 @@ void bnx2x_link_set(struct bnx2x *bp);
* Query link status
*
* @param bp
+ * @param is_serdes
*
* @return 0 - link is UP
*/
-u8 bnx2x_link_test(struct bnx2x *bp);
+u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);
/**
* Handles link status change
@@ -115,6 +116,15 @@ void bnx2x_int_enable(struct bnx2x *bp);
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
/**
+ * Loads device firmware
+ *
+ * @param bp
+ *
+ * @return int
+ */
+int bnx2x_init_firmware(struct bnx2x *bp);
+
+/**
* Init HW blocks according to current initialization stage:
* COMMON, PORT or FUNCTION.
*
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 8b75b05e34c5..d9748e97fad3 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -29,9 +29,12 @@
static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct bnx2x *bp = netdev_priv(dev);
-
- cmd->supported = bp->port.supported;
- cmd->advertising = bp->port.advertising;
+ int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ /* Dual Media boards present all available port types */
+ cmd->supported = bp->port.supported[cfg_idx] |
+ (bp->port.supported[cfg_idx ^ 1] &
+ (SUPPORTED_TP | SUPPORTED_FIBRE));
+ cmd->advertising = bp->port.advertising[cfg_idx];
if ((bp->state == BNX2X_STATE_OPEN) &&
!(bp->flags & MF_FUNC_DIS) &&
@@ -48,47 +51,21 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->speed = vn_max_rate;
}
} else {
- cmd->speed = -1;
- cmd->duplex = -1;
+ cmd->speed = bp->link_params.req_line_speed[cfg_idx];
+ cmd->duplex = bp->link_params.req_duplex[cfg_idx];
}
- if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
- u32 ext_phy_type =
- XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
-
- switch (ext_phy_type) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
- cmd->port = PORT_FIBRE;
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
- cmd->port = PORT_TP;
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
- BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
- bp->link_params.ext_phy_config);
- break;
-
- default:
- DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
- bp->link_params.ext_phy_config);
- break;
- }
- } else
+ if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
cmd->port = PORT_TP;
+ else if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
+ cmd->port = PORT_FIBRE;
+ else
+ BNX2X_ERR("XGXS PHY Failure detected\n");
cmd->phy_address = bp->mdio.prtad;
cmd->transceiver = XCVR_INTERNAL;
- if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
+ if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG)
cmd->autoneg = AUTONEG_ENABLE;
else
cmd->autoneg = AUTONEG_DISABLE;
@@ -110,7 +87,7 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct bnx2x *bp = netdev_priv(dev);
- u32 advertising;
+ u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
if (IS_E1HMF(bp))
return 0;
@@ -123,26 +100,81 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+ cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ old_multi_phy_config = bp->link_params.multi_phy_config;
+ switch (cmd->port) {
+ case PORT_TP:
+ if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
+ break; /* no port change */
+
+ if (!(bp->port.supported[0] & SUPPORTED_TP ||
+ bp->port.supported[1] & SUPPORTED_TP)) {
+ DP(NETIF_MSG_LINK, "Unsupported port type\n");
+ return -EINVAL;
+ }
+ bp->link_params.multi_phy_config &=
+ ~PORT_HW_CFG_PHY_SELECTION_MASK;
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ else
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ break;
+ case PORT_FIBRE:
+ if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
+ break; /* no port change */
+
+ if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
+ bp->port.supported[1] & SUPPORTED_FIBRE)) {
+ DP(NETIF_MSG_LINK, "Unsupported port type\n");
+ return -EINVAL;
+ }
+ bp->link_params.multi_phy_config &=
+ ~PORT_HW_CFG_PHY_SELECTION_MASK;
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ else
+ bp->link_params.multi_phy_config |=
+ PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Unsupported port type\n");
+ return -EINVAL;
+ }
+ /* Save the new config in case the command completes successfully */
+ new_multi_phy_config = bp->link_params.multi_phy_config;
+ /* Get the new cfg_idx */
+ cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ /* Restore old config in case command failed */
+ bp->link_params.multi_phy_config = old_multi_phy_config;
+ DP(NETIF_MSG_LINK, "cfg_idx = %x\n", cfg_idx);
+
if (cmd->autoneg == AUTONEG_ENABLE) {
- if (!(bp->port.supported & SUPPORTED_Autoneg)) {
+ if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
DP(NETIF_MSG_LINK, "Autoneg not supported\n");
return -EINVAL;
}
/* advertise the requested speed and duplex if supported */
- cmd->advertising &= bp->port.supported;
+ cmd->advertising &= bp->port.supported[cfg_idx];
- bp->link_params.req_line_speed = SPEED_AUTO_NEG;
- bp->link_params.req_duplex = DUPLEX_FULL;
- bp->port.advertising |= (ADVERTISED_Autoneg |
+ bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
+ bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL;
+ bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg |
cmd->advertising);
} else { /* forced speed */
/* advertise the requested speed and duplex if supported */
- switch (cmd->speed) {
+ u32 speed = cmd->speed;
+ speed |= (cmd->speed_hi << 16);
+ switch (speed) {
case SPEED_10:
if (cmd->duplex == DUPLEX_FULL) {
- if (!(bp->port.supported &
+ if (!(bp->port.supported[cfg_idx] &
SUPPORTED_10baseT_Full)) {
DP(NETIF_MSG_LINK,
"10M full not supported\n");
@@ -152,7 +184,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
advertising = (ADVERTISED_10baseT_Full |
ADVERTISED_TP);
} else {
- if (!(bp->port.supported &
+ if (!(bp->port.supported[cfg_idx] &
SUPPORTED_10baseT_Half)) {
DP(NETIF_MSG_LINK,
"10M half not supported\n");
@@ -166,7 +198,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
case SPEED_100:
if (cmd->duplex == DUPLEX_FULL) {
- if (!(bp->port.supported &
+ if (!(bp->port.supported[cfg_idx] &
SUPPORTED_100baseT_Full)) {
DP(NETIF_MSG_LINK,
"100M full not supported\n");
@@ -176,7 +208,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
advertising = (ADVERTISED_100baseT_Full |
ADVERTISED_TP);
} else {
- if (!(bp->port.supported &
+ if (!(bp->port.supported[cfg_idx] &
SUPPORTED_100baseT_Half)) {
DP(NETIF_MSG_LINK,
"100M half not supported\n");
@@ -194,7 +226,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return -EINVAL;
}
- if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
+ if (!(bp->port.supported[cfg_idx] &
+ SUPPORTED_1000baseT_Full)) {
DP(NETIF_MSG_LINK, "1G full not supported\n");
return -EINVAL;
}
@@ -210,7 +243,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return -EINVAL;
}
- if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
+ if (!(bp->port.supported[cfg_idx]
+ & SUPPORTED_2500baseX_Full)) {
DP(NETIF_MSG_LINK,
"2.5G full not supported\n");
return -EINVAL;
@@ -226,7 +260,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return -EINVAL;
}
- if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
+ if (!(bp->port.supported[cfg_idx]
+ & SUPPORTED_10000baseT_Full)) {
DP(NETIF_MSG_LINK, "10G full not supported\n");
return -EINVAL;
}
@@ -236,20 +271,23 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
break;
default:
- DP(NETIF_MSG_LINK, "Unsupported speed\n");
+ DP(NETIF_MSG_LINK, "Unsupported speed %d\n", speed);
return -EINVAL;
}
- bp->link_params.req_line_speed = cmd->speed;
- bp->link_params.req_duplex = cmd->duplex;
- bp->port.advertising = advertising;
+ bp->link_params.req_line_speed[cfg_idx] = speed;
+ bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
+ bp->port.advertising[cfg_idx] = advertising;
}
DP(NETIF_MSG_LINK, "req_line_speed %d\n"
DP_LEVEL " req_duplex %d advertising 0x%x\n",
- bp->link_params.req_line_speed, bp->link_params.req_duplex,
- bp->port.advertising);
+ bp->link_params.req_line_speed[cfg_idx],
+ bp->link_params.req_duplex[cfg_idx],
+ bp->port.advertising[cfg_idx]);
+ /* Set new config */
+ bp->link_params.multi_phy_config = new_multi_phy_config;
if (netif_running(dev)) {
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
bnx2x_link_set(bp);
@@ -811,7 +849,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
struct bnx2x *bp = netdev_priv(dev);
int port = BP_PORT(bp);
int rc = 0;
-
+ u32 ext_phy_config;
if (!netif_running(dev))
return -EAGAIN;
@@ -827,6 +865,10 @@ static int bnx2x_set_eeprom(struct net_device *dev,
!bp->port.pmf)
return -EINVAL;
+ ext_phy_config =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config);
+
if (eeprom->magic == 0x50485950) {
/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
@@ -834,7 +876,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
bnx2x_acquire_phy_lock(bp);
rc |= bnx2x_link_reset(&bp->link_params,
&bp->link_vars, 0);
- if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
+ if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
MISC_REGISTERS_GPIO_HIGH, port);
@@ -855,10 +897,8 @@ static int bnx2x_set_eeprom(struct net_device *dev,
}
} else if (eeprom->magic == 0x53985943) {
/* 'PHYC' (0x53985943): PHY FW upgrade completed */
- if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
+ if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
- u8 ext_phy_addr =
- XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
/* DSP Remove Download Mode */
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
@@ -866,7 +906,8 @@ static int bnx2x_set_eeprom(struct net_device *dev,
bnx2x_acquire_phy_lock(bp);
- bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
+ bnx2x_sfx7101_sp_sw_reset(bp,
+ &bp->link_params.phy[EXT_PHY1]);
/* wait 0.5 sec to allow it to run */
msleep(500);
@@ -920,7 +961,14 @@ static void bnx2x_get_ringparam(struct net_device *dev,
ering->rx_mini_max_pending = 0;
ering->rx_jumbo_max_pending = 0;
- ering->rx_pending = bp->rx_ring_size;
+ if (bp->rx_ring_size)
+ ering->rx_pending = bp->rx_ring_size;
+ else
+ if (bp->state == BNX2X_STATE_OPEN && bp->num_queues)
+ ering->rx_pending = MAX_RX_AVAIL/bp->num_queues;
+ else
+ ering->rx_pending = MAX_RX_AVAIL;
+
ering->rx_mini_pending = 0;
ering->rx_jumbo_pending = 0;
@@ -940,6 +988,7 @@ static int bnx2x_set_ringparam(struct net_device *dev,
}
if ((ering->rx_pending > MAX_RX_AVAIL) ||
+ (ering->rx_pending < MIN_RX_AVAIL) ||
(ering->tx_pending > MAX_TX_AVAIL) ||
(ering->tx_pending <= MAX_SKB_FRAGS + 4))
return -EINVAL;
@@ -959,10 +1008,9 @@ static void bnx2x_get_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *epause)
{
struct bnx2x *bp = netdev_priv(dev);
-
- epause->autoneg = (bp->link_params.req_flow_ctrl ==
- BNX2X_FLOW_CTRL_AUTO) &&
- (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
+ int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ epause->autoneg = (bp->link_params.req_flow_ctrl[cfg_idx] ==
+ BNX2X_FLOW_CTRL_AUTO);
epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
BNX2X_FLOW_CTRL_RX);
@@ -978,7 +1026,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *epause)
{
struct bnx2x *bp = netdev_priv(dev);
-
+ u32 cfg_idx = bnx2x_get_link_cfg_idx(bp);
if (IS_E1HMF(bp))
return 0;
@@ -986,29 +1034,31 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
- bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
+ bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO;
if (epause->rx_pause)
- bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
+ bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_RX;
if (epause->tx_pause)
- bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
+ bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_TX;
- if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
- bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ if (bp->link_params.req_flow_ctrl[cfg_idx] == BNX2X_FLOW_CTRL_AUTO)
+ bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_NONE;
if (epause->autoneg) {
- if (!(bp->port.supported & SUPPORTED_Autoneg)) {
+ if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
DP(NETIF_MSG_LINK, "autoneg not supported\n");
return -EINVAL;
}
- if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
- bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
+ if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) {
+ bp->link_params.req_flow_ctrl[cfg_idx] =
+ BNX2X_FLOW_CTRL_AUTO;
+ }
}
DP(NETIF_MSG_LINK,
- "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
+ "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl[cfg_idx]);
if (netif_running(dev)) {
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
@@ -1272,12 +1322,12 @@ test_mem_exit:
return rc;
}
-static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
+static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
{
int cnt = 1000;
if (link_up)
- while (bnx2x_link_test(bp) && cnt--)
+ while (bnx2x_link_test(bp, is_serdes) && cnt--)
msleep(10);
}
@@ -1304,7 +1354,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
/* check the loopback mode */
switch (loopback_mode) {
case BNX2X_PHY_LOOPBACK:
- if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
+ if (bp->link_params.loopback_mode != LOOPBACK_XGXS)
return -EINVAL;
break;
case BNX2X_MAC_LOOPBACK:
@@ -1549,7 +1599,7 @@ static void bnx2x_self_test(struct net_device *dev,
struct ethtool_test *etest, u64 *buf)
{
struct bnx2x *bp = netdev_priv(dev);
-
+ u8 is_serdes;
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
printk(KERN_ERR "Handling parity error recovery. Try again later\n");
etest->flags |= ETH_TEST_FL_FAILED;
@@ -1564,6 +1614,7 @@ static void bnx2x_self_test(struct net_device *dev,
/* offline tests are not supported in MF mode */
if (IS_E1HMF(bp))
etest->flags &= ~ETH_TEST_FL_OFFLINE;
+ is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
if (etest->flags & ETH_TEST_FL_OFFLINE) {
int port = BP_PORT(bp);
@@ -1575,11 +1626,12 @@ static void bnx2x_self_test(struct net_device *dev,
/* disable input for TX port IF */
REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
- link_up = (bnx2x_link_test(bp) == 0);
+ link_up = bp->link_vars.link_up;
+
bnx2x_nic_unload(bp, UNLOAD_NORMAL);
bnx2x_nic_load(bp, LOAD_DIAG);
/* wait until link state is restored */
- bnx2x_wait_for_link(bp, link_up);
+ bnx2x_wait_for_link(bp, link_up, is_serdes);
if (bnx2x_test_registers(bp) != 0) {
buf[0] = 1;
@@ -1600,7 +1652,7 @@ static void bnx2x_self_test(struct net_device *dev,
bnx2x_nic_load(bp, LOAD_NORMAL);
/* wait until link state is restored */
- bnx2x_wait_for_link(bp, link_up);
+ bnx2x_wait_for_link(bp, link_up, is_serdes);
}
if (bnx2x_test_nvram(bp) != 0) {
buf[3] = 1;
@@ -1611,7 +1663,7 @@ static void bnx2x_self_test(struct net_device *dev,
etest->flags |= ETH_TEST_FL_FAILED;
}
if (bp->port.pmf)
- if (bnx2x_link_test(bp) != 0) {
+ if (bnx2x_link_test(bp, is_serdes) != 0) {
buf[5] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
@@ -1910,10 +1962,11 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
for (i = 0; i < (data * 2); i++) {
if ((i % 2) == 0)
- bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
- SPEED_1000);
+ bnx2x_set_led(&bp->link_params, &bp->link_vars,
+ LED_MODE_OPER, SPEED_1000);
else
- bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
+ bnx2x_set_led(&bp->link_params, &bp->link_vars,
+ LED_MODE_OFF, 0);
msleep_interruptible(500);
if (signal_pending(current))
@@ -1921,7 +1974,7 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
}
if (bp->link_vars.link_up)
- bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
+ bnx2x_set_led(&bp->link_params, &bp->link_vars, LED_MODE_OPER,
bp->link_vars.line_speed);
return 0;
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index fd1f29e0317d..60d141cd9950 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -78,6 +78,8 @@ struct shared_hw_cfg { /* NVRAM Offset */
#define SHARED_HW_CFG_LED_PHY11 0x000b0000
#define SHARED_HW_CFG_LED_MAC4 0x000c0000
#define SHARED_HW_CFG_LED_PHY8 0x000d0000
+#define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000
+
#define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000
#define SHARED_HW_CFG_AN_ENABLE_SHIFT 24
@@ -120,6 +122,23 @@ struct shared_hw_cfg { /* NVRAM Offset */
#define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000
#define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000
+ /* Set the MDC/MDIO access for the first external phy */
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK 0x1C000000
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT 26
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE 0x00000000
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0 0x04000000
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1 0x08000000
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH 0x0c000000
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED 0x10000000
+
+ /* Set the MDC/MDIO access for the second external phy */
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK 0xE0000000
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT 29
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE 0x00000000
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0 0x20000000
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1 0x40000000
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000
+#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000
u32 power_dissipated; /* 0x11c */
#define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000
#define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT 24
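A consumer pulls the two access fields out of the dword with the masks above; one plausible decode, shifting the second field down so a single switch can handle both (this helper itself is not part of the patch):

#include <stdint.h>

#define ACCESS1_MASK  0x1C000000	/* SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK */
#define ACCESS1_SHIFT 26
#define ACCESS2_MASK  0xE0000000	/* SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK */
#define ACCESS2_SHIFT 29

static uint32_t mdc_mdio_access(uint32_t config, int second_phy)
{
	if (second_phy)		/* renormalize onto the ACCESS1_* values */
		return (config & ACCESS2_MASK) >>
		       (ACCESS2_SHIFT - ACCESS1_SHIFT);
	return config & ACCESS1_MASK;
}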
@@ -221,7 +240,88 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
u16 xgxs_config_tx[4]; /* 0x1A0 */
- u32 Reserved1[64]; /* 0x1A8 */
+ u32 Reserved1[57]; /* 0x1A8 */
+ u32 speed_capability_mask2; /* 0x28C */
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL 0x00000001
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3__ 0x00000002
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3___ 0x00000004
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL 0x00000008
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G 0x00000010
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_DOT_5G 0x00000020
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G 0x00000040
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_12G 0x00000080
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_12_DOT_5G 0x00000100
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_13G 0x00000200
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_15G 0x00000400
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_16G 0x00000800
+
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK 0xFFFF0000
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT 16
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL 0x00010000
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0__ 0x00020000
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0___ 0x00040000
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL 0x00080000
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G 0x00100000
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_DOT_5G 0x00200000
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G 0x00400000
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_12G 0x00800000
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_12_DOT_5G 0x01000000
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_13G 0x02000000
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_15G 0x04000000
+#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_16G 0x08000000
+
+ /* In the case where two media types (e.g. copper and fiber) are
+ present and electrically active at the same time, PHY Selection
+ will determine which of the two PHYs will be designated as the
+ Active PHY and used for a connection to the network. */
+ u32 multi_phy_config; /* 0x290 */
+#define PORT_HW_CFG_PHY_SELECTION_MASK 0x00000007
+#define PORT_HW_CFG_PHY_SELECTION_SHIFT 0
+#define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT 0x00000000
+#define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY 0x00000001
+#define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY 0x00000002
+#define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003
+#define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004
+
+ /* When enabled, all second phy nvram parameters will be swapped
+ with the first phy parameters */
+#define PORT_HW_CFG_PHY_SWAPPED_MASK 0x00000008
+#define PORT_HW_CFG_PHY_SWAPPED_SHIFT 3
+#define PORT_HW_CFG_PHY_SWAPPED_DISABLED 0x00000000
+#define PORT_HW_CFG_PHY_SWAPPED_ENABLED 0x00000008
+
+
+ /* Address of the second external phy */
+ u32 external_phy_config2; /* 0x294 */
+#define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK 0x000000FF
+#define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT 0
+
+ /* The second XGXS external PHY type */
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK 0x0000FF00
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT 8
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT 0x00000000
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8071 0x00000100
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8072 0x00000200
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8073 0x00000300
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8705 0x00000400
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8706 0x00000500
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8726 0x00000600
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8481 0x00000700
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101 0x00000800
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727 0x00000900
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727_NOC 0x00000a00
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84823 0x00000b00
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54640 0x00000c00
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84833 0x00000d00
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00
+#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00
+
+ /* 4 times 16 bits for all 4 lanes. For some external PHYs (such as
+ 8706, 8726 and 8727) not all 4 values are needed. */
+ u16 xgxs_config2_rx[4]; /* 0x296 */
+ u16 xgxs_config2_tx[4]; /* 0x2A0 */
u32 lane_config;
#define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff
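The selection field is a plain 3-bit enum at the bottom of multi_phy_config; a decode sketch with the PORT_HW_CFG_PHY_SELECTION_* names shortened:

#include <stdint.h>

enum phy_sel {
	SEL_HARDWARE_DEFAULT	= 0,
	SEL_FIRST_PHY		= 1,
	SEL_SECOND_PHY		= 2,
	SEL_FIRST_PHY_PRIORITY	= 3,
	SEL_SECOND_PHY_PRIORITY	= 4,
};

static enum phy_sel phy_selection(uint32_t multi_phy_config)
{
	return (enum phy_sel)(multi_phy_config & 0x7); /* ..._SELECTION_MASK */
}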
@@ -515,10 +615,17 @@ struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
#define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400
/* The default for MCP link configuration,
- uses the same defines as link_config */
+ uses the same defines as link_config */
u32 mfw_wol_link_cfg;
+ /* The default for the driver of the second external phy,
+ uses the same defines as link_config */
+ u32 link_config2; /* 0x47C */
+
+ /* The default for MCP of the second external phy,
+ uses the same defines as link_config */
+ u32 mfw_wol_link_cfg2; /* 0x480 */
- u32 reserved[19];
+ u32 Reserved2[17]; /* 0x484 */
};
@@ -686,8 +793,14 @@ struct drv_func_mb {
* The optic module verification commands require bootcode
* v5.0.6 or later
*/
-#define DRV_MSG_CODE_VRFY_OPT_MDL 0xa0000000
-#define REQ_BC_VER_4_VRFY_OPT_MDL 0x00050006
+#define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL 0xa0000000
+#define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006
+ /*
+ * The specific optic module verification command requires bootcode
+ * v5.2.12 or later
+ */
+#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
+#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
#define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000
#define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000
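The two REQ_BC_VER thresholds gate which verify command a driver may issue; schematically (assuming bootcode versions compare numerically, as the existing verify-command check does):

#include <stdint.h>

#define VRFY_FIRST_PHY		0xa0000000	/* ..._VRFY_FIRST_PHY_OPT_MDL */
#define BC_VER_FIRST_PHY	0x00050006
#define VRFY_SPECIFIC_PHY	0xa1000000	/* ..._VRFY_SPECIFIC_PHY_OPT_MDL */
#define BC_VER_SPECIFIC_PHY	0x00050234

static uint32_t pick_vrfy_cmd(uint32_t bc_ver)
{
	if (bc_ver >= BC_VER_SPECIFIC_PHY)
		return VRFY_SPECIFIC_PHY;	/* per-PHY verification */
	if (bc_ver >= BC_VER_FIRST_PHY)
		return VRFY_FIRST_PHY;
	return 0;	/* bootcode too old: skip module verification */
}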
@@ -922,7 +1035,12 @@ struct shmem2_region {
#define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040
#define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080
#define SHMEM_DCC_SUPPORT_DEFAULT SHMEM_DCC_SUPPORT_NONE
-
+ u32 ext_phy_fw_version2[PORT_MAX];
+ /*
+ * For backwards compatibility, if the mf_cfg_addr does not exist
+ * (the size field is smaller than 0xc) the mf_cfg resides at the
+ * end of struct shmem_region
+ */
};
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 0383e3066313..a07a3a6abd40 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -168,50 +168,19 @@
/**********************************************************/
/* INTERFACE */
/**********************************************************/
-#define CL45_WR_OVER_CL22(_bp, _port, _phy_addr, _bank, _addr, _val) \
- bnx2x_cl45_write(_bp, _port, 0, _phy_addr, \
- DEFAULT_PHY_DEV_ADDR, \
+
+#define CL45_WR_OVER_CL22(_bp, _phy, _bank, _addr, _val) \
+ bnx2x_cl45_write(_bp, _phy, \
+ (_phy)->def_md_devad, \
(_bank + (_addr & 0xf)), \
_val)
-#define CL45_RD_OVER_CL22(_bp, _port, _phy_addr, _bank, _addr, _val) \
- bnx2x_cl45_read(_bp, _port, 0, _phy_addr, \
- DEFAULT_PHY_DEV_ADDR, \
+#define CL45_RD_OVER_CL22(_bp, _phy, _bank, _addr, _val) \
+ bnx2x_cl45_read(_bp, _phy, \
+ (_phy)->def_md_devad, \
(_bank + (_addr & 0xf)), \
_val)
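The reworked macros fold the XGXS bank base and the low nibble of the register offset into one clause-45 register number, and take the device address from the per-PHY def_md_devad instead of the global DEFAULT_PHY_DEV_ADDR. The address composition is simply:

#include <stdint.h>

/* register number the macros hand to bnx2x_cl45_read/write */
static uint16_t bank_reg(uint16_t bank, uint16_t addr)
{
	return bank + (addr & 0xf);
}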
-static void bnx2x_set_serdes_access(struct link_params *params)
-{
- struct bnx2x *bp = params->bp;
- u32 emac_base = (params->port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-
- /* Set Clause 22 */
- REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + params->port*0x10, 1);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000);
- udelay(500);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f);
- udelay(500);
- /* Set Clause 45 */
- REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + params->port*0x10, 0);
-}
-static void bnx2x_set_phy_mdio(struct link_params *params, u8 phy_flags)
-{
- struct bnx2x *bp = params->bp;
-
- if (phy_flags & PHY_XGXS_FLAG) {
- REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST +
- params->port*0x18, 0);
- REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port*0x18,
- DEFAULT_PHY_DEV_ADDR);
- } else {
- bnx2x_set_serdes_access(params);
-
- REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD +
- params->port*0x10,
- DEFAULT_PHY_DEV_ADDR);
- }
-}
-
static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
u32 val = REG_RD(bp, reg);
@@ -527,162 +496,6 @@ static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars,
return 0;
}
-static void bnx2x_phy_deassert(struct link_params *params, u8 phy_flags)
-{
- struct bnx2x *bp = params->bp;
- u32 val;
-
- if (phy_flags & PHY_XGXS_FLAG) {
- DP(NETIF_MSG_LINK, "bnx2x_phy_deassert:XGXS\n");
- val = XGXS_RESET_BITS;
-
- } else { /* SerDes */
- DP(NETIF_MSG_LINK, "bnx2x_phy_deassert:SerDes\n");
- val = SERDES_RESET_BITS;
- }
-
- val = val << (params->port*16);
-
- /* reset and unreset the SerDes/XGXS */
- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
- val);
- udelay(500);
- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET,
- val);
- bnx2x_set_phy_mdio(params, phy_flags);
-}
-
-void bnx2x_link_status_update(struct link_params *params,
- struct link_vars *vars)
-{
- struct bnx2x *bp = params->bp;
- u8 link_10g;
- u8 port = params->port;
-
- if (params->switch_cfg == SWITCH_CFG_1G)
- vars->phy_flags = PHY_SERDES_FLAG;
- else
- vars->phy_flags = PHY_XGXS_FLAG;
- vars->link_status = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region,
- port_mb[port].link_status));
-
- vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
-
- if (vars->link_up) {
- DP(NETIF_MSG_LINK, "phy link up\n");
-
- vars->phy_link_up = 1;
- vars->duplex = DUPLEX_FULL;
- switch (vars->link_status &
- LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
- case LINK_10THD:
- vars->duplex = DUPLEX_HALF;
- /* fall thru */
- case LINK_10TFD:
- vars->line_speed = SPEED_10;
- break;
-
- case LINK_100TXHD:
- vars->duplex = DUPLEX_HALF;
- /* fall thru */
- case LINK_100T4:
- case LINK_100TXFD:
- vars->line_speed = SPEED_100;
- break;
-
- case LINK_1000THD:
- vars->duplex = DUPLEX_HALF;
- /* fall thru */
- case LINK_1000TFD:
- vars->line_speed = SPEED_1000;
- break;
-
- case LINK_2500THD:
- vars->duplex = DUPLEX_HALF;
- /* fall thru */
- case LINK_2500TFD:
- vars->line_speed = SPEED_2500;
- break;
-
- case LINK_10GTFD:
- vars->line_speed = SPEED_10000;
- break;
-
- case LINK_12GTFD:
- vars->line_speed = SPEED_12000;
- break;
-
- case LINK_12_5GTFD:
- vars->line_speed = SPEED_12500;
- break;
-
- case LINK_13GTFD:
- vars->line_speed = SPEED_13000;
- break;
-
- case LINK_15GTFD:
- vars->line_speed = SPEED_15000;
- break;
-
- case LINK_16GTFD:
- vars->line_speed = SPEED_16000;
- break;
-
- default:
- break;
- }
-
- if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
- vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX;
- else
- vars->flow_ctrl &= ~BNX2X_FLOW_CTRL_TX;
-
- if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED)
- vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX;
- else
- vars->flow_ctrl &= ~BNX2X_FLOW_CTRL_RX;
-
- if (vars->phy_flags & PHY_XGXS_FLAG) {
- if (vars->line_speed &&
- ((vars->line_speed == SPEED_10) ||
- (vars->line_speed == SPEED_100))) {
- vars->phy_flags |= PHY_SGMII_FLAG;
- } else {
- vars->phy_flags &= ~PHY_SGMII_FLAG;
- }
- }
-
- /* anything 10 and over uses the bmac */
- link_10g = ((vars->line_speed == SPEED_10000) ||
- (vars->line_speed == SPEED_12000) ||
- (vars->line_speed == SPEED_12500) ||
- (vars->line_speed == SPEED_13000) ||
- (vars->line_speed == SPEED_15000) ||
- (vars->line_speed == SPEED_16000));
- if (link_10g)
- vars->mac_type = MAC_TYPE_BMAC;
- else
- vars->mac_type = MAC_TYPE_EMAC;
-
- } else { /* link down */
- DP(NETIF_MSG_LINK, "phy link down\n");
-
- vars->phy_link_up = 0;
-
- vars->line_speed = 0;
- vars->duplex = DUPLEX_FULL;
- vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
-
- /* indicate no mac active */
- vars->mac_type = MAC_TYPE_NONE;
- }
-
- DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x\n",
- vars->link_status, vars->phy_link_up);
- DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n",
- vars->line_speed, vars->duplex, vars->flow_ctrl);
-}
static void bnx2x_update_mng(struct link_params *params, u32 link_status)
{
@@ -800,62 +613,69 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
return 0;
}
-static u32 bnx2x_get_emac_base(struct bnx2x *bp, u32 ext_phy_type, u8 port)
+static u32 bnx2x_get_emac_base(struct bnx2x *bp,
+ u32 mdc_mdio_access, u8 port)
{
- u32 emac_base;
-
- switch (ext_phy_type) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
- /* All MDC/MDIO is directed through single EMAC */
+ u32 emac_base = 0;
+ switch (mdc_mdio_access) {
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE:
+ break;
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0:
+ if (REG_RD(bp, NIG_REG_PORT_SWAP))
+ emac_base = GRCBASE_EMAC1;
+ else
+ emac_base = GRCBASE_EMAC0;
+ break;
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1:
if (REG_RD(bp, NIG_REG_PORT_SWAP))
emac_base = GRCBASE_EMAC0;
else
emac_base = GRCBASE_EMAC1;
break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH:
+ emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ break;
+ case SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED:
emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
break;
default:
- emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
break;
}
return emac_base;
}
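Condensed behavior of the rewritten lookup (a sketch: access codes are renumbered 1..4 here instead of the pre-shifted SHARED_HW_CFG values, and swap stands for the NIG_REG_PORT_SWAP read):

enum { BASE_NONE, BASE_EMAC0, BASE_EMAC1 };

static int emac_base(int access, int port, int swap)
{
	switch (access) {
	case 1: return swap ? BASE_EMAC1 : BASE_EMAC0;	/* ACCESS1_EMAC0 */
	case 2: return swap ? BASE_EMAC0 : BASE_EMAC1;	/* ACCESS1_EMAC1 */
	case 3: return port ? BASE_EMAC1 : BASE_EMAC0;	/* ACCESS1_BOTH */
	case 4: return port ? BASE_EMAC0 : BASE_EMAC1;	/* ACCESS1_SWAPPED */
	default: return BASE_NONE;	/* PHY_TYPE: no EMAC MDC/MDIO */
	}
}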
-u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
- u8 phy_addr, u8 devad, u16 reg, u16 val)
+u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 val)
{
u32 tmp, saved_mode;
u8 i, rc = 0;
- u32 mdio_ctrl = bnx2x_get_emac_base(bp, ext_phy_type, port);
/* set clause 45 mode, slow down the MDIO clock to 2.5MHz
* (a value of 49==0x31) and make sure that the AUTO poll is off
*/
- saved_mode = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+ saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
tmp = saved_mode & ~(EMAC_MDIO_MODE_AUTO_POLL |
EMAC_MDIO_MODE_CLOCK_CNT);
tmp |= (EMAC_MDIO_MODE_CLAUSE_45 |
(49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
- REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
- REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
+ REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
udelay(40);
/* address */
- tmp = ((phy_addr << 21) | (devad << 16) | reg |
+ tmp = ((phy->addr << 21) | (devad << 16) | reg |
EMAC_MDIO_COMM_COMMAND_ADDRESS |
EMAC_MDIO_COMM_START_BUSY);
- REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
for (i = 0; i < 50; i++) {
udelay(10);
- tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+ tmp = REG_RD(bp, phy->mdio_ctrl +
+ EMAC_REG_EMAC_MDIO_COMM);
if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
udelay(5);
break;
@@ -866,15 +686,15 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
rc = -EFAULT;
} else {
/* data */
- tmp = ((phy_addr << 21) | (devad << 16) | val |
+ tmp = ((phy->addr << 21) | (devad << 16) | val |
EMAC_MDIO_COMM_COMMAND_WRITE_45 |
EMAC_MDIO_COMM_START_BUSY);
- REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
for (i = 0; i < 50; i++) {
udelay(10);
- tmp = REG_RD(bp, mdio_ctrl +
+ tmp = REG_RD(bp, phy->mdio_ctrl +
EMAC_REG_EMAC_MDIO_COMM);
if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
udelay(5);
@@ -888,42 +708,41 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
}
/* Restore the saved mode */
- REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode);
return rc;
}
-u8 bnx2x_cl45_read(struct bnx2x *bp, u8 port, u32 ext_phy_type,
- u8 phy_addr, u8 devad, u16 reg, u16 *ret_val)
+u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 *ret_val)
{
u32 val, saved_mode;
u16 i;
u8 rc = 0;
- u32 mdio_ctrl = bnx2x_get_emac_base(bp, ext_phy_type, port);
/* set clause 45 mode, slow down the MDIO clock to 2.5MHz
* (a value of 49==0x31) and make sure that the AUTO poll is off
*/
- saved_mode = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
- val = saved_mode & ((EMAC_MDIO_MODE_AUTO_POLL |
+ saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+ val = saved_mode & ~((EMAC_MDIO_MODE_AUTO_POLL |
EMAC_MDIO_MODE_CLOCK_CNT));
val |= (EMAC_MDIO_MODE_CLAUSE_45 |
(49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
- REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
- REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
+ REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
udelay(40);
/* address */
- val = ((phy_addr << 21) | (devad << 16) | reg |
+ val = ((phy->addr << 21) | (devad << 16) | reg |
EMAC_MDIO_COMM_COMMAND_ADDRESS |
EMAC_MDIO_COMM_START_BUSY);
- REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
for (i = 0; i < 50; i++) {
udelay(10);
- val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+ val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
udelay(5);
break;
@@ -937,15 +756,15 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, u8 port, u32 ext_phy_type,
} else {
/* data */
- val = ((phy_addr << 21) | (devad << 16) |
+ val = ((phy->addr << 21) | (devad << 16) |
EMAC_MDIO_COMM_COMMAND_READ_45 |
EMAC_MDIO_COMM_START_BUSY);
- REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
for (i = 0; i < 50; i++) {
udelay(10);
- val = REG_RD(bp, mdio_ctrl +
+ val = REG_RD(bp, phy->mdio_ctrl +
EMAC_REG_EMAC_MDIO_COMM);
if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
*ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
@@ -961,13 +780,49 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, u8 port, u32 ext_phy_type,
}
/* Restore the saved mode */
- REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode);
return rc;
}
+u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
+ u8 devad, u16 reg, u16 *ret_val)
+{
+ u8 phy_index;
+ /**
+ * Probe for the phy according to the given phy_addr, and execute
+ * the read request on it
+ */
+ for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
+ if (params->phy[phy_index].addr == phy_addr) {
+ return bnx2x_cl45_read(params->bp,
+ &params->phy[phy_index], devad,
+ reg, ret_val);
+ }
+ }
+ return -EINVAL;
+}
+
+u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
+ u8 devad, u16 reg, u16 val)
+{
+ u8 phy_index;
+ /**
+ * Probe for the phy according to the given phy_addr, and execute
+ * the write request on it
+ */
+ for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
+ if (params->phy[phy_index].addr == phy_addr) {
+ return bnx2x_cl45_write(params->bp,
+ &params->phy[phy_index], devad,
+ reg, val);
+ }
+ }
+ return -EINVAL;
+}
+
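Both helpers above are the same linear probe over params->phy[], differing only in the operation they dispatch; factored out and simplified:

#include <stddef.h>
#include <stdint.h>

struct phy_stub { uint8_t addr; };	/* stand-in for struct bnx2x_phy */

static struct phy_stub *find_phy(struct phy_stub *phys, size_t num_phys,
				 uint8_t phy_addr)
{
	size_t i;

	for (i = 0; i < num_phys; i++)
		if (phys[i].addr == phy_addr)
			return &phys[i];
	return NULL;	/* callers map this to -EINVAL */
}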
static void bnx2x_set_aer_mmd(struct link_params *params,
- struct link_vars *vars)
+ struct bnx2x_phy *phy)
{
struct bnx2x *bp = params->bp;
u32 ser_lane;
@@ -977,16 +832,202 @@ static void bnx2x_set_aer_mmd(struct link_params *params,
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
- offset = (vars->phy_flags & PHY_XGXS_FLAG) ?
- (params->phy_addr + ser_lane) : 0;
+ offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ?
+ (phy->addr + ser_lane) : 0;
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, 0x3800 + offset);
}
-static void bnx2x_set_master_ln(struct link_params *params)
+/******************************************************************/
+/* Internal phy section */
+/******************************************************************/
+
+static void bnx2x_set_serdes_access(struct bnx2x *bp, u8 port)
+{
+ u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+
+ /* Set Clause 22 */
+ REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 1);
+ REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000);
+ udelay(500);
+ REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f);
+ udelay(500);
+ /* Set Clause 45 */
+ REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 0);
+}
+
+static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
+{
+ u32 val;
+
+ DP(NETIF_MSG_LINK, "bnx2x_serdes_deassert\n");
+
+ val = SERDES_RESET_BITS << (port*16);
+
+ /* reset and unreset the SerDes/XGXS */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
+ udelay(500);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
+
+ bnx2x_set_serdes_access(bp, port);
+
+ REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD +
+ port*0x10,
+ DEFAULT_PHY_DEV_ADDR);
+}
+
+static void bnx2x_xgxs_deassert(struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port;
+ u32 val;
+ DP(NETIF_MSG_LINK, "bnx2x_xgxs_deassert\n");
+ port = params->port;
+
+ val = XGXS_RESET_BITS << (port*16);
+
+ /* reset and unreset the SerDes/XGXS */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
+ udelay(500);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
+
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST +
+ port*0x18, 0);
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
+ params->phy[INT_PHY].def_md_devad);
+}
+
+
+void bnx2x_link_status_update(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 link_10g;
+ u8 port = params->port;
+
+ vars->link_status = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ port_mb[port].link_status));
+
+ vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
+
+ if (vars->link_up) {
+ DP(NETIF_MSG_LINK, "phy link up\n");
+
+ vars->phy_link_up = 1;
+ vars->duplex = DUPLEX_FULL;
+ switch (vars->link_status &
+ LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
+ case LINK_10THD:
+ vars->duplex = DUPLEX_HALF;
+ /* fall thru */
+ case LINK_10TFD:
+ vars->line_speed = SPEED_10;
+ break;
+
+ case LINK_100TXHD:
+ vars->duplex = DUPLEX_HALF;
+ /* fall thru */
+ case LINK_100T4:
+ case LINK_100TXFD:
+ vars->line_speed = SPEED_100;
+ break;
+
+ case LINK_1000THD:
+ vars->duplex = DUPLEX_HALF;
+ /* fall thru */
+ case LINK_1000TFD:
+ vars->line_speed = SPEED_1000;
+ break;
+
+ case LINK_2500THD:
+ vars->duplex = DUPLEX_HALF;
+ /* fall thru */
+ case LINK_2500TFD:
+ vars->line_speed = SPEED_2500;
+ break;
+
+ case LINK_10GTFD:
+ vars->line_speed = SPEED_10000;
+ break;
+
+ case LINK_12GTFD:
+ vars->line_speed = SPEED_12000;
+ break;
+
+ case LINK_12_5GTFD:
+ vars->line_speed = SPEED_12500;
+ break;
+
+ case LINK_13GTFD:
+ vars->line_speed = SPEED_13000;
+ break;
+
+ case LINK_15GTFD:
+ vars->line_speed = SPEED_15000;
+ break;
+
+ case LINK_16GTFD:
+ vars->line_speed = SPEED_16000;
+ break;
+
+ default:
+ break;
+ }
+ vars->flow_ctrl = 0;
+ if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
+ vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX;
+
+ if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED)
+ vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX;
+
+ if (!vars->flow_ctrl)
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+ if (vars->line_speed &&
+ ((vars->line_speed == SPEED_10) ||
+ (vars->line_speed == SPEED_100))) {
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ } else {
+ vars->phy_flags &= ~PHY_SGMII_FLAG;
+ }
+
+ /* anything 10 and over uses the bmac */
+ link_10g = ((vars->line_speed == SPEED_10000) ||
+ (vars->line_speed == SPEED_12000) ||
+ (vars->line_speed == SPEED_12500) ||
+ (vars->line_speed == SPEED_13000) ||
+ (vars->line_speed == SPEED_15000) ||
+ (vars->line_speed == SPEED_16000));
+ if (link_10g)
+ vars->mac_type = MAC_TYPE_BMAC;
+ else
+ vars->mac_type = MAC_TYPE_EMAC;
+
+ } else { /* link down */
+ DP(NETIF_MSG_LINK, "phy link down\n");
+
+ vars->phy_link_up = 0;
+
+ vars->line_speed = 0;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+ /* indicate no mac active */
+ vars->mac_type = MAC_TYPE_NONE;
+ }
+
+ DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x\n",
+ vars->link_status, vars->phy_link_up);
+ DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n",
+ vars->line_speed, vars->duplex, vars->flow_ctrl);
+}
+
+
+static void bnx2x_set_master_ln(struct link_params *params,
+ struct bnx2x_phy *phy)
{
struct bnx2x *bp = params->bp;
u16 new_master_ln, ser_lane;
@@ -995,47 +1036,44 @@ static void bnx2x_set_master_ln(struct link_params *params)
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
/* set the master_ln for AN */
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_XGXS_BLOCK2,
MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
&new_master_ln);
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_XGXS_BLOCK2 ,
MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
(new_master_ln | ser_lane));
}
-static u8 bnx2x_reset_unicore(struct link_params *params)
+static u8 bnx2x_reset_unicore(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 set_serdes)
{
struct bnx2x *bp = params->bp;
u16 mii_control;
u16 i;
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
/* reset the unicore */
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
(mii_control |
MDIO_COMBO_IEEO_MII_CONTROL_RESET));
- if (params->switch_cfg == SWITCH_CFG_1G)
- bnx2x_set_serdes_access(params);
+ if (set_serdes)
+ bnx2x_set_serdes_access(bp, params->port);
/* wait for the reset to self clear */
for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
udelay(5);
/* the reset erased the previous bank value */
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
&mii_control);
@@ -1051,7 +1089,8 @@ static u8 bnx2x_reset_unicore(struct link_params *params)
}
-static void bnx2x_set_swap_lanes(struct link_params *params)
+static void bnx2x_set_swap_lanes(struct link_params *params,
+ struct bnx2x_phy *phy)
{
struct bnx2x *bp = params->bp;
/* Each two bits represents a lane number:
@@ -1069,71 +1108,62 @@ static void bnx2x_set_swap_lanes(struct link_params *params)
PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
if (rx_lane_swap != 0x1b) {
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_XGXS_BLOCK2,
MDIO_XGXS_BLOCK2_RX_LN_SWAP,
(rx_lane_swap |
MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
} else {
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_XGXS_BLOCK2,
MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
}
if (tx_lane_swap != 0x1b) {
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_XGXS_BLOCK2,
MDIO_XGXS_BLOCK2_TX_LN_SWAP,
(tx_lane_swap |
MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
} else {
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_XGXS_BLOCK2,
MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
}
}
-static void bnx2x_set_parallel_detection(struct link_params *params,
- u8 phy_flags)
+static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 control2;
-
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
&control2);
- if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
else
control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
- DP(NETIF_MSG_LINK, "params->speed_cap_mask = 0x%x, control2 = 0x%x\n",
- params->speed_cap_mask, control2);
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n",
+ phy->speed_cap_mask, control2);
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
control2);
- if ((phy_flags & PHY_XGXS_FLAG) &&
- (params->speed_cap_mask &
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
+ (phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
DP(NETIF_MSG_LINK, "XGXS\n");
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_10G_PARALLEL_DETECT,
MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_10G_PARALLEL_DETECT,
MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
&control2);
@@ -1142,15 +1172,13 @@ static void bnx2x_set_parallel_detection(struct link_params *params,
control2 |=
MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_10G_PARALLEL_DETECT,
MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
control2);
/* Disable parallel detection of HiG */
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_XGXS_BLOCK2,
MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
@@ -1158,7 +1186,8 @@ static void bnx2x_set_parallel_detection(struct link_params *params,
}
}
-static void bnx2x_set_autoneg(struct link_params *params,
+static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
+ struct link_params *params,
struct link_vars *vars,
u8 enable_cl73)
{
@@ -1166,9 +1195,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
u16 reg_val;
/* CL37 Autoneg */
-
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
@@ -1179,15 +1206,13 @@ static void bnx2x_set_autoneg(struct link_params *params,
reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
/* Enable/Disable Autodetection */
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
@@ -1198,14 +1223,12 @@ static void bnx2x_set_autoneg(struct link_params *params,
else
reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
/* Enable TetonII and BAM autoneg */
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_BAM_NEXT_PAGE,
MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
&reg_val);
@@ -1218,23 +1241,20 @@ static void bnx2x_set_autoneg(struct link_params *params,
reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
}
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_BAM_NEXT_PAGE,
MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
reg_val);
if (enable_cl73) {
/* Enable Cl73 FSM status bits */
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_USERB0,
MDIO_CL73_USERB0_CL73_UCTRL,
0xe);
/* Enable BAM Station Manager*/
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_USERB0,
MDIO_CL73_USERB0_CL73_BAM_CTRL1,
MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
@@ -1242,20 +1262,18 @@ static void bnx2x_set_autoneg(struct link_params *params,
MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
/* Advertise CL73 link speeds */
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_IEEEB1,
MDIO_CL73_IEEEB1_AN_ADV2,
&reg_val);
- if (params->speed_cap_mask &
+ if (phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
- if (params->speed_cap_mask &
+ if (phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_IEEEB1,
MDIO_CL73_IEEEB1_AN_ADV2,
reg_val);
@@ -1266,38 +1284,35 @@ static void bnx2x_set_autoneg(struct link_params *params,
} else /* CL73 Autoneg Disabled */
reg_val = 0;
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_IEEEB0,
MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
}
/* program SerDes, forced speed */
-static void bnx2x_program_serdes(struct link_params *params,
+static void bnx2x_program_serdes(struct bnx2x_phy *phy,
+ struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 reg_val;
/* program duplex, disable autoneg and sgmii*/
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
- if (params->req_duplex == DUPLEX_FULL)
+ if (phy->req_duplex == DUPLEX_FULL)
reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
/* program speed
- needed only if the speed is greater than 1G (2.5G or 10G) */
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_MISC1, &reg_val);
/* clearing the speed value before setting the right speed */
@@ -1320,14 +1335,14 @@ static void bnx2x_program_serdes(struct link_params *params,
MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
}
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_MISC1, reg_val);
}
-static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params)
+static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 val = 0;
@@ -1335,29 +1350,28 @@ static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params)
/* configure the 48 bits for BAM AN */
/* set extended capabilities */
- if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
val |= MDIO_OVER_1G_UP1_2_5G;
- if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
val |= MDIO_OVER_1G_UP1_10G;
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_OVER_1G,
MDIO_OVER_1G_UP1, val);
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_OVER_1G,
MDIO_OVER_1G_UP3, 0x400);
}
-static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u16 *ieee_fc)
+static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
+ struct link_params *params, u16 *ieee_fc)
{
struct bnx2x *bp = params->bp;
*ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
/* resolve pause mode and advertisement
* Please refer to Table 28B-3 of the 802.3ab-1999 spec */
- switch (params->req_flow_ctrl) {
+ switch (phy->req_flow_ctrl) {
case BNX2X_FLOW_CTRL_AUTO:
if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) {
*ieee_fc |=
@@ -1385,30 +1399,30 @@ static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u16 *ieee_fc)
DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc);
}
-static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params,
+static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x_phy *phy,
+ struct link_params *params,
u16 ieee_fc)
{
struct bnx2x *bp = params->bp;
u16 val;
/* for AN, we are always publishing full duplex */
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_IEEEB1,
MDIO_CL73_IEEEB1_AN_ADV1, &val);
val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_IEEEB1,
MDIO_CL73_IEEEB1_AN_ADV1, val);
}
-static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73)
+static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u8 enable_cl73)
{
struct bnx2x *bp = params->bp;
u16 mii_control;
@@ -1417,14 +1431,12 @@ static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73)
/* Enable and restart BAM/CL37 aneg */
if (enable_cl73) {
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_IEEEB0,
MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
&mii_control);
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_IEEEB0,
MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
(mii_control |
@@ -1432,16 +1444,14 @@ static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73)
MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
} else {
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
&mii_control);
DP(NETIF_MSG_LINK,
"bnx2x_restart_autoneg mii_control before = 0x%x\n",
mii_control);
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
(mii_control |
@@ -1450,7 +1460,8 @@ static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73)
}
}
-static void bnx2x_initialize_sgmii_process(struct link_params *params,
+static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
+ struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
@@ -1458,8 +1469,7 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params,
/* in SGMII mode, the unicore is always slave */
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
&control1);
@@ -1468,8 +1478,7 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params,
control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
control1);
@@ -1479,8 +1488,7 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params,
/* set speed, disable autoneg */
u16 mii_control;
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
&mii_control);
@@ -1508,18 +1516,17 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params,
}
/* setting the full duplex */
- if (params->req_duplex == DUPLEX_FULL)
+ if (phy->req_duplex == DUPLEX_FULL)
mii_control |=
MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
mii_control);
} else { /* AN mode */
/* enable and restart AN */
- bnx2x_restart_autoneg(params, 0);
+ bnx2x_restart_autoneg(phy, params, 0);
}
}
@@ -1549,91 +1556,24 @@ static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
default:
break;
}
+ if (pause_result & (1<<0))
+ vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE;
+ if (pause_result & (1<<1))
+ vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE;
}
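pause_result carries the local ASM_DIR/PAUSE advertisement in bits 3:2 and the link partner's in bits 1:0; restating the resolution stand-alone, with illustrative FC_* values:

#include <stdint.h>

#define FC_NONE 0
#define FC_TX   1
#define FC_RX   2
#define FC_BOTH (FC_TX | FC_RX)

static int resolve_pause(uint8_t pause_result)
{
	switch (pause_result) {
	case 0xb:		/* local asym only, partner both */
		return FC_TX;
	case 0xe:		/* local both, partner asym only */
		return FC_RX;
	case 0x5: case 0x7:
	case 0xd: case 0xf:	/* both sides advertise PAUSE */
		return FC_BOTH;
	default:
		return FC_NONE;
	}
}

The two added lines additionally export the partner's raw advertisement bits into link_status, so callers can report them without re-reading the PHY.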
-static u8 bnx2x_ext_phy_resolve_fc(struct link_params *params,
- struct link_vars *vars)
-{
- struct bnx2x *bp = params->bp;
- u8 ext_phy_addr;
- u16 ld_pause; /* local */
- u16 lp_pause; /* link partner */
- u16 an_complete; /* AN complete */
- u16 pause_result;
- u8 ret = 0;
- u32 ext_phy_type;
- u8 port = params->port;
- ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
- /* read twice */
-
- bnx2x_cl45_read(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_STATUS, &an_complete);
- bnx2x_cl45_read(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_STATUS, &an_complete);
-
- if (an_complete & MDIO_AN_REG_STATUS_AN_COMPLETE) {
- ret = 1;
- bnx2x_cl45_read(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV_PAUSE, &ld_pause);
- bnx2x_cl45_read(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
- pause_result = (ld_pause &
- MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
- pause_result |= (lp_pause &
- MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
- DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
- pause_result);
- bnx2x_pause_resolve(vars, pause_result);
- if (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE &&
- ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
- bnx2x_cl45_read(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_FC_LD, &ld_pause);
-
- bnx2x_cl45_read(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_FC_LP, &lp_pause);
- pause_result = (ld_pause &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
- pause_result |= (lp_pause &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
-
- bnx2x_pause_resolve(vars, pause_result);
- DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
- pause_result);
- }
- }
- return ret;
-}
-
-static u8 bnx2x_direct_parallel_detect_used(struct link_params *params)
+static u8 bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 pd_10g, status2_1000x;
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ if (phy->req_line_speed != SPEED_AUTO_NEG)
+ return 0;
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
&status2_1000x);
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
&status2_1000x);
@@ -1643,8 +1583,7 @@ static u8 bnx2x_direct_parallel_detect_used(struct link_params *params)
return 1;
}
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_10G_PARALLEL_DETECT,
MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
&pd_10g);
@@ -1657,9 +1596,10 @@ static u8 bnx2x_direct_parallel_detect_used(struct link_params *params)
return 0;
}
-static void bnx2x_flow_ctrl_resolve(struct link_params *params,
- struct link_vars *vars,
- u32 gp_status)
+static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars,
+ u32 gp_status)
{
struct bnx2x *bp = params->bp;
u16 ld_pause; /* local driver */
@@ -1669,12 +1609,13 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
/* resolve from gp_status in case of AN complete and not sgmii */
- if ((params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
- (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
- (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
- (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {
- if (bnx2x_direct_parallel_detect_used(params)) {
+ if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
+ vars->flow_ctrl = phy->req_flow_ctrl;
+ else if (phy->req_line_speed != SPEED_AUTO_NEG)
+ vars->flow_ctrl = params->req_fc_auto_adv;
+ else if ((gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
+ (!(vars->phy_flags & PHY_SGMII_FLAG))) {
+ if (bnx2x_direct_parallel_detect_used(phy, params)) {
vars->flow_ctrl = params->req_fc_auto_adv;
return;
}
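The resolve path is now a strict priority ladder; compressed to a sketch (FC_* values illustrative, the SGMII check and the CL73-vs-CL37 register reads folded into an_result, the parallel-detect shortcut folded into fc_auto_adv):

#include <stdint.h>

#define FC_AUTO 0
#define FC_NONE 4
#define SPEED_AUTO_NEG 0

static uint32_t resolve_fc(uint32_t req_fc, int req_speed,
			   uint32_t fc_auto_adv, int an_complete,
			   uint32_t an_result)
{
	if (req_fc != FC_AUTO)
		return req_fc;		/* 1. explicit user request */
	if (req_speed != SPEED_AUTO_NEG)
		return fc_auto_adv;	/* 2. forced speed: nothing negotiated */
	if (an_complete)
		return an_result;	/* 3. AN (or parallel detect) outcome */
	return FC_NONE;			/* 4. nothing resolved */
}

Note that the external-PHY branch (bnx2x_ext_phy_resolve_fc) is dropped from this generic path entirely.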
@@ -1684,13 +1625,11 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
(MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_IEEEB1,
MDIO_CL73_IEEEB1_AN_ADV1,
&ld_pause);
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_IEEEB1,
MDIO_CL73_IEEEB1_AN_LP_ADV1,
&lp_pause);
@@ -1703,14 +1642,11 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
pause_result);
} else {
-
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
&ld_pause);
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
&lp_pause);
@@ -1722,26 +1658,18 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
pause_result);
}
bnx2x_pause_resolve(vars, pause_result);
- } else if ((params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
- (bnx2x_ext_phy_resolve_fc(params, vars))) {
- return;
- } else {
- if (params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
- vars->flow_ctrl = params->req_fc_auto_adv;
- else
- vars->flow_ctrl = params->req_flow_ctrl;
}
DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl);
}
-static void bnx2x_check_fallback_to_cl37(struct link_params *params)
+static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 rx_status, ustat_val, cl37_fsm_recieved;
DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
/* Step 1: Make sure signal is detected */
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_RX0,
MDIO_RX0_RX_STATUS,
&rx_status);
@@ -1749,16 +1677,14 @@ static void bnx2x_check_fallback_to_cl37(struct link_params *params)
(MDIO_RX0_RX_STATUS_SIGDET)) {
DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73."
"rx_status(0x80b0) = 0x%x\n", rx_status);
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_IEEEB0,
MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
return;
}
/* Step 2: Check CL73 state machine */
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_USERB0,
MDIO_CL73_USERB0_CL73_USTAT1,
&ustat_val);
@@ -1773,8 +1699,7 @@ static void bnx2x_check_fallback_to_cl37(struct link_params *params)
}
/* Step 3: Check CL37 Message Pages received to indicate LP
supports only CL37 */
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_REMOTE_PHY,
MDIO_REMOTE_PHY_MISC_RX_STATUS,
&cl37_fsm_recieved);
@@ -1792,25 +1717,45 @@ static void bnx2x_check_fallback_to_cl37(struct link_params *params)
connected to a device which does not support cl73, but does support
cl37 BAM. In this case we disable cl73 and restart cl37 auto-neg */
/* Disable CL73 */
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
MDIO_REG_BANK_CL73_IEEEB0,
MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
0);
/* Restart CL37 autoneg */
- bnx2x_restart_autoneg(params, 0);
+ bnx2x_restart_autoneg(phy, params, 0);
DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
}
-static u8 bnx2x_link_settings_status(struct link_params *params,
- struct link_vars *vars,
- u32 gp_status,
- u8 ext_phy_link_up)
+
+static void bnx2x_xgxs_an_resolve(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars,
+ u32 gp_status)
+{
+ if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
+ vars->link_status |=
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+
+ if (bnx2x_direct_parallel_detect_used(phy, params))
+ vars->link_status |=
+ LINK_STATUS_PARALLEL_DETECTION_USED;
+}
+
+static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u16 new_line_speed;
+ u16 new_line_speed, gp_status;
u8 rc = 0;
- vars->link_status = 0;
+ /* Read gp_status */
+ CL45_RD_OVER_CL22(bp, phy,
+ MDIO_REG_BANK_GP_STATUS,
+ MDIO_GP_STATUS_TOP_AN_STATUS1,
+ &gp_status);
+
+ if (phy->req_line_speed == SPEED_AUTO_NEG)
+ vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
DP(NETIF_MSG_LINK, "phy link up gp_status=0x%x\n",
gp_status);
@@ -1823,7 +1768,12 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
else
vars->duplex = DUPLEX_HALF;
- bnx2x_flow_ctrl_resolve(params, vars, gp_status);
+ if (SINGLE_MEDIA_DIRECT(params)) {
+ bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status);
+ if (phy->req_line_speed == SPEED_AUTO_NEG)
+ bnx2x_xgxs_an_resolve(phy, params, vars,
+ gp_status);
+ }
switch (gp_status & GP_STATUS_SPEED_MASK) {
case GP_STATUS_10M:
@@ -1905,56 +1855,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
return -EINVAL;
}
- /* Upon link speed change set the NIG into drain mode.
- Comes to deals with possible FIFO glitch due to clk change
- when speed is decreased without link down indicator */
- if (new_line_speed != vars->line_speed) {
- if (XGXS_EXT_PHY_TYPE(params->ext_phy_config) !=
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT &&
- ext_phy_link_up) {
- DP(NETIF_MSG_LINK, "Internal link speed %d is"
- " different than the external"
- " link speed %d\n", new_line_speed,
- vars->line_speed);
- vars->phy_link_up = 0;
- return 0;
- }
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
- + params->port*4, 0);
- msleep(1);
- }
vars->line_speed = new_line_speed;
- vars->link_status |= LINK_STATUS_SERDES_LINK;
-
- if ((params->req_line_speed == SPEED_AUTO_NEG) &&
- ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
- (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
- (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) ||
- (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726))) {
- vars->autoneg = AUTO_NEG_ENABLED;
-
- if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
- vars->autoneg |= AUTO_NEG_COMPLETE;
- vars->link_status |=
- LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
- }
-
- vars->autoneg |= AUTO_NEG_PARALLEL_DETECTION_USED;
- vars->link_status |=
- LINK_STATUS_PARALLEL_DETECTION_USED;
-
- }
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
- vars->link_status |=
- LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
-
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
- vars->link_status |=
- LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
} else { /* link_down */
DP(NETIF_MSG_LINK, "phy link down\n");
@@ -1963,38 +1864,32 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
vars->duplex = DUPLEX_FULL;
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- vars->autoneg = AUTO_NEG_DISABLED;
vars->mac_type = MAC_TYPE_NONE;
- if ((params->req_line_speed == SPEED_AUTO_NEG) &&
- ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT))) {
+ if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ SINGLE_MEDIA_DIRECT(params)) {
/* Check signal is detected */
- bnx2x_check_fallback_to_cl37(params);
+ bnx2x_check_fallback_to_cl37(phy, params);
}
}
DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x\n",
gp_status, vars->phy_link_up, vars->line_speed);
- DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x"
- " autoneg 0x%x\n",
- vars->duplex,
- vars->flow_ctrl, vars->autoneg);
- DP(NETIF_MSG_LINK, "link_status 0x%x\n", vars->link_status);
-
+ DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
+ vars->duplex, vars->flow_ctrl, vars->link_status);
return rc;
}
static void bnx2x_set_gmii_tx_driver(struct link_params *params)
{
struct bnx2x *bp = params->bp;
+ struct bnx2x_phy *phy = &params->phy[INT_PHY];
u16 lp_up2;
u16 tx_driver;
u16 bank;
/* read precomp */
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
MDIO_REG_BANK_OVER_1G,
MDIO_OVER_1G_LP_UP2, &lp_up2);
@@ -2008,8 +1903,7 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_RD_OVER_CL22(bp, phy,
bank,
MDIO_TX0_TX_DRIVER, &tx_driver);
@@ -2018,8 +1912,7 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
(tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
tx_driver |= lp_up2;
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
+ CL45_WR_OVER_CL22(bp, phy,
bank,
MDIO_TX0_TX_DRIVER, tx_driver);
}
@@ -2027,7 +1920,7 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
}
static u8 bnx2x_emac_program(struct link_params *params,
- u32 line_speed, u32 duplex)
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
@@ -2039,7 +1932,7 @@ static u8 bnx2x_emac_program(struct link_params *params,
(EMAC_MODE_25G_MODE |
EMAC_MODE_PORT_MII_10M |
EMAC_MODE_HALF_DUPLEX));
- switch (line_speed) {
+ switch (vars->line_speed) {
case SPEED_10:
mode |= EMAC_MODE_PORT_MII_10M;
break;
@@ -2058,371 +1951,1369 @@ static u8 bnx2x_emac_program(struct link_params *params,
default:
/* 10G not valid for EMAC */
- DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", line_speed);
+ DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
+ vars->line_speed);
return -EINVAL;
}
- if (duplex == DUPLEX_HALF)
+ if (vars->duplex == DUPLEX_HALF)
mode |= EMAC_MODE_HALF_DUPLEX;
bnx2x_bits_en(bp,
GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
mode);
- bnx2x_set_led(params, LED_MODE_OPER, line_speed);
+ bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
return 0;
}
-/*****************************************************************************/
-/* External Phy section */
-/*****************************************************************************/
-void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
+static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
+ struct link_params *params)
{
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
- msleep(1);
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+
+ u16 bank, i = 0;
+ struct bnx2x *bp = params->bp;
+
+ for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
+ bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
+ CL45_WR_OVER_CL22(bp, phy,
+ bank,
+ MDIO_RX0_RX_EQ_BOOST,
+ phy->rx_preemphasis[i]);
+ }
+
+ for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
+ bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
+ CL45_WR_OVER_CL22(bp, phy,
+ bank,
+ MDIO_TX0_TX_DRIVER,
+ phy->tx_preemphasis[i]);
+ }
}
-static void bnx2x_ext_phy_reset(struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u32 ext_phy_type;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
+ u8 enable_cl73 = (SINGLE_MEDIA_DIRECT(params) ||
+ (params->loopback_mode == LOOPBACK_XGXS));
+ if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
+ if (SINGLE_MEDIA_DIRECT(params) &&
+ (params->feature_config_flags &
+ FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
+ bnx2x_set_preemphasis(phy, params);
- DP(NETIF_MSG_LINK, "Port %x: bnx2x_ext_phy_reset\n", params->port);
- ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
- /* The PHY reset is controled by GPIO 1
- * Give it 1ms of reset pulse
- */
- if (vars->phy_flags & PHY_XGXS_FLAG) {
+ /* forced speed requested? */
+ if (vars->line_speed != SPEED_AUTO_NEG ||
+ (SINGLE_MEDIA_DIRECT(params) &&
+ params->loopback_mode == LOOPBACK_EXT)) {
+ DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
- switch (ext_phy_type) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
- DP(NETIF_MSG_LINK, "XGXS Direct\n");
- break;
+ /* disable autoneg */
+ bnx2x_set_autoneg(phy, params, vars, 0);
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
- DP(NETIF_MSG_LINK, "XGXS 8705/8706\n");
+ /* program speed and duplex */
+ bnx2x_program_serdes(phy, params, vars);
- /* Restore normal power mode*/
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH,
- params->port);
+ } else { /* AN_mode */
+ DP(NETIF_MSG_LINK, "not SGMII, AN\n");
- /* HW reset */
- bnx2x_ext_phy_hw_reset(bp, params->port);
+ /* AN enabled */
+ bnx2x_set_brcm_cl37_advertisment(phy, params);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL, 0xa040);
- break;
+ /* program duplex & pause advertisement (for aneg) */
+ bnx2x_set_ieee_aneg_advertisment(phy, params,
+ vars->ieee_fc);
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
- break;
+ /* enable autoneg */
+ bnx2x_set_autoneg(phy, params, vars, enable_cl73);
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+ /* enable and restart AN */
+ bnx2x_restart_autoneg(phy, params, enable_cl73);
+ }
- /* Restore normal power mode*/
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH,
- params->port);
+ } else { /* SGMII mode */
+ DP(NETIF_MSG_LINK, "SGMII\n");
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH,
- params->port);
+ bnx2x_initialize_sgmii_process(phy, params, vars);
+ }
+}
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- 1<<15);
- break;
+static u8 bnx2x_init_serdes(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u8 rc;
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+ bnx2x_set_aer_mmd(params, phy);
+ rc = bnx2x_reset_unicore(params, phy, 1);
+	/* reset the SerDes and wait for the reset bit to return low */
+ if (rc != 0)
+ return rc;
+ bnx2x_set_aer_mmd(params, phy);
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
- DP(NETIF_MSG_LINK, "XGXS 8072\n");
+ return rc;
+}
- /* Unset Low Power Mode and SW reset */
- /* Restore normal power mode*/
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH,
- params->port);
+static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u8 rc;
+ vars->phy_flags = PHY_XGXS_FLAG;
+ if ((phy->req_line_speed &&
+ ((phy->req_line_speed == SPEED_100) ||
+ (phy->req_line_speed == SPEED_10))) ||
+ (!phy->req_line_speed &&
+ (phy->speed_cap_mask >=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
+ (phy->speed_cap_mask <
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
+ ))
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ else
+ vars->phy_flags &= ~PHY_SGMII_FLAG;
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- 1<<15);
- break;
+ bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+ bnx2x_set_aer_mmd(params, phy);
+ bnx2x_set_master_ln(params, phy);
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
- DP(NETIF_MSG_LINK, "XGXS 8073\n");
+ rc = bnx2x_reset_unicore(params, phy, 0);
+	/* reset the SerDes and wait for the reset bit to return low */
+ if (rc != 0)
+ return rc;
- /* Restore normal power mode*/
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH,
- params->port);
+ bnx2x_set_aer_mmd(params, phy);
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH,
- params->port);
+ /* setting the masterLn_def again after the reset */
+ bnx2x_set_master_ln(params, phy);
+ bnx2x_set_swap_lanes(params, phy);
+
+ return rc;
+}
+
+static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
+ struct bnx2x_phy *phy)
+{
+ u16 cnt, ctrl;
+	/* Wait for soft reset to get cleared up to 1 sec */
+ for (cnt = 0; cnt < 1000; cnt++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, &ctrl);
+ if (!(ctrl & (1<<15)))
break;
+ msleep(1);
+ }
+ DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt);
+ return cnt;
+}
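A minimal usage sketch (editor's illustration, not part of the patch): the helper follows a PMA soft-reset write, polling bit 15 of MDIO_PMA_REG_CTRL until the PHY clears it:

	/* issue the soft reset, then poll until the PHY drops bit 15 */
	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
	bnx2x_wait_reset_complete(bp, phy);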
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
- DP(NETIF_MSG_LINK, "XGXS SFX7101\n");
+static void bnx2x_link_int_enable(struct link_params *params)
+{
+ u8 port = params->port;
+ u32 mask;
+ struct bnx2x *bp = params->bp;
- /* Restore normal power mode*/
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH,
- params->port);
+ /* setting the status to report on link up
+ for either XGXS or SerDes */
- /* HW reset */
- bnx2x_ext_phy_hw_reset(bp, params->port);
- break;
+ if (params->switch_cfg == SWITCH_CFG_10G) {
+ mask = (NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_XGXS0_LINK_STATUS);
+ DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
+ if (!(SINGLE_MEDIA_DIRECT(params)) &&
+ params->phy[INT_PHY].type !=
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) {
+ mask |= NIG_MASK_MI_INT;
+ DP(NETIF_MSG_LINK, "enabled external phy int\n");
+ }
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
- /* Restore normal power mode*/
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH,
- params->port);
+ } else { /* SerDes */
+ mask = NIG_MASK_SERDES0_LINK_STATUS;
+ DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
+ if (!(SINGLE_MEDIA_DIRECT(params)) &&
+ params->phy[INT_PHY].type !=
+ PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN) {
+ mask |= NIG_MASK_MI_INT;
+ DP(NETIF_MSG_LINK, "enabled external phy int\n");
+ }
+ }
+ bnx2x_bits_en(bp,
+ NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+ mask);
- /* HW reset */
- bnx2x_ext_phy_hw_reset(bp, params->port);
+ DP(NETIF_MSG_LINK, "port %x, is_xgxs %x, int_status 0x%x\n", port,
+ (params->switch_cfg == SWITCH_CFG_10G),
+ REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
+ DP(NETIF_MSG_LINK, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x\n",
+ REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
+ REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
+ REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS+port*0x3c));
+ DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
+ REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
+ REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
+}
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- 1<<15);
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
- DP(NETIF_MSG_LINK, "XGXS PHY Failure detected\n");
- break;
+static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
+ u8 exp_mi_int)
+{
+ u32 latch_status = 0;
- default:
- DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
- params->ext_phy_config);
- break;
+	/**
+	 * Disable the MI INT (external phy int) by writing 1 to the
+	 * status register. The link-down indication is an active-high
+	 * signal, so in this case we need to write the status to clear
+	 * the XOR
+	 */
+ /* Read Latched signals */
+ latch_status = REG_RD(bp,
+ NIG_REG_LATCH_STATUS_0 + port*8);
+ DP(NETIF_MSG_LINK, "latch_status = 0x%x\n", latch_status);
+ /* Handle only those with latched-signal=up.*/
+ if (exp_mi_int)
+ bnx2x_bits_en(bp,
+ NIG_REG_STATUS_INTERRUPT_PORT0
+ + port*4,
+ NIG_STATUS_EMAC0_MI_INT);
+ else
+ bnx2x_bits_dis(bp,
+ NIG_REG_STATUS_INTERRUPT_PORT0
+ + port*4,
+ NIG_STATUS_EMAC0_MI_INT);
+
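+	/* Bit 0 of the latch register holds the latched MI interrupt;
+	 * writing it back below re-arms the latch for the next edge */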
+ if (latch_status & 1) {
+
+ /* For all latched-signal=up : Re-Arm Latch signals */
+ REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
+ (latch_status & 0xfffe) | (latch_status & 1));
+ }
+	/* For all latched-signal=up, write original_signal to status */
+}
+
+static void bnx2x_link_int_ack(struct link_params *params,
+ struct link_vars *vars, u8 is_10g)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+
+	/* first reset all status
+	 * we assume only one line will be changed at a time */
+ bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
+ (NIG_STATUS_XGXS0_LINK10G |
+ NIG_STATUS_XGXS0_LINK_STATUS |
+ NIG_STATUS_SERDES0_LINK_STATUS));
+ if (vars->phy_link_up) {
+ if (is_10g) {
+ /* Disable the 10G link interrupt
+ * by writing 1 to the status register
+ */
+ DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
+ bnx2x_bits_en(bp,
+ NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
+ NIG_STATUS_XGXS0_LINK10G);
+
+ } else if (params->switch_cfg == SWITCH_CFG_10G) {
+ /* Disable the link interrupt
+ * by writing 1 to the relevant lane
+ * in the status register
+ */
+ u32 ser_lane = ((params->lane_config &
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+
+ DP(NETIF_MSG_LINK, "%d speed XGXS phy link up\n",
+ vars->line_speed);
+ bnx2x_bits_en(bp,
+ NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
+ ((1 << ser_lane) <<
+ NIG_STATUS_XGXS0_LINK_STATUS_SIZE));
+
+ } else { /* SerDes */
+ DP(NETIF_MSG_LINK, "SerDes phy link up\n");
+ /* Disable the link interrupt
+ * by writing 1 to the status register
+ */
+ bnx2x_bits_en(bp,
+ NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
+ NIG_STATUS_SERDES0_LINK_STATUS);
}
- } else { /* SerDes */
- ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
- switch (ext_phy_type) {
- case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
- DP(NETIF_MSG_LINK, "SerDes Direct\n");
- break;
+ }
+}
+
+static u8 bnx2x_format_ver(u32 num, u8 *str, u16 *len)
+{
+ u8 *str_ptr = str;
+ u32 mask = 0xf0000000;
+ u8 shift = 8*4;
+ u8 digit;
+ u8 remove_leading_zeros = 1;
+ if (*len < 10) {
+		/* Need at least 10 chars for this format */
+ *str_ptr = '\0';
+ (*len)--;
+ return -EINVAL;
+ }
+ while (shift > 0) {
+
+ shift -= 4;
+ digit = ((num & mask) >> shift);
+ if (digit == 0 && remove_leading_zeros) {
+ mask = mask >> 4;
+ continue;
+ } else if (digit < 0xa)
+ *str_ptr = digit + '0';
+ else
+ *str_ptr = digit - 0xa + 'a';
+ remove_leading_zeros = 0;
+ str_ptr++;
+ (*len)--;
+ mask = mask >> 4;
+ if (shift == 4*4) {
+ *str_ptr = '.';
+ str_ptr++;
+ (*len)--;
+ remove_leading_zeros = 1;
+ }
+ }
+ return 0;
+}
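Worked example (an editor's sketch, not part of the patch): bnx2x_format_ver() emits the high 16 bits, a dot, then the low 16 bits, in lowercase hex with leading zeros stripped:

	u8 ver[16];
	u16 len = sizeof(ver);
	/* 0x01050E02 formats as "105.e02" */
	bnx2x_format_ver(0x01050E02, ver, &len);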
+
+
+static u8 bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
+{
+ str[0] = '\0';
+ (*len)--;
+ return 0;
+}
+
+u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
+ u8 *version, u16 len)
+{
+ struct bnx2x *bp;
+ u32 spirom_ver = 0;
+ u8 status = 0;
+ u8 *ver_p = version;
+ u16 remain_len = len;
+ if (version == NULL || params == NULL)
+ return -EINVAL;
+ bp = params->bp;
+
+ /* Extract first external phy*/
+ version[0] = '\0';
+ spirom_ver = REG_RD(bp, params->phy[EXT_PHY1].ver_addr);
+
+ if (params->phy[EXT_PHY1].format_fw_ver) {
+ status |= params->phy[EXT_PHY1].format_fw_ver(spirom_ver,
+ ver_p,
+ &remain_len);
+ ver_p += (len - remain_len);
+ }
+ if ((params->num_phys == MAX_PHYS) &&
+ (params->phy[EXT_PHY2].ver_addr != 0)) {
+ spirom_ver = REG_RD(bp,
+ params->phy[EXT_PHY2].ver_addr);
+ if (params->phy[EXT_PHY2].format_fw_ver) {
+ *ver_p = '/';
+ ver_p++;
+ remain_len--;
+ status |= params->phy[EXT_PHY2].format_fw_ver(
+ spirom_ver,
+ ver_p,
+ &remain_len);
+ ver_p = version + (len - remain_len);
+ }
+ }
+ *ver_p = '\0';
+ return status;
+}
+
+static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u8 port = params->port;
+ struct bnx2x *bp = params->bp;
+
+ if (phy->req_line_speed != SPEED_1000) {
+ u32 md_devad;
+
+ DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
+
+ /* change the uni_phy_addr in the nig */
+ md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
+ port*0x18));
+
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
+
+ bnx2x_cl45_write(bp, phy,
+ 5,
+ (MDIO_REG_BANK_AER_BLOCK +
+ (MDIO_AER_BLOCK_AER_REG & 0xf)),
+ 0x2800);
+
+ bnx2x_cl45_write(bp, phy,
+ 5,
+ (MDIO_REG_BANK_CL73_IEEEB0 +
+ (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
+ 0x6041);
+ msleep(200);
+ /* set aer mmd back */
+ bnx2x_set_aer_mmd(params, phy);
+
+ /* and md_devad */
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
+ md_devad);
+
+ } else {
+ u16 mii_ctrl;
+ DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
+ bnx2x_cl45_read(bp, phy, 5,
+ (MDIO_REG_BANK_COMBO_IEEE0 +
+ (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
+ &mii_ctrl);
+ bnx2x_cl45_write(bp, phy, 5,
+ (MDIO_REG_BANK_COMBO_IEEE0 +
+ (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
+ mii_ctrl |
+ MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK);
+ }
+}
+
+/*
+ *------------------------------------------------------------------------
+ * bnx2x_override_led_value -
+ *
+ * Override the led value of the requested led
+ *
+ *------------------------------------------------------------------------
+ */
+u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port,
+ u32 led_idx, u32 value)
+{
+ u32 reg_val;
- case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
- DP(NETIF_MSG_LINK, "SerDes 5482\n");
- bnx2x_ext_phy_hw_reset(bp, params->port);
+ /* If port 0 then use EMAC0, else use EMAC1*/
+ u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+
+ DP(NETIF_MSG_LINK,
+ "bnx2x_override_led_value() port %x led_idx %d value %d\n",
+ port, led_idx, value);
+
+ switch (led_idx) {
+ case 0: /* 10MB led */
+ /* Read the current value of the LED register in
+ the EMAC block */
+ reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
+ /* Set the OVERRIDE bit to 1 */
+ reg_val |= EMAC_LED_OVERRIDE;
+ /* If value is 1, set the 10M_OVERRIDE bit,
+ otherwise reset it.*/
+ reg_val = (value == 1) ? (reg_val | EMAC_LED_10MB_OVERRIDE) :
+ (reg_val & ~EMAC_LED_10MB_OVERRIDE);
+ REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
+ break;
+	case 1: /* 100MB led */
+		/* Read the current value of the LED register in
+		the EMAC block */
+ reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
+ /* Set the OVERRIDE bit to 1 */
+ reg_val |= EMAC_LED_OVERRIDE;
+ /* If value is 1, set the 100M_OVERRIDE bit,
+ otherwise reset it.*/
+ reg_val = (value == 1) ? (reg_val | EMAC_LED_100MB_OVERRIDE) :
+ (reg_val & ~EMAC_LED_100MB_OVERRIDE);
+ REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
+ break;
+ case 2: /* 1000MB led */
+ /* Read the current value of the LED register in the
+ EMAC block */
+ reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
+ /* Set the OVERRIDE bit to 1 */
+ reg_val |= EMAC_LED_OVERRIDE;
+ /* If value is 1, set the 1000M_OVERRIDE bit, otherwise
+ reset it. */
+ reg_val = (value == 1) ? (reg_val | EMAC_LED_1000MB_OVERRIDE) :
+ (reg_val & ~EMAC_LED_1000MB_OVERRIDE);
+ REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
+ break;
+ case 3: /* 2500MB led */
+ /* Read the current value of the LED register in the
+ EMAC block*/
+ reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
+ /* Set the OVERRIDE bit to 1 */
+ reg_val |= EMAC_LED_OVERRIDE;
+ /* If value is 1, set the 2500M_OVERRIDE bit, otherwise
+ reset it.*/
+ reg_val = (value == 1) ? (reg_val | EMAC_LED_2500MB_OVERRIDE) :
+ (reg_val & ~EMAC_LED_2500MB_OVERRIDE);
+ REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
+ break;
+	case 4: /* 10G led */
+ if (port == 0) {
+ REG_WR(bp, NIG_REG_LED_10G_P0,
+ value);
+ } else {
+ REG_WR(bp, NIG_REG_LED_10G_P1,
+ value);
+ }
+ break;
+ case 5: /* TRAFFIC led */
+ /* Find if the traffic control is via BMAC or EMAC */
+ if (port == 0)
+ reg_val = REG_RD(bp, NIG_REG_NIG_EMAC0_EN);
+ else
+ reg_val = REG_RD(bp, NIG_REG_NIG_EMAC1_EN);
+
+ /* Override the traffic led in the EMAC:*/
+ if (reg_val == 1) {
+ /* Read the current value of the LED register in
+ the EMAC block */
+ reg_val = REG_RD(bp, emac_base +
+ EMAC_REG_EMAC_LED);
+ /* Set the TRAFFIC_OVERRIDE bit to 1 */
+ reg_val |= EMAC_LED_OVERRIDE;
+ /* If value is 1, set the TRAFFIC bit, otherwise
+ reset it.*/
+ reg_val = (value == 1) ? (reg_val | EMAC_LED_TRAFFIC) :
+ (reg_val & ~EMAC_LED_TRAFFIC);
+ REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
+ } else { /* Override the traffic led in the BMAC: */
+ REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
+ + port*4, 1);
+ REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4,
+ value);
+ }
+ break;
+ default:
+ DP(NETIF_MSG_LINK,
+ "bnx2x_override_led_value() unknown led index %d "
+ "(should be 0-5)\n", led_idx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
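For example (a sketch, not part of the patch), forcing the port-0 10G LED on uses led_idx 4, which writes NIG_REG_LED_10G_P0 directly:

	bnx2x_override_led_value(bp, 0 /* port */, 4 /* 10G led */, 1);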
+
+
+u8 bnx2x_set_led(struct link_params *params,
+ struct link_vars *vars, u8 mode, u32 speed)
+{
+ u8 port = params->port;
+ u16 hw_led_mode = params->hw_led_mode;
+ u8 rc = 0, phy_idx;
+ u32 tmp;
+ u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
+ DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
+ speed, hw_led_mode);
+	/* In case an external phy provides a LED handler, invoke it */
+ for (phy_idx = EXT_PHY1; phy_idx < MAX_PHYS; phy_idx++) {
+ if (params->phy[phy_idx].set_link_led) {
+ params->phy[phy_idx].set_link_led(
+ &params->phy[phy_idx], params, mode);
+ }
+ }
+
+ switch (mode) {
+ case LED_MODE_FRONT_PANEL_OFF:
+ case LED_MODE_OFF:
+ REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
+ SHARED_HW_CFG_LED_MAC1);
+
+ tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+ EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
+ break;
+
+ case LED_MODE_OPER:
+		/**
+		 * For all other phys, OPER mode is the same as ON, so in
+		 * case the link is down, do nothing
+		 **/
+ if (!vars->link_up)
break;
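+		/* fall through: with link up, LED_MODE_OPER continues as
+		 * LED_MODE_ON */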
+ case LED_MODE_ON:
+ if (SINGLE_MEDIA_DIRECT(params)) {
+ /**
+ * This is a work-around for HW issue found when link
+ * is up in CL73
+ */
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+ REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+ } else {
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
+ hw_led_mode);
+ }
- default:
- DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
- params->ext_phy_config);
+ REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 +
+ port*4, 0);
+ /* Set blinking rate to ~15.9Hz */
+ REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
+ LED_BLINK_RATE_VAL);
+ REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
+ port*4, 1);
+ tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+ EMAC_WR(bp, EMAC_REG_EMAC_LED,
+ (tmp & (~EMAC_LED_OVERRIDE)));
+
+ if (CHIP_IS_E1(bp) &&
+ ((speed == SPEED_2500) ||
+ (speed == SPEED_1000) ||
+ (speed == SPEED_100) ||
+ (speed == SPEED_10))) {
+			/* On Everest 1 Ax chip versions, for speeds less than
+			   10G, the LED scheme is different */
+ REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
+ + port*4, 1);
+ REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
+ port*4, 0);
+ REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
+ port*4, 1);
+ }
+ break;
+
+ default:
+ rc = -EINVAL;
+ DP(NETIF_MSG_LINK, "bnx2x_set_led: Invalid led mode %d\n",
+ mode);
+ break;
+ }
+ return rc;
+
+}
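Typical call site (as used elsewhere in this patch): once the MAC is brought up, switch the LEDs to operational state at the negotiated speed:

	bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);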
+
+/**
+ * This function reflects the actual link state, read DIRECTLY from
+ * the HW
+ */
+u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars,
+ u8 is_serdes)
+{
+ struct bnx2x *bp = params->bp;
+ u16 gp_status = 0, phy_index = 0;
+ u8 ext_phy_link_up = 0, serdes_phy_type;
+ struct link_vars temp_vars;
+
+ CL45_RD_OVER_CL22(bp, &params->phy[INT_PHY],
+ MDIO_REG_BANK_GP_STATUS,
+ MDIO_GP_STATUS_TOP_AN_STATUS1,
+ &gp_status);
+ /* link is up only if both local phy and external phy are up */
+ if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
+ return -ESRCH;
+
+ switch (params->num_phys) {
+ case 1:
+ /* No external PHY */
+ return 0;
+ case 2:
+ ext_phy_link_up = params->phy[EXT_PHY1].read_status(
+ &params->phy[EXT_PHY1],
+ params, &temp_vars);
+ break;
+ case 3: /* Dual Media */
+ for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ serdes_phy_type = ((params->phy[phy_index].media_type ==
+ ETH_PHY_SFP_FIBER) ||
+ (params->phy[phy_index].media_type ==
+ ETH_PHY_XFP_FIBER));
+
+ if (is_serdes != serdes_phy_type)
+ continue;
+ if (params->phy[phy_index].read_status) {
+ ext_phy_link_up |=
+ params->phy[phy_index].read_status(
+ &params->phy[phy_index],
+ params, &temp_vars);
+ }
+ }
+ break;
+ }
+ if (ext_phy_link_up)
+ return 0;
+ return -ESRCH;
+}
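Usage sketch (assuming an ethtool self-test style caller; bp->link_params and bp->link_vars are the adapter's link state): returns 0 when the full link is up, -ESRCH otherwise:

	if (bnx2x_test_link(&bp->link_params, &bp->link_vars, 0) == 0)
		DP(NETIF_MSG_LINK, "link verified up from HW\n");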
+
+static u8 bnx2x_link_initialize(struct link_params *params,
+ struct link_vars *vars)
+{
+ u8 rc = 0;
+ u8 phy_index, non_ext_phy;
+ struct bnx2x *bp = params->bp;
+	/**
+	 * In case an external phy exists, the line speed would be the
+	 * line speed linked up by the external phy. In case it is direct
+	 * only, the line_speed during initialization will be
+	 * equal to the req_line_speed
+	 */
+ vars->line_speed = params->phy[INT_PHY].req_line_speed;
+
+	/**
+	 * Initialize the internal phy in case this is a direct board
+	 * (no external phys), or this board has an external phy which
+	 * requires the internal phy to be initialized first.
+	 */
+
+ if (params->phy[INT_PHY].config_init)
+ params->phy[INT_PHY].config_init(
+ &params->phy[INT_PHY],
+ params, vars);
+
+ /* init ext phy and enable link state int */
+ non_ext_phy = (SINGLE_MEDIA_DIRECT(params) ||
+ (params->loopback_mode == LOOPBACK_XGXS));
+
+ if (non_ext_phy ||
+ (params->phy[EXT_PHY1].flags & FLAGS_INIT_XGXS_FIRST) ||
+ (params->loopback_mode == LOOPBACK_EXT_PHY)) {
+ struct bnx2x_phy *phy = &params->phy[INT_PHY];
+ if (vars->line_speed == SPEED_AUTO_NEG)
+ bnx2x_set_parallel_detection(phy, params);
+ bnx2x_init_internal_phy(phy, params, vars);
+ }
+
+ /* Init external phy*/
+ if (!non_ext_phy)
+ for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+			/**
+			 * No need to initialize the second phy in case of
+			 * first-phy-only selection. In case of the second
+			 * phy, we do need to initialize the first phy,
+			 * since they are connected.
+			 **/
+ if (phy_index == EXT_PHY2 &&
+ (bnx2x_phy_selection(params) ==
+ PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
+ DP(NETIF_MSG_LINK, "Not initializing"
+ "second phy\n");
+ continue;
+ }
+ params->phy[phy_index].config_init(
+ &params->phy[phy_index],
+ params, vars);
+ }
+
+ /* Reset the interrupt indication after phy was initialized */
+ bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 +
+ params->port*4,
+ (NIG_STATUS_XGXS0_LINK10G |
+ NIG_STATUS_XGXS0_LINK_STATUS |
+ NIG_STATUS_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
+ return rc;
+}
+
+static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ /* reset the SerDes/XGXS */
+ REG_WR(params->bp, GRCBASE_MISC +
+ MISC_REGISTERS_RESET_REG_3_CLEAR,
+ (0x1ff << (params->port*16)));
+}
+
+static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 gpio_port;
+ /* HW reset */
+ gpio_port = params->port;
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
+ DP(NETIF_MSG_LINK, "reset external PHY\n");
+}
+
+static u8 bnx2x_update_link_down(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+
+ DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
+ bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
+
+ /* indicate no mac active */
+ vars->mac_type = MAC_TYPE_NONE;
+
+ /* update shared memory */
+ vars->link_status = 0;
+ vars->line_speed = 0;
+ bnx2x_update_mng(params, vars->link_status);
+
+ /* activate nig drain */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
+
+ /* disable emac */
+ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+
+ msleep(10);
+
+ /* reset BigMac */
+ bnx2x_bmac_rx_disable(bp, params->port);
+ REG_WR(bp, GRCBASE_MISC +
+ MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ return 0;
+}
+
+static u8 bnx2x_update_link_up(struct link_params *params,
+ struct link_vars *vars,
+ u8 link_10g)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u8 rc = 0;
+
+ vars->link_status |= LINK_STATUS_LINK_UP;
+
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ vars->link_status |=
+ LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
+
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ vars->link_status |=
+ LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
+
+ if (link_10g) {
+ bnx2x_bmac_enable(params, vars, 0);
+ bnx2x_set_led(params, vars,
+ LED_MODE_OPER, SPEED_10000);
+ } else {
+ rc = bnx2x_emac_program(params, vars);
+
+ bnx2x_emac_enable(params, vars, 0);
+
+ /* AN complete? */
+ if ((vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
+ && (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
+ SINGLE_MEDIA_DIRECT(params))
+ bnx2x_set_gmii_tx_driver(params);
+ }
+
+ /* PBF - link up */
+ rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
+ vars->line_speed);
+
+ /* disable drain */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
+
+ /* update shared memory */
+ bnx2x_update_mng(params, vars->link_status);
+ msleep(20);
+ return rc;
+}
+/**
+ * The bnx2x_link_update function should be called upon link
+ * interrupt.
+ * Link is considered up as follows:
+ * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs
+ * to be up
+ *	- SINGLE_MEDIA - The link between the 577xx and the external
+ *	  phy (XGXS) needs to be up, as well as the external link of
+ *	  the phy (PHY_EXT1)
+ *	- DUAL_MEDIA - The link between the 577xx and the first
+ *	  external phy needs to be up, and at least one of the 2
+ *	  external phy links must be up.
+ */
+u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ struct link_vars phy_vars[MAX_PHYS];
+ u8 port = params->port;
+ u8 link_10g, phy_index;
+ u8 ext_phy_link_up = 0, cur_link_up, rc = 0;
+ u8 is_mi_int = 0;
+ u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed;
+ u8 active_external_phy = INT_PHY;
+ vars->link_status = 0;
+ for (phy_index = INT_PHY; phy_index < params->num_phys;
+ phy_index++) {
+ phy_vars[phy_index].flow_ctrl = 0;
+ phy_vars[phy_index].link_status = 0;
+ phy_vars[phy_index].line_speed = 0;
+ phy_vars[phy_index].duplex = DUPLEX_FULL;
+ phy_vars[phy_index].phy_link_up = 0;
+ phy_vars[phy_index].link_up = 0;
+ }
+
+ DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
+ port, (vars->phy_flags & PHY_XGXS_FLAG),
+ REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
+
+ is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
+ port*0x18) > 0);
+ DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
+ REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
+ is_mi_int,
+ REG_RD(bp,
+ NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
+
+ DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
+ REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
+ REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
+
+ /* disable emac */
+ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+
+ /**
+ * Step 1:
+ * Check external link change only for external phys, and apply
+ * priority selection between them in case the link on both phys
+	 * is up. Note that instead of the common vars, a temporary
+	 * vars argument is used since each phy may have a different
+	 * link/speed/duplex result
+ */
+ for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ struct bnx2x_phy *phy = &params->phy[phy_index];
+ if (!phy->read_status)
+ continue;
+ /* Read link status and params of this ext phy */
+ cur_link_up = phy->read_status(phy, params,
+ &phy_vars[phy_index]);
+ if (cur_link_up) {
+ DP(NETIF_MSG_LINK, "phy in index %d link is up\n",
+ phy_index);
+ } else {
+ DP(NETIF_MSG_LINK, "phy in index %d link is down\n",
+ phy_index);
+ continue;
+ }
+
+ if (!ext_phy_link_up) {
+ ext_phy_link_up = 1;
+ active_external_phy = phy_index;
+ } else {
+ switch (bnx2x_phy_selection(params)) {
+ case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+ /**
+ * In this option, the first PHY makes sure to pass the
+ * traffic through itself only.
+			 * It's not clear how to reset the link on the second phy
+ **/
+ active_external_phy = EXT_PHY1;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+ /**
+ * In this option, the first PHY makes sure to pass the
+ * traffic through the second PHY.
+ **/
+ active_external_phy = EXT_PHY2;
+ break;
+ default:
+ /**
+ * Link indication on both PHYs with the following cases
+ * is invalid:
+ * - FIRST_PHY means that second phy wasn't initialized,
+ * hence its link is expected to be down
+ * - SECOND_PHY means that first phy should not be able
+ * to link up by itself (using configuration)
+				 * - DEFAULT should be overridden during initialization
+ **/
+ DP(NETIF_MSG_LINK, "Invalid link indication"
+ "mpc=0x%x. DISABLING LINK !!!\n",
+ params->multi_phy_config);
+ ext_phy_link_up = 0;
+ break;
+ }
+ }
+ }
+ prev_line_speed = vars->line_speed;
+ /**
+ * Step 2:
+ * Read the status of the internal phy. In case of
+ * DIRECT_SINGLE_MEDIA board, this link is the external link,
+ * otherwise this is the link between the 577xx and the first
+ * external phy
+ */
+ if (params->phy[INT_PHY].read_status)
+ params->phy[INT_PHY].read_status(
+ &params->phy[INT_PHY],
+ params, vars);
+ /**
+	 * The INT_PHY flow control result resides in the vars. This
+	 * includes the case where the speed or flow control are not set
+	 * to AUTO. Otherwise, the active external phy flow control
+	 * result is set into the vars. The ext_phy_line_speed is needed
+	 * to check if the speed differs between the internal phy and the
+	 * external phy. Such a case may be the result of an intermediate
+	 * link speed change.
+ */
+ if (active_external_phy > INT_PHY) {
+ vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
+ /**
+ * Link speed is taken from the XGXS. AN and FC result from
+ * the external phy.
+ */
+ vars->link_status |= phy_vars[active_external_phy].link_status;
+
+ /**
+		 * if the active_external_phy is the first PHY and its link
+		 * is up, disable TX on the second external PHY
+ */
+ if (active_external_phy == EXT_PHY1) {
+ if (params->phy[EXT_PHY2].phy_specific_func) {
+ DP(NETIF_MSG_LINK, "Disabling TX on"
+ " EXT_PHY2\n");
+ params->phy[EXT_PHY2].phy_specific_func(
+ &params->phy[EXT_PHY2],
+ params, DISABLE_TX);
+ }
+ }
+
+ ext_phy_line_speed = phy_vars[active_external_phy].line_speed;
+ vars->duplex = phy_vars[active_external_phy].duplex;
+ if (params->phy[active_external_phy].supported &
+ SUPPORTED_FIBRE)
+ vars->link_status |= LINK_STATUS_SERDES_LINK;
+ DP(NETIF_MSG_LINK, "Active external phy selected: %x\n",
+ active_external_phy);
+ }
+
+ for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ if (params->phy[phy_index].flags &
+ FLAGS_REARM_LATCH_SIGNAL) {
+ bnx2x_rearm_latch_signal(bp, port,
+ phy_index ==
+ active_external_phy);
break;
}
}
+ DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
+ " ext_phy_line_speed = %d\n", vars->flow_ctrl,
+ vars->link_status, ext_phy_line_speed);
+ /**
+	 * Upon link speed change set the NIG into drain mode. This deals
+	 * with a possible FIFO glitch due to clk change when the speed
+	 * is decreased without a link down indicator
+ */
+
+ if (vars->phy_link_up) {
+ if (!(SINGLE_MEDIA_DIRECT(params)) && ext_phy_link_up &&
+ (ext_phy_line_speed != vars->line_speed)) {
+ DP(NETIF_MSG_LINK, "Internal link speed %d is"
+ " different than the external"
+ " link speed %d\n", vars->line_speed,
+ ext_phy_line_speed);
+ vars->phy_link_up = 0;
+ } else if (prev_line_speed != vars->line_speed) {
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
+ + params->port*4, 0);
+ msleep(1);
+ }
+ }
+
+ /* anything 10 and over uses the bmac */
+ link_10g = ((vars->line_speed == SPEED_10000) ||
+ (vars->line_speed == SPEED_12000) ||
+ (vars->line_speed == SPEED_12500) ||
+ (vars->line_speed == SPEED_13000) ||
+ (vars->line_speed == SPEED_15000) ||
+ (vars->line_speed == SPEED_16000));
+
+ bnx2x_link_int_ack(params, vars, link_10g);
+
+ /**
+	 * In case the external phy link is up and the internal link is
+	 * down (not initialized yet, probably right after link
+	 * initialization), it needs to be initialized.
+	 * Note that after a link down-up as a result of a cable plug,
+	 * the xgxs link would probably become up again without the need
+	 * to initialize it
+ */
+ if (!(SINGLE_MEDIA_DIRECT(params))) {
+ DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d,"
+ " init_preceding = %d\n", ext_phy_link_up,
+ vars->phy_link_up,
+ params->phy[EXT_PHY1].flags &
+ FLAGS_INIT_XGXS_FIRST);
+ if (!(params->phy[EXT_PHY1].flags &
+ FLAGS_INIT_XGXS_FIRST)
+ && ext_phy_link_up && !vars->phy_link_up) {
+ vars->line_speed = ext_phy_line_speed;
+ if (vars->line_speed < SPEED_1000)
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ else
+ vars->phy_flags &= ~PHY_SGMII_FLAG;
+ bnx2x_init_internal_phy(&params->phy[INT_PHY],
+ params,
+ vars);
+ }
+ }
+ /**
+ * Link is up only if both local phy and external phy (in case of
+ * non-direct board) are up
+ */
+ vars->link_up = (vars->phy_link_up &&
+ (ext_phy_link_up ||
+ SINGLE_MEDIA_DIRECT(params)));
+
+ if (vars->link_up)
+ rc = bnx2x_update_link_up(params, vars, link_10g);
+ else
+ rc = bnx2x_update_link_down(params, vars);
+
+ return rc;
+}
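Call-site sketch (assuming the usual attention-handler flow in bnx2x_main.c): the update runs from the link interrupt with the adapter's shared params/vars:

	/* from the NIG link-attention handler */
	bnx2x_link_update(&bp->link_params, &bp->link_vars);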
+
+
+/*****************************************************************************/
+/* External Phy section */
+/*****************************************************************************/
+void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
+{
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ msleep(1);
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
}
static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port,
- u32 shmem_base, u32 spirom_ver)
+ u32 spirom_ver, u32 ver_addr)
{
DP(NETIF_MSG_LINK, "FW version 0x%x:0x%x for port %d\n",
(u16)(spirom_ver>>16), (u16)spirom_ver, port);
- REG_WR(bp, shmem_base +
- offsetof(struct shmem_region,
- port_mb[port].ext_phy_fw_version),
- spirom_ver);
+
+ if (ver_addr)
+ REG_WR(bp, ver_addr, spirom_ver);
}
-static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp, u8 port,
- u32 ext_phy_type, u8 ext_phy_addr,
- u32 shmem_base)
+static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u8 port)
{
u16 fw_ver1, fw_ver2;
- bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD,
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER1, &fw_ver1);
- bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD,
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER2, &fw_ver2);
- bnx2x_save_spirom_version(bp, port, shmem_base,
- (u32)(fw_ver1<<16 | fw_ver2));
+ bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2),
+ phy->ver_addr);
}
-
-static void bnx2x_save_8481_spirom_version(struct bnx2x *bp, u8 port,
- u8 ext_phy_addr, u32 shmem_base)
+static void bnx2x_ext_phy_set_pause(struct link_params *params,
+ struct bnx2x_phy *phy,
+ struct link_vars *vars)
{
- u16 val, fw_ver1, fw_ver2, cnt;
- /* For the 32 bits registers in 8481, access via MDIO2ARM interface.*/
- /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr, MDIO_PMA_DEVAD,
- 0xA819, 0x0014);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- 0xA81A,
- 0xc200);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- 0xA81B,
- 0x0000);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- 0xA81C,
- 0x0300);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- 0xA817,
- 0x0009);
+ u16 val;
+ struct bnx2x *bp = params->bp;
+	/* read-modify-write the pause advertisement */
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
- for (cnt = 0; cnt < 100; cnt++) {
- bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- 0xA818,
- &val);
- if (val & 1)
- break;
- udelay(5);
+ val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
+
+ /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+ bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
+ val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
}
- if (cnt == 100) {
- DP(NETIF_MSG_LINK, "Unable to read 8481 phy fw version(1)\n");
- bnx2x_save_spirom_version(bp, port,
- shmem_base, 0);
- return;
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
+ val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
}
+ DP(NETIF_MSG_LINK, "Ext phy AN advertize 0x%x\n", val);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val);
+}
+static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 ld_pause; /* local */
+ u16 lp_pause; /* link partner */
+ u16 pause_result;
+ u8 ret = 0;
+ /* read twice */
- /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr, MDIO_PMA_DEVAD,
- 0xA819, 0x0000);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr, MDIO_PMA_DEVAD,
- 0xA81A, 0xc200);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr, MDIO_PMA_DEVAD,
- 0xA817, 0x000A);
- for (cnt = 0; cnt < 100; cnt++) {
- bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- 0xA818,
- &val);
- if (val & 1)
- break;
- udelay(5);
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+ if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
+ vars->flow_ctrl = phy->req_flow_ctrl;
+ else if (phy->req_line_speed != SPEED_AUTO_NEG)
+ vars->flow_ctrl = params->req_fc_auto_adv;
+ else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
+ ret = 1;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_ADV_PAUSE, &ld_pause);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
+ pause_result = (ld_pause &
+ MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
+ pause_result |= (lp_pause &
+ MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
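+		/* pause_result nibble: bits [3:2] = local advertisement,
+		 * bits [1:0] = link partner advertisement */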
+ DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
+ pause_result);
+ bnx2x_pause_resolve(vars, pause_result);
}
- if (cnt == 100) {
- DP(NETIF_MSG_LINK, "Unable to read 8481 phy fw version(2)\n");
- bnx2x_save_spirom_version(bp, port,
- shmem_base, 0);
+ return ret;
+}
+
+static void bnx2x_ext_phy_10G_an_resolve(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ struct link_vars *vars)
+{
+ u16 val;
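+	/* the AN status bits are latched; read twice so the second read
+	 * reflects the current state */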
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_STATUS, &val);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_STATUS, &val);
+ if (val & (1<<5))
+ vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+ if ((val & (1<<0)) == 0)
+ vars->link_status |= LINK_STATUS_PARALLEL_DETECTION_USED;
+}
+
+/******************************************************************/
+/* common BCM8073/BCM8727 PHY SECTION */
+/******************************************************************/
+static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ if (phy->req_line_speed == SPEED_10 ||
+ phy->req_line_speed == SPEED_100) {
+ vars->flow_ctrl = phy->req_flow_ctrl;
return;
}
- /* lower 16 bits of the register SPI_FW_STATUS */
- bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- 0xA81B,
- &fw_ver1);
- /* upper 16 bits of register SPI_FW_STATUS */
- bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- 0xA81C,
- &fw_ver2);
+ if (bnx2x_ext_phy_resolve_fc(phy, params, vars) &&
+ (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE)) {
+ u16 pause_result;
+ u16 ld_pause; /* local */
+ u16 lp_pause; /* link partner */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_CL37_FC_LD, &ld_pause);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_CL37_FC_LP, &lp_pause);
+ pause_result = (ld_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
+ pause_result |= (lp_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
- bnx2x_save_spirom_version(bp, port,
- shmem_base, (fw_ver2<<16) | fw_ver1);
+ bnx2x_pause_resolve(vars, pause_result);
+ DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
+ pause_result);
+ }
}
-static void bnx2x_bcm8072_external_rom_boot(struct link_params *params)
+static void bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u8 port)
{
- struct bnx2x *bp = params->bp;
- u8 port = params->port;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
+ /* Boot port from external ROM */
+ /* EDC grst */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ 0x0001);
- /* Need to wait 200ms after reset */
- msleep(200);
- /* Boot port from external ROM
- * Set ser_boot_ctl bit in the MISC_CTRL1 register
- */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+ /* ucode reboot and rst */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ 0x008c);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0001);
/* Reset internal microprocessor */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
- /* set micro reset = 0 */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
- /* Reset internal microprocessor */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
- /* wait for 100ms for code download via SPI port */
- msleep(100);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+
+ /* Release srst bit */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+
+ /* wait for 120ms for code download via SPI port */
+ msleep(120);
/* Clear ser_boot_ctl bit */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0000);
- /* Wait 100ms */
- msleep(100);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+ bnx2x_save_bcm_spirom_ver(bp, phy, port);
+}
+
+static void bnx2x_8073_set_xaui_low_power_mode(struct bnx2x *bp,
+ struct bnx2x_phy *phy)
+{
+ u16 val;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, &val);
- bnx2x_save_bcm_spirom_ver(bp, port,
- ext_phy_type,
- ext_phy_addr,
- params->shmem_base);
+ if (val == 0) {
+ /* Mustn't set low power mode in 8073 A0 */
+ return;
+ }
+
+ /* Disable PLL sequencer (use read-modify-write to clear bit 13) */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
+ val &= ~(1<<13);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
+
+ /* PLL controls */
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805E, 0x1077);
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805D, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805C, 0x030B);
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805B, 0x1240);
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805A, 0x2490);
+
+ /* Tx Controls */
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A7, 0x0C74);
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A6, 0x9041);
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A5, 0x4640);
+
+ /* Rx Controls */
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FE, 0x01C4);
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FD, 0x9249);
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FC, 0x2015);
+
+ /* Enable PLL sequencer (use read-modify-write to set bit 13) */
+ bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
+ val |= (1<<13);
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
}
-static u8 bnx2x_8073_is_snr_needed(struct link_params *params)
+/******************************************************************/
+/* BCM8073 PHY SECTION */
+/******************************************************************/
+static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
{
/* This is only required for 8073A1, version 102 only */
-
- struct bnx2x *bp = params->bp;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
u16 val;
/* Read 8073 HW revision*/
- bnx2x_cl45_read(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8073_CHIP_REV, &val);
@@ -2431,9 +3322,7 @@ static u8 bnx2x_8073_is_snr_needed(struct link_params *params)
return 0;
}
- bnx2x_cl45_read(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER2, &val);
@@ -2444,15 +3333,11 @@ static u8 bnx2x_8073_is_snr_needed(struct link_params *params)
return 1;
}
-static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params)
+static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
{
- struct bnx2x *bp = params->bp;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
u16 val, cnt, cnt1 ;
- bnx2x_cl45_read(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8073_CHIP_REV, &val);
@@ -2466,9 +3351,7 @@ static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params)
poll Dev1, Reg $C820: */
for (cnt = 0; cnt < 1000; cnt++) {
- bnx2x_cl45_read(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
&val);
@@ -2485,9 +3368,7 @@ static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params)
XAUI workaround has completed),
then continue on with system initialization.*/
for (cnt1 = 0; cnt1 < 1000; cnt1++) {
- bnx2x_cl45_read(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8073_XAUI_WA, &val);
if (val & (1<<15)) {
@@ -2505,143 +3386,385 @@ static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params)
return -EINVAL;
}
-static void bnx2x_bcm8073_bcm8727_external_rom_boot(struct bnx2x *bp, u8 port,
- u8 ext_phy_addr,
- u32 ext_phy_type,
- u32 shmem_base)
+static void bnx2x_807x_force_10G(struct bnx2x *bp, struct bnx2x_phy *phy)
{
- /* Boot port from external ROM */
- /* EDC grst */
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- 0x0001);
+ /* Force KR or KX */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0x000b);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0000);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
+}
- /* ucode reboot and rst */
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- 0x008c);
+static void bnx2x_8073_set_pause_cl37(struct link_params *params,
+ struct bnx2x_phy *phy,
+ struct link_vars *vars)
+{
+ u16 cl37_val;
+ struct bnx2x *bp = params->bp;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &cl37_val);
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+ cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+ bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) {
+ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
+ }
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
+ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ }
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
+ cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ }
+ DP(NETIF_MSG_LINK,
+ "Ext phy AN advertize cl37 0x%x\n", cl37_val);
- /* Reset internal microprocessor */
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, cl37_val);
+ msleep(500);
+}
- /* Release srst bit */
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val = 0, tmp1;
+ u8 gpio_port;
+ DP(NETIF_MSG_LINK, "Init 8073\n");
- /* wait for 100ms for code download via SPI port */
- msleep(100);
+ gpio_port = params->port;
+ /* Restore normal power mode*/
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
- /* Clear ser_boot_ctl bit */
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
- bnx2x_save_bcm_spirom_ver(bp, port,
- ext_phy_type,
- ext_phy_addr,
- shmem_base);
-}
+ /* enable LASI */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL, (1<<2));
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x0004);
-static void bnx2x_bcm8073_external_rom_boot(struct bnx2x *bp, u8 port,
- u8 ext_phy_addr,
- u32 shmem_base)
-{
- bnx2x_bcm8073_bcm8727_external_rom_boot(bp, port, ext_phy_addr,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- shmem_base);
-}
+ bnx2x_8073_set_pause_cl37(params, phy, vars);
-static void bnx2x_bcm8727_external_rom_boot(struct bnx2x *bp, u8 port,
- u8 ext_phy_addr,
- u32 shmem_base)
-{
- bnx2x_bcm8073_bcm8727_external_rom_boot(bp, port, ext_phy_addr,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- shmem_base);
+ bnx2x_8073_set_xaui_low_power_mode(bp, phy);
-}
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
-static void bnx2x_bcm8726_external_rom_boot(struct link_params *params)
-{
- struct bnx2x *bp = params->bp;
- u8 port = params->port;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &tmp1);
- /* Need to wait 100ms after reset */
- msleep(100);
+ DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
- /* Micro controller re-boot */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- 0x018B);
+ /* Enable CL37 BAM */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8073_BAM, &val);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8073_BAM, val | 1);
- /* Set soft reset */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+ if (params->loopback_mode == LOOPBACK_EXT) {
+ bnx2x_807x_force_10G(bp, phy);
+ DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
+ return 0;
+ } else {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0002);
+ }
+ if (phy->req_line_speed != SPEED_AUTO_NEG) {
+ if (phy->req_line_speed == SPEED_10000) {
+ val = (1<<7);
+ } else if (phy->req_line_speed == SPEED_2500) {
+ val = (1<<5);
+ /* Note that 2.5G works only
+			   when used with 1G advertisement */
+ } else
+ val = (1<<5);
+ } else {
+ val = 0;
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+ val |= (1<<7);
+
+ /* Note that 2.5G works only when
+		   used with 1G advertisement */
+ if (phy->speed_cap_mask &
+ (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
+ val |= (1<<5);
+ DP(NETIF_MSG_LINK, "807x autoneg val = 0x%x\n", val);
+ }
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, val);
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, &tmp1);
+
+ if (((phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
+ (phy->req_line_speed == SPEED_AUTO_NEG)) ||
+ (phy->req_line_speed == SPEED_2500)) {
+ u16 phy_ver;
+ /* Allow 2.5G for A1 and above */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV,
+ &phy_ver);
+ DP(NETIF_MSG_LINK, "Add 2.5G\n");
+ if (phy_ver > 0)
+ tmp1 |= 1;
+ else
+ tmp1 &= 0xfffe;
+ } else {
+ DP(NETIF_MSG_LINK, "Disable 2.5G\n");
+ tmp1 &= 0xfffe;
+ }
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, tmp1);
+ /* Add support for CL37 (passive mode) II */
- /* wait for 150ms for microcode load */
- msleep(150);
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &tmp1);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD,
+ (tmp1 | ((phy->req_duplex == DUPLEX_FULL) ?
+ 0x20 : 0x40)));
- /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+ /* Add support for CL37 (passive mode) III */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
- msleep(200);
- bnx2x_save_bcm_spirom_ver(bp, port,
- ext_phy_type,
- ext_phy_addr,
- params->shmem_base);
+ /* The SNR will improve about 2dB by changing
+ BW and FFE main tap. The remaining commands are executed
+ after link is up */
+ if (bnx2x_8073_is_snr_needed(bp, phy))
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN,
+ 0xFB0C);
+
+ /* Enable FEC (Forward Error Correction) Request in the AN */
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, &tmp1);
+ tmp1 |= (1<<15);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, tmp1);
+
+ bnx2x_ext_phy_set_pause(params, phy, vars);
+
+ /* Restart autoneg */
+ msleep(500);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+ DP(NETIF_MSG_LINK, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x\n",
+ ((val & (1<<5)) > 0), ((val & (1<<7)) > 0));
+ return 0;
+}
+
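The advertisement word built above encodes the requested speeds into the AN advertisement register: bit 7 selects 10G and bit 5 selects 1G, with 2.5G riding on the 1G advertisement as the comments note. A minimal standalone sketch of that selection, with hypothetical CAP_*/SPEED_* macros standing in for the driver's PORT_HW_CFG_SPEED_CAPABILITY_* and SPEED_* constants:

    #include <stdint.h>

    /* Hypothetical stand-ins for the driver's constants */
    #define SPEED_AUTO_NEG 0
    #define SPEED_10000    10000
    #define CAP_1G   (1u << 0)
    #define CAP_2_5G (1u << 1)
    #define CAP_10G  (1u << 2)

    /* Mirrors the 8073 advertisement selection above:
     * bit 7 = 10G, bit 5 = 1G (2.5G implies the 1G bit). */
    static uint16_t adv_bits(uint32_t req_speed, uint32_t cap_mask)
    {
        uint16_t val = 0;

        if (req_speed != SPEED_AUTO_NEG)
            return (req_speed == SPEED_10000) ? (1 << 7) : (1 << 5);
        if (cap_mask & CAP_10G)
            val |= (1 << 7);
        if (cap_mask & (CAP_1G | CAP_2_5G))
            val |= (1 << 5);
        return val;
    }
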
+static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 link_up = 0;
+ u16 val1, val2;
+ u16 link_status = 0;
+ u16 an1000_status = 0;
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
+
+ DP(NETIF_MSG_LINK, "8703 LASI status 0x%x\n", val1);
+
+ /* clear the interrupt LASI status register */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n", val2, val1);
+ /* Clear MSG-OUT */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
+
+ /* Check the LASI */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &val2);
+
+ DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
+
+ /* Check the link status */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
+ DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+ link_up = ((val1 & 4) == 4);
+ DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
+
+ if (link_up &&
+ (phy->req_line_speed != SPEED_10000)) {
+ if (bnx2x_8073_xaui_wa(bp, phy) != 0)
+ return 0;
+ }
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
+
+ /* Check the link status on 1.1.2 */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "KR PMA status 0x%x->0x%x,"
+ "an_link_status=0x%x\n", val2, val1, an1000_status);
+
+ link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
+ if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
+ /* The SNR will improve about 2dB by
+ changing the BW and FFE main tap. */
+ /* The 1st write to change the FFE main
+ tap is done before restarting AN */
+ /* Change PLL Bandwidth in EDC
+ register */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH,
+ 0x26BC);
+
+ /* Change CDR Bandwidth in EDC register */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CDR_BANDWIDTH,
+ 0x0333);
+ }
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
+ &link_status);
+
+ /* Bits 0..2 --> speed detected, bits 13..15 --> link is down */
+ if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
+ link_up = 1;
+ vars->line_speed = SPEED_10000;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
+ params->port);
+ } else if ((link_status & (1<<1)) && (!(link_status & (1<<14)))) {
+ link_up = 1;
+ vars->line_speed = SPEED_2500;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 2.5G\n",
+ params->port);
+ } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
+ link_up = 1;
+ vars->line_speed = SPEED_1000;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n",
+ params->port);
+ } else {
+ link_up = 0;
+ DP(NETIF_MSG_LINK, "port %x: External link is down\n",
+ params->port);
+ }
+
+ if (link_up) {
+ bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
+ bnx2x_8073_resolve_fc(phy, params, vars);
+ }
+ return link_up;
+}
+
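For reference, the speed decode above pairs each "speed attained" bit (0..2) with the matching "link down" bit (13..15). A hedged standalone sketch of the same decode, using a hypothetical helper name:

    #include <stdint.h>

    /* Returns the detected speed in Mbps per the 8073
     * SPEED_LINK_STATUS decode above, or 0 if no link. */
    static int decode_8073_speed(uint16_t link_status)
    {
        if ((link_status & (1 << 2)) && !(link_status & (1 << 15)))
            return 10000;
        if ((link_status & (1 << 1)) && !(link_status & (1 << 14)))
            return 2500;
        if ((link_status & (1 << 0)) && !(link_status & (1 << 13)))
            return 1000;
        return 0;
    }
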
+static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 gpio_port;
+ gpio_port = params->port;
+ DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n",
+ gpio_port);
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
+}
+
+/******************************************************************/
+/* BCM8705 PHY SECTION */
+/******************************************************************/
+static u8 bnx2x_8705_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "init 8705\n");
+ /* Restore normal power mode*/
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ /* HW reset */
+ bnx2x_ext_phy_hw_reset(bp, params->port);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
+ bnx2x_wait_reset_complete(bp, phy);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, 0x7fbf);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CMU_PLL_BYPASS, 0x0100);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1);
+ /* BCM8705 doesn't have microcode, hence the 0 */
+ bnx2x_save_spirom_version(bp, params->port, params->shmem_base, 0);
+ return 0;
+}
+
+static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u8 link_up = 0;
+ u16 val1, rx_sd;
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "read status 8705\n");
+ bnx2x_cl45_read(bp, phy,
+ MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, 0xc809, &val1);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, 0xc809, &val1);
+
+ DP(NETIF_MSG_LINK, "8705 1.c809 val=0x%x\n", val1);
+ link_up = ((rx_sd & 0x1) && (val1 & (1<<9)) && ((val1 & (1<<8)) == 0));
+ if (link_up) {
+ vars->line_speed = SPEED_10000;
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ }
+ return link_up;
}
-static void bnx2x_sfp_set_transmitter(struct bnx2x *bp, u8 port,
- u32 ext_phy_type, u8 ext_phy_addr,
- u8 tx_en)
+/******************************************************************/
+/* SFP+ module Section */
+/******************************************************************/
+static void bnx2x_sfp_set_transmitter(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u8 port,
+ u8 tx_en)
{
u16 val;
DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x\n",
tx_en, port);
/* Disable/Enable transmitter (TX laser of the SFP+ module) */
- bnx2x_cl45_read(bp, port,
- ext_phy_type,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_PHY_IDENTIFIER,
&val);
@@ -2651,58 +3774,42 @@ static void bnx2x_sfp_set_transmitter(struct bnx2x *bp, u8 port,
else
val |= (1<<15);
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_PHY_IDENTIFIER,
val);
}
-static u8 bnx2x_8726_read_sfp_module_eeprom(struct link_params *params,
+static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params,
u16 addr, u8 byte_cnt, u8 *o_buf)
{
struct bnx2x *bp = params->bp;
u16 val = 0;
u16 i;
- u8 port = params->port;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-
if (byte_cnt > 16) {
DP(NETIF_MSG_LINK, "Reading from eeprom is"
" is limited to 0xf\n");
return -EINVAL;
}
/* Set the read command byte count */
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
(byte_cnt | 0xa000));
/* Set the read command address */
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
addr);
/* Activate read command */
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
0x2c0f);
/* Wait up to 500us for command complete status */
for (i = 0; i < 100; i++) {
- bnx2x_cl45_read(bp, port,
- ext_phy_type,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
@@ -2721,18 +3828,14 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct link_params *params,
/* Read the buffer */
for (i = 0; i < byte_cnt; i++) {
- bnx2x_cl45_read(bp, port,
- ext_phy_type,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
}
for (i = 0; i < 100; i++) {
- bnx2x_cl45_read(bp, port,
- ext_phy_type,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
@@ -2743,14 +3846,12 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct link_params *params,
return -EINVAL;
}
-static u8 bnx2x_8727_read_sfp_module_eeprom(struct link_params *params,
+static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params,
u16 addr, u8 byte_cnt, u8 *o_buf)
{
struct bnx2x *bp = params->bp;
u16 val, i;
- u8 port = params->port;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
if (byte_cnt > 16) {
DP(NETIF_MSG_LINK, "Reading from eeprom is"
@@ -2759,40 +3860,30 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct link_params *params,
}
/* Need to read from 1.8000 to clear it */
- bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
&val);
/* Set the read command byte count */
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
((byte_cnt < 2) ? 2 : byte_cnt));
/* Set the read command address */
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
addr);
/* Set the destination address */
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
0x8004,
MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
/* Activate read command */
- bnx2x_cl45_write(bp, port,
- ext_phy_type,
- ext_phy_addr,
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
0x8002);
@@ -2802,9 +3893,7 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct link_params *params,
/* Wait up to 500us for command complete status */
for (i = 0; i < 100; i++) {
- bnx2x_cl45_read(bp, port,
- ext_phy_type,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
@@ -2823,18 +3912,14 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct link_params *params,
/* Read the buffer */
for (i = 0; i < byte_cnt; i++) {
- bnx2x_cl45_read(bp, port,
- ext_phy_type,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
}
for (i = 0; i < 100; i++) {
- bnx2x_cl45_read(bp, port,
- ext_phy_type,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
@@ -2846,21 +3931,21 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct link_params *params,
return -EINVAL;
}
-u8 bnx2x_read_sfp_module_eeprom(struct link_params *params, u16 addr,
+u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params, u16 addr,
u8 byte_cnt, u8 *o_buf)
{
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-
- if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
- return bnx2x_8726_read_sfp_module_eeprom(params, addr,
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
+ return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
byte_cnt, o_buf);
- else if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
- return bnx2x_8727_read_sfp_module_eeprom(params, addr,
+ else if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
+ return bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
byte_cnt, o_buf);
return -EINVAL;
}
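The dispatcher above is the single public entry point for SFP+ EEPROM reads; after this refactor, callers pass the phy so the 8726/8727 variant can be selected without consulting ext_phy_config. A hypothetical caller-side sketch that assumes this file's context (the SFP_EEPROM_VENDOR_NAME_* offsets used later in this diff; error handling abbreviated):

    /* Sketch: fetch the SFP+ vendor name via the dispatcher above */
    static u8 example_read_vendor_name(struct bnx2x_phy *phy,
                                       struct link_params *params,
                                       char *buf)
    {
        if (bnx2x_read_sfp_module_eeprom(phy, params,
                                         SFP_EEPROM_VENDOR_NAME_ADDR,
                                         SFP_EEPROM_VENDOR_NAME_SIZE,
                                         (u8 *)buf) != 0)
            return 1;
        buf[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
        return 0;
    }
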
-static u8 bnx2x_get_edc_mode(struct link_params *params,
+static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
+ struct link_params *params,
u16 *edc_mode)
{
struct bnx2x *bp = params->bp;
@@ -2868,10 +3953,11 @@ static u8 bnx2x_get_edc_mode(struct link_params *params,
*edc_mode = EDC_MODE_LIMITING;
/* First check for copper cable */
- if (bnx2x_read_sfp_module_eeprom(params,
- SFP_EEPROM_CON_TYPE_ADDR,
- 1,
- &val) != 0) {
+ if (bnx2x_read_sfp_module_eeprom(phy,
+ params,
+ SFP_EEPROM_CON_TYPE_ADDR,
+ 1,
+ &val) != 0) {
DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
return -EINVAL;
}
@@ -2883,7 +3969,8 @@ static u8 bnx2x_get_edc_mode(struct link_params *params,
/* Check if it's an active cable (includes SFP+ module)
or a passive cable */
- if (bnx2x_read_sfp_module_eeprom(params,
+ if (bnx2x_read_sfp_module_eeprom(phy,
+ params,
SFP_EEPROM_FC_TX_TECH_ADDR,
1,
&copper_module_type) !=
@@ -2923,10 +4010,11 @@ static u8 bnx2x_get_edc_mode(struct link_params *params,
if (check_limiting_mode) {
u8 options[SFP_EEPROM_OPTIONS_SIZE];
- if (bnx2x_read_sfp_module_eeprom(params,
- SFP_EEPROM_OPTIONS_ADDR,
- SFP_EEPROM_OPTIONS_SIZE,
- options) != 0) {
+ if (bnx2x_read_sfp_module_eeprom(phy,
+ params,
+ SFP_EEPROM_OPTIONS_ADDR,
+ SFP_EEPROM_OPTIONS_SIZE,
+ options) != 0) {
DP(NETIF_MSG_LINK, "Failed to read Option"
" field from module EEPROM\n");
return -EINVAL;
@@ -2939,17 +4027,17 @@ static u8 bnx2x_get_edc_mode(struct link_params *params,
DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
return 0;
}
-
/* This function reads the relevant field from the module (SFP+),
and verifies it is compliant with this board */
-static u8 bnx2x_verify_sfp_module(struct link_params *params)
+static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
- u32 val;
- u32 fw_resp;
+ u32 val, cmd;
+ u32 fw_resp, fw_cmd_param;
char vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE+1];
char vendor_pn[SFP_EEPROM_PART_NO_SIZE+1];
-
+ phy->flags &= ~FLAGS_SFP_NOT_APPROVED;
val = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
port_feature_config[params->port].config));
@@ -2959,29 +4047,43 @@ static u8 bnx2x_verify_sfp_module(struct link_params *params)
return 0;
}
- /* Ask the FW to validate the module */
- if (!(params->feature_config_flags &
- FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY)) {
+ if (params->feature_config_flags &
+ FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY) {
+ /* Use specific phy request */
+ cmd = DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL;
+ } else if (params->feature_config_flags &
+ FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY) {
+ /* Use first phy request only in case of non-dual media*/
+ if (DUAL_MEDIA(params)) {
+ DP(NETIF_MSG_LINK, "FW does not support OPT MDL "
+ "verification\n");
+ return -EINVAL;
+ }
+ cmd = DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL;
+ } else {
+ /* No support for OPT MDL verification */
DP(NETIF_MSG_LINK, "FW does not support OPT MDL "
- "verification\n");
+ "verification\n");
return -EINVAL;
}
-
- fw_resp = bnx2x_fw_command(bp, DRV_MSG_CODE_VRFY_OPT_MDL);
+ fw_cmd_param = FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl);
+ fw_resp = bnx2x_fw_command(bp, cmd, fw_cmd_param);
if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) {
DP(NETIF_MSG_LINK, "Approved module\n");
return 0;
}
/* format the warning message */
- if (bnx2x_read_sfp_module_eeprom(params,
+ if (bnx2x_read_sfp_module_eeprom(phy,
+ params,
SFP_EEPROM_VENDOR_NAME_ADDR,
SFP_EEPROM_VENDOR_NAME_SIZE,
(u8 *)vendor_name))
vendor_name[0] = '\0';
else
vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
- if (bnx2x_read_sfp_module_eeprom(params,
+ if (bnx2x_read_sfp_module_eeprom(phy,
+ params,
SFP_EEPROM_PART_NO_ADDR,
SFP_EEPROM_PART_NO_SIZE,
(u8 *)vendor_pn))
@@ -2989,22 +4091,78 @@ static u8 bnx2x_verify_sfp_module(struct link_params *params)
else
vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
- netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected, Port %d from %s part number %s\n",
+ netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected,"
+ " Port %d from %s part number %s\n",
params->port, vendor_name, vendor_pn);
+ phy->flags |= FLAGS_SFP_NOT_APPROVED;
return -EINVAL;
}
-static u8 bnx2x_bcm8726_set_limiting_mode(struct link_params *params,
- u16 edc_mode)
+static u8 bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
+ struct link_params *params)
+
{
+ u8 val;
struct bnx2x *bp = params->bp;
- u8 port = params->port;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
+ u16 timeout;
+ /* Initialization time after hot-plug may take up to 300ms for some
+ PHY types (e.g. JDSU) */
+ for (timeout = 0; timeout < 60; timeout++) {
+ if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
+ == 0) {
+ DP(NETIF_MSG_LINK, "SFP+ module initialization "
+ "took %d ms\n", timeout * 5);
+ return 0;
+ }
+ msleep(5);
+ }
+ return -EINVAL;
+}
+
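The loop above gives the module up to 60 iterations x 5 ms = 300 ms to answer its first EEPROM read, matching the hot-plug figure in the comment. The same retry shape as a generic helper, shown only as a sketch with hypothetical names (not driver API):

    /* Poll try() every step_ms until it returns 0 or budget_ms elapses. */
    static int poll_until_ready(int (*try)(void *), void *ctx,
                                unsigned int step_ms, unsigned int budget_ms,
                                void (*sleep_ms)(unsigned int))
    {
        unsigned int waited;

        for (waited = 0; waited < budget_ms; waited += step_ms) {
            if (try(ctx) == 0)
                return 0;
            sleep_ms(step_ms);
        }
        return -1;
    }
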
+static void bnx2x_8727_power_module(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u8 is_power_up) {
+ /* Make sure GPIOs are not used for LED mode */
+ u16 val;
+ /*
+ * In the GPIO register, bit 4 is used to determine if the GPIOs are
+ * operating as INPUT or as OUTPUT. A value of 1 is for input, and 0 for
+ * output.
+ * Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0
+ * Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1
+ * where the 1st bit is the over-current (only input), and 2nd bit is
+ * for power (only output)
+ */
+
+ /*
+ * In case the NOC feature is disabled and power is up, set GPIO control
+ * as input to enable listening for the over-current indication
+ */
+ if (phy->flags & FLAGS_NOC)
+ return;
+ if (!(phy->flags & FLAGS_NOC) && is_power_up)
+ val = (1<<4);
+ else
+ /*
+ * Set GPIO control to OUTPUT, and set the power bit
+ * to according to the is_power_up
+ */
+ val = ((!(is_power_up)) << 1);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_GPIO_CTRL,
+ val);
+}
+
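As the comment block above describes, the 8727 GPIO control register packs direction and value together: bit 4 selects INPUT, and bit 1 carries the power control when the pins are OUTPUT. A minimal sketch of the two reachable cases after the early FLAGS_NOC return (the NOC flavor skips the write entirely); gpio_ctrl_val() is a hypothetical helper:

    #include <stdbool.h>
    #include <stdint.h>

    /* Mirrors bnx2x_8727_power_module()'s value selection. */
    static uint16_t gpio_ctrl_val(bool is_power_up)
    {
        if (is_power_up)
            return 1 << 4;  /* INPUT: listen for over-current */
        return 1 << 1;      /* OUTPUT: assert the power-down bit */
    }
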
+static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u16 edc_mode)
+{
u16 cur_limiting_mode;
- bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER2,
&cur_limiting_mode);
@@ -3014,12 +4172,10 @@ static u8 bnx2x_bcm8726_set_limiting_mode(struct link_params *params,
if (edc_mode == EDC_MODE_LIMITING) {
DP(NETIF_MSG_LINK,
"Setting LIMITING MODE\n");
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2,
- EDC_MODE_LIMITING);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ EDC_MODE_LIMITING);
} else { /* LRM mode ( default )*/
DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
@@ -3030,27 +4186,19 @@ static u8 bnx2x_bcm8726_set_limiting_mode(struct link_params *params,
if (cur_limiting_mode != EDC_MODE_LIMITING)
return 0;
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
- ext_phy_addr,
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_LRM_MODE,
0);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
- ext_phy_addr,
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER2,
0x128);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
- ext_phy_addr,
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_MISC_CTRL0,
0x4008);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
- ext_phy_addr,
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_LRM_MODE,
0xaaaa);
@@ -3058,46 +4206,33 @@ static u8 bnx2x_bcm8726_set_limiting_mode(struct link_params *params,
return 0;
}
-static u8 bnx2x_bcm8727_set_limiting_mode(struct link_params *params,
+static u8 bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
u16 edc_mode)
{
- struct bnx2x *bp = params->bp;
- u8 port = params->port;
u16 phy_identifier;
u16 rom_ver2_val;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
-
- bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_PHY_IDENTIFIER,
&phy_identifier);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_PHY_IDENTIFIER,
(phy_identifier & ~(1<<9)));
- bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER2,
&rom_ver2_val);
/* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER2,
(rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_PHY_IDENTIFIER,
(phy_identifier | (1<<9)));
@@ -3105,72 +4240,34 @@ static u8 bnx2x_bcm8727_set_limiting_mode(struct link_params *params,
return 0;
}
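The ROM_VER2 update above is a plain read-modify-write: the high byte keeps the ROM version while the low byte takes the EDC mode. As a standalone expression (a sketch, with a hypothetical helper name):

    #include <stdint.h>

    /* Keep the MSB 8 bits, fold edc_mode into the LSB 8 bits. */
    static uint16_t merge_edc_mode(uint16_t rom_ver2, uint16_t edc_mode)
    {
        return (uint16_t)((rom_ver2 & 0xff00) | (edc_mode & 0x00ff));
    }
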
-
-static u8 bnx2x_wait_for_sfp_module_initialized(struct link_params *params)
+static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u32 action)
{
- u8 val;
struct bnx2x *bp = params->bp;
- u16 timeout;
- /* Initialization time after hot-plug may take up to 300ms for some
- phys type ( e.g. JDSU ) */
- for (timeout = 0; timeout < 60; timeout++) {
- if (bnx2x_read_sfp_module_eeprom(params, 1, 1, &val)
- == 0) {
- DP(NETIF_MSG_LINK, "SFP+ module initialization "
- "took %d ms\n", timeout * 5);
- return 0;
- }
- msleep(5);
- }
- return -EINVAL;
-}
-static void bnx2x_8727_power_module(struct bnx2x *bp,
- struct link_params *params,
- u8 ext_phy_addr, u8 is_power_up) {
- /* Make sure GPIOs are not using for LED mode */
- u16 val;
- u8 port = params->port;
- /*
- * In the GPIO register, bit 4 is use to detemine if the GPIOs are
- * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
- * output
- * Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0
- * Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1
- * where the 1st bit is the over-current(only input), and 2nd bit is
- * for power( only output )
- */
-
- /*
- * In case of NOC feature is disabled and power is up, set GPIO control
- * as input to enable listening of over-current indication
- */
-
- if (!(params->feature_config_flags &
- FEATURE_CONFIG_BCM8727_NOC) && is_power_up)
- val = (1<<4);
- else
- /*
- * Set GPIO control to OUTPUT, and set the power bit
- * to according to the is_power_up
- */
- val = ((!(is_power_up)) << 1);
-
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_GPIO_CTRL,
- val);
+ switch (action) {
+ case DISABLE_TX:
+ bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+ break;
+ case ENABLE_TX:
+ if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
+ bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
+ action);
+ return;
+ }
}
-static u8 bnx2x_sfp_module_detection(struct link_params *params)
+static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 edc_mode;
u8 rc = 0;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
+
u32 val = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
port_feature_config[params->port].config));
@@ -3178,10 +4275,10 @@ static u8 bnx2x_sfp_module_detection(struct link_params *params)
DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n",
params->port);
- if (bnx2x_get_edc_mode(params, &edc_mode) != 0) {
+ if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) {
DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
return -EINVAL;
- } else if (bnx2x_verify_sfp_module(params) !=
+ } else if (bnx2x_verify_sfp_module(phy, params) !=
0) {
/* check SFP+ module compatibility */
DP(NETIF_MSG_LINK, "Module verification failed!!\n");
@@ -3190,13 +4287,12 @@ static u8 bnx2x_sfp_module_detection(struct link_params *params)
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
MISC_REGISTERS_GPIO_HIGH,
params->port);
- if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) &&
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) &&
((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) {
/* Shutdown SFP+ module */
DP(NETIF_MSG_LINK, "Shutdown SFP+ module!!\n");
- bnx2x_8727_power_module(bp, params,
- ext_phy_addr, 0);
+ bnx2x_8727_power_module(bp, phy, 0);
return rc;
}
} else {
@@ -3208,15 +4304,15 @@ static u8 bnx2x_sfp_module_detection(struct link_params *params)
}
/* power up the SFP module */
- if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
- bnx2x_8727_power_module(bp, params, ext_phy_addr, 1);
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
+ bnx2x_8727_power_module(bp, phy, 1);
/* Check and set limiting mode / LRM mode on 8726.
On 8727 it is done automatically */
- if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
- bnx2x_bcm8726_set_limiting_mode(params, edc_mode);
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
+ bnx2x_8726_set_limiting_mode(bp, phy, edc_mode);
else
- bnx2x_bcm8727_set_limiting_mode(params, edc_mode);
+ bnx2x_8727_set_limiting_mode(bp, phy, edc_mode);
/*
* Enable transmit for this module if the module is approved, or
* if unapproved modules should also enable the Tx laser
@@ -3224,11 +4320,9 @@ static u8 bnx2x_sfp_module_detection(struct link_params *params)
if (rc == 0 ||
(val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
- bnx2x_sfp_set_transmitter(bp, params->port,
- ext_phy_type, ext_phy_addr, 1);
+ bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
else
- bnx2x_sfp_set_transmitter(bp, params->port,
- ext_phy_type, ext_phy_addr, 0);
+ bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
return rc;
}
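Condensed, the transmitter decision at the end of the detection flow above enables the laser when the module verified cleanly, or when board policy does not insist on disabling the laser for unapproved modules. A sketch of just that predicate (hypothetical helper, derived from the rc/enforcement check above):

    #include <stdbool.h>

    /* rc == 0 means the module passed verification. */
    static bool tx_should_enable(int rc, bool policy_disable_tx_laser)
    {
        return rc == 0 || !policy_disable_tx_laser;
    }
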
@@ -3236,6 +4330,7 @@ static u8 bnx2x_sfp_module_detection(struct link_params *params)
void bnx2x_handle_module_detect_int(struct link_params *params)
{
struct bnx2x *bp = params->bp;
+ struct bnx2x_phy *phy = &params->phy[EXT_PHY1];
u32 gpio_val;
u8 port = params->port;
@@ -3245,1349 +4340,587 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
params->port);
/* Get current gpio val reflecting module plugged in/out */
- gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port);
+ gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port);
/* Call the handling function in case module is detected */
if (gpio_val == 0) {
bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
- MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
- port);
+ MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
+ port);
- if (bnx2x_wait_for_sfp_module_initialized(params) ==
- 0)
- bnx2x_sfp_module_detection(params);
+ if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
+ bnx2x_sfp_module_detection(phy, params);
else
DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
} else {
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
-
- u32 ext_phy_type =
- XGXS_EXT_PHY_TYPE(params->ext_phy_config);
u32 val = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
port_feature_config[params->port].
config));
bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
- MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
- port);
+ MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
+ port);
/* Module was plugged out. */
/* Disable transmit for this module */
if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
- bnx2x_sfp_set_transmitter(bp, params->port,
- ext_phy_type, ext_phy_addr, 0);
+ bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
}
}
-static void bnx2x_bcm807x_force_10G(struct link_params *params)
+/******************************************************************/
+/* common BCM8706/BCM8726 PHY SECTION */
+/******************************************************************/
+static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
+ u8 link_up = 0;
+ u16 val1, val2, rx_sd, pcs_status;
struct bnx2x *bp = params->bp;
- u8 port = params->port;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-
- /* Force KR or KX */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- 0x2040);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_10G_CTRL2,
- 0x000b);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_BCM_CTRL,
- 0x0000);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CTRL,
- 0x0000);
-}
-
-static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params)
-{
- struct bnx2x *bp = params->bp;
- u8 port = params->port;
- u16 val;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-
- bnx2x_cl45_read(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8073_CHIP_REV, &val);
-
- if (val == 0) {
- /* Mustn't set low power mode in 8073 A0 */
- return;
+ DP(NETIF_MSG_LINK, "XGXS 8706/8726\n");
+ /* Clear RX Alarm*/
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &val2);
+ /* clear LASI indication*/
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val2);
+ DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x--> 0x%x\n", val1, val2);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &pcs_status);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
+
+ DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
+ " link_status 0x%x\n", rx_sd, pcs_status, val2);
+ /* link is up if both bit 0 of pmd_rx_sd and
+ * bit 0 of pcs_status are set, or if the autoneg
+ * bit 1 is set
+ */
+ link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
+ if (link_up) {
+ if (val2 & (1<<1))
+ vars->line_speed = SPEED_1000;
+ else
+ vars->line_speed = SPEED_10000;
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
}
-
- /* Disable PLL sequencer (use read-modify-write to clear bit 13) */
- bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD,
- MDIO_XS_PLL_SEQUENCER, &val);
- val &= ~(1<<13);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
-
- /* PLL controls */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD, 0x805E, 0x1077);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD, 0x805D, 0x0000);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD, 0x805C, 0x030B);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD, 0x805B, 0x1240);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD, 0x805A, 0x2490);
-
- /* Tx Controls */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD, 0x80A7, 0x0C74);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD, 0x80A6, 0x9041);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD, 0x80A5, 0x4640);
-
- /* Rx Controls */
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD, 0x80FE, 0x01C4);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD, 0x80FD, 0x9249);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD, 0x80FC, 0x2015);
-
- /* Enable PLL sequencer (use read-modify-write to set bit 13) */
- bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD,
- MDIO_XS_PLL_SEQUENCER, &val);
- val |= (1<<13);
- bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
- MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
+ return link_up;
}
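As the comment above spells out, the shared 8706/8726 status routine declares link when both PMD signal-detect and PCS status assert bit 0 (the 10G path), or when the autoneg link-status bit 1 is set (the 1G path). The same predicate standalone, as a sketch with a hypothetical helper name:

    #include <stdbool.h>
    #include <stdint.h>

    static bool xgxs_8706_8726_link_up(uint16_t rx_sd, uint16_t pcs_status,
                                       uint16_t an_link_status)
    {
        return (rx_sd & pcs_status & 0x1) || (an_link_status & (1 << 1));
    }
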
-static void bnx2x_8073_set_pause_cl37(struct link_params *params,
- struct link_vars *vars)
+/******************************************************************/
+/* BCM8706 PHY SECTION */
+/******************************************************************/
+static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
+ u16 cnt, val;
struct bnx2x *bp = params->bp;
- u16 cl37_val;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_FC_LD, &cl37_val);
-
- cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
- /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ /* HW reset */
+ bnx2x_ext_phy_hw_reset(bp, params->port);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
+ bnx2x_wait_reset_complete(bp, phy);
- if ((vars->ieee_fc &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) {
- cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
+ /* Wait until fw is loaded */
+ for (cnt = 0; cnt < 100; cnt++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val);
+ if (val)
+ break;
+ msleep(10);
}
- if ((vars->ieee_fc &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
- cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt);
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+ u8 i;
+ u16 reg;
+ for (i = 0; i < 4; i++) {
+ reg = MDIO_XS_8706_REG_BANK_RX0 +
+ i*(MDIO_XS_8706_REG_BANK_RX1 -
+ MDIO_XS_8706_REG_BANK_RX0);
+ bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, reg, &val);
+ /* Clear first 3 bits of the control */
+ val &= ~0x7;
+ /* Set control bits according to configuration */
+ val |= (phy->rx_preemphasis[i] & 0x7);
+ DP(NETIF_MSG_LINK, "Setting RX Equalizer to BCM8706"
+ " reg 0x%x <-- val 0x%x\n", reg, val);
+ bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, reg, val);
+ }
}
- if ((vars->ieee_fc &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
- cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ /* Force speed */
+ if (phy->req_line_speed == SPEED_10000) {
+ DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n");
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_DIGITAL_CTRL, 0x400);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 1);
+ } else {
+ /* Force 1Gbps using autoneg with 1G advertisement */
+
+ /* Allow CL37 through CL73 */
+ DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
+
+ /* Enable Full-Duplex advertisement on CL37 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LP, 0x0020);
+ /* Enable CL37 AN */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
+ /* 1G support */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_ADV, (1<<5));
+
+ /* Enable clause 73 AN */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
+ 0x0400);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL,
+ 0x0004);
}
- DP(NETIF_MSG_LINK,
- "Ext phy AN advertize cl37 0x%x\n", cl37_val);
+ bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
+ return 0;
+}
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_FC_LD, cl37_val);
- msleep(500);
+static u8 bnx2x_8706_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ return bnx2x_8706_8726_read_status(phy, params, vars);
}
-static void bnx2x_ext_phy_set_pause(struct link_params *params,
- struct link_vars *vars)
+/******************************************************************/
+/* BCM8726 PHY SECTION */
+/******************************************************************/
+static void bnx2x_8726_config_loopback(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
- u16 val;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-
- /* read modify write pause advertizing */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV_PAUSE, &val);
-
- val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
-
- /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
-
- if ((vars->ieee_fc &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
- val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
- }
- if ((vars->ieee_fc &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
- val |=
- MDIO_AN_REG_ADV_PAUSE_PAUSE;
- }
- DP(NETIF_MSG_LINK,
- "Ext phy AN advertize 0x%x\n", val);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV_PAUSE, val);
+ DP(NETIF_MSG_LINK, "PMA/PMD ext_phy_loopback: 8726\n");
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0001);
}
-static void bnx2x_set_preemphasis(struct link_params *params)
+
+static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
+ struct link_params *params)
{
- u16 bank, i = 0;
struct bnx2x *bp = params->bp;
+ /* Need to wait 100ms after reset */
+ msleep(100);
- for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
- bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
- bank,
- MDIO_RX0_RX_EQ_BOOST,
- params->xgxs_config_rx[i]);
- }
-
- for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
- bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
- CL45_WR_OVER_CL22(bp, params->port,
- params->phy_addr,
- bank,
- MDIO_TX0_TX_DRIVER,
- params->xgxs_config_tx[i]);
- }
-}
+ /* Micro controller re-boot */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x018B);
+ /* Set soft reset */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
-static void bnx2x_8481_set_led4(struct link_params *params,
- u32 ext_phy_type, u8 ext_phy_addr)
-{
- struct bnx2x *bp = params->bp;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0001);
- /* PHYC_CTL_LED_CTL */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LINK_SIGNAL, 0xa482);
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
- /* Unmask LED4 for 10G link */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
+ /* wait for 150ms for microcode load */
+ msleep(150);
+
+ /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_SIGNAL_MASK, (1<<6));
- /* 'Interrupt Mask' */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- 0xFFFB, 0xFFFD);
-}
-static void bnx2x_8481_set_legacy_led_mode(struct link_params *params,
- u32 ext_phy_type, u8 ext_phy_addr)
-{
- struct bnx2x *bp = params->bp;
+ MDIO_PMA_REG_MISC_CTRL1, 0x0000);
- /* LED1 (10G Link): Disable LED1 when 10/100/1000 link */
- /* LED2 (1G/100/10 Link): Enable LED2 when 10/100/1000 link) */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_LEGACY_SHADOW,
- (1<<15) | (0xd << 10) | (0xc<<4) | 0xe);
+ msleep(200);
+ bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
}
-static void bnx2x_8481_set_10G_led_mode(struct link_params *params,
- u32 ext_phy_type, u8 ext_phy_addr)
+static u8 bnx2x_8726_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 val1;
-
- /* LED1 (10G Link) */
- /* Enable continuse based on source 7(10G-link) */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LINK_SIGNAL,
- &val1);
- /* Set bit 2 to 0, and bits [1:0] to 10 */
- val1 &= ~((1<<0) | (1<<2) | (1<<7)); /* Clear bits 0,2,7*/
- val1 |= ((1<<1) | (1<<6)); /* Set bit 1, 6 */
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LINK_SIGNAL,
- val1);
-
- /* Unmask LED1 for 10G link */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK,
- &val1);
- /* Set bit 2 to 0, and bits [1:0] to 10 */
- val1 |= (1<<7);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK,
- val1);
-
- /* LED2 (1G/100/10G Link) */
- /* Mask LED2 for 10G link */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED2_MASK,
- 0);
-
- /* Unmask LED3 for 10G link */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_MASK,
- 0x6);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_BLINK,
- 0);
+ u8 link_up = bnx2x_8706_8726_read_status(phy, params, vars);
+ if (link_up) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
+ &val1);
+ if (val1 & (1<<15)) {
+ DP(NETIF_MSG_LINK, "Tx is disabled\n");
+ link_up = 0;
+ vars->line_speed = 0;
+ }
+ }
+ return link_up;
}
-static void bnx2x_init_internal_phy(struct link_params *params,
- struct link_vars *vars,
- u8 enable_cl73)
+static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
+ u32 val;
+ u32 swap_val, swap_override, aeu_gpio_mask, offset;
+ DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
+ /* Restore normal power mode*/
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
- if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
- if ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
- (params->feature_config_flags &
- FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
- bnx2x_set_preemphasis(params);
-
- /* forced speed requested? */
- if (vars->line_speed != SPEED_AUTO_NEG ||
- ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
- params->loopback_mode == LOOPBACK_EXT)) {
- DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
-
- /* disable autoneg */
- bnx2x_set_autoneg(params, vars, 0);
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
+ bnx2x_wait_reset_complete(bp, phy);
+
+ bnx2x_8726_external_rom_boot(phy, params);
+
+ /* Need to call module detection on initialization since
+ the module detection triggered by actual module
+ insertion might occur before the driver is loaded, and when
+ the driver is loaded, it resets all registers, including the
+ transmitter */
+ bnx2x_sfp_module_detection(phy, params);
+
+ if (phy->req_line_speed == SPEED_1000) {
+ DP(NETIF_MSG_LINK, "Setting 1G force\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x5);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
+ 0x400);
+ } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+ DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
+ /* Set Flow control */
+ bnx2x_ext_phy_set_pause(params, phy, vars);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_ADV, 0x20);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, 0x0020);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+ /* Enable RX-ALARM control to receive
+ interrupt for 1G speed change */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x4);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
+ 0x400);
+
+ } else { /* Default 10G. Set only LASI control */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 1);
+ }
- /* program speed and duplex */
- bnx2x_program_serdes(params, vars);
+ /* Set TX PreEmphasis if needed */
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+ DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x,"
+ "TX_CTRL2 0x%x\n",
+ phy->tx_preemphasis[0],
+ phy->tx_preemphasis[1]);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8726_TX_CTRL1,
+ phy->tx_preemphasis[0]);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8726_TX_CTRL2,
+ phy->tx_preemphasis[1]);
+ }
- } else { /* AN_mode */
- DP(NETIF_MSG_LINK, "not SGMII, AN\n");
+ /* Set GPIO3 to trigger SFP+ module insertion/removal */
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
+ MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port);
- /* AN enabled */
- bnx2x_set_brcm_cl37_advertisment(params);
+ /* The GPIO should be swapped if the swap register is set and active */
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
- /* program duplex & pause advertisement (for aneg) */
- bnx2x_set_ieee_aneg_advertisment(params,
- vars->ieee_fc);
+ /* Select function upon port-swap configuration */
+ if (params->port == 0) {
+ offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
+ aeu_gpio_mask = (swap_val && swap_override) ?
+ AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
+ AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
+ } else {
+ offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
+ aeu_gpio_mask = (swap_val && swap_override) ?
+ AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
+ AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
+ }
+ val = REG_RD(bp, offset);
+ /* add GPIO3 to group */
+ val |= aeu_gpio_mask;
+ REG_WR(bp, offset, val);
+ return 0;
- /* enable autoneg */
- bnx2x_set_autoneg(params, vars, enable_cl73);
+}
- /* enable and restart AN */
- bnx2x_restart_autoneg(params, enable_cl73);
- }
+static void bnx2x_8726_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "bnx2x_8726_link_reset port %d\n", params->port);
+ /* Set serial boot control for external load */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL, 0x0001);
+}
- } else { /* SGMII mode */
- DP(NETIF_MSG_LINK, "SGMII\n");
+/******************************************************************/
+/* BCM8727 PHY SECTION */
+/******************************************************************/
- bnx2x_initialize_sgmii_process(params, vars);
+static void bnx2x_8727_set_link_led(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode)
+{
+ struct bnx2x *bp = params->bp;
+ u16 led_mode_bitmask = 0;
+ u16 gpio_pins_bitmask = 0;
+ u16 val;
+ /* Only the NOC flavor requires setting the LED specifically */
+ if (!(phy->flags & FLAGS_NOC))
+ return;
+ switch (mode) {
+ case LED_MODE_FRONT_PANEL_OFF:
+ case LED_MODE_OFF:
+ led_mode_bitmask = 0;
+ gpio_pins_bitmask = 0x03;
+ break;
+ case LED_MODE_ON:
+ led_mode_bitmask = 0;
+ gpio_pins_bitmask = 0x02;
+ break;
+ case LED_MODE_OPER:
+ led_mode_bitmask = 0x60;
+ gpio_pins_bitmask = 0x11;
+ break;
}
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+ &val);
+ val &= 0xff8f;
+ val |= led_mode_bitmask;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+ val);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_GPIO_CTRL,
+ &val);
+ val &= 0xffe0;
+ val |= gpio_pins_bitmask;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_GPIO_CTRL,
+ val);
+}
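Both register updates above are masked-field writes: 0xff8f clears the LED-mode field (bits 4-6) before OR-ing in led_mode_bitmask, and 0xffe0 clears the pin field (bits 0-4) before OR-ing in gpio_pins_bitmask. The generic shape, as a sketch with a hypothetical helper:

    #include <stdint.h>

    /* Clear the bits in field_mask, then set value within that field. */
    static uint16_t set_reg_field(uint16_t reg, uint16_t field_mask,
                                  uint16_t value)
    {
        return (uint16_t)((reg & ~field_mask) | (value & field_mask));
    }

    /* e.g. set_reg_field(val, 0x0070, led_mode_bitmask)
     * is equivalent to: val &= 0xff8f; val |= led_mode_bitmask; */
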
+static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
+ struct link_params *params) {
+ u32 swap_val, swap_override;
+ u8 port;
+ /**
+ * The PHY reset is controlled by GPIO 1. Fake the port number
+ * to cancel the swap done in set_gpio()
+ */
+ struct bnx2x *bp = params->bp;
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ port = (swap_val && swap_override) ^ 1;
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
}
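Per the comment above, the port computed here pre-compensates for the swap that bnx2x_set_gpio() applies when both NIG swap registers are set, so the write always lands on the pin wired to the PHY reset. The two cases, as a sketch with a hypothetical helper name:

    #include <stdbool.h>
    #include <stdint.h>

    /* swap_active = (NIG_REG_PORT_SWAP && NIG_REG_STRAP_OVERRIDE).
     * Swap off -> drive port 1; swap on -> drive port 0, which
     * set_gpio() then swaps back to the intended pin. */
    static uint8_t fake_port_for_phy_reset(bool swap_active)
    {
        return (uint8_t)(swap_active ^ 1);
    }
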
-static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
+static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
+ u16 tmp1, val, mod_abs;
+ u16 rx_alarm_ctrl_val;
+ u16 lasi_ctrl_val;
struct bnx2x *bp = params->bp;
- u32 ext_phy_type;
- u8 ext_phy_addr;
- u16 cnt;
- u16 ctrl = 0;
- u16 val = 0;
- u8 rc = 0;
-
- if (vars->phy_flags & PHY_XGXS_FLAG) {
- ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
-
- ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
- /* Make sure that the soft reset is off (expect for the 8072:
- * due to the lock, it will be done inside the specific
- * handling)
+ /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
+
+ bnx2x_wait_reset_complete(bp, phy);
+ rx_alarm_ctrl_val = (1<<2) | (1<<5);
+ lasi_ctrl_val = 0x0004;
+
+ DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
+ /* enable LASI */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
+ rx_alarm_ctrl_val);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, lasi_ctrl_val);
+
+ /* Initially configure MOD_ABS to interrupt when
+ module is present (bit 8) */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
+ /* Set EDC off by setting OPTXLOS signal input to low (bit 9).
+ When the EDC is off it locks onto a reference clock and
+ avoids becoming 'lost'. */
+ mod_abs &= ~(1<<8);
+ if (!(phy->flags & FLAGS_NOC))
+ mod_abs &= ~(1<<9);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+
+
+ /* Make MOD_ABS give interrupt on change */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+ &val);
+ val |= (1<<12);
+ if (phy->flags & FLAGS_NOC)
+ val |= (3<<5);
+
+ /**
+ * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
+ * status which reflects SFP+ module over-current
+ */
+ if (!(phy->flags & FLAGS_NOC))
+ val &= 0xff8f; /* Reset bits 4-6 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, val);
+
+ bnx2x_8727_power_module(bp, phy, 1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &tmp1);
+
+ /* Set option 1G speed */
+ if (phy->req_line_speed == SPEED_1000) {
+ DP(NETIF_MSG_LINK, "Setting 1G force\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
+ DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
+ /**
+ * Power down the XAUI until link is up in case of dual-media
+ * and 1G
*/
- if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
- (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
- (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) &&
- (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) &&
- (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) {
- /* Wait for soft reset to get cleared upto 1 sec */
- for (cnt = 0; cnt < 1000; cnt++) {
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL, &ctrl);
- if (!(ctrl & (1<<15)))
- break;
- msleep(1);
- }
- DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n",
- ctrl, cnt);
- }
-
- switch (ext_phy_type) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
- DP(NETIF_MSG_LINK, "XGXS 8705\n");
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL,
- 0x8288);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- 0x7fbf);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CMU_PLL_BYPASS,
- 0x0100);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_WIS_DEVAD,
- MDIO_WIS_REG_LASI_CNTL, 0x1);
-
- /* BCM8705 doesn't have microcode, hence the 0 */
- bnx2x_save_spirom_version(bp, params->port,
- params->shmem_base, 0);
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
- /* Wait until fw is loaded */
- for (cnt = 0; cnt < 100; cnt++) {
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER1, &val);
- if (val)
- break;
- msleep(10);
- }
- DP(NETIF_MSG_LINK, "XGXS 8706 is initialized "
- "after %d ms\n", cnt);
- if ((params->feature_config_flags &
- FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
- u8 i;
- u16 reg;
- for (i = 0; i < 4; i++) {
- reg = MDIO_XS_8706_REG_BANK_RX0 +
- i*(MDIO_XS_8706_REG_BANK_RX1 -
- MDIO_XS_8706_REG_BANK_RX0);
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_XS_DEVAD,
- reg, &val);
- /* Clear first 3 bits of the control */
- val &= ~0x7;
- /* Set control bits according to
- configuation */
- val |= (params->xgxs_config_rx[i] &
- 0x7);
- DP(NETIF_MSG_LINK, "Setting RX"
- "Equalizer to BCM8706 reg 0x%x"
- " <-- val 0x%x\n", reg, val);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_XS_DEVAD,
- reg, val);
- }
- }
- /* Force speed */
- if (params->req_line_speed == SPEED_10000) {
- DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n");
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_DIGITAL_CTRL,
- 0x400);
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_CTRL, 1);
- } else {
- /* Force 1Gbps using autoneg with 1G
- advertisment */
-
- /* Allow CL37 through CL73 */
- DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_CL73,
- 0x040c);
-
- /* Enable Full-Duplex advertisment on CL37 */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_FC_LP,
- 0x0020);
- /* Enable CL37 AN */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_AN,
- 0x1000);
- /* 1G support */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV, (1<<5));
-
- /* Enable clause 73 AN */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CTRL,
- 0x1200);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM_CTRL,
- 0x0400);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_CTRL, 0x0004);
-
- }
- bnx2x_save_bcm_spirom_ver(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- params->shmem_base);
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
- bnx2x_bcm8726_external_rom_boot(params);
-
- /* Need to call module detected on initialization since
- the module detection triggered by actual module
- insertion might occur before driver is loaded, and when
- driver is loaded, it reset all registers, including the
- transmitter */
- bnx2x_sfp_module_detection(params);
-
- /* Set Flow control */
- bnx2x_ext_phy_set_pause(params, vars);
- if (params->req_line_speed == SPEED_1000) {
- DP(NETIF_MSG_LINK, "Setting 1G force\n");
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL, 0x40);
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_10G_CTRL2, 0xD);
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_CTRL, 0x5);
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM_CTRL,
- 0x400);
- } else if ((params->req_line_speed ==
- SPEED_AUTO_NEG) &&
- ((params->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) {
- DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV, 0x20);
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_CL73, 0x040c);
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_FC_LD, 0x0020);
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_AN, 0x1000);
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_AN_DEVAD,
- MDIO_AN_REG_CTRL, 0x1200);
-
- /* Enable RX-ALARM control to receive
- interrupt for 1G speed change */
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_CTRL, 0x4);
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM_CTRL,
- 0x400);
-
- } else { /* Default 10G. Set only LASI control */
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_CTRL, 1);
- }
-
- /* Set TX PreEmphasis if needed */
- if ((params->feature_config_flags &
- FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
- DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x,"
- "TX_CTRL2 0x%x\n",
- params->xgxs_config_tx[0],
- params->xgxs_config_tx[1]);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8726_TX_CTRL1,
- params->xgxs_config_tx[0]);
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8726_TX_CTRL2,
- params->xgxs_config_tx[1]);
- }
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
- {
- u16 tmp1;
- u16 rx_alarm_ctrl_val;
- u16 lasi_ctrl_val;
- if (ext_phy_type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) {
- rx_alarm_ctrl_val = 0x400;
- lasi_ctrl_val = 0x0004;
- } else {
- rx_alarm_ctrl_val = (1<<2);
- lasi_ctrl_val = 0x0004;
- }
-
- /* enable LASI */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM_CTRL,
- rx_alarm_ctrl_val);
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_CTRL,
- lasi_ctrl_val);
-
- bnx2x_8073_set_pause_cl37(params, vars);
-
- if (ext_phy_type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072)
- bnx2x_bcm8072_external_rom_boot(params);
- else
- /* In case of 8073 with long xaui lines,
- don't set the 8073 xaui low power*/
- bnx2x_bcm8073_set_xaui_low_power_mode(params);
-
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_M8051_MSGOUT_REG,
- &tmp1);
-
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM, &tmp1);
-
- DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1):"
- "0x%x\n", tmp1);
-
- /* If this is forced speed, set to KR or KX
- * (all other are not supported)
- */
- if (params->loopback_mode == LOOPBACK_EXT) {
- bnx2x_bcm807x_force_10G(params);
- DP(NETIF_MSG_LINK,
- "Forced speed 10G on 807X\n");
- break;
- } else {
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_BCM_CTRL,
- 0x0002);
- }
- if (params->req_line_speed != SPEED_AUTO_NEG) {
- if (params->req_line_speed == SPEED_10000) {
- val = (1<<7);
- } else if (params->req_line_speed ==
- SPEED_2500) {
- val = (1<<5);
- /* Note that 2.5G works only
- when used with 1G advertisment */
- } else
- val = (1<<5);
- } else {
-
- val = 0;
- if (params->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
- val |= (1<<7);
-
- /* Note that 2.5G works only when
- used with 1G advertisment */
- if (params->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
- val |= (1<<5);
- DP(NETIF_MSG_LINK,
- "807x autoneg val = 0x%x\n", val);
- }
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV, val);
- if (ext_phy_type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8073_2_5G, &tmp1);
-
- if (((params->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
- (params->req_line_speed ==
- SPEED_AUTO_NEG)) ||
- (params->req_line_speed ==
- SPEED_2500)) {
- u16 phy_ver;
- /* Allow 2.5G for A1 and above */
- bnx2x_cl45_read(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr,
+ if (DUAL_MEDIA(params)) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, &val);
+ val |= (3<<10);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8073_CHIP_REV, &phy_ver);
- DP(NETIF_MSG_LINK, "Add 2.5G\n");
- if (phy_ver > 0)
- tmp1 |= 1;
- else
- tmp1 &= 0xfffe;
- } else {
- DP(NETIF_MSG_LINK, "Disable 2.5G\n");
- tmp1 &= 0xfffe;
- }
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8073_2_5G, tmp1);
- }
-
- /* Add support for CL37 (passive mode) II */
-
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_FC_LD,
- &tmp1);
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_FC_LD, (tmp1 |
- ((params->req_duplex == DUPLEX_FULL) ?
- 0x20 : 0x40)));
-
- /* Add support for CL37 (passive mode) III */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_AN, 0x1000);
-
- if (ext_phy_type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
- /* The SNR will improve about 2db by changing
- BW and FEE main tap. Rest commands are executed
- after link is up*/
- /*Change FFE main cursor to 5 in EDC register*/
- if (bnx2x_8073_is_snr_needed(params))
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_EDC_FFE_MAIN,
- 0xFB0C);
-
- /* Enable FEC (Forware Error Correction)
- Request in the AN */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV2, &tmp1);
-
- tmp1 |= (1<<15);
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV2, tmp1);
-
- }
-
- bnx2x_ext_phy_set_pause(params, vars);
-
- /* Restart autoneg */
- msleep(500);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CTRL, 0x1200);
- DP(NETIF_MSG_LINK, "807x Autoneg Restart: "
- "Advertise 1G=%x, 10G=%x\n",
- ((val & (1<<5)) > 0),
- ((val & (1<<7)) > 0));
- break;
- }
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
- {
- u16 tmp1;
- u16 rx_alarm_ctrl_val;
- u16 lasi_ctrl_val;
-
- /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
-
- u16 mod_abs;
- rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
- lasi_ctrl_val = 0x0004;
-
- DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
- /* enable LASI */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM_CTRL,
- rx_alarm_ctrl_val);
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_CTRL,
- lasi_ctrl_val);
-
- /* Initially configure MOD_ABS to interrupt when
- module is presence( bit 8) */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
- /* Set EDC off by setting OPTXLOS signal input to low
- (bit 9).
- When the EDC is off it locks onto a reference clock and
- avoids becoming 'lost'.*/
- mod_abs &= ~((1<<8) | (1<<9));
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
-
- /* Make MOD_ABS give interrupt on change */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_PCS_OPT_CTRL,
- &val);
- val |= (1<<12);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_PCS_OPT_CTRL,
- val);
-
- /* Set 8727 GPIOs to input to allow reading from the
- 8727 GPIO0 status which reflect SFP+ module
- over-current */
-
- bnx2x_cl45_read(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_PCS_OPT_CTRL,
- &val);
- val &= 0xff8f; /* Reset bits 4-6 */
- bnx2x_cl45_write(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_PCS_OPT_CTRL,
- val);
-
- bnx2x_8727_power_module(bp, params, ext_phy_addr, 1);
- bnx2x_bcm8073_set_xaui_low_power_mode(params);
-
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_M8051_MSGOUT_REG,
- &tmp1);
-
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM, &tmp1);
-
- /* Set option 1G speed */
- if (params->req_line_speed == SPEED_1000) {
-
- DP(NETIF_MSG_LINK, "Setting 1G force\n");
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL, 0x40);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_10G_CTRL2, 0xD);
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_10G_CTRL2, &tmp1);
- DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
-
- } else if ((params->req_line_speed ==
- SPEED_AUTO_NEG) &&
- ((params->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) {
-
- DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_AN_DEVAD,
- MDIO_PMA_REG_8727_MISC_CTRL, 0);
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_AN_DEVAD,
- MDIO_AN_REG_CL37_AN, 0x1300);
- } else {
- /* Since the 8727 has only single reset pin,
- need to set the 10G registers although it is
- default */
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_AN_DEVAD,
- MDIO_AN_REG_CTRL, 0x0020);
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_AN_DEVAD,
- 0x7, 0x0100);
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL, 0x2040);
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_10G_CTRL2, 0x0008);
- }
-
- /* Set 2-wire transfer rate of SFP+ module EEPROM
- * to 100Khz since some DACs(direct attached cables) do
- * not work at 400Khz.
- */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
- 0xa001);
-
- /* Set TX PreEmphasis if needed */
- if ((params->feature_config_flags &
- FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
- DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x,"
- "TX_CTRL2 0x%x\n",
- params->xgxs_config_tx[0],
- params->xgxs_config_tx[1]);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_TX_CTRL1,
- params->xgxs_config_tx[0]);
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_TX_CTRL2,
- params->xgxs_config_tx[1]);
- }
-
- break;
- }
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
- {
- u16 fw_ver1, fw_ver2;
- DP(NETIF_MSG_LINK,
- "Setting the SFX7101 LASI indication\n");
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_CTRL, 0x1);
- DP(NETIF_MSG_LINK,
- "Setting the SFX7101 LED to blink on traffic\n");
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_7107_LED_CNTL, (1<<3));
-
- bnx2x_ext_phy_set_pause(params, vars);
- /* Restart autoneg */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CTRL, &val);
- val |= 0x200;
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CTRL, val);
-
- /* Save spirom version */
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_7101_VER1, &fw_ver1);
-
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_7101_VER2, &fw_ver2);
-
- bnx2x_save_spirom_version(params->bp, params->port,
- params->shmem_base,
- (u32)(fw_ver1<<16 | fw_ver2));
- break;
+ MDIO_PMA_REG_8727_PCS_GP, val);
+ }
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
- /* This phy uses the NIG latch mechanism since link
- indication arrives through its LED4 and not via
- its LASI signal, so we get steady signal
- instead of clear on read */
- bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
- 1 << NIG_LATCH_BC_ENABLE_MI_INT);
-
- bnx2x_cl45_write(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL, 0x0000);
-
- bnx2x_8481_set_led4(params, ext_phy_type, ext_phy_addr);
- if (params->req_line_speed == SPEED_AUTO_NEG) {
-
- u16 autoneg_val, an_1000_val, an_10_100_val;
- /* set 1000 speed advertisement */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_1000T_CTRL,
- &an_1000_val);
-
- if (params->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) {
- an_1000_val |= (1<<8);
- if (params->req_duplex == DUPLEX_FULL)
- an_1000_val |= (1<<9);
- DP(NETIF_MSG_LINK, "Advertising 1G\n");
- } else
- an_1000_val &= ~((1<<8) | (1<<9));
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_1000T_CTRL,
- an_1000_val);
-
- /* set 100 speed advertisement */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_LEGACY_AN_ADV,
- &an_10_100_val);
-
- if (params->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
- an_10_100_val |= (1<<7);
- if (params->req_duplex == DUPLEX_FULL)
- an_10_100_val |= (1<<8);
- DP(NETIF_MSG_LINK,
- "Advertising 100M\n");
- } else
- an_10_100_val &= ~((1<<7) | (1<<8));
-
- /* set 10 speed advertisement */
- if (params->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
- an_10_100_val |= (1<<5);
- if (params->req_duplex == DUPLEX_FULL)
- an_10_100_val |= (1<<6);
- DP(NETIF_MSG_LINK, "Advertising 10M\n");
- }
- else
- an_10_100_val &= ~((1<<5) | (1<<6));
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_LEGACY_AN_ADV,
- an_10_100_val);
-
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_LEGACY_MII_CTRL,
- &autoneg_val);
-
- /* Disable forced speed */
- autoneg_val &= ~(1<<6|1<<13);
-
- /* Enable autoneg and restart autoneg
- for legacy speeds */
- autoneg_val |= (1<<9|1<<12);
-
- if (params->req_duplex == DUPLEX_FULL)
- autoneg_val |= (1<<8);
- else
- autoneg_val &= ~(1<<8);
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_LEGACY_MII_CTRL,
- autoneg_val);
-
- if (params->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
- DP(NETIF_MSG_LINK, "Advertising 10G\n");
- /* Restart autoneg for 10G*/
-
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CTRL, 0x3200);
- }
- } else {
- /* Force speed */
- u16 autoneg_ctrl, pma_ctrl;
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_LEGACY_MII_CTRL,
- &autoneg_ctrl);
-
- /* Disable autoneg */
- autoneg_ctrl &= ~(1<<12);
-
- /* Set 1000 force */
- switch (params->req_line_speed) {
- case SPEED_10000:
- DP(NETIF_MSG_LINK,
- "Unable to set 10G force !\n");
- break;
- case SPEED_1000:
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- &pma_ctrl);
- autoneg_ctrl &= ~(1<<13);
- autoneg_ctrl |= (1<<6);
- pma_ctrl &= ~(1<<13);
- pma_ctrl |= (1<<6);
- DP(NETIF_MSG_LINK,
- "Setting 1000M force\n");
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- pma_ctrl);
- break;
- case SPEED_100:
- autoneg_ctrl |= (1<<13);
- autoneg_ctrl &= ~(1<<6);
- DP(NETIF_MSG_LINK,
- "Setting 100M force\n");
- break;
- case SPEED_10:
- autoneg_ctrl &= ~(1<<13);
- autoneg_ctrl &= ~(1<<6);
- DP(NETIF_MSG_LINK,
- "Setting 10M force\n");
- break;
- }
-
- /* Duplex mode */
- if (params->req_duplex == DUPLEX_FULL) {
- autoneg_ctrl |= (1<<8);
- DP(NETIF_MSG_LINK,
- "Setting full duplex\n");
- } else
- autoneg_ctrl &= ~(1<<8);
-
- /* Update autoneg ctrl and pma ctrl */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_LEGACY_MII_CTRL,
- autoneg_ctrl);
- }
-
- /* Save spirom version */
- bnx2x_save_8481_spirom_version(bp, params->port,
- ext_phy_addr,
- params->shmem_base);
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
- DP(NETIF_MSG_LINK,
- "XGXS PHY Failure detected 0x%x\n",
- params->ext_phy_config);
- rc = -EINVAL;
- break;
- default:
- DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
- params->ext_phy_config);
- rc = -EINVAL;
- break;
- }
-
- } else { /* SerDes */
-
- ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
- switch (ext_phy_type) {
- case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
- DP(NETIF_MSG_LINK, "SerDes Direct\n");
- break;
-
- case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
- DP(NETIF_MSG_LINK, "SerDes 5482\n");
- break;
+ } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+
+ DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
+ } else {
+ /**
+ * Since the 8727 has only a single reset pin, the 10G
+ * registers need to be set even though they are the defaults
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
+ 0x0020);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
+ 0x0008);
+ }
- default:
- DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
- params->ext_phy_config);
- break;
- }
+ /* Set 2-wire transfer rate of SFP+ module EEPROM
+ * to 100 kHz, since some DACs (direct attach cables) do
+ * not work at 400 kHz.
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
+ 0xa001);
+
+ /* Set TX PreEmphasis if needed */
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+ DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x\n",
+ phy->tx_preemphasis[0],
+ phy->tx_preemphasis[1]);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL1,
+ phy->tx_preemphasis[0]);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL2,
+ phy->tx_preemphasis[1]);
}
- return rc;
+
+ return 0;
}
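
The 8727 init code above repeats one clause-45 MDIO idiom throughout: read a
register, mask and set a few bits, write it back (see the
MDIO_PMA_REG_8727_PCS_GP sequence in the 1G branch). A minimal sketch of that
read-modify-write idiom, assuming only the bnx2x_cl45_read/bnx2x_cl45_write
signatures used in this patch; cl45_rmw itself is a hypothetical helper, not
part of the patch:

static void cl45_rmw(struct bnx2x *bp, struct bnx2x_phy *phy,
		     u8 devad, u16 reg, u16 clear_bits, u16 set_bits)
{
	u16 val;

	/* Read the current value, drop clear_bits, merge in set_bits */
	bnx2x_cl45_read(bp, phy, devad, reg, &val);
	val = (val & ~clear_bits) | set_bits;
	bnx2x_cl45_write(bp, phy, devad, reg, val);
}

With it, the XAUI power-down in the dual-media 1G branch above would read
cl45_rmw(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_GP, 0, 3<<10);
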
-static void bnx2x_8727_handle_mod_abs(struct link_params *params)
+static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 mod_abs, rx_alarm_status;
- u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
u32 val = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
port_feature_config[params->port].
config));
- bnx2x_cl45_read(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
if (mod_abs & (1<<8)) {
@@ -4602,18 +4935,16 @@ static void bnx2x_8727_handle_mod_abs(struct link_params *params)
(bit 9).
When the EDC is off it locks onto a reference clock and
avoids becoming 'lost'.*/
- mod_abs &= ~((1<<8)|(1<<9));
- bnx2x_cl45_write(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
+ mod_abs &= ~(1<<8);
+ if (!(phy->flags & FLAGS_NOC))
+ mod_abs &= ~(1<<9);
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
/* Clear RX alarm since it stays up as long as
the mod_abs wasn't changed */
- bnx2x_cl45_read(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
+ bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
@@ -4630,33 +4961,28 @@ static void bnx2x_8727_handle_mod_abs(struct link_params *params)
2. Restore the default polarity of the OPRXLOS signal and
this signal will then correctly indicate the presence or
absence of the Rx signal. (bit 9) */
- mod_abs |= ((1<<8)|(1<<9));
- bnx2x_cl45_write(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+ mod_abs |= (1<<8);
+ if (!(phy->flags & FLAGS_NOC))
+ mod_abs |= (1<<9);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
/* Clear RX alarm since it stays up as long as
the mod_abs wasn't changed. This needs to be done
before calling the module detection, otherwise it will clear
the link update alarm */
- bnx2x_cl45_read(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
- bnx2x_sfp_set_transmitter(bp, params->port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr, 0);
+ bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
- if (bnx2x_wait_for_sfp_module_initialized(params)
- == 0)
- bnx2x_sfp_module_detection(params);
+ if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
+ bnx2x_sfp_module_detection(phy, params);
else
DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
}
@@ -4667,1298 +4993,1711 @@ static void bnx2x_8727_handle_mod_abs(struct link_params *params)
module plugged in/out */
}
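
bnx2x_8727_handle_mod_abs above drives module detection off two
PHY_IDENTIFIER bits: bit 8 arms the MOD_ABS interrupt for the next
presence transition, and bit 9 flips the OPRXLOS polarity (left untouched on
FLAGS_NOC boards). A hedged sketch of just that mask computation
(mod_abs_adjust is a hypothetical helper; the real handler also clears
RX_ALARM and re-runs SFP module detection):

static u16 mod_abs_adjust(u16 mod_abs, int module_present, int is_noc)
{
	u16 bits = (1<<8);		/* MOD_ABS interrupt sense */

	if (!is_noc)
		bits |= (1<<9);		/* OPRXLOS polarity, non-NOC only */

	/* Module present: arm for absence; module absent: arm for insertion */
	return module_present ? (mod_abs | bits) : (mod_abs & ~bits);
}
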
+static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
-static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
- struct link_vars *vars,
- u8 is_mi_int)
{
struct bnx2x *bp = params->bp;
- u32 ext_phy_type;
- u8 ext_phy_addr;
- u16 val1 = 0, val2;
- u16 rx_sd, pcs_status;
- u8 ext_phy_link_up = 0;
- u8 port = params->port;
+ u8 link_up = 0;
+ u16 link_status = 0;
+ u16 rx_alarm_status, lasi_ctrl, val1;
+
+ /* If PHY is not initialized, do not check link status */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL,
+ &lasi_ctrl);
+ if (!lasi_ctrl)
+ return 0;
- if (vars->phy_flags & PHY_XGXS_FLAG) {
- ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
- switch (ext_phy_type) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
- DP(NETIF_MSG_LINK, "XGXS Direct\n");
- ext_phy_link_up = 1;
- break;
+ /* Check the LASI */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM,
+ &rx_alarm_status);
+ vars->line_speed = 0;
+ DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", rx_alarm_status);
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
- DP(NETIF_MSG_LINK, "XGXS 8705\n");
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_WIS_DEVAD,
- MDIO_WIS_REG_LASI_STATUS, &val1);
- DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
-
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_WIS_DEVAD,
- MDIO_WIS_REG_LASI_STATUS, &val1);
- DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
-
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_SD, &rx_sd);
-
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- 1,
- 0xc809, &val1);
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- 1,
- 0xc809, &val1);
-
- DP(NETIF_MSG_LINK, "8705 1.c809 val=0x%x\n", val1);
- ext_phy_link_up = ((rx_sd & 0x1) && (val1 & (1<<9)) &&
- ((val1 & (1<<8)) == 0));
- if (ext_phy_link_up)
- vars->line_speed = SPEED_10000;
- break;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- DP(NETIF_MSG_LINK, "XGXS 8706/8726\n");
- /* Clear RX Alarm*/
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM,
- &val2);
- /* clear LASI indication*/
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS,
- &val1);
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS,
- &val2);
- DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x-->"
- "0x%x\n", val1, val2);
-
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD,
- &rx_sd);
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS,
- &pcs_status);
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS,
- &val2);
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS,
- &val2);
-
- DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x"
- " pcs_status 0x%x 1Gbps link_status 0x%x\n",
- rx_sd, pcs_status, val2);
- /* link is up if both bit 0 of pmd_rx_sd and
- * bit 0 of pcs_status are set, or if the autoneg bit
- 1 is set
- */
- ext_phy_link_up = ((rx_sd & pcs_status & 0x1) ||
- (val2 & (1<<1)));
- if (ext_phy_link_up) {
- if (ext_phy_type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) {
- /* If transmitter is disabled,
- ignore false link up indication */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- &val1);
- if (val1 & (1<<15)) {
- DP(NETIF_MSG_LINK, "Tx is "
- "disabled\n");
- ext_phy_link_up = 0;
- break;
- }
- }
- if (val2 & (1<<1))
- vars->line_speed = SPEED_1000;
- else
- vars->line_speed = SPEED_10000;
- }
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
- {
- u16 link_status = 0;
- u16 rx_alarm_status;
- /* Check the LASI */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
-
- DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
- rx_alarm_status);
-
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "8727 LASI status 0x%x\n", val1);
- DP(NETIF_MSG_LINK,
- "8727 LASI status 0x%x\n",
- val1);
+ /* Clear MSG-OUT */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
- /* Clear MSG-OUT */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_M8051_MSGOUT_REG,
- &val1);
+ /**
+ * If a module is present, there is a need to check
+ * for over-current
+ */
+ if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) {
+ /* Check over-current using 8727 GPIO0 input */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL,
+ &val1);
+
+ if ((val1 & (1<<8)) == 0) {
+ DP(NETIF_MSG_LINK, "8727 Power fault has been detected"
+ " on port %d\n", params->port);
+ netdev_err(bp->dev, "Error: Power fault on Port %d has"
+ " been detected and the power to "
+ "that SFP+ module has been removed"
+ " to prevent failure of the card."
+ " Please remove the SFP+ module and"
+ " restart the system to clear this"
+ " error.\n",
+ params->port);
/*
- * If a module is present and there is need to check
- * for over current
+ * Disable all RX_ALARMs except for
+ * mod_abs
*/
- if (!(params->feature_config_flags &
- FEATURE_CONFIG_BCM8727_NOC) &&
- !(rx_alarm_status & (1<<5))) {
- /* Check over-current using 8727 GPIO0 input*/
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_GPIO_CTRL,
- &val1);
-
- if ((val1 & (1<<8)) == 0) {
- DP(NETIF_MSG_LINK, "8727 Power fault"
- " has been detected on "
- "port %d\n",
- params->port);
- netdev_err(bp->dev, "Error: Power fault on Port %d has been detected and the power to that SFP+ module has been removed to prevent failure of the card. Please remove the SFP+ module and restart the system to clear this error.\n",
- params->port);
- /*
- * Disable all RX_ALARMs except for
- * mod_abs
- */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM_CTRL,
- (1<<5));
-
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- &val1);
- /* Wait for module_absent_event */
- val1 |= (1<<8);
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- val1);
- /* Clear RX alarm */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM,
- &rx_alarm_status);
- break;
- }
- } /* Over current check */
-
- /* When module absent bit is set, check module */
- if (rx_alarm_status & (1<<5)) {
- bnx2x_8727_handle_mod_abs(params);
- /* Enable all mod_abs and link detection bits */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM_CTRL,
- ((1<<5) | (1<<2)));
- }
-
- /* If transmitter is disabled,
- ignore false link up indication */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- &val1);
- if (val1 & (1<<15)) {
- DP(NETIF_MSG_LINK, "Tx is disabled\n");
- ext_phy_link_up = 0;
- break;
- }
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_RX_ALARM_CTRL, (1<<5));
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
- &link_status);
-
- /* Bits 0..2 --> speed detected,
- bits 13..15--> link is down */
- if ((link_status & (1<<2)) &&
- (!(link_status & (1<<15)))) {
- ext_phy_link_up = 1;
- vars->line_speed = SPEED_10000;
- } else if ((link_status & (1<<0)) &&
- (!(link_status & (1<<13)))) {
- ext_phy_link_up = 1;
- vars->line_speed = SPEED_1000;
- DP(NETIF_MSG_LINK,
- "port %x: External link"
- " up in 1G\n", params->port);
- } else {
- ext_phy_link_up = 0;
- DP(NETIF_MSG_LINK,
- "port %x: External link"
- " is down\n", params->port);
- }
- break;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
+ /* Wait for module_absent_event */
+ val1 |= (1<<8);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, val1);
+ /* Clear RX alarm */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
+ return 0;
}
+ } /* Over current check */
+
+ /* When module absent bit is set, check module */
+ if (rx_alarm_status & (1<<5)) {
+ bnx2x_8727_handle_mod_abs(phy, params);
+ /* Enable all mod_abs and link detection bits */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
+ ((1<<5) | (1<<2)));
+ }
+ DP(NETIF_MSG_LINK, "Enabling 8727 TX laser if SFP is approved\n");
+ bnx2x_8727_specific_func(phy, params, ENABLE_TX);
+ /* If transmitter is disabled, ignore false link up indication */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
+ if (val1 & (1<<15)) {
+ DP(NETIF_MSG_LINK, "Tx is disabled\n");
+ return 0;
+ }
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
- {
- u16 link_status = 0;
- u16 an1000_status = 0;
-
- if (ext_phy_type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) {
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PCS_DEVAD,
- MDIO_PCS_REG_LASI_STATUS, &val1);
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PCS_DEVAD,
- MDIO_PCS_REG_LASI_STATUS, &val2);
- DP(NETIF_MSG_LINK,
- "870x LASI status 0x%x->0x%x\n",
- val1, val2);
- } else {
- /* In 8073, port1 is directed through emac0 and
- * port0 is directed through emac1
- */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_STATUS, &val1);
-
- DP(NETIF_MSG_LINK,
- "8703 LASI status 0x%x\n",
- val1);
- }
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
- /* clear the interrupt LASI status register */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PCS_DEVAD,
- MDIO_PCS_REG_STATUS, &val2);
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PCS_DEVAD,
- MDIO_PCS_REG_STATUS, &val1);
- DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n",
- val2, val1);
- /* Clear MSG-OUT */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_M8051_MSGOUT_REG,
- &val1);
-
- /* Check the LASI */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM, &val2);
-
- DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
-
- /* Check the link status */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PCS_DEVAD,
- MDIO_PCS_REG_STATUS, &val2);
- DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
-
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_STATUS, &val2);
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_STATUS, &val1);
- ext_phy_link_up = ((val1 & 4) == 4);
- DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
- if (ext_phy_type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
-
- if (ext_phy_link_up &&
- ((params->req_line_speed !=
- SPEED_10000))) {
- if (bnx2x_bcm8073_xaui_wa(params)
- != 0) {
- ext_phy_link_up = 0;
- break;
- }
- }
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_LINK_STATUS,
- &an1000_status);
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_LINK_STATUS,
- &an1000_status);
-
- /* Check the link status on 1.1.2 */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_STATUS, &val2);
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_STATUS, &val1);
- DP(NETIF_MSG_LINK, "KR PMA status 0x%x->0x%x,"
- "an_link_status=0x%x\n",
- val2, val1, an1000_status);
-
- ext_phy_link_up = (((val1 & 4) == 4) ||
- (an1000_status & (1<<1)));
- if (ext_phy_link_up &&
- bnx2x_8073_is_snr_needed(params)) {
- /* The SNR will improve about 2dbby
- changing the BW and FEE main tap.*/
-
- /* The 1st write to change FFE main
- tap is set before restart AN */
- /* Change PLL Bandwidth in EDC
- register */
- bnx2x_cl45_write(bp, port, ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PLL_BANDWIDTH,
- 0x26BC);
-
- /* Change CDR Bandwidth in EDC
- register */
- bnx2x_cl45_write(bp, port, ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CDR_BANDWIDTH,
- 0x0333);
- }
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
- &link_status);
-
- /* Bits 0..2 --> speed detected,
- bits 13..15--> link is down */
- if ((link_status & (1<<2)) &&
- (!(link_status & (1<<15)))) {
- ext_phy_link_up = 1;
- vars->line_speed = SPEED_10000;
- DP(NETIF_MSG_LINK,
- "port %x: External link"
- " up in 10G\n", params->port);
- } else if ((link_status & (1<<1)) &&
- (!(link_status & (1<<14)))) {
- ext_phy_link_up = 1;
- vars->line_speed = SPEED_2500;
- DP(NETIF_MSG_LINK,
- "port %x: External link"
- " up in 2.5G\n", params->port);
- } else if ((link_status & (1<<0)) &&
- (!(link_status & (1<<13)))) {
- ext_phy_link_up = 1;
- vars->line_speed = SPEED_1000;
- DP(NETIF_MSG_LINK,
- "port %x: External link"
- " up in 1G\n", params->port);
- } else {
- ext_phy_link_up = 0;
- DP(NETIF_MSG_LINK,
- "port %x: External link"
- " is down\n", params->port);
- }
- } else {
- /* See if 1G link is up for the 8072 */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_LINK_STATUS,
- &an1000_status);
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_LINK_STATUS,
- &an1000_status);
- if (an1000_status & (1<<1)) {
- ext_phy_link_up = 1;
- vars->line_speed = SPEED_1000;
- DP(NETIF_MSG_LINK,
- "port %x: External link"
- " up in 1G\n", params->port);
- } else if (ext_phy_link_up) {
- ext_phy_link_up = 1;
- vars->line_speed = SPEED_10000;
- DP(NETIF_MSG_LINK,
- "port %x: External link"
- " up in 10G\n", params->port);
- }
- }
+ /* Bits 0..2 --> speed detected,
+ bits 13..15 --> link is down */
+ if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
+ link_up = 1;
+ vars->line_speed = SPEED_10000;
+ } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
+ link_up = 1;
+ vars->line_speed = SPEED_1000;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n",
+ params->port);
+ } else {
+ link_up = 0;
+ DP(NETIF_MSG_LINK, "port %x: External link is down\n",
+ params->port);
+ }
+ if (link_up)
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+
+ if ((DUAL_MEDIA(params)) &&
+ (phy->req_line_speed == SPEED_1000)) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, &val1);
+ /**
+ * In case of a dual-media board and 1G, power up the XAUI side;
+ * otherwise power it down. For 10G it is done automatically.
+ */
+ if (link_up)
+ val1 &= ~(3<<10);
+ else
+ val1 |= (3<<10);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, val1);
+ }
+ return link_up;
+}
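
The 8727 read-status path decodes MDIO_PMA_REG_8073_SPEED_LINK_STATUS per the
comment above: bits 0..2 report the detected speed and bits 13..15 flag that
speed as down, pairwise. A minimal sketch of that decode under the same bit
layout (decode_8073_speed is hypothetical; the 2.5G pair comes from the 8073
handling elsewhere in this file):

/* Returns the detected speed in Mbps, or 0 when the link is down */
static u16 decode_8073_speed(u16 link_status)
{
	if ((link_status & (1<<2)) && !(link_status & (1<<15)))
		return 10000;
	if ((link_status & (1<<1)) && !(link_status & (1<<14)))
		return 2500;	/* 8073 only */
	if ((link_status & (1<<0)) && !(link_status & (1<<13)))
		return 1000;
	return 0;
}
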
+static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ /* Disable Transmitter */
+ bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+ /* Clear LASI */
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0);
- break;
- }
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_STATUS, &val2);
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LASI_STATUS, &val1);
- DP(NETIF_MSG_LINK,
- "10G-base-T LASI status 0x%x->0x%x\n",
- val2, val1);
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_STATUS, &val2);
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_STATUS, &val1);
- DP(NETIF_MSG_LINK,
- "10G-base-T PMA status 0x%x->0x%x\n",
- val2, val1);
- ext_phy_link_up = ((val1 & 4) == 4);
- /* if link is up
- * print the AN outcome of the SFX7101 PHY
- */
- if (ext_phy_link_up) {
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_MASTER_STATUS,
- &val2);
- vars->line_speed = SPEED_10000;
- DP(NETIF_MSG_LINK,
- "SFX7101 AN status 0x%x->Master=%x\n",
- val2,
- (val2 & (1<<14)));
- }
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
- /* Check 10G-BaseT link status */
- /* Check PMD signal ok */
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- 0xFFFA,
- &val1);
- bnx2x_cl45_read(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_PMD_SIGNAL,
- &val2);
- DP(NETIF_MSG_LINK, "PMD_SIGNAL 1.a811 = 0x%x\n", val2);
-
- /* Check link 10G */
- if (val2 & (1<<11)) {
- vars->line_speed = SPEED_10000;
- ext_phy_link_up = 1;
- bnx2x_8481_set_10G_led_mode(params,
- ext_phy_type,
- ext_phy_addr);
- } else { /* Check Legacy speed link */
- u16 legacy_status, legacy_speed;
-
- /* Enable expansion register 0x42
- (Operation mode status) */
- bnx2x_cl45_write(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_EXPANSION_REG_ACCESS,
- 0xf42);
-
- /* Get legacy speed operation status */
- bnx2x_cl45_read(bp, params->port,
- ext_phy_type,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8481_EXPANSION_REG_RD_RW,
- &legacy_status);
-
- DP(NETIF_MSG_LINK, "Legacy speed status"
- " = 0x%x\n", legacy_status);
- ext_phy_link_up = ((legacy_status & (1<<11))
- == (1<<11));
- if (ext_phy_link_up) {
- legacy_speed = (legacy_status & (3<<9));
- if (legacy_speed == (0<<9))
- vars->line_speed = SPEED_10;
- else if (legacy_speed == (1<<9))
- vars->line_speed =
- SPEED_100;
- else if (legacy_speed == (2<<9))
- vars->line_speed =
- SPEED_1000;
- else /* Should not happen */
- vars->line_speed = 0;
-
- if (legacy_status & (1<<8))
- vars->duplex = DUPLEX_FULL;
- else
- vars->duplex = DUPLEX_HALF;
-
- DP(NETIF_MSG_LINK, "Link is up "
- "in %dMbps, is_duplex_full"
- "= %d\n",
- vars->line_speed,
- (vars->duplex == DUPLEX_FULL));
- bnx2x_8481_set_legacy_led_mode(params,
- ext_phy_type,
- ext_phy_addr);
- }
- }
- break;
- default:
- DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
- params->ext_phy_config);
- ext_phy_link_up = 0;
- break;
- }
- /* Set SGMII mode for external phy */
- if (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
- if (vars->line_speed < SPEED_1000)
- vars->phy_flags |= PHY_SGMII_FLAG;
- else
- vars->phy_flags &= ~PHY_SGMII_FLAG;
- }
+}
- } else { /* SerDes */
- ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
- switch (ext_phy_type) {
- case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
- DP(NETIF_MSG_LINK, "SerDes Direct\n");
- ext_phy_link_up = 1;
- break;
+/******************************************************************/
+/* BCM8481/BCM84823/BCM84833 PHY SECTION */
+/******************************************************************/
+static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u16 val, fw_ver1, fw_ver2, cnt;
+ struct bnx2x *bp = params->bp;
- case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
- DP(NETIF_MSG_LINK, "SerDes 5482\n");
- ext_phy_link_up = 1;
+ /* For the 32-bit registers in 848xx, access via the MDIO2ARM interface. */
+ /* (1) set register 0xc200_0014 (SPI_BRIDGE_CTRL_2) to 0x03000000 */
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
+
+ for (cnt = 0; cnt < 100; cnt++) {
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+ if (val & 1)
break;
+ udelay(5);
+ }
+ if (cnt == 100) {
+ DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(1)\n");
+ bnx2x_save_spirom_version(bp, params->port, 0,
+ phy->ver_addr);
+ return;
+ }
- default:
- DP(NETIF_MSG_LINK,
- "BAD SerDes ext_phy_config 0x%x\n",
- params->ext_phy_config);
- ext_phy_link_up = 0;
+
+ /* (2) read register 0xc200_0000 (SPI_FW_STATUS) */
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
+ for (cnt = 0; cnt < 100; cnt++) {
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+ if (val & 1)
break;
- }
+ udelay(5);
}
+ if (cnt == 100) {
+ DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(2)\n");
+ bnx2x_save_spirom_version(bp, params->port, 0,
+ phy->ver_addr);
+ return;
+ }
+
+ /* lower 16 bits of the register SPI_FW_STATUS */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
+ /* upper 16 bits of register SPI_FW_STATUS */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
- return ext_phy_link_up;
+ bnx2x_save_spirom_version(bp, params->port, (fw_ver2<<16) | fw_ver1,
+ phy->ver_addr);
}
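
bnx2x_save_848xx_spirom_version above spells out the whole MDIO2ARM protocol:
0xA819/0xA81A take the low/high halves of the 32-bit ARM address, 0xA81B/0xA81C
carry the data, 0xA817 takes the command (0x0009 for write, 0x000A for read,
as used above), and bit 0 of 0xA818 signals completion. A hedged generic
32-bit read built from that same sequence (mdio2arm_read32 is hypothetical;
the register roles are inferred from the code above):

static int mdio2arm_read32(struct bnx2x *bp, struct bnx2x_phy *phy,
			   u32 addr, u32 *data)
{
	u16 val, lo, hi, cnt;

	/* Latch the 32-bit target address, low half first */
	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, (u16)addr);
	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, (u16)(addr >> 16));
	/* Issue the read command, then poll the done bit */
	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
	for (cnt = 0; cnt < 100; cnt++) {
		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
		if (val & 1)
			break;
		udelay(5);
	}
	if (cnt == 100)
		return -1;	/* timed out, mirroring the handling above */
	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &lo);
	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &hi);
	*data = ((u32)hi << 16) | lo;
	return 0;
}
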
-static void bnx2x_link_int_enable(struct link_params *params)
+static void bnx2x_848xx_set_led(struct bnx2x *bp,
+ struct bnx2x_phy *phy)
{
- u8 port = params->port;
- u32 ext_phy_type;
- u32 mask;
- struct bnx2x *bp = params->bp;
+ u16 val;
- /* setting the status to report on link up
- for either XGXS or SerDes */
+ /* PHYC_CTL_LED_CTL */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+ val &= 0xFE00;
+ val |= 0x0092;
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL, val);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x80);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x18);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x0040);
- if (params->switch_cfg == SWITCH_CFG_10G) {
- mask = (NIG_MASK_XGXS0_LINK10G |
- NIG_MASK_XGXS0_LINK_STATUS);
- DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
- ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
- if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
- (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
- (ext_phy_type !=
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) {
- mask |= NIG_MASK_MI_INT;
- DP(NETIF_MSG_LINK, "enabled external phy int\n");
- }
+ /* 'Interrupt Mask' */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ 0xFFFB, 0xFFFD);
+}
- } else { /* SerDes */
- mask = NIG_MASK_SERDES0_LINK_STATUS;
- DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
- ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
- if ((ext_phy_type !=
- PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
- (ext_phy_type !=
- PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN)) {
- mask |= NIG_MASK_MI_INT;
- DP(NETIF_MSG_LINK, "enabled external phy int\n");
- }
+static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 autoneg_val, an_1000_val, an_10_100_val;
+ bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
+ 1 << NIG_LATCH_BC_ENABLE_MI_INT);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000);
+
+ bnx2x_848xx_set_led(bp, phy);
+
+ /* set 1000 speed advertisement */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
+ &an_1000_val);
+
+ bnx2x_ext_phy_set_pause(params, phy, vars);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_AN_ADV,
+ &an_10_100_val);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_MII_CTRL,
+ &autoneg_val);
+ /* Disable forced speed */
+ autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13));
+ an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8));
+
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (phy->req_line_speed == SPEED_1000)) {
+ an_1000_val |= (1<<8);
+ autoneg_val |= (1<<9 | 1<<12);
+ if (phy->req_duplex == DUPLEX_FULL)
+ an_1000_val |= (1<<9);
+ DP(NETIF_MSG_LINK, "Advertising 1G\n");
+ } else
+ an_1000_val &= ~((1<<8) | (1<<9));
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
+ an_1000_val);
+
+ /* set 100 speed advertisement */
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) {
+ an_10_100_val |= (1<<7);
+ /* Enable autoneg and restart autoneg for legacy speeds */
+ autoneg_val |= (1<<9 | 1<<12);
+
+ if (phy->req_duplex == DUPLEX_FULL)
+ an_10_100_val |= (1<<8);
+ DP(NETIF_MSG_LINK, "Advertising 100M\n");
+ }
+ /* set 10 speed advertisement */
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
+ an_10_100_val |= (1<<5);
+ autoneg_val |= (1<<9 | 1<<12);
+ if (phy->req_duplex == DUPLEX_FULL)
+ an_10_100_val |= (1<<6);
+ DP(NETIF_MSG_LINK, "Advertising 10M\n");
}
- bnx2x_bits_en(bp,
- NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
- mask);
- DP(NETIF_MSG_LINK, "port %x, is_xgxs %x, int_status 0x%x\n", port,
- (params->switch_cfg == SWITCH_CFG_10G),
- REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
- DP(NETIF_MSG_LINK, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x\n",
- REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
- REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
- REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS+port*0x3c));
- DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
- REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
- REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
-}
+ /* Only 10/100 are allowed to work in FORCE mode */
+ if (phy->req_line_speed == SPEED_100) {
+ autoneg_val |= (1<<13);
+ /* Enable AUTO-MDIX when autoneg is disabled */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
+ (1<<15 | 1<<9 | 7<<0));
+ DP(NETIF_MSG_LINK, "Setting 100M force\n");
+ }
+ if (phy->req_line_speed == SPEED_10) {
+ /* Enable AUTO-MDIX when autoneg is disabled */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
+ (1<<15 | 1<<9 | 7<<0));
+ DP(NETIF_MSG_LINK, "Setting 10M force\n");
+ }
-static void bnx2x_8481_rearm_latch_signal(struct bnx2x *bp, u8 port,
- u8 is_mi_int)
-{
- u32 latch_status = 0, is_mi_int_status;
- /* Disable the MI INT ( external phy int )
- * by writing 1 to the status register. Link down indication
- * is high-active-signal, so in this case we need to write the
- * status to clear the XOR
- */
- /* Read Latched signals */
- latch_status = REG_RD(bp,
- NIG_REG_LATCH_STATUS_0 + port*8);
- is_mi_int_status = REG_RD(bp,
- NIG_REG_STATUS_INTERRUPT_PORT0 + port*4);
- DP(NETIF_MSG_LINK, "original_signal = 0x%x, nig_status = 0x%x,"
- "latch_status = 0x%x\n",
- is_mi_int, is_mi_int_status, latch_status);
- /* Handle only those with latched-signal=up.*/
- if (latch_status & 1) {
- /* For all latched-signal=up,Write original_signal to status */
- if (is_mi_int)
- bnx2x_bits_en(bp,
- NIG_REG_STATUS_INTERRUPT_PORT0
- + port*4,
- NIG_STATUS_EMAC0_MI_INT);
- else
- bnx2x_bits_dis(bp,
- NIG_REG_STATUS_INTERRUPT_PORT0
- + port*4,
- NIG_STATUS_EMAC0_MI_INT);
- /* For all latched-signal=up : Re-Arm Latch signals */
- REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
- (latch_status & 0xfffe) | (latch_status & 1));
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_AN_ADV,
+ an_10_100_val);
+
+ if (phy->req_duplex == DUPLEX_FULL)
+ autoneg_val |= (1<<8);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val);
+
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
+ (phy->req_line_speed == SPEED_10000)) {
+ DP(NETIF_MSG_LINK, "Advertising 10G\n");
+ /* Restart autoneg for 10G */
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
+ 0x3200);
+ } else if (phy->req_line_speed != SPEED_10 &&
+ phy->req_line_speed != SPEED_100) {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+ 1);
}
+ /* Save spirom version */
+ bnx2x_save_848xx_spirom_version(phy, params);
+
+ return 0;
}
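
The legacy-speed handling in bnx2x_848xx_cmn_config_init manipulates the
standard MII control (BMCR) bits by number; spelled out as named constants
for reference (the names are illustrative, not from the patch):

#define BMCR_SPEED_MSB		(1<<6)	/* with bit 13: 00 = 10M, 01 = 100M, 10 = 1000M */
#define BMCR_DUPLEX_FULL	(1<<8)
#define BMCR_AN_RESTART		(1<<9)
#define BMCR_AN_ENABLE		(1<<12)
#define BMCR_SPEED_LSB		(1<<13)

So autoneg_val |= (1<<9 | 1<<12) above enables and restarts autoneg, while
the SPEED_100 force path sets only the speed-select LSB (bit 13).
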
-/*
- * link management
- */
-static void bnx2x_link_int_ack(struct link_params *params,
- struct link_vars *vars, u8 is_10g,
- u8 is_mi_int)
+
+static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u8 port = params->port;
+ /* Restore normal power mode */
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
- /* first reset all status
- * we assume only one line will be change at a time */
- bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
- (NIG_STATUS_XGXS0_LINK10G |
- NIG_STATUS_XGXS0_LINK_STATUS |
- NIG_STATUS_SERDES0_LINK_STATUS));
- if ((XGXS_EXT_PHY_TYPE(params->ext_phy_config)
- == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481) ||
- (XGXS_EXT_PHY_TYPE(params->ext_phy_config)
- == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823)) {
- bnx2x_8481_rearm_latch_signal(bp, port, is_mi_int);
- }
- if (vars->phy_link_up) {
- if (is_10g) {
- /* Disable the 10G link interrupt
- * by writing 1 to the status register
- */
- DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
- bnx2x_bits_en(bp,
- NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
- NIG_STATUS_XGXS0_LINK10G);
+ /* HW reset */
+ bnx2x_ext_phy_hw_reset(bp, params->port);
- } else if (params->switch_cfg == SWITCH_CFG_10G) {
- /* Disable the link interrupt
- * by writing 1 to the relevant lane
- * in the status register
- */
- u32 ser_lane = ((params->lane_config &
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
+ return bnx2x_848xx_cmn_config_init(phy, params, vars);
+}
- DP(NETIF_MSG_LINK, "%d speed XGXS phy link up\n",
- vars->line_speed);
- bnx2x_bits_en(bp,
- NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
- ((1 << ser_lane) <<
- NIG_STATUS_XGXS0_LINK_STATUS_SIZE));
+static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port, initialize = 1;
+ u16 val;
+ u16 temp;
+ u32 actual_phy_selection;
+ u8 rc = 0;
- } else { /* SerDes */
- DP(NETIF_MSG_LINK, "SerDes phy link up\n");
- /* Disable the link interrupt
- * by writing 1 to the status register
- */
- bnx2x_bits_en(bp,
- NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
- NIG_STATUS_SERDES0_LINK_STATUS);
- }
+ /* This is just for MDIO_CTL_REG_84823_MEDIA register. */
- } else { /* link_down */
+ msleep(1);
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ port);
+ msleep(200); /* 100 is not enough */
+
+ /* BCM84823 requires that the XGXS link come up first at 10G
+ for normal behavior */
+ temp = vars->line_speed;
+ vars->line_speed = SPEED_10000;
+ bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
+ bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
+ vars->line_speed = temp;
+
+ /* Set dual-media configuration according to configuration */
+
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_MEDIA, &val);
+ val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
+ MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
+ MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
+ MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK |
+ MDIO_CTL_REG_84823_MEDIA_FIBER_1G);
+ val |= MDIO_CTL_REG_84823_CTRL_MAC_XFI |
+ MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L;
+
+ actual_phy_selection = bnx2x_phy_selection(params);
+
+ switch (actual_phy_selection) {
+ case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+ /* Do nothing. Essentially this is like the priority copper */
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+ val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+ val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+ /* Do nothing here. The first PHY won't be initialized at all */
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+ val |= MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN;
+ initialize = 0;
+ break;
}
+ if (params->phy[EXT_PHY2].req_line_speed == SPEED_1000)
+ val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
+
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_MEDIA, val);
+ DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
+ params->multi_phy_config, val);
+
+ if (initialize)
+ rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
+ else
+ bnx2x_save_848xx_spirom_version(phy, params);
+ return rc;
}
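+/*
+ * Summary of the dual-media selection in bnx2x_848x3_config_init() above:
+ * the base media-control value always carries CTRL_MAC_XFI |
+ * MEDIA_LINE_XAUI_L, and the phy-selection switch only adds one bit:
+ *   FIRST_PHY_PRIORITY  -> MEDIA_PRIORITY_COPPER
+ *   SECOND_PHY_PRIORITY -> MEDIA_PRIORITY_FIBER
+ *   SECOND_PHY          -> MEDIA_COPPER_CORE_DOWN (and common init is skipped)
+ * HARDWARE_DEFAULT and FIRST_PHY leave the value unchanged.
+ */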
-static u8 bnx2x_format_ver(u32 num, u8 *str, u16 len)
+static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
- u8 *str_ptr = str;
- u32 mask = 0xf0000000;
- u8 shift = 8*4;
- u8 digit;
- if (len < 10) {
- /* Need more than 10chars for this format */
- *str_ptr = '\0';
- return -EINVAL;
- }
- while (shift > 0) {
+ struct bnx2x *bp = params->bp;
+ u16 val, val1, val2;
+ u8 link_up = 0;
+
+ /* Check 10G-BaseT link status */
+ /* Check PMD signal ok */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, 0xFFFA, &val1);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL,
+ &val2);
+ DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2);
+
+ /* Check link 10G */
+ if (val2 & (1<<11)) {
+ vars->line_speed = SPEED_10000;
+ link_up = 1;
+ bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
+ } else { /* Check Legacy speed link */
+ u16 legacy_status, legacy_speed;
+
+ /* Enable expansion register 0x42 (Operation mode status) */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf42);
+
+ /* Get legacy speed operation status */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_EXPANSION_REG_RD_RW,
+ &legacy_status);
+
+ DP(NETIF_MSG_LINK, "Legacy speed status"
+ " = 0x%x\n", legacy_status);
+ link_up = ((legacy_status & (1<<11)) == (1<<11));
+ if (link_up) {
+ legacy_speed = (legacy_status & (3<<9));
+ if (legacy_speed == (0<<9))
+ vars->line_speed = SPEED_10;
+ else if (legacy_speed == (1<<9))
+ vars->line_speed = SPEED_100;
+ else if (legacy_speed == (2<<9))
+ vars->line_speed = SPEED_1000;
+ else /* Should not happen */
+ vars->line_speed = 0;
- shift -= 4;
- digit = ((num & mask) >> shift);
- if (digit < 0xa)
- *str_ptr = digit + '0';
- else
- *str_ptr = digit - 0xa + 'a';
- str_ptr++;
- mask = mask >> 4;
- if (shift == 4*4) {
- *str_ptr = ':';
- str_ptr++;
+ if (legacy_status & (1<<8))
+ vars->duplex = DUPLEX_FULL;
+ else
+ vars->duplex = DUPLEX_HALF;
+
+ DP(NETIF_MSG_LINK, "Link is up in %dMbps,"
+ " is_duplex_full= %d\n", vars->line_speed,
+ (vars->duplex == DUPLEX_FULL));
+ /* Check legacy speed AN resolution */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_MII_STATUS,
+ &val);
+ if (val & (1<<5))
+ vars->link_status |=
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_LEGACY_AN_EXPANSION,
+ &val);
+ if ((val & (1<<0)) == 0)
+ vars->link_status |=
+ LINK_STATUS_PARALLEL_DETECTION_USED;
}
}
- *str_ptr = '\0';
- return 0;
+ if (link_up) {
+ DP(NETIF_MSG_LINK, "BCM84823: link speed is %d\n",
+ vars->line_speed);
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ }
+
+ return link_up;
}
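+/*
+ * For reference, the legacy_status decode used above (bit layout as used
+ * by this function, not taken from a BCM848xx datasheet):
+ *   bit 11    - link up
+ *   bits 10:9 - speed (0 = 10M, 1 = 100M, 2 = 1G)
+ *   bit 8     - full duplex
+ * e.g. legacy_status = 0x0B00 -> link up, 100Mbps, full duplex.
+ */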
-u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
- u8 *version, u16 len)
+static u8 bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
{
- struct bnx2x *bp;
- u32 ext_phy_type = 0;
- u32 spirom_ver = 0;
- u8 status;
+ u8 status = 0;
+ u32 spirom_ver;
+ spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F);
+ status = bnx2x_format_ver(spirom_ver, str, len);
+ return status;
+}
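+/*
+ * Worked example of the repacking above (raw_ver value chosen purely for
+ * illustration): bits 11:7 become the upper 16 bits and bits 6:0 the lower
+ * 16 bits, so for raw_ver = 0x0183:
+ *   ((0x0183 & 0xF80) >> 7) << 16 | (0x0183 & 0x7F) = 0x00030003
+ */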
- if (version == NULL || params == NULL)
- return -EINVAL;
- bp = params->bp;
+static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
+ bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
+}
- spirom_ver = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region,
- port_mb[params->port].ext_phy_fw_version));
+static void bnx2x_8481_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ bnx2x_cl45_write(params->bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
+ bnx2x_cl45_write(params->bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1);
+}
- status = 0;
- /* reset the returned value to zero */
- ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
- switch (ext_phy_type) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ port);
+}
- if (len < 5)
- return -EINVAL;
+static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val;
- version[0] = (spirom_ver & 0xFF);
- version[1] = (spirom_ver & 0xFF00) >> 8;
- version[2] = (spirom_ver & 0xFF0000) >> 16;
- version[3] = (spirom_ver & 0xFF000000) >> 24;
- version[4] = '\0';
+ switch (mode) {
+ case LED_MODE_OFF:
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- status = bnx2x_format_ver(spirom_ver, version, len);
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
- spirom_ver = ((spirom_ver & 0xF80) >> 7) << 16 |
- (spirom_ver & 0x7F);
- status = bnx2x_format_ver(spirom_ver, version, len);
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
- version[0] = '\0';
- break;
+ DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OFF\n", params->port);
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
- DP(NETIF_MSG_LINK, "bnx2x_get_ext_phy_fw_version:"
- " type is FAILURE!\n");
- status = -EINVAL;
+ if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY1) {
+
+ /* Set LED masks */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x0);
+
+ } else {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
+ }
break;
+ case LED_MODE_FRONT_PANEL_OFF:
- default:
+ DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE FRONT PANEL OFF\n",
+ params->port);
+
+ if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY1) {
+
+ /* Set LED masks */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x0);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x20);
+
+ } else {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
+ }
break;
- }
- return status;
-}
+ case LED_MODE_ON:
-static void bnx2x_set_xgxs_loopback(struct link_params *params,
- struct link_vars *vars,
- u8 is_10g)
-{
- u8 port = params->port;
- struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE ON\n", params->port);
- if (is_10g) {
- u32 md_devad;
+ if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY1) {
+ /* Set control reg */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ &val);
+ val &= 0x8000;
+ val |= 0x2492;
- DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ val);
- /* change the uni_phy_addr in the nig */
- md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
- port*0x18));
+ /* Set LED masks */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
- REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x20);
- bnx2x_cl45_write(bp, port, 0,
- params->phy_addr,
- 5,
- (MDIO_REG_BANK_AER_BLOCK +
- (MDIO_AER_BLOCK_AER_REG & 0xf)),
- 0x2800);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x20);
- bnx2x_cl45_write(bp, port, 0,
- params->phy_addr,
- 5,
- (MDIO_REG_BANK_CL73_IEEEB0 +
- (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
- 0x6041);
- msleep(200);
- /* set aer mmd back */
- bnx2x_set_aer_mmd(params, vars);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x0);
+ } else {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x20);
+ }
+ break;
- /* and md_devad */
- REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
- md_devad);
+ case LED_MODE_OPER:
- } else {
- u16 mii_control;
+ DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OPER\n", params->port);
- DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
+ if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+ SHARED_HW_CFG_LED_EXTPHY1) {
- CL45_RD_OVER_CL22(bp, port,
- params->phy_addr,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- &mii_control);
+ /* Set control reg */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ &val);
+
+ if (!((val &
+ MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
+ >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)){
+ DP(NETIF_MSG_LINK, "Seting LINK_SIGNAL\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ 0xa492);
+ }
- CL45_WR_OVER_CL22(bp, port,
- params->phy_addr,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- (mii_control |
- MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK));
+ /* Set LED masks */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x10);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x80);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x98);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x40);
+
+ } else {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x80);
+ }
+ break;
}
}
+/******************************************************************/
+/* SFX7101 PHY SECTION */
+/******************************************************************/
+static void bnx2x_7101_config_loopback(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ /* SFX7101_XGXS_TEST1 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100);
+}
-
-static void bnx2x_ext_phy_loopback(struct link_params *params)
+static u8 bnx2x_7101_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
+ u16 fw_ver1, fw_ver2, val;
struct bnx2x *bp = params->bp;
- u8 ext_phy_addr;
- u32 ext_phy_type;
+ DP(NETIF_MSG_LINK, "Setting the SFX7101 LASI indication\n");
- if (params->switch_cfg == SWITCH_CFG_10G) {
- ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
- ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- /* CL37 Autoneg Enabled */
- switch (ext_phy_type) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN:
- DP(NETIF_MSG_LINK,
- "ext_phy_loopback: We should not get here\n");
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
- DP(NETIF_MSG_LINK, "ext_phy_loopback: 8705\n");
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
- DP(NETIF_MSG_LINK, "ext_phy_loopback: 8706\n");
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- DP(NETIF_MSG_LINK, "PMA/PMD ext_phy_loopback: 8726\n");
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- 0x0001);
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
- /* SFX7101_XGXS_TEST1 */
- bnx2x_cl45_write(bp, params->port, ext_phy_type,
- ext_phy_addr,
- MDIO_XS_DEVAD,
- MDIO_XS_SFX7101_XGXS_TEST1,
- 0x100);
- DP(NETIF_MSG_LINK,
- "ext_phy_loopback: set ext phy loopback\n");
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
+ /* Restore normal power mode*/
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ /* HW reset */
+ bnx2x_ext_phy_hw_reset(bp, params->port);
+ bnx2x_wait_reset_complete(bp, phy);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x1);
+ DP(NETIF_MSG_LINK, "Setting the SFX7101 LED to blink on traffic\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LED_CNTL, (1<<3));
+
+ bnx2x_ext_phy_set_pause(params, phy, vars);
+ /* Restart autoneg */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, &val);
+ val |= 0x200;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, val);
+
+ /* Save spirom version */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER1, &fw_ver1);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2, &fw_ver2);
+ bnx2x_save_spirom_version(bp, params->port,
+ (u32)(fw_ver1<<16 | fw_ver2), phy->ver_addr);
+ return 0;
+}
- break;
- } /* switch external PHY type */
- } else {
- /* serdes */
- ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
- ext_phy_addr = (params->ext_phy_config &
- PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK)
- >> PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT;
+static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 link_up;
+ u16 val1, val2;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "10G-base-T LASI status 0x%x->0x%x\n",
+ val2, val1);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+ DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
+ val2, val1);
+ link_up = ((val1 & 4) == 4);
+	/* If the link is up, print the AN outcome of the SFX7101 PHY */
+ if (link_up) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
+ &val2);
+ vars->line_speed = SPEED_10000;
+ DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n",
+ val2, (val2 & (1<<14)));
+ bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
}
+ return link_up;
}
-/*
- *------------------------------------------------------------------------
- * bnx2x_override_led_value -
- *
- * Override the led value of the requsted led
- *
- *------------------------------------------------------------------------
- */
-u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port,
- u32 led_idx, u32 value)
+static u8 bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
{
- u32 reg_val;
+ if (*len < 5)
+ return -EINVAL;
+ str[0] = (spirom_ver & 0xFF);
+ str[1] = (spirom_ver & 0xFF00) >> 8;
+ str[2] = (spirom_ver & 0xFF0000) >> 16;
+ str[3] = (spirom_ver & 0xFF000000) >> 24;
+ str[4] = '\0';
+ *len -= 5;
+ return 0;
+}
- /* If port 0 then use EMAC0, else use EMAC1*/
- u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy)
+{
+ u16 val, cnt;
- DP(NETIF_MSG_LINK,
- "bnx2x_override_led_value() port %x led_idx %d value %d\n",
- port, led_idx, value);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET, &val);
- switch (led_idx) {
- case 0: /* 10MB led */
- /* Read the current value of the LED register in
- the EMAC block */
- reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
- /* Set the OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the 10M_OVERRIDE bit,
- otherwise reset it.*/
- reg_val = (value == 1) ? (reg_val | EMAC_LED_10MB_OVERRIDE) :
- (reg_val & ~EMAC_LED_10MB_OVERRIDE);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
- break;
- case 1: /*100MB led */
- /*Read the current value of the LED register in
- the EMAC block */
- reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
- /* Set the OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the 100M_OVERRIDE bit,
- otherwise reset it.*/
- reg_val = (value == 1) ? (reg_val | EMAC_LED_100MB_OVERRIDE) :
- (reg_val & ~EMAC_LED_100MB_OVERRIDE);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
+ for (cnt = 0; cnt < 10; cnt++) {
+ msleep(50);
+ /* Writes a self-clearing reset */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET,
+ (val | (1<<15)));
+ /* Wait for clear */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET, &val);
+
+ if ((val & (1<<15)) == 0)
+ break;
+ }
+}
+
+static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy,
+				struct link_params *params)
+{
+ /* Low power mode is controlled by GPIO 2 */
+ bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+ /* The PHY reset is controlled by GPIO 1 */
+ bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+}
+
+static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode)
+{
+ u16 val = 0;
+ struct bnx2x *bp = params->bp;
+ switch (mode) {
+ case LED_MODE_FRONT_PANEL_OFF:
+ case LED_MODE_OFF:
+ val = 2;
break;
- case 2: /* 1000MB led */
- /* Read the current value of the LED register in the
- EMAC block */
- reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
- /* Set the OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the 1000M_OVERRIDE bit, otherwise
- reset it. */
- reg_val = (value == 1) ? (reg_val | EMAC_LED_1000MB_OVERRIDE) :
- (reg_val & ~EMAC_LED_1000MB_OVERRIDE);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
+ case LED_MODE_ON:
+ val = 1;
break;
- case 3: /* 2500MB led */
- /* Read the current value of the LED register in the
- EMAC block*/
- reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
- /* Set the OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the 2500M_OVERRIDE bit, otherwise
- reset it.*/
- reg_val = (value == 1) ? (reg_val | EMAC_LED_2500MB_OVERRIDE) :
- (reg_val & ~EMAC_LED_2500MB_OVERRIDE);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
+ case LED_MODE_OPER:
+ val = 0;
break;
- case 4: /*10G led */
- if (port == 0) {
- REG_WR(bp, NIG_REG_LED_10G_P0,
- value);
+ }
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7107_LINK_LED_CNTL,
+ val);
+}
+
+/******************************************************************/
+/* STATIC PHY DECLARATION */
+/******************************************************************/
+
+static struct bnx2x_phy phy_null = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN,
+ .addr = 0,
+ .flags = FLAGS_INIT_XGXS_FIRST,
+ .def_md_devad = 0,
+ .reserved = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = 0,
+ .media_type = ETH_PHY_NOT_PRESENT,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)NULL,
+ .read_status = (read_status_t)NULL,
+ .link_reset = (link_reset_t)NULL,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)NULL,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_serdes = {
+ .type = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT,
+ .addr = 0xff,
+ .flags = 0,
+ .def_md_devad = 0,
+ .reserved = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_2500baseX_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_UNSPECIFIED,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_init_serdes,
+ .read_status = (read_status_t)bnx2x_link_settings_status,
+ .link_reset = (link_reset_t)bnx2x_int_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)NULL,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_xgxs = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
+ .addr = 0xff,
+ .flags = 0,
+ .def_md_devad = 0,
+ .reserved = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_2500baseX_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_UNSPECIFIED,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_init_xgxs,
+ .read_status = (read_status_t)bnx2x_link_settings_status,
+ .link_reset = (link_reset_t)bnx2x_int_link_reset,
+ .config_loopback = (config_loopback_t)bnx2x_set_xgxs_loopback,
+ .format_fw_ver = (format_fw_ver_t)NULL,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_7101 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
+ .addr = 0xff,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ,
+ .def_md_devad = 0,
+ .reserved = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_7101_config_init,
+ .read_status = (read_status_t)bnx2x_7101_read_status,
+ .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
+ .config_loopback = (config_loopback_t)bnx2x_7101_config_loopback,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_7101_format_ver,
+ .hw_reset = (hw_reset_t)bnx2x_7101_hw_reset,
+ .set_link_led = (set_link_led_t)bnx2x_7101_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+static struct bnx2x_phy phy_8073 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+ .addr = 0xff,
+ .flags = FLAGS_HW_LOCK_REQUIRED,
+ .def_md_devad = 0,
+ .reserved = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_2500baseX_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_UNSPECIFIED,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_8073_config_init,
+ .read_status = (read_status_t)bnx2x_8073_read_status,
+ .link_reset = (link_reset_t)bnx2x_8073_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+static struct bnx2x_phy phy_8705 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
+ .addr = 0xff,
+ .flags = FLAGS_INIT_XGXS_FIRST,
+ .def_md_devad = 0,
+ .reserved = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_XFP_FIBER,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_8705_config_init,
+ .read_status = (read_status_t)bnx2x_8705_read_status,
+ .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_null_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+static struct bnx2x_phy phy_8706 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
+ .addr = 0xff,
+ .flags = FLAGS_INIT_XGXS_FIRST,
+ .def_md_devad = 0,
+ .reserved = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_SFP_FIBER,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_8706_config_init,
+ .read_status = (read_status_t)bnx2x_8706_read_status,
+ .link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_8726 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
+ .addr = 0xff,
+ .flags = (FLAGS_HW_LOCK_REQUIRED |
+ FLAGS_INIT_XGXS_FIRST),
+ .def_md_devad = 0,
+ .reserved = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_SFP_FIBER,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_8726_config_init,
+ .read_status = (read_status_t)bnx2x_8726_read_status,
+ .link_reset = (link_reset_t)bnx2x_8726_link_reset,
+ .config_loopback = (config_loopback_t)bnx2x_8726_config_loopback,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_8727 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
+ .addr = 0xff,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ,
+ .def_md_devad = 0,
+ .reserved = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_SFP_FIBER,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_8727_config_init,
+ .read_status = (read_status_t)bnx2x_8727_read_status,
+ .link_reset = (link_reset_t)bnx2x_8727_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
+ .hw_reset = (hw_reset_t)bnx2x_8727_hw_reset,
+ .set_link_led = (set_link_led_t)bnx2x_8727_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func
+};
+static struct bnx2x_phy phy_8481 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
+ .addr = 0xff,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_REARM_LATCH_SIGNAL,
+ .def_md_devad = 0,
+ .reserved = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_8481_config_init,
+ .read_status = (read_status_t)bnx2x_848xx_read_status,
+ .link_reset = (link_reset_t)bnx2x_8481_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
+ .hw_reset = (hw_reset_t)bnx2x_8481_hw_reset,
+ .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_84823 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823,
+ .addr = 0xff,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_REARM_LATCH_SIGNAL,
+ .def_md_devad = 0,
+ .reserved = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_848x3_config_init,
+ .read_status = (read_status_t)bnx2x_848xx_read_status,
+ .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
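+/*
+ * These static structs serve as templates: bnx2x_populate_phy() below
+ * copies the matching one into params->phy[], and the link code then
+ * dispatches through the function pointers. A minimal sketch of the
+ * resulting calling pattern (error handling elided):
+ *
+ *	struct bnx2x_phy *phy = &params->phy[INT_PHY];
+ *	if (phy->config_loopback)
+ *		phy->config_loopback(phy, params);
+ */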
+
+/*****************************************************************/
+/* */
+/* Populate the phy according to its type. Main function: bnx2x_populate_phy */
+/* */
+/*****************************************************************/
+
+static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
+ struct bnx2x_phy *phy, u8 port,
+ u8 phy_index)
+{
+ /* Get the 4 lanes xgxs config rx and tx */
+ u32 rx = 0, tx = 0, i;
+ for (i = 0; i < 2; i++) {
+ /**
+ * INT_PHY and EXT_PHY1 share the same value location in the
+		 * shmem. When num_phys is greater than 1, this value
+		 * applies only to EXT_PHY1.
+ */
+ if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
+ rx = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
+
+ tx = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
} else {
- REG_WR(bp, NIG_REG_LED_10G_P1,
- value);
+ rx = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
+
+			tx = REG_RD(bp, shmem_base +
+				offsetof(struct shmem_region,
+			dev_info.port_hw_config[port].xgxs_config2_tx[i<<1]));
}
+
+ phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff);
+ phy->rx_preemphasis[(i << 1) + 1] = (rx & 0xffff);
+
+ phy->tx_preemphasis[i << 1] = ((tx>>16) & 0xffff);
+ phy->tx_preemphasis[(i << 1) + 1] = (tx & 0xffff);
+ }
+}
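+/*
+ * Layout note for the loop above: each REG_RD() fetches one 32-bit word
+ * covering two consecutive 16-bit shmem entries, so iteration i = 0 fills
+ * lanes 0 (upper half) and 1 (lower half), and i = 1 fills lanes 2 and 3.
+ */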
+
+static u32 bnx2x_get_ext_phy_config(struct bnx2x *bp, u32 shmem_base,
+ u8 phy_index, u8 port)
+{
+ u32 ext_phy_config = 0;
+ switch (phy_index) {
+ case EXT_PHY1:
+ ext_phy_config = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].external_phy_config));
break;
- case 5: /* TRAFFIC led */
- /* Find if the traffic control is via BMAC or EMAC */
- if (port == 0)
- reg_val = REG_RD(bp, NIG_REG_NIG_EMAC0_EN);
- else
- reg_val = REG_RD(bp, NIG_REG_NIG_EMAC1_EN);
+ case EXT_PHY2:
+ ext_phy_config = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].external_phy_config2));
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Invalid phy_index %d\n", phy_index);
+ return -EINVAL;
+ }
- /* Override the traffic led in the EMAC:*/
- if (reg_val == 1) {
- /* Read the current value of the LED register in
- the EMAC block */
- reg_val = REG_RD(bp, emac_base +
- EMAC_REG_EMAC_LED);
- /* Set the TRAFFIC_OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the TRAFFIC bit, otherwise
- reset it.*/
- reg_val = (value == 1) ? (reg_val | EMAC_LED_TRAFFIC) :
- (reg_val & ~EMAC_LED_TRAFFIC);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
- } else { /* Override the traffic led in the BMAC: */
- REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
- + port*4, 1);
- REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4,
- value);
- }
+ return ext_phy_config;
+}
+static u8 bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
+ struct bnx2x_phy *phy)
+{
+ u32 phy_addr;
+ u32 chip_id;
+ u32 switch_cfg = (REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_feature_config[port].link_config)) &
+ PORT_FEATURE_CONNECTED_SWITCH_MASK);
+ chip_id = REG_RD(bp, MISC_REG_CHIP_NUM) << 16;
+ switch (switch_cfg) {
+ case SWITCH_CFG_1G:
+ phy_addr = REG_RD(bp,
+ NIG_REG_SERDES0_CTRL_PHY_ADDR +
+ port * 0x10);
+ *phy = phy_serdes;
+ break;
+ case SWITCH_CFG_10G:
+ phy_addr = REG_RD(bp,
+ NIG_REG_XGXS0_CTRL_PHY_ADDR +
+ port * 0x18);
+ *phy = phy_xgxs;
break;
default:
- DP(NETIF_MSG_LINK,
- "bnx2x_override_led_value() unknown led index %d "
- "(should be 0-5)\n", led_idx);
+ DP(NETIF_MSG_LINK, "Invalid switch_cfg\n");
return -EINVAL;
}
+ phy->addr = (u8)phy_addr;
+ phy->mdio_ctrl = bnx2x_get_emac_base(bp,
+ SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH,
+ port);
+ phy->def_md_devad = DEFAULT_PHY_DEV_ADDR;
+
+ DP(NETIF_MSG_LINK, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x\n",
+ port, phy->addr, phy->mdio_ctrl);
+ bnx2x_populate_preemphasis(bp, shmem_base, phy, port, INT_PHY);
return 0;
}
-
-u8 bnx2x_set_led(struct link_params *params, u8 mode, u32 speed)
+static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
+ u8 phy_index,
+ u32 shmem_base,
+ u32 shmem2_base,
+ u8 port,
+ struct bnx2x_phy *phy)
{
- u8 port = params->port;
- u16 hw_led_mode = params->hw_led_mode;
- u8 rc = 0;
- u32 tmp;
- u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
- struct bnx2x *bp = params->bp;
- DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
- DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
- speed, hw_led_mode);
- switch (mode) {
- case LED_MODE_OFF:
- REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
- REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
- SHARED_HW_CFG_LED_MAC1);
-
- tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
- EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
+ u32 ext_phy_config, phy_type, config2;
+ u32 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH;
+ ext_phy_config = bnx2x_get_ext_phy_config(bp, shmem_base,
+ phy_index, port);
+ phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
+ /* Select the phy type */
+ switch (phy_type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+ mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED;
+ *phy = phy_8073;
break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
+ *phy = phy_8705;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
+ *phy = phy_8706;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+ mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+ *phy = phy_8726;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
+ /* BCM8727_NOC => BCM8727 no over current */
+ mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+ *phy = phy_8727;
+ phy->flags |= FLAGS_NOC;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+ mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+ *phy = phy_8727;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
+ *phy = phy_8481;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
+ *phy = phy_84823;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+ *phy = phy_7101;
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
+ *phy = phy_null;
+ return -EINVAL;
+ default:
+ *phy = phy_null;
+ return 0;
+ }
- case LED_MODE_OPER:
- if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
- REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
- REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
- } else {
- REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
- hw_led_mode);
- }
+ phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
+ bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
- REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 +
- port*4, 0);
- /* Set blinking rate to ~15.9Hz */
- REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
- LED_BLINK_RATE_VAL);
- REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
- port*4, 1);
- tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
- EMAC_WR(bp, EMAC_REG_EMAC_LED,
- (tmp & (~EMAC_LED_OVERRIDE)));
+ /**
+	 * The shmem address of the phy version is located in a different
+	 * structure for each phy index. If the structure is too old, do
+	 * not set the address.
+ */
+ config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region,
+ dev_info.shared_hw_config.config2));
+ if (phy_index == EXT_PHY1) {
+ phy->ver_addr = shmem_base + offsetof(struct shmem_region,
+ port_mb[port].ext_phy_fw_version);
+
+ /* Check specific mdc mdio settings */
+ if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
+ mdc_mdio_access = config2 &
+ SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
+ } else {
+ u32 size = REG_RD(bp, shmem2_base);
- if (CHIP_IS_E1(bp) &&
- ((speed == SPEED_2500) ||
- (speed == SPEED_1000) ||
- (speed == SPEED_100) ||
- (speed == SPEED_10))) {
- /* On Everest 1 Ax chip versions for speeds less than
- 10G LED scheme is different */
- REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
- + port*4, 1);
- REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
- port*4, 0);
- REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
- port*4, 1);
+ if (size >
+ offsetof(struct shmem2_region, ext_phy_fw_version2)) {
+ phy->ver_addr = shmem2_base +
+ offsetof(struct shmem2_region,
+ ext_phy_fw_version2[port]);
}
- break;
-
- default:
- rc = -EINVAL;
- DP(NETIF_MSG_LINK, "bnx2x_set_led: Invalid led mode %d\n",
- mode);
- break;
+ /* Check specific mdc mdio settings */
+ if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK)
+ mdc_mdio_access = (config2 &
+ SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK) >>
+ (SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT -
+ SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT);
}
- return rc;
+ phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
+ /**
+	 * In case the mdc/mdio access of the external phy is different from
+	 * that of the XGXS, a HW lock must be taken for each access to
+	 * prevent one port from interfering with another port's CL45
+	 * operations.
+ */
+ if (mdc_mdio_access != SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH)
+ phy->flags |= FLAGS_HW_LOCK_REQUIRED;
+ DP(NETIF_MSG_LINK, "phy_type 0x%x port %d found in index %d\n",
+ phy_type, port, phy_index);
+ DP(NETIF_MSG_LINK, " addr=0x%x, mdio_ctl=0x%x\n",
+ phy->addr, phy->mdio_ctrl);
+ return 0;
}
-u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars)
+static u8 bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base,
+ u32 shmem2_base, u8 port, struct bnx2x_phy *phy)
{
- struct bnx2x *bp = params->bp;
- u16 gp_status = 0;
-
- CL45_RD_OVER_CL22(bp, params->port,
- params->phy_addr,
- MDIO_REG_BANK_GP_STATUS,
- MDIO_GP_STATUS_TOP_AN_STATUS1,
- &gp_status);
- /* link is up only if both local phy and external phy are up */
- if ((gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) &&
- bnx2x_ext_phy_is_link_up(params, vars, 1))
- return 0;
-
- return -ESRCH;
+ u8 status = 0;
+ phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN;
+ if (phy_index == INT_PHY)
+ return bnx2x_populate_int_phy(bp, shmem_base, port, phy);
+ status = bnx2x_populate_ext_phy(bp, phy_index, shmem_base, shmem2_base,
+ port, phy);
+ return status;
}
-static u8 bnx2x_link_initialize(struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_phy_def_cfg(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 phy_index)
{
struct bnx2x *bp = params->bp;
- u8 port = params->port;
- u8 rc = 0;
- u8 non_ext_phy;
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-
- /* Activate the external PHY */
- bnx2x_ext_phy_reset(params, vars);
-
- bnx2x_set_aer_mmd(params, vars);
+ u32 link_config;
+ /* Populate the default phy configuration for MF mode */
+ if (phy_index == EXT_PHY2) {
+ link_config = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].link_config2));
+ phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].speed_capability_mask2));
+ } else {
+ link_config = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].link_config));
+ phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].speed_capability_mask));
+ }
+ DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask"
+ " 0x%x\n", phy_index, link_config, phy->speed_cap_mask);
+
+ phy->req_duplex = DUPLEX_FULL;
+ switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
+ case PORT_FEATURE_LINK_SPEED_10M_HALF:
+ phy->req_duplex = DUPLEX_HALF;
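+		/* Fall through to set the line speed */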
+ case PORT_FEATURE_LINK_SPEED_10M_FULL:
+ phy->req_line_speed = SPEED_10;
+ break;
+ case PORT_FEATURE_LINK_SPEED_100M_HALF:
+ phy->req_duplex = DUPLEX_HALF;
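+		/* Fall through to set the line speed */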
+ case PORT_FEATURE_LINK_SPEED_100M_FULL:
+ phy->req_line_speed = SPEED_100;
+ break;
+ case PORT_FEATURE_LINK_SPEED_1G:
+ phy->req_line_speed = SPEED_1000;
+ break;
+ case PORT_FEATURE_LINK_SPEED_2_5G:
+ phy->req_line_speed = SPEED_2500;
+ break;
+ case PORT_FEATURE_LINK_SPEED_10G_CX4:
+ phy->req_line_speed = SPEED_10000;
+ break;
+ default:
+ phy->req_line_speed = SPEED_AUTO_NEG;
+ break;
+ }
- if (vars->phy_flags & PHY_XGXS_FLAG)
- bnx2x_set_master_ln(params);
+ switch (link_config & PORT_FEATURE_FLOW_CONTROL_MASK) {
+ case PORT_FEATURE_FLOW_CONTROL_AUTO:
+ phy->req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
+ break;
+ case PORT_FEATURE_FLOW_CONTROL_TX:
+ phy->req_flow_ctrl = BNX2X_FLOW_CTRL_TX;
+ break;
+ case PORT_FEATURE_FLOW_CONTROL_RX:
+ phy->req_flow_ctrl = BNX2X_FLOW_CTRL_RX;
+ break;
+ case PORT_FEATURE_FLOW_CONTROL_BOTH:
+ phy->req_flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
+ break;
+ default:
+ phy->req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ break;
+ }
+}
- rc = bnx2x_reset_unicore(params);
- /* reset the SerDes and wait for reset bit return low */
- if (rc != 0)
- return rc;
+u32 bnx2x_phy_selection(struct link_params *params)
+{
+ u32 phy_config_swapped, prio_cfg;
+ u32 return_cfg = PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT;
+
+ phy_config_swapped = params->multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+
+ prio_cfg = params->multi_phy_config &
+ PORT_HW_CFG_PHY_SELECTION_MASK;
+
+ if (phy_config_swapped) {
+ switch (prio_cfg) {
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+ return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+ return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+ return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+ return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+ break;
+ }
+ } else
+ return_cfg = prio_cfg;
- bnx2x_set_aer_mmd(params, vars);
+ return return_cfg;
+}
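+/*
+ * Swap-handling summary for bnx2x_phy_selection(): when
+ * PORT_HW_CFG_PHY_SWAPPED_ENABLED is set, the configured selection is
+ * mirrored between the two external PHYs:
+ *   FIRST_PHY_PRIORITY <-> SECOND_PHY_PRIORITY
+ *   FIRST_PHY          <-> SECOND_PHY
+ * HARDWARE_DEFAULT is returned as-is.
+ */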
- /* setting the masterLn_def again after the reset */
- if (vars->phy_flags & PHY_XGXS_FLAG) {
- bnx2x_set_master_ln(params);
- bnx2x_set_swap_lanes(params);
- }
- if (vars->phy_flags & PHY_XGXS_FLAG) {
- if ((params->req_line_speed &&
- ((params->req_line_speed == SPEED_100) ||
- (params->req_line_speed == SPEED_10))) ||
- (!params->req_line_speed &&
- (params->speed_cap_mask >=
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
- (params->speed_cap_mask <
- PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
- )) {
- vars->phy_flags |= PHY_SGMII_FLAG;
- } else {
- vars->phy_flags &= ~PHY_SGMII_FLAG;
+u8 bnx2x_phy_probe(struct link_params *params)
+{
+ u8 phy_index, actual_phy_idx, link_cfg_idx;
+ u32 phy_config_swapped;
+ struct bnx2x *bp = params->bp;
+ struct bnx2x_phy *phy;
+ params->num_phys = 0;
+ DP(NETIF_MSG_LINK, "Begin phy probe\n");
+ phy_config_swapped = params->multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+
+ for (phy_index = INT_PHY; phy_index < MAX_PHYS;
+ phy_index++) {
+ link_cfg_idx = LINK_CONFIG_IDX(phy_index);
+ actual_phy_idx = phy_index;
+ if (phy_config_swapped) {
+ if (phy_index == EXT_PHY1)
+ actual_phy_idx = EXT_PHY2;
+ else if (phy_index == EXT_PHY2)
+ actual_phy_idx = EXT_PHY1;
}
+ DP(NETIF_MSG_LINK, "phy_config_swapped %x, phy_index %x,"
+ " actual_phy_idx %x\n", phy_config_swapped,
+ phy_index, actual_phy_idx);
+ phy = &params->phy[actual_phy_idx];
+ if (bnx2x_populate_phy(bp, phy_index, params->shmem_base,
+ params->shmem2_base, params->port,
+ phy) != 0) {
+ params->num_phys = 0;
+ DP(NETIF_MSG_LINK, "phy probe failed in phy index %d\n",
+ phy_index);
+ for (phy_index = INT_PHY;
+ phy_index < MAX_PHYS;
+ phy_index++)
+				params->phy[phy_index] = phy_null;
+ return -EINVAL;
+ }
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)
+ break;
+
+ bnx2x_phy_def_cfg(params, phy, phy_index);
+ params->num_phys++;
}
- /* In case of external phy existance, the line speed would be the
- line speed linked up by the external phy. In case it is direct only,
- then the line_speed during initialization will be equal to the
- req_line_speed*/
- vars->line_speed = params->req_line_speed;
- bnx2x_calc_ieee_aneg_adv(params, &vars->ieee_fc);
+ DP(NETIF_MSG_LINK, "End phy probe. #phys found %x\n", params->num_phys);
+ return 0;
+}
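+/*
+ * A minimal sketch of the expected call order (probe must run before init
+ * so that params->num_phys and the per-phy callbacks are populated; error
+ * handling elided):
+ *
+ *	if (bnx2x_phy_probe(params) == 0)
+ *		rc = bnx2x_phy_init(params, vars);
+ */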
- /* init ext phy and enable link state int */
- non_ext_phy = ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
- (params->loopback_mode == LOOPBACK_XGXS_10));
+u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx)
+{
+ if (phy_idx < params->num_phys)
+ return params->phy[phy_idx].supported;
+ return 0;
+}
- if (non_ext_phy ||
- (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
- (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) ||
- (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) ||
- (params->loopback_mode == LOOPBACK_EXT_PHY)) {
- if (params->req_line_speed == SPEED_AUTO_NEG)
- bnx2x_set_parallel_detection(params, vars->phy_flags);
- bnx2x_init_internal_phy(params, vars, non_ext_phy);
- }
+static void set_phy_vars(struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 actual_phy_idx, phy_index, link_cfg_idx;
+ u8 phy_config_swapped = params->multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+ for (phy_index = INT_PHY; phy_index < params->num_phys;
+ phy_index++) {
+ link_cfg_idx = LINK_CONFIG_IDX(phy_index);
+ actual_phy_idx = phy_index;
+ if (phy_config_swapped) {
+ if (phy_index == EXT_PHY1)
+ actual_phy_idx = EXT_PHY2;
+ else if (phy_index == EXT_PHY2)
+ actual_phy_idx = EXT_PHY1;
+ }
+ params->phy[actual_phy_idx].req_flow_ctrl =
+ params->req_flow_ctrl[link_cfg_idx];
- if (!non_ext_phy)
- rc |= bnx2x_ext_phy_init(params, vars);
+ params->phy[actual_phy_idx].req_line_speed =
+ params->req_line_speed[link_cfg_idx];
- bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
- (NIG_STATUS_XGXS0_LINK10G |
- NIG_STATUS_XGXS0_LINK_STATUS |
- NIG_STATUS_SERDES0_LINK_STATUS));
+ params->phy[actual_phy_idx].speed_cap_mask =
+ params->speed_cap_mask[link_cfg_idx];
- return rc;
+ params->phy[actual_phy_idx].req_duplex =
+ params->req_duplex[link_cfg_idx];
+ DP(NETIF_MSG_LINK, "req_flow_ctrl %x, req_line_speed %x,"
+ " speed_cap_mask %x\n",
+ params->phy[actual_phy_idx].req_flow_ctrl,
+ params->phy[actual_phy_idx].req_line_speed,
+ params->phy[actual_phy_idx].speed_cap_mask);
+ }
}
-
u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u32 val;
-
DP(NETIF_MSG_LINK, "Phy Initialization started\n");
- DP(NETIF_MSG_LINK, "req_speed %d, req_flowctrl %d\n",
- params->req_line_speed, params->req_flow_ctrl);
+ DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n",
+ params->req_line_speed[0], params->req_flow_ctrl[0]);
+ DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n",
+ params->req_line_speed[1], params->req_flow_ctrl[1]);
vars->link_status = 0;
vars->phy_link_up = 0;
vars->link_up = 0;
@@ -5966,11 +6705,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
vars->duplex = DUPLEX_FULL;
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
vars->mac_type = MAC_TYPE_NONE;
-
- if (params->switch_cfg == SWITCH_CFG_1G)
- vars->phy_flags = PHY_SERDES_FLAG;
- else
- vars->phy_flags = PHY_XGXS_FLAG;
+ vars->phy_flags = 0;
/* disable attentions */
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
@@ -5981,6 +6716,13 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
bnx2x_emac_init(params, vars);
+ if (params->num_phys == 0) {
+ DP(NETIF_MSG_LINK, "No phy found for initialization !!\n");
+ return -EINVAL;
+ }
+ set_phy_vars(params);
+
+ DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
if (CHIP_REV_IS_FPGA(bp)) {
vars->link_up = 1;
@@ -6040,7 +6782,8 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
vars->phy_flags = PHY_XGXS_FLAG;
- bnx2x_phy_deassert(params, vars->phy_flags);
+ bnx2x_xgxs_deassert(params);
+
/* set bmac loopback */
bnx2x_bmac_enable(params, vars, 1);
@@ -6057,80 +6800,66 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
vars->phy_flags = PHY_XGXS_FLAG;
- bnx2x_phy_deassert(params, vars->phy_flags);
+ bnx2x_xgxs_deassert(params);
/* set bmac loopback */
bnx2x_emac_enable(params, vars, 1);
- bnx2x_emac_program(params, vars->line_speed,
- vars->duplex);
+ bnx2x_emac_program(params, vars);
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
params->port*4, 0);
- } else if ((params->loopback_mode == LOOPBACK_XGXS_10) ||
+ } else if ((params->loopback_mode == LOOPBACK_XGXS) ||
(params->loopback_mode == LOOPBACK_EXT_PHY)) {
vars->link_up = 1;
- vars->line_speed = SPEED_10000;
- vars->duplex = DUPLEX_FULL;
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->duplex = DUPLEX_FULL;
+ if (params->req_line_speed[0] == SPEED_1000) {
+ vars->line_speed = SPEED_1000;
+ vars->mac_type = MAC_TYPE_EMAC;
+ } else {
+ vars->line_speed = SPEED_10000;
+ vars->mac_type = MAC_TYPE_BMAC;
+ }
- vars->phy_flags = PHY_XGXS_FLAG;
-
- val = REG_RD(bp,
- NIG_REG_XGXS0_CTRL_PHY_ADDR+
- params->port*0x18);
- params->phy_addr = (u8)val;
-
- bnx2x_phy_deassert(params, vars->phy_flags);
+ bnx2x_xgxs_deassert(params);
bnx2x_link_initialize(params, vars);
- vars->mac_type = MAC_TYPE_BMAC;
-
+ if (params->req_line_speed[0] == SPEED_1000) {
+ bnx2x_emac_program(params, vars);
+ bnx2x_emac_enable(params, vars, 0);
+ } else
bnx2x_bmac_enable(params, vars, 0);
- if (params->loopback_mode == LOOPBACK_XGXS_10) {
+ if (params->loopback_mode == LOOPBACK_XGXS) {
/* set 10G XGXS loopback */
- bnx2x_set_xgxs_loopback(params, vars, 1);
+ params->phy[INT_PHY].config_loopback(
+ &params->phy[INT_PHY],
+ params);
+
} else {
/* set external phy loopback */
- bnx2x_ext_phy_loopback(params);
+ u8 phy_index;
+ for (phy_index = EXT_PHY1;
+ phy_index < params->num_phys; phy_index++) {
+ if (params->phy[phy_index].config_loopback)
+ params->phy[phy_index].config_loopback(
+ &params->phy[phy_index],
+ params);
+ }
}
+
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
params->port*4, 0);
- bnx2x_set_led(params, LED_MODE_OPER, vars->line_speed);
+ bnx2x_set_led(params, vars,
+ LED_MODE_OPER, vars->line_speed);
} else
/* No loopback */
{
- bnx2x_phy_deassert(params, vars->phy_flags);
- switch (params->switch_cfg) {
- case SWITCH_CFG_1G:
- vars->phy_flags |= PHY_SERDES_FLAG;
- if ((params->ext_phy_config &
- PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK) ==
- PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482) {
- vars->phy_flags |= PHY_SGMII_FLAG;
- }
-
- val = REG_RD(bp,
- NIG_REG_SERDES0_CTRL_PHY_ADDR+
- params->port*0x10);
-
- params->phy_addr = (u8)val;
-
- break;
- case SWITCH_CFG_10G:
- vars->phy_flags |= PHY_XGXS_FLAG;
- val = REG_RD(bp,
- NIG_REG_XGXS0_CTRL_PHY_ADDR+
- params->port*0x18);
- params->phy_addr = (u8)val;
-
- break;
- default:
- DP(NETIF_MSG_LINK, "Invalid switch_cfg\n");
- return -EINVAL;
- }
- DP(NETIF_MSG_LINK, "Phy address = 0x%x\n", params->phy_addr);
+ if (params->switch_cfg == SWITCH_CFG_10G)
+ bnx2x_xgxs_deassert(params);
+ else
+ bnx2x_serdes_deassert(bp, params->port);
bnx2x_link_initialize(params, vars);
msleep(30);
@@ -6138,29 +6867,11 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
}
return 0;
}
-
-static void bnx2x_8726_reset_phy(struct bnx2x *bp, u8 port, u8 ext_phy_addr)
-{
- DP(NETIF_MSG_LINK, "bnx2x_8726_reset_phy port %d\n", port);
-
- /* Set serial boot control for external load */
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726, ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL, 0x0001);
-}
-
u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
u8 reset_ext_phy)
{
struct bnx2x *bp = params->bp;
- u32 ext_phy_config = params->ext_phy_config;
- u8 port = params->port;
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
- u32 val = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region, dev_info.
- port_feature_config[params->port].
- config));
+ u8 phy_index, port = params->port;
DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
/* disable attentions */
vars->link_status = 0;
@@ -6189,73 +6900,21 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
* Hold it as vars low
*/
/* clear link led */
- bnx2x_set_led(params, LED_MODE_OFF, 0);
- if (reset_ext_phy) {
- switch (ext_phy_type) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
- break;
+ bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
- {
-
- /* Disable Transmitter */
- u8 ext_phy_addr =
- XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
- PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
- bnx2x_sfp_set_transmitter(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr, 0);
- break;
- }
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
- DP(NETIF_MSG_LINK, "Setting 8073 port %d into "
- "low power mode\n",
- port);
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW,
- port);
- break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- {
- u8 ext_phy_addr =
- XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- /* Set soft reset */
- bnx2x_8726_reset_phy(bp, params->port, ext_phy_addr);
- break;
- }
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
- {
- u8 ext_phy_addr =
- XGXS_EXT_PHY_ADDR(params->ext_phy_config);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_CTRL, 0x0000);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
- ext_phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL, 1);
- break;
- }
- default:
- /* HW reset */
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW,
- port);
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW,
- port);
- DP(NETIF_MSG_LINK, "reset external PHY\n");
+ if (reset_ext_phy) {
+ for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+ phy_index++) {
+ if (params->phy[phy_index].link_reset)
+ params->phy[phy_index].link_reset(
+ &params->phy[phy_index],
+ params);
}
}
- /* reset the SerDes/XGXS */
- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
- (0x1ff << (port*16)));
+ if (params->phy[INT_PHY].link_reset)
+ params->phy[INT_PHY].link_reset(
+ &params->phy[INT_PHY], params);
/* reset BigMac */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -6269,183 +6928,25 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
return 0;
}
-static u8 bnx2x_update_link_down(struct link_params *params,
- struct link_vars *vars)
+/****************************************************************************/
+/* Common function */
+/****************************************************************************/
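+/* One-time init of both 8073 phys: reset each phy, download firmware from
+ the external ROM, and run the two-phase TX power-down reset sequence */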
+static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base,
+ u32 shmem2_base, u8 phy_index)
{
- struct bnx2x *bp = params->bp;
- u8 port = params->port;
-
- DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
- bnx2x_set_led(params, LED_MODE_OFF, 0);
-
- /* indicate no mac active */
- vars->mac_type = MAC_TYPE_NONE;
-
- /* update shared memory */
- vars->link_status = 0;
- vars->line_speed = 0;
- bnx2x_update_mng(params, vars->link_status);
-
- /* activate nig drain */
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
-
- /* disable emac */
- REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
-
- msleep(10);
-
- /* reset BigMac */
- bnx2x_bmac_rx_disable(bp, params->port);
- REG_WR(bp, GRCBASE_MISC +
- MISC_REGISTERS_RESET_REG_2_CLEAR,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
- return 0;
-}
-
-static u8 bnx2x_update_link_up(struct link_params *params,
- struct link_vars *vars,
- u8 link_10g, u32 gp_status)
-{
- struct bnx2x *bp = params->bp;
- u8 port = params->port;
- u8 rc = 0;
-
- vars->link_status |= LINK_STATUS_LINK_UP;
- if (link_10g) {
- bnx2x_bmac_enable(params, vars, 0);
- bnx2x_set_led(params, LED_MODE_OPER, SPEED_10000);
- } else {
- rc = bnx2x_emac_program(params, vars->line_speed,
- vars->duplex);
-
- bnx2x_emac_enable(params, vars, 0);
-
- /* AN complete? */
- if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
- if (!(vars->phy_flags &
- PHY_SGMII_FLAG))
- bnx2x_set_gmii_tx_driver(params);
- }
- }
-
- /* PBF - link up */
- rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
- vars->line_speed);
-
- /* disable drain */
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
-
- /* update shared memory */
- bnx2x_update_mng(params, vars->link_status);
- msleep(20);
- return rc;
-}
-/* This function should called upon link interrupt */
-/* In case vars->link_up, driver needs to
- 1. Update the pbf
- 2. Disable drain
- 3. Update the shared memory
- 4. Indicate link up
- 5. Set LEDs
- Otherwise,
- 1. Update shared memory
- 2. Reset BigMac
- 3. Report link down
- 4. Unset LEDs
-*/
-u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
-{
- struct bnx2x *bp = params->bp;
- u8 port = params->port;
- u16 gp_status;
- u8 link_10g;
- u8 ext_phy_link_up, rc = 0;
- u32 ext_phy_type;
- u8 is_mi_int = 0;
-
- DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
- port, (vars->phy_flags & PHY_XGXS_FLAG),
- REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
-
- is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
- port*0x18) > 0);
- DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
- REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
- is_mi_int,
- REG_RD(bp,
- NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
-
- DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
- REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
- REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
-
- /* disable emac */
- REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
-
- ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-
- /* Check external link change only for non-direct */
- ext_phy_link_up = bnx2x_ext_phy_is_link_up(params, vars, is_mi_int);
-
- /* Read gp_status */
- CL45_RD_OVER_CL22(bp, port, params->phy_addr,
- MDIO_REG_BANK_GP_STATUS,
- MDIO_GP_STATUS_TOP_AN_STATUS1,
- &gp_status);
-
- rc = bnx2x_link_settings_status(params, vars, gp_status,
- ext_phy_link_up);
- if (rc != 0)
- return rc;
-
- /* anything 10 and over uses the bmac */
- link_10g = ((vars->line_speed == SPEED_10000) ||
- (vars->line_speed == SPEED_12000) ||
- (vars->line_speed == SPEED_12500) ||
- (vars->line_speed == SPEED_13000) ||
- (vars->line_speed == SPEED_15000) ||
- (vars->line_speed == SPEED_16000));
-
- bnx2x_link_int_ack(params, vars, link_10g, is_mi_int);
-
- /* In case external phy link is up, and internal link is down
- ( not initialized yet probably after link initialization, it needs
- to be initialized.
- Note that after link down-up as result of cable plug,
- the xgxs link would probably become up again without the need to
- initialize it*/
-
- if ((ext_phy_type != PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
- (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) &&
- (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) &&
- (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) &&
- (ext_phy_link_up && !vars->phy_link_up))
- bnx2x_init_internal_phy(params, vars, 0);
-
- /* link is up only if both local phy and external phy are up */
- vars->link_up = (ext_phy_link_up && vars->phy_link_up);
-
- if (vars->link_up)
- rc = bnx2x_update_link_up(params, vars, link_10g, gp_status);
- else
- rc = bnx2x_update_link_down(params, vars);
-
- return rc;
-}
-
-static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
-{
- u8 ext_phy_addr[PORT_MAX];
+ struct bnx2x_phy phy[PORT_MAX];
+ struct bnx2x_phy *phy_blk[PORT_MAX];
u16 val;
s8 port;
/* PART1 - Reset both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
/* Extract the ext phy address for the port */
- u32 ext_phy_config = REG_RD(bp, shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[port].external_phy_config));
-
+ if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+ port, &phy[port]) != 0) {
+ DP(NETIF_MSG_LINK, "populate_phy failed\n");
+ return -EINVAL;
+ }
/* disable attentions */
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
(NIG_MASK_XGXS0_LINK_STATUS |
@@ -6453,17 +6954,13 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
NIG_MASK_SERDES0_LINK_STATUS |
NIG_MASK_MI_INT));
- ext_phy_addr[port] = XGXS_EXT_PHY_ADDR(ext_phy_config);
-
/* Need to take the phy out of low power mode in order
to access its registers */
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
/* Reset the phy */
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr[port],
+ bnx2x_cl45_write(bp, &phy[port],
MDIO_PMA_DEVAD,
MDIO_PMA_REG_CTRL,
1<<15);
@@ -6472,15 +6969,22 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
/* Add delay of 150ms after reset */
msleep(150);
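+ /* Map the even MDIO address to PORT_0 and the odd one to PORT_1,
+ so the firmware download below hits the right device */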
+ if (phy[PORT_0].addr & 0x1) {
+ phy_blk[PORT_0] = &(phy[PORT_1]);
+ phy_blk[PORT_1] = &(phy[PORT_0]);
+ } else {
+ phy_blk[PORT_0] = &(phy[PORT_0]);
+ phy_blk[PORT_1] = &(phy[PORT_1]);
+ }
+
/* PART2 - Download firmware to both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
u16 fw_ver1;
- bnx2x_bcm8073_external_rom_boot(bp, port,
- ext_phy_addr[port], shmem_base);
+ bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+ port);
- bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr[port],
+ bnx2x_cl45_read(bp, phy_blk[port],
MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER1, &fw_ver1);
if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
@@ -6492,16 +6996,12 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
}
/* Only set bit 10 = 1 (Tx power down) */
- bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr[port],
+ bnx2x_cl45_read(bp, phy_blk[port],
MDIO_PMA_DEVAD,
MDIO_PMA_REG_TX_POWER_DOWN, &val);
/* Phase1 of TX_POWER_DOWN reset */
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr[port],
+ bnx2x_cl45_write(bp, phy_blk[port],
MDIO_PMA_DEVAD,
MDIO_PMA_REG_TX_POWER_DOWN,
(val | 1<<10));
@@ -6515,28 +7015,20 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
/* Phase2 of POWER_DOWN_RESET */
/* Release bit 10 (Release Tx power down) */
- bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr[port],
+ bnx2x_cl45_read(bp, phy_blk[port],
MDIO_PMA_DEVAD,
MDIO_PMA_REG_TX_POWER_DOWN, &val);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr[port],
+ bnx2x_cl45_write(bp, phy_blk[port],
MDIO_PMA_DEVAD,
MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
msleep(15);
/* Read modify write the SPI-ROM version select register */
- bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr[port],
+ bnx2x_cl45_read(bp, phy_blk[port],
MDIO_PMA_DEVAD,
MDIO_PMA_REG_EDC_FFE_MAIN, &val);
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
- ext_phy_addr[port],
+ bnx2x_cl45_write(bp, phy_blk[port],
MDIO_PMA_DEVAD,
MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
@@ -6545,33 +7037,74 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
}
return 0;
-
}
-static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base)
+static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, u32 shmem_base,
+ u32 shmem2_base, u8 phy_index)
+{
+ u32 val;
+ s8 port;
+ struct bnx2x_phy phy;
+ /* Use port1 because of the static port-swap */
+ /* Enable the module detection interrupt */
+ val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
+ val |= ((1<<MISC_REGISTERS_GPIO_3)|
+ (1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
+ REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
+
+ bnx2x_ext_phy_hw_reset(bp, 1);
+ msleep(5);
+ for (port = 0; port < PORT_MAX; port++) {
+ /* Extract the ext phy address for the port */
+ if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+ port, &phy) != 0) {
+ DP(NETIF_MSG_LINK, "populate phy failed\n");
+ return -EINVAL;
+ }
+
+ /* Reset the phy: set serial boot control for external load */
+ bnx2x_cl45_write(bp, &phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001);
+
+
+ /* Set fault module detected LED on */
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+ MISC_REGISTERS_GPIO_HIGH,
+ port);
+ }
+
+ return 0;
+}
+static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base,
+ u32 shmem2_base, u8 phy_index)
{
- u8 ext_phy_addr[PORT_MAX];
- s8 port, first_port, i;
+ s8 port;
u32 swap_val, swap_override;
+ struct bnx2x_phy phy[PORT_MAX];
+ struct bnx2x_phy *phy_blk[PORT_MAX];
DP(NETIF_MSG_LINK, "Executing BCM8727 common init\n");
swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
- bnx2x_ext_phy_hw_reset(bp, 1 ^ (swap_val && swap_override));
- msleep(5);
+ port = 1;
- if (swap_val && swap_override)
- first_port = PORT_0;
- else
- first_port = PORT_1;
+ bnx2x_ext_phy_hw_reset(bp, port ^ (swap_val && swap_override));
+
+ /* Calculate the port based on port swap */
+ port ^= (swap_val && swap_override);
+
+ msleep(5);
/* PART1 - Reset both phys */
- for (i = 0, port = first_port; i < PORT_MAX; i++, port = !port) {
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
/* Extract the ext phy address for the port */
- u32 ext_phy_config = REG_RD(bp, shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[port].external_phy_config));
-
+ if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+ port, &phy[port]) != 0) {
+ DP(NETIF_MSG_LINK, "populate phy failed\n");
+ return -EINVAL;
+ }
/* disable attentions */
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
(NIG_MASK_XGXS0_LINK_STATUS |
@@ -6579,12 +7112,9 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base)
NIG_MASK_SERDES0_LINK_STATUS |
NIG_MASK_MI_INT));
- ext_phy_addr[port] = XGXS_EXT_PHY_ADDR(ext_phy_config);
/* Reset the phy */
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr[port],
+ bnx2x_cl45_write(bp, &phy[port],
MDIO_PMA_DEVAD,
MDIO_PMA_REG_CTRL,
1<<15);
@@ -6592,16 +7122,20 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base)
/* Add delay of 150ms after reset */
msleep(150);
-
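+ /* Same even/odd MDIO address ordering as in the 8073 init above */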
+ if (phy[PORT_0].addr & 0x1) {
+ phy_blk[PORT_0] = &(phy[PORT_1]);
+ phy_blk[PORT_1] = &(phy[PORT_0]);
+ } else {
+ phy_blk[PORT_0] = &(phy[PORT_0]);
+ phy_blk[PORT_1] = &(phy[PORT_1]);
+ }
/* PART2 - Download firmware to both phys */
- for (i = 0, port = first_port; i < PORT_MAX; i++, port = !port) {
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
u16 fw_ver1;
- bnx2x_bcm8727_external_rom_boot(bp, port,
- ext_phy_addr[port], shmem_base);
-
- bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
- ext_phy_addr[port],
+ bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+ port);
+ bnx2x_cl45_read(bp, phy_blk[port],
MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER1, &fw_ver1);
if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
@@ -6616,82 +7150,32 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base)
return 0;
}
-
-static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, u32 shmem_base)
-{
- u8 ext_phy_addr;
- u32 val;
- s8 port;
-
- /* Use port1 because of the static port-swap */
- /* Enable the module detection interrupt */
- val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
- val |= ((1<<MISC_REGISTERS_GPIO_3)|
- (1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
- REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
-
- bnx2x_ext_phy_hw_reset(bp, 1);
- msleep(5);
- for (port = 0; port < PORT_MAX; port++) {
- /* Extract the ext phy address for the port */
- u32 ext_phy_config = REG_RD(bp, shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[port].external_phy_config));
-
- ext_phy_addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
- DP(NETIF_MSG_LINK, "8726_common_init : ext_phy_addr = 0x%x\n",
- ext_phy_addr);
-
- bnx2x_8726_reset_phy(bp, port, ext_phy_addr);
-
- /* Set fault module detected LED on */
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
- MISC_REGISTERS_GPIO_HIGH,
- port);
- }
-
- return 0;
-}
-
-
-static u8 bnx2x_84823_common_init_phy(struct bnx2x *bp, u32 shmem_base)
-{
- /* HW reset */
- bnx2x_ext_phy_hw_reset(bp, 1);
- return 0;
-}
-u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
+static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base,
+ u32 shmem2_base, u8 phy_index,
+ u32 ext_phy_type)
{
u8 rc = 0;
- u32 ext_phy_type;
-
- DP(NETIF_MSG_LINK, "Begin common phy init\n");
-
- /* Read the ext_phy_type for arbitrary port(0) */
- ext_phy_type = XGXS_EXT_PHY_TYPE(
- REG_RD(bp, shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[0].external_phy_config)));
switch (ext_phy_type) {
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
- {
- rc = bnx2x_8073_common_init_phy(bp, shmem_base);
+ rc = bnx2x_8073_common_init_phy(bp, shmem_base,
+ shmem2_base, phy_index);
break;
- }
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
- rc = bnx2x_8727_common_init_phy(bp, shmem_base);
+ rc = bnx2x_8727_common_init_phy(bp, shmem_base,
+ shmem2_base, phy_index);
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
/* GPIO1 affects both ports, so it cannot be pulled for
a single port alone */
- rc = bnx2x_8726_common_init_phy(bp, shmem_base);
+ rc = bnx2x_8726_common_init_phy(bp, shmem_base,
+ shmem2_base, phy_index);
break;
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
- rc = bnx2x_84823_common_init_phy(bp, shmem_base);
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
+ rc = -EINVAL;
break;
default:
DP(NETIF_MSG_LINK,
@@ -6703,33 +7187,80 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
return rc;
}
-void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr)
+u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base,
+ u32 shmem2_base)
{
- u16 val, cnt;
+ u8 rc = 0;
+ u8 phy_index;
+ u32 ext_phy_type, ext_phy_config;
+ DP(NETIF_MSG_LINK, "Begin common phy init\n");
- bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
- phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_7101_RESET, &val);
+ if (CHIP_REV_IS_EMUL(bp))
+ return 0;
- for (cnt = 0; cnt < 10; cnt++) {
- msleep(50);
- /* Writes a self-clearing reset */
- bnx2x_cl45_write(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
- phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_7101_RESET,
- (val | (1<<15)));
- /* Wait for clear */
- bnx2x_cl45_read(bp, port,
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
- phy_addr,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_7101_RESET, &val);
+ /* Read the ext_phy_type of each ext phy for an arbitrary port (0) */
+ for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+ phy_index++) {
+ ext_phy_config = bnx2x_get_ext_phy_config(bp,
+ shmem_base,
+ phy_index, 0);
+ ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
+ rc |= bnx2x_ext_phy_common_init(bp, shmem_base,
+ shmem2_base,
+ phy_index, ext_phy_type);
+ }
+ return rc;
+}
- if ((val & (1<<15)) == 0)
- break;
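+/* Returns 1 if any phy on the board sets FLAGS_HW_LOCK_REQUIRED */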
+u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base)
+{
+ u8 phy_index;
+ struct bnx2x_phy phy;
+ for (phy_index = INT_PHY; phy_index < MAX_PHYS;
+ phy_index++) {
+ if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+ 0, &phy) != 0) {
+ DP(NETIF_MSG_LINK, "populate phy failed\n");
+ return 0;
+ }
+
+ if (phy.flags & FLAGS_HW_LOCK_REQUIRED)
+ return 1;
+ }
+ return 0;
+}
+
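+/* ORs FLAGS_FAN_FAILURE_DET_REQ across the external phys of the port */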
+u8 bnx2x_fan_failure_det_req(struct bnx2x *bp,
+ u32 shmem_base,
+ u32 shmem2_base,
+ u8 port)
+{
+ u8 phy_index, fan_failure_det_req = 0;
+ struct bnx2x_phy phy;
+ for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+ phy_index++) {
+ if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+ port, &phy) != 0) {
+ DP(NETIF_MSG_LINK, "populate phy failed\n");
+ return 0;
+ }
+ fan_failure_det_req |= (phy.flags &
+ FLAGS_FAN_FAILURE_DET_REQ);
+ }
+ return fan_failure_det_req;
+}
+
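+/* Invoke each external phy's hw_reset callback, then reset its entry
+ to phy_null */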
+void bnx2x_hw_reset_phy(struct link_params *params)
+{
+ u8 phy_index;
+ for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+ phy_index++) {
+ if (params->phy[phy_index].hw_reset) {
+ params->phy[phy_index].hw_reset(
+ &params->phy[phy_index],
+ params);
+ params->phy[phy_index] = phy_null;
+ }
}
}
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index 40c2981de8ed..e98ea3d19471 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -1,4 +1,4 @@
-/* Copyright 2008-2009 Broadcom Corporation
+/* Copyright 2008-2010 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -46,9 +46,137 @@
#define SFP_EEPROM_PART_NO_ADDR 0x28
#define SFP_EEPROM_PART_NO_SIZE 16
#define PWR_FLT_ERR_MSG_LEN 250
+
+#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
+ ((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK)
+#define XGXS_EXT_PHY_ADDR(ext_phy_config) \
+ (((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> \
+ PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT)
+#define SERDES_EXT_PHY_TYPE(ext_phy_config) \
+ ((ext_phy_config) & PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK)
+
+/* Single Media Direct board is the plain 577xx board with CX4/RJ45 jacks */
+#define SINGLE_MEDIA_DIRECT(params) (params->num_phys == 1)
+/* Single Media board contains single external phy */
+#define SINGLE_MEDIA(params) (params->num_phys == 2)
+/* Dual Media board contains two external phy with different media */
+#define DUAL_MEDIA(params) (params->num_phys == 3)
+#define FW_PARAM_MDIO_CTRL_OFFSET 16
+#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
+ (phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
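+/* e.g. (sketch) FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl) packs
+ * the three fields into a single u32 such as the drv_mb_param word */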
/***********************************************************/
/* Structs */
/***********************************************************/
+#define INT_PHY 0
+#define EXT_PHY1 1
+#define EXT_PHY2 2
+#define MAX_PHYS 3
+
+/* Same configuration is shared between the XGXS and the first external phy */
+#define LINK_CONFIG_SIZE (MAX_PHYS - 1)
+#define LINK_CONFIG_IDX(_phy_idx) ((_phy_idx == INT_PHY) ? \
+ 0 : (_phy_idx - 1))
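+/* hence LINK_CONFIG_IDX(INT_PHY) == LINK_CONFIG_IDX(EXT_PHY1) == 0 and
+ * LINK_CONFIG_IDX(EXT_PHY2) == 1 */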
+/***********************************************************/
+/* bnx2x_phy struct */
+/* Defines the required arguments and function per phy */
+/***********************************************************/
+struct link_vars;
+struct link_params;
+struct bnx2x_phy;
+
+typedef u8 (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params,
+ struct link_vars *vars);
+typedef u8 (*read_status_t)(struct bnx2x_phy *phy, struct link_params *params,
+ struct link_vars *vars);
+typedef void (*link_reset_t)(struct bnx2x_phy *phy,
+ struct link_params *params);
+typedef void (*config_loopback_t)(struct bnx2x_phy *phy,
+ struct link_params *params);
+typedef u8 (*format_fw_ver_t)(u32 raw, u8 *str, u16 *len);
+typedef void (*hw_reset_t)(struct bnx2x_phy *phy, struct link_params *params);
+typedef void (*set_link_led_t)(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode);
+typedef void (*phy_specific_func_t)(struct bnx2x_phy *phy,
+ struct link_params *params, u32 action);
+
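+/* These callbacks form a per-phy dispatch table; bnx2x_phy_probe()
+ * fills them in for the phys found on board */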
+struct bnx2x_phy {
+ u32 type;
+
+ /* Loaded during init */
+ u8 addr;
+
+ u8 flags;
+ /* Require HW lock */
+#define FLAGS_HW_LOCK_REQUIRED (1<<0)
+ /* No Over-Current detection */
+#define FLAGS_NOC (1<<1)
+ /* Fan failure detection required */
+#define FLAGS_FAN_FAILURE_DET_REQ (1<<2)
+ /* Initialize first the XGXS and only then the phy itself */
+#define FLAGS_INIT_XGXS_FIRST (1<<3)
+#define FLAGS_REARM_LATCH_SIGNAL (1<<6)
+#define FLAGS_SFP_NOT_APPROVED (1<<7)
+
+ u8 def_md_devad;
+ u8 reserved;
+ /* preemphasis values for the rx side */
+ u16 rx_preemphasis[4];
+
+ /* preemphasis values for the tx side */
+ u16 tx_preemphasis[4];
+
+ /* EMAC address for access MDIO */
+ u32 mdio_ctrl;
+
+ u32 supported;
+
+ u32 media_type;
+#define ETH_PHY_UNSPECIFIED 0x0
+#define ETH_PHY_SFP_FIBER 0x1
+#define ETH_PHY_XFP_FIBER 0x2
+#define ETH_PHY_DA_TWINAX 0x3
+#define ETH_PHY_BASE_T 0x4
+#define ETH_PHY_NOT_PRESENT 0xff
+
+ /* The address at which the version is located */
+ u32 ver_addr;
+
+ u16 req_flow_ctrl;
+
+ u16 req_line_speed;
+
+ u32 speed_cap_mask;
+
+ u16 req_duplex;
+ u16 rsrv;
+ /* Called per phy/port init, and it configures LASI, speed, autoneg,
+ duplex, flow control negotiation, etc. */
+ config_init_t config_init;
+
+ /* Called on link interrupt. Determines the link state and speed */
+ read_status_t read_status;
+
+ /* Called when driver is unloading. Should reset the phy */
+ link_reset_t link_reset;
+
+ /* Set the loopback configuration for the phy */
+ config_loopback_t config_loopback;
+
+ /* Format the given raw number into str up to len */
+ format_fw_ver_t format_fw_ver;
+
+ /* Reset the phy (both ports) */
+ hw_reset_t hw_reset;
+
+ /* Set link led mode (on/off/oper) */
+ set_link_led_t set_link_led;
+
+ /* PHY Specific tasks */
+ phy_specific_func_t phy_specific_func;
+#define DISABLE_TX 1
+#define ENABLE_TX 2
+};
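+/* A phy entry is then driven generically through these callbacks, e.g.
+ * (sketch): if (phy->config_init)
+ * phy->config_init(phy, params, vars); */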
+
/* Inputs parameters to the CLC */
struct link_params {
@@ -59,56 +187,50 @@ struct link_params {
#define LOOPBACK_NONE 0
#define LOOPBACK_EMAC 1
#define LOOPBACK_BMAC 2
-#define LOOPBACK_XGXS_10 3
+#define LOOPBACK_XGXS 3
#define LOOPBACK_EXT_PHY 4
#define LOOPBACK_EXT 5
- u16 req_duplex;
- u16 req_flow_ctrl;
- u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
- req_flow_ctrl is set to AUTO */
- u16 req_line_speed; /* Also determine AutoNeg */
-
/* Device parameters */
u8 mac_addr[6];
+ u16 req_duplex[LINK_CONFIG_SIZE];
+ u16 req_flow_ctrl[LINK_CONFIG_SIZE];
+
+ u16 req_line_speed[LINK_CONFIG_SIZE]; /* Also determine AutoNeg */
+
/* shmem parameters */
u32 shmem_base;
- u32 speed_cap_mask;
+ u32 shmem2_base;
+ u32 speed_cap_mask[LINK_CONFIG_SIZE];
u32 switch_cfg;
#define SWITCH_CFG_1G PORT_FEATURE_CON_SWITCH_1G_SWITCH
#define SWITCH_CFG_10G PORT_FEATURE_CON_SWITCH_10G_SWITCH
#define SWITCH_CFG_AUTO_DETECT PORT_FEATURE_CON_SWITCH_AUTO_DETECT
- u16 hw_led_mode; /* part of the hw_config read from the shmem */
-
- /* phy_addr populated by the phy_init function */
- u8 phy_addr;
- /*u8 reserved1;*/
-
u32 lane_config;
- u32 ext_phy_config;
-#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
- ((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK)
-#define XGXS_EXT_PHY_ADDR(ext_phy_config) \
- (((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> \
- PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT)
-#define SERDES_EXT_PHY_TYPE(ext_phy_config) \
- ((ext_phy_config) & PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK)
/* Phy register parameter */
u32 chip_id;
- u16 xgxs_config_rx[4]; /* preemphasis values for the rx side */
- u16 xgxs_config_tx[4]; /* preemphasis values for the tx side */
-
u32 feature_config_flags;
#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
-#define FEATURE_CONFIG_BCM8727_NOC (1<<3)
+#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
+ /* Will be populated during common init */
+ struct bnx2x_phy phy[MAX_PHYS];
+
+ /* Will be populated during common init */
+ u8 num_phys;
+
+ u8 rsrv;
+ u16 hw_led_mode; /* part of the hw_config read from the shmem */
+ u32 multi_phy_config;
/* Device pointer passed to all callback functions */
struct bnx2x *bp;
+ u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
+ req_flow_ctrl is set to AUTO */
};
/* Output parameters */
@@ -129,12 +251,6 @@ struct link_vars {
u16 flow_ctrl;
u16 ieee_fc;
- u32 autoneg;
-#define AUTO_NEG_DISABLED 0x0
-#define AUTO_NEG_ENABLED 0x1
-#define AUTO_NEG_COMPLETE 0x2
-#define AUTO_NEG_PARALLEL_DETECTION_USED 0x3
-
/* The same definitions as the shmem parameter */
u32 link_status;
};
@@ -142,8 +258,6 @@ struct link_vars {
/***********************************************************/
/* Functions */
/***********************************************************/
-
-/* Initialize the phy */
u8 bnx2x_phy_init(struct link_params *input, struct link_vars *output);
/* Reset the link. Should be called when driver or interface goes down
@@ -155,17 +269,21 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
/* bnx2x_link_update should be called upon link interrupt */
u8 bnx2x_link_update(struct link_params *input, struct link_vars *output);
-/* use the following cl45 functions to read/write from external_phy
+/* use the following phy functions to read/write from external_phy
In order to use it to read/write internal phy registers, use
DEFAULT_PHY_DEV_ADDR as devad, and (_bank + (_addr & 0xf)) as
- Use ext_phy_type of 0 in case of cl22 over cl45
the register */
-u8 bnx2x_cl45_read(struct bnx2x *bp, u8 port, u32 ext_phy_type,
- u8 phy_addr, u8 devad, u16 reg, u16 *ret_val);
+u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
+ u8 devad, u16 reg, u16 *ret_val);
+
+u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
+ u8 devad, u16 reg, u16 val);
-u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
- u8 phy_addr, u8 devad, u16 reg, u16 val);
+u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 *ret_val);
+u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 val);
/* Reads the link_status from the shmem,
and update the link vars accordingly */
void bnx2x_link_status_update(struct link_params *input,
@@ -178,9 +296,12 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
Basically, the CLC takes care of the led for the link, but in case one needs
to set/unset the led manually, set the "mode" to LED_MODE_OPER to
blink the led, and LED_MODE_OFF to set the led off. */
-u8 bnx2x_set_led(struct link_params *params, u8 mode, u32 speed);
-#define LED_MODE_OFF 0
-#define LED_MODE_OPER 2
+u8 bnx2x_set_led(struct link_params *params, struct link_vars *vars,
+ u8 mode, u32 speed);
+#define LED_MODE_OFF 0
+#define LED_MODE_ON 1
+#define LED_MODE_OPER 2
+#define LED_MODE_FRONT_PANEL_OFF 3
u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port, u32 led_idx, u32 value);
@@ -190,17 +311,38 @@ void bnx2x_handle_module_detect_int(struct link_params *params);
/* Get the actual link status. In case it returns 0, link is up,
otherwise link is down*/
-u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars);
+u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars,
+ u8 is_serdes);
/* One-time initialization for external phy after power up */
-u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base);
+u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base);
/* Reset the external PHY using GPIO */
void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
-void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr);
+/* Reset the SFX7101 external phy */
+void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
-u8 bnx2x_read_sfp_module_eeprom(struct link_params *params, u16 addr,
+u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params, u16 addr,
u8 byte_cnt, u8 *o_buf);
+void bnx2x_hw_reset_phy(struct link_params *params);
+
+/* Checks if HW lock is required for this phy/board type */
+u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base,
+ u32 shmem2_base);
+
+/* Returns the aggregated supported attributes of the phys on board */
+u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx);
+
+/* Check swap bit and adjust PHY order */
+u32 bnx2x_phy_selection(struct link_params *params);
+
+/* Probe the phys on board, and populate them in "params" */
+u8 bnx2x_phy_probe(struct link_params *params);
+/* Checks if fan failure detection is required on one of the phys on board */
+u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
+ u32 shmem2_base, u8 port);
+
#endif /* BNX2X_LINK_H */
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index b4ec2b02a465..67587fe9e358 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -781,7 +781,7 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
DP(NETIF_MSG_HW,
"resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
resource, HW_LOCK_MAX_RESOURCE_VALUE);
- return -EINVAL;
+ return false;
}
if (func <= 5)
@@ -1227,26 +1227,66 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
return 0;
}
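+/* Returns the link_config index (0 or 1) of the currently relevant phy,
+ taking the link state and PHY swapping into account */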
+int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
+{
+ u32 sel_phy_idx = 0;
+ if (bp->link_vars.link_up) {
+ sel_phy_idx = EXT_PHY1;
+ /* In case link is SERDES, check if the EXT_PHY2 is the one */
+ if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
+ (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
+ sel_phy_idx = EXT_PHY2;
+ } else {
+
+ switch (bnx2x_phy_selection(&bp->link_params)) {
+ case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+ sel_phy_idx = EXT_PHY1;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+ sel_phy_idx = EXT_PHY2;
+ break;
+ }
+ }
+ /*
+ * The selected active PHY is as seen after swapping (in case PHY
+ * swapping is enabled), so when swapping is enabled we need to
+ * reverse the configuration.
+ */
+
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
+ if (sel_phy_idx == EXT_PHY1)
+ sel_phy_idx = EXT_PHY2;
+ else if (sel_phy_idx == EXT_PHY2)
+ sel_phy_idx = EXT_PHY1;
+ }
+ return LINK_CONFIG_IDX(sel_phy_idx);
+}
+
void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
+ u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
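+ /* Advertising is kept per link-config index: dual-media boards
+ carry two link configurations (see LINK_CONFIG_IDX) */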
switch (bp->link_vars.ieee_fc &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
- bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
+ bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
ADVERTISED_Pause);
break;
case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
- bp->port.advertising |= (ADVERTISED_Asym_Pause |
+ bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
ADVERTISED_Pause);
break;
case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
- bp->port.advertising |= ADVERTISED_Asym_Pause;
+ bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
break;
default:
- bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
+ bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
ADVERTISED_Pause);
break;
}
@@ -1257,7 +1297,8 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
if (!BP_NOMCP(bp)) {
u8 rc;
-
+ int cfx_idx = bnx2x_get_link_cfg_idx(bp);
+ u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
/* Initialize link parameters structure variables */
/* It is recommended to turn off RX FC for jumbo frames
for better performance */
@@ -1268,8 +1309,10 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
bnx2x_acquire_phy_lock(bp);
- if (load_mode == LOAD_DIAG)
- bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
+ if (load_mode == LOAD_DIAG) {
+ bp->link_params.loopback_mode = LOOPBACK_XGXS;
+ bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
+ }
rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
@@ -1281,7 +1324,7 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
bnx2x_link_report(bp);
}
-
+ bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
return rc;
}
BNX2X_ERR("Bootcode is missing - can not initialize link\n");
@@ -1292,6 +1335,7 @@ void bnx2x_link_set(struct bnx2x *bp)
{
if (!BP_NOMCP(bp)) {
bnx2x_acquire_phy_lock(bp);
+ bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
bnx2x_release_phy_lock(bp);
@@ -1310,13 +1354,14 @@ static void bnx2x__link_reset(struct bnx2x *bp)
BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
-u8 bnx2x_link_test(struct bnx2x *bp)
+u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
{
u8 rc = 0;
if (!BP_NOMCP(bp)) {
bnx2x_acquire_phy_lock(bp);
- rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
+ rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
+ is_serdes);
bnx2x_release_phy_lock(bp);
} else
BNX2X_ERR("Bootcode is missing - can not test link\n");
@@ -1585,7 +1630,7 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
*/
/* send the MCP a request, block until there is a reply */
-u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
+u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
{
int func = BP_FUNC(bp);
u32 seq = ++bp->fw_seq;
@@ -1594,6 +1639,7 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
mutex_lock(&bp->fw_mb_mutex);
+ SHMEM_WR(bp, func_mb[func].drv_mb_param, param);
SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
@@ -1715,9 +1761,9 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
/* Report results to MCP */
if (dcc_event)
- bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
else
- bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
}
/* must be called under the spq lock */
@@ -1959,12 +2005,16 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
int port = BP_PORT(bp);
-
+ u32 ext_phy_config;
/* mark the failure */
- bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
- bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
+ ext_phy_config =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config);
+
+ ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
+ ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
- bp->link_params.ext_phy_config);
+ ext_phy_config);
/* log the failure */
netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
@@ -1976,7 +2026,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
int port = BP_PORT(bp);
int reg_offset;
- u32 val, swap_val, swap_override;
+ u32 val;
reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
@@ -1990,30 +2040,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
BNX2X_ERR("SPIO5 hw attention\n");
/* Fan failure attention */
- switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
- /* Low power mode is controlled by GPIO 2 */
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
- /* The PHY reset is controlled by GPIO 1 */
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
- /* The PHY reset is controlled by GPIO 1 */
- /* fake the port number to cancel the swap done in
- set_gpio() */
- swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
- swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
- port = (swap_val && swap_override) ^ 1;
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
- break;
-
- default:
- break;
- }
+ bnx2x_hw_reset_phy(&bp->link_params);
bnx2x_fan_failure(bp);
}
@@ -3803,10 +3830,9 @@ static const struct {
static void enable_blocks_parity(struct bnx2x *bp)
{
- int i, mask_arr_len =
- sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
+ int i;
- for (i = 0; i < mask_arr_len; i++)
+ for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
REG_WR(bp, bnx2x_parity_mask[i].addr,
bnx2x_parity_mask[i].mask);
}
@@ -3862,17 +3888,12 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
*/
else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
for (port = PORT_0; port < PORT_MAX; port++) {
- u32 phy_type =
- SHMEM_RD(bp, dev_info.port_hw_config[port].
- external_phy_config) &
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
is_required |=
- ((phy_type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
- (phy_type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
- (phy_type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
+ bnx2x_fan_failure_det_req(
+ bp,
+ bp->common.shmem_base,
+ bp->common.shmem2_base,
+ port);
}
DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
@@ -4139,17 +4160,9 @@ static int bnx2x_init_common(struct bnx2x *bp)
return -EBUSY;
}
- switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
- bp->port.need_hw_lock = 1;
- break;
-
- default:
- break;
- }
+ bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
+ bp->common.shmem_base,
+ bp->common.shmem2_base);
bnx2x_setup_fan_failure_detection(bp);
@@ -4162,7 +4175,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
if (!BP_NOMCP(bp)) {
bnx2x_acquire_phy_lock(bp);
- bnx2x_common_init_phy(bp, bp->common.shmem_base);
+ bnx2x_common_init_phy(bp, bp->common.shmem_base,
+ bp->common.shmem2_base);
bnx2x_release_phy_lock(bp);
} else
BNX2X_ERR("Bootcode is missing - can not initialize link\n");
@@ -4297,55 +4311,17 @@ static int bnx2x_init_port(struct bnx2x *bp)
bnx2x_init_block(bp, MCP_BLOCK, init_stage);
bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
-
- switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- {
- u32 swap_val, swap_override, aeu_gpio_mask, offset;
-
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
- MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
-
- /* The GPIO should be swapped if the swap register is
- set and active */
- swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
- swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
-
- /* Select function upon port-swap configuration */
- if (port == 0) {
- offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
- aeu_gpio_mask = (swap_val && swap_override) ?
- AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
- AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
- } else {
- offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
- aeu_gpio_mask = (swap_val && swap_override) ?
- AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
- AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
- }
- val = REG_RD(bp, offset);
- /* add GPIO3 to group */
- val |= aeu_gpio_mask;
- REG_WR(bp, offset, val);
- }
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
- /* add SPIO 5 to group 0 */
- {
+ bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
+ bp->common.shmem_base,
+ bp->common.shmem2_base);
+ if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
+ bp->common.shmem2_base, port)) {
u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
val = REG_RD(bp, reg_addr);
val |= AEU_INPUTS_ATTN_BITS_SPIO5;
REG_WR(bp, reg_addr, val);
- }
- break;
-
- default:
- break;
}
-
bnx2x__link_reset(bp);
return 0;
@@ -4475,7 +4451,7 @@ static int bnx2x_init_func(struct bnx2x *bp)
/* Reset PCIE errors for debug */
REG_WR(bp, 0x2114, 0xffffffff);
REG_WR(bp, 0x2120, 0xffffffff);
-
+ bnx2x_phy_probe(&bp->link_params);
return 0;
}
@@ -5297,7 +5273,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
unload_error:
if (!BP_NOMCP(bp))
- reset_code = bnx2x_fw_command(bp, reset_code);
+ reset_code = bnx2x_fw_command(bp, reset_code, 0);
else {
DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
load_count[0], load_count[1], load_count[2]);
@@ -5322,7 +5298,7 @@ unload_error:
/* Report UNLOAD_DONE to MCP */
if (!BP_NOMCP(bp))
- bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
}
@@ -5887,13 +5863,14 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
bp->fw_seq =
(SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
DRV_MSG_SEQ_NUMBER_MASK);
- reset_code = bnx2x_fw_command(bp, reset_code);
+ reset_code = bnx2x_fw_command(bp, reset_code, 0);
/* if UNDI is loaded on the other port */
if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
/* send "DONE" for previous unload */
- bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+ bnx2x_fw_command(bp,
+ DRV_MSG_CODE_UNLOAD_DONE, 0);
/* unload UNDI on port 1 */
bp->func = 1;
@@ -5902,7 +5879,7 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
DRV_MSG_SEQ_NUMBER_MASK);
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
- bnx2x_fw_command(bp, reset_code);
+ bnx2x_fw_command(bp, reset_code, 0);
}
/* now it's safe to release the lock */
@@ -5944,7 +5921,7 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
/* send unload done to the MCP */
- bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
/* restore our func and fw_seq */
bp->func = func;
@@ -5992,6 +5969,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
bp->link_params.shmem_base = bp->common.shmem_base;
+ bp->link_params.shmem2_base = bp->common.shmem2_base;
BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
bp->common.shmem_base, bp->common.shmem2_base);
@@ -6034,8 +6012,11 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
"please upgrade BC\n", BNX2X_BC_VER, val);
}
bp->link_params.feature_config_flags |=
- (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
+ (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
+ bp->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
+ FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
if (BP_E1HVN(bp) == 0) {
pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
@@ -6059,194 +6040,55 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
u32 switch_cfg)
{
- int port = BP_PORT(bp);
- u32 ext_phy_type;
-
- switch (switch_cfg) {
- case SWITCH_CFG_1G:
- BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
-
- ext_phy_type =
- SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
- switch (ext_phy_type) {
- case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
- BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
- ext_phy_type);
-
- bp->port.supported |= (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_2500baseX_Full |
- SUPPORTED_TP |
- SUPPORTED_FIBRE |
- SUPPORTED_Autoneg |
- SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
- break;
+ int cfg_size = 0, idx, port = BP_PORT(bp);
- case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
- BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
- ext_phy_type);
-
- bp->port.supported |= (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_TP |
- SUPPORTED_FIBRE |
- SUPPORTED_Autoneg |
- SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
- break;
+ /* Aggregation of supported attributes of all external phys */
+ bp->port.supported[0] = 0;
+ bp->port.supported[1] = 0;
+ switch (bp->link_params.num_phys) {
+ case 1:
+ bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
+ cfg_size = 1;
+ break;
+ case 2:
+ bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
+ cfg_size = 1;
+ break;
+ case 3:
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
+ bp->port.supported[1] =
+ bp->link_params.phy[EXT_PHY1].supported;
+ bp->port.supported[0] =
+ bp->link_params.phy[EXT_PHY2].supported;
+ } else {
+ bp->port.supported[0] =
+ bp->link_params.phy[EXT_PHY1].supported;
+ bp->port.supported[1] =
+ bp->link_params.phy[EXT_PHY2].supported;
+ }
+ cfg_size = 2;
+ break;
+ }
- default:
- BNX2X_ERR("NVRAM config error. "
- "BAD SerDes ext_phy_config 0x%x\n",
- bp->link_params.ext_phy_config);
+ if (!(bp->port.supported[0] || bp->port.supported[1])) {
+ BNX2X_ERR("NVRAM config error. BAD phy config."
+ "PHY1 config 0x%x, PHY2 config 0x%x\n",
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config),
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config2));
return;
}
+ switch (switch_cfg) {
+ case SWITCH_CFG_1G:
bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
port*0x10);
BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
break;
case SWITCH_CFG_10G:
- BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
-
- ext_phy_type =
- XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
- switch (ext_phy_type) {
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
- BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
- ext_phy_type);
-
- bp->port.supported |= (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_2500baseX_Full |
- SUPPORTED_10000baseT_Full |
- SUPPORTED_TP |
- SUPPORTED_FIBRE |
- SUPPORTED_Autoneg |
- SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
- BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
- ext_phy_type);
-
- bp->port.supported |= (SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE |
- SUPPORTED_Autoneg |
- SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
- BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
- ext_phy_type);
-
- bp->port.supported |= (SUPPORTED_10000baseT_Full |
- SUPPORTED_2500baseX_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE |
- SUPPORTED_Autoneg |
- SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
- BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
- ext_phy_type);
-
- bp->port.supported |= (SUPPORTED_10000baseT_Full |
- SUPPORTED_FIBRE |
- SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
- BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
- ext_phy_type);
-
- bp->port.supported |= (SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE |
- SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
- ext_phy_type);
-
- bp->port.supported |= (SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg |
- SUPPORTED_FIBRE |
- SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
- BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
- ext_phy_type);
-
- bp->port.supported |= (SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg |
- SUPPORTED_FIBRE |
- SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
- BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
- ext_phy_type);
-
- bp->port.supported |= (SUPPORTED_10000baseT_Full |
- SUPPORTED_TP |
- SUPPORTED_Autoneg |
- SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
- BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
- ext_phy_type);
-
- bp->port.supported |= (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_10000baseT_Full |
- SUPPORTED_TP |
- SUPPORTED_Autoneg |
- SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
- break;
-
- case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
- BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
- bp->link_params.ext_phy_config);
- break;
-
- default:
- BNX2X_ERR("NVRAM config error. "
- "BAD XGXS ext_phy_config 0x%x\n",
- bp->link_params.ext_phy_config);
- return;
- }
-
bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
port*0x18);
BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
@@ -6255,164 +6097,183 @@ static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
default:
BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
- bp->port.link_config);
+ bp->port.link_config[0]);
return;
}
- bp->link_params.phy_addr = bp->port.phy_addr;
-
- /* mask what we support according to speed_cap_mask */
- if (!(bp->link_params.speed_cap_mask &
+ /* mask what we support according to speed_cap_mask per configuration */
+ for (idx = 0; idx < cfg_size; idx++) {
+ if (!(bp->link_params.speed_cap_mask[idx] &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
- bp->port.supported &= ~SUPPORTED_10baseT_Half;
+ bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
- if (!(bp->link_params.speed_cap_mask &
+ if (!(bp->link_params.speed_cap_mask[idx] &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
- bp->port.supported &= ~SUPPORTED_10baseT_Full;
+ bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
- if (!(bp->link_params.speed_cap_mask &
+ if (!(bp->link_params.speed_cap_mask[idx] &
PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
- bp->port.supported &= ~SUPPORTED_100baseT_Half;
+ bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
- if (!(bp->link_params.speed_cap_mask &
+ if (!(bp->link_params.speed_cap_mask[idx] &
PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
- bp->port.supported &= ~SUPPORTED_100baseT_Full;
+ bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
- if (!(bp->link_params.speed_cap_mask &
+ if (!(bp->link_params.speed_cap_mask[idx] &
PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
- bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
+ bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full);
- if (!(bp->link_params.speed_cap_mask &
+ if (!(bp->link_params.speed_cap_mask[idx] &
PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
- bp->port.supported &= ~SUPPORTED_2500baseX_Full;
+ bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
- if (!(bp->link_params.speed_cap_mask &
+ if (!(bp->link_params.speed_cap_mask[idx] &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
- bp->port.supported &= ~SUPPORTED_10000baseT_Full;
+ bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
+
+ }
- BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
+ BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
+ bp->port.supported[1]);
}
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
- bp->link_params.req_duplex = DUPLEX_FULL;
-
- switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
+ u32 link_config, idx, cfg_size = 0;
+ bp->port.advertising[0] = 0;
+ bp->port.advertising[1] = 0;
+ switch (bp->link_params.num_phys) {
+ case 1:
+ case 2:
+ cfg_size = 1;
+ break;
+ case 3:
+ cfg_size = 2;
+ break;
+ }
+ for (idx = 0; idx < cfg_size; idx++) {
+ bp->link_params.req_duplex[idx] = DUPLEX_FULL;
+ link_config = bp->port.link_config[idx];
+ switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
case PORT_FEATURE_LINK_SPEED_AUTO:
- if (bp->port.supported & SUPPORTED_Autoneg) {
- bp->link_params.req_line_speed = SPEED_AUTO_NEG;
- bp->port.advertising = bp->port.supported;
+ if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_AUTO_NEG;
+ bp->port.advertising[idx] |=
+ bp->port.supported[idx];
} else {
- u32 ext_phy_type =
- XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
-
- if ((ext_phy_type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
- (ext_phy_type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
- /* force 10G, no AN */
- bp->link_params.req_line_speed = SPEED_10000;
- bp->port.advertising =
- (ADVERTISED_10000baseT_Full |
+ /* force 10G, no AN */
+ bp->link_params.req_line_speed[idx] =
+ SPEED_10000;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_10000baseT_Full |
ADVERTISED_FIBRE);
- break;
- }
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " Autoneg not supported\n",
- bp->port.link_config);
- return;
+ continue;
}
break;
case PORT_FEATURE_LINK_SPEED_10M_FULL:
- if (bp->port.supported & SUPPORTED_10baseT_Full) {
- bp->link_params.req_line_speed = SPEED_10;
- bp->port.advertising = (ADVERTISED_10baseT_Full |
+ if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_10;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_10baseT_Full |
ADVERTISED_TP);
} else {
BNX2X_ERROR("NVRAM config error. "
"Invalid link_config 0x%x"
" speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
return;
}
break;
case PORT_FEATURE_LINK_SPEED_10M_HALF:
- if (bp->port.supported & SUPPORTED_10baseT_Half) {
- bp->link_params.req_line_speed = SPEED_10;
- bp->link_params.req_duplex = DUPLEX_HALF;
- bp->port.advertising = (ADVERTISED_10baseT_Half |
+ if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_10;
+ bp->link_params.req_duplex[idx] =
+ DUPLEX_HALF;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_10baseT_Half |
ADVERTISED_TP);
} else {
BNX2X_ERROR("NVRAM config error. "
"Invalid link_config 0x%x"
" speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
return;
}
break;
case PORT_FEATURE_LINK_SPEED_100M_FULL:
- if (bp->port.supported & SUPPORTED_100baseT_Full) {
- bp->link_params.req_line_speed = SPEED_100;
- bp->port.advertising = (ADVERTISED_100baseT_Full |
+ if (bp->port.supported[idx] & SUPPORTED_100baseT_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_100;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_100baseT_Full |
ADVERTISED_TP);
} else {
BNX2X_ERROR("NVRAM config error. "
"Invalid link_config 0x%x"
" speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
return;
}
break;
case PORT_FEATURE_LINK_SPEED_100M_HALF:
- if (bp->port.supported & SUPPORTED_100baseT_Half) {
- bp->link_params.req_line_speed = SPEED_100;
- bp->link_params.req_duplex = DUPLEX_HALF;
- bp->port.advertising = (ADVERTISED_100baseT_Half |
+ if (bp->port.supported[idx] & SUPPORTED_100baseT_Half) {
+ bp->link_params.req_line_speed[idx] = SPEED_100;
+ bp->link_params.req_duplex[idx] = DUPLEX_HALF;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_100baseT_Half |
ADVERTISED_TP);
} else {
BNX2X_ERROR("NVRAM config error. "
"Invalid link_config 0x%x"
" speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
return;
}
break;
case PORT_FEATURE_LINK_SPEED_1G:
- if (bp->port.supported & SUPPORTED_1000baseT_Full) {
- bp->link_params.req_line_speed = SPEED_1000;
- bp->port.advertising = (ADVERTISED_1000baseT_Full |
+ if (bp->port.supported[idx] &
+ SUPPORTED_1000baseT_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_1000;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_1000baseT_Full |
ADVERTISED_TP);
} else {
BNX2X_ERROR("NVRAM config error. "
"Invalid link_config 0x%x"
" speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
return;
}
break;
case PORT_FEATURE_LINK_SPEED_2_5G:
- if (bp->port.supported & SUPPORTED_2500baseX_Full) {
- bp->link_params.req_line_speed = SPEED_2500;
- bp->port.advertising = (ADVERTISED_2500baseX_Full |
+ if (bp->port.supported[idx] &
+ SUPPORTED_2500baseX_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_2500;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_2500baseX_Full |
ADVERTISED_TP);
} else {
BNX2X_ERROR("NVRAM config error. "
"Invalid link_config 0x%x"
" speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
return;
}
break;
@@ -6420,16 +6281,19 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
case PORT_FEATURE_LINK_SPEED_10G_CX4:
case PORT_FEATURE_LINK_SPEED_10G_KX4:
case PORT_FEATURE_LINK_SPEED_10G_KR:
- if (bp->port.supported & SUPPORTED_10000baseT_Full) {
- bp->link_params.req_line_speed = SPEED_10000;
- bp->port.advertising = (ADVERTISED_10000baseT_Full |
+ if (bp->port.supported[idx] &
+ SUPPORTED_10000baseT_Full) {
+ bp->link_params.req_line_speed[idx] =
+ SPEED_10000;
+ bp->port.advertising[idx] |=
+ (ADVERTISED_10000baseT_Full |
ADVERTISED_FIBRE);
} else {
BNX2X_ERROR("NVRAM config error. "
"Invalid link_config 0x%x"
" speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ link_config,
+ bp->link_params.speed_cap_mask[idx]);
return;
}
break;
@@ -6437,23 +6301,28 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
default:
BNX2X_ERROR("NVRAM config error. "
"BAD link speed link_config 0x%x\n",
- bp->port.link_config);
- bp->link_params.req_line_speed = SPEED_AUTO_NEG;
- bp->port.advertising = bp->port.supported;
+ link_config);
+ bp->link_params.req_line_speed[idx] = SPEED_AUTO_NEG;
+ bp->port.advertising[idx] = bp->port.supported[idx];
break;
}
- bp->link_params.req_flow_ctrl = (bp->port.link_config &
+ bp->link_params.req_flow_ctrl[idx] = (link_config &
PORT_FEATURE_FLOW_CONTROL_MASK);
- if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
- !(bp->port.supported & SUPPORTED_Autoneg))
- bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ if ((bp->link_params.req_flow_ctrl[idx] ==
+ BNX2X_FLOW_CTRL_AUTO) &&
+ !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
+ bp->link_params.req_flow_ctrl[idx] =
+ BNX2X_FLOW_CTRL_NONE;
+ }
- BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
- " advertising 0x%x\n",
- bp->link_params.req_line_speed,
- bp->link_params.req_duplex,
- bp->link_params.req_flow_ctrl, bp->port.advertising);
+ BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
+ " 0x%x advertising 0x%x\n",
+ bp->link_params.req_line_speed[idx],
+ bp->link_params.req_duplex[idx],
+ bp->link_params.req_flow_ctrl[idx],
+ bp->port.advertising[idx]);
+ }
}
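This long hunk reads more easily once you see that the whole requested-settings parser now runs inside a loop over the PHY index: that is what the extra closing brace above terminates, and why every field and error message gained an [idx] plus a local link_config. A hedged reconstruction of the enclosing loop (the bound of 2 and the masking step are assumptions drawn from the link_config[0]/link_config[1] reads in bnx2x_get_port_hwinfo() further down):

	int idx;

	for (idx = 0; idx < 2; idx++) {	/* one pass per configurable PHY */
		u32 link_config = bp->port.link_config[idx];

		/* the switch on the requested speed then fills the
		 * per-PHY slots: req_line_speed[idx], req_duplex[idx],
		 * req_flow_ctrl[idx] and port.advertising[idx]
		 */
	}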
static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
@@ -6469,48 +6338,28 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
int port = BP_PORT(bp);
u32 val, val2;
u32 config;
- u16 i;
- u32 ext_phy_type;
+ u32 ext_phy_type, ext_phy_config;
bp->link_params.bp = bp;
bp->link_params.port = port;
bp->link_params.lane_config =
SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
- bp->link_params.ext_phy_config =
- SHMEM_RD(bp,
- dev_info.port_hw_config[port].external_phy_config);
- /* BCM8727_NOC => BCM8727 no over current */
- if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
- bp->link_params.ext_phy_config &=
- ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
- bp->link_params.ext_phy_config |=
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
- bp->link_params.feature_config_flags |=
- FEATURE_CONFIG_BCM8727_NOC;
- }
- bp->link_params.speed_cap_mask =
+ bp->link_params.speed_cap_mask[0] =
SHMEM_RD(bp,
dev_info.port_hw_config[port].speed_capability_mask);
-
- bp->port.link_config =
+ bp->link_params.speed_cap_mask[1] =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].speed_capability_mask2);
+ bp->port.link_config[0] =
SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
- /* Get the 4 lanes xgxs config rx and tx */
- for (i = 0; i < 2; i++) {
- val = SHMEM_RD(bp,
- dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
- bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
- bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
-
- val = SHMEM_RD(bp,
- dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
- bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
- bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
- }
+ bp->port.link_config[1] =
+ SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
+ bp->link_params.multi_phy_config =
+ SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
/* If the device is capable of WoL, set the default state according
* to the HW
*/
@@ -6518,14 +6367,15 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
(config & PORT_FEATURE_WOL_ENABLED));
- BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
- " speed_cap_mask 0x%08x link_config 0x%08x\n",
+ BNX2X_DEV_INFO("lane_config 0x%08x"
+ "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
bp->link_params.lane_config,
- bp->link_params.ext_phy_config,
- bp->link_params.speed_cap_mask, bp->port.link_config);
+ bp->link_params.speed_cap_mask[0],
+ bp->port.link_config[0]);
- bp->link_params.switch_cfg |= (bp->port.link_config &
+ bp->link_params.switch_cfg = (bp->port.link_config[0] &
PORT_FEATURE_CONNECTED_SWITCH_MASK);
+ bnx2x_phy_probe(&bp->link_params);
bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
bnx2x_link_settings_requested(bp);
@@ -6534,14 +6384,17 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
* If connected directly, work with the internal PHY, otherwise, work
* with the external PHY
*/
- ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
+ ext_phy_config =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config);
+ ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
- bp->mdio.prtad = bp->link_params.phy_addr;
+ bp->mdio.prtad = bp->port.phy_addr;
else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
(ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
bp->mdio.prtad =
- XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
+ XGXS_EXT_PHY_ADDR(ext_phy_config);
val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
@@ -6766,7 +6619,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
bp->mrrs = mrrs;
bp->tx_ring_size = MAX_TX_AVAIL;
- bp->rx_ring_size = MAX_RX_AVAIL;
bp->rx_csum = 1;
@@ -6977,23 +6829,15 @@ static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
struct bnx2x *bp = netdev_priv(netdev);
u16 value;
int rc;
- u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
prtad, devad, addr);
- if (prtad != bp->mdio.prtad) {
- DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
- prtad, bp->mdio.prtad);
- return -EINVAL;
- }
-
/* The HW expects different devad if CL22 is used */
devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
bnx2x_acquire_phy_lock(bp);
- rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
- devad, addr, &value);
+ rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
bnx2x_release_phy_lock(bp);
DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
@@ -7007,24 +6851,16 @@ static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
u16 addr, u16 value)
{
struct bnx2x *bp = netdev_priv(netdev);
- u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
int rc;
DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
" value 0x%x\n", prtad, devad, addr, value);
- if (prtad != bp->mdio.prtad) {
- DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
- prtad, bp->mdio.prtad);
- return -EINVAL;
- }
-
/* The HW expects different devad if CL22 is used */
devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
bnx2x_acquire_phy_lock(bp);
- rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
- devad, addr, value);
+ rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
bnx2x_release_phy_lock(bp);
return rc;
}
@@ -7254,7 +7090,7 @@ static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}
-static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
+static int bnx2x_check_firmware(struct bnx2x *bp)
{
const struct firmware *firmware = bp->firmware;
struct bnx2x_fw_file_hdr *fw_hdr;
@@ -7365,7 +7201,7 @@ do { \
(u8 *)bp->arr, len); \
} while (0)
-static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
+int bnx2x_init_firmware(struct bnx2x *bp)
{
const char *fw_file_name;
struct bnx2x_fw_file_hdr *fw_hdr;
@@ -7376,21 +7212,21 @@ static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
else if (CHIP_IS_E1H(bp))
fw_file_name = FW_FILE_NAME_E1H;
else {
- dev_err(dev, "Unsupported chip revision\n");
+ BNX2X_ERR("Unsupported chip revision\n");
return -EINVAL;
}
- dev_info(dev, "Loading %s\n", fw_file_name);
+ BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
- rc = request_firmware(&bp->firmware, fw_file_name, dev);
+ rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
if (rc) {
- dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
+ BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
goto request_firmware_exit;
}
rc = bnx2x_check_firmware(bp);
if (rc) {
- dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
+ BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
goto request_firmware_exit;
}
@@ -7468,13 +7304,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
if (rc)
goto init_one_exit;
- /* Set init arrays */
- rc = bnx2x_init_firmware(bp, &pdev->dev);
- if (rc) {
- dev_err(&pdev->dev, "Error loading firmware\n");
- goto init_one_exit;
- }
-
rc = register_netdev(dev);
if (rc) {
dev_err(&pdev->dev, "Cannot register net device\n");
@@ -7525,11 +7354,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
/* Make sure RESET task is not scheduled before continuing */
cancel_delayed_work_sync(&bp->reset_task);
- kfree(bp->init_ops_offsets);
- kfree(bp->init_ops);
- kfree(bp->init_data);
- release_firmware(bp->firmware);
-
if (bp->regview)
iounmap(bp->regview);
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index a1f3bf0cd630..6be0d09ad3fd 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -4964,6 +4964,8 @@
#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN 0x0001
#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_AN_FST_TMR 0x0040
#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1 0x14
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SGMII 0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_LINK 0x0002
#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_DUPLEX 0x0004
#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_MASK 0x0018
#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_SHIFT 3
@@ -5135,28 +5137,35 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR 0x8005
#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF 0x8007
#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK 0xff
-#define MDIO_PMA_REG_8727_MISC_CTRL 0x8309
#define MDIO_PMA_REG_8727_TX_CTRL1 0xca02
#define MDIO_PMA_REG_8727_TX_CTRL2 0xca05
#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808
#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e
+#define MDIO_PMA_REG_8727_PCS_GP 0xc842
+
+#define MDIO_AN_REG_8727_MISC_CTRL 0x8309
#define MDIO_PMA_REG_8073_CHIP_REV 0xc801
#define MDIO_PMA_REG_8073_SPEED_LINK_STATUS 0xc820
#define MDIO_PMA_REG_8073_XAUI_WA 0xc841
+#define MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL 0xcd08
#define MDIO_PMA_REG_7101_RESET 0xc000
#define MDIO_PMA_REG_7107_LED_CNTL 0xc007
+#define MDIO_PMA_REG_7107_LINK_LED_CNTL 0xc009
#define MDIO_PMA_REG_7101_VER1 0xc026
#define MDIO_PMA_REG_7101_VER2 0xc027
-#define MDIO_PMA_REG_8481_PMD_SIGNAL 0xa811
-#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c
-#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f
-#define MDIO_PMA_REG_8481_LED3_MASK 0xa832
-#define MDIO_PMA_REG_8481_LED3_BLINK 0xa834
-#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835
-#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b
+#define MDIO_PMA_REG_8481_PMD_SIGNAL 0xa811
+#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c
+#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f
+#define MDIO_PMA_REG_8481_LED3_MASK 0xa832
+#define MDIO_PMA_REG_8481_LED3_BLINK 0xa834
+#define MDIO_PMA_REG_8481_LED5_MASK 0xa838
+#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835
+#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b
+#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK 0x800
+#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT 11
#define MDIO_WIS_DEVAD 0x2
@@ -5188,6 +5197,8 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_XS_8706_REG_BANK_RX3 0x80ec
#define MDIO_XS_8706_REG_BANK_RXA 0x80fc
+#define MDIO_XS_REG_8073_RX_CTRL_PCIE 0x80FA
+
#define MDIO_AN_DEVAD 0x7
/*ieee*/
#define MDIO_AN_REG_CTRL 0x0000
@@ -5210,14 +5221,40 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_AN_REG_CL37_FC_LP 0xffe5
#define MDIO_AN_REG_8073_2_5G 0x8329
+#define MDIO_AN_REG_8073_BAM 0x8350
+#define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL 0x0020
#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0
+#define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1
#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4
+#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6
#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9
#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5
#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7
+#define MDIO_AN_REG_8481_AUX_CTRL 0xfff8
#define MDIO_AN_REG_8481_LEGACY_SHADOW 0xfffc
+/* BCM84823 only */
+#define MDIO_CTL_DEVAD 0x1e
+#define MDIO_CTL_REG_84823_MEDIA 0x401a
+#define MDIO_CTL_REG_84823_MEDIA_MAC_MASK 0x0018
+ /* These pins configure the BCM84823 interface to MAC after reset. */
+#define MDIO_CTL_REG_84823_CTRL_MAC_XFI 0x0008
+#define MDIO_CTL_REG_84823_MEDIA_MAC_XAUI_M 0x0010
+ /* These pins configure the BCM84823 interface to Line after reset. */
+#define MDIO_CTL_REG_84823_MEDIA_LINE_MASK 0x0060
+#define MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L 0x0020
+#define MDIO_CTL_REG_84823_MEDIA_LINE_XFI 0x0040
+ /* When this pin is active high during reset, the 10GBASE-T core is
+ * powered down; when it is active low, the 10GBASE-T core is powered up.
+ */
+#define MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN 0x0080
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK 0x0100
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100
+#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000
+
+
#define IGU_FUNC_BASE 0x0400
#define IGU_ADDR_MSIX 0x0000
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index c74724461020..efa1403ebf82 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -969,6 +969,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
{
struct bnx2x_eth_stats *estats = &bp->eth_stats;
struct net_device_stats *nstats = &bp->dev->stats;
+ unsigned long tmp;
int i;
nstats->rx_packets =
@@ -985,10 +986,10 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
- nstats->rx_dropped = estats->mac_discard;
+ tmp = estats->mac_discard;
for_each_queue(bp, i)
- nstats->rx_dropped +=
- le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
+ tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
+ nstats->rx_dropped = tmp;
nstats->tx_dropped = 0;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2cc4cfc31892..fb70c3e12927 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2797,9 +2797,15 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
* so it can wait
*/
bond_for_each_slave(bond, slave, i) {
+ unsigned long trans_start = dev_trans_start(slave->dev);
+
if (slave->link != BOND_LINK_UP) {
- if (time_before_eq(jiffies, dev_trans_start(slave->dev) + delta_in_ticks) &&
- time_before_eq(jiffies, slave->dev->last_rx + delta_in_ticks)) {
+ if (time_in_range(jiffies,
+ trans_start - delta_in_ticks,
+ trans_start + delta_in_ticks) &&
+ time_in_range(jiffies,
+ slave->dev->last_rx - delta_in_ticks,
+ slave->dev->last_rx + delta_in_ticks)) {
slave->link = BOND_LINK_UP;
slave->state = BOND_STATE_ACTIVE;
@@ -2827,8 +2833,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
* when the source ip is 0, so don't take the link down
* if we don't know our ip yet
*/
- if (time_after_eq(jiffies, dev_trans_start(slave->dev) + 2*delta_in_ticks) ||
- (time_after_eq(jiffies, slave->dev->last_rx + 2*delta_in_ticks))) {
+ if (!time_in_range(jiffies,
+ trans_start - delta_in_ticks,
+ trans_start + 2 * delta_in_ticks) ||
+ !time_in_range(jiffies,
+ slave->dev->last_rx - delta_in_ticks,
+ slave->dev->last_rx + 2 * delta_in_ticks)) {
slave->link = BOND_LINK_DOWN;
slave->state = BOND_STATE_BACKUP;
@@ -2883,13 +2893,16 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
{
struct slave *slave;
int i, commit = 0;
+ unsigned long trans_start;
bond_for_each_slave(bond, slave, i) {
slave->new_link = BOND_LINK_NOCHANGE;
if (slave->link != BOND_LINK_UP) {
- if (time_before_eq(jiffies, slave_last_rx(bond, slave) +
- delta_in_ticks)) {
+ if (time_in_range(jiffies,
+ slave_last_rx(bond, slave) - delta_in_ticks,
+ slave_last_rx(bond, slave) + delta_in_ticks)) {
+
slave->new_link = BOND_LINK_UP;
commit++;
}
@@ -2902,8 +2915,9 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
* active. This avoids bouncing, as the last receive
* times need a full ARP monitor cycle to be updated.
*/
- if (!time_after_eq(jiffies, slave->jiffies +
- 2 * delta_in_ticks))
+ if (time_in_range(jiffies,
+ slave->jiffies - delta_in_ticks,
+ slave->jiffies + 2 * delta_in_ticks))
continue;
/*
@@ -2921,8 +2935,10 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
*/
if (slave->state == BOND_STATE_BACKUP &&
!bond->current_arp_slave &&
- time_after(jiffies, slave_last_rx(bond, slave) +
- 3 * delta_in_ticks)) {
+ !time_in_range(jiffies,
+ slave_last_rx(bond, slave) - delta_in_ticks,
+ slave_last_rx(bond, slave) + 3 * delta_in_ticks)) {
+
slave->new_link = BOND_LINK_DOWN;
commit++;
}
@@ -2933,11 +2949,15 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
* - (more than 2*delta since receive AND
* the bond has an IP address)
*/
+ trans_start = dev_trans_start(slave->dev);
if ((slave->state == BOND_STATE_ACTIVE) &&
- (time_after_eq(jiffies, dev_trans_start(slave->dev) +
- 2 * delta_in_ticks) ||
- (time_after_eq(jiffies, slave_last_rx(bond, slave)
- + 2 * delta_in_ticks)))) {
+ (!time_in_range(jiffies,
+ trans_start - delta_in_ticks,
+ trans_start + 2 * delta_in_ticks) ||
+ !time_in_range(jiffies,
+ slave_last_rx(bond, slave) - delta_in_ticks,
+ slave_last_rx(bond, slave) + 2 * delta_in_ticks))) {
+
slave->new_link = BOND_LINK_DOWN;
commit++;
}
@@ -2956,6 +2976,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
{
struct slave *slave;
int i;
+ unsigned long trans_start;
bond_for_each_slave(bond, slave, i) {
switch (slave->new_link) {
@@ -2963,10 +2984,11 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
continue;
case BOND_LINK_UP:
+ trans_start = dev_trans_start(slave->dev);
if ((!bond->curr_active_slave &&
- time_before_eq(jiffies,
- dev_trans_start(slave->dev) +
- delta_in_ticks)) ||
+ time_in_range(jiffies,
+ trans_start - delta_in_ticks,
+ trans_start + delta_in_ticks)) ||
bond->curr_active_slave != slave) {
slave->link = BOND_LINK_UP;
bond->current_arp_slave = NULL;
@@ -4656,6 +4678,10 @@ static void bond_setup(struct net_device *bond_dev)
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER);
+ /* By default, we enable GRO on bonding devices.
+ * Actual support requires that the low-level drivers be GRO ready.
+ */
+ bond_dev->features |= NETIF_F_GRO;
}
static void bond_work_cancel_all(struct bonding *bond)
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 631a6242b011..75bfc3a9d95f 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -15,7 +15,7 @@ config CAIF_TTY
config CAIF_SPI_SLAVE
tristate "CAIF SPI transport driver for slave interface"
- depends on CAIF
+ depends on CAIF && HAS_DMA
default n
---help---
The CAIF Link layer SPI Protocol driver for Slave SPI interface.
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
index 077ccf840edf..2111dbfea6fe 100644
--- a/drivers/net/caif/caif_spi_slave.c
+++ b/drivers/net/caif/caif_spi_slave.c
@@ -22,13 +22,13 @@
#include <net/caif/caif_spi.h>
#ifndef CONFIG_CAIF_SPI_SYNC
-#define SPI_DATA_POS SPI_CMD_SZ
+#define SPI_DATA_POS 0
static inline int forward_to_spi_cmd(struct cfspi *cfspi)
{
return cfspi->rx_cpck_len;
}
#else
-#define SPI_DATA_POS 0
+#define SPI_DATA_POS SPI_CMD_SZ
static inline int forward_to_spi_cmd(struct cfspi *cfspi)
{
return 0;
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index af753936e835..312b9c8f4f3b 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -38,7 +38,7 @@
struct mpc5xxx_can_data {
unsigned int type;
- u32 (*get_clock)(struct of_device *ofdev, const char *clock_name,
+ u32 (*get_clock)(struct platform_device *ofdev, const char *clock_name,
int *mscan_clksrc);
};
@@ -48,7 +48,7 @@ static struct of_device_id __devinitdata mpc52xx_cdm_ids[] = {
{}
};
-static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
+static u32 __devinit mpc52xx_can_get_clock(struct platform_device *ofdev,
const char *clock_name,
int *mscan_clksrc)
{
@@ -101,7 +101,7 @@ static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
return freq;
}
#else /* !CONFIG_PPC_MPC52xx */
-static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
+static u32 __devinit mpc52xx_can_get_clock(struct platform_device *ofdev,
const char *clock_name,
int *mscan_clksrc)
{
@@ -129,7 +129,7 @@ static struct of_device_id __devinitdata mpc512x_clock_ids[] = {
{}
};
-static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
+static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
const char *clock_name,
int *mscan_clksrc)
{
@@ -143,12 +143,12 @@ static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
np_clock = of_find_matching_node(NULL, mpc512x_clock_ids);
if (!np_clock) {
dev_err(&ofdev->dev, "couldn't find clock node\n");
- return -ENODEV;
+ return 0;
}
clockctl = of_iomap(np_clock, 0);
if (!clockctl) {
dev_err(&ofdev->dev, "couldn't map clock registers\n");
- return 0;
+ goto exit_put;
}
/* Determine the MSCAN device index from the physical address */
@@ -233,13 +233,13 @@ static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
clocksrc == 1 ? "ref_clk" : "sys_clk", clockdiv);
exit_unmap:
- of_node_put(np_clock);
iounmap(clockctl);
-
+exit_put:
+ of_node_put(np_clock);
return freq;
}
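Two separate bugs are fixed above, worth untangling: this function returns a u32 clock frequency, so the old "return -ENODEV" reached the caller as a huge bogus frequency rather than the 0 that signals failure; and the error path leaked the np_clock reference when of_iomap() failed. The corrected unwind releases resources in reverse order of acquisition:

	exit_unmap:
		iounmap(clockctl);	/* mapped second, undone first */
	exit_put:
		of_node_put(np_clock);	/* taken first, released last */
		return freq;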
#else /* !CONFIG_PPC_MPC512x */
-static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
+static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
const char *clock_name,
int *mscan_clksrc)
{
@@ -247,7 +247,7 @@ static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
}
#endif /* CONFIG_PPC_MPC512x */
-static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
+static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev,
const struct of_device_id *id)
{
struct mpc5xxx_can_data *data = (struct mpc5xxx_can_data *)id->data;
@@ -317,7 +317,7 @@ exit_unmap_mem:
return err;
}
-static int __devexit mpc5xxx_can_remove(struct of_device *ofdev)
+static int __devexit mpc5xxx_can_remove(struct platform_device *ofdev)
{
struct net_device *dev = dev_get_drvdata(&ofdev->dev);
struct mscan_priv *priv = netdev_priv(dev);
@@ -334,7 +334,7 @@ static int __devexit mpc5xxx_can_remove(struct of_device *ofdev)
#ifdef CONFIG_PM
static struct mscan_regs saved_regs;
-static int mpc5xxx_can_suspend(struct of_device *ofdev, pm_message_t state)
+static int mpc5xxx_can_suspend(struct platform_device *ofdev, pm_message_t state)
{
struct net_device *dev = dev_get_drvdata(&ofdev->dev);
struct mscan_priv *priv = netdev_priv(dev);
@@ -345,7 +345,7 @@ static int mpc5xxx_can_suspend(struct of_device *ofdev, pm_message_t state)
return 0;
}
-static int mpc5xxx_can_resume(struct of_device *ofdev)
+static int mpc5xxx_can_resume(struct platform_device *ofdev)
{
struct net_device *dev = dev_get_drvdata(&ofdev->dev);
struct mscan_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index ac1a83d7c204..5bfccfdf3bbb 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -67,7 +67,7 @@ static void sja1000_ofp_write_reg(const struct sja1000_priv *priv,
out_8(priv->reg_base + reg, val);
}
-static int __devexit sja1000_ofp_remove(struct of_device *ofdev)
+static int __devexit sja1000_ofp_remove(struct platform_device *ofdev)
{
struct net_device *dev = dev_get_drvdata(&ofdev->dev);
struct sja1000_priv *priv = netdev_priv(dev);
@@ -87,7 +87,7 @@ static int __devexit sja1000_ofp_remove(struct of_device *ofdev)
return 0;
}
-static int __devinit sja1000_ofp_probe(struct of_device *ofdev,
+static int __devinit sja1000_ofp_probe(struct platform_device *ofdev,
const struct of_device_id *id)
{
struct device_node *np = ofdev->dev.of_node;
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 28c88eeec757..32aaadc4734f 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -2149,7 +2149,7 @@ end_copy_pkt:
skb->csum = csum_unfold(~csum);
skb->ip_summed = CHECKSUM_COMPLETE;
} else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
return len;
}
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index f01cfdb995de..1950b9a20ecd 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1388,7 +1388,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
++st->rx_cso_good;
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
st->vlan_xtract++;
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 599d178df62d..63ebf76d2390 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -314,14 +314,12 @@ static int mi1_mdio_write(struct net_device *dev, int phy_addr, int mmd_addr,
return 0;
}
-#if defined(CONFIG_CHELSIO_T1_1G)
static const struct mdio_ops mi1_mdio_ops = {
.init = mi1_mdio_init,
.read = mi1_mdio_read,
.write = mi1_mdio_write,
.mode_support = MDIO_SUPPORTS_C22
};
-#endif
#endif
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 09610323a948..2ab6a7c4ffc1 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -1022,7 +1022,7 @@ static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
if (blks > cp->ethdev->ctx_tbl_len)
return -ENOMEM;
- cp->ctx_arr = kzalloc(blks * sizeof(struct cnic_ctx), GFP_KERNEL);
+ cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
if (cp->ctx_arr == NULL)
return -ENOMEM;
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index e1f6156b3710..fec939f8f65f 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -38,7 +38,7 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
-#include <asm/gpio.h>
+#include <linux/gpio.h>
#include <asm/atomic.h>
MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
@@ -108,7 +108,7 @@ MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
#define CPMAC_RX_INT_CLEAR 0x019c
#define CPMAC_MAC_INT_ENABLE 0x01a8
#define CPMAC_MAC_INT_CLEAR 0x01ac
-#define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4)
+#define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4)
#define CPMAC_MAC_ADDR_MID 0x01d0
#define CPMAC_MAC_ADDR_HI 0x01d4
#define CPMAC_MAC_HASH_LO 0x01d8
@@ -227,7 +227,7 @@ static void cpmac_dump_regs(struct net_device *dev)
for (i = 0; i < CPMAC_REG_END; i += 4) {
if (i % 16 == 0) {
if (i)
- printk("\n");
+ pr_cont("\n");
printk(KERN_DEBUG "%s: reg[%p]:", dev->name,
priv->regs + i);
}
@@ -262,7 +262,7 @@ static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
for (i = 0; i < skb->len; i++) {
if (i % 16 == 0) {
if (i)
- printk("\n");
+ pr_cont("\n");
printk(KERN_DEBUG "%s: data[%p]:", dev->name,
skb->data + i);
}
@@ -391,7 +391,7 @@ static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
if (likely(skb)) {
skb_put(desc->skb, desc->datalen);
desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
- desc->skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(desc->skb);
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += desc->datalen;
result = desc->skb;
@@ -506,7 +506,7 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
"restart rx from a descriptor that's "
"not free: %p\n",
priv->dev->name, restart);
- goto fatal_error;
+ goto fatal_error;
}
cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
@@ -873,7 +873,8 @@ static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return -EINVAL;
}
-static void cpmac_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
+static void cpmac_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
{
struct cpmac_priv *priv = netdev_priv(dev);
@@ -888,7 +889,8 @@ static void cpmac_get_ringparam(struct net_device *dev, struct ethtool_ringparam
ring->tx_pending = 1;
}
-static int cpmac_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
+static int cpmac_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
{
struct cpmac_priv *priv = netdev_priv(dev);
@@ -1012,8 +1014,8 @@ static int cpmac_open(struct net_device *dev)
priv->rx_head->prev->hw_next = (u32)0;
- if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED,
- dev->name, dev))) {
+ res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev);
+ if (res) {
if (netif_msg_drv(priv))
printk(KERN_ERR "%s: failed to obtain irq\n",
dev->name);
@@ -1133,7 +1135,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
}
if (phy_id == PHY_MAX_ADDR) {
- dev_err(&pdev->dev, "no PHY present, falling back to switch on MDIO bus 0\n");
+ dev_err(&pdev->dev, "no PHY present, falling back "
+ "to switch on MDIO bus 0\n");
strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */
phy_id = pdev->id;
}
@@ -1169,9 +1172,10 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
priv->msg_enable = netif_msg_init(debug_level, 0xff);
memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr));
- snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
+ snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
+ mdio_bus_id, phy_id);
- priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0,
+ priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, 0,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(priv->phy)) {
@@ -1182,7 +1186,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
goto fail;
}
- if ((rc = register_netdev(dev))) {
+ rc = register_netdev(dev);
+ if (rc) {
printk(KERN_ERR "cpmac: error %i registering device %s\n", rc,
dev->name);
goto fail;
@@ -1248,11 +1253,13 @@ int __devinit cpmac_init(void)
cpmac_mii->reset(cpmac_mii);
- for (i = 0; i < 300; i++)
- if ((mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE)))
+ for (i = 0; i < 300; i++) {
+ mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE);
+ if (mask)
break;
else
msleep(10);
+ }
mask &= 0x7fffffff;
if (mask & (mask - 1)) {
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index ad19585d960b..1ecf53dafe06 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1286,7 +1286,7 @@ irq_err:
/*
* Release resources when all the ports and offloading have been stopped.
*/
-static void cxgb_down(struct adapter *adapter)
+static void cxgb_down(struct adapter *adapter, int on_wq)
{
t3_sge_stop(adapter);
spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
@@ -1296,7 +1296,8 @@ static void cxgb_down(struct adapter *adapter)
free_irq_resources(adapter);
quiesce_rx(adapter);
t3_sge_stop(adapter);
- flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
+ if (!on_wq)
+ flush_workqueue(cxgb3_wq);/* wait for external IRQ handler */
}
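The new on_wq flag looks like deadlock avoidance (an inference from the on_wq=1 call in fatal_error_task below, which itself runs on cxgb3_wq): flush_workqueue(cxgb3_wq) from a work item of that same queue would wait for the calling item to complete, i.e. never return. A comment sketch capturing the contract:

	/* on_wq: set by callers already executing on cxgb3_wq; they must
	 * skip flush_workqueue(cxgb3_wq), which would otherwise wait on
	 * the very work item that called us.
	 */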
static void schedule_chk_task(struct adapter *adap)
@@ -1374,7 +1375,7 @@ static int offload_close(struct t3cdev *tdev)
clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
if (!adapter->open_device_map)
- cxgb_down(adapter);
+ cxgb_down(adapter, 0);
cxgb3_offload_deactivate(adapter);
return 0;
@@ -1409,7 +1410,7 @@ static int cxgb_open(struct net_device *dev)
return 0;
}
-static int cxgb_close(struct net_device *dev)
+static int __cxgb_close(struct net_device *dev, int on_wq)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -1436,12 +1437,17 @@ static int cxgb_close(struct net_device *dev)
cancel_delayed_work_sync(&adapter->adap_check_task);
if (!adapter->open_device_map)
- cxgb_down(adapter);
+ cxgb_down(adapter, on_wq);
cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
return 0;
}
+static int cxgb_close(struct net_device *dev)
+{
+ return __cxgb_close(dev, 0);
+}
+
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
@@ -2862,7 +2868,7 @@ void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
spin_unlock(&adapter->work_lock);
}
-static int t3_adapter_error(struct adapter *adapter, int reset)
+static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
{
int i, ret = 0;
@@ -2877,7 +2883,7 @@ static int t3_adapter_error(struct adapter *adapter, int reset)
struct net_device *netdev = adapter->port[i];
if (netif_running(netdev))
- cxgb_close(netdev);
+ __cxgb_close(netdev, on_wq);
}
/* Stop SGE timers */
@@ -2948,7 +2954,7 @@ static void fatal_error_task(struct work_struct *work)
int err = 0;
rtnl_lock();
- err = t3_adapter_error(adapter, 1);
+ err = t3_adapter_error(adapter, 1, 1);
if (!err)
err = t3_reenable_adapter(adapter);
if (!err)
@@ -2998,7 +3004,7 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
- ret = t3_adapter_error(adapter, 0);
+ ret = t3_adapter_error(adapter, 0, 0);
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index cb42353c9fdd..6990f6c65221 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -1997,6 +1997,10 @@
#define A_PL_RST 0x6f0
+#define S_FATALPERREN 4
+#define V_FATALPERREN(x) ((x) << S_FATALPERREN)
+#define F_FATALPERREN V_FATALPERREN(1U)
+
#define S_CRSTWRM 1
#define V_CRSTWRM(x) ((x) << S_CRSTWRM)
#define F_CRSTWRM V_CRSTWRM(1U)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 8ff96c6f6de5..c5a142bea5e9 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -2022,7 +2022,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
if (unlikely(p->vlan_valid)) {
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 427c451be1a7..421d5589cecd 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -1408,6 +1408,7 @@ static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
fatal++;
CH_ALERT(adapter, "%s (0x%x)\n",
acts->msg, status & acts->mask);
+ status &= ~acts->mask;
} else if (acts->msg)
CH_WARN(adapter, "%s (0x%x)\n",
acts->msg, status & acts->mask);
@@ -1843,11 +1844,10 @@ static int mac_intr_handler(struct adapter *adap, unsigned int idx)
t3_os_link_fault_handler(adap, idx);
}
- t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
-
if (cause & XGM_INTR_FATAL)
t3_fatal_err(adap);
+ t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
return cause != 0;
}
@@ -3569,6 +3569,7 @@ int t3_init_hw(struct adapter *adapter, u32 fw_params)
t3_write_reg(adapter, A_PM1_TX_MODE, 0);
chan_init_hw(adapter, adapter->params.chan_map);
t3_sge_init(adapter, &adapter->params.sge);
+ t3_set_reg_field(adapter, A_PL_RST, 0, F_FATALPERREN);
t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
@@ -3682,7 +3683,7 @@ static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
mc7->name = name;
mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
- mc7->size = mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
+ mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
mc7->width = G_WIDTH(cfg);
}
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index 6e562c0dad7d..3ece9f5069fa 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -463,6 +463,8 @@ struct sge {
u8 counter_val[SGE_NCOUNTERS];
unsigned int starve_thres;
u8 idma_state[2];
+ unsigned int egr_start;
+ unsigned int ingr_start;
void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */
struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
DECLARE_BITMAP(starving_fl, MAX_EGRQ);
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index c327527fbbc8..75b9401fd484 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -175,16 +175,26 @@ enum {
static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
CH_DEVICE(0xa000, 0), /* PE10K */
- CH_DEVICE(0x4001, 0),
- CH_DEVICE(0x4002, 0),
- CH_DEVICE(0x4003, 0),
- CH_DEVICE(0x4004, 0),
- CH_DEVICE(0x4005, 0),
- CH_DEVICE(0x4006, 0),
- CH_DEVICE(0x4007, 0),
- CH_DEVICE(0x4008, 0),
- CH_DEVICE(0x4009, 0),
- CH_DEVICE(0x400a, 0),
+ CH_DEVICE(0x4001, -1),
+ CH_DEVICE(0x4002, -1),
+ CH_DEVICE(0x4003, -1),
+ CH_DEVICE(0x4004, -1),
+ CH_DEVICE(0x4005, -1),
+ CH_DEVICE(0x4006, -1),
+ CH_DEVICE(0x4007, -1),
+ CH_DEVICE(0x4008, -1),
+ CH_DEVICE(0x4009, -1),
+ CH_DEVICE(0x400a, -1),
+ CH_DEVICE(0x4401, 4),
+ CH_DEVICE(0x4402, 4),
+ CH_DEVICE(0x4403, 4),
+ CH_DEVICE(0x4404, 4),
+ CH_DEVICE(0x4405, 4),
+ CH_DEVICE(0x4406, 4),
+ CH_DEVICE(0x4407, 4),
+ CH_DEVICE(0x4408, 4),
+ CH_DEVICE(0x4409, 4),
+ CH_DEVICE(0x440a, 4),
{ 0, }
};
@@ -423,10 +433,11 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
const struct cpl_sge_egr_update *p = (void *)rsp;
unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
- struct sge_txq *txq = q->adap->sge.egr_map[qid];
+ struct sge_txq *txq;
+ txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
txq->restarts++;
- if ((u8 *)txq < (u8 *)q->adap->sge.ethrxq) {
+ if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
struct sge_eth_txq *eq;
eq = container_of(txq, struct sge_eth_txq, q);
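The subtraction of sge.egr_start is the theme of the cxgb4 hunks that follow: the firmware now reports per-function queue id bases (FW_PARAMS_PARAM_PFVF_EQ_START / IQFLINT_START, queried in adap_init0() below), so absolute qids must be rebased before indexing the 0-based egr_map[]/ingr_map[] tables. A hedged helper sketch of the recurring idiom (the helper name is hypothetical; the driver open-codes this at each site):

	static inline struct sge_txq *egr_qid_to_txq(struct adapter *adap,
						     unsigned int abs_qid)
	{
		/* egr_map[] index 0 corresponds to qid sge.egr_start */
		return adap->sge.egr_map[abs_qid - adap->sge.egr_start];
	}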
@@ -658,6 +669,15 @@ static int setup_rss(struct adapter *adap)
}
/*
+ * Return the channel of the ingress queue with the given qid.
+ */
+static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
+{
+ qid -= p->ingr_start;
+ return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
+}
+
+/*
* Wait until all NAPI handlers are descheduled.
*/
static void quiesce_rx(struct adapter *adap)
@@ -1671,27 +1691,41 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
return 0;
}
-/*
- * Translate a physical EEPROM address to virtual. The first 1K is accessed
- * through virtual addresses starting at 31K, the rest is accessed through
- * virtual addresses starting at 0. This mapping is correct only for PF0.
+/**
+ * eeprom_ptov - translate a physical EEPROM address to virtual
+ * @phys_addr: the physical EEPROM address
+ * @fn: the PCI function number
+ * @sz: size of function-specific area
+ *
+ * Translate a physical EEPROM address to virtual. The first 1K is
+ * accessed through virtual addresses starting at 31K, the rest is
+ * accessed through virtual addresses starting at 0.
+ *
+ * The mapping is as follows:
+ * [0..1K) -> [31K..32K)
+ * [1K..1K+A) -> [31K-A..31K)
+ * [1K+A..ES) -> [0..ES-A-1K)
+ *
+ * where A = @fn * @sz, and ES = EEPROM size.
*/
-static int eeprom_ptov(unsigned int phys_addr)
+static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
+ fn *= sz;
if (phys_addr < 1024)
return phys_addr + (31 << 10);
+ if (phys_addr < 1024 + fn)
+ return 31744 - fn + phys_addr - 1024;
if (phys_addr < EEPROMSIZE)
- return phys_addr - 1024;
+ return phys_addr - 1024 - fn;
return -EINVAL;
}
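A worked example makes the three-way mapping concrete. Taking fn = 1 with sz = EEPROMPFSIZE = 1024, so A = 1024:

	/*
	 * eeprom_ptov(0,    1, 1024) = 0 + 31744              = 31744
	 *	[0..1K)  -> [31K..32K)
	 * eeprom_ptov(1024, 1, 1024) = 31744 - 1024 + 1024 - 1024 = 30720
	 *	[1K..2K) -> [30K..31K)
	 * eeprom_ptov(2048, 1, 1024) = 2048 - 1024 - 1024     = 0
	 *	[2K..ES) -> [0..ES-2K)
	 */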
/*
* The next two routines implement eeprom read/write from physical addresses.
- * The physical->virtual translation is correct only for PF0.
*/
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
- int vaddr = eeprom_ptov(phys_addr);
+ int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
if (vaddr >= 0)
vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
@@ -1700,7 +1734,7 @@ static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
- int vaddr = eeprom_ptov(phys_addr);
+ int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
if (vaddr >= 0)
vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
@@ -1743,6 +1777,14 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
aligned_offset = eeprom->offset & ~3;
aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
+ if (adapter->fn > 0) {
+ u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
+
+ if (aligned_offset < start ||
+ aligned_offset + aligned_len > start + EEPROMPFSIZE)
+ return -EPERM;
+ }
+
if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
/*
* RMW possibly needed for first or last words.
@@ -2304,7 +2346,7 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
req->peer_port = htons(0);
req->local_ip = sip;
req->peer_ip = htonl(0);
- chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
+ chan = rxq_to_chan(&adap->sge, queue);
req->opt0 = cpu_to_be64(TX_CHAN(chan));
req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
@@ -2346,7 +2388,7 @@ int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
req->peer_ip_hi = cpu_to_be64(0);
req->peer_ip_lo = cpu_to_be64(0);
- chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
+ chan = rxq_to_chan(&adap->sge, queue);
req->opt0 = cpu_to_be64(TX_CHAN(chan));
req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
@@ -3061,12 +3103,16 @@ static int adap_init0(struct adapter *adap)
params[2] = FW_PARAM_PFVF(L2T_END);
params[3] = FW_PARAM_PFVF(FILTER_START);
params[4] = FW_PARAM_PFVF(FILTER_END);
- ret = t4_query_params(adap, adap->fn, adap->fn, 0, 5, params, val);
+ params[5] = FW_PARAM_PFVF(IQFLINT_START);
+ params[6] = FW_PARAM_PFVF(EQ_START);
+ ret = t4_query_params(adap, adap->fn, adap->fn, 0, 7, params, val);
if (ret < 0)
goto bye;
port_vec = val[0];
adap->tids.ftid_base = val[3];
adap->tids.nftids = val[4] - val[3] + 1;
+ adap->sge.ingr_start = val[5];
+ adap->sge.egr_start = val[6];
if (c.ofldcaps) {
/* query offload-related parameters */
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index bf38cfc57565..9967f3debce7 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -557,7 +557,8 @@ out: cred = q->avail - cred;
if (unlikely(fl_starving(q))) {
smp_wmb();
- set_bit(q->cntxt_id, adap->sge.starving_fl);
+ set_bit(q->cntxt_id - adap->sge.egr_start,
+ adap->sge.starving_fl);
}
return cred;
@@ -974,7 +975,7 @@ out_free: dev_kfree_skb(skb);
}
cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
- TXPKT_INTF(pi->tx_chan) | TXPKT_PF(0));
+ TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
cpl->pack = htons(0);
cpl->len = htons(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1213,7 +1214,8 @@ static void txq_stop_maperr(struct sge_ofld_txq *q)
{
q->mapping_err++;
q->q.stops++;
- set_bit(q->q.cntxt_id, q->adap->sge.txq_maperr);
+ set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
+ q->adap->sge.txq_maperr);
}
/**
@@ -1603,7 +1605,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
rxq->stats.rx_cso++;
}
} else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
if (unlikely(pkt->vlan_ex)) {
struct vlan_group *grp = pi->vlan_grp;
@@ -1835,6 +1837,7 @@ static unsigned int process_intrq(struct adapter *adap)
if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
unsigned int qid = ntohl(rc->pldbuflen_qid);
+ qid -= adap->sge.ingr_start;
napi_schedule(&adap->sge.ingr_map[qid]->napi);
}
@@ -2050,14 +2053,14 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
/* set offset to -1 to distinguish ingress queues without FL */
iq->offset = fl ? 0 : -1;
- adap->sge.ingr_map[iq->cntxt_id] = iq;
+ adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
if (fl) {
fl->cntxt_id = ntohs(c.fl0id);
fl->avail = fl->pend_cred = 0;
fl->pidx = fl->cidx = 0;
fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
- adap->sge.egr_map[fl->cntxt_id] = fl;
+ adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
}
return 0;
@@ -2087,7 +2090,7 @@ static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
q->stops = q->restarts = 0;
q->stat = (void *)&q->desc[q->size];
q->cntxt_id = id;
- adap->sge.egr_map[id] = q;
+ adap->sge.egr_map[id - adap->sge.egr_start] = q;
}
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
@@ -2259,7 +2262,7 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
{
unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
- adap->sge.ingr_map[rq->cntxt_id] = NULL;
+ adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
rq->cntxt_id, fl_id, 0xffff);
dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
diff --git a/drivers/net/cxgb4/t4_hw.h b/drivers/net/cxgb4/t4_hw.h
index 10a055565776..c26b455f37de 100644
--- a/drivers/net/cxgb4/t4_hw.h
+++ b/drivers/net/cxgb4/t4_hw.h
@@ -42,6 +42,7 @@ enum {
MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */
EEPROMSIZE = 17408, /* Serial EEPROM physical size */
EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
+ EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */
TCB_SIZE = 128, /* TCB size */
NMTUS = 16, /* size of MTU table */
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h
index 0969f2fbc1b0..940584a8a640 100644
--- a/drivers/net/cxgb4/t4fw_api.h
+++ b/drivers/net/cxgb4/t4fw_api.h
@@ -487,6 +487,11 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_CPMASK = 0x25,
FW_PARAMS_PARAM_PFVF_OCQ_START = 0x26,
FW_PARAMS_PARAM_PFVF_OCQ_END = 0x27,
+ FW_PARAMS_PARAM_PFVF_CONM_MAP = 0x28,
+ FW_PARAMS_PARAM_PFVF_IQFLINT_START = 0x29,
+ FW_PARAMS_PARAM_PFVF_IQFLINT_END = 0x2A,
+ FW_PARAMS_PARAM_PFVF_EQ_START = 0x2B,
+ FW_PARAMS_PARAM_PFVF_EQ_END = 0x2C,
};
/*
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
index eb5a1c9cb2d3..f10864ddafbe 100644
--- a/drivers/net/cxgb4vf/sge.c
+++ b/drivers/net/cxgb4vf/sge.c
@@ -1520,7 +1520,6 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
__skb_pull(skb, PKTSHIFT);
skb->protocol = eth_type_trans(skb, rspq->netdev);
skb_record_rx_queue(skb, rspq->idx);
- skb->dev->last_rx = jiffies; /* XXX removed 2.6.29 */
pi = netdev_priv(skb->dev);
rxq->stats.pkts++;
@@ -1535,7 +1534,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
}
rxq->stats.rx_cso++;
} else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
if (unlikely(pkt->vlan_ex)) {
struct vlan_group *grp = pi->vlan_grp;
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index d7de376d7178..219eb5ad5c12 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -1255,7 +1255,7 @@ static int __devinit dec_lance_probe(struct device *bdev, const int type)
*/
init_timer(&lp->multicast_timer);
lp->multicast_timer.data = (unsigned long) dev;
- lp->multicast_timer.function = &lance_set_multicast_retry;
+ lp->multicast_timer.function = lance_set_multicast_retry;
ret = register_netdev(dev);
if (ret) {
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index a2f238d20caa..e1a8216ff692 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -465,7 +465,7 @@ rio_open (struct net_device *dev)
init_timer (&np->timer);
np->timer.expires = jiffies + 1*HZ;
np->timer.data = (unsigned long) dev;
- np->timer.function = &rio_timer;
+ np->timer.function = rio_timer;
add_timer (&np->timer);
/* Start Tx/Rx */
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 4fd6b2b4554b..9f6aeefa06bf 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1056,7 +1056,7 @@ dm9000_rx(struct net_device *dev)
if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
}
netif_rx(skb);
dev->stats.rx_packets++;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 5cc39ed289c6..8d9269d12a67 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -790,6 +790,70 @@ static const struct net_device_ops e1000_netdev_ops = {
};
/**
+ * e1000_init_hw_struct - initialize members of hw struct
+ * @adapter: board private struct
+ * @hw: structure used by e1000_hw.c
+ *
+ * Factors out initialization of the e1000_hw struct to its own function
+ * that can be called very early at init (just after struct allocation).
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ * Returns negative error codes if MAC type setup fails.
+ */
+static int e1000_init_hw_struct(struct e1000_adapter *adapter,
+ struct e1000_hw *hw)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ /* PCI config space info */
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_id = pdev->subsystem_device;
+ hw->revision_id = pdev->revision;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
+
+ hw->max_frame_size = adapter->netdev->mtu +
+ ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+ hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
+
+ /* identify the MAC */
+ if (e1000_set_mac_type(hw)) {
+ e_err(probe, "Unknown MAC Type\n");
+ return -EIO;
+ }
+
+ switch (hw->mac_type) {
+ default:
+ break;
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ hw->phy_init_script = 1;
+ break;
+ }
+
+ e1000_set_media_type(hw);
+ e1000_get_bus_info(hw);
+
+ hw->wait_autoneg_complete = false;
+ hw->tbi_compatibility_en = true;
+ hw->adaptive_ifs = true;
+
+ /* Copper options */
+
+ if (hw->media_type == e1000_media_type_copper) {
+ hw->mdix = AUTO_ALL_MODES;
+ hw->disable_polarity_correction = false;
+ hw->master_slave = E1000_MASTER_SLAVE;
+ }
+
+ return 0;
+}
+
+/**
* e1000_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in e1000_pci_tbl
@@ -826,22 +890,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (err)
return err;
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
- !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- pr_err("No usable DMA config, aborting\n");
- goto err_dma;
- }
- }
- pci_using_dac = 0;
- }
-
err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
if (err)
goto err_pci_reg;
@@ -885,6 +933,32 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
}
}
+ /* make ready for any if (hw->...) below */
+ err = e1000_init_hw_struct(adapter, hw);
+ if (err)
+ goto err_sw_init;
+
+ /*
+ * there is a workaround being applied below that limits
+ * 64-bit DMA addresses to 64-bit hardware: some 32-bit
+ * adapters hang on Tx when given 64-bit DMA addresses
+ */
+ pci_using_dac = 0;
+ if ((hw->bus_type == e1000_bus_type_pcix) &&
+ !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ /*
+ * according to DMA-API-HOWTO, coherent calls will always
+ * succeed if the set call did
+ */
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ pci_using_dac = 1;
+ } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ } else {
+ pr_err("No usable DMA config, aborting\n");
+ goto err_dma;
+ }
+
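Spelling out the reordered ladder: 64-bit masks are now attempted only on PCI-X parts, and per the DMA-API-HOWTO note in the code, a coherent mask of the same width cannot fail once the streaming mask succeeded, so its return value can be ignored. A condensed sketch of the same fallback pattern (the can_use_64bit predicate stands in for the bus-type test):

	if (can_use_64bit && !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		pci_using_dac = 1;	/* descriptors may carry 64-bit addresses */
	} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		pci_using_dac = 0;
	} else {
		return -EIO;		/* no usable DMA configuration */
	}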
netdev->netdev_ops = &e1000_netdev_ops;
e1000_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
@@ -959,18 +1033,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (!is_valid_ether_addr(netdev->perm_addr))
e_err(probe, "Invalid MAC Address\n");
- e1000_get_bus_info(hw);
-
init_timer(&adapter->tx_fifo_stall_timer);
- adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
+ adapter->tx_fifo_stall_timer.function = e1000_82547_tx_fifo_stall;
adapter->tx_fifo_stall_timer.data = (unsigned long)adapter;
init_timer(&adapter->watchdog_timer);
- adapter->watchdog_timer.function = &e1000_watchdog;
+ adapter->watchdog_timer.function = e1000_watchdog;
adapter->watchdog_timer.data = (unsigned long) adapter;
init_timer(&adapter->phy_info_timer);
- adapter->phy_info_timer.function = &e1000_update_phy_info;
+ adapter->phy_info_timer.function = e1000_update_phy_info;
adapter->phy_info_timer.data = (unsigned long)adapter;
INIT_WORK(&adapter->reset_task, e1000_reset_task);
@@ -1072,6 +1144,7 @@ err_eeprom:
iounmap(hw->flash_address);
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
+err_dma:
err_sw_init:
iounmap(hw->hw_addr);
err_ioremap:
@@ -1079,7 +1152,6 @@ err_ioremap:
err_alloc_etherdev:
pci_release_selected_regions(pdev, bars);
err_pci_reg:
-err_dma:
pci_disable_device(pdev);
return err;
}
@@ -1131,62 +1203,12 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
* @adapter: board private structure to initialize
*
* e1000_sw_init initializes the Adapter private data structure.
- * Fields are initialized based on PCI device information and
- * OS network device settings (MTU size).
+ * e1000_init_hw_struct MUST be called before this function
**/
static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
{
- struct e1000_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
-
- /* PCI config space info */
-
- hw->vendor_id = pdev->vendor;
- hw->device_id = pdev->device;
- hw->subsystem_vendor_id = pdev->subsystem_vendor;
- hw->subsystem_id = pdev->subsystem_device;
- hw->revision_id = pdev->revision;
-
- pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
-
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
- hw->max_frame_size = netdev->mtu +
- ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
- hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
-
- /* identify the MAC */
-
- if (e1000_set_mac_type(hw)) {
- e_err(probe, "Unknown MAC Type\n");
- return -EIO;
- }
-
- switch (hw->mac_type) {
- default:
- break;
- case e1000_82541:
- case e1000_82547:
- case e1000_82541_rev_2:
- case e1000_82547_rev_2:
- hw->phy_init_script = 1;
- break;
- }
-
- e1000_set_media_type(hw);
-
- hw->wait_autoneg_complete = false;
- hw->tbi_compatibility_en = true;
- hw->adaptive_ifs = true;
-
- /* Copper options */
-
- if (hw->media_type == e1000_media_type_copper) {
- hw->mdix = AUTO_ALL_MODES;
- hw->disable_polarity_correction = false;
- hw->master_slave = E1000_MASTER_SLAVE;
- }
adapter->num_tx_queues = 1;
adapter->num_rx_queues = 1;
@@ -3552,7 +3574,8 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
struct e1000_hw *hw = &adapter->hw;
u16 status = (u16)status_err;
u8 errors = (u8)(status_err >> 24);
- skb->ip_summed = CHECKSUM_NONE;
+
+ skb_checksum_none_assert(skb);
/* 82543 or newer only */
if (unlikely(hw->mac_type < e1000_82543)) return;
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index a4a0d2b6eb1c..d3d4a57e2450 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -936,12 +936,14 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
ew32(IMC, 0xffffffff);
icr = er32(ICR);
- /* Install any alternate MAC address into RAR0 */
- ret_val = e1000_check_alt_mac_addr_generic(hw);
- if (ret_val)
- return ret_val;
+ if (hw->mac.type == e1000_82571) {
+ /* Install any alternate MAC address into RAR0 */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ return ret_val;
- e1000e_set_laa_state_82571(hw, true);
+ e1000e_set_laa_state_82571(hw, true);
+ }
/* Reinitialize the 82571 serdes link state machine */
if (hw->phy.media_type == e1000_media_type_internal_serdes)
@@ -1618,14 +1620,16 @@ static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
{
s32 ret_val = 0;
- /*
- * If there's an alternate MAC address place it in RAR0
- * so that it will override the Si installed default perm
- * address.
- */
- ret_val = e1000_check_alt_mac_addr_generic(hw);
- if (ret_val)
- goto out;
+ if (hw->mac.type == e1000_82571) {
+ /*
+ * If there's an alternate MAC address place it in RAR0
+ * so that it will override the Si installed default perm
+ * address.
+ */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ goto out;
+ }
ret_val = e1000_read_mac_addr_generic(hw);
@@ -1833,6 +1837,7 @@ struct e1000_info e1000_82573_info = {
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_SWSM_ON_LOAD,
+ .flags2 = FLAG2_DISABLE_ASPM_L1,
.pba = 20,
.max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
.get_variants = e1000_get_variants_82571,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 307a72f483ee..93b3bedae8d2 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -621,6 +621,7 @@
#define E1000_FLASH_UPDATES 2000
/* NVM Word Offsets */
+#define NVM_COMPAT 0x0003
#define NVM_ID_LED_SETTINGS 0x0004
#define NVM_INIT_CONTROL2_REG 0x000F
#define NVM_INIT_CONTROL3_PORT_B 0x0014
@@ -643,6 +644,9 @@
/* Mask bits for fields in Word 0x1a of the NVM */
#define NVM_WORD1A_ASPM_MASK 0x000C
+/* Mask bits for fields in Word 0x03 of the EEPROM */
+#define NVM_COMPAT_LOM 0x0800
+
/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
#define NVM_SUM 0xBABA
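
The 0xBABA convention makes NVM validation a plain 16-bit sum over every word, the stored checksum word included. A sketch of the check as the e1000 family implements it (assuming the driver's e1000_read_nvm() accessor and NVM_CHECKSUM_REG word index):

  u16 checksum = 0, nvm_data;
  int i;

  for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
          if (e1000_read_nvm(hw, i, 1, &nvm_data))
                  return -E1000_ERR_NVM;  /* NVM read failed */
          checksum += nvm_data;
  }
  if (checksum != (u16)NVM_SUM)
          return -E1000_ERR_NVM;          /* image is corrupt */
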
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index df4a27922931..0fd4eb5ac5fb 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -183,6 +183,16 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
u16 offset, nvm_alt_mac_addr_offset, nvm_data;
u8 alt_mac_addr[ETH_ALEN];
+ ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data);
+ if (ret_val)
+ goto out;
+
+ /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */
+ if (!((nvm_data & NVM_COMPAT_LOM) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)))
+ goto out;
+
ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
&nvm_alt_mac_addr_offset);
if (ret_val) {
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index c3dd590d87b2..e2c7e0d767b1 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -475,7 +475,8 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
{
u16 status = (u16)status_err;
u8 errors = (u8)(status_err >> 24);
- skb->ip_summed = CHECKSUM_NONE;
+
+ skb_checksum_none_assert(skb);
/* Ignore Checksum bit is set */
if (status & E1000_RXD_STAT_IXSM)
@@ -3411,22 +3412,16 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
if (adapter->flags & FLAG_MSI_TEST_FAILED) {
adapter->int_mode = E1000E_INT_MODE_LEGACY;
- err = -EIO;
- e_info("MSI interrupt test failed!\n");
- }
+ e_info("MSI interrupt test failed, using legacy interrupt.\n");
+ } else
+ e_dbg("MSI interrupt test succeeded!\n");
free_irq(adapter->pdev->irq, netdev);
pci_disable_msi(adapter->pdev);
- if (err == -EIO)
- goto msi_test_failed;
-
- /* okay so the test worked, restore settings */
- e_dbg("MSI interrupt test succeeded!\n");
msi_test_failed:
e1000e_set_interrupt_capability(adapter);
- e1000_request_irq(adapter);
- return err;
+ return e1000_request_irq(adapter);
}
/**
@@ -3458,21 +3453,6 @@ static int e1000_test_msi(struct e1000_adapter *adapter)
pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
}
- /* success ! */
- if (!err)
- return 0;
-
- /* EIO means MSI test failed */
- if (err != -EIO)
- return err;
-
- /* back to INTx mode */
- e_warn("MSI interrupt test failed, using legacy interrupt.\n");
-
- e1000_free_irq(adapter);
-
- err = e1000_request_irq(adapter);
-
return err;
}
@@ -5745,11 +5725,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
}
init_timer(&adapter->watchdog_timer);
- adapter->watchdog_timer.function = &e1000_watchdog;
+ adapter->watchdog_timer.function = e1000_watchdog;
adapter->watchdog_timer.data = (unsigned long) adapter;
init_timer(&adapter->phy_info_timer);
- adapter->phy_info_timer.function = &e1000_update_phy_info;
+ adapter->phy_info_timer.function = e1000_update_phy_info;
adapter->phy_info_timer.data = (unsigned long) adapter;
INIT_WORK(&adapter->reset_task, e1000_reset_task);
@@ -5829,11 +5809,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
e1000_print_device_info(adapter);
- if (pci_dev_run_wake(pdev)) {
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
- }
- pm_schedule_suspend(&pdev->dev, MSEC_PER_SEC);
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_put_noidle(&pdev->dev);
return 0;
@@ -5879,8 +5856,6 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
struct e1000_adapter *adapter = netdev_priv(netdev);
bool down = test_bit(__E1000_DOWN, &adapter->state);
- pm_runtime_get_sync(&pdev->dev);
-
/*
* flush_scheduled work may reschedule our watchdog task, so
* explicitly disable watchdog tasks from being rescheduled
@@ -5905,11 +5880,8 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
clear_bit(__E1000_DOWN, &adapter->state);
unregister_netdev(netdev);
- if (pci_dev_run_wake(pdev)) {
- pm_runtime_disable(&pdev->dev);
- pm_runtime_set_suspended(&pdev->dev);
- }
- pm_runtime_put_noidle(&pdev->dev);
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_get_noresume(&pdev->dev);
/*
* Release control of h/w to f/w. If f/w is AMT enabled, this
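
This runtime-PM rework leans on the PCI core now holding a device's initial usage reference: a wake-capable port just drops that reference once probe finishes, letting the core runtime-suspend it, and takes it back during remove so teardown never races a suspend. The explicit set_active/enable/schedule_suspend choreography becomes unnecessary. The pairing:

  /* probe(), after registration: hand the port to runtime PM */
  if (pci_dev_run_wake(pdev))
          pm_runtime_put_noidle(&pdev->dev);

  /* remove(), before teardown: pin the device awake again */
  if (pci_dev_run_wake(pdev))
          pm_runtime_get_noresume(&pdev->dev);
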
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 8d97f168f018..7c826319ee5a 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1457,11 +1457,11 @@ hardware_send_packet(struct net_device *dev, void *buf, short length)
if (net_debug > 5)
printk(KERN_DEBUG "%s: entering hardware_send_packet routine.\n", dev->name);
- /* determine how much of the transmit buffer space is available */
- if (lp->tx_end > lp->tx_start)
+ /* determine how much of the transmit buffer space is available */
+ if (lp->tx_end > lp->tx_start)
tx_available = lp->xmt_ram - (lp->tx_end - lp->tx_start);
- else if (lp->tx_end < lp->tx_start)
- tx_available = lp->tx_start - lp->tx_end;
+ else if (lp->tx_end < lp->tx_start)
+ tx_available = lp->tx_start - lp->tx_end;
else tx_available = lp->xmt_ram;
if (((((length + 3) >> 1) << 1) + 2*XMT_HEADER) >= tx_available) {
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 0060e422f171..1846623c6ae6 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
#include <asm/io.h>
#define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0105"
+#define DRV_VERSION "EHEA_0106"
/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
@@ -400,6 +400,7 @@ struct ehea_port_res {
u32 poll_counter;
struct net_lro_mgr lro_mgr;
struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
+ int sq_restart_flag;
};
@@ -413,7 +414,7 @@ struct ehea_port_res {
struct ehea_adapter {
u64 handle;
- struct of_device *ofdev;
+ struct platform_device *ofdev;
struct ehea_port *port[EHEA_MAX_PORTS];
struct ehea_eq *neq; /* notification event queue */
struct tasklet_struct neq_tasklet;
@@ -465,7 +466,7 @@ struct ehea_port {
struct net_device *netdev;
struct net_device_stats stats;
struct ehea_port_res port_res[EHEA_MAX_PORT_RES];
- struct of_device ofdev; /* Open Firmware Device */
+ struct platform_device ofdev; /* Open Firmware Device */
struct ehea_mc_list *mc_list; /* Multicast MAC addresses */
struct vlan_group *vgrp;
struct ehea_eq *qp_eq;
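
The struct of_device -> struct platform_device substitutions here and in the drivers below follow the of_platform bus being folded into the generic platform bus: of_device is now an alias for platform_device, and OF-specific data is reached through dev.of_node (or dev.archdata). A minimal probe skeleton under the merged API (names hypothetical):

  static int foo_probe(struct platform_device *ofdev,
                       const struct of_device_id *match)
  {
          struct device_node *np = ofdev->dev.of_node; /* OF node still here */

          /* map registers, read properties from np, register the netdev */
          return 0;
  }
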
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 3beba70b7dea..190fb691d20b 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -107,10 +107,10 @@ struct ehea_fw_handle_array ehea_fw_handles;
struct ehea_bcmc_reg_array ehea_bcmc_regs;
-static int __devinit ehea_probe_adapter(struct of_device *dev,
+static int __devinit ehea_probe_adapter(struct platform_device *dev,
const struct of_device_id *id);
-static int __devexit ehea_remove(struct of_device *dev);
+static int __devexit ehea_remove(struct platform_device *dev);
static struct of_device_id ehea_device_table[] = {
{
@@ -180,7 +180,7 @@ static void ehea_update_firmware_handles(void)
num_portres * EHEA_NUM_PORTRES_FW_HANDLES;
if (num_fw_handles) {
- arr = kzalloc(num_fw_handles * sizeof(*arr), GFP_KERNEL);
+ arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
if (!arr)
goto out; /* Keep the existing array */
} else
@@ -265,7 +265,7 @@ static void ehea_update_bcmc_registrations(void)
}
if (num_registrations) {
- arr = kzalloc(num_registrations * sizeof(*arr), GFP_ATOMIC);
+ arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
if (!arr)
goto out; /* Keep the existing array */
} else
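
kzalloc(n * size, ...) wraps silently if the multiplication overflows; kcalloc() performs the same zeroed allocation but rejects overflowing requests up front. Rough shape of the helper (the real one lives in <linux/slab.h>):

  static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
  {
          if (size != 0 && n > ULONG_MAX / size)
                  return NULL;            /* n * size would overflow */
          return kzalloc(n * size, flags);
  }
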
@@ -776,6 +776,51 @@ static int ehea_proc_rwqes(struct net_device *dev,
return processed;
}
+#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull
+
+static void reset_sq_restart_flag(struct ehea_port *port)
+{
+ int i;
+
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ struct ehea_port_res *pr = &port->port_res[i];
+ pr->sq_restart_flag = 0;
+ }
+}
+
+static void check_sqs(struct ehea_port *port)
+{
+ struct ehea_swqe *swqe;
+ int swqe_index;
+ int i, k;
+
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ struct ehea_port_res *pr = &port->port_res[i];
+ k = 0;
+ swqe = ehea_get_swqe(pr->qp, &swqe_index);
+ memset(swqe, 0, SWQE_HEADER_SIZE);
+ atomic_dec(&pr->swqe_avail);
+
+ swqe->tx_control |= EHEA_SWQE_PURGE;
+ swqe->wr_id = SWQE_RESTART_CHECK;
+ swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
+ swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
+ swqe->immediate_data_length = 80;
+
+ ehea_post_swqe(pr->qp, swqe);
+
+ while (pr->sq_restart_flag == 0) {
+ msleep(5);
+ if (++k == 100) {
+ ehea_error("HW/SW queues out of sync");
+ ehea_schedule_port_reset(pr->port);
+ return;
+ }
+ }
+ }
+}
+
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
struct sk_buff *skb;
@@ -793,6 +838,13 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
cqe_counter++;
rmb();
+
+ if (cqe->wr_id == SWQE_RESTART_CHECK) {
+ pr->sq_restart_flag = 1;
+ swqe_av++;
+ break;
+ }
+
if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
ehea_error("Bad send completion status=0x%04X",
cqe->status);
@@ -2675,8 +2727,10 @@ static void ehea_flush_sq(struct ehea_port *port)
int k = 0;
while (atomic_read(&pr->swqe_avail) < swqe_max) {
msleep(5);
- if (++k == 20)
+ if (++k == 20) {
+ ehea_error("WARNING: sq not flushed completely");
break;
+ }
}
}
}
@@ -2917,6 +2971,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
port_napi_disable(port);
mutex_unlock(&port->port_lock);
}
+ reset_sq_restart_flag(port);
}
/* Unregister old memory region */
@@ -2951,6 +3006,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
mutex_lock(&port->port_lock);
port_napi_enable(port);
ret = ehea_restart_qps(dev);
+ check_sqs(port);
if (!ret)
netif_wake_queue(dev);
mutex_unlock(&port->port_lock);
@@ -3376,7 +3432,7 @@ static ssize_t ehea_remove_port(struct device *dev,
static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
-int ehea_create_device_sysfs(struct of_device *dev)
+int ehea_create_device_sysfs(struct platform_device *dev)
{
int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
if (ret)
@@ -3387,13 +3443,13 @@ out:
return ret;
}
-void ehea_remove_device_sysfs(struct of_device *dev)
+void ehea_remove_device_sysfs(struct platform_device *dev)
{
device_remove_file(&dev->dev, &dev_attr_probe_port);
device_remove_file(&dev->dev, &dev_attr_remove_port);
}
-static int __devinit ehea_probe_adapter(struct of_device *dev,
+static int __devinit ehea_probe_adapter(struct platform_device *dev,
const struct of_device_id *id)
{
struct ehea_adapter *adapter;
@@ -3492,7 +3548,7 @@ out:
return ret;
}
-static int __devexit ehea_remove(struct of_device *dev)
+static int __devexit ehea_remove(struct platform_device *dev)
{
struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
int i;
@@ -3663,7 +3719,7 @@ int __init ehea_module_init(void)
if (ret)
ehea_info("failed registering memory remove notifier");
- ret = crash_shutdown_register(&ehea_crash_handler);
+ ret = crash_shutdown_register(ehea_crash_handler);
if (ret)
ehea_info("failed registering crash handler");
@@ -3688,7 +3744,7 @@ out3:
out2:
unregister_memory_notifier(&ehea_mem_nb);
unregister_reboot_notifier(&ehea_reboot_nb);
- crash_shutdown_unregister(&ehea_crash_handler);
+ crash_shutdown_unregister(ehea_crash_handler);
out:
return ret;
}
@@ -3701,7 +3757,7 @@ static void __exit ehea_module_exit(void)
driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
ibmebus_unregister_driver(&ehea_driver);
unregister_reboot_notifier(&ehea_reboot_nb);
- ret = crash_shutdown_unregister(&ehea_crash_handler);
+ ret = crash_shutdown_unregister(ehea_crash_handler);
if (ret)
ehea_info("failed unregistering crash handler");
unregister_memory_notifier(&ehea_mem_nb);
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index f239aa8c6f4c..75869ed7226f 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -32,7 +32,7 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "1.4.1.1"
+#define DRV_VERSION "1.4.1.2"
#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 9aab85366d21..711077a2e345 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -911,7 +911,9 @@ static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
static int enic_set_mac_address(struct net_device *netdev, void *p)
{
- return -EOPNOTSUPP;
+ struct sockaddr *saddr = p;
+
+ return enic_set_mac_addr(netdev, (char *)saddr->sa_data);
}
static int enic_dev_packet_filter(struct enic *enic, int directed,
@@ -2152,17 +2154,6 @@ void enic_dev_deinit(struct enic *enic)
enic_clear_intr_mode(enic);
}
-static int enic_dev_stats_clear(struct enic *enic)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_stats_clear(enic->vdev);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
int enic_dev_init(struct enic *enic)
{
struct device *dev = enic_get_dev(enic);
@@ -2205,10 +2196,6 @@ int enic_dev_init(struct enic *enic)
enic_init_vnic_resources(enic);
- /* Clear LIF stats
- */
- enic_dev_stats_clear(enic);
-
err = enic_set_rq_alloc_buf(enic);
if (err) {
dev_err(dev, "Failed to set RQ buffer allocator, aborting\n");
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index 6a5b578a69e1..08d5d42da260 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -74,6 +74,7 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
struct vnic_dev_bar *bar, unsigned int num_bars)
{
struct vnic_resource_header __iomem *rh;
+ struct mgmt_barmap_hdr __iomem *mrh;
struct vnic_resource __iomem *r;
u8 type;
@@ -85,22 +86,32 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
return -EINVAL;
}
- rh = bar->vaddr;
+ rh = bar->vaddr;
+ mrh = bar->vaddr;
if (!rh) {
pr_err("vNIC BAR0 res hdr not mem-mapped\n");
return -EINVAL;
}
- if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
- ioread32(&rh->version) != VNIC_RES_VERSION) {
- pr_err("vNIC BAR0 res magic/version error "
- "exp (%lx/%lx) curr (%x/%x)\n",
+ /* Check for mgmt vnic in addition to normal vnic */
+ if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
+ (ioread32(&rh->version) != VNIC_RES_VERSION)) {
+ if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
+ (ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
+ pr_err("vNIC BAR0 res magic/version error "
+ "exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
VNIC_RES_MAGIC, VNIC_RES_VERSION,
+ MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
ioread32(&rh->magic), ioread32(&rh->version));
- return -EINVAL;
+ return -EINVAL;
+ }
}
- r = (struct vnic_resource __iomem *)(rh + 1);
+ if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
+ r = (struct vnic_resource __iomem *)(mrh + 1);
+ else
+ r = (struct vnic_resource __iomem *)(rh + 1);
+
while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
diff --git a/drivers/net/enic/vnic_devcmd.h b/drivers/net/enic/vnic_devcmd.h
index 20661755df6b..9abb3d51dea1 100644
--- a/drivers/net/enic/vnic_devcmd.h
+++ b/drivers/net/enic/vnic_devcmd.h
@@ -238,6 +238,18 @@ enum vnic_devcmd_cmd {
* out: (u32)a0=status of proxied cmd
* a1-a15=out args of proxied cmd */
CMD_PROXY_BY_BDF = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42),
+
+ /*
+ * Same as BY_BDF, except a0 is the index of an hvnlink subordinate
+ * vnic or an SR-IOV virtual vnic */
+ CMD_PROXY_BY_INDEX = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43),
+
+ /*
+ * in: (u64)a0=paddr of buffer to put latest VIC VIF-CONFIG-INFO TLV in
+ * (u32)a1=length of buffer in a0
+ * out: (u64)a0=paddr of buffer with latest VIC VIF-CONFIG-INFO TLV
+ * (u32)a1=actual length of latest VIC VIF-CONFIG-INFO TLV */
+ CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
};
/* flags for CMD_OPEN */
diff --git a/drivers/net/enic/vnic_enet.h b/drivers/net/enic/vnic_enet.h
index 3b3291248956..e8740e3704e4 100644
--- a/drivers/net/enic/vnic_enet.h
+++ b/drivers/net/enic/vnic_enet.h
@@ -30,7 +30,7 @@ struct vnic_enet_config {
u32 wq_desc_count;
u32 rq_desc_count;
u16 mtu;
- u16 intr_timer;
+ u16 intr_timer_deprecated;
u8 intr_timer_type;
u8 intr_mode;
char devname[16];
diff --git a/drivers/net/enic/vnic_resource.h b/drivers/net/enic/vnic_resource.h
index 810287beff14..e0a73f1ca6f4 100644
--- a/drivers/net/enic/vnic_resource.h
+++ b/drivers/net/enic/vnic_resource.h
@@ -22,6 +22,11 @@
#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */
#define VNIC_RES_VERSION 0x00000000L
+#define MGMTVNIC_MAGIC 0x544d474dL /* 'MGMT' */
+#define MGMTVNIC_VERSION 0x00000000L
+
+/* The MAC address assigned to the CFG vNIC is fixed. */
+#define MGMTVNIC_MAC { 0x02, 0x00, 0x54, 0x4d, 0x47, 0x4d }
/* vNIC resource types */
enum vnic_res_type {
@@ -52,6 +57,14 @@ struct vnic_resource_header {
u32 version;
};
+struct mgmt_barmap_hdr {
+ u32 magic; /* magic number */
+ u32 version; /* header format version */
+ u16 lif; /* loopback lif for mgmt frames */
+ u16 pci_slot; /* installed pci slot */
+ char serial[16]; /* card serial number */
+};
+
struct vnic_resource {
u8 type;
u8 bar;
diff --git a/drivers/net/enic/vnic_rq.c b/drivers/net/enic/vnic_rq.c
index dbb2aca258b9..b236d7cbc137 100644
--- a/drivers/net/enic/vnic_rq.c
+++ b/drivers/net/enic/vnic_rq.c
@@ -77,8 +77,10 @@ void vnic_rq_free(struct vnic_rq *rq)
vnic_dev_free_desc_ring(vdev, &rq->ring);
for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
- kfree(rq->bufs[i]);
- rq->bufs[i] = NULL;
+ if (rq->bufs[i]) {
+ kfree(rq->bufs[i]);
+ rq->bufs[i] = NULL;
+ }
}
rq->ctrl = NULL;
diff --git a/drivers/net/enic/vnic_vic.c b/drivers/net/enic/vnic_vic.c
index 197c9d24af82..4725b79de0ef 100644
--- a/drivers/net/enic/vnic_vic.c
+++ b/drivers/net/enic/vnic_vic.c
@@ -54,8 +54,8 @@ int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
if (!vp || !value)
return -EINVAL;
- if (ntohl(vp->length) + sizeof(*tlv) + length >
- VIC_PROVINFO_MAX_TLV_DATA)
+ if (ntohl(vp->length) + offsetof(struct vic_provinfo_tlv, value) +
+ length > VIC_PROVINFO_MAX_TLV_DATA)
return -ENOMEM;
tlv = (struct vic_provinfo_tlv *)((u8 *)vp->tlv +
@@ -66,7 +66,8 @@ int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
memcpy(tlv->value, value, length);
vp->num_tlvs = htonl(ntohl(vp->num_tlvs) + 1);
- vp->length = htonl(ntohl(vp->length) + sizeof(*tlv) + length);
+ vp->length = htonl(ntohl(vp->length) +
+ offsetof(struct vic_provinfo_tlv, value) + length);
return 0;
}
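
Switching from sizeof(*tlv) to offsetof(struct vic_provinfo_tlv, value) counts only the TLV header bytes: for a type that ends in a zero-length payload array, sizeof() may legally include trailing padding, so header-plus-payload arithmetic overcounts. A generic illustration (hypothetical layout, not the driver's exact one):

  struct tlv_hdr {
          u32 type;
          u16 length;
          u8  value[0];           /* payload starts here */
  };

  /* offsetof(struct tlv_hdr, value) == 6, but sizeof(struct tlv_hdr) == 8:
   * the compiler pads the struct to its 4-byte alignment, so length math
   * based on sizeof() overcounts every TLV by two bytes. */
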
diff --git a/drivers/net/enic/vnic_wq.c b/drivers/net/enic/vnic_wq.c
index 122e33bcc578..4b2a6c6a569b 100644
--- a/drivers/net/enic/vnic_wq.c
+++ b/drivers/net/enic/vnic_wq.c
@@ -77,8 +77,10 @@ void vnic_wq_free(struct vnic_wq *wq)
vnic_dev_free_desc_ring(vdev, &wq->ring);
for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
- kfree(wq->bufs[i]);
- wq->bufs[i] = NULL;
+ if (wq->bufs[i]) {
+ kfree(wq->bufs[i]);
+ wq->bufs[i] = NULL;
+ }
}
wq->ctrl = NULL;
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 57c8ac0ef3f1..32543a300b81 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -758,7 +758,7 @@ static int epic_open(struct net_device *dev)
init_timer(&ep->timer);
ep->timer.expires = jiffies + 3*HZ;
ep->timer.data = (unsigned long)dev;
- ep->timer.function = &epic_timer; /* timer handler */
+ ep->timer.function = epic_timer; /* timer handler */
add_timer(&ep->timer);
return 0;
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index 10e39f2b31c3..ce22e15a3776 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -637,7 +637,9 @@ static void eth16i_initialize(struct net_device *dev, int boot)
/* Set interface port type */
if(boot) {
- char *porttype[] = {"BNC", "DIX", "TP", "AUTO", "FROM_EPROM" };
+ static const char * const porttype[] = {
+ "BNC", "DIX", "TP", "AUTO", "FROM_EPROM"
+ };
switch(dev->if_port)
{
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index 6d653c459c1f..c5a2fe099a8d 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -806,11 +806,6 @@ static void ethoc_tx_timeout(struct net_device *dev)
ethoc_interrupt(dev->irq, dev);
}
-static struct net_device_stats *ethoc_stats(struct net_device *dev)
-{
- return &dev->stats;
-}
-
static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ethoc *priv = netdev_priv(dev);
@@ -863,7 +858,6 @@ static const struct net_device_ops ethoc_netdev_ops = {
.ndo_set_multicast_list = ethoc_set_multicast_list,
.ndo_change_mtu = ethoc_change_mtu,
.ndo_tx_timeout = ethoc_tx_timeout,
- .ndo_get_stats = ethoc_stats,
.ndo_start_xmit = ethoc_start_xmit,
};
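
Dropping ethoc_stats() works because .ndo_get_stats is optional: when the hook is NULL, the core falls back to the embedded dev->stats, which is exactly what the trampoline returned. The core fallback looks roughly like this (2.6.35-era net/core/dev.c; the 64-bit stats rework reshuffles it slightly):

  const struct net_device_stats *dev_get_stats(struct net_device *dev)
  {
          const struct net_device_ops *ops = dev->netdev_ops;

          if (ops->ndo_get_stats)
                  return ops->ndo_get_stats(dev);
          return &dev->stats;     /* default when the driver has no hook */
  }
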
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index d7e8f6b8f4cf..dd54abe2f710 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -915,14 +915,14 @@ static int netdev_open(struct net_device *dev)
init_timer(&np->timer);
np->timer.expires = RUN_AT(3 * HZ);
np->timer.data = (unsigned long) dev;
- np->timer.function = &netdev_timer;
+ np->timer.function = netdev_timer;
/* timer handler */
add_timer(&np->timer);
init_timer(&np->reset_timer);
np->reset_timer.data = (unsigned long) dev;
- np->reset_timer.function = &reset_timer;
+ np->reset_timer.function = reset_timer;
np->reset_timer_armed = 0;
return 0;
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index d1a5b17b2a95..e9f5d030bc26 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -771,11 +771,6 @@ static void mpc52xx_fec_reset(struct net_device *dev)
/* ethtool interface */
-static void mpc52xx_fec_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, DRIVER_NAME);
-}
static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
@@ -810,7 +805,6 @@ static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level)
}
static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
- .get_drvinfo = mpc52xx_fec_get_drvinfo,
.get_settings = mpc52xx_fec_get_settings,
.set_settings = mpc52xx_fec_set_settings,
.get_link = ethtool_op_get_link,
@@ -850,7 +844,7 @@ static const struct net_device_ops mpc52xx_fec_netdev_ops = {
/* ======================================================================== */
static int __devinit
-mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
+mpc52xx_fec_probe(struct platform_device *op, const struct of_device_id *match)
{
int rv;
struct net_device *ndev;
@@ -995,7 +989,7 @@ err_netdev:
}
static int
-mpc52xx_fec_remove(struct of_device *op)
+mpc52xx_fec_remove(struct platform_device *op)
{
struct net_device *ndev;
struct mpc52xx_fec_priv *priv;
@@ -1025,7 +1019,7 @@ mpc52xx_fec_remove(struct of_device *op)
}
#ifdef CONFIG_PM
-static int mpc52xx_fec_of_suspend(struct of_device *op, pm_message_t state)
+static int mpc52xx_fec_of_suspend(struct platform_device *op, pm_message_t state)
{
struct net_device *dev = dev_get_drvdata(&op->dev);
@@ -1035,7 +1029,7 @@ static int mpc52xx_fec_of_suspend(struct of_device *op, pm_message_t state)
return 0;
}
-static int mpc52xx_fec_of_resume(struct of_device *op)
+static int mpc52xx_fec_of_resume(struct platform_device *op)
{
struct net_device *dev = dev_get_drvdata(&op->dev);
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index dbaf72cbb233..0b4cb6f15984 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -61,7 +61,7 @@ static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg,
data | FEC_MII_WRITE_FRAME);
}
-static int mpc52xx_fec_mdio_probe(struct of_device *of,
+static int mpc52xx_fec_mdio_probe(struct platform_device *of,
const struct of_device_id *match)
{
struct device *dev = &of->dev;
@@ -122,7 +122,7 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of,
return err;
}
-static int mpc52xx_fec_mdio_remove(struct of_device *of)
+static int mpc52xx_fec_mdio_remove(struct platform_device *of)
{
struct device *dev = &of->dev;
struct mii_bus *bus = dev_get_drvdata(dev);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 4da05b1b445c..6a44fe411589 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5440,13 +5440,13 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
init_timer(&np->oom_kick);
np->oom_kick.data = (unsigned long) dev;
- np->oom_kick.function = &nv_do_rx_refill; /* timer handler */
+ np->oom_kick.function = nv_do_rx_refill; /* timer handler */
init_timer(&np->nic_poll);
np->nic_poll.data = (unsigned long) dev;
- np->nic_poll.function = &nv_do_nic_poll; /* timer handler */
+ np->nic_poll.function = nv_do_nic_poll; /* timer handler */
init_timer(&np->stats_poll);
np->stats_poll.data = (unsigned long) dev;
- np->stats_poll.function = &nv_do_stats_poll; /* timer handler */
+ np->stats_poll.function = nv_do_stats_poll; /* timer handler */
err = pci_enable_device(pci_dev);
if (err)
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index f08cff9020bd..d684f187de57 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -997,7 +997,7 @@ static const struct net_device_ops fs_enet_netdev_ops = {
#endif
};
-static int __devinit fs_enet_probe(struct of_device *ofdev,
+static int __devinit fs_enet_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct net_device *ndev;
@@ -1036,7 +1036,7 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
ndev = alloc_etherdev(privsize);
if (!ndev) {
ret = -ENOMEM;
- goto out_free_fpi;
+ goto out_put;
}
SET_NETDEV_DEV(ndev, &ofdev->dev);
@@ -1099,13 +1099,14 @@ out_cleanup_data:
out_free_dev:
free_netdev(ndev);
dev_set_drvdata(&ofdev->dev, NULL);
+out_put:
of_node_put(fpi->phy_node);
out_free_fpi:
kfree(fpi);
return ret;
}
-static int fs_enet_remove(struct of_device *ofdev)
+static int fs_enet_remove(struct platform_device *ofdev)
{
struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
struct fs_enet_private *fep = netdev_priv(ndev);
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 48e91b6242ce..7a84e45487e8 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -84,7 +84,7 @@ static inline int fcc_cr_cmd(struct fs_enet_private *fep, u32 op)
static int do_pd_setup(struct fs_enet_private *fep)
{
- struct of_device *ofdev = to_of_device(fep->dev);
+ struct platform_device *ofdev = to_platform_device(fep->dev);
struct fs_platform_info *fpi = fep->fpi;
int ret = -EINVAL;
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index 7ca1642276d0..61035fc5599b 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -96,7 +96,7 @@ static int whack_reset(struct fec __iomem *fecp)
static int do_pd_setup(struct fs_enet_private *fep)
{
- struct of_device *ofdev = to_of_device(fep->dev);
+ struct platform_device *ofdev = to_platform_device(fep->dev);
fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
if (fep->interrupt == NO_IRQ)
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index a3c44544846d..22a02a767069 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -96,7 +96,7 @@ static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op)
static int do_pd_setup(struct fs_enet_private *fep)
{
- struct of_device *ofdev = to_of_device(fep->dev);
+ struct platform_device *ofdev = to_platform_device(fep->dev);
fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
if (fep->interrupt == NO_IRQ)
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index 3607340f3da7..3cda2b515471 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -150,7 +150,7 @@ static int __devinit fs_mii_bitbang_init(struct mii_bus *bus,
return 0;
}
-static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
+static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct mii_bus *new_bus;
@@ -200,7 +200,7 @@ out:
return ret;
}
-static int fs_enet_mdio_remove(struct of_device *ofdev)
+static int fs_enet_mdio_remove(struct platform_device *ofdev)
{
struct mii_bus *bus = dev_get_drvdata(&ofdev->dev);
struct bb_info *bitbang = bus->priv;
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index bddffd169b93..dbb9c48623df 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -101,7 +101,7 @@ static int fs_enet_fec_mii_reset(struct mii_bus *bus)
return 0;
}
-static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
+static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct resource res;
@@ -192,7 +192,7 @@ out:
return ret;
}
-static int fs_enet_mdio_remove(struct of_device *ofdev)
+static int fs_enet_mdio_remove(struct platform_device *ofdev)
{
struct mii_bus *bus = dev_get_drvdata(&ofdev->dev);
struct fec_info *fec = bus->priv;
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index b4c41d72c423..d4bf91aac25f 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -35,6 +35,7 @@
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
@@ -264,7 +265,7 @@ static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
#endif
-static int fsl_pq_mdio_probe(struct of_device *ofdev,
+static int fsl_pq_mdio_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct device_node *np = ofdev->dev.of_node;
@@ -424,7 +425,7 @@ err_free_priv:
}
-static int fsl_pq_mdio_remove(struct of_device *ofdev)
+static int fsl_pq_mdio_remove(struct platform_device *ofdev)
{
struct device *device = &ofdev->dev;
struct mii_bus *bus = dev_get_drvdata(device);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index a1b6301bc674..f30adbf86bb2 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -122,9 +122,9 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
-static int gfar_probe(struct of_device *ofdev,
+static int gfar_probe(struct platform_device *ofdev,
const struct of_device_id *match);
-static int gfar_remove(struct of_device *ofdev);
+static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
@@ -605,7 +605,7 @@ static int gfar_parse_group(struct device_node *np,
return 0;
}
-static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
+static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
const char *model;
const char *ctype;
@@ -959,7 +959,7 @@ static void gfar_detect_errata(struct gfar_private *priv)
/* Set up the ethernet device structure, private data,
* and anything else we need before we start */
-static int gfar_probe(struct of_device *ofdev,
+static int gfar_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
u32 tempval;
@@ -1238,7 +1238,7 @@ register_fail:
return err;
}
-static int gfar_remove(struct of_device *ofdev)
+static int gfar_remove(struct platform_device *ofdev)
{
struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
@@ -1859,7 +1859,7 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
printk(KERN_ERR "%s: Can't get IRQ %d\n",
dev->name, grp->interruptError);
- goto err_irq_fail;
+ goto err_irq_fail;
}
if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
@@ -2048,7 +2048,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
u32 bufaddr;
unsigned long flags;
unsigned int nr_frags, nr_txbds, length;
- union skb_shared_tx *shtx;
/*
* TOE=1 frames larger than 2500 bytes may see excess delays
@@ -2069,10 +2068,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
txq = netdev_get_tx_queue(dev, rq);
base = tx_queue->tx_bd_base;
regs = tx_queue->grp->regs;
- shtx = skb_tx(skb);
/* check if time stamp should be generated */
- if (unlikely(shtx->hardware && priv->hwts_tx_en))
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ priv->hwts_tx_en))
do_tstamp = 1;
/* make space for additional header when fcb is needed */
@@ -2174,7 +2173,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Setup tx hardware time stamping if requested */
if (unlikely(do_tstamp)) {
- shtx->in_progress = 1;
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
if (fcb == NULL)
fcb = gfar_add_fcb(skb);
fcb->ptp = 1;
@@ -2446,7 +2445,6 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
int howmany = 0;
u32 lstatus;
size_t buflen;
- union skb_shared_tx *shtx;
rx_queue = priv->rx_queue[tx_queue->qindex];
bdp = tx_queue->dirty_tx;
@@ -2461,8 +2459,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
* When time stamping, one additional TxBD must be freed.
* Also, we need to dma_unmap_single() the TxPAL.
*/
- shtx = skb_tx(skb);
- if (unlikely(shtx->in_progress))
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
nr_txbds = frags + 2;
else
nr_txbds = frags + 1;
@@ -2476,7 +2473,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
(lstatus & BD_LENGTH_MASK))
break;
- if (unlikely(shtx->in_progress)) {
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
next = next_txbd(bdp, base, tx_ring_size);
buflen = next->length + GMAC_FCB_LEN;
} else
@@ -2485,7 +2482,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
buflen, DMA_TO_DEVICE);
- if (unlikely(shtx->in_progress)) {
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
struct skb_shared_hwtstamps shhwtstamps;
u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
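
Past this hunk boundary the function converts the raw counter into the shared-hwtstamp struct and hands it to the stack; the delivery step is the standard pair, sketched here assuming `ns` points at the 64-bit hardware timestamp as set up above:

  shhwtstamps.hwtstamp = ns_to_ktime(*ns);   /* raw nanoseconds -> ktime_t */
  skb_tstamp_tx(skb, &shhwtstamps);  /* clone to the socket's error queue */
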
@@ -2657,7 +2654,7 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
}
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 710810e2adb4..68984eb88ae0 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -1054,7 +1054,7 @@ struct gfar_private {
struct device_node *node;
struct net_device *ndev;
- struct of_device *ofdev;
+ struct platform_device *ofdev;
enum gfar_errata errata;
struct gfar_priv_grp gfargrp[MAXGROUPS];
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index 4d09eab3548e..27d6960ce09e 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -893,7 +893,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
if (greth->flags & GRETH_FLAG_RX_CSUM && hw_checksummed(status))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_packets++;
@@ -1373,7 +1373,7 @@ error:
}
/* Initialize the GRETH MAC */
-static int __devinit greth_of_probe(struct of_device *ofdev, const struct of_device_id *match)
+static int __devinit greth_of_probe(struct platform_device *ofdev, const struct of_device_id *match)
{
struct net_device *dev;
struct greth_private *greth;
@@ -1412,7 +1412,7 @@ static int __devinit greth_of_probe(struct of_device *ofdev, const struct of_dev
}
regs = (struct greth_regs *) greth->regs;
- greth->irq = ofdev->irqs[0];
+ greth->irq = ofdev->archdata.irqs[0];
dev_set_drvdata(greth->dev, dev);
SET_NETDEV_DEV(dev, greth->dev);
@@ -1547,10 +1547,10 @@ static int __devinit greth_of_probe(struct of_device *ofdev, const struct of_dev
dev->netdev_ops = &greth_netdev_ops;
dev->ethtool_ops = &greth_ethtool_ops;
- if (register_netdev(dev)) {
+ err = register_netdev(dev);
+ if (err) {
if (netif_msg_probe(greth))
dev_err(greth->dev, "netdevice registration failed.\n");
- err = -ENOMEM;
goto error5;
}
@@ -1572,7 +1572,7 @@ error1:
return err;
}
-static int __devexit greth_of_remove(struct of_device *of_dev)
+static int __devexit greth_of_remove(struct platform_device *of_dev)
{
struct net_device *ndev = dev_get_drvdata(&of_dev->dev);
struct greth_private *greth = netdev_priv(ndev);
diff --git a/drivers/net/greth.h b/drivers/net/greth.h
index 973388d6abca..03ad903cd676 100644
--- a/drivers/net/greth.h
+++ b/drivers/net/greth.h
@@ -118,7 +118,7 @@ struct greth_private {
int irq;
- struct device *dev; /* Pointer to of_device->dev */
+ struct device *dev; /* Pointer to platform_device->dev */
struct net_device *netdev;
struct napi_struct napi;
spinlock_t devlock;
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 49aac7027fbb..9a6485892b3d 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1004,7 +1004,7 @@ static int hamachi_open(struct net_device *dev)
init_timer(&hmp->timer);
hmp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
hmp->timer.data = (unsigned long)dev;
- hmp->timer.function = &hamachi_timer; /* timer handler */
+ hmp->timer.function = hamachi_timer; /* timer handler */
add_timer(&hmp->timer);
return 0;
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index 9f64c8637208..33655814448e 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1069,7 +1069,8 @@ static void scc_tx_done(struct scc_channel *scc)
case KISS_DUPLEX_LINK:
scc->stat.tx_state = TXS_IDLE2;
if (scc->kiss.idletime != TIMER_OFF)
- scc_start_tx_timer(scc, t_idle, scc->kiss.idletime*100);
+ scc_start_tx_timer(scc, t_idle,
+ scc->kiss.idletime*100);
break;
case KISS_DUPLEX_OPTIMA:
scc_notify(scc, HWEV_ALL_SENT);
diff --git a/drivers/net/hp.c b/drivers/net/hp.c
index 86ececd3c658..d15d2f2ba78e 100644
--- a/drivers/net/hp.c
+++ b/drivers/net/hp.c
@@ -204,10 +204,10 @@ static int __init hp_probe1(struct net_device *dev, int ioaddr)
ei_status.rx_start_page = HP_START_PG + TX_PAGES;
ei_status.stop_page = wordmode ? HP_16BSTOP_PG : HP_8BSTOP_PG;
- ei_status.reset_8390 = &hp_reset_8390;
- ei_status.get_8390_hdr = &hp_get_8390_hdr;
- ei_status.block_input = &hp_block_input;
- ei_status.block_output = &hp_block_output;
+ ei_status.reset_8390 = hp_reset_8390;
+ ei_status.get_8390_hdr = hp_get_8390_hdr;
+ ei_status.block_input = hp_block_input;
+ ei_status.block_output = hp_block_output;
hp_init_card(dev);
retval = register_netdev(dev);
diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c
index 07d8e5b634f3..c5ef62ceb840 100644
--- a/drivers/net/hydra.c
+++ b/drivers/net/hydra.c
@@ -155,10 +155,10 @@ static int __devinit hydra_init(struct zorro_dev *z)
ei_status.rx_start_page = start_page + TX_PAGES;
- ei_status.reset_8390 = &hydra_reset_8390;
- ei_status.block_input = &hydra_block_input;
- ei_status.block_output = &hydra_block_output;
- ei_status.get_8390_hdr = &hydra_get_8390_hdr;
+ ei_status.reset_8390 = hydra_reset_8390;
+ ei_status.block_input = hydra_block_input;
+ ei_status.block_output = hydra_block_output;
+ ei_status.get_8390_hdr = hydra_get_8390_hdr;
ei_status.reg_offset = hydra_offsets;
dev->netdev_ops = &hydra_netdev_ops;
@@ -173,9 +173,8 @@ static int __devinit hydra_init(struct zorro_dev *z)
zorro_set_drvdata(z, dev);
- printk(KERN_INFO "%s: Hydra at 0x%08llx, address "
- "%pM (hydra.c " HYDRA_VERSION ")\n",
- dev->name, (unsigned long long)z->resource.start, dev->dev_addr);
+ pr_info("%s: Hydra at %pR, address %pM (hydra.c " HYDRA_VERSION ")\n",
+ dev->name, &z->resource, dev->dev_addr);
return 0;
}
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 0f1d4e96cf89..3506fd6ad726 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2245,7 +2245,7 @@ static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
struct emac_depentry {
u32 phandle;
struct device_node *node;
- struct of_device *ofdev;
+ struct platform_device *ofdev;
void *drvdata;
};
@@ -2339,11 +2339,11 @@ static int __devinit emac_wait_deps(struct emac_instance *dev)
deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
if (dev->blist && dev->blist > emac_boot_list)
deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
- bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
+ bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
wait_event_timeout(emac_probe_wait,
emac_check_deps(dev, deps),
EMAC_PROBE_DEP_TIMEOUT);
- bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
+ bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
for (i = 0; i < EMAC_DEP_COUNT; i++) {
if (deps[i].node)
@@ -2719,7 +2719,7 @@ static const struct net_device_ops emac_gige_netdev_ops = {
.ndo_change_mtu = emac_change_mtu,
};
-static int __devinit emac_probe(struct of_device *ofdev,
+static int __devinit emac_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct net_device *ndev;
@@ -2941,7 +2941,7 @@ static int __devinit emac_probe(struct of_device *ofdev,
return err;
}
-static int __devexit emac_remove(struct of_device *ofdev)
+static int __devexit emac_remove(struct platform_device *ofdev)
{
struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
diff --git a/drivers/net/ibm_newemac/core.h b/drivers/net/ibm_newemac/core.h
index b1cbe6fdfc7a..9e37e3d9c51d 100644
--- a/drivers/net/ibm_newemac/core.h
+++ b/drivers/net/ibm_newemac/core.h
@@ -170,12 +170,12 @@ struct emac_instance {
struct net_device *ndev;
struct resource rsrc_regs;
struct emac_regs __iomem *emacp;
- struct of_device *ofdev;
+ struct platform_device *ofdev;
struct device_node **blist; /* bootlist entry */
/* MAL linkage */
u32 mal_ph;
- struct of_device *mal_dev;
+ struct platform_device *mal_dev;
u32 mal_rx_chan;
u32 mal_tx_chan;
struct mal_instance *mal;
@@ -196,24 +196,24 @@ struct emac_instance {
/* Shared MDIO if any */
u32 mdio_ph;
- struct of_device *mdio_dev;
+ struct platform_device *mdio_dev;
struct emac_instance *mdio_instance;
struct mutex mdio_lock;
/* ZMII infos if any */
u32 zmii_ph;
u32 zmii_port;
- struct of_device *zmii_dev;
+ struct platform_device *zmii_dev;
/* RGMII infos if any */
u32 rgmii_ph;
u32 rgmii_port;
- struct of_device *rgmii_dev;
+ struct platform_device *rgmii_dev;
/* TAH infos if any */
u32 tah_ph;
u32 tah_port;
- struct of_device *tah_dev;
+ struct platform_device *tah_dev;
/* IRQs */
int wol_irq;
diff --git a/drivers/net/ibm_newemac/debug.c b/drivers/net/ibm_newemac/debug.c
index 3995fafc1e08..8c6c1e2a8750 100644
--- a/drivers/net/ibm_newemac/debug.c
+++ b/drivers/net/ibm_newemac/debug.c
@@ -238,7 +238,7 @@ void emac_dbg_dump_all(void)
}
#if defined(CONFIG_MAGIC_SYSRQ)
-static void emac_sysrq_handler(int key, struct tty_struct *tty)
+static void emac_sysrq_handler(int key)
{
emac_dbg_dump_all();
}
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
index fcff9e0bd382..d5717e2123e1 100644
--- a/drivers/net/ibm_newemac/mal.c
+++ b/drivers/net/ibm_newemac/mal.c
@@ -517,7 +517,7 @@ void *mal_dump_regs(struct mal_instance *mal, void *buf)
return regs + 1;
}
-static int __devinit mal_probe(struct of_device *ofdev,
+static int __devinit mal_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct mal_instance *mal;
@@ -730,7 +730,7 @@ static int __devinit mal_probe(struct of_device *ofdev,
return err;
}
-static int __devexit mal_remove(struct of_device *ofdev)
+static int __devexit mal_remove(struct platform_device *ofdev)
{
struct mal_instance *mal = dev_get_drvdata(&ofdev->dev);
diff --git a/drivers/net/ibm_newemac/mal.h b/drivers/net/ibm_newemac/mal.h
index 9ededfbf0726..66084214bf45 100644
--- a/drivers/net/ibm_newemac/mal.h
+++ b/drivers/net/ibm_newemac/mal.h
@@ -210,7 +210,7 @@ struct mal_instance {
dma_addr_t bd_dma;
struct mal_descriptor *bd_virt;
- struct of_device *ofdev;
+ struct platform_device *ofdev;
int index;
spinlock_t lock;
diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c
index 108919bcdf13..dd61798897ac 100644
--- a/drivers/net/ibm_newemac/rgmii.c
+++ b/drivers/net/ibm_newemac/rgmii.c
@@ -93,7 +93,7 @@ static inline u32 rgmii_mode_mask(int mode, int input)
}
}
-int __devinit rgmii_attach(struct of_device *ofdev, int input, int mode)
+int __devinit rgmii_attach(struct platform_device *ofdev, int input, int mode)
{
struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
struct rgmii_regs __iomem *p = dev->base;
@@ -122,7 +122,7 @@ int __devinit rgmii_attach(struct of_device *ofdev, int input, int mode)
return 0;
}
-void rgmii_set_speed(struct of_device *ofdev, int input, int speed)
+void rgmii_set_speed(struct platform_device *ofdev, int input, int speed)
{
struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
struct rgmii_regs __iomem *p = dev->base;
@@ -144,7 +144,7 @@ void rgmii_set_speed(struct of_device *ofdev, int input, int speed)
mutex_unlock(&dev->lock);
}
-void rgmii_get_mdio(struct of_device *ofdev, int input)
+void rgmii_get_mdio(struct platform_device *ofdev, int input)
{
struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
struct rgmii_regs __iomem *p = dev->base;
@@ -165,7 +165,7 @@ void rgmii_get_mdio(struct of_device *ofdev, int input)
DBG2(dev, " fer = 0x%08x\n", fer);
}
-void rgmii_put_mdio(struct of_device *ofdev, int input)
+void rgmii_put_mdio(struct platform_device *ofdev, int input)
{
struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
struct rgmii_regs __iomem *p = dev->base;
@@ -186,7 +186,7 @@ void rgmii_put_mdio(struct of_device *ofdev, int input)
mutex_unlock(&dev->lock);
}
-void rgmii_detach(struct of_device *ofdev, int input)
+void rgmii_detach(struct platform_device *ofdev, int input)
{
struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
struct rgmii_regs __iomem *p;
@@ -206,13 +206,13 @@ void rgmii_detach(struct of_device *ofdev, int input)
mutex_unlock(&dev->lock);
}
-int rgmii_get_regs_len(struct of_device *ofdev)
+int rgmii_get_regs_len(struct platform_device *ofdev)
{
return sizeof(struct emac_ethtool_regs_subhdr) +
sizeof(struct rgmii_regs);
}
-void *rgmii_dump_regs(struct of_device *ofdev, void *buf)
+void *rgmii_dump_regs(struct platform_device *ofdev, void *buf)
{
struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
struct emac_ethtool_regs_subhdr *hdr = buf;
@@ -228,7 +228,7 @@ void *rgmii_dump_regs(struct of_device *ofdev, void *buf)
}
-static int __devinit rgmii_probe(struct of_device *ofdev,
+static int __devinit rgmii_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct device_node *np = ofdev->dev.of_node;
@@ -293,7 +293,7 @@ static int __devinit rgmii_probe(struct of_device *ofdev,
return rc;
}
-static int __devexit rgmii_remove(struct of_device *ofdev)
+static int __devexit rgmii_remove(struct platform_device *ofdev)
{
struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
diff --git a/drivers/net/ibm_newemac/rgmii.h b/drivers/net/ibm_newemac/rgmii.h
index c4a4b358a270..d69799049865 100644
--- a/drivers/net/ibm_newemac/rgmii.h
+++ b/drivers/net/ibm_newemac/rgmii.h
@@ -51,20 +51,20 @@ struct rgmii_instance {
int users;
/* OF device instance */
- struct of_device *ofdev;
+ struct platform_device *ofdev;
};
#ifdef CONFIG_IBM_NEW_EMAC_RGMII
extern int rgmii_init(void);
extern void rgmii_exit(void);
-extern int rgmii_attach(struct of_device *ofdev, int input, int mode);
-extern void rgmii_detach(struct of_device *ofdev, int input);
-extern void rgmii_get_mdio(struct of_device *ofdev, int input);
-extern void rgmii_put_mdio(struct of_device *ofdev, int input);
-extern void rgmii_set_speed(struct of_device *ofdev, int input, int speed);
-extern int rgmii_get_regs_len(struct of_device *ofdev);
-extern void *rgmii_dump_regs(struct of_device *ofdev, void *buf);
+extern int rgmii_attach(struct platform_device *ofdev, int input, int mode);
+extern void rgmii_detach(struct platform_device *ofdev, int input);
+extern void rgmii_get_mdio(struct platform_device *ofdev, int input);
+extern void rgmii_put_mdio(struct platform_device *ofdev, int input);
+extern void rgmii_set_speed(struct platform_device *ofdev, int input, int speed);
+extern int rgmii_get_regs_len(struct platform_device *ofdev);
+extern void *rgmii_dump_regs(struct platform_device *ofdev, void *buf);
#else
diff --git a/drivers/net/ibm_newemac/tah.c b/drivers/net/ibm_newemac/tah.c
index 044637144c43..299aa49490c0 100644
--- a/drivers/net/ibm_newemac/tah.c
+++ b/drivers/net/ibm_newemac/tah.c
@@ -23,7 +23,7 @@
#include "emac.h"
#include "core.h"
-int __devinit tah_attach(struct of_device *ofdev, int channel)
+int __devinit tah_attach(struct platform_device *ofdev, int channel)
{
struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
@@ -35,7 +35,7 @@ int __devinit tah_attach(struct of_device *ofdev, int channel)
return 0;
}
-void tah_detach(struct of_device *ofdev, int channel)
+void tah_detach(struct platform_device *ofdev, int channel)
{
struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
@@ -44,7 +44,7 @@ void tah_detach(struct of_device *ofdev, int channel)
mutex_unlock(&dev->lock);
}
-void tah_reset(struct of_device *ofdev)
+void tah_reset(struct platform_device *ofdev)
{
struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
struct tah_regs __iomem *p = dev->base;
@@ -66,13 +66,13 @@ void tah_reset(struct of_device *ofdev)
TAH_MR_DIG);
}
-int tah_get_regs_len(struct of_device *ofdev)
+int tah_get_regs_len(struct platform_device *ofdev)
{
return sizeof(struct emac_ethtool_regs_subhdr) +
sizeof(struct tah_regs);
}
-void *tah_dump_regs(struct of_device *ofdev, void *buf)
+void *tah_dump_regs(struct platform_device *ofdev, void *buf)
{
struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
struct emac_ethtool_regs_subhdr *hdr = buf;
@@ -87,7 +87,7 @@ void *tah_dump_regs(struct of_device *ofdev, void *buf)
return regs + 1;
}
-static int __devinit tah_probe(struct of_device *ofdev,
+static int __devinit tah_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct device_node *np = ofdev->dev.of_node;
@@ -139,7 +139,7 @@ static int __devinit tah_probe(struct of_device *ofdev,
return rc;
}
-static int __devexit tah_remove(struct of_device *ofdev)
+static int __devexit tah_remove(struct platform_device *ofdev)
{
struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
diff --git a/drivers/net/ibm_newemac/tah.h b/drivers/net/ibm_newemac/tah.h
index a068b5658dad..61dbeca006d1 100644
--- a/drivers/net/ibm_newemac/tah.h
+++ b/drivers/net/ibm_newemac/tah.h
@@ -48,7 +48,7 @@ struct tah_instance {
int users;
/* OF device instance */
- struct of_device *ofdev;
+ struct platform_device *ofdev;
};
@@ -74,11 +74,11 @@ struct tah_instance {
extern int tah_init(void);
extern void tah_exit(void);
-extern int tah_attach(struct of_device *ofdev, int channel);
-extern void tah_detach(struct of_device *ofdev, int channel);
-extern void tah_reset(struct of_device *ofdev);
-extern int tah_get_regs_len(struct of_device *ofdev);
-extern void *tah_dump_regs(struct of_device *ofdev, void *buf);
+extern int tah_attach(struct platform_device *ofdev, int channel);
+extern void tah_detach(struct platform_device *ofdev, int channel);
+extern void tah_reset(struct platform_device *ofdev);
+extern int tah_get_regs_len(struct platform_device *ofdev);
+extern void *tah_dump_regs(struct platform_device *ofdev, void *buf);
#else
diff --git a/drivers/net/ibm_newemac/zmii.c b/drivers/net/ibm_newemac/zmii.c
index 046dcd069c45..34ed6ee8ca8a 100644
--- a/drivers/net/ibm_newemac/zmii.c
+++ b/drivers/net/ibm_newemac/zmii.c
@@ -82,7 +82,7 @@ static inline u32 zmii_mode_mask(int mode, int input)
}
}
-int __devinit zmii_attach(struct of_device *ofdev, int input, int *mode)
+int __devinit zmii_attach(struct platform_device *ofdev, int input, int *mode)
{
struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
struct zmii_regs __iomem *p = dev->base;
@@ -148,7 +148,7 @@ int __devinit zmii_attach(struct of_device *ofdev, int input, int *mode)
return 0;
}
-void zmii_get_mdio(struct of_device *ofdev, int input)
+void zmii_get_mdio(struct platform_device *ofdev, int input)
{
struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
u32 fer;
@@ -161,7 +161,7 @@ void zmii_get_mdio(struct of_device *ofdev, int input)
out_be32(&dev->base->fer, fer | ZMII_FER_MDI(input));
}
-void zmii_put_mdio(struct of_device *ofdev, int input)
+void zmii_put_mdio(struct platform_device *ofdev, int input)
{
struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
@@ -170,7 +170,7 @@ void zmii_put_mdio(struct of_device *ofdev, int input)
}
-void zmii_set_speed(struct of_device *ofdev, int input, int speed)
+void zmii_set_speed(struct platform_device *ofdev, int input, int speed)
{
struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
u32 ssr;
@@ -191,7 +191,7 @@ void zmii_set_speed(struct of_device *ofdev, int input, int speed)
mutex_unlock(&dev->lock);
}
-void zmii_detach(struct of_device *ofdev, int input)
+void zmii_detach(struct platform_device *ofdev, int input)
{
struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
@@ -210,13 +210,13 @@ void zmii_detach(struct of_device *ofdev, int input)
mutex_unlock(&dev->lock);
}
-int zmii_get_regs_len(struct of_device *ofdev)
+int zmii_get_regs_len(struct platform_device *ofdev)
{
return sizeof(struct emac_ethtool_regs_subhdr) +
sizeof(struct zmii_regs);
}
-void *zmii_dump_regs(struct of_device *ofdev, void *buf)
+void *zmii_dump_regs(struct platform_device *ofdev, void *buf)
{
struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
struct emac_ethtool_regs_subhdr *hdr = buf;
@@ -231,7 +231,7 @@ void *zmii_dump_regs(struct of_device *ofdev, void *buf)
return regs + 1;
}
-static int __devinit zmii_probe(struct of_device *ofdev,
+static int __devinit zmii_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct device_node *np = ofdev->dev.of_node;
@@ -286,7 +286,7 @@ static int __devinit zmii_probe(struct of_device *ofdev,
return rc;
}
-static int __devexit zmii_remove(struct of_device *ofdev)
+static int __devexit zmii_remove(struct platform_device *ofdev)
{
struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
diff --git a/drivers/net/ibm_newemac/zmii.h b/drivers/net/ibm_newemac/zmii.h
index 6c9beba0c4b6..1333fa2b2781 100644
--- a/drivers/net/ibm_newemac/zmii.h
+++ b/drivers/net/ibm_newemac/zmii.h
@@ -48,20 +48,20 @@ struct zmii_instance {
u32 fer_save;
/* OF device instance */
- struct of_device *ofdev;
+ struct platform_device *ofdev;
};
#ifdef CONFIG_IBM_NEW_EMAC_ZMII
extern int zmii_init(void);
extern void zmii_exit(void);
-extern int zmii_attach(struct of_device *ofdev, int input, int *mode);
-extern void zmii_detach(struct of_device *ofdev, int input);
-extern void zmii_get_mdio(struct of_device *ofdev, int input);
-extern void zmii_put_mdio(struct of_device *ofdev, int input);
-extern void zmii_set_speed(struct of_device *ofdev, int input, int speed);
-extern int zmii_get_regs_len(struct of_device *ocpdev);
-extern void *zmii_dump_regs(struct of_device *ofdev, void *buf);
+extern int zmii_attach(struct platform_device *ofdev, int input, int *mode);
+extern void zmii_detach(struct platform_device *ofdev, int input);
+extern void zmii_get_mdio(struct platform_device *ofdev, int input);
+extern void zmii_put_mdio(struct platform_device *ofdev, int input);
+extern void zmii_set_speed(struct platform_device *ofdev, int input, int speed);
+extern int zmii_get_regs_len(struct platform_device *ocpdev);
+extern void *zmii_dump_regs(struct platform_device *ofdev, void *buf);
#else
# define zmii_init() 0
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 294ccfb427cf..0037a696cd0a 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -602,7 +602,7 @@ static void irqrx_handler(struct net_device *dev)
/* set up skb fields */
skb->protocol = eth_type_trans(skb, dev);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
/* bookkeeping */
dev->stats.rx_packets++;
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 2602852cc55a..b3e157ed6776 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1,122 +1,84 @@
-/**************************************************************************/
-/* */
-/* IBM eServer i/pSeries Virtual Ethernet Device Driver */
-/* Copyright (C) 2003 IBM Corp. */
-/* Originally written by Dave Larson (larson1@us.ibm.com) */
-/* Maintained by Santiago Leon (santil@us.ibm.com) */
-/* */
-/* This program is free software; you can redistribute it and/or modify */
-/* it under the terms of the GNU General Public License as published by */
-/* the Free Software Foundation; either version 2 of the License, or */
-/* (at your option) any later version. */
-/* */
-/* This program is distributed in the hope that it will be useful, */
-/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
-/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
-/* GNU General Public License for more details. */
-/* */
-/* You should have received a copy of the GNU General Public License */
-/* along with this program; if not, write to the Free Software */
-/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 */
-/* USA */
-/* */
-/* This module contains the implementation of a virtual ethernet device */
-/* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN */
-/* option of the RS/6000 Platform Architechture to interface with virtual */
-/* ethernet NICs that are presented to the partition by the hypervisor. */
-/* */
-/**************************************************************************/
/*
- TODO:
- - add support for sysfs
- - possibly remove procfs support
-*/
+ * IBM Power Virtual Ethernet Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2003, 2010
+ *
+ * Authors: Dave Larson <larson1@us.ibm.com>
+ * Santiago Leon <santil@linux.vnet.ibm.com>
+ * Brian King <brking@linux.vnet.ibm.com>
+ * Robert Jennings <rcj@linux.vnet.ibm.com>
+ * Anton Blanchard <anton@au.ibm.com>
+ */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
-#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/ethtool.h>
-#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
+#include <linux/ipv6.h>
#include <linux/slab.h>
-#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <asm/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
-#include <asm/uaccess.h>
#include <asm/firmware.h>
-#include <linux/seq_file.h>
#include "ibmveth.h"
-#undef DEBUG
-
-#define ibmveth_printk(fmt, args...) \
- printk(KERN_DEBUG "%s: " fmt, __FILE__, ## args)
-
-#define ibmveth_error_printk(fmt, args...) \
- printk(KERN_ERR "(%s:%3.3d ua:%x) ERROR: " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
-
-#ifdef DEBUG
-#define ibmveth_debug_printk_no_adapter(fmt, args...) \
- printk(KERN_DEBUG "(%s:%3.3d): " fmt, __FILE__, __LINE__ , ## args)
-#define ibmveth_debug_printk(fmt, args...) \
- printk(KERN_DEBUG "(%s:%3.3d ua:%x): " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
-#define ibmveth_assert(expr) \
- if(!(expr)) { \
- printk(KERN_DEBUG "assertion failed (%s:%3.3d ua:%x): %s\n", __FILE__, __LINE__, adapter->vdev->unit_address, #expr); \
- BUG(); \
- }
-#else
-#define ibmveth_debug_printk_no_adapter(fmt, args...)
-#define ibmveth_debug_printk(fmt, args...)
-#define ibmveth_assert(expr)
-#endif
-
-static int ibmveth_open(struct net_device *dev);
-static int ibmveth_close(struct net_device *dev);
-static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
-static int ibmveth_poll(struct napi_struct *napi, int budget);
-static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev);
-static void ibmveth_set_multicast_list(struct net_device *dev);
-static int ibmveth_change_mtu(struct net_device *dev, int new_mtu);
-static void ibmveth_proc_register_driver(void);
-static void ibmveth_proc_unregister_driver(void);
-static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
-static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
-static struct kobj_type ktype_veth_pool;
+static struct kobj_type ktype_veth_pool;
-#ifdef CONFIG_PROC_FS
-#define IBMVETH_PROC_DIR "ibmveth"
-static struct proc_dir_entry *ibmveth_proc_dir;
-#endif
static const char ibmveth_driver_name[] = "ibmveth";
-static const char ibmveth_driver_string[] = "IBM i/pSeries Virtual Ethernet Driver";
-#define ibmveth_driver_version "1.03"
+static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
+#define ibmveth_driver_version "1.04"
-MODULE_AUTHOR("Santiago Leon <santil@us.ibm.com>");
-MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver");
+MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);
+static unsigned int tx_copybreak __read_mostly = 128;
+module_param(tx_copybreak, uint, 0644);
+MODULE_PARM_DESC(tx_copybreak,
+ "Maximum size of packet that is copied to a new buffer on transmit");
+
+static unsigned int rx_copybreak __read_mostly = 128;
+module_param(rx_copybreak, uint, 0644);
+MODULE_PARM_DESC(rx_copybreak,
+ "Maximum size of packet that is copied to a new buffer on receive");
+
+static unsigned int rx_flush __read_mostly = 0;
+module_param(rx_flush, uint, 0644);
+MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
+
struct ibmveth_stat {
char name[ETH_GSTRING_LEN];
int offset;
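
The three module parameters added above gate the copybreak and cache-flush behaviour used later in the patch, and their 0644 permissions make them writable at runtime through the module parameters directory in sysfs. A minimal userspace sketch of the transmit-side decision, with a boolean standing in for the skb accessors (the driver's real test is in ibmveth_start_xmit below):

#include <stdbool.h>

static unsigned int tx_copybreak = 128;	/* default from this patch */

/*
 * Copy small linear packets into the premapped bounce buffer instead
 * of paying a TCE insert/remove per send; mirrors the force_bounce
 * test in ibmveth_start_xmit().
 */
static bool use_bounce_buffer(bool nonlinear, unsigned int len)
{
	return !nonlinear && len < tx_copybreak;
}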
@@ -128,12 +90,16 @@ struct ibmveth_stat {
struct ibmveth_stat ibmveth_stats[] = {
{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
- { "replenish_add_buff_failure", IBMVETH_STAT_OFF(replenish_add_buff_failure) },
- { "replenish_add_buff_success", IBMVETH_STAT_OFF(replenish_add_buff_success) },
+ { "replenish_add_buff_failure",
+ IBMVETH_STAT_OFF(replenish_add_buff_failure) },
+ { "replenish_add_buff_success",
+ IBMVETH_STAT_OFF(replenish_add_buff_success) },
{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
+ { "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
+ { "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
};
/* simple methods of getting data from the current rxq entry */
@@ -144,41 +110,44 @@ static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
- return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >> IBMVETH_RXQ_TOGGLE_SHIFT;
+ return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
+ IBMVETH_RXQ_TOGGLE_SHIFT;
}
static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
- return (ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle);
+ return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
}
static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
- return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID);
+ return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
}
static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
- return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK);
+ return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}
static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
- return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
+ return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length;
}
static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
- return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD);
+ return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}
/* setup the initial settings for a buffer pool */
-static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active)
+static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
+ u32 pool_index, u32 pool_size,
+ u32 buff_size, u32 pool_active)
{
pool->size = pool_size;
pool->index = pool_index;
pool->buff_size = buff_size;
- pool->threshold = pool_size / 2;
+ pool->threshold = pool_size * 7 / 8;
pool->active = pool_active;
}
@@ -189,12 +158,11 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
- if(!pool->free_map) {
+ if (!pool->free_map)
return -1;
- }
pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
- if(!pool->dma_addr) {
+ if (!pool->dma_addr) {
kfree(pool->free_map);
pool->free_map = NULL;
return -1;
@@ -202,7 +170,7 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
- if(!pool->skbuff) {
+ if (!pool->skbuff) {
kfree(pool->dma_addr);
pool->dma_addr = NULL;
@@ -213,9 +181,8 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);
- for(i = 0; i < pool->size; ++i) {
+ for (i = 0; i < pool->size; ++i)
pool->free_map[i] = i;
- }
atomic_set(&pool->available, 0);
pool->producer_index = 0;
@@ -224,10 +191,19 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
return 0;
}
+static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
+{
+ unsigned long offset;
+
+ for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
+ asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
+}
+
/* replenish the buffers for a pool. note that we don't need to
* skb_reserve these since they are used for incoming...
*/
-static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
+static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
+ struct ibmveth_buff_pool *pool)
{
u32 i;
u32 count = pool->size - atomic_read(&pool->available);
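
The dcbfl loop above flushes a buffer from the data cache one cache line at a time before it is handed to the firmware. The replenish path below bounds the flushed length to what the firmware can actually fill; a compilable sketch of that arithmetic, using an illustrative 2048-byte pool buffer (IBMVETH_BUFF_OH is 22 per ibmveth.h):

#include <stdio.h>

#define IBMVETH_BUFF_OH 22	/* 14-byte ethernet header + 8-byte opaque handle */

/* Flush only min(pool buffer size, MTU + overhead) bytes, as in the
 * rx_flush branch of ibmveth_replenish_buffer_pool(). */
static unsigned int flush_len(unsigned int buff_size, unsigned int mtu)
{
	unsigned int len = mtu + IBMVETH_BUFF_OH;

	return len < buff_size ? len : buff_size;
}

int main(void)
{
	printf("%u\n", flush_len(2048, 1500));	/* prints 1522 */
	return 0;
}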
@@ -240,23 +216,26 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
mb();
- for(i = 0; i < count; ++i) {
+ for (i = 0; i < count; ++i) {
union ibmveth_buf_desc desc;
- skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
+ skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
- if(!skb) {
- ibmveth_debug_printk("replenish: unable to allocate skb\n");
+ if (!skb) {
+ netdev_dbg(adapter->netdev,
+ "replenish: unable to allocate skb\n");
adapter->replenish_no_mem++;
break;
}
free_index = pool->consumer_index;
- pool->consumer_index = (pool->consumer_index + 1) % pool->size;
+ pool->consumer_index++;
+ if (pool->consumer_index >= pool->size)
+ pool->consumer_index = 0;
index = pool->free_map[free_index];
- ibmveth_assert(index != IBM_VETH_INVALID_MAP);
- ibmveth_assert(pool->skbuff[index] == NULL);
+ BUG_ON(index == IBM_VETH_INVALID_MAP);
+ BUG_ON(pool->skbuff[index] != NULL);
dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
pool->buff_size, DMA_FROM_DEVICE);
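
The consumer-index change in this hunk swaps a modulo for a compare-and-reset; both produce the same wraparound for indices below the pool size, but the branch avoids an integer division on every replenished buffer. A standalone sketch:

/* Equivalent to (idx + 1) % size for idx < size, without the divide. */
static unsigned int ring_advance(unsigned int idx, unsigned int size)
{
	if (++idx >= size)
		idx = 0;
	return idx;
}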
@@ -269,16 +248,23 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
pool->skbuff[index] = skb;
correlator = ((u64)pool->index << 32) | index;
- *(u64*)skb->data = correlator;
+ *(u64 *)skb->data = correlator;
desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
desc.fields.address = dma_addr;
- lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
+ if (rx_flush) {
+ unsigned int len = min(pool->buff_size,
+ adapter->netdev->mtu +
+ IBMVETH_BUFF_OH);
+ ibmveth_flush_buffer(skb->data, len);
+ }
+ lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
+ desc.desc);
- if (lpar_rc != H_SUCCESS)
+ if (lpar_rc != H_SUCCESS) {
goto failure;
- else {
+ } else {
buffers_added++;
adapter->replenish_add_buff_success++;
}
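
Each receive buffer is stamped with a 64-bit correlator that packs the pool number into the high word and the buffer index into the low word, so the completion path can find the owning buffer without a search. A self-contained sketch of the encoding used above and the decoding used in ibmveth_remove_buffer_from_pool() below:

#include <stdint.h>

static uint64_t pack_correlator(uint32_t pool, uint32_t index)
{
	return ((uint64_t)pool << 32) | index;
}

static void unpack_correlator(uint64_t c, uint32_t *pool, uint32_t *index)
{
	*pool = c >> 32;		/* high word: pool number */
	*index = c & 0xffffffffUL;	/* low word: buffer index */
}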
@@ -313,26 +299,31 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
adapter->replenish_task_cycles++;
- for (i = (IbmVethNumBufferPools - 1); i >= 0; i--)
- if(adapter->rx_buff_pool[i].active)
- ibmveth_replenish_buffer_pool(adapter,
- &adapter->rx_buff_pool[i]);
+ for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
+ struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];
- adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
+ if (pool->active &&
+ (atomic_read(&pool->available) < pool->threshold))
+ ibmveth_replenish_buffer_pool(adapter, pool);
+ }
+
+ adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
+ 4096 - 8);
}
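
Two related changes meet in this hunk: the pool threshold set earlier in the patch rises from half to 7/8 of capacity, and the replenish task now skips pools that are still above that threshold instead of unconditionally refilling every active pool. A sketch of the new trigger, with illustrative numbers:

/* A pool is topped up only once its available count falls below
 * 7/8 of its size; e.g. a 512-buffer pool replenishes below 448. */
static int needs_replenish(int active, int available, int size)
{
	return active && available < size * 7 / 8;
}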
/* empty and free a buffer pool - also used to do cleanup in error paths */
-static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
+static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
+ struct ibmveth_buff_pool *pool)
{
int i;
kfree(pool->free_map);
pool->free_map = NULL;
- if(pool->skbuff && pool->dma_addr) {
- for(i = 0; i < pool->size; ++i) {
+ if (pool->skbuff && pool->dma_addr) {
+ for (i = 0; i < pool->size; ++i) {
struct sk_buff *skb = pool->skbuff[i];
- if(skb) {
+ if (skb) {
dma_unmap_single(&adapter->vdev->dev,
pool->dma_addr[i],
pool->buff_size,
@@ -343,31 +334,32 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm
}
}
- if(pool->dma_addr) {
+ if (pool->dma_addr) {
kfree(pool->dma_addr);
pool->dma_addr = NULL;
}
- if(pool->skbuff) {
+ if (pool->skbuff) {
kfree(pool->skbuff);
pool->skbuff = NULL;
}
}
/* remove a buffer from a pool */
-static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64 correlator)
+static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
+ u64 correlator)
{
unsigned int pool = correlator >> 32;
unsigned int index = correlator & 0xffffffffUL;
unsigned int free_index;
struct sk_buff *skb;
- ibmveth_assert(pool < IbmVethNumBufferPools);
- ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
+ BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
+ BUG_ON(index >= adapter->rx_buff_pool[pool].size);
skb = adapter->rx_buff_pool[pool].skbuff[index];
- ibmveth_assert(skb != NULL);
+ BUG_ON(skb == NULL);
adapter->rx_buff_pool[pool].skbuff[index] = NULL;
@@ -377,9 +369,10 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64
DMA_FROM_DEVICE);
free_index = adapter->rx_buff_pool[pool].producer_index;
- adapter->rx_buff_pool[pool].producer_index
- = (adapter->rx_buff_pool[pool].producer_index + 1)
- % adapter->rx_buff_pool[pool].size;
+ adapter->rx_buff_pool[pool].producer_index++;
+ if (adapter->rx_buff_pool[pool].producer_index >=
+ adapter->rx_buff_pool[pool].size)
+ adapter->rx_buff_pool[pool].producer_index = 0;
adapter->rx_buff_pool[pool].free_map[free_index] = index;
mb();
@@ -394,8 +387,8 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada
unsigned int pool = correlator >> 32;
unsigned int index = correlator & 0xffffffffUL;
- ibmveth_assert(pool < IbmVethNumBufferPools);
- ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
+ BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
+ BUG_ON(index >= adapter->rx_buff_pool[pool].size);
return adapter->rx_buff_pool[pool].skbuff[index];
}
@@ -410,10 +403,10 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
union ibmveth_buf_desc desc;
unsigned long lpar_rc;
- ibmveth_assert(pool < IbmVethNumBufferPools);
- ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
+ BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
+ BUG_ON(index >= adapter->rx_buff_pool[pool].size);
- if(!adapter->rx_buff_pool[pool].active) {
+ if (!adapter->rx_buff_pool[pool].active) {
ibmveth_rxq_harvest_buffer(adapter);
ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
return;
@@ -425,12 +418,13 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
- if(lpar_rc != H_SUCCESS) {
- ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
+ if (lpar_rc != H_SUCCESS) {
+ netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
+ "during recycle rc=%ld", lpar_rc);
ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
}
- if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
+ if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
adapter->rx_queue.index = 0;
adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
}
@@ -440,7 +434,7 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
- if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
+ if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
adapter->rx_queue.index = 0;
adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
}
@@ -451,7 +445,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
int i;
struct device *dev = &adapter->vdev->dev;
- if(adapter->buffer_list_addr != NULL) {
+ if (adapter->buffer_list_addr != NULL) {
if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
DMA_BIDIRECTIONAL);
@@ -461,7 +455,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
adapter->buffer_list_addr = NULL;
}
- if(adapter->filter_list_addr != NULL) {
+ if (adapter->filter_list_addr != NULL) {
if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
dma_unmap_single(dev, adapter->filter_list_dma, 4096,
DMA_BIDIRECTIONAL);
@@ -471,7 +465,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
adapter->filter_list_addr = NULL;
}
- if(adapter->rx_queue.queue_addr != NULL) {
+ if (adapter->rx_queue.queue_addr != NULL) {
if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
dma_unmap_single(dev,
adapter->rx_queue.queue_dma,
@@ -483,7 +477,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
adapter->rx_queue.queue_addr = NULL;
}
- for(i = 0; i<IbmVethNumBufferPools; i++)
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
if (adapter->rx_buff_pool[i].active)
ibmveth_free_buffer_pool(adapter,
&adapter->rx_buff_pool[i]);
@@ -506,9 +500,11 @@ static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
{
int rc, try_again = 1;
- /* After a kexec the adapter will still be open, so our attempt to
- * open it will fail. So if we get a failure we free the adapter and
- * try again, but only once. */
+ /*
+ * After a kexec the adapter will still be open, so our attempt to
+ * open it will fail. So if we get a failure we free the adapter and
+ * try again, but only once.
+ */
retry:
rc = h_register_logical_lan(adapter->vdev->unit_address,
adapter->buffer_list_dma, rxq_desc.desc,
@@ -537,28 +533,31 @@ static int ibmveth_open(struct net_device *netdev)
int i;
struct device *dev;
- ibmveth_debug_printk("open starting\n");
+ netdev_dbg(netdev, "open starting\n");
napi_enable(&adapter->napi);
- for(i = 0; i<IbmVethNumBufferPools; i++)
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
rxq_entries += adapter->rx_buff_pool[i].size;
adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
- if(!adapter->buffer_list_addr || !adapter->filter_list_addr) {
- ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
+ if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
+ netdev_err(netdev, "unable to allocate filter or buffer list "
+ "pages\n");
ibmveth_cleanup(adapter);
napi_disable(&adapter->napi);
return -ENOMEM;
}
- adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries;
- adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL);
+ adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
+ rxq_entries;
+ adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len,
+ GFP_KERNEL);
- if(!adapter->rx_queue.queue_addr) {
- ibmveth_error_printk("unable to allocate rx queue pages\n");
+ if (!adapter->rx_queue.queue_addr) {
+ netdev_err(netdev, "unable to allocate rx queue pages\n");
ibmveth_cleanup(adapter);
napi_disable(&adapter->napi);
return -ENOMEM;
@@ -577,7 +576,8 @@ static int ibmveth_open(struct net_device *netdev)
if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
(dma_mapping_error(dev, adapter->filter_list_dma)) ||
(dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
- ibmveth_error_printk("unable to map filter or buffer list pages\n");
+ netdev_err(netdev, "unable to map filter or buffer list "
+ "pages\n");
ibmveth_cleanup(adapter);
napi_disable(&adapter->napi);
return -ENOMEM;
@@ -590,20 +590,23 @@ static int ibmveth_open(struct net_device *netdev)
memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
mac_address = mac_address >> 16;
- rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | adapter->rx_queue.queue_len;
+ rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
+ adapter->rx_queue.queue_len;
rxq_desc.fields.address = adapter->rx_queue.queue_dma;
- ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr);
- ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
- ibmveth_debug_printk("receive q @ 0x%p\n", adapter->rx_queue.queue_addr);
+ netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
+ netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
+ netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr);
h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
- if(lpar_rc != H_SUCCESS) {
- ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
- ibmveth_error_printk("buffer TCE:0x%llx filter TCE:0x%llx rxq desc:0x%llx MAC:0x%llx\n",
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
+ lpar_rc);
+ netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
+ "desc:0x%llx MAC:0x%llx\n",
adapter->buffer_list_dma,
adapter->filter_list_dma,
rxq_desc.desc,
@@ -613,11 +616,11 @@ static int ibmveth_open(struct net_device *netdev)
return -ENONET;
}
- for(i = 0; i<IbmVethNumBufferPools; i++) {
- if(!adapter->rx_buff_pool[i].active)
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
+ if (!adapter->rx_buff_pool[i].active)
continue;
if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
- ibmveth_error_printk("unable to alloc pool\n");
+ netdev_err(netdev, "unable to alloc pool\n");
adapter->rx_buff_pool[i].active = 0;
ibmveth_cleanup(adapter);
napi_disable(&adapter->napi);
@@ -625,9 +628,12 @@ static int ibmveth_open(struct net_device *netdev)
}
}
- ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
- if((rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
- ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
+ netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
+ rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
+ netdev);
+ if (rc != 0) {
+ netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
+ netdev->irq, rc);
do {
rc = h_free_logical_lan(adapter->vdev->unit_address);
} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
@@ -640,7 +646,7 @@ static int ibmveth_open(struct net_device *netdev)
adapter->bounce_buffer =
kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
if (!adapter->bounce_buffer) {
- ibmveth_error_printk("unable to allocate bounce buffer\n");
+ netdev_err(netdev, "unable to allocate bounce buffer\n");
ibmveth_cleanup(adapter);
napi_disable(&adapter->napi);
return -ENOMEM;
@@ -649,18 +655,18 @@ static int ibmveth_open(struct net_device *netdev)
dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
- ibmveth_error_printk("unable to map bounce buffer\n");
+ netdev_err(netdev, "unable to map bounce buffer\n");
ibmveth_cleanup(adapter);
napi_disable(&adapter->napi);
return -ENOMEM;
}
- ibmveth_debug_printk("initial replenish cycle\n");
+ netdev_dbg(netdev, "initial replenish cycle\n");
ibmveth_interrupt(netdev->irq, netdev);
netif_start_queue(netdev);
- ibmveth_debug_printk("open complete\n");
+ netdev_dbg(netdev, "open complete\n");
return 0;
}
@@ -670,7 +676,7 @@ static int ibmveth_close(struct net_device *netdev)
struct ibmveth_adapter *adapter = netdev_priv(netdev);
long lpar_rc;
- ibmveth_debug_printk("close starting\n");
+ netdev_dbg(netdev, "close starting\n");
napi_disable(&adapter->napi);
@@ -683,26 +689,29 @@ static int ibmveth_close(struct net_device *netdev)
lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
- if(lpar_rc != H_SUCCESS)
- {
- ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n",
- lpar_rc);
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_free_logical_lan failed with %lx, "
+ "continuing with close\n", lpar_rc);
}
free_irq(netdev->irq, netdev);
- adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
+ adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
+ 4096 - 8);
ibmveth_cleanup(adapter);
- ibmveth_debug_printk("close complete\n");
+ netdev_dbg(netdev, "close complete\n");
return 0;
}
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
- cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
- cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE);
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+ SUPPORTED_FIBRE);
+ cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
+ ADVERTISED_FIBRE);
cmd->speed = SPEED_1000;
cmd->duplex = DUPLEX_FULL;
cmd->port = PORT_FIBRE;
@@ -714,12 +723,16 @@ static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return 0;
}
-static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) {
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
- strncpy(info->version, ibmveth_driver_version, sizeof(info->version) - 1);
+ strncpy(info->version, ibmveth_driver_version,
+ sizeof(info->version) - 1);
}
-static u32 netdev_get_link(struct net_device *dev) {
+static u32 netdev_get_link(struct net_device *dev)
+{
return 1;
}
@@ -727,18 +740,20 @@ static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data)
{
struct ibmveth_adapter *adapter = netdev_priv(dev);
- if (data)
+ if (data) {
adapter->rx_csum = 1;
- else {
+ } else {
/*
- * Since the ibmveth firmware interface does not have the concept of
- * separate tx/rx checksum offload enable, if rx checksum is disabled
- * we also have to disable tx checksum offload. Once we disable rx
- * checksum offload, we are no longer allowed to send tx buffers that
- * are not properly checksummed.
+ * Since the ibmveth firmware interface does not have the
+ * concept of separate tx/rx checksum offload enable, if rx
+ * checksum is disabled we also have to disable tx checksum
+ * offload. Once we disable rx checksum offload, we are no
+ * longer allowed to send tx buffers that are not properly
+ * checksummed.
*/
adapter->rx_csum = 0;
dev->features &= ~NETIF_F_IP_CSUM;
+ dev->features &= ~NETIF_F_IPV6_CSUM;
}
}
@@ -747,10 +762,15 @@ static void ibmveth_set_tx_csum_flags(struct net_device *dev, u32 data)
struct ibmveth_adapter *adapter = netdev_priv(dev);
if (data) {
- dev->features |= NETIF_F_IP_CSUM;
+ if (adapter->fw_ipv4_csum_support)
+ dev->features |= NETIF_F_IP_CSUM;
+ if (adapter->fw_ipv6_csum_support)
+ dev->features |= NETIF_F_IPV6_CSUM;
adapter->rx_csum = 1;
- } else
+ } else {
dev->features &= ~NETIF_F_IP_CSUM;
+ dev->features &= ~NETIF_F_IPV6_CSUM;
+ }
}
static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
@@ -758,7 +778,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
{
struct ibmveth_adapter *adapter = netdev_priv(dev);
unsigned long set_attr, clr_attr, ret_attr;
- long ret;
+ unsigned long set_attr6, clr_attr6;
+ long ret, ret6;
int rc1 = 0, rc2 = 0;
int restart = 0;
@@ -772,10 +793,13 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
set_attr = 0;
clr_attr = 0;
+ set_attr6 = 0;
+ clr_attr6 = 0;
- if (data)
+ if (data) {
set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
- else
+ set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
+ } else {
clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
+ clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
+ }
ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
@@ -786,18 +810,39 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
set_attr, &ret_attr);
if (ret != H_SUCCESS) {
- rc1 = -EIO;
- ibmveth_error_printk("unable to change checksum offload settings."
- " %d rc=%ld\n", data, ret);
+ netdev_err(dev, "unable to change IPv4 checksum "
+ "offload settings. %d rc=%ld\n",
+ data, ret);
ret = h_illan_attributes(adapter->vdev->unit_address,
set_attr, clr_attr, &ret_attr);
+ } else {
+ adapter->fw_ipv4_csum_support = data;
+ }
+
+ ret6 = h_illan_attributes(adapter->vdev->unit_address,
+ clr_attr6, set_attr6, &ret_attr);
+
+ if (ret6 != H_SUCCESS) {
+ netdev_err(dev, "unable to change IPv6 checksum "
+ "offload settings. %d rc=%ld\n",
+ data, ret6);
+
+ ret = h_illan_attributes(adapter->vdev->unit_address,
+ set_attr6, clr_attr6,
+ &ret_attr);
} else
+ adapter->fw_ipv6_csum_support = data;
+
+ if (ret == H_SUCCESS || ret6 == H_SUCCESS)
done(dev, data);
+ else
+ rc1 = -EIO;
} else {
rc1 = -EIO;
- ibmveth_error_printk("unable to change checksum offload settings."
- " %d rc=%ld ret_attr=%lx\n", data, ret, ret_attr);
+ netdev_err(dev, "unable to change checksum offload settings."
+ " %d rc=%ld ret_attr=%lx\n", data, ret,
+ ret_attr);
}
if (restart)
@@ -821,13 +866,14 @@ static int ibmveth_set_tx_csum(struct net_device *dev, u32 data)
struct ibmveth_adapter *adapter = netdev_priv(dev);
int rc = 0;
- if (data && (dev->features & NETIF_F_IP_CSUM))
+ if (data && (dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
return 0;
- if (!data && !(dev->features & NETIF_F_IP_CSUM))
+ if (!data && !(dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
return 0;
if (data && !adapter->rx_csum)
- rc = ibmveth_set_csum_offload(dev, data, ibmveth_set_tx_csum_flags);
+ rc = ibmveth_set_csum_offload(dev, data,
+ ibmveth_set_tx_csum_flags);
else
ibmveth_set_tx_csum_flags(dev, data);
@@ -881,6 +927,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
.get_strings = ibmveth_get_strings,
.get_sset_count = ibmveth_get_sset_count,
.get_ethtool_stats = ibmveth_get_ethtool_stats,
+ .set_sg = ethtool_op_set_sg,
};
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -890,129 +937,216 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
-static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
- struct net_device *netdev)
+static int ibmveth_send(struct ibmveth_adapter *adapter,
+ union ibmveth_buf_desc *descs)
{
- struct ibmveth_adapter *adapter = netdev_priv(netdev);
- union ibmveth_buf_desc desc;
- unsigned long lpar_rc;
unsigned long correlator;
- unsigned long flags;
unsigned int retry_count;
- unsigned int tx_dropped = 0;
- unsigned int tx_bytes = 0;
- unsigned int tx_packets = 0;
- unsigned int tx_send_failed = 0;
- unsigned int tx_map_failed = 0;
- int used_bounce = 0;
- unsigned long data_dma_addr;
+ unsigned long ret;
+
+ /*
+ * The retry count sets a maximum for the number of broadcast and
+ * multicast destinations within the system.
+ */
+ retry_count = 1024;
+ correlator = 0;
+ do {
+ ret = h_send_logical_lan(adapter->vdev->unit_address,
+ descs[0].desc, descs[1].desc,
+ descs[2].desc, descs[3].desc,
+ descs[4].desc, descs[5].desc,
+ correlator, &correlator);
+ } while ((ret == H_BUSY) && (retry_count--));
- desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len;
+ if (ret != H_SUCCESS && ret != H_DROPPED) {
+ netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
+ "with rc=%ld\n", ret);
+ return 1;
+ }
+ return 0;
+}
+
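
ibmveth_send() above centralizes the hcall retry that used to live inline in the transmit routine. The hypervisor returns H_BUSY while earlier broadcast or multicast copies are still pending, and the 1024-attempt bound caps how many such destinations a single send will wait out. A minimal sketch of the pattern, with h_send() standing in for the six-descriptor hcall and illustrative return codes:

#define H_BUSY		1	/* illustrative values, not the PAPR ones */
#define H_SUCCESS	0

static long send_with_retry(long (*h_send)(void))
{
	unsigned int retry_count = 1024;
	long ret;

	/* Reissue while busy, but give up after a bounded number of tries. */
	do {
		ret = h_send();
	} while (ret == H_BUSY && retry_count--);

	return ret;
}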
+static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ unsigned int desc_flags;
+ union ibmveth_buf_desc descs[6];
+ int last, i;
+ int force_bounce = 0;
+
+ /*
+ * veth handles a maximum of 6 segments including the header, so
+ * we have to linearize the skb if there are more than this.
+ */
+ if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
+ netdev->stats.tx_dropped++;
+ goto out;
+ }
+
+ /* veth can't checksum offload UDP */
if (skb->ip_summed == CHECKSUM_PARTIAL &&
- ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
- ibmveth_error_printk("tx: failed to checksum packet\n");
- tx_dropped++;
+ ((skb->protocol == htons(ETH_P_IP) &&
+ ip_hdr(skb)->protocol != IPPROTO_TCP) ||
+ (skb->protocol == htons(ETH_P_IPV6) &&
+ ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
+ skb_checksum_help(skb)) {
+
+ netdev_err(netdev, "tx: failed to checksum packet\n");
+ netdev->stats.tx_dropped++;
goto out;
}
+ desc_flags = IBMVETH_BUF_VALID;
+
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- unsigned char *buf = skb_transport_header(skb) + skb->csum_offset;
+ unsigned char *buf = skb_transport_header(skb) +
+ skb->csum_offset;
- desc.fields.flags_len |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);
+ desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);
/* Need to zero out the checksum */
buf[0] = 0;
buf[1] = 0;
}
- data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
- skb->len, DMA_TO_DEVICE);
- if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) {
- if (!firmware_has_feature(FW_FEATURE_CMO))
- ibmveth_error_printk("tx: unable to map xmit buffer\n");
+retry_bounce:
+ memset(descs, 0, sizeof(descs));
+
+ /*
+ * If a linear packet is below the rx threshold then
+ * copy it into the static bounce buffer. This avoids the
+ * cost of a TCE insert and remove.
+ */
+ if (force_bounce || (!skb_is_nonlinear(skb) &&
+ (skb->len < tx_copybreak))) {
skb_copy_from_linear_data(skb, adapter->bounce_buffer,
skb->len);
- desc.fields.address = adapter->bounce_buffer_dma;
- tx_map_failed++;
- used_bounce = 1;
- wmb();
- } else
- desc.fields.address = data_dma_addr;
-
- /* send the frame. Arbitrarily set retrycount to 1024 */
- correlator = 0;
- retry_count = 1024;
- do {
- lpar_rc = h_send_logical_lan(adapter->vdev->unit_address,
- desc.desc, 0, 0, 0, 0, 0,
- correlator, &correlator);
- } while ((lpar_rc == H_BUSY) && (retry_count--));
-
- if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
- ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
- ibmveth_error_printk("tx: valid=%d, len=%d, address=0x%08x\n",
- (desc.fields.flags_len & IBMVETH_BUF_VALID) ? 1 : 0,
- skb->len, desc.fields.address);
- tx_send_failed++;
- tx_dropped++;
- } else {
- tx_packets++;
- tx_bytes += skb->len;
- netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
+
+ descs[0].fields.flags_len = desc_flags | skb->len;
+ descs[0].fields.address = adapter->bounce_buffer_dma;
+
+ if (ibmveth_send(adapter, descs)) {
+ adapter->tx_send_failed++;
+ netdev->stats.tx_dropped++;
+ } else {
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+ }
+
+ goto out;
}
- if (!used_bounce)
- dma_unmap_single(&adapter->vdev->dev, data_dma_addr,
- skb->len, DMA_TO_DEVICE);
+ /* Map the header */
+ descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
+ skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address))
+ goto map_failed;
-out: spin_lock_irqsave(&adapter->stats_lock, flags);
- netdev->stats.tx_dropped += tx_dropped;
- netdev->stats.tx_bytes += tx_bytes;
- netdev->stats.tx_packets += tx_packets;
- adapter->tx_send_failed += tx_send_failed;
- adapter->tx_map_failed += tx_map_failed;
- spin_unlock_irqrestore(&adapter->stats_lock, flags);
+ descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
+ /* Map the frags */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ unsigned long dma_addr;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ dma_addr = dma_map_page(&adapter->vdev->dev, frag->page,
+ frag->page_offset, frag->size,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
+ goto map_failed_frags;
+
+ descs[i+1].fields.flags_len = desc_flags | frag->size;
+ descs[i+1].fields.address = dma_addr;
+ }
+
+ if (ibmveth_send(adapter, descs)) {
+ adapter->tx_send_failed++;
+ netdev->stats.tx_dropped++;
+ } else {
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++)
+ dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
+ descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+ DMA_TO_DEVICE);
+
+out:
dev_kfree_skb(skb);
return NETDEV_TX_OK;
+
+map_failed_frags:
+ last = i+1;
+ for (i = 0; i < last; i++)
+ dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
+ descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+ DMA_TO_DEVICE);
+
+map_failed:
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ netdev_err(netdev, "tx: unable to map xmit buffer\n");
+ adapter->tx_map_failed++;
+ skb_linearize(skb);
+ force_bounce = 1;
+ goto retry_bounce;
}
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
- struct ibmveth_adapter *adapter = container_of(napi, struct ibmveth_adapter, napi);
+ struct ibmveth_adapter *adapter =
+ container_of(napi, struct ibmveth_adapter, napi);
struct net_device *netdev = adapter->netdev;
int frames_processed = 0;
unsigned long lpar_rc;
- restart_poll:
+restart_poll:
do {
- struct sk_buff *skb;
-
if (!ibmveth_rxq_pending_buffer(adapter))
break;
- rmb();
+ smp_rmb();
if (!ibmveth_rxq_buffer_valid(adapter)) {
wmb(); /* suggested by larson1 */
adapter->rx_invalid_buffer++;
- ibmveth_debug_printk("recycling invalid buffer\n");
+ netdev_dbg(netdev, "recycling invalid buffer\n");
ibmveth_rxq_recycle_buffer(adapter);
} else {
+ struct sk_buff *skb, *new_skb;
int length = ibmveth_rxq_frame_length(adapter);
int offset = ibmveth_rxq_frame_offset(adapter);
int csum_good = ibmveth_rxq_csum_good(adapter);
skb = ibmveth_rxq_get_buffer(adapter);
- if (csum_good)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- ibmveth_rxq_harvest_buffer(adapter);
+ new_skb = NULL;
+ if (length < rx_copybreak)
+ new_skb = netdev_alloc_skb(netdev, length);
+
+ if (new_skb) {
+ skb_copy_to_linear_data(new_skb,
+ skb->data + offset,
+ length);
+ if (rx_flush)
+ ibmveth_flush_buffer(skb->data,
+ length + offset);
+ skb = new_skb;
+ ibmveth_rxq_recycle_buffer(adapter);
+ } else {
+ ibmveth_rxq_harvest_buffer(adapter);
+ skb_reserve(skb, offset);
+ }
- skb_reserve(skb, offset);
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, netdev);
+ if (csum_good)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
netif_receive_skb(skb); /* send it up */
netdev->stats.rx_packets++;
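
The receive path now mirrors the transmit copybreak: frames shorter than rx_copybreak are copied into a freshly allocated skb so the original DMA-mapped buffer can be recycled straight back to the firmware, while larger frames surrender the buffer (harvest) and take the skb_reserve/skb_put path. A sketch of the decision, allowing for the copy allocation to fail:

enum rx_action { RX_COPY_RECYCLE, RX_HARVEST };

/* Small frames are copied so the DMA buffer can be recycled; if the
 * copy skb cannot be allocated, fall back to harvesting the buffer,
 * exactly as the poll loop above does. */
static enum rx_action classify_rx(unsigned int length,
				  unsigned int rx_copybreak,
				  int copy_skb_available)
{
	if (length < rx_copybreak && copy_skb_available)
		return RX_COPY_RECYCLE;
	return RX_HARVEST;
}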
@@ -1030,7 +1164,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
lpar_rc = h_vio_signal(adapter->vdev->unit_address,
VIO_IRQ_ENABLE);
- ibmveth_assert(lpar_rc == H_SUCCESS);
+ BUG_ON(lpar_rc != H_SUCCESS);
napi_complete(napi);
@@ -1054,7 +1188,7 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
if (napi_schedule_prep(&adapter->napi)) {
lpar_rc = h_vio_signal(adapter->vdev->unit_address,
VIO_IRQ_DISABLE);
- ibmveth_assert(lpar_rc == H_SUCCESS);
+ BUG_ON(lpar_rc != H_SUCCESS);
__napi_schedule(&adapter->napi);
}
return IRQ_HANDLED;
@@ -1071,8 +1205,9 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
IbmVethMcastEnableRecv |
IbmVethMcastDisableFiltering,
0);
- if(lpar_rc != H_SUCCESS) {
- ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
+ "entering promisc mode\n", lpar_rc);
}
} else {
struct netdev_hw_addr *ha;
@@ -1082,19 +1217,23 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
IbmVethMcastDisableFiltering |
IbmVethMcastClearFilterTable,
0);
- if(lpar_rc != H_SUCCESS) {
- ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
+ "attempting to clear filter table\n",
+ lpar_rc);
}
/* add the addresses to the filter table */
netdev_for_each_mc_addr(ha, netdev) {
- // add the multicast address to the filter table
+ /* add the multicast address to the filter table */
unsigned long mcast_addr = 0;
memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastAddFilter,
mcast_addr);
- if(lpar_rc != H_SUCCESS) {
- ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_multicast_ctrl rc=%ld "
+ "when adding an entry to the filter "
+ "table\n", lpar_rc);
}
}
@@ -1102,8 +1241,9 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastEnableFiltering,
0);
- if(lpar_rc != H_SUCCESS) {
- ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc);
+ if (lpar_rc != H_SUCCESS) {
+ netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
+ "enabling filtering\n", lpar_rc);
}
}
}
@@ -1113,49 +1253,47 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
struct ibmveth_adapter *adapter = netdev_priv(dev);
struct vio_dev *viodev = adapter->vdev;
int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
- int i;
+ int i, rc;
+ int need_restart = 0;
- if (new_mtu < IBMVETH_MAX_MTU)
+ if (new_mtu < IBMVETH_MIN_MTU)
return -EINVAL;
- for (i = 0; i < IbmVethNumBufferPools; i++)
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
break;
- if (i == IbmVethNumBufferPools)
+ if (i == IBMVETH_NUM_BUFF_POOLS)
return -EINVAL;
/* Deactivate all the buffer pools so that the next loop can activate
only the buffer pools necessary to hold the new MTU */
- for (i = 0; i < IbmVethNumBufferPools; i++)
- if (adapter->rx_buff_pool[i].active) {
- ibmveth_free_buffer_pool(adapter,
- &adapter->rx_buff_pool[i]);
- adapter->rx_buff_pool[i].active = 0;
- }
+ if (netif_running(adapter->netdev)) {
+ need_restart = 1;
+ adapter->pool_config = 1;
+ ibmveth_close(adapter->netdev);
+ adapter->pool_config = 0;
+ }
/* Look for an active buffer pool that can hold the new MTU */
- for(i = 0; i<IbmVethNumBufferPools; i++) {
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
adapter->rx_buff_pool[i].active = 1;
if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
- if (netif_running(adapter->netdev)) {
- adapter->pool_config = 1;
- ibmveth_close(adapter->netdev);
- adapter->pool_config = 0;
- dev->mtu = new_mtu;
- vio_cmo_set_dev_desired(viodev,
- ibmveth_get_desired_dma
- (viodev));
- return ibmveth_open(adapter->netdev);
- }
dev->mtu = new_mtu;
vio_cmo_set_dev_desired(viodev,
ibmveth_get_desired_dma
(viodev));
+ if (need_restart) {
+ return ibmveth_open(adapter->netdev);
+ }
return 0;
}
}
+
+ if (need_restart && (rc = ibmveth_open(adapter->netdev)))
+ return rc;
+
return -EINVAL;
}
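
ibmveth_change_mtu() accepts a new MTU only if some buffer pool can hold the frame plus the 22-byte overhead, and it now closes and reopens the interface once around the whole pool reconfiguration instead of per pool. A compilable sketch of the fit check; the pool buffer sizes here are illustrative, not the driver's defaults:

#include <stdio.h>

#define IBMVETH_BUFF_OH 22

int main(void)
{
	unsigned int buff_sizes[] = { 512, 2048, 16384, 32768, 65536 };
	unsigned int new_mtu = 9000;
	unsigned int need = new_mtu + IBMVETH_BUFF_OH;	/* 9022 */
	unsigned int i;

	for (i = 0; i < 5; i++) {
		if (need < buff_sizes[i]) {
			printf("MTU %u fits pool %u (buff_size %u)\n",
			       new_mtu, i, buff_sizes[i]);
			return 0;
		}
	}
	return 1;	/* the driver returns -EINVAL */
}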
@@ -1192,7 +1330,7 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
ret += IOMMU_PAGE_ALIGN(netdev->mtu);
- for (i = 0; i < IbmVethNumBufferPools; i++) {
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
/* add the size of the active receive buffers */
if (adapter->rx_buff_pool[i].active)
ret +=
@@ -1221,41 +1359,36 @@ static const struct net_device_ops ibmveth_netdev_ops = {
#endif
};
-static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
+static int __devinit ibmveth_probe(struct vio_dev *dev,
+ const struct vio_device_id *id)
{
int rc, i;
- long ret;
struct net_device *netdev;
struct ibmveth_adapter *adapter;
- unsigned long set_attr, ret_attr;
-
unsigned char *mac_addr_p;
unsigned int *mcastFilterSize_p;
+ dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
+ dev->unit_address);
- ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n",
- dev->unit_address);
-
- mac_addr_p = (unsigned char *) vio_get_attribute(dev,
- VETH_MAC_ADDR, NULL);
- if(!mac_addr_p) {
- printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find VETH_MAC_ADDR "
- "attribute\n", __FILE__, __LINE__);
- return 0;
+ mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
+ NULL);
+ if (!mac_addr_p) {
+ dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
+ return -EINVAL;
}
- mcastFilterSize_p = (unsigned int *) vio_get_attribute(dev,
+ mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
VETH_MCAST_FILTER_SIZE, NULL);
- if(!mcastFilterSize_p) {
- printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find "
- "VETH_MCAST_FILTER_SIZE attribute\n",
- __FILE__, __LINE__);
- return 0;
+ if (!mcastFilterSize_p) {
+ dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
+ "attribute\n");
+ return -EINVAL;
}
netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
- if(!netdev)
+ if (!netdev)
return -ENOMEM;
adapter = netdev_priv(netdev);
@@ -1263,19 +1396,19 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
adapter->vdev = dev;
adapter->netdev = netdev;
- adapter->mcastFilterSize= *mcastFilterSize_p;
+ adapter->mcastFilterSize = *mcastFilterSize_p;
adapter->pool_config = 0;
netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
- /* Some older boxes running PHYP non-natively have an OF that
- returns a 8-byte local-mac-address field (and the first
- 2 bytes have to be ignored) while newer boxes' OF return
- a 6-byte field. Note that IEEE 1275 specifies that
- local-mac-address must be a 6-byte field.
- The RPA doc specifies that the first byte must be 10b, so
- we'll just look for it to solve this 8 vs. 6 byte field issue */
-
+ /*
+ * Some older boxes running PHYP non-natively have an OF that returns
+ * a 8-byte local-mac-address field (and the first 2 bytes have to be
+ * ignored) while newer boxes' OF return a 6-byte field. Note that
+ * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
+ * The RPA doc specifies that the first byte must be 10b, so we'll
+ * just look for it to solve this 8 vs. 6 byte field issue
+ */
if ((*mac_addr_p & 0x3) != 0x02)
mac_addr_p += 2;
@@ -1286,12 +1419,11 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
netdev->netdev_ops = &ibmveth_netdev_ops;
netdev->ethtool_ops = &netdev_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
- netdev->features |= NETIF_F_LLTX;
- spin_lock_init(&adapter->stats_lock);
+ netdev->features |= NETIF_F_SG;
memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
- for(i = 0; i<IbmVethNumBufferPools; i++) {
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
int error;
@@ -1304,41 +1436,25 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
kobject_uevent(kobj, KOBJ_ADD);
}
- ibmveth_debug_printk("adapter @ 0x%p\n", adapter);
+ netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
adapter->buffer_list_dma = DMA_ERROR_CODE;
adapter->filter_list_dma = DMA_ERROR_CODE;
adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
- ibmveth_debug_printk("registering netdev...\n");
-
- ret = h_illan_attributes(dev->unit_address, 0, 0, &ret_attr);
+ netdev_dbg(netdev, "registering netdev...\n");
- if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
- !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
- (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
- set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
-
- ret = h_illan_attributes(dev->unit_address, 0, set_attr, &ret_attr);
-
- if (ret == H_SUCCESS) {
- adapter->rx_csum = 1;
- netdev->features |= NETIF_F_IP_CSUM;
- } else
- ret = h_illan_attributes(dev->unit_address, set_attr, 0, &ret_attr);
- }
+ ibmveth_set_csum_offload(netdev, 1, ibmveth_set_tx_csum_flags);
rc = register_netdev(netdev);
- if(rc) {
- ibmveth_debug_printk("failed to register netdev rc=%d\n", rc);
+ if (rc) {
+ netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
free_netdev(netdev);
return rc;
}
- ibmveth_debug_printk("registered\n");
-
- ibmveth_proc_register_adapter(adapter);
+ netdev_dbg(netdev, "registered\n");
return 0;
}
@@ -1349,114 +1465,23 @@ static int __devexit ibmveth_remove(struct vio_dev *dev)
struct ibmveth_adapter *adapter = netdev_priv(netdev);
int i;
- for(i = 0; i<IbmVethNumBufferPools; i++)
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
kobject_put(&adapter->rx_buff_pool[i].kobj);
unregister_netdev(netdev);
- ibmveth_proc_unregister_adapter(adapter);
-
free_netdev(netdev);
dev_set_drvdata(&dev->dev, NULL);
return 0;
}
-#ifdef CONFIG_PROC_FS
-static void ibmveth_proc_register_driver(void)
-{
- ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, init_net.proc_net);
- if (ibmveth_proc_dir) {
- }
-}
-
-static void ibmveth_proc_unregister_driver(void)
-{
- remove_proc_entry(IBMVETH_PROC_DIR, init_net.proc_net);
-}
-
-static int ibmveth_show(struct seq_file *seq, void *v)
-{
- struct ibmveth_adapter *adapter = seq->private;
- char *current_mac = (char *) adapter->netdev->dev_addr;
- char *firmware_mac = (char *) &adapter->mac_addr;
-
- seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);
-
- seq_printf(seq, "Unit Address: 0x%x\n", adapter->vdev->unit_address);
- seq_printf(seq, "Current MAC: %pM\n", current_mac);
- seq_printf(seq, "Firmware MAC: %pM\n", firmware_mac);
-
- seq_printf(seq, "\nAdapter Statistics:\n");
- seq_printf(seq, " TX: vio_map_single failres: %lld\n", adapter->tx_map_failed);
- seq_printf(seq, " send failures: %lld\n", adapter->tx_send_failed);
- seq_printf(seq, " RX: replenish task cycles: %lld\n", adapter->replenish_task_cycles);
- seq_printf(seq, " alloc_skb_failures: %lld\n", adapter->replenish_no_mem);
- seq_printf(seq, " add buffer failures: %lld\n", adapter->replenish_add_buff_failure);
- seq_printf(seq, " invalid buffers: %lld\n", adapter->rx_invalid_buffer);
- seq_printf(seq, " no buffers: %lld\n", adapter->rx_no_buffer);
-
- return 0;
-}
-
-static int ibmveth_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ibmveth_show, PDE(inode)->data);
-}
-
-static const struct file_operations ibmveth_proc_fops = {
- .owner = THIS_MODULE,
- .open = ibmveth_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
-{
- struct proc_dir_entry *entry;
- if (ibmveth_proc_dir) {
- char u_addr[10];
- sprintf(u_addr, "%x", adapter->vdev->unit_address);
- entry = proc_create_data(u_addr, S_IFREG, ibmveth_proc_dir,
- &ibmveth_proc_fops, adapter);
- if (!entry)
- ibmveth_error_printk("Cannot create adapter proc entry");
- }
-}
-
-static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
-{
- if (ibmveth_proc_dir) {
- char u_addr[10];
- sprintf(u_addr, "%x", adapter->vdev->unit_address);
- remove_proc_entry(u_addr, ibmveth_proc_dir);
- }
-}
-
-#else /* CONFIG_PROC_FS */
-static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
-{
-}
-
-static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
-{
-}
-static void ibmveth_proc_register_driver(void)
-{
-}
-
-static void ibmveth_proc_unregister_driver(void)
-{
-}
-#endif /* CONFIG_PROC_FS */
-
static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;
-static ssize_t veth_pool_show(struct kobject * kobj,
- struct attribute * attr, char * buf)
+static ssize_t veth_pool_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
{
struct ibmveth_buff_pool *pool = container_of(kobj,
struct ibmveth_buff_pool,
@@ -1471,8 +1496,8 @@ static ssize_t veth_pool_show(struct kobject * kobj,
return 0;
}
-static ssize_t veth_pool_store(struct kobject * kobj, struct attribute * attr,
-const char * buf, size_t count)
+static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
{
struct ibmveth_buff_pool *pool = container_of(kobj,
struct ibmveth_buff_pool,
@@ -1486,8 +1511,9 @@ const char * buf, size_t count)
if (attr == &veth_active_attr) {
if (value && !pool->active) {
if (netif_running(netdev)) {
- if(ibmveth_alloc_buffer_pool(pool)) {
- ibmveth_error_printk("unable to alloc pool\n");
+ if (ibmveth_alloc_buffer_pool(pool)) {
+ netdev_err(netdev,
+ "unable to alloc pool\n");
return -ENOMEM;
}
pool->active = 1;
@@ -1496,14 +1522,15 @@ const char * buf, size_t count)
adapter->pool_config = 0;
if ((rc = ibmveth_open(netdev)))
return rc;
- } else
+ } else {
pool->active = 1;
+ }
} else if (!value && pool->active) {
int mtu = netdev->mtu + IBMVETH_BUFF_OH;
int i;
/* Make sure there is a buffer pool with buffers that
can hold a packet of the size of the MTU */
- for (i = 0; i < IbmVethNumBufferPools; i++) {
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
if (pool == &adapter->rx_buff_pool[i])
continue;
if (!adapter->rx_buff_pool[i].active)
@@ -1512,8 +1539,8 @@ const char * buf, size_t count)
break;
}
- if (i == IbmVethNumBufferPools) {
- ibmveth_error_printk("no active pool >= MTU\n");
+ if (i == IBMVETH_NUM_BUFF_POOLS) {
+ netdev_err(netdev, "no active pool >= MTU\n");
return -EPERM;
}
@@ -1528,9 +1555,9 @@ const char * buf, size_t count)
pool->active = 0;
}
} else if (attr == &veth_num_attr) {
- if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
+ if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
return -EINVAL;
- else {
+ } else {
if (netif_running(netdev)) {
adapter->pool_config = 1;
ibmveth_close(netdev);
@@ -1538,13 +1565,14 @@ const char * buf, size_t count)
pool->size = value;
if ((rc = ibmveth_open(netdev)))
return rc;
- } else
+ } else {
pool->size = value;
+ }
}
} else if (attr == &veth_size_attr) {
- if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE)
+ if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
return -EINVAL;
- else {
+ } else {
if (netif_running(netdev)) {
adapter->pool_config = 1;
ibmveth_close(netdev);
@@ -1552,8 +1580,9 @@ const char * buf, size_t count)
pool->buff_size = value;
if ((rc = ibmveth_open(netdev)))
return rc;
- } else
+ } else {
pool->buff_size = value;
+ }
}
}
@@ -1563,16 +1592,16 @@ const char * buf, size_t count)
}
-#define ATTR(_name, _mode) \
- struct attribute veth_##_name##_attr = { \
- .name = __stringify(_name), .mode = _mode, \
- };
+#define ATTR(_name, _mode) \
+ struct attribute veth_##_name##_attr = { \
+ .name = __stringify(_name), .mode = _mode, \
+ };
static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);
-static struct attribute * veth_pool_attrs[] = {
+static struct attribute *veth_pool_attrs[] = {
&veth_active_attr,
&veth_num_attr,
&veth_size_attr,
@@ -1597,7 +1626,7 @@ static int ibmveth_resume(struct device *dev)
return 0;
}
-static struct vio_device_id ibmveth_device_table[] __devinitdata= {
+static struct vio_device_id ibmveth_device_table[] __devinitdata = {
{ "network", "IBM,l-lan"},
{ "", "" }
};
@@ -1621,9 +1650,8 @@ static struct vio_driver ibmveth_driver = {
static int __init ibmveth_module_init(void)
{
- ibmveth_printk("%s: %s %s\n", ibmveth_driver_name, ibmveth_driver_string, ibmveth_driver_version);
-
- ibmveth_proc_register_driver();
+ printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
+ ibmveth_driver_string, ibmveth_driver_version);
return vio_register_driver(&ibmveth_driver);
}
@@ -1631,7 +1659,6 @@ static int __init ibmveth_module_init(void)
static void __exit ibmveth_module_exit(void)
{
vio_unregister_driver(&ibmveth_driver);
- ibmveth_proc_unregister_driver();
}
module_init(ibmveth_module_init);
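For context on the sysfs hunks above: the driver hangs a bare struct attribute off each buffer pool and recovers the pool in show/store through container_of(). A minimal sketch of the pattern (abbreviated struct, hypothetical names, not the driver's full layout):

    #include <linux/kobject.h>

    struct pool_example {
            struct kobject kobj;    /* embedded kobject registered with sysfs */
            int active;
    };

    static ssize_t pool_example_show(struct kobject *kobj,
                                     struct attribute *attr, char *buf)
    {
            /* recover the containing pool from its embedded kobject */
            struct pool_example *p =
                    container_of(kobj, struct pool_example, kobj);

            return sprintf(buf, "%d\n", p->active);
    }

The store path in veth_pool_store above performs the same recovery before touching adapter state.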
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index ec76ace66c6b..43a794fab9ff 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -1,26 +1,28 @@
-/**************************************************************************/
-/* */
-/* IBM eServer i/[Series Virtual Ethernet Device Driver */
-/* Copyright (C) 2003 IBM Corp. */
-/* Dave Larson (larson1@us.ibm.com) */
-/* Santiago Leon (santil@us.ibm.com) */
-/* */
-/* This program is free software; you can redistribute it and/or modify */
-/* it under the terms of the GNU General Public License as published by */
-/* the Free Software Foundation; either version 2 of the License, or */
-/* (at your option) any later version. */
-/* */
-/* This program is distributed in the hope that it will be useful, */
-/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
-/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
-/* GNU General Public License for more details. */
-/* */
-/* You should have received a copy of the GNU General Public License */
-/* along with this program; if not, write to the Free Software */
-/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 */
-/* USA */
-/* */
-/**************************************************************************/
+/*
+ * IBM Power Virtual Ethernet Device Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2003, 2010
+ *
+ * Authors: Dave Larson <larson1@us.ibm.com>
+ * Santiago Leon <santil@linux.vnet.ibm.com>
+ * Brian King <brking@linux.vnet.ibm.com>
+ * Robert Jennings <rcj@linux.vnet.ibm.com>
+ * Anton Blanchard <anton@au.ibm.com>
+ */
#ifndef _IBMVETH_H
#define _IBMVETH_H
@@ -92,17 +94,17 @@ static inline long h_illan_attributes(unsigned long unit_address,
#define h_change_logical_lan_mac(ua, mac) \
plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac)
-#define IbmVethNumBufferPools 5
+#define IBMVETH_NUM_BUFF_POOLS 5
#define IBMVETH_IO_ENTITLEMENT_DEFAULT 4243456 /* MTU of 1500 needs 4.2Mb */
#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
-#define IBMVETH_MAX_MTU 68
+#define IBMVETH_MIN_MTU 68
#define IBMVETH_MAX_POOL_COUNT 4096
#define IBMVETH_BUFF_LIST_SIZE 4096
#define IBMVETH_FILT_LIST_SIZE 4096
#define IBMVETH_MAX_BUF_SIZE (1024 * 128)
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
-static int pool_count[] = { 256, 768, 256, 256, 256 };
+static int pool_count[] = { 256, 512, 256, 256, 256 };
static int pool_active[] = { 1, 1, 0, 0, 0};
#define IBM_VETH_INVALID_MAP ((u16)0xffff)
@@ -142,13 +144,15 @@ struct ibmveth_adapter {
void * filter_list_addr;
dma_addr_t buffer_list_dma;
dma_addr_t filter_list_dma;
- struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools];
+ struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
struct ibmveth_rx_q rx_queue;
int pool_config;
int rx_csum;
void *bounce_buffer;
dma_addr_t bounce_buffer_dma;
+ u64 fw_ipv6_csum_support;
+ u64 fw_ipv4_csum_support;
/* adapter specific stats */
u64 replenish_task_cycles;
u64 replenish_no_mem;
@@ -158,7 +162,6 @@ struct ibmveth_adapter {
u64 rx_no_buffer;
u64 tx_map_failed;
u64 tx_send_failed;
- spinlock_t stats_lock;
};
struct ibmveth_buf_desc_fields {
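A quick sanity check on the pool tables just above; this helper is hypothetical and only restates the arrays to show the implied default RX buffer footprint:

    /* Hypothetical: bytes implied by pool_size/pool_count/pool_active. */
    static unsigned long example_pool_bytes(void)
    {
            static const int size[]   = { 512, 2048, 16384, 32768, 65536 };
            static const int count[]  = { 256, 512, 256, 256, 256 };
            static const int active[] = { 1, 1, 0, 0, 0 };
            unsigned long total = 0;
            int i;

            for (i = 0; i < 5; i++)
                    if (active[i])
                            total += (unsigned long)size[i] * count[i];

            return total;   /* ~1.1 MB for the two default-active pools */
    }

Enabling all five pools raises the same sum to roughly 29 MB, which helps explain why only the two small pools default to active.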
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 6e63d9a7fc75..44e0ff1494e0 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -143,7 +143,7 @@ struct igb_buffer {
u16 next_to_watch;
unsigned int bytecount;
u16 gso_segs;
- union skb_shared_tx shtx;
+ u8 tx_flags;
u8 mapped_as_page;
};
/* RX */
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 9b4e5895f5f9..c4d861b557ca 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1888,9 +1888,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
goto err_eeprom;
}
- setup_timer(&adapter->watchdog_timer, &igb_watchdog,
+ setup_timer(&adapter->watchdog_timer, igb_watchdog,
(unsigned long) adapter);
- setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
+ setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
(unsigned long) adapter);
INIT_WORK(&adapter->reset_task, igb_reset_task);
@@ -3954,7 +3954,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
}
tx_ring->buffer_info[i].skb = skb;
- tx_ring->buffer_info[i].shtx = skb_shinfo(skb)->tx_flags;
+ tx_ring->buffer_info[i].tx_flags = skb_shinfo(skb)->tx_flags;
/* multiply data chunks by size of headers */
tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len;
tx_ring->buffer_info[i].gso_segs = gso_segs;
@@ -4088,7 +4088,6 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
u32 tx_flags = 0;
u16 first;
u8 hdr_len = 0;
- union skb_shared_tx *shtx = skb_tx(skb);
/* need: 1 descriptor per page,
* + 2 desc gap to keep tail from touching head,
@@ -4100,8 +4099,8 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- if (unlikely(shtx->hardware)) {
- shtx->in_progress = 1;
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IGB_TX_FLAGS_TSTAMP;
}
@@ -5319,7 +5318,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *bu
u64 regval;
/* if skb does not support hw timestamp or TX stamp not valid exit */
- if (likely(!buffer_info->shtx.hardware) ||
+ if (likely(!(buffer_info->tx_flags & SKBTX_HW_TSTAMP)) ||
!(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
return;
@@ -5456,7 +5455,7 @@ static void igb_receive_skb(struct igb_q_vector *q_vector,
static inline void igb_rx_checksum_adv(struct igb_ring *ring,
u32 status_err, struct sk_buff *skb)
{
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
/* Ignore Checksum bit is set or checksum is disabled through ethtool */
if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
@@ -5500,7 +5499,7 @@ static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
* values must belong to this one here and therefore we don't need to
* compare any of the additional attributes stored for it.
*
- * If nothing went wrong, then it should have a skb_shared_tx that we
+ * If nothing went wrong, then it should have a shared tx_flags that we
* can turn into a skb_shared_hwtstamps.
*/
if (staterr & E1000_RXDADV_STAT_TSIP) {
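The shtx-to-tx_flags conversion above tracks the core change that folded union skb_shared_tx into plain SKBTX_* bits in skb_shinfo()->tx_flags. A sketch of the resulting transmit-side flow (illustrative, not driver code):

    #include <linux/skbuff.h>

    static void example_tx_tstamp(struct sk_buff *skb, u8 *stored)
    {
            /* stack asked for a hardware timestamp: mark it in progress */
            if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
                    skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

            /* keep a per-buffer copy for the completion path */
            *stored = skb_shinfo(skb)->tx_flags;
    }

The completion side (igb_tx_hwtstamp above) then tests the stored copy against SKBTX_HW_TSTAMP instead of the old shtx.hardware bit.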
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index c539f7c9c3e0..c7fab80d2490 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -103,7 +103,7 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter,
static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
u32 status_err, struct sk_buff *skb)
{
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
/* Ignore Checksum bit is set or checksum is disabled through ethtool */
if ((status_err & E1000_RXD_STAT_IXSM) ||
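skb_checksum_none_assert(), used throughout these conversions, is not a store: a freshly allocated skb already has ip_summed == CHECKSUM_NONE, so the helper only documents that assumption and, on DEBUG builds, verifies it. Its definition is roughly:

    static inline void skb_checksum_none_assert(struct sk_buff *skb)
    {
    #ifdef DEBUG
            BUG_ON(skb->ip_summed != CHECKSUM_NONE);
    #endif
    }

On non-DEBUG builds it compiles away, so the mechanical replacements in ipg, iseries_veth, ixgb and ixgbe below simply drop a redundant write.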
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 0b3f6df5cff7..c8ee8d28767b 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -827,7 +827,7 @@ static void ioc3_mii_start(struct ioc3_private *ip)
{
ip->ioc3_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */
ip->ioc3_timer.data = (unsigned long) ip;
- ip->ioc3_timer.function = &ioc3_timer;
+ ip->ioc3_timer.function = ioc3_timer;
add_timer(&ip->ioc3_timer);
}
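Removing the '&' before timer callbacks, here and in the setup_timer() hunks earlier, is purely stylistic: a function designator decays to a pointer either way. For comparison, the same start sequence via the helpers would read (sketch):

    /* equivalent to the open-coded field assignments plus add_timer() */
    setup_timer(&ip->ioc3_timer, ioc3_timer, (unsigned long)ip);
    mod_timer(&ip->ioc3_timer, jiffies + (12 * HZ) / 10);   /* 1.2 sec. */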
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 72e3d2da9e9f..dc0198092343 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -1213,7 +1213,7 @@ static void ipg_nic_rx_with_start_and_end(struct net_device *dev,
skb_put(skb, framelen);
skb->protocol = eth_type_trans(skb, dev);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
netif_rx(skb);
sp->rx_buff[entry] = NULL;
}
@@ -1278,7 +1278,7 @@ static void ipg_nic_rx_with_end(struct net_device *dev,
jumbo->skb->protocol =
eth_type_trans(jumbo->skb, dev);
- jumbo->skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(jumbo->skb);
netif_rx(jumbo->skb);
}
}
@@ -1476,7 +1476,7 @@ static int ipg_nic_rx(struct net_device *dev)
* IP/TCP/UDP frame was received. Let the
* upper layer decide.
*/
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
/* Hand off frame for higher layer processing.
* The function netif_rx() releases the sk_buff
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 5b1036ac38d7..74b20f179cea 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -734,7 +734,7 @@ static int mcs_net_open(struct net_device *netdev)
}
if (!mcs_setup_urbs(mcs))
- goto error3;
+ goto error3;
ret = mcs_receive_start(mcs);
if (ret)
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index edd5666f0ffb..9e3f4f54281d 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -748,7 +748,6 @@ static int __devinit sh_irda_probe(struct platform_device *pdev)
struct net_device *ndev;
struct sh_irda_self *self;
struct resource *res;
- char clk_name[8];
int irq;
int err = -ENOMEM;
@@ -775,10 +774,9 @@ static int __devinit sh_irda_probe(struct platform_device *pdev)
if (err)
goto err_mem_2;
- snprintf(clk_name, sizeof(clk_name), "irda%d", pdev->id);
- self->clk = clk_get(&pdev->dev, clk_name);
+ self->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(self->clk)) {
- dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
+ dev_err(&pdev->dev, "cannot get irda clock\n");
goto err_mem_3;
}
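Passing a NULL con_id to clk_get() makes the clkdev lookup match on the struct device alone, so the driver no longer needs to rebuild an "irda%d" name per instance. The pattern, assuming the platform registers its clock against the device (which this change relies on):

    struct clk *clk;

    clk = clk_get(&pdev->dev, NULL);        /* match by device, not name */
    if (IS_ERR(clk)) {
            dev_err(&pdev->dev, "cannot get irda clock\n");
            return PTR_ERR(clk);
    }
    clk_enable(clk);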
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index b0a6cd815be1..67c0ad42d818 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -1182,12 +1182,13 @@ F01_E */
skb = dev_alloc_skb(len + 1 - 4);
/*
- * if frame size,data ptr,or skb ptr are wrong ,the get next
+ * if frame size, data ptr, or skb ptr are wrong, then get next
* entry.
*/
if ((skb == NULL) || (skb->data == NULL) ||
(self->rx_buff.data == NULL) || (len < 6)) {
self->netdev->stats.rx_dropped++;
+ kfree_skb(skb);
return TRUE;
}
skb_reserve(skb, 1);
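The added kfree_skb() plugs a leak on the drop path: when the frame is rejected for any reason other than the allocation itself failing, the fresh skb used to be abandoned. Since kfree_skb() is NULL-safe, one call covers every arm of the condition:

    if (!skb || !skb->data || !self->rx_buff.data || len < 6) {
            self->netdev->stats.rx_dropped++;
            kfree_skb(skb);         /* no-op when skb == NULL */
            return TRUE;
    }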
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index ba1de5973fb2..8df645e78f2e 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -1524,7 +1524,7 @@ static void veth_receive(struct veth_lpar_connection *cnx,
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, dev);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
netif_rx(skb); /* send it up */
dev->stats.rx_packets++;
dev->stats.rx_bytes += length;
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 45fc89b9ba64..c2f6e71e1181 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -470,7 +470,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);
init_timer(&adapter->watchdog_timer);
- adapter->watchdog_timer.function = &ixgb_watchdog;
+ adapter->watchdog_timer.function = ixgb_watchdog;
adapter->watchdog_timer.data = (unsigned long)adapter;
INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);
@@ -1905,7 +1905,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
*/
if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
(!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
return;
}
@@ -1913,7 +1913,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
/* now look at the TCP checksum error bit */
if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
/* let the stack verify checksum errors */
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
adapter->hw_csum_rx_error++;
} else {
/* TCP checksum is good */
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 9e15eb93860e..5cebc3755b64 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -69,15 +69,20 @@
#define IXGBE_MAX_FCPAUSE 0xFFFF
/* Supported Rx Buffer Sizes */
-#define IXGBE_RXBUFFER_64 64 /* Used for packet split */
-#define IXGBE_RXBUFFER_128 128 /* Used for packet split */
-#define IXGBE_RXBUFFER_256 256 /* Used for packet split */
+#define IXGBE_RXBUFFER_512 512 /* Used for packet split */
#define IXGBE_RXBUFFER_2048 2048
#define IXGBE_RXBUFFER_4096 4096
#define IXGBE_RXBUFFER_8192 8192
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
-#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
+/*
+ * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 2 more, and skb_shared_info adds an additional 384 bytes more;
+ * this adds up to 512 bytes of extra data, meaning the smallest allocation
+ * we could have is 1K.
+ * i.e. RXBUFFER_512 --> size-1024 slab
+ */
+#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_512
#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
@@ -251,11 +256,11 @@ struct ixgbe_q_vector {
(R)->next_to_clean - (R)->next_to_use - 1)
#define IXGBE_RX_DESC_ADV(R, i) \
- (&(((union ixgbe_adv_rx_desc *)((R).desc))[i]))
+ (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBE_TX_DESC_ADV(R, i) \
- (&(((union ixgbe_adv_tx_desc *)((R).desc))[i]))
+ (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
#define IXGBE_TX_CTXTDESC_ADV(R, i) \
- (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
+ (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
#ifdef IXGBE_FCOE
@@ -448,9 +453,20 @@ extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *)
extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
+extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
+extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
+extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
+ struct net_device *,
+ struct ixgbe_adapter *,
+ struct ixgbe_ring *);
+extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *,
+ struct ixgbe_tx_buffer *);
+extern void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring,
+ int cleaned_count);
extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
extern int ethtool_ioctl(struct ifreq *ifr);
extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
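Spelling out the allocation arithmetic behind the new IXGBE_RX_HDR_SIZE comment (the 384-byte skb_shared_info figure is approximate and config-dependent):

    /*
     *   512  header buffer (IXGBE_RXBUFFER_512)
     * +  64  netdev_alloc_skb() headroom
     * +   2  NET_IP_ALIGN
     * + 384  struct skb_shared_info (approx.)
     * -----
     *   962  -> rounds up to the kmalloc size-1024 slab
     */

Since the 1K allocation is unavoidable, a 512-byte header buffer uses it fully, which is why the old 64/128/256 defines collapse into a single value.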
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 3e06a61da921..e80657c75506 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1910,56 +1910,27 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
(dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
/*
- * Program the relevant mask registers. If src/dst_port or src/dst_addr
- * are zero, then assume a full mask for that field. Also assume that
- * a VLAN of 0 is unspecified, so mask that out as well. L4type
- * cannot be masked out in this implementation.
+ * Program the relevant mask registers. L4type cannot be
+ * masked out in this implementation.
*
* This also assumes IPv4 only. IPv6 masking isn't supported at this
* point in time.
*/
- if (src_ipv4 == 0)
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xffffffff);
- else
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
-
- if (dst_ipv4 == 0)
- IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xffffffff);
- else
- IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
case IXGBE_ATR_L4TYPE_TCP:
- if (src_port == 0)
- IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xffff);
- else
- IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
- input_masks->src_port_mask);
-
- if (dst_port == 0)
- IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
- (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
- (0xffff << 16)));
- else
- IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
- (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
- (input_masks->dst_port_mask << 16)));
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, input_masks->src_port_mask);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
+ (input_masks->dst_port_mask << 16)));
break;
case IXGBE_ATR_L4TYPE_UDP:
- if (src_port == 0)
- IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xffff);
- else
- IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
- input_masks->src_port_mask);
-
- if (dst_port == 0)
- IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
- (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
- (0xffff << 16)));
- else
- IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
- (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
- (input_masks->src_port_mask << 16)));
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, input_masks->src_port_mask);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
+ (input_masks->dst_port_mask << 16)));
break;
default:
/* this already would have failed above */
@@ -1967,11 +1938,11 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
}
/* Program the last mask register, FDIRM */
- if (input_masks->vlan_id_mask || !vlan_id)
+ if (input_masks->vlan_id_mask)
/* Mask both VLAN and VLANP - bits 0 and 1 */
fdirm |= 0x3;
- if (input_masks->data_mask || !flex_bytes)
+ if (input_masks->data_mask)
/* Flex bytes need masking, so mask the whole thing - bit 4 */
fdirm |= 0x10;
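The port-mask programming above packs two 16-bit masks into one 32-bit register: source in bits 15:0, destination in bits 31:16. An equivalent single-write sketch of the TCP case:

    u32 fdirtcpm;

    fdirtcpm  = input_masks->src_port_mask;         /* bits 15:0  */
    fdirtcpm |= input_masks->dst_port_mask << 16;   /* bits 31:16 */
    IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, fdirtcpm);

The behavioral change in this hunk is that zero-valued fields are no longer silently promoted to full masks; the caller's masks are programmed verbatim.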
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index dcebc82c6f4d..25ef8b197373 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -820,16 +820,19 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
char firmware_version[32];
- strncpy(drvinfo->driver, ixgbe_driver_name, 32);
- strncpy(drvinfo->version, ixgbe_driver_version, 32);
-
- sprintf(firmware_version, "%d.%d-%d",
- (adapter->eeprom_version & 0xF000) >> 12,
- (adapter->eeprom_version & 0x0FF0) >> 4,
- adapter->eeprom_version & 0x000F);
-
- strncpy(drvinfo->fw_version, firmware_version, 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strncpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
+ strncpy(drvinfo->version, ixgbe_driver_version,
+ sizeof(drvinfo->version));
+
+ snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
+ (adapter->eeprom_version & 0xF000) >> 12,
+ (adapter->eeprom_version & 0x0FF0) >> 4,
+ adapter->eeprom_version & 0x000F);
+
+ strncpy(drvinfo->fw_version, firmware_version,
+ sizeof(drvinfo->fw_version));
+ strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = IXGBE_STATS_LEN;
drvinfo->testinfo_len = IXGBE_TEST_LEN;
drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
@@ -1435,9 +1438,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
struct ixgbe_hw *hw = &adapter->hw;
- struct pci_dev *pdev = adapter->pdev;
u32 reg_ctl;
- int i;
/* shut down the DMA engines now so they can be reinitialized later */
@@ -1445,14 +1446,15 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
reg_ctl &= ~IXGBE_RXCTRL_RXEN;
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
- reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(0));
+ reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx));
reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(0), reg_ctl);
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx), reg_ctl);
/* now Tx */
- reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(0));
+ reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(0), reg_ctl);
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
+
if (hw->mac.type == ixgbe_mac_82599EB) {
reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
reg_ctl &= ~IXGBE_DMATXCTL_TE;
@@ -1461,221 +1463,57 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
ixgbe_reset(adapter);
- if (tx_ring->desc && tx_ring->tx_buffer_info) {
- for (i = 0; i < tx_ring->count; i++) {
- struct ixgbe_tx_buffer *buf =
- &(tx_ring->tx_buffer_info[i]);
- if (buf->dma)
- dma_unmap_single(&pdev->dev, buf->dma,
- buf->length, DMA_TO_DEVICE);
- if (buf->skb)
- dev_kfree_skb(buf->skb);
- }
- }
-
- if (rx_ring->desc && rx_ring->rx_buffer_info) {
- for (i = 0; i < rx_ring->count; i++) {
- struct ixgbe_rx_buffer *buf =
- &(rx_ring->rx_buffer_info[i]);
- if (buf->dma)
- dma_unmap_single(&pdev->dev, buf->dma,
- IXGBE_RXBUFFER_2048,
- DMA_FROM_DEVICE);
- if (buf->skb)
- dev_kfree_skb(buf->skb);
- }
- }
-
- if (tx_ring->desc) {
- dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
- tx_ring->dma);
- tx_ring->desc = NULL;
- }
- if (rx_ring->desc) {
- dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
- rx_ring->dma);
- rx_ring->desc = NULL;
- }
-
- kfree(tx_ring->tx_buffer_info);
- tx_ring->tx_buffer_info = NULL;
- kfree(rx_ring->rx_buffer_info);
- rx_ring->rx_buffer_info = NULL;
+ ixgbe_free_tx_resources(adapter, &adapter->test_tx_ring);
+ ixgbe_free_rx_resources(adapter, &adapter->test_rx_ring);
}
static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
{
struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
- struct pci_dev *pdev = adapter->pdev;
u32 rctl, reg_data;
- int i, ret_val;
+ int ret_val;
+ int err;
/* Setup Tx descriptor ring and Tx buffers */
+ tx_ring->count = IXGBE_DEFAULT_TXD;
+ tx_ring->queue_index = 0;
+ tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
+ tx_ring->numa_node = adapter->node;
- if (!tx_ring->count)
- tx_ring->count = IXGBE_DEFAULT_TXD;
-
- tx_ring->tx_buffer_info = kcalloc(tx_ring->count,
- sizeof(struct ixgbe_tx_buffer),
- GFP_KERNEL);
- if (!(tx_ring->tx_buffer_info)) {
- ret_val = 1;
- goto err_nomem;
- }
-
- tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
- tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
- &tx_ring->dma, GFP_KERNEL);
- if (!(tx_ring->desc)) {
- ret_val = 2;
- goto err_nomem;
- }
- tx_ring->next_to_use = tx_ring->next_to_clean = 0;
-
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAL(0),
- ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAH(0),
- ((u64) tx_ring->dma >> 32));
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDLEN(0),
- tx_ring->count * sizeof(union ixgbe_adv_tx_desc));
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDH(0), 0);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), 0);
-
- reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
- reg_data |= IXGBE_HLREG0_TXPADEN;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
+ err = ixgbe_setup_tx_resources(adapter, tx_ring);
+ if (err)
+ return 1;
if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
reg_data |= IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
}
- reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(0));
- reg_data |= IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(0), reg_data);
-
- for (i = 0; i < tx_ring->count; i++) {
- union ixgbe_adv_tx_desc *desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
- struct sk_buff *skb;
- unsigned int size = 1024;
-
- skb = alloc_skb(size, GFP_KERNEL);
- if (!skb) {
- ret_val = 3;
- goto err_nomem;
- }
- skb_put(skb, size);
- tx_ring->tx_buffer_info[i].skb = skb;
- tx_ring->tx_buffer_info[i].length = skb->len;
- tx_ring->tx_buffer_info[i].dma =
- dma_map_single(&pdev->dev, skb->data, skb->len,
- DMA_TO_DEVICE);
- desc->read.buffer_addr =
- cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
- desc->read.cmd_type_len = cpu_to_le32(skb->len);
- desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD_EOP |
- IXGBE_TXD_CMD_IFCS |
- IXGBE_TXD_CMD_RS);
- desc->read.olinfo_status = 0;
- if (adapter->hw.mac.type == ixgbe_mac_82599EB)
- desc->read.olinfo_status |=
- (skb->len << IXGBE_ADVTXD_PAYLEN_SHIFT);
- }
+ ixgbe_configure_tx_ring(adapter, tx_ring);
/* Setup Rx Descriptor ring and Rx buffers */
-
- if (!rx_ring->count)
- rx_ring->count = IXGBE_DEFAULT_RXD;
-
- rx_ring->rx_buffer_info = kcalloc(rx_ring->count,
- sizeof(struct ixgbe_rx_buffer),
- GFP_KERNEL);
- if (!(rx_ring->rx_buffer_info)) {
+ rx_ring->count = IXGBE_DEFAULT_RXD;
+ rx_ring->queue_index = 0;
+ rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
+ rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048;
+ rx_ring->numa_node = adapter->node;
+
+ err = ixgbe_setup_rx_resources(adapter, rx_ring);
+ if (err) {
ret_val = 4;
goto err_nomem;
}
- rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
- rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
- &rx_ring->dma, GFP_KERNEL);
- if (!(rx_ring->desc)) {
- ret_val = 5;
- goto err_nomem;
- }
- rx_ring->next_to_use = rx_ring->next_to_clean = 0;
-
rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAL(0),
- ((u64)rx_ring->dma & 0xFFFFFFFF));
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAH(0),
- ((u64) rx_ring->dma >> 32));
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDLEN(0), rx_ring->size);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDH(0), 0);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), 0);
-
- reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
- reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);
-
- reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
- reg_data &= ~IXGBE_HLREG0_LPBK;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
- reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RDRXCTL);
-#define IXGBE_RDRXCTL_RDMTS_MASK 0x00000003 /* Receive Descriptor Minimum
- Threshold Size mask */
- reg_data &= ~IXGBE_RDRXCTL_RDMTS_MASK;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDRXCTL, reg_data);
-
- reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MCSTCTRL);
-#define IXGBE_MCSTCTRL_MO_MASK 0x00000003 /* Multicast Offset mask */
- reg_data &= ~IXGBE_MCSTCTRL_MO_MASK;
- reg_data |= adapter->hw.mac.mc_filter_type;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_MCSTCTRL, reg_data);
-
- reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(0));
- reg_data |= IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- int j = adapter->rx_ring[0]->reg_idx;
- u32 k;
- for (k = 0; k < 10; k++) {
- if (IXGBE_READ_REG(&adapter->hw,
- IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
- break;
- else
- msleep(1);
- }
- }
+ ixgbe_configure_rx_ring(adapter, rx_ring);
rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
- for (i = 0; i < rx_ring->count; i++) {
- union ixgbe_adv_rx_desc *rx_desc =
- IXGBE_RX_DESC_ADV(*rx_ring, i);
- struct sk_buff *skb;
-
- skb = alloc_skb(IXGBE_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
- if (!skb) {
- ret_val = 6;
- goto err_nomem;
- }
- skb_reserve(skb, NET_IP_ALIGN);
- rx_ring->rx_buffer_info[i].skb = skb;
- rx_ring->rx_buffer_info[i].dma =
- dma_map_single(&pdev->dev, skb->data,
- IXGBE_RXBUFFER_2048, DMA_FROM_DEVICE);
- rx_desc->read.pkt_addr =
- cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
- memset(skb->data, 0x00, skb->len);
- }
-
return 0;
err_nomem:
@@ -1689,16 +1527,21 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
u32 reg_data;
/* right now we only support MAC loopback in the driver */
-
- /* Setup MAC loopback */
reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
+ /* Setup MAC loopback */
reg_data |= IXGBE_HLREG0_LPBK;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
+ reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);
+
reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC);
reg_data &= ~IXGBE_AUTOC_LMS_MASK;
reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+ msleep(10);
/* Disable Atlas Tx lanes; re-enabled in reset path */
if (hw->mac.type == ixgbe_mac_82598EB) {
@@ -1756,15 +1599,81 @@ static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
return 13;
}
+static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring,
+ struct ixgbe_ring *tx_ring,
+ unsigned int size)
+{
+ union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbe_rx_buffer *rx_buffer_info;
+ struct ixgbe_tx_buffer *tx_buffer_info;
+ const int bufsz = rx_ring->rx_buf_len;
+ u32 staterr;
+ u16 rx_ntc, tx_ntc, count = 0;
+
+ /* initialize next to clean and descriptor values */
+ rx_ntc = rx_ring->next_to_clean;
+ tx_ntc = tx_ring->next_to_clean;
+ rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+ while (staterr & IXGBE_RXD_STAT_DD) {
+ /* check Rx buffer */
+ rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
+
+ /* unmap Rx buffer, will be remapped by alloc_rx_buffers */
+ dma_unmap_single(&adapter->pdev->dev,
+ rx_buffer_info->dma,
+ bufsz,
+ DMA_FROM_DEVICE);
+ rx_buffer_info->dma = 0;
+
+ /* verify contents of skb */
+ if (!ixgbe_check_lbtest_frame(rx_buffer_info->skb, size))
+ count++;
+
+ /* unmap buffer on Tx side */
+ tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
+ ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+
+ /* increment Rx/Tx next to clean counters */
+ rx_ntc++;
+ if (rx_ntc == rx_ring->count)
+ rx_ntc = 0;
+ tx_ntc++;
+ if (tx_ntc == tx_ring->count)
+ tx_ntc = 0;
+
+ /* fetch next descriptor */
+ rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ }
+
+ /* re-map buffers to ring, store next to clean values */
+ ixgbe_alloc_rx_buffers(adapter, rx_ring, count);
+ rx_ring->next_to_clean = rx_ntc;
+ tx_ring->next_to_clean = tx_ntc;
+
+ return count;
+}
+
static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
{
struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
- struct pci_dev *pdev = adapter->pdev;
- int i, j, k, l, lc, good_cnt, ret_val = 0;
- unsigned long time;
+ int i, j, lc, good_cnt, ret_val = 0;
+ unsigned int size = 1024;
+ netdev_tx_t tx_ret_val;
+ struct sk_buff *skb;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), rx_ring->count - 1);
+ /* allocate test skb */
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb)
+ return 11;
+
+ /* place data into test skb */
+ ixgbe_create_lbtest_frame(skb, size);
+ skb_put(skb, size);
/*
* Calculate the loop count based on the largest descriptor ring
@@ -1777,54 +1686,40 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
else
lc = ((rx_ring->count / 64) * 2) + 1;
- k = l = 0;
for (j = 0; j <= lc; j++) {
- for (i = 0; i < 64; i++) {
- ixgbe_create_lbtest_frame(
- tx_ring->tx_buffer_info[k].skb,
- 1024);
- dma_sync_single_for_device(&pdev->dev,
- tx_ring->tx_buffer_info[k].dma,
- tx_ring->tx_buffer_info[k].length,
- DMA_TO_DEVICE);
- if (unlikely(++k == tx_ring->count))
- k = 0;
- }
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), k);
- msleep(200);
- /* set the start time for the receive */
- time = jiffies;
+ /* reset count of good packets */
good_cnt = 0;
- do {
- /* receive the sent packets */
- dma_sync_single_for_cpu(&pdev->dev,
- rx_ring->rx_buffer_info[l].dma,
- IXGBE_RXBUFFER_2048,
- DMA_FROM_DEVICE);
- ret_val = ixgbe_check_lbtest_frame(
- rx_ring->rx_buffer_info[l].skb, 1024);
- if (!ret_val)
+
+ /* place 64 packets on the transmit queue */
+ for (i = 0; i < 64; i++) {
+ skb_get(skb);
+ tx_ret_val = ixgbe_xmit_frame_ring(skb,
+ adapter->netdev,
+ adapter,
+ tx_ring);
+ if (tx_ret_val == NETDEV_TX_OK)
good_cnt++;
- if (++l == rx_ring->count)
- l = 0;
- /*
- * time + 20 msecs (200 msecs on 2.4) is more than
- * enough time to complete the receives, if it's
- * exceeded, break and error off
- */
- } while (good_cnt < 64 && jiffies < (time + 20));
+ }
+
if (good_cnt != 64) {
- /* ret_val is the same as mis-compare */
- ret_val = 13;
+ ret_val = 12;
break;
}
- if (jiffies >= (time + 20)) {
- /* Error code for time out error */
- ret_val = 14;
+
+ /* allow 200 milliseconds for packets to go from Tx to Rx */
+ msleep(200);
+
+ good_cnt = ixgbe_clean_test_rings(adapter, rx_ring,
+ tx_ring, size);
+ if (good_cnt != 64) {
+ ret_val = 13;
break;
}
}
+ /* free the original skb */
+ kfree_skb(skb);
+
return ret_val;
}
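The rewritten loopback test transmits one shared skb 64 times per iteration (skb_get() bumps the refcount so ixgbe_xmit_frame_ring() can consume it repeatedly) and validates the returned frames in ixgbe_clean_test_rings(). For reference, the builder/checker pair follows the long-standing e1000-family pattern; roughly (offsets illustrative):

    static void example_create_lbtest_frame(struct sk_buff *skb,
                                            unsigned int frame_size)
    {
            memset(skb->data, 0xFF, frame_size);
            frame_size &= ~1;
            memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
            skb->data[frame_size / 2 + 10] = 0xBE;
            skb->data[frame_size / 2 + 12] = 0xAF;
    }

ixgbe_check_lbtest_frame() then tests those marker bytes and returns 13 on mismatch, which is where the ret_val = 13 above comes from.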
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 072327c5e41a..2f1de8b90f9e 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -304,12 +304,13 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
if (!ixgbe_rx_is_fcoe(rx_desc))
goto ddp_out;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
sterr = le32_to_cpu(rx_desc->wb.upper.status_error);
fcerr = (sterr & IXGBE_RXDADV_ERR_FCERR);
fceofe = (sterr & IXGBE_RXDADV_ERR_FCEOFE);
if (fcerr == IXGBE_FCERR_BADCRC)
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
fh = (struct fc_frame_header *)(skb->data +
@@ -471,7 +472,7 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
/* write context desc */
i = tx_ring->next_to_use;
- context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+ context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index e32af434cc9d..d03eef96c0ba 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -50,7 +50,7 @@
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
- "Intel(R) 10 Gigabit PCI Express Network Driver";
+ "Intel(R) 10 Gigabit PCI Express Network Driver";
#define DRV_VERSION "2.0.84-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
@@ -120,7 +120,7 @@ MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
- void *p);
+ void *p);
static struct notifier_block dca_notifier = {
.notifier_call = ixgbe_notify_dca,
.next = NULL,
@@ -131,8 +131,8 @@ static struct notifier_block dca_notifier = {
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
-MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
- "per physical function");
+MODULE_PARM_DESC(max_vfs,
+ "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
@@ -169,8 +169,8 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
/* take a breather then clean up driver data */
msleep(100);
- if (adapter->vfinfo)
- kfree(adapter->vfinfo);
+
+ kfree(adapter->vfinfo);
adapter->vfinfo = NULL;
adapter->num_vfs = 0;
@@ -282,17 +282,17 @@ static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
break;
default:
- printk(KERN_INFO "%-15s %08x\n", reginfo->name,
+ pr_info("%-15s %08x\n", reginfo->name,
IXGBE_READ_REG(hw, reginfo->ofs));
return;
}
for (i = 0; i < 8; i++) {
snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
- printk(KERN_ERR "%-15s ", rname);
+ pr_err("%-15s", rname);
for (j = 0; j < 8; j++)
- printk(KERN_CONT "%08x ", regs[i*8+j]);
- printk(KERN_CONT "\n");
+ pr_cont(" %08x", regs[i*8+j]);
+ pr_cont("\n");
}
}
@@ -322,18 +322,18 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
/* Print netdevice Info */
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
- printk(KERN_INFO "Device Name state "
+ pr_info("Device Name state "
"trans_start last_rx\n");
- printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
- netdev->name,
- netdev->state,
- netdev->trans_start,
- netdev->last_rx);
+ pr_info("%-15s %016lX %016lX %016lX\n",
+ netdev->name,
+ netdev->state,
+ netdev->trans_start,
+ netdev->last_rx);
}
/* Print Registers */
dev_info(&adapter->pdev->dev, "Register Dump\n");
- printk(KERN_INFO " Register Name Value\n");
+ pr_info(" Register Name Value\n");
for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
reginfo->name; reginfo++) {
ixgbe_regdump(hw, reginfo);
@@ -344,13 +344,12 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
goto exit;
dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
- printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ] "
- "leng ntw timestamp\n");
+ pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
for (n = 0; n < adapter->num_tx_queues; n++) {
tx_ring = adapter->tx_ring[n];
tx_buffer_info =
&tx_ring->tx_buffer_info[tx_ring->next_to_clean];
- printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+ pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
n, tx_ring->next_to_use, tx_ring->next_to_clean,
(u64)tx_buffer_info->dma,
tx_buffer_info->length,
@@ -377,18 +376,18 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
for (n = 0; n < adapter->num_tx_queues; n++) {
tx_ring = adapter->tx_ring[n];
- printk(KERN_INFO "------------------------------------\n");
- printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
- printk(KERN_INFO "------------------------------------\n");
- printk(KERN_INFO "T [desc] [address 63:0 ] "
+ pr_info("------------------------------------\n");
+ pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+ pr_info("------------------------------------\n");
+ pr_info("T [desc] [address 63:0 ] "
"[PlPOIdStDDt Ln] [bi->dma ] "
"leng ntw timestamp bi->skb\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
- tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
tx_buffer_info = &tx_ring->tx_buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
- printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX"
+ pr_info("T [0x%03X] %016llX %016llX %016llX"
" %04X %3X %016llX %p", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
@@ -399,13 +398,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
tx_buffer_info->skb);
if (i == tx_ring->next_to_use &&
i == tx_ring->next_to_clean)
- printk(KERN_CONT " NTC/U\n");
+ pr_cont(" NTC/U\n");
else if (i == tx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
+ pr_cont(" NTU\n");
else if (i == tx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
+ pr_cont(" NTC\n");
else
- printk(KERN_CONT "\n");
+ pr_cont("\n");
if (netif_msg_pktdata(adapter) &&
tx_buffer_info->dma != 0)
@@ -419,11 +418,11 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
/* Print RX Rings Summary */
rx_ring_summary:
dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
- printk(KERN_INFO "Queue [NTU] [NTC]\n");
+ pr_info("Queue [NTU] [NTC]\n");
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
- printk(KERN_INFO "%5d %5X %5X\n", n,
- rx_ring->next_to_use, rx_ring->next_to_clean);
+ pr_info("%5d %5X %5X\n",
+ n, rx_ring->next_to_use, rx_ring->next_to_clean);
}
/* Print RX Rings */
@@ -454,30 +453,30 @@ rx_ring_summary:
*/
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
- printk(KERN_INFO "------------------------------------\n");
- printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
- printk(KERN_INFO "------------------------------------\n");
- printk(KERN_INFO "R [desc] [ PktBuf A0] "
+ pr_info("------------------------------------\n");
+ pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+ pr_info("------------------------------------\n");
+ pr_info("R [desc] [ PktBuf A0] "
"[ HeadBuf DD] [bi->dma ] [bi->skb] "
"<-- Adv Rx Read format\n");
- printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] "
+ pr_info("RWB[desc] [PcsmIpSHl PtRs] "
"[vl er S cks ln] ---------------- [bi->skb] "
"<-- Adv Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
rx_buffer_info = &rx_ring->rx_buffer_info[i];
- rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
u0 = (struct my_u0 *)rx_desc;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
if (staterr & IXGBE_RXD_STAT_DD) {
/* Descriptor Done */
- printk(KERN_INFO "RWB[0x%03X] %016llX "
+ pr_info("RWB[0x%03X] %016llX "
"%016llX ---------------- %p", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
rx_buffer_info->skb);
} else {
- printk(KERN_INFO "R [0x%03X] %016llX "
+ pr_info("R [0x%03X] %016llX "
"%016llX %016llX %p", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
@@ -503,11 +502,11 @@ rx_ring_summary:
}
if (i == rx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
+ pr_cont(" NTU\n");
else if (i == rx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
+ pr_cont(" NTC\n");
else
- printk(KERN_CONT "\n");
+ pr_cont("\n");
}
}
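The printk-to-pr_*() conversions in this file use the generic helpers from <linux/kernel.h>; they expand to printk() with the log level folded in, plus an optional pr_fmt() prefix typically defined before any #include, e.g.:

    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt    /* assumed; per-driver choice */

    pr_info("Queue [NTU] [NTC]\n");  /* printk(KERN_INFO pr_fmt(...)) */
    pr_cont(" NTU\n");               /* continuation: no level, no prefix */

pr_cont() matches the KERN_CONT pieces being replaced, so the multi-part register dump lines keep their formatting.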
@@ -523,7 +522,7 @@ static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
/* Let firmware take over control of h/w */
ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
- ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
+ ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}
static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
@@ -533,7 +532,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
/* Let firmware know the driver has taken over */
ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
- ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
+ ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}
/*
@@ -545,7 +544,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
*
*/
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
- u8 queue, u8 msix_vector)
+ u8 queue, u8 msix_vector)
{
u32 ivar, index;
struct ixgbe_hw *hw = &adapter->hw;
@@ -586,7 +585,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
}
static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
- u64 qmask)
+ u64 qmask)
{
u32 mask;
@@ -601,9 +600,9 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
}
}
-static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
- struct ixgbe_tx_buffer
- *tx_buffer_info)
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
+ struct ixgbe_tx_buffer
+ *tx_buffer_info)
{
if (tx_buffer_info->dma) {
if (tx_buffer_info->mapped_as_page)
@@ -637,7 +636,7 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
* Returns : true if in xon state (currently not paused)
*/
static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+ struct ixgbe_ring *tx_ring)
{
u32 txoff = IXGBE_TFCS_TXOFF;
@@ -682,8 +681,8 @@ static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
}
static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring,
- unsigned int eop)
+ struct ixgbe_ring *tx_ring,
+ unsigned int eop)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -695,7 +694,7 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
ixgbe_tx_xon_state(adapter, tx_ring)) {
/* detected Tx unit hang */
union ixgbe_adv_tx_desc *tx_desc;
- tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
e_err(drv, "Detected Tx Unit Hang\n"
" Tx Queue <%d>\n"
" TDH, TDT <%x>, <%x>\n"
@@ -732,7 +731,7 @@ static void ixgbe_tx_timeout(struct net_device *netdev);
* @tx_ring: tx ring to clean
**/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
- struct ixgbe_ring *tx_ring)
+ struct ixgbe_ring *tx_ring)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
struct net_device *netdev = adapter->netdev;
@@ -743,7 +742,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
i = tx_ring->next_to_clean;
eop = tx_ring->tx_buffer_info[i].next_to_watch;
- eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+ eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
(count < tx_ring->work_limit)) {
@@ -751,7 +750,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
rmb(); /* read buffer_info after eop_desc */
for ( ; !cleaned; count++) {
struct sk_buff *skb;
- tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
tx_buffer_info = &tx_ring->tx_buffer_info[i];
cleaned = (i == eop);
skb = tx_buffer_info->skb;
@@ -781,7 +780,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
}
ixgbe_unmap_and_free_tx_resource(adapter,
- tx_buffer_info);
+ tx_buffer_info);
tx_desc->wb.status = 0;
@@ -791,14 +790,14 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
}
eop = tx_ring->tx_buffer_info[i].next_to_watch;
- eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+ eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
}
tx_ring->next_to_clean = i;
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(count && netif_carrier_ok(netdev) &&
- (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
@@ -832,7 +831,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+ struct ixgbe_ring *rx_ring)
{
u32 rxctrl;
int cpu = get_cpu();
@@ -846,13 +845,13 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
- IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
+ IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
}
rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
- IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
rx_ring->cpu = cpu;
}
@@ -860,7 +859,7 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
}
static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+ struct ixgbe_ring *tx_ring)
{
u32 txctrl;
int cpu = get_cpu();
@@ -878,7 +877,7 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
- IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
+ IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
}
@@ -946,16 +945,15 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
* @rx_desc: rx descriptor
**/
static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
- struct sk_buff *skb, u8 status,
- struct ixgbe_ring *ring,
- union ixgbe_adv_rx_desc *rx_desc)
+ struct sk_buff *skb, u8 status,
+ struct ixgbe_ring *ring,
+ union ixgbe_adv_rx_desc *rx_desc)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
struct napi_struct *napi = &q_vector->napi;
bool is_vlan = (status & IXGBE_RXD_STAT_VP);
u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
- skb_record_rx_queue(skb, ring->queue_index);
if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
@@ -981,7 +979,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
{
u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
/* Rx csum disabled */
if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
@@ -1017,7 +1015,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
}
static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
- struct ixgbe_ring *rx_ring, u32 val)
+ struct ixgbe_ring *rx_ring, u32 val)
{
/*
* Force memory writes to complete before letting h/w
@@ -1033,25 +1031,27 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
* ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
* @adapter: address of board private structure
**/
-static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring,
- int cleaned_count)
+void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring,
+ int cleaned_count)
{
+ struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *bi;
unsigned int i;
+ unsigned int bufsz = rx_ring->rx_buf_len;
i = rx_ring->next_to_use;
bi = &rx_ring->rx_buffer_info[i];
while (cleaned_count--) {
- rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
if (!bi->page_dma &&
(rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
if (!bi->page) {
- bi->page = alloc_page(GFP_ATOMIC);
+ bi->page = netdev_alloc_page(netdev);
if (!bi->page) {
adapter->alloc_rx_page_failed++;
goto no_buffers;
@@ -1063,29 +1063,28 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
}
bi->page_dma = dma_map_page(&pdev->dev, bi->page,
- bi->page_offset,
- (PAGE_SIZE / 2),
+ bi->page_offset,
+ (PAGE_SIZE / 2),
DMA_FROM_DEVICE);
}
if (!bi->skb) {
- struct sk_buff *skb;
- /* netdev_alloc_skb reserves 32 bytes up front!! */
- uint bufsz = rx_ring->rx_buf_len + SMP_CACHE_BYTES;
- skb = netdev_alloc_skb(adapter->netdev, bufsz);
+ struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev,
+ bufsz);
+ bi->skb = skb;
if (!skb) {
adapter->alloc_rx_buff_failed++;
goto no_buffers;
}
+ /* initialize queue mapping */
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ }
- /* advance the data pointer to the next cache line */
- skb_reserve(skb, (PTR_ALIGN(skb->data, SMP_CACHE_BYTES)
- - skb->data));
-
- bi->skb = skb;
- bi->dma = dma_map_single(&pdev->dev, skb->data,
- rx_ring->rx_buf_len,
+ if (!bi->dma) {
+ bi->dma = dma_map_single(&pdev->dev,
+ bi->skb->data,
+ rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
}
/* Refresh the desc even if buffer_addrs didn't change because
@@ -1095,6 +1094,7 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
} else {
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ rx_desc->read.hdr_addr = 0;
}
i++;
@@ -1126,8 +1126,8 @@ static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
{
return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
- IXGBE_RXDADV_RSCCNT_MASK) >>
- IXGBE_RXDADV_RSCCNT_SHIFT;
+ IXGBE_RXDADV_RSCCNT_MASK) >>
+ IXGBE_RXDADV_RSCCNT_SHIFT;
}
/**
@@ -1140,7 +1140,7 @@ static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
* turns it into the frag list owner.
**/
static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
- u64 *count)
+ u64 *count)
{
unsigned int frag_list_size = 0;
@@ -1168,8 +1168,8 @@ struct ixgbe_rsc_cb {
#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
- struct ixgbe_ring *rx_ring,
- int *work_done, int work_to_do)
+ struct ixgbe_ring *rx_ring,
+ int *work_done, int work_to_do)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
struct net_device *netdev = adapter->netdev;
@@ -1188,7 +1188,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
#endif /* IXGBE_FCOE */
i = rx_ring->next_to_clean;
- rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
rx_buffer_info = &rx_ring->rx_buffer_info[i];
@@ -1231,9 +1231,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
} else {
dma_unmap_single(&pdev->dev,
- rx_buffer_info->dma,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
+ rx_buffer_info->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
}
rx_buffer_info->dma = 0;
skb_put(skb, len);
@@ -1244,9 +1244,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- rx_buffer_info->page,
- rx_buffer_info->page_offset,
- upper_len);
+ rx_buffer_info->page,
+ rx_buffer_info->page_offset,
+ upper_len);
if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
(page_count(rx_buffer_info->page) != 1))
@@ -1263,7 +1263,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
if (i == rx_ring->count)
i = 0;
- next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i);
prefetch(next_rxd);
cleaned_count++;
@@ -1280,18 +1280,20 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
if (staterr & IXGBE_RXD_STAT_EOP) {
if (skb->prev)
- skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
+ skb = ixgbe_transform_rsc_queue(skb,
+ &(rx_ring->rsc_count));
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
if (IXGBE_RSC_CB(skb)->delay_unmap) {
dma_unmap_single(&pdev->dev,
IXGBE_RSC_CB(skb)->dma,
- rx_ring->rx_buf_len,
+ rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
IXGBE_RSC_CB(skb)->dma = 0;
IXGBE_RSC_CB(skb)->delay_unmap = false;
}
if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
- rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
+ rx_ring->rsc_count +=
+ skb_shinfo(skb)->nr_frags;
else
rx_ring->rsc_count++;
rx_ring->rsc_flush++;
@@ -1403,24 +1405,24 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
q_vector = adapter->q_vector[v_idx];
/* XXX for_each_set_bit(...) */
r_idx = find_first_bit(q_vector->rxr_idx,
- adapter->num_rx_queues);
+ adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
j = adapter->rx_ring[r_idx]->reg_idx;
ixgbe_set_ivar(adapter, 0, j, v_idx);
r_idx = find_next_bit(q_vector->rxr_idx,
- adapter->num_rx_queues,
- r_idx + 1);
+ adapter->num_rx_queues,
+ r_idx + 1);
}
r_idx = find_first_bit(q_vector->txr_idx,
- adapter->num_tx_queues);
+ adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
j = adapter->tx_ring[r_idx]->reg_idx;
ixgbe_set_ivar(adapter, 1, j, v_idx);
r_idx = find_next_bit(q_vector->txr_idx,
- adapter->num_tx_queues,
- r_idx + 1);
+ adapter->num_tx_queues,
+ r_idx + 1);
}
if (q_vector->txr_count && !q_vector->rxr_count)
@@ -1435,7 +1437,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
if (adapter->hw.mac.type == ixgbe_mac_82598EB)
ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
- v_idx);
+ v_idx);
else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
ixgbe_set_ivar(adapter, -1, 1, v_idx);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
@@ -1477,8 +1479,8 @@ enum latency_range {
* parameter (see ixgbe_param.c)
**/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
- u32 eitr, u8 itr_setting,
- int packets, int bytes)
+ u32 eitr, u8 itr_setting,
+ int packets, int bytes)
{
unsigned int retval = itr_setting;
u32 timepassed_us;
@@ -1567,30 +1569,30 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
for (i = 0; i < q_vector->txr_count; i++) {
tx_ring = adapter->tx_ring[r_idx];
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
- q_vector->tx_itr,
- tx_ring->total_packets,
- tx_ring->total_bytes);
+ q_vector->tx_itr,
+ tx_ring->total_packets,
+ tx_ring->total_bytes);
/* if the result for this queue would decrease interrupt
* rate for this vector then use that result */
q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
- q_vector->tx_itr - 1 : ret_itr);
+ q_vector->tx_itr - 1 : ret_itr);
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
- r_idx + 1);
+ r_idx + 1);
}
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
rx_ring = adapter->rx_ring[r_idx];
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
- q_vector->rx_itr,
- rx_ring->total_packets,
- rx_ring->total_bytes);
+ q_vector->rx_itr,
+ rx_ring->total_packets,
+ rx_ring->total_bytes);
/* if the result for this queue would decrease interrupt
* rate for this vector then use that result */
q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
- q_vector->rx_itr - 1 : ret_itr);
+ q_vector->rx_itr - 1 : ret_itr);
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
- r_idx + 1);
+ r_idx + 1);
}
current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
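Editor's note: ixgbe_set_itr_msix() folds every owned queue into one moderation value: each queue's recent packet/byte counts are classified by ixgbe_update_itr(), and the vector-wide answer is the max() of the rx and tx results. A rough sketch of that throughput-bucket idea; the thresholds and names below are invented for illustration, not the driver's real values:

/* Rough sketch of throughput-bucket interrupt moderation.
 * Thresholds and names are invented, not the driver's real values. */
#include <stdio.h>

enum lat_range { LOWEST_LAT, LOW_LAT, BULK_LAT };

static enum lat_range classify(int packets, int bytes, int interval_us)
{
	long bytes_per_us = interval_us ? bytes / interval_us : 0;

	(void)packets;			/* a fuller version weights this too */
	if (bytes_per_us > 1000)	/* heavy stream: favor throughput */
		return BULK_LAT;
	if (bytes_per_us > 10)		/* moderate load */
		return LOW_LAT;
	return LOWEST_LAT;		/* light load: favor latency */
}

int main(void)
{
	enum lat_range rx = classify(100, 150000, 100);
	enum lat_range tx = classify(2, 128, 100);
	/* merge per-queue answers the way the driver does, with max() */
	enum lat_range cur = rx > tx ? rx : tx;

	printf("vector range: %d (0=lowest, 2=bulk)\n", cur);
	return 0;
}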
@@ -1627,39 +1629,40 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
static void ixgbe_check_overtemp_task(struct work_struct *work)
{
struct ixgbe_adapter *adapter = container_of(work,
- struct ixgbe_adapter,
- check_overtemp_task);
+ struct ixgbe_adapter,
+ check_overtemp_task);
struct ixgbe_hw *hw = &adapter->hw;
u32 eicr = adapter->interrupt_event;
- if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
- switch (hw->device_id) {
- case IXGBE_DEV_ID_82599_T3_LOM: {
- u32 autoneg;
- bool link_up = false;
+ if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
+ return;
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82599_T3_LOM: {
+ u32 autoneg;
+ bool link_up = false;
- if (hw->mac.ops.check_link)
- hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
+ if (hw->mac.ops.check_link)
+ hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
- if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
- (eicr & IXGBE_EICR_LSC))
- /* Check if this is due to overtemp */
- if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP)
- break;
- }
+ if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
+ (eicr & IXGBE_EICR_LSC))
+ /* Check if this is due to overtemp */
+ if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP)
+ break;
+ return;
+ }
+ default:
+ if (!(eicr & IXGBE_EICR_GPI_SDP0))
return;
- default:
- if (!(eicr & IXGBE_EICR_GPI_SDP0))
- return;
- break;
- }
- e_crit(drv, "Network adapter has been stopped because it has "
- "over heated. Restart the computer. If the problem "
- "persists, power off the system and replace the "
- "adapter\n");
- /* write to clear the interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
+ break;
}
+ e_crit(drv,
+ "Network adapter has been stopped because it has over heated. "
+ "Restart the computer. If the problem persists, "
+ "power off the system and replace the adapter\n");
+ /* write to clear the interrupt */
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
}
static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
@@ -1746,9 +1749,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
netif_tx_stop_all_queues(netdev);
for (i = 0; i < adapter->num_tx_queues; i++) {
struct ixgbe_ring *tx_ring =
- adapter->tx_ring[i];
+ adapter->tx_ring[i];
if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
- &tx_ring->reinit_state))
+ &tx_ring->reinit_state))
schedule_work(&adapter->fdir_reinit_task);
}
}
@@ -1777,7 +1780,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
}
static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
- u64 qmask)
+ u64 qmask)
{
u32 mask;
@@ -1809,7 +1812,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
tx_ring->total_bytes = 0;
tx_ring->total_packets = 0;
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
- r_idx + 1);
+ r_idx + 1);
}
/* EIAM disabled interrupts (on this vector) for us */
@@ -1837,7 +1840,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
rx_ring->total_bytes = 0;
rx_ring->total_packets = 0;
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
- r_idx + 1);
+ r_idx + 1);
}
if (!q_vector->rxr_count)
@@ -1867,7 +1870,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
ring->total_bytes = 0;
ring->total_packets = 0;
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
- r_idx + 1);
+ r_idx + 1);
}
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
@@ -1876,7 +1879,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
ring->total_bytes = 0;
ring->total_packets = 0;
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
- r_idx + 1);
+ r_idx + 1);
}
/* EIAM disabled interrupts (on this vector) for us */
@@ -1896,7 +1899,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
{
struct ixgbe_q_vector *q_vector =
- container_of(napi, struct ixgbe_q_vector, napi);
+ container_of(napi, struct ixgbe_q_vector, napi);
struct ixgbe_adapter *adapter = q_vector->adapter;
struct ixgbe_ring *rx_ring = NULL;
int work_done = 0;
@@ -1918,7 +1921,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
ixgbe_set_itr_msix(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter,
- ((u64)1 << q_vector->v_idx));
+ ((u64)1 << q_vector->v_idx));
}
return work_done;
@@ -1935,7 +1938,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
{
struct ixgbe_q_vector *q_vector =
- container_of(napi, struct ixgbe_q_vector, napi);
+ container_of(napi, struct ixgbe_q_vector, napi);
struct ixgbe_adapter *adapter = q_vector->adapter;
struct ixgbe_ring *ring = NULL;
int work_done = 0, i;
@@ -1951,7 +1954,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
#endif
tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
- r_idx + 1);
+ r_idx + 1);
}
/* attempt to distribute budget to each queue fairly, but don't allow
@@ -1967,7 +1970,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
#endif
ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
- r_idx + 1);
+ r_idx + 1);
}
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
@@ -1979,7 +1982,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
ixgbe_set_itr_msix(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter,
- ((u64)1 << q_vector->v_idx));
+ ((u64)1 << q_vector->v_idx));
return 0;
}
@@ -1997,7 +2000,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
{
struct ixgbe_q_vector *q_vector =
- container_of(napi, struct ixgbe_q_vector, napi);
+ container_of(napi, struct ixgbe_q_vector, napi);
struct ixgbe_adapter *adapter = q_vector->adapter;
struct ixgbe_ring *tx_ring = NULL;
int work_done = 0;
@@ -2019,14 +2022,15 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
if (adapter->tx_itr_setting & 1)
ixgbe_set_itr_msix(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
- ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+ ixgbe_irq_enable_queues(adapter,
+ ((u64)1 << q_vector->v_idx));
}
return work_done;
}
static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
- int r_idx)
+ int r_idx)
{
struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
@@ -2035,7 +2039,7 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
}
static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
- int t_idx)
+ int t_idx)
{
struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
@@ -2055,7 +2059,7 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
* mapping configurations in here.
**/
static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
- int vectors)
+ int vectors)
{
int v_start = 0;
int rxr_idx = 0, txr_idx = 0;
@@ -2122,7 +2126,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
struct net_device *netdev = adapter->netdev;
irqreturn_t (*handler)(int, void *);
int i, vector, q_vectors, err;
- int ri=0, ti=0;
+ int ri = 0, ti = 0;
/* Decrement for Other and TCP Timer vectors */
q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -2133,26 +2137,24 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
goto out;
#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
- (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
- &ixgbe_msix_clean_many)
+ (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
+ &ixgbe_msix_clean_many)
for (vector = 0; vector < q_vectors; vector++) {
handler = SET_HANDLER(adapter->q_vector[vector]);
- if(handler == &ixgbe_msix_clean_rx) {
+ if (handler == &ixgbe_msix_clean_rx) {
sprintf(adapter->name[vector], "%s-%s-%d",
netdev->name, "rx", ri++);
- }
- else if(handler == &ixgbe_msix_clean_tx) {
+ } else if (handler == &ixgbe_msix_clean_tx) {
sprintf(adapter->name[vector], "%s-%s-%d",
netdev->name, "tx", ti++);
- }
- else
+ } else
sprintf(adapter->name[vector], "%s-%s-%d",
netdev->name, "TxRx", vector);
err = request_irq(adapter->msix_entries[vector].vector,
- handler, 0, adapter->name[vector],
- adapter->q_vector[vector]);
+ handler, 0, adapter->name[vector],
+ adapter->q_vector[vector]);
if (err) {
e_err(probe, "request_irq failed for MSIX interrupt "
"Error: %d\n", err);
@@ -2162,7 +2164,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
sprintf(adapter->name[vector], "%s:lsc", netdev->name);
err = request_irq(adapter->msix_entries[vector].vector,
- ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
+ ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
if (err) {
e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
goto free_queue_irqs;
@@ -2173,7 +2175,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
free_queue_irqs:
for (i = vector - 1; i >= 0; i--)
free_irq(adapter->msix_entries[--vector].vector,
- adapter->q_vector[i]);
+ adapter->q_vector[i]);
adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
@@ -2191,13 +2193,13 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
- q_vector->tx_itr,
- tx_ring->total_packets,
- tx_ring->total_bytes);
+ q_vector->tx_itr,
+ tx_ring->total_packets,
+ tx_ring->total_bytes);
q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
- q_vector->rx_itr,
- rx_ring->total_packets,
- rx_ring->total_bytes);
+ q_vector->rx_itr,
+ rx_ring->total_packets,
+ rx_ring->total_bytes);
current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
@@ -2343,10 +2345,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
err = ixgbe_request_msix_irqs(adapter);
} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
- netdev->name, netdev);
+ netdev->name, netdev);
} else {
err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
- netdev->name, netdev);
+ netdev->name, netdev);
}
if (err)
@@ -2370,7 +2372,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
i--;
for (; i >= 0; i--) {
free_irq(adapter->msix_entries[i].vector,
- adapter->q_vector[i]);
+ adapter->q_vector[i]);
}
ixgbe_reset_q_vectors(adapter);
@@ -2413,7 +2415,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
- EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
+ EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
ixgbe_set_ivar(adapter, 0, 0, 0);
ixgbe_set_ivar(adapter, 1, 0, 0);
@@ -2425,95 +2427,140 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
}
/**
- * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
+ * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
* @adapter: board private structure
+ * @ring: structure containing ring specific data
*
- * Configure the Tx unit of the MAC after a reset.
+ * Configure the Tx descriptor ring after a reset.
**/
-static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
+void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
{
- u64 tdba;
struct ixgbe_hw *hw = &adapter->hw;
- u32 i, j, tdlen, txctrl;
+ u64 tdba = ring->dma;
+ int wait_loop = 10;
+ u32 txdctl;
+ u16 reg_idx = ring->reg_idx;
- /* Setup the HW Tx Head and Tail descriptor pointers */
- for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *ring = adapter->tx_ring[i];
- j = ring->reg_idx;
- tdba = ring->dma;
- tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
- IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
- (tdba & DMA_BIT_MASK(32)));
- IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
- IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
- IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
- IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
- adapter->tx_ring[i]->head = IXGBE_TDH(j);
- adapter->tx_ring[i]->tail = IXGBE_TDT(j);
- /*
- * Disable Tx Head Writeback RO bit, since this hoses
- * bookkeeping if things aren't delivered in order.
- */
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
- break;
- case ixgbe_mac_82599EB:
- default:
- txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
- break;
- }
- txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
- break;
- case ixgbe_mac_82599EB:
- default:
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
- break;
- }
+ /* disable queue to avoid issues while updating state */
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
+ txdctl & ~IXGBE_TXDCTL_ENABLE);
+ IXGBE_WRITE_FLUSH(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
+ (tdba & DMA_BIT_MASK(32)));
+ IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
+ ring->count * sizeof(union ixgbe_adv_tx_desc));
+ IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
+ ring->head = IXGBE_TDH(reg_idx);
+ ring->tail = IXGBE_TDT(reg_idx);
+
+ /* configure fetching thresholds */
+ if (adapter->rx_itr_setting == 0) {
+ /* cannot set wthresh when itr==0 */
+ txdctl &= ~0x007F0000;
+ } else {
+ /* enable WTHRESH=8 descriptors, to encourage burst writeback */
+ txdctl |= (8 << 16);
+ }
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ /* PThresh workaround for Tx hang with DFP enabled. */
+ txdctl |= 32;
}
- if (hw->mac.type == ixgbe_mac_82599EB) {
- u32 rttdcs;
- u32 mask;
+ /* reinitialize flowdirector state */
+ set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state);
- /* disable the arbiter while setting MTQC */
- rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
- rttdcs |= IXGBE_RTTDCS_ARBDIS;
- IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+ /* enable queue */
+ txdctl |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
- /* set transmit pool layout */
- mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
- switch (adapter->flags & mask) {
+ /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
+ if (hw->mac.type == ixgbe_mac_82598EB &&
+ !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+ return;
- case (IXGBE_FLAG_SRIOV_ENABLED):
- IXGBE_WRITE_REG(hw, IXGBE_MTQC,
- (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
- break;
+ /* poll to verify queue is enabled */
+ do {
+ msleep(1);
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+ } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!wait_loop)
+ e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
+}
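Editor's note: the new ixgbe_configure_tx_ring() follows a strict disable / program / enable / poll order, giving up after ten 1 ms polls if TXDCTL.ENABLE never latches. A generic sketch of the bounded-poll idiom, with mmio_read()/mmio_write()/sleep_ms() as stand-ins for real MMIO accessors and msleep():

/* Generic bounded poll for an enable bit; mmio_* are stand-ins. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ENABLE_BIT (1u << 25)

static uint32_t fake_reg;
static uint32_t mmio_read(void)    { return fake_reg; }
static void mmio_write(uint32_t v) { fake_reg = v; }
static void sleep_ms(int ms)       { (void)ms; /* msleep() in the kernel */ }

static bool enable_ring(void)
{
	int wait_loop = 10;
	uint32_t ctl = mmio_read();

	mmio_write(ctl | ENABLE_BIT);	/* request enable */
	do {
		sleep_ms(1);
		ctl = mmio_read();	/* hardware latches the bit when ready */
	} while (--wait_loop && !(ctl & ENABLE_BIT));

	return wait_loop != 0;		/* false: log "could not enable" */
}

int main(void)
{
	printf("enabled: %d\n", enable_ring());
	return 0;
}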
- case (IXGBE_FLAG_DCB_ENABLED):
- /* We enable 8 traffic classes, DCB only */
- IXGBE_WRITE_REG(hw, IXGBE_MTQC,
- (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
- break;
+static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 rttdcs;
+ u32 mask;
- default:
- IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
- break;
- }
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ /* disable the arbiter while setting MTQC */
+ rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ rttdcs |= IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+
+ /* set transmit pool layout */
+ mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
+ switch (adapter->flags & mask) {
+
+ case (IXGBE_FLAG_SRIOV_ENABLED):
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+ (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
+ break;
+
+ case (IXGBE_FLAG_DCB_ENABLED):
+ /* We enable 8 traffic classes, DCB only */
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+ (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
+ break;
+
+ default:
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+ break;
+ }
+
+ /* re-enable the arbiter */
+ rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+}
+
+/**
+ * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 dmatxctl;
+ u32 i;
+
+ ixgbe_setup_mtqc(adapter);
- /* re-eable the arbiter */
- rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
- IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ /* DMATXCTL.EN must be set before Tx queues are enabled */
+ dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ dmatxctl |= IXGBE_DMATXCTL_TE;
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
}
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+ struct ixgbe_ring *rx_ring)
{
u32 srrctl;
int index;
@@ -2529,6 +2576,8 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
+ if (adapter->num_vfs)
+ srrctl |= IXGBE_SRRCTL_DROP_EN;
srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
IXGBE_SRRCTL_BSIZEHDR_MASK;
@@ -2549,20 +2598,46 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
}
-static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
+static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
{
- u32 mrqc = 0;
+ struct ixgbe_hw *hw = &adapter->hw;
+ static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
+ 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
+ 0x6A3E67EA, 0x14364D17, 0x3BED200D};
+ u32 mrqc = 0, reta = 0;
+ u32 rxcsum;
+ int i, j;
int mask;
- if (!(adapter->hw.mac.type == ixgbe_mac_82599EB))
- return mrqc;
+ /* Fill out hash function seeds */
+ for (i = 0; i < 10; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
+
+ /* Fill out redirection table */
+ for (i = 0, j = 0; i < 128; i++, j++) {
+ if (j == adapter->ring_feature[RING_F_RSS].indices)
+ j = 0;
+ /* reta = 4-byte sliding window of
+ * 0x00..(indices-1)(indices-1)00..etc. */
+ reta = (reta << 8) | (j * 0x11);
+ if ((i & 3) == 3)
+ IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
+ }
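Editor's note: the redirection-table loop above packs four 8-bit entries into each 32-bit RETA register, and j * 0x11 mirrors the queue index into both nibbles of each byte. A sketch that just prints the packed words so the sliding window is visible:

/* Sketch of packing a 128-entry redirection table, four 8-bit
 * entries per 32-bit register; indices wrap at num_queues. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int num_queues = 4;	/* e.g. the RSS indices count */
	uint32_t reta = 0;

	for (int i = 0, j = 0; i < 128; i++, j++) {
		if (j == (int)num_queues)
			j = 0;
		/* j * 0x11 mirrors the index into both nibbles of the byte */
		reta = (reta << 8) | (j * 0x11);
		if ((i & 3) == 3)	/* one register per four entries */
			printf("RETA[%2d] = 0x%08x\n", i >> 2, reta);
	}
	return 0;
}

With num_queues = 4 the first word prints 0x00112233, then the window keeps sliding through 0..3 for all 32 registers.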
+
+ /* Disable indicating checksum in descriptor, enables RSS hash */
+ rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+ rxcsum |= IXGBE_RXCSUM_PCSD;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
- mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ mask = adapter->flags & IXGBE_FLAG_RSS_ENABLED;
+ else
+ mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
#ifdef CONFIG_IXGBE_DCB
- | IXGBE_FLAG_DCB_ENABLED
+ | IXGBE_FLAG_DCB_ENABLED
#endif
- | IXGBE_FLAG_SRIOV_ENABLED
- );
+ | IXGBE_FLAG_SRIOV_ENABLED
+ );
switch (mask) {
case (IXGBE_FLAG_RSS_ENABLED):
@@ -2580,7 +2655,13 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
break;
}
- return mrqc;
+ /* Perform hash on these packet types */
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
+ | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
+ | IXGBE_MRQC_RSS_FIELD_IPV6
+ | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
}
/**
@@ -2588,25 +2669,26 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
* @adapter: address of board private structure
* @ring: structure containing ring specific data
**/
-static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index)
+static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
{
- struct ixgbe_ring *rx_ring;
struct ixgbe_hw *hw = &adapter->hw;
- int j;
u32 rscctrl;
int rx_buf_len;
+ u16 reg_idx = ring->reg_idx;
- rx_ring = adapter->rx_ring[index];
- j = rx_ring->reg_idx;
- rx_buf_len = rx_ring->rx_buf_len;
- rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
+ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
+ return;
+
+ rx_buf_len = ring->rx_buf_len;
+ rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
rscctrl |= IXGBE_RSCCTL_RSCEN;
/*
* we must limit the number of descriptors so that the
* total size of max desc * buf_len is not greater
* than 65535
*/
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+ if (ring->flags & IXGBE_RING_RX_PS_ENABLED) {
#if (MAX_SKB_FRAGS > 16)
rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
#elif (MAX_SKB_FRAGS > 8)
@@ -2624,31 +2706,181 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index)
else
rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
}
- IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
+ IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
}
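Editor's note: the MAXDESC ladder above enforces the comment's constraint that max descriptors times buffer length must stay under 65535. A small sketch of the same bucket selection with the arithmetic spelled out; the thresholds mirror the power-of-two buckets but the helper name is invented:

/* Sketch of the "max_desc * buf_len must stay under 65535" constraint. */
#include <stdio.h>

static int max_rsc_desc(int buf_len)
{
	if (buf_len < 4096)
		return 16;	/* 16 * 4095 = 65520 < 65535 */
	if (buf_len < 8192)
		return 8;	/*  8 * 8191 = 65528 < 65535 */
	return 4;		/*  4 * 16383 = 65532 < 65535 */
}

int main(void)
{
	int sizes[] = { 2048, 4096, 9000 };

	for (int i = 0; i < 3; i++)
		printf("buf_len %5d -> up to %2d descriptors\n",
		       sizes[i], max_rsc_desc(sizes[i]));
	return 0;
}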
/**
- * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
- * @adapter: board private structure
+ * ixgbe_set_uta - Set unicast filter table address
+ * @adapter: board private structure
*
- * Configure the Rx unit of the MAC after a reset.
+ * The unicast table address is a register array of 32-bit registers.
+ * The table is meant to be used in a way similar to how the MTA is used;
+ * however, due to certain limitations in the hardware it is necessary to
+ * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
+ * enable bit to allow VLAN tag stripping when promiscuous mode is enabled.
**/
-static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
+static void ixgbe_set_uta(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ /* The UTA table only exists on 82599 hardware and newer */
+ if (hw->mac.type < ixgbe_mac_82599EB)
+ return;
+
+ /* we only need to do this if VMDq is enabled */
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return;
+
+ for (i = 0; i < 128; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
+}
+
+#define IXGBE_MAX_RX_DESC_POLL 10
+static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int reg_idx = ring->reg_idx;
+ int wait_loop = IXGBE_MAX_RX_DESC_POLL;
+ u32 rxdctl;
+
+ /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
+ if (hw->mac.type == ixgbe_mac_82598EB &&
+ !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+ return;
+
+ do {
+ msleep(1);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+
+ if (!wait_loop) {
+ e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
+ "the polling period\n", reg_idx);
+ }
+}
+
+void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 rdba = ring->dma;
+ u32 rxdctl;
+ u16 reg_idx = ring->reg_idx;
+
+ /* disable queue to avoid issues while updating state */
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx),
+ rxdctl & ~IXGBE_RXDCTL_ENABLE);
+ IXGBE_WRITE_FLUSH(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
+ IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
+ ring->count * sizeof(union ixgbe_adv_rx_desc));
+ IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
+ ring->head = IXGBE_RDH(reg_idx);
+ ring->tail = IXGBE_RDT(reg_idx);
+
+ ixgbe_configure_srrctl(adapter, ring);
+ ixgbe_configure_rscctl(adapter, ring);
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ /*
+ * enable cache line friendly hardware writes:
+ * PTHRESH=32 descriptors (half the internal cache),
+ * this also removes ugly rx_no_buffer_count increment
+ * HTHRESH=4 descriptors (to minimize latency on fetch)
+ * WTHRESH=8 burst writeback up to two cache lines
+ */
+ rxdctl &= ~0x3FFFFF;
+ rxdctl |= 0x080420;
+ }
+
+ /* enable receive descriptor ring */
+ rxdctl |= IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+
+ ixgbe_rx_desc_queue_enable(adapter, ring);
+ ixgbe_alloc_rx_buffers(adapter, ring, IXGBE_DESC_UNUSED(ring));
+}
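Editor's note: ixgbe_configure_rx_ring() is the Rx twin of the Tx bring-up: quiesce, program base/length, zero head and tail, apply per-ring policy, enable and poll, then post buffers by bumping tail. A sketch that only records that ordering, since the order (not the register values) is what the refactor pins down; every name is a stand-in:

/* Ordering sketch for descriptor-ring bring-up; not ixgbe symbols. */
#include <stdint.h>
#include <stdio.h>

#define CTL_ENABLE (1u << 25)

static uint32_t ctl, base_lo, base_hi, len, head, tail;

static void ring_bringup(uint64_t dma, uint32_t count, uint32_t desc_sz)
{
	ctl &= ~CTL_ENABLE;		/* 1. quiesce before reprogramming */
	base_lo = (uint32_t)dma;	/* 2. base and length */
	base_hi = (uint32_t)(dma >> 32);
	len = count * desc_sz;
	head = tail = 0;		/* 3. HW and SW agree on position */
	/* 4. per-ring policy (SRRCTL split, RSC) would be applied here */
	ctl |= CTL_ENABLE;		/* 5. enable, then poll until latched */
	tail = count - 1;		/* 6. post buffers by bumping tail */
}

int main(void)
{
	ring_bringup(0x100000000ULL, 512, 16);
	printf("base=%08x:%08x len=%u tail=%u\n", base_hi, base_lo, len, tail);
	return 0;
}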
+
+static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int p;
+
+ /* PSRTYPE must be initialized in non 82598 adapters */
+ u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
+ IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR |
+ IXGBE_PSRTYPE_L2HDR |
+ IXGBE_PSRTYPE_IPV6HDR;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
+ psrtype |= (adapter->num_rx_queues_per_pool << 29);
+
+ for (p = 0; p < adapter->num_rx_pools; p++)
+ IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
+ psrtype);
+}
+
+static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gcr_ext;
+ u32 vt_reg_bits;
+ u32 reg_offset, vf_shift;
+ u32 vmdctl;
+
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return;
+
+ vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN;
+ vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
+
+ vf_shift = adapter->num_vfs % 32;
+ reg_offset = (adapter->num_vfs > 32) ? 1 : 0;
+
+ /* Enable only the PF's pool for Tx/Rx */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+
+ /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
+ hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
+
+ /*
+ * Set up VF register offsets for selected VT Mode,
+ * i.e. 32 or 64 VFs for SR-IOV
+ */
+ gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
+ gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
+
+ /* enable Tx loopback for VF/PF communication */
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+}
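Editor's note: the pool-enable bits for VFRE/VFTE span two 32-bit registers, so the code derives a register offset and a bit shift from the pool number (the hunk computes the offset with "> 32"; the sketch below uses the conventional n / 32, n % 32 split):

/* Sketch of setting one pool's bit in a mask spanning two registers. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t vfre[2] = { 0, 0 };	/* stand-in for VFRE(0)/VFRE(1) */
	unsigned int pool = 40;		/* PF pool follows the VFs */

	unsigned int shift = pool % 32;
	unsigned int off = pool / 32;

	vfre[off] = 1u << shift;	/* enable only this pool */
	vfre[off ^ 1] = 0;		/* and none in the other register */

	printf("VFRE[0]=0x%08x VFRE[1]=0x%08x\n", vfre[0], vfre[1]);
	return 0;
}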
+
+static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
{
- u64 rdba;
struct ixgbe_hw *hw = &adapter->hw;
- struct ixgbe_ring *rx_ring;
struct net_device *netdev = adapter->netdev;
int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
- int i, j;
- u32 rdlen, rxctrl, rxcsum;
- static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
- 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
- 0x6A3E67EA, 0x14364D17, 0x3BED200D};
- u32 fctrl, hlreg0;
- u32 reta = 0, mrqc = 0;
- u32 rdrxctl;
int rx_buf_len;
+ struct ixgbe_ring *rx_ring;
+ int i;
+ u32 mhadd, hlreg0;
/* Decide whether to use packet split mode or not */
/* Do not use packet split if we're in SR-IOV Mode */
@@ -2658,62 +2890,40 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
/* Set the RX buffer length according to the mode */
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
rx_buf_len = IXGBE_RX_HDR_SIZE;
- if (hw->mac.type == ixgbe_mac_82599EB) {
- /* PSRTYPE must be initialized in 82599 */
- u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
- IXGBE_PSRTYPE_UDPHDR |
- IXGBE_PSRTYPE_IPV4HDR |
- IXGBE_PSRTYPE_IPV6HDR |
- IXGBE_PSRTYPE_L2HDR;
- IXGBE_WRITE_REG(hw,
- IXGBE_PSRTYPE(adapter->num_vfs),
- psrtype);
- }
} else {
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
(netdev->mtu <= ETH_DATA_LEN))
rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
else
- rx_buf_len = ALIGN(max_frame, 1024);
+ rx_buf_len = ALIGN(max_frame + VLAN_HLEN, 1024);
}
- fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
- fctrl |= IXGBE_FCTRL_BAM;
- fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
- fctrl |= IXGBE_FCTRL_PMCF;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
+#ifdef IXGBE_FCOE
+ /* adjust max frame to be able to do baby jumbo for FCoE */
+ if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+ (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
+ max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
+
+#endif /* IXGBE_FCOE */
+ mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
+ if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
+ mhadd &= ~IXGBE_MHADD_MFS_MASK;
+ mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
+ }
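Editor's note: the MHADD update just above only rewrites the register when the max-frame field actually differs, avoiding a needless MMIO write. A sketch of that compare-then-write pattern with invented field names:

/* Sketch of a field-compare-then-write update. Names invented. */
#include <stdint.h>
#include <stdio.h>

#define MFS_SHIFT 16
#define MFS_MASK  0xFFFF0000u

static uint32_t mhadd = 1518u << MFS_SHIFT;

int main(void)
{
	uint32_t max_frame = 9000;

	if (max_frame != (mhadd >> MFS_SHIFT)) {
		mhadd &= ~MFS_MASK;
		mhadd |= max_frame << MFS_SHIFT;
		printf("wrote MHADD = 0x%08x\n", mhadd);	/* MMIO write */
	} else {
		printf("field unchanged, skipped the write\n");
	}
	return 0;
}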
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- if (adapter->netdev->mtu <= ETH_DATA_LEN)
- hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
- else
- hlreg0 |= IXGBE_HLREG0_JUMBOEN;
-#ifdef IXGBE_FCOE
- if (netdev->features & NETIF_F_FCOE_MTU)
- hlreg0 |= IXGBE_HLREG0_JUMBOEN;
-#endif
+ /* set jumbo enable since MHADD.MFS keeps the size locked at max_frame */
+ hlreg0 |= IXGBE_HLREG0_JUMBOEN;
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
- rdlen = adapter->rx_ring[0]->count * sizeof(union ixgbe_adv_rx_desc);
- /* disable receives while setting up the descriptors */
- rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
-
/*
* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring
*/
for (i = 0; i < adapter->num_rx_queues; i++) {
rx_ring = adapter->rx_ring[i];
- rdba = rx_ring->dma;
- j = rx_ring->reg_idx;
- IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
- IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
- IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
- IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
- IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
- rx_ring->head = IXGBE_RDH(j);
- rx_ring->tail = IXGBE_RDT(j);
rx_ring->rx_buf_len = rx_buf_len;
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
@@ -2729,15 +2939,21 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
rx_ring->rx_buf_len =
- IXGBE_FCOE_JUMBO_FRAME_SIZE;
+ IXGBE_FCOE_JUMBO_FRAME_SIZE;
}
}
-
#endif /* IXGBE_FCOE */
- ixgbe_configure_srrctl(adapter, rx_ring);
}
- if (hw->mac.type == ixgbe_mac_82598EB) {
+}
+
+static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
/*
* For VMDq support of different descriptor types or
* buffer sizes through the use of multiple SRRCTL
@@ -2748,110 +2964,66 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
* effects of setting this bit are only that SRRCTL must be
* fully programmed [0..15]
*/
- rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
rdrxctl |= IXGBE_RDRXCTL_MVMEN;
- IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
+ break;
+ case ixgbe_mac_82599EB:
+ /* Disable RSC for ACK packets */
+ IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
+ (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
+ rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
+ /* hardware requires some bits to be set by default */
+ rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
+ rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
+ break;
+ default:
+ /* We should do nothing since we don't know this hardware */
+ return;
}
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
- u32 vt_reg_bits;
- u32 reg_offset, vf_shift;
- u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
- vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN
- | IXGBE_VT_CTL_REPLEN;
- vt_reg_bits |= (adapter->num_vfs <<
- IXGBE_VT_CTL_POOL_SHIFT);
- IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0);
-
- vf_shift = adapter->num_vfs % 32;
- reg_offset = adapter->num_vfs / 32;
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
- /* Enable only the PF's pool for Tx/Rx */
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
- IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
- ixgbe_set_vmolr(hw, adapter->num_vfs, true);
- }
-
- /* Program MRQC for the distribution of queues */
- mrqc = ixgbe_setup_mrqc(adapter);
-
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- /* Fill out redirection table */
- for (i = 0, j = 0; i < 128; i++, j++) {
- if (j == adapter->ring_feature[RING_F_RSS].indices)
- j = 0;
- /* reta = 4-byte sliding window of
- * 0x00..(indices-1)(indices-1)00..etc. */
- reta = (reta << 8) | (j * 0x11);
- if ((i & 3) == 3)
- IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
- }
-
- /* Fill out hash function seeds */
- for (i = 0; i < 10; i++)
- IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
-
- if (hw->mac.type == ixgbe_mac_82598EB)
- mrqc |= IXGBE_MRQC_RSSEN;
- /* Perform hash on these packet types */
- mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
- | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
- | IXGBE_MRQC_RSS_FIELD_IPV6
- | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
- }
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
+}
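Editor's note: ixgbe_setup_rdrxctl() gathers the per-generation RDRXCTL quirks behind one switch and deliberately skips the write for unknown MACs. A sketch of that shape; the enum and bits are invented:

/* Sketch of per-generation register quirks behind one switch, with a
 * conservative "skip the write" default. Enum and bits are invented. */
#include <stdint.h>
#include <stdio.h>

enum mac_gen { MAC_OLD, MAC_NEW, MAC_UNKNOWN };

static int apply_quirks(enum mac_gen gen, uint32_t *ctl)
{
	switch (gen) {
	case MAC_OLD:
		*ctl |= 0x1;		/* older part needs an extra mode bit */
		break;
	case MAC_NEW:
		*ctl &= ~0x2;		/* newer part clears one default... */
		*ctl |= 0x4;		/* ...and requires another bit set */
		break;
	default:
		return 0;		/* unknown hardware: don't touch it */
	}
	return 1;			/* caller performs the single write */
}

int main(void)
{
	uint32_t ctl = 0x2;

	if (apply_quirks(MAC_NEW, &ctl))
		printf("write 0x%x\n", ctl);	/* prints 0x4 */
	return 0;
}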
- if (adapter->num_vfs) {
- u32 reg;
+/**
+ * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+ u32 rxctrl;
- /* Map PF MAC address in RAR Entry 0 to first pool
- * following VFs */
- hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
+ /* disable receives while setting up the descriptors */
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
- /* Set up VF register offsets for selected VT Mode, i.e.
- * 64 VFs for SR-IOV */
- reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
- reg |= IXGBE_GCR_EXT_SRIOV;
- IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, reg);
- }
+ ixgbe_setup_psrtype(adapter);
+ ixgbe_setup_rdrxctl(adapter);
- rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+ /* Program registers for the distribution of queues */
+ ixgbe_setup_mrqc(adapter);
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
- adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
- /* Disable indicating checksum in descriptor, enables
- * RSS hash */
- rxcsum |= IXGBE_RXCSUM_PCSD;
- }
- if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
- /* Enable IPv4 payload checksum for UDP fragments
- * if PCSD is not set */
- rxcsum |= IXGBE_RXCSUM_IPPCSE;
- }
+ ixgbe_set_uta(adapter);
- IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
+ /* set_rx_buffer_len must be called before ring initialization */
+ ixgbe_set_rx_buffer_len(adapter);
- if (hw->mac.type == ixgbe_mac_82599EB) {
- rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
- rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
- rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
- IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
- }
+ /*
+ * Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring
+ */
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
- if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
- /* Enable 82599 HW-RSC */
- for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbe_configure_rscctl(adapter, i);
+ /* disable drop enable for 82598 parts */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ rxctrl |= IXGBE_RXCTRL_DMBYPS;
- /* Disable RSC for ACK packets */
- IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
- (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
- }
+ /* enable all receives */
+ rxctrl |= IXGBE_RXCTRL_RXEN;
+ hw->mac.ops.enable_rx_dma(hw, rxctrl);
}
static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
@@ -2955,7 +3127,7 @@ static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
}
static void ixgbe_vlan_rx_register(struct net_device *netdev,
- struct vlan_group *grp)
+ struct vlan_group *grp)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -3052,6 +3224,11 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ /* set all bits that we expect to always be set */
+ fctrl |= IXGBE_FCTRL_BAM;
+ fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
+ fctrl |= IXGBE_FCTRL_PMCF;
+
/* clear the bits we are changing the status of */
fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
@@ -3157,6 +3334,15 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
u32 txdctl;
int i, j;
+ if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ netif_set_gso_max_size(adapter->netdev, 65536);
+ return;
+ }
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ netif_set_gso_max_size(adapter->netdev, 32768);
+
ixgbe_dcb_check_config(&adapter->dcb_cfg);
ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
@@ -3188,17 +3374,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
ixgbe_restore_vlan(adapter);
#ifdef CONFIG_IXGBE_DCB
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- if (hw->mac.type == ixgbe_mac_82598EB)
- netif_set_gso_max_size(netdev, 32768);
- else
- netif_set_gso_max_size(netdev, 65536);
- ixgbe_configure_dcb(adapter);
- } else {
- netif_set_gso_max_size(netdev, 65536);
- }
-#else
- netif_set_gso_max_size(netdev, 65536);
+ ixgbe_configure_dcb(adapter);
#endif
#ifdef IXGBE_FCOE
@@ -3209,17 +3385,15 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
for (i = 0; i < adapter->num_tx_queues; i++)
adapter->tx_ring[i]->atr_sample_rate =
- adapter->atr_sample_rate;
+ adapter->atr_sample_rate;
ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
} else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
}
+ ixgbe_configure_virtualization(adapter);
ixgbe_configure_tx(adapter);
ixgbe_configure_rx(adapter);
- for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbe_alloc_rx_buffers(adapter, adapter->rx_ring[i],
- (adapter->rx_ring[i]->count - 1));
}
static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
@@ -3290,7 +3464,8 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
goto link_cfg_out;
if (hw->mac.ops.get_link_capabilities)
- ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
+ ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
+ &negotiation);
if (ret)
goto link_cfg_out;
@@ -3300,62 +3475,15 @@ link_cfg_out:
return ret;
}
-#define IXGBE_MAX_RX_DESC_POLL 10
-static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
- int rxr)
-{
- int j = adapter->rx_ring[rxr]->reg_idx;
- int k;
-
- for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
- if (IXGBE_READ_REG(&adapter->hw,
- IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
- break;
- else
- msleep(1);
- }
- if (k >= IXGBE_MAX_RX_DESC_POLL) {
- e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
- "the polling period\n", rxr);
- }
- ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr],
- (adapter->rx_ring[rxr]->count - 1));
-}
-
-static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
+static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- int i, j = 0;
- int num_rx_rings = adapter->num_rx_queues;
- int err;
- int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
- u32 txdctl, rxdctl, mhadd;
- u32 dmatxctl;
- u32 gpie;
- u32 ctrl_ext;
-
- ixgbe_get_hw_control(adapter);
-
- if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
- (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
- gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
- IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
- } else {
- /* MSI only */
- gpie = 0;
- }
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
- gpie &= ~IXGBE_GPIE_VTMODE_MASK;
- gpie |= IXGBE_GPIE_VTMODE_64;
- }
- /* XXX: to interrupt immediately for EICS writes, enable this */
- /* gpie |= IXGBE_GPIE_EIMEN; */
- IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
- }
+ u32 gpie = 0;
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
+ IXGBE_GPIE_OCD;
+ gpie |= IXGBE_GPIE_EIAME;
/*
* use EIAM to auto-mask when MSI-X interrupt is asserted
* this saves a register write for every interrupt
@@ -3376,98 +3504,33 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
}
- /* Enable Thermal over heat sensor interrupt */
- if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
- gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
- gpie |= IXGBE_SDP0_GPIEN;
- IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+ /* XXX: to interrupt immediately for EICS writes, enable this */
+ /* gpie |= IXGBE_GPIE_EIMEN; */
+
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+ gpie |= IXGBE_GPIE_VTMODE_64;
}
- /* Enable fan failure interrupt if media type is copper */
- if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
- gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ /* Enable fan failure interrupt */
+ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
gpie |= IXGBE_SDP1_GPIEN;
- IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
- }
- if (hw->mac.type == ixgbe_mac_82599EB) {
- gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ if (hw->mac.type == ixgbe_mac_82599EB)
gpie |= IXGBE_SDP1_GPIEN;
gpie |= IXGBE_SDP2_GPIEN;
- IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
- }
-
-#ifdef IXGBE_FCOE
- /* adjust max frame to be able to do baby jumbo for FCoE */
- if ((netdev->features & NETIF_F_FCOE_MTU) &&
- (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
- max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
-#endif /* IXGBE_FCOE */
- mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
- if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
- mhadd &= ~IXGBE_MHADD_MFS_MASK;
- mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
-
- IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
- }
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i]->reg_idx;
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
- if (adapter->rx_itr_setting == 0) {
- /* cannot set wthresh when itr==0 */
- txdctl &= ~0x007F0000;
- } else {
- /* enable WTHRESH=8 descriptors, to encourage burst writeback */
- txdctl |= (8 << 16);
- }
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
- }
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+}
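Editor's note: where the old up_complete path read and rewrote GPIE in up to four separate places, ixgbe_setup_gpie() accumulates the whole value in a local and commits it with one write. A sketch of the accumulate-then-commit pattern with invented flag bits:

/* Sketch of building a config word locally and committing it with a
 * single write, instead of repeated read-modify-write cycles. */
#include <stdint.h>
#include <stdio.h>

#define F_MSIX	(1u << 0)
#define F_FAN	(1u << 1)
#define F_SFP	(1u << 2)

int main(void)
{
	int msix = 1, has_fan = 1, has_sfp = 0;
	uint32_t gpie = 0;

	if (msix)
		gpie |= F_MSIX;
	if (has_fan)
		gpie |= F_FAN;
	if (has_sfp)
		gpie |= F_SFP;

	printf("single write: 0x%x\n", gpie);	/* one MMIO write at the end */
	return 0;
}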
- if (hw->mac.type == ixgbe_mac_82599EB) {
- /* DMATXCTL.EN must be set after all Tx queue config is done */
- dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
- dmatxctl |= IXGBE_DMATXCTL_TE;
- IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
- }
- for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i]->reg_idx;
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
- txdctl |= IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
- if (hw->mac.type == ixgbe_mac_82599EB) {
- int wait_loop = 10;
- /* poll for Tx Enable ready */
- do {
- msleep(1);
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
- } while (--wait_loop &&
- !(txdctl & IXGBE_TXDCTL_ENABLE));
- if (!wait_loop)
- e_err(drv, "Could not enable Tx Queue %d\n", j);
- }
- }
+static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+ u32 ctrl_ext;
- for (i = 0; i < num_rx_rings; i++) {
- j = adapter->rx_ring[i]->reg_idx;
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
- /* enable PTHRESH=32 descriptors (half the internal cache)
- * and HTHRESH=0 descriptors (to minimize latency on fetch),
- * this also removes a pesky rx_no_buffer_count increment */
- rxdctl |= 0x0020;
- rxdctl |= IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
- if (hw->mac.type == ixgbe_mac_82599EB)
- ixgbe_rx_desc_queue_enable(adapter, i);
- }
- /* enable all receives */
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
- if (hw->mac.type == ixgbe_mac_82598EB)
- rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
- else
- rxdctl |= IXGBE_RXCTRL_RXEN;
- hw->mac.ops.enable_rx_dma(hw, rxdctl);
+ ixgbe_get_hw_control(adapter);
+ ixgbe_setup_gpie(adapter);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
ixgbe_configure_msix(adapter);
@@ -3483,7 +3546,6 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
/* clear any pending interrupts, may auto mask */
IXGBE_READ_REG(hw, IXGBE_EICR);
-
ixgbe_irq_enable(adapter);
/*
@@ -3525,12 +3587,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
e_err(probe, "link_config FAILED %d\n", err);
}
- for (i = 0; i < adapter->num_tx_queues; i++)
- set_bit(__IXGBE_FDIR_INIT_DONE,
- &(adapter->tx_ring[i]->reinit_state));
-
/* enable transmits */
- netif_tx_start_all_queues(netdev);
+ netif_tx_start_all_queues(adapter->netdev);
/* bring the link up in the watchdog, this could race with our first
* link up interrupt but shouldn't be a problem */
@@ -3609,21 +3667,24 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
* @rx_ring: ring to free buffers from
**/
static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+ struct ixgbe_ring *rx_ring)
{
struct pci_dev *pdev = adapter->pdev;
unsigned long size;
unsigned int i;
- /* Free all the Rx ring sk_buffs */
+ /* ring already cleared, nothing to do */
+ if (!rx_ring->rx_buffer_info)
+ return;
+ /* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
struct ixgbe_rx_buffer *rx_buffer_info;
rx_buffer_info = &rx_ring->rx_buffer_info[i];
if (rx_buffer_info->dma) {
dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
- rx_ring->rx_buf_len,
+ rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
}
@@ -3635,7 +3696,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
if (IXGBE_RSC_CB(this)->delay_unmap) {
dma_unmap_single(&pdev->dev,
IXGBE_RSC_CB(this)->dma,
- rx_ring->rx_buf_len,
+ rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
IXGBE_RSC_CB(this)->dma = 0;
IXGBE_RSC_CB(skb)->delay_unmap = false;
@@ -3677,14 +3738,17 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
* @tx_ring: ring to be cleaned
**/
static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+ struct ixgbe_ring *tx_ring)
{
struct ixgbe_tx_buffer *tx_buffer_info;
unsigned long size;
unsigned int i;
- /* Free all the Tx ring sk_buffs */
+ /* ring already cleared, nothing to do */
+ if (!tx_ring->tx_buffer_info)
+ return;
+ /* Free all the Tx ring sk_buffs */
for (i = 0; i < tx_ring->count; i++) {
tx_buffer_info = &tx_ring->tx_buffer_info[i];
ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
@@ -3786,13 +3850,13 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
- (txdctl & ~IXGBE_TXDCTL_ENABLE));
+ (txdctl & ~IXGBE_TXDCTL_ENABLE));
}
/* Disable the Tx DMA engine on 82599 */
if (hw->mac.type == ixgbe_mac_82599EB)
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
- (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
- ~IXGBE_DMATXCTL_TE));
+ (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
+ ~IXGBE_DMATXCTL_TE));
/* power down the optics */
if (hw->phy.multispeed_fiber)
@@ -3822,7 +3886,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
static int ixgbe_poll(struct napi_struct *napi, int budget)
{
struct ixgbe_q_vector *q_vector =
- container_of(napi, struct ixgbe_q_vector, napi);
+ container_of(napi, struct ixgbe_q_vector, napi);
struct ixgbe_adapter *adapter = q_vector->adapter;
int tx_clean_complete, work_done = 0;
@@ -3932,7 +3996,7 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
* Rx load across CPUs using RSS.
*
**/
-static bool inline ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
+static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
{
bool ret = false;
struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
@@ -4061,7 +4125,7 @@ done:
}
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
- int vectors)
+ int vectors)
{
int err, vector_threshold;
@@ -4080,7 +4144,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
*/
while (vectors >= vector_threshold) {
err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
- vectors);
+ vectors);
if (!err) /* Success in acquiring all requested vectors. */
break;
else if (err < 0)
@@ -4107,7 +4171,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
* vectors we were allocated.
*/
adapter->num_msix_vectors = min(vectors,
- adapter->max_msix_q_vectors + NON_Q_VECTORS);
+ adapter->max_msix_q_vectors + NON_Q_VECTORS);
}
}
@@ -4178,12 +4242,12 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
}
for ( ; i < 5; i++) {
adapter->tx_ring[i]->reg_idx =
- ((i + 2) << 4);
+ ((i + 2) << 4);
adapter->rx_ring[i]->reg_idx = i << 4;
}
for ( ; i < dcb_i; i++) {
adapter->tx_ring[i]->reg_idx =
- ((i + 8) << 3);
+ ((i + 8) << 3);
adapter->rx_ring[i]->reg_idx = i << 4;
}
@@ -4226,7 +4290,7 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
* Cache the descriptor ring offsets for Flow Director to the assigned rings.
*
**/
-static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
+static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
{
int i;
bool ret = false;
@@ -4383,7 +4447,7 @@ static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
adapter->node = cur_node;
}
ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
- adapter->node);
+ adapter->node);
if (!ring)
ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
if (!ring)
@@ -4407,7 +4471,7 @@ static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
adapter->node = cur_node;
}
ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
- adapter->node);
+ adapter->node);
if (!ring)
ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
if (!ring)
@@ -4453,7 +4517,7 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
* (roughly) the same number of vectors as there are CPU's.
*/
v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
- (int)num_online_cpus()) + NON_Q_VECTORS;
+ (int)num_online_cpus()) + NON_Q_VECTORS;
/*
* At the same time, hardware can only support a maximum of
@@ -4467,7 +4531,7 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
/* A failure in MSI-X entry allocation isn't fatal, but it does
* mean we disable MSI-X capabilities of the adapter. */
adapter->msix_entries = kcalloc(v_budget,
- sizeof(struct msix_entry), GFP_KERNEL);
+ sizeof(struct msix_entry), GFP_KERNEL);
if (adapter->msix_entries) {
for (vector = 0; vector < v_budget; vector++)
adapter->msix_entries[vector].entry = vector;
@@ -4529,10 +4593,10 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
- GFP_KERNEL, adapter->node);
+ GFP_KERNEL, adapter->node);
if (!q_vector)
q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!q_vector)
goto err_out;
q_vector->adapter = adapter;
@@ -4693,8 +4757,8 @@ static void ixgbe_sfp_timer(unsigned long data)
static void ixgbe_sfp_task(struct work_struct *work)
{
struct ixgbe_adapter *adapter = container_of(work,
- struct ixgbe_adapter,
- sfp_task);
+ struct ixgbe_adapter,
+ sfp_task);
struct ixgbe_hw *hw = &adapter->hw;
if ((hw->phy.type == ixgbe_phy_nl) &&
@@ -4719,7 +4783,7 @@ static void ixgbe_sfp_task(struct work_struct *work)
reschedule:
if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
mod_timer(&adapter->sfp_timer,
- round_jiffies(jiffies + (2 * HZ)));
+ round_jiffies(jiffies + (2 * HZ)));
}
/**
@@ -4775,7 +4839,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->atr_sample_rate = 20;
}
adapter->ring_feature[RING_F_FDIR].indices =
- IXGBE_MAX_FDIR_INDICES;
+ IXGBE_MAX_FDIR_INDICES;
adapter->fdir_pballoc = 0;
#ifdef IXGBE_FCOE
adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
@@ -4806,7 +4870,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->dcb_cfg.round_robin_enable = false;
adapter->dcb_set_bitmap = 0x00;
ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
- adapter->ring_feature[RING_F_DCB].indices);
+ adapter->ring_feature[RING_F_DCB].indices);
#endif
@@ -4861,7 +4925,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
* Return 0 on success, negative on failure
**/
int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+ struct ixgbe_ring *tx_ring)
{
struct pci_dev *pdev = adapter->pdev;
int size;
@@ -4928,7 +4992,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
* Returns 0 on success, negative on failure
**/
int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+ struct ixgbe_ring *rx_ring)
{
struct pci_dev *pdev = adapter->pdev;
int size;
@@ -5001,7 +5065,7 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
* Free all transmit software resources
**/
void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+ struct ixgbe_ring *tx_ring)
{
struct pci_dev *pdev = adapter->pdev;
@@ -5039,7 +5103,7 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
* Free all receive software resources
**/
void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+ struct ixgbe_ring *rx_ring)
{
struct pci_dev *pdev = adapter->pdev;
@@ -5333,6 +5397,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
u64 total_mpc = 0;
u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
u64 non_eop_descs = 0, restart_queue = 0;
+ struct ixgbe_hw_stats *hwstats = &adapter->stats;
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5343,7 +5408,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
u64 rsc_flush = 0;
for (i = 0; i < 16; i++)
adapter->hw_rx_no_dma_resources +=
- IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+ IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
for (i = 0; i < adapter->num_rx_queues; i++) {
rsc_count += adapter->rx_ring[i]->rsc_count;
rsc_flush += adapter->rx_ring[i]->rsc_flush;
@@ -5361,119 +5426,118 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
adapter->non_eop_descs = non_eop_descs;
- adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+ hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
for (i = 0; i < 8; i++) {
/* for packet buffers not used, the register should read 0 */
mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
missed_rx += mpc;
- adapter->stats.mpc[i] += mpc;
- total_mpc += adapter->stats.mpc[i];
+ hwstats->mpc[i] += mpc;
+ total_mpc += hwstats->mpc[i];
if (hw->mac.type == ixgbe_mac_82598EB)
- adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
- adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
- adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
- adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
- adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+ hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+ hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+ hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+ hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+ hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
if (hw->mac.type == ixgbe_mac_82599EB) {
- adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
- IXGBE_PXONRXCNT(i));
- adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
- IXGBE_PXOFFRXCNT(i));
- adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+ hwstats->pxonrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+ hwstats->pxoffrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+ hwstats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
} else {
- adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
- IXGBE_PXONRXC(i));
- adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
- IXGBE_PXOFFRXC(i));
+ hwstats->pxonrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+ hwstats->pxoffrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
}
- adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw,
- IXGBE_PXONTXC(i));
- adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
- IXGBE_PXOFFTXC(i));
+ hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
+ hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
}
- adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
+ hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
/* work around hardware counting issue */
- adapter->stats.gprc -= missed_rx;
+ hwstats->gprc -= missed_rx;
/* 82598 hardware only has a 32 bit counter in the high register */
if (hw->mac.type == ixgbe_mac_82599EB) {
u64 tmp;
- adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
- tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */
- adapter->stats.gorc += (tmp << 32);
- adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
- tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */
- adapter->stats.gotc += (tmp << 32);
- adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
- IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
- adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
- adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
- adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
- adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+ hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
+ tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF;
+ /* 4 high bits of GORC */
+ hwstats->gorc += (tmp << 32);
+ hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
+ tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF;
+ /* 4 high bits of GOTC */
+ hwstats->gotc += (tmp << 32);
+ hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
+ IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
+ hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
+ hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+ hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+ hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
#ifdef IXGBE_FCOE
- adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
- adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
- adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
- adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
- adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
- adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
+ hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
+ hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
+ hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
+ hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
+ hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
+ hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
#endif /* IXGBE_FCOE */
} else {
- adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
- adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
- adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
- adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
- adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+ hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+ hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+ hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+ hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+ hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
}
bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
- adapter->stats.bprc += bprc;
- adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
+ hwstats->bprc += bprc;
+ hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
if (hw->mac.type == ixgbe_mac_82598EB)
- adapter->stats.mprc -= bprc;
- adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
- adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
- adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
- adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
- adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
- adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
- adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
- adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
+ hwstats->mprc -= bprc;
+ hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
+ hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
+ hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
+ hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
+ hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
+ hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
+ hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
+ hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
- adapter->stats.lxontxc += lxon;
+ hwstats->lxontxc += lxon;
lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
- adapter->stats.lxofftxc += lxoff;
- adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
- adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
- adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
+ hwstats->lxofftxc += lxoff;
+ hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
+ hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
+ hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
/*
* 82598 errata - tx of flow control packets is included in tx counters
*/
xon_off_tot = lxon + lxoff;
- adapter->stats.gptc -= xon_off_tot;
- adapter->stats.mptc -= xon_off_tot;
- adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
- adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
- adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
- adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
- adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
- adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
- adapter->stats.ptc64 -= xon_off_tot;
- adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
- adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
- adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
- adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
- adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
- adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
+ hwstats->gptc -= xon_off_tot;
+ hwstats->mptc -= xon_off_tot;
+ hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
+ hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
+ hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
+ hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
+ hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
+ hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
+ hwstats->ptc64 -= xon_off_tot;
+ hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
+ hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
+ hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
+ hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
+ hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
+ hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
/* Fill out the OS statistics structure */
- netdev->stats.multicast = adapter->stats.mprc;
+ netdev->stats.multicast = hwstats->mprc;
/* Rx Errors */
- netdev->stats.rx_errors = adapter->stats.crcerrs +
- adapter->stats.rlec;
+ netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
netdev->stats.rx_dropped = 0;
- netdev->stats.rx_length_errors = adapter->stats.rlec;
- netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
+ netdev->stats.rx_length_errors = hwstats->rlec;
+ netdev->stats.rx_crc_errors = hwstats->crcerrs;
netdev->stats.rx_missed_errors = total_mpc;
}
@@ -5532,8 +5596,8 @@ watchdog_short_circuit:
static void ixgbe_multispeed_fiber_task(struct work_struct *work)
{
struct ixgbe_adapter *adapter = container_of(work,
- struct ixgbe_adapter,
- multispeed_fiber_task);
+ struct ixgbe_adapter,
+ multispeed_fiber_task);
struct ixgbe_hw *hw = &adapter->hw;
u32 autoneg;
bool negotiation;
@@ -5556,8 +5620,8 @@ static void ixgbe_multispeed_fiber_task(struct work_struct *work)
static void ixgbe_sfp_config_module_task(struct work_struct *work)
{
struct ixgbe_adapter *adapter = container_of(work,
- struct ixgbe_adapter,
- sfp_config_module_task);
+ struct ixgbe_adapter,
+ sfp_config_module_task);
struct ixgbe_hw *hw = &adapter->hw;
u32 err;
@@ -5590,15 +5654,15 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
static void ixgbe_fdir_reinit_task(struct work_struct *work)
{
struct ixgbe_adapter *adapter = container_of(work,
- struct ixgbe_adapter,
- fdir_reinit_task);
+ struct ixgbe_adapter,
+ fdir_reinit_task);
struct ixgbe_hw *hw = &adapter->hw;
int i;
if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
for (i = 0; i < adapter->num_tx_queues; i++)
set_bit(__IXGBE_FDIR_INIT_DONE,
- &(adapter->tx_ring[i]->reinit_state));
+ &(adapter->tx_ring[i]->reinit_state));
} else {
e_err(probe, "failed to finish FDIR re-initialization, "
"ignored adding FDIR ATR filters\n");
@@ -5616,8 +5680,8 @@ static DEFINE_MUTEX(ixgbe_watchdog_lock);
static void ixgbe_watchdog_task(struct work_struct *work)
{
struct ixgbe_adapter *adapter = container_of(work,
- struct ixgbe_adapter,
- watchdog_task);
+ struct ixgbe_adapter,
+ watchdog_task);
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
u32 link_speed;
@@ -5648,7 +5712,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
if (link_up ||
time_after(jiffies, (adapter->link_check_timeout +
- IXGBE_TRY_LINK_TIMEOUT))) {
+ IXGBE_TRY_LINK_TIMEOUT))) {
adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
}
@@ -5719,8 +5783,8 @@ static void ixgbe_watchdog_task(struct work_struct *work)
}
static int ixgbe_tso(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, u8 *hdr_len)
+ struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, u8 *hdr_len)
{
struct ixgbe_adv_tx_context_desc *context_desc;
unsigned int i;
@@ -5743,28 +5807,28 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
iph->tot_len = 0;
iph->check = 0;
tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
} else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
}
i = tx_ring->next_to_use;
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+ context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
/* VLAN MACLEN IPLEN */
if (tx_flags & IXGBE_TX_FLAGS_VLAN)
vlan_macip_lens |=
(tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
vlan_macip_lens |= ((skb_network_offset(skb)) <<
- IXGBE_ADVTXD_MACLEN_SHIFT);
+ IXGBE_ADVTXD_MACLEN_SHIFT);
*hdr_len += skb_network_offset(skb);
vlan_macip_lens |=
(skb_transport_header(skb) - skb_network_header(skb));
@@ -5775,7 +5839,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
- IXGBE_ADVTXD_DTYP_CTXT);
+ IXGBE_ADVTXD_DTYP_CTXT);
if (skb->protocol == htons(ETH_P_IP))
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
@@ -5803,9 +5867,53 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
return false;
}
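+/*
+ * ixgbe_psum - collect TUCMD checksum flags for a CHECKSUM_PARTIAL skb
+ *
+ * Factored out of ixgbe_tx_csum(): walks the (possibly VLAN-encapsulated)
+ * L3/L4 headers and returns the IXGBE_ADVTXD_TUCMD_* bits to OR into the
+ * context descriptor.
+ */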
+static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
+{
+ u32 rtn = 0;
+ __be16 protocol;
+
+ if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
+ protocol = ((const struct vlan_ethhdr *)skb->data)->
+ h_vlan_encapsulated_proto;
+ else
+ protocol = skb->protocol;
+
+ switch (protocol) {
+ case cpu_to_be16(ETH_P_IP):
+ rtn |= IXGBE_ADVTXD_TUCMD_IPV4;
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_TCP:
+ rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ break;
+ case IPPROTO_SCTP:
+ rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+ break;
+ }
+ break;
+ case cpu_to_be16(ETH_P_IPV6):
+ /* XXX what about other V6 headers?? */
+ switch (ipv6_hdr(skb)->nexthdr) {
+ case IPPROTO_TCP:
+ rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ break;
+ case IPPROTO_SCTP:
+ rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+ break;
+ }
+ break;
+ default:
+ if (unlikely(net_ratelimit()))
+ e_warn(probe, "partial checksum but proto=%x!\n",
+ skb->protocol);
+ break;
+ }
+
+ return rtn;
+}
+
static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags)
+ struct ixgbe_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags)
{
struct ixgbe_adv_tx_context_desc *context_desc;
unsigned int i;
@@ -5816,63 +5924,25 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
(tx_flags & IXGBE_TX_FLAGS_VLAN)) {
i = tx_ring->next_to_use;
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+ context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
if (tx_flags & IXGBE_TX_FLAGS_VLAN)
vlan_macip_lens |=
(tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
vlan_macip_lens |= (skb_network_offset(skb) <<
- IXGBE_ADVTXD_MACLEN_SHIFT);
+ IXGBE_ADVTXD_MACLEN_SHIFT);
if (skb->ip_summed == CHECKSUM_PARTIAL)
vlan_macip_lens |= (skb_transport_header(skb) -
- skb_network_header(skb));
+ skb_network_header(skb));
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
context_desc->seqnum_seed = 0;
type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
- IXGBE_ADVTXD_DTYP_CTXT);
-
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- __be16 protocol;
-
- if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
- const struct vlan_ethhdr *vhdr =
- (const struct vlan_ethhdr *)skb->data;
-
- protocol = vhdr->h_vlan_encapsulated_proto;
- } else {
- protocol = skb->protocol;
- }
+ IXGBE_ADVTXD_DTYP_CTXT);
- switch (protocol) {
- case cpu_to_be16(ETH_P_IP):
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
- type_tucmd_mlhl |=
- IXGBE_ADVTXD_TUCMD_L4T_TCP;
- else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
- type_tucmd_mlhl |=
- IXGBE_ADVTXD_TUCMD_L4T_SCTP;
- break;
- case cpu_to_be16(ETH_P_IPV6):
- /* XXX what about other V6 headers?? */
- if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
- type_tucmd_mlhl |=
- IXGBE_ADVTXD_TUCMD_L4T_TCP;
- else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
- type_tucmd_mlhl |=
- IXGBE_ADVTXD_TUCMD_L4T_SCTP;
- break;
- default:
- if (unlikely(net_ratelimit())) {
- e_warn(probe, "partial checksum "
- "but proto=%x!\n",
- skb->protocol);
- }
- break;
- }
- }
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ type_tucmd_mlhl |= ixgbe_psum(adapter, skb);
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
/* use index zero for tx checksum offload */
@@ -5893,9 +5963,9 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
}
static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags,
- unsigned int first)
+ struct ixgbe_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags,
+ unsigned int first)
{
struct pci_dev *pdev = adapter->pdev;
struct ixgbe_tx_buffer *tx_buffer_info;
@@ -5990,7 +6060,7 @@ dma_error:
/* clear timestamp and dma mappings for remaining portion of packet */
while (count--) {
- if (i==0)
+ if (i == 0)
i += tx_ring->count;
i--;
tx_buffer_info = &tx_ring->tx_buffer_info[i];
@@ -6001,8 +6071,8 @@ dma_error:
}
static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring,
- int tx_flags, int count, u32 paylen, u8 hdr_len)
+ struct ixgbe_ring *tx_ring,
+ int tx_flags, int count, u32 paylen, u8 hdr_len)
{
union ixgbe_adv_tx_desc *tx_desc = NULL;
struct ixgbe_tx_buffer *tx_buffer_info;
@@ -6021,17 +6091,17 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
- IXGBE_ADVTXD_POPTS_SHIFT;
+ IXGBE_ADVTXD_POPTS_SHIFT;
/* use index 1 context for tso */
olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
if (tx_flags & IXGBE_TX_FLAGS_IPV4)
olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
- IXGBE_ADVTXD_POPTS_SHIFT;
+ IXGBE_ADVTXD_POPTS_SHIFT;
} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
- IXGBE_ADVTXD_POPTS_SHIFT;
+ IXGBE_ADVTXD_POPTS_SHIFT;
if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
olinfo_status |= IXGBE_ADVTXD_CC;
@@ -6045,10 +6115,10 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
i = tx_ring->next_to_use;
while (count--) {
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
tx_desc->read.cmd_type_len =
- cpu_to_le32(cmd_type_len | tx_buffer_info->length);
+ cpu_to_le32(cmd_type_len | tx_buffer_info->length);
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
i++;
if (i == tx_ring->count)
@@ -6070,7 +6140,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
}
static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
- int queue, u32 tx_flags)
+ int queue, u32 tx_flags)
{
struct ixgbe_atr_input atr_input;
struct tcphdr *th;
@@ -6098,7 +6168,7 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
- IXGBE_TX_FLAGS_VLAN_SHIFT;
+ IXGBE_TX_FLAGS_VLAN_SHIFT;
src_ipv4_addr = iph->saddr;
dst_ipv4_addr = iph->daddr;
flex_bytes = eth->h_proto;
@@ -6117,7 +6187,7 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
}
static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
- struct ixgbe_ring *tx_ring, int size)
+ struct ixgbe_ring *tx_ring, int size)
{
netif_stop_subqueue(netdev, tx_ring->queue_index);
/* Herbert's original patch had:
@@ -6137,7 +6207,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
}
static int ixgbe_maybe_stop_tx(struct net_device *netdev,
- struct ixgbe_ring *tx_ring, int size)
+ struct ixgbe_ring *tx_ring, int size)
{
if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
return 0;
@@ -6183,11 +6253,10 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
return skb_tx_hash(dev, skb);
}
-static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
- struct net_device *netdev)
+netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev,
+ struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *tx_ring)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_ring *tx_ring;
struct netdev_queue *txq;
unsigned int first;
unsigned int tx_flags = 0;
@@ -6211,8 +6280,6 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
tx_flags |= IXGBE_TX_FLAGS_VLAN;
}
- tx_ring = adapter->tx_ring[skb->queue_mapping];
-
#ifdef IXGBE_FCOE
/* for FCoE with DCB, we force the priority to what
* was specified by the switch */
@@ -6283,10 +6350,10 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
if (tx_ring->atr_sample_rate) {
++tx_ring->atr_count;
if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
- test_bit(__IXGBE_FDIR_INIT_DONE,
- &tx_ring->reinit_state)) {
+ test_bit(__IXGBE_FDIR_INIT_DONE,
+ &tx_ring->reinit_state)) {
ixgbe_atr(adapter, skb, tx_ring->queue_index,
- tx_flags);
+ tx_flags);
tx_ring->atr_count = 0;
}
}
@@ -6294,7 +6361,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
txq->tx_bytes += skb->len;
txq->tx_packets++;
ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
- hdr_len);
+ hdr_len);
ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
} else {
@@ -6306,6 +6373,15 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_OK;
}
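+/*
+ * ixgbe_xmit_frame - resolve the tx ring from the skb's queue mapping and
+ * hand the frame to ixgbe_xmit_frame_ring().
+ */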
+static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_ring *tx_ring;
+
+ tx_ring = adapter->tx_ring[skb->queue_mapping];
+ return ixgbe_xmit_frame_ring(skb, netdev, adapter, tx_ring);
+}
+
/**
* ixgbe_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
@@ -6437,7 +6513,7 @@ static void ixgbe_netpoll(struct net_device *netdev)
#endif
static const struct net_device_ops ixgbe_netdev_ops = {
- .ndo_open = ixgbe_open,
+ .ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
.ndo_start_xmit = ixgbe_xmit_frame,
.ndo_select_queue = ixgbe_select_queue,
@@ -6532,7 +6608,7 @@ err_novfs:
* and a hardware reset occur.
**/
static int __devinit ixgbe_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ const struct pci_device_id *ent)
{
struct net_device *netdev;
struct ixgbe_adapter *adapter = NULL;
@@ -6577,7 +6653,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
}
err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
- IORESOURCE_MEM), ixgbe_driver_name);
+ IORESOURCE_MEM), ixgbe_driver_name);
if (err) {
dev_err(&pdev->dev,
"pci_request_selected_regions failed 0x%x\n", err);
@@ -6617,7 +6693,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
+ pci_resource_len(pdev, 0));
if (!hw->hw_addr) {
err = -EIO;
goto err_ioremap;
@@ -6661,7 +6737,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
* which might start the timer
*/
init_timer(&adapter->sfp_timer);
- adapter->sfp_timer.function = &ixgbe_sfp_timer;
+ adapter->sfp_timer.function = ixgbe_sfp_timer;
adapter->sfp_timer.data = (unsigned long) adapter;
INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);
@@ -6671,7 +6747,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
/* a new SFP+ module arrival, called from GPI SDP2 context */
INIT_WORK(&adapter->sfp_config_module_task,
- ixgbe_sfp_config_module_task);
+ ixgbe_sfp_config_module_task);
ii->get_invariants(hw);
@@ -6723,10 +6799,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
ixgbe_probe_vf(adapter, ii);
netdev->features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_FILTER;
+ NETIF_F_IP_CSUM |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
netdev->features |= NETIF_F_IPV6_CSUM;
netdev->features |= NETIF_F_TSO;
@@ -6793,7 +6869,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
hw->mac.ops.disable_tx_laser(hw);
init_timer(&adapter->watchdog_timer);
- adapter->watchdog_timer.function = &ixgbe_watchdog;
+ adapter->watchdog_timer.function = ixgbe_watchdog;
adapter->watchdog_timer.data = (unsigned long)adapter;
INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
@@ -6806,7 +6882,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
switch (pdev->device) {
case IXGBE_DEV_ID_82599_KX4:
adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
- IXGBE_WUFC_MC | IXGBE_WUFC_BC);
+ IXGBE_WUFC_MC | IXGBE_WUFC_BC);
break;
default:
adapter->wol = 0;
@@ -6819,13 +6895,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
/* print bus type/speed/width info */
e_dev_info("(PCI Express:%s:%s) %pM\n",
- ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
- (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
- ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
- (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
- (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
- "Unknown"),
- netdev->dev_addr);
+ (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0Gb/s" :
+ hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5Gb/s" :
+ "Unknown"),
+ (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
+ hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
+ hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
+ "Unknown"),
+ netdev->dev_addr);
ixgbe_read_pba_num_generic(hw, &part_num);
if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
e_dev_info("MAC: %d, PHY: %d, SFP+: %d, "
@@ -6872,7 +6949,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);
if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
- INIT_WORK(&adapter->check_overtemp_task, ixgbe_check_overtemp_task);
+ INIT_WORK(&adapter->check_overtemp_task,
+ ixgbe_check_overtemp_task);
#ifdef CONFIG_IXGBE_DCA
if (dca_add_requester(&pdev->dev) == 0) {
adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
@@ -6908,8 +6986,8 @@ err_eeprom:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_release_selected_regions(pdev, pci_select_bars(pdev,
- IORESOURCE_MEM));
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
pci_disable_device(pdev);
@@ -6976,7 +7054,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
iounmap(adapter->hw.hw_addr);
pci_release_selected_regions(pdev, pci_select_bars(pdev,
- IORESOURCE_MEM));
+ IORESOURCE_MEM));
e_dev_info("complete\n");
@@ -6996,7 +7074,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
* this device has been detected.
*/
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
+ pci_channel_state_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -7102,8 +7180,7 @@ static struct pci_driver ixgbe_driver = {
static int __init ixgbe_init_module(void)
{
int ret;
- pr_info("%s - version %s\n", ixgbe_driver_string,
- ixgbe_driver_version);
+ pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
pr_info("%s\n", ixgbe_copyright);
#ifdef CONFIG_IXGBE_DCA
@@ -7132,12 +7209,12 @@ static void __exit ixgbe_exit_module(void)
#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
- void *p)
+ void *p)
{
int ret_val;
ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
- __ixgbe_notify_dca);
+ __ixgbe_notify_dca);
return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 9587d975d66c..d3cc6ce7c973 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -871,6 +871,8 @@
#define IXGBE_RDRXCTL_MVMEN 0x00000020
#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */
#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */
+#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */
+#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */
/* RQTC Bit Masks and Shifts */
#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4)
diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h
index f7015efbff05..da4033c6efa2 100644
--- a/drivers/net/ixgbevf/ixgbevf.h
+++ b/drivers/net/ixgbevf/ixgbevf.h
@@ -243,7 +243,6 @@ struct ixgbevf_adapter {
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
- struct net_device_stats net_stats;
/* structs defined in ixgbe_vf.h */
struct ixgbe_hw hw;
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 918c00359b0a..3eda1bdbbb7a 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -308,8 +308,8 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
tx_ring->total_bytes += total_bytes;
tx_ring->total_packets += total_packets;
- adapter->net_stats.tx_bytes += total_bytes;
- adapter->net_stats.tx_packets += total_packets;
+ netdev->stats.tx_bytes += total_bytes;
+ netdev->stats.tx_packets += total_packets;
return (count < tx_ring->work_limit);
}
@@ -356,7 +356,7 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
u32 status_err, struct sk_buff *skb)
{
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
/* Rx csum disabled */
if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
@@ -639,8 +639,8 @@ next_desc:
rx_ring->total_packets += total_rx_packets;
rx_ring->total_bytes += total_rx_bytes;
- adapter->net_stats.rx_bytes += total_rx_bytes;
- adapter->net_stats.rx_packets += total_rx_packets;
+ adapter->netdev->stats.rx_bytes += total_rx_bytes;
+ adapter->netdev->stats.rx_packets += total_rx_packets;
return cleaned;
}
@@ -2297,7 +2297,7 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
adapter->stats.vfmprc);
/* Fill out the OS statistics structure */
- adapter->net_stats.multicast = adapter->stats.vfmprc -
+ adapter->netdev->stats.multicast = adapter->stats.vfmprc -
adapter->stats.base_vfmprc;
}
@@ -3181,21 +3181,6 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
/**
- * ixgbevf_get_stats - Get System Network Statistics
- * @netdev: network interface device structure
- *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
- **/
-static struct net_device_stats *ixgbevf_get_stats(struct net_device *netdev)
-{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-
- /* only return the current stats */
- return &adapter->net_stats;
-}
-
-/**
* ixgbevf_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
@@ -3272,7 +3257,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = &ixgbevf_open,
.ndo_stop = &ixgbevf_close,
.ndo_start_xmit = &ixgbevf_xmit_frame,
- .ndo_get_stats = &ixgbevf_get_stats,
.ndo_set_rx_mode = &ixgbevf_set_rx_mode,
.ndo_set_multicast_list = &ixgbevf_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
@@ -3426,7 +3410,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
}
init_timer(&adapter->watchdog_timer);
- adapter->watchdog_timer.function = &ixgbevf_watchdog;
+ adapter->watchdog_timer.function = ixgbevf_watchdog;
adapter->watchdog_timer.data = (unsigned long)adapter;
INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
index 94b750b8874f..61f9dc831424 100644
--- a/drivers/net/ixgbevf/vf.h
+++ b/drivers/net/ixgbevf/vf.h
@@ -124,8 +124,6 @@ struct ixgbe_hw {
void *back;
u8 __iomem *hw_addr;
- u8 *flash_address;
- unsigned long io_base;
struct ixgbe_mac_info mac;
struct ixgbe_mbx_info mbx;
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 99f24f5cac53..c04c096bc6a9 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -21,6 +21,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
@@ -73,7 +75,7 @@ read_again:
}
if (i == 0) {
- jeprintk(jme->pdev, "phy(%d) read timeout : %d\n", phy, reg);
+ pr_err("phy(%d) read timeout : %d\n", phy, reg);
return 0;
}
@@ -102,7 +104,7 @@ jme_mdio_write(struct net_device *netdev,
}
if (i == 0)
- jeprintk(jme->pdev, "phy(%d) write timeout : %d\n", phy, reg);
+ pr_err("phy(%d) write timeout : %d\n", phy, reg);
}
static inline void
@@ -227,7 +229,7 @@ jme_reload_eeprom(struct jme_adapter *jme)
}
if (i == 0) {
- jeprintk(jme->pdev, "eeprom reload timeout\n");
+ pr_err("eeprom reload timeout\n");
return -EIO;
}
}
@@ -397,8 +399,7 @@ jme_check_link(struct net_device *netdev, int testonly)
phylink = jread32(jme, JME_PHY_LINK);
}
if (!cnt)
- jeprintk(jme->pdev,
- "Waiting speed resolve timeout.\n");
+ pr_err("Waiting speed resolve timeout\n");
strcat(linkmsg, "ANed: ");
}
@@ -480,13 +481,13 @@ jme_check_link(struct net_device *netdev, int testonly)
strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ?
"MDI-X" :
"MDI");
- netif_info(jme, link, jme->dev, "Link is up at %s.\n", linkmsg);
+ netif_info(jme, link, jme->dev, "Link is up at %s\n", linkmsg);
netif_carrier_on(netdev);
} else {
if (testonly)
goto out;
- netif_info(jme, link, jme->dev, "Link is down.\n");
+ netif_info(jme, link, jme->dev, "Link is down\n");
jme->phylink = 0;
netif_carrier_off(netdev);
}
@@ -648,7 +649,7 @@ jme_disable_tx_engine(struct jme_adapter *jme)
}
if (!i)
- jeprintk(jme->pdev, "Disable TX engine timeout.\n");
+ pr_err("Disable TX engine timeout\n");
}
static void
@@ -867,7 +868,7 @@ jme_disable_rx_engine(struct jme_adapter *jme)
}
if (!i)
- jeprintk(jme->pdev, "Disable RX engine timeout.\n");
+ pr_err("Disable RX engine timeout\n");
}
@@ -887,13 +888,13 @@ jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
== RXWBFLAG_UDPON)) {
if (flags & RXWBFLAG_IPV4)
- netif_err(jme, rx_err, jme->dev, "UDP Checksum error.\n");
+ netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
return false;
}
if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS))
== RXWBFLAG_IPV4)) {
- netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error.\n");
+ netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error\n");
return false;
}
@@ -936,7 +937,7 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
if (jme->vlgrp) {
@@ -1185,9 +1186,9 @@ jme_link_change_tasklet(unsigned long arg)
while (!atomic_dec_and_test(&jme->link_changing)) {
atomic_inc(&jme->link_changing);
- netif_info(jme, intr, jme->dev, "Get link change lock failed.\n");
+ netif_info(jme, intr, jme->dev, "Get link change lock failed\n");
while (atomic_read(&jme->link_changing) != 1)
- netif_info(jme, intr, jme->dev, "Waiting link change lock.\n");
+ netif_info(jme, intr, jme->dev, "Waiting link change lock\n");
}
if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
@@ -1221,15 +1222,13 @@ jme_link_change_tasklet(unsigned long arg)
if (netif_carrier_ok(netdev)) {
rc = jme_setup_rx_resources(jme);
if (rc) {
- jeprintk(jme->pdev, "Allocating resources for RX error"
- ", Device STOPPED!\n");
+ pr_err("Allocating resources for RX error, Device STOPPED!\n");
goto out_enable_tasklet;
}
rc = jme_setup_tx_resources(jme);
if (rc) {
- jeprintk(jme->pdev, "Allocating resources for TX error"
- ", Device STOPPED!\n");
+ pr_err("Allocating resources for TX error, Device STOPPED!\n");
goto err_out_free_rx_resources;
}
@@ -1324,7 +1323,7 @@ jme_wake_queue_if_stopped(struct jme_adapter *jme)
smp_wmb();
if (unlikely(netif_queue_stopped(jme->dev) &&
atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
- netif_info(jme, tx_done, jme->dev, "TX Queue Waked.\n");
+ netif_info(jme, tx_done, jme->dev, "TX Queue Waked\n");
netif_wake_queue(jme->dev);
}
@@ -1339,7 +1338,7 @@ jme_tx_clean_tasklet(unsigned long arg)
struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
int i, j, cnt = 0, max, err, mask;
- tx_dbg(jme, "Into txclean.\n");
+ tx_dbg(jme, "Into txclean\n");
if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
goto out;
@@ -1361,7 +1360,7 @@ jme_tx_clean_tasklet(unsigned long arg)
!(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {
tx_dbg(jme, "txclean: %d+%d@%lu\n",
- i, ctxbi->nr_desc, jiffies);
+ i, ctxbi->nr_desc, jiffies);
err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;
@@ -1402,7 +1401,7 @@ jme_tx_clean_tasklet(unsigned long arg)
ctxbi->nr_desc = 0;
}
- tx_dbg(jme, "txclean: done %d@%lu.\n", i, jiffies);
+ tx_dbg(jme, "txclean: done %d@%lu\n", i, jiffies);
atomic_set(&txring->next_to_clean, i);
atomic_add(cnt, &txring->nr_free);
@@ -1548,10 +1547,10 @@ jme_request_irq(struct jme_adapter *jme)
rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
netdev);
if (rc) {
- jeprintk(jme->pdev,
- "Unable to request %s interrupt (return: %d)\n",
- test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx",
- rc);
+ netdev_err(netdev,
+ "Unable to request %s interrupt (return: %d)\n",
+ test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx",
+ rc);
if (test_bit(JME_FLAG_MSI, &jme->flags)) {
pci_disable_msi(jme->pdev);
@@ -1834,7 +1833,7 @@ jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
*flags |= TXFLAG_UDPCS;
break;
default:
- netif_err(jme, tx_err, jme->dev, "Error upper layer protocol.\n");
+ netif_err(jme, tx_err, jme->dev, "Error upper layer protocol\n");
break;
}
}
@@ -1909,12 +1908,12 @@ jme_stop_queue_if_full(struct jme_adapter *jme)
smp_wmb();
if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
netif_stop_queue(jme->dev);
- netif_info(jme, tx_queued, jme->dev, "TX Queue Paused.\n");
+ netif_info(jme, tx_queued, jme->dev, "TX Queue Paused\n");
smp_wmb();
if (atomic_read(&txring->nr_free)
>= (jme->tx_wake_threshold)) {
netif_wake_queue(jme->dev);
- netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Waked.\n");
+ netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Waked\n");
}
}
@@ -1922,7 +1921,8 @@ jme_stop_queue_if_full(struct jme_adapter *jme)
(jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
txbi->skb)) {
netif_stop_queue(jme->dev);
- netif_info(jme, tx_queued, jme->dev, "TX Queue Stopped %d@%lu.\n", idx, jiffies);
+ netif_info(jme, tx_queued, jme->dev,
+ "TX Queue Stopped %d@%lu\n", idx, jiffies);
}
}
@@ -1945,7 +1945,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (unlikely(idx < 0)) {
netif_stop_queue(netdev);
- netif_err(jme, tx_err, jme->dev, "BUG! Tx ring full when queue awake!\n");
+ netif_err(jme, tx_err, jme->dev,
+ "BUG! Tx ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
@@ -1957,9 +1958,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
TXCS_QUEUE0S |
TXCS_ENABLE);
- tx_dbg(jme, "xmit: %d+%d@%lu\n", idx,
- skb_shinfo(skb)->nr_frags + 2,
- jiffies);
+ tx_dbg(jme, "xmit: %d+%d@%lu\n",
+ idx, skb_shinfo(skb)->nr_frags + 2, jiffies);
jme_stop_queue_if_full(jme);
return NETDEV_TX_OK;
@@ -2501,7 +2501,7 @@ jme_smb_read(struct jme_adapter *jme, unsigned int addr)
val = jread32(jme, JME_SMBCSR);
}
if (!to) {
- netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
return 0xFF;
}
@@ -2517,7 +2517,7 @@ jme_smb_read(struct jme_adapter *jme, unsigned int addr)
val = jread32(jme, JME_SMBINTF);
}
if (!to) {
- netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
return 0xFF;
}
@@ -2537,7 +2537,7 @@ jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
val = jread32(jme, JME_SMBCSR);
}
if (!to) {
- netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
return;
}
@@ -2554,7 +2554,7 @@ jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
val = jread32(jme, JME_SMBINTF);
}
if (!to) {
- netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
return;
}
@@ -2699,26 +2699,26 @@ jme_init_one(struct pci_dev *pdev,
*/
rc = pci_enable_device(pdev);
if (rc) {
- jeprintk(pdev, "Cannot enable PCI device.\n");
+ pr_err("Cannot enable PCI device\n");
goto err_out;
}
using_dac = jme_pci_dma64(pdev);
if (using_dac < 0) {
- jeprintk(pdev, "Cannot set PCI DMA Mask.\n");
+ pr_err("Cannot set PCI DMA Mask\n");
rc = -EIO;
goto err_out_disable_pdev;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- jeprintk(pdev, "No PCI resource region found.\n");
+ pr_err("No PCI resource region found\n");
rc = -ENOMEM;
goto err_out_disable_pdev;
}
rc = pci_request_regions(pdev, DRV_NAME);
if (rc) {
- jeprintk(pdev, "Cannot obtain PCI resource region.\n");
+ pr_err("Cannot obtain PCI resource region\n");
goto err_out_disable_pdev;
}
@@ -2729,7 +2729,7 @@ jme_init_one(struct pci_dev *pdev,
*/
netdev = alloc_etherdev(sizeof(*jme));
if (!netdev) {
- jeprintk(pdev, "Cannot allocate netdev structure.\n");
+ pr_err("Cannot allocate netdev structure\n");
rc = -ENOMEM;
goto err_out_release_regions;
}
@@ -2767,7 +2767,7 @@ jme_init_one(struct pci_dev *pdev,
jme->regs = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (!(jme->regs)) {
- jeprintk(pdev, "Mapping PCI resource region error.\n");
+ pr_err("Mapping PCI resource region error\n");
rc = -ENOMEM;
goto err_out_free_netdev;
}
@@ -2855,8 +2855,8 @@ jme_init_one(struct pci_dev *pdev,
if (!jme->mii_if.phy_id) {
rc = -EIO;
- jeprintk(pdev, "Can not find phy_id.\n");
- goto err_out_unmap;
+ pr_err("Can not find phy_id\n");
+ goto err_out_unmap;
}
jme->reg_ghc |= GHC_LINK_POLL;
@@ -2883,8 +2883,7 @@ jme_init_one(struct pci_dev *pdev,
jme_reset_mac_processor(jme);
rc = jme_reload_eeprom(jme);
if (rc) {
- jeprintk(pdev,
- "Reload eeprom for reading MAC Address error.\n");
+ pr_err("Reload eeprom for reading MAC Address error\n");
goto err_out_unmap;
}
jme_load_macaddr(netdev);
@@ -2900,7 +2899,7 @@ jme_init_one(struct pci_dev *pdev,
*/
rc = register_netdev(netdev);
if (rc) {
- jeprintk(pdev, "Cannot register net device.\n");
+ pr_err("Cannot register net device\n");
goto err_out_unmap;
}
@@ -3042,8 +3041,7 @@ static struct pci_driver jme_driver = {
static int __init
jme_init_module(void)
{
- printk(KERN_INFO PFX "JMicron JMC2XX ethernet "
- "driver version %s\n", DRV_VERSION);
+ pr_info("JMicron JMC2XX ethernet driver version %s\n", DRV_VERSION);
return pci_register_driver(&jme_driver);
}
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index 07ad3a457185..1360f68861b8 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -41,9 +41,6 @@
NETIF_MSG_TX_ERR | \
NETIF_MSG_HW)
-#define jeprintk(pdev, fmt, args...) \
- printk(KERN_ERR PFX fmt, ## args)
-
#ifdef TX_DEBUG
#define tx_dbg(priv, fmt, args...) \
printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ##args)
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index b4fb07a6f13f..51919fcd50c2 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -503,30 +503,33 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
ks8851_wrreg16(ks, KS_RXQCR,
ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
- if (rxlen > 0) {
- skb = netdev_alloc_skb(ks->netdev, rxlen + 2 + 8);
- if (!skb) {
- /* todo - dump frame and move on */
- }
+ if (rxlen > 4) {
+ unsigned int rxalign;
+
+ rxlen -= 4;
+ rxalign = ALIGN(rxlen, 4);
+ skb = netdev_alloc_skb_ip_align(ks->netdev, rxalign);
+ if (skb) {
- /* two bytes to ensure ip is aligned, and four bytes
- * for the status header and 4 bytes of garbage */
- skb_reserve(skb, 2 + 4 + 4);
+ /* 4 bytes of status header + 4 bytes of
+ * garbage: we put them before ethernet
+ * header, so that they are copied,
+ * but ignored.
+ */
- rxpkt = skb_put(skb, rxlen - 4) - 8;
+ rxpkt = skb_put(skb, rxlen) - 8;
- /* align the packet length to 4 bytes, and add 4 bytes
- * as we're getting the rx status header as well */
- ks8851_rdfifo(ks, rxpkt, ALIGN(rxlen, 4) + 8);
+ ks8851_rdfifo(ks, rxpkt, rxalign + 8);
- if (netif_msg_pktdata(ks))
- ks8851_dbg_dumpkkt(ks, rxpkt);
+ if (netif_msg_pktdata(ks))
+ ks8851_dbg_dumpkkt(ks, rxpkt);
- skb->protocol = eth_type_trans(skb, ks->netdev);
- netif_rx(skb);
+ skb->protocol = eth_type_trans(skb, ks->netdev);
+ netif_rx(skb);
- ks->netdev->stats.rx_packets++;
- ks->netdev->stats.rx_bytes += rxlen - 4;
+ ks->netdev->stats.rx_packets++;
+ ks->netdev->stats.rx_bytes += rxlen;
+ }
}
ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index 4eea3f70c5cf..874ee01e8d9d 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -159,7 +159,7 @@ static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
* temac_dcr_setup - If the DMA is DCR based, then setup the address and
* I/O functions
*/
-static int temac_dcr_setup(struct temac_local *lp, struct of_device *op,
+static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
struct device_node *np)
{
unsigned int dcrs;
@@ -184,7 +184,7 @@ static int temac_dcr_setup(struct temac_local *lp, struct of_device *op,
* temac_dcr_setup - This is a stub for when DCR is not supported,
* such as with MicroBlaze
*/
-static int temac_dcr_setup(struct temac_local *lp, struct of_device *op,
+static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
struct device_node *np)
{
return -1;
@@ -760,7 +760,7 @@ static void ll_temac_recv(struct net_device *ndev)
skb_put(skb, length);
skb->dev = ndev;
skb->protocol = eth_type_trans(skb, ndev);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
/* if we're doing rx csum offload, set it up */
if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
@@ -902,8 +902,8 @@ temac_poll_controller(struct net_device *ndev)
disable_irq(lp->tx_irq);
disable_irq(lp->rx_irq);
- ll_temac_rx_irq(lp->tx_irq, lp);
- ll_temac_tx_irq(lp->rx_irq, lp);
+ ll_temac_rx_irq(lp->tx_irq, ndev);
+ ll_temac_tx_irq(lp->rx_irq, ndev);
enable_irq(lp->tx_irq);
enable_irq(lp->rx_irq);
@@ -952,7 +952,7 @@ static const struct attribute_group temac_attr_group = {
};
static int __init
-temac_of_probe(struct of_device *op, const struct of_device_id *match)
+temac_of_probe(struct platform_device *op, const struct of_device_id *match)
{
struct device_node *np;
struct temac_local *lp;
@@ -1094,7 +1094,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
return rc;
}
-static int __devexit temac_of_remove(struct of_device *op)
+static int __devexit temac_of_remove(struct platform_device *op)
{
struct net_device *ndev = dev_get_drvdata(&op->dev);
struct temac_local *lp = netdev_priv(ndev);
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index 3832fa4961dd..f84f5e6ededb 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -562,19 +562,19 @@ static int __init mac8390_initdev(struct net_device *dev,
case ACCESS_16:
/* 16 bit card, register map is reversed */
- ei_status.reset_8390 = &mac8390_no_reset;
- ei_status.block_input = &slow_sane_block_input;
- ei_status.block_output = &slow_sane_block_output;
- ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
+ ei_status.reset_8390 = mac8390_no_reset;
+ ei_status.block_input = slow_sane_block_input;
+ ei_status.block_output = slow_sane_block_output;
+ ei_status.get_8390_hdr = slow_sane_get_8390_hdr;
ei_status.reg_offset = back4_offsets;
break;
case ACCESS_32:
/* 32 bit card, register map is reversed */
- ei_status.reset_8390 = &mac8390_no_reset;
- ei_status.block_input = &sane_block_input;
- ei_status.block_output = &sane_block_output;
- ei_status.get_8390_hdr = &sane_get_8390_hdr;
+ ei_status.reset_8390 = mac8390_no_reset;
+ ei_status.block_input = sane_block_input;
+ ei_status.block_output = sane_block_output;
+ ei_status.get_8390_hdr = sane_get_8390_hdr;
ei_status.reg_offset = back4_offsets;
access_bitmode = 1;
break;
@@ -586,19 +586,19 @@ static int __init mac8390_initdev(struct net_device *dev,
* but overwrite system memory when run at 32 bit.
* so we run them all at 16 bit.
*/
- ei_status.reset_8390 = &mac8390_no_reset;
- ei_status.block_input = &slow_sane_block_input;
- ei_status.block_output = &slow_sane_block_output;
- ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
+ ei_status.reset_8390 = mac8390_no_reset;
+ ei_status.block_input = slow_sane_block_input;
+ ei_status.block_output = slow_sane_block_output;
+ ei_status.get_8390_hdr = slow_sane_get_8390_hdr;
ei_status.reg_offset = back4_offsets;
break;
case MAC8390_CABLETRON:
/* 16 bit card, register map is short forward */
- ei_status.reset_8390 = &mac8390_no_reset;
- ei_status.block_input = &slow_sane_block_input;
- ei_status.block_output = &slow_sane_block_output;
- ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
+ ei_status.reset_8390 = mac8390_no_reset;
+ ei_status.block_input = slow_sane_block_input;
+ ei_status.block_output = slow_sane_block_output;
+ ei_status.get_8390_hdr = slow_sane_get_8390_hdr;
ei_status.reg_offset = fwrd2_offsets;
break;
@@ -606,19 +606,19 @@ static int __init mac8390_initdev(struct net_device *dev,
case MAC8390_KINETICS:
/* 16 bit memory, register map is forward */
/* dayna and similar */
- ei_status.reset_8390 = &mac8390_no_reset;
- ei_status.block_input = &dayna_block_input;
- ei_status.block_output = &dayna_block_output;
- ei_status.get_8390_hdr = &dayna_get_8390_hdr;
+ ei_status.reset_8390 = mac8390_no_reset;
+ ei_status.block_input = dayna_block_input;
+ ei_status.block_output = dayna_block_output;
+ ei_status.get_8390_hdr = dayna_get_8390_hdr;
ei_status.reg_offset = fwrd4_offsets;
break;
case MAC8390_INTERLAN:
/* 16 bit memory, register map is forward */
- ei_status.reset_8390 = &interlan_reset;
- ei_status.block_input = &slow_sane_block_input;
- ei_status.block_output = &slow_sane_block_output;
- ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
+ ei_status.reset_8390 = interlan_reset;
+ ei_status.block_input = slow_sane_block_input;
+ ei_status.block_output = slow_sane_block_output;
+ ei_status.get_8390_hdr = slow_sane_get_8390_hdr;
ei_status.reg_offset = fwrd4_offsets;
break;
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index ff2f158ab0b9..4297f6e8c4bc 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -407,7 +407,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
}
skb_reserve(skb, RX_OFFSET);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
skb_put(skb, len);
for (frag = first_frag; ; frag = NEXT_RX(frag)) {
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 0ef0eb0db945..0fc9dc7f20db 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -788,6 +788,10 @@ static int macvlan_device_event(struct notifier_block *unused,
}
break;
case NETDEV_UNREGISTER:
+ /* twiddle thumbs on netns device moves */
+ if (dev->reg_state != NETREG_UNREGISTERING)
+ break;
+
list_for_each_entry_safe(vlan, next, &port->vlans, list)
vlan->dev->rtnl_link_ops->dellink(vlan->dev, NULL);
break;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 3b1c54a9c6ef..42567279843e 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -84,26 +84,45 @@ static const struct proto_ops macvtap_socket_ops;
static DEFINE_SPINLOCK(macvtap_lock);
/*
- * Choose the next free queue, for now there is only one
+ * get_slot: return a [unused/occupied] slot in vlan->taps[]:
+ * - if 'q' is NULL, return the first empty slot;
+ * - otherwise, return the slot this pointer occupies.
*/
+static int get_slot(struct macvlan_dev *vlan, struct macvtap_queue *q)
+{
+ int i;
+
+ for (i = 0; i < MAX_MACVTAP_QUEUES; i++) {
+ if (rcu_dereference(vlan->taps[i]) == q)
+ return i;
+ }
+
+ /* Should never happen */
+ BUG_ON(1);
+}
+
static int macvtap_set_queue(struct net_device *dev, struct file *file,
struct macvtap_queue *q)
{
struct macvlan_dev *vlan = netdev_priv(dev);
+ int index;
int err = -EBUSY;
spin_lock(&macvtap_lock);
- if (rcu_dereference(vlan->tap))
+ if (vlan->numvtaps == MAX_MACVTAP_QUEUES)
goto out;
err = 0;
+ index = get_slot(vlan, NULL);
rcu_assign_pointer(q->vlan, vlan);
- rcu_assign_pointer(vlan->tap, q);
+ rcu_assign_pointer(vlan->taps[index], q);
sock_hold(&q->sk);
q->file = file;
file->private_data = q;
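+ /* account the new tap; macvtap_get_queue() selects among numvtaps active queues */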
+ vlan->numvtaps++;
+
out:
spin_unlock(&macvtap_lock);
return err;
@@ -124,9 +143,12 @@ static void macvtap_put_queue(struct macvtap_queue *q)
spin_lock(&macvtap_lock);
vlan = rcu_dereference(q->vlan);
if (vlan) {
- rcu_assign_pointer(vlan->tap, NULL);
+ int index = get_slot(vlan, q);
+
+ rcu_assign_pointer(vlan->taps[index], NULL);
rcu_assign_pointer(q->vlan, NULL);
sock_put(&q->sk);
+ --vlan->numvtaps;
}
spin_unlock(&macvtap_lock);
@@ -136,39 +158,82 @@ static void macvtap_put_queue(struct macvtap_queue *q)
}
/*
- * Since we only support one queue, just dereference the pointer.
+ * Select a queue based on the rxq of the device on which this packet
+ * arrived. If the incoming device is not mq, calculate a flow hash
+ * to select a queue. If all fails, find the first available queue.
+ * Cache vlan->numvtaps since it can become zero during the execution
+ * of this function.
*/
static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
struct sk_buff *skb)
{
struct macvlan_dev *vlan = netdev_priv(dev);
+ struct macvtap_queue *tap = NULL;
+ int numvtaps = vlan->numvtaps;
+ __u32 rxq;
+
+ if (!numvtaps)
+ goto out;
+
+ if (likely(skb_rx_queue_recorded(skb))) {
+ rxq = skb_get_rx_queue(skb);
+
+ while (unlikely(rxq >= numvtaps))
+ rxq -= numvtaps;
+
+ tap = rcu_dereference(vlan->taps[rxq]);
+ if (tap)
+ goto out;
+ }
+
+ /* Check if we can use flow to select a queue */
+ rxq = skb_get_rxhash(skb);
+ if (rxq) {
+ tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
+ if (tap)
+ goto out;
+ }
- return rcu_dereference(vlan->tap);
+ /* Everything failed - find first available queue */
+ for (rxq = 0; rxq < MAX_MACVTAP_QUEUES; rxq++) {
+ tap = rcu_dereference(vlan->taps[rxq]);
+ if (tap)
+ break;
+ }
+
+out:
+ return tap;
}
/*
* The net_device is going away, give up the reference
- * that it holds on the queue (all the queues one day)
- * and safely set the pointer from the queues to NULL.
+ * that it holds on all queues and safely set the pointer
+ * from the queues to NULL.
*/
static void macvtap_del_queues(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
- struct macvtap_queue *q;
+ struct macvtap_queue *q, *qlist[MAX_MACVTAP_QUEUES];
+ int i, j = 0;
+ /* macvtap_put_queue can free some slots, so go through all slots */
spin_lock(&macvtap_lock);
- q = rcu_dereference(vlan->tap);
- if (!q) {
- spin_unlock(&macvtap_lock);
- return;
+ for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) {
+ q = rcu_dereference(vlan->taps[i]);
+ if (q) {
+ qlist[j++] = q;
+ rcu_assign_pointer(vlan->taps[i], NULL);
+ rcu_assign_pointer(q->vlan, NULL);
+ vlan->numvtaps--;
+ }
}
-
- rcu_assign_pointer(vlan->tap, NULL);
- rcu_assign_pointer(q->vlan, NULL);
+ BUG_ON(vlan->numvtaps != 0);
spin_unlock(&macvtap_lock);
synchronize_rcu();
- sock_put(&q->sk);
+
+ for (--j; j >= 0; j--)
+ sock_put(&qlist[j]->sk);
}
/*
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index 1fd068e1d930..d1aa45a15854 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -6,4 +6,4 @@ mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
obj-$(CONFIG_MLX4_EN) += mlx4_en.o
mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
- en_resources.o en_netdev.o
+ en_resources.o en_netdev.o en_selftest.o
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 8c8515619b8e..8f4bf1f07c11 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -74,7 +74,7 @@ void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
{
- u32 obj, i;
+ u32 obj;
if (likely(cnt == 1 && align == 1))
return mlx4_bitmap_alloc(bitmap);
@@ -91,8 +91,7 @@ u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
}
if (obj < bitmap->max) {
- for (i = 0; i < cnt; i++)
- set_bit(obj + i, bitmap->table);
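+ /* bitmap_set() marks cnt contiguous bits starting at obj in one call */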
+ bitmap_set(bitmap->table, obj, cnt);
if (obj == bitmap->last) {
bitmap->last = (obj + cnt);
if (bitmap->last >= bitmap->max)
@@ -109,13 +108,10 @@ u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
{
- u32 i;
-
obj &= bitmap->max + bitmap->reserved_top - 1;
spin_lock(&bitmap->lock);
- for (i = 0; i < cnt; i++)
- clear_bit(obj + i, bitmap->table);
+ bitmap_clear(bitmap->table, obj, cnt);
bitmap->last = min(bitmap->last, obj);
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
& bitmap->mask;
@@ -125,8 +121,6 @@ void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
u32 reserved_bot, u32 reserved_top)
{
- int i;
-
/* num must be a power of 2 */
if (num != roundup_pow_of_two(num))
return -EINVAL;
@@ -142,8 +136,7 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
if (!bitmap->table)
return -ENOMEM;
- for (i = 0; i < reserved_bot; ++i)
- set_bit(i, bitmap->table);
+ bitmap_set(bitmap->table, 0, reserved_bot);
return 0;
}
@@ -188,7 +181,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
buf->npages = buf->nbufs;
buf->page_shift = PAGE_SHIFT;
- buf->page_list = kzalloc(buf->nbufs * sizeof *buf->page_list,
+ buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
GFP_KERNEL);
if (!buf->page_list)
return -ENOMEM;
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
index b275238fe70d..056152b3ff58 100644
--- a/drivers/net/mlx4/en_ethtool.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -39,21 +39,6 @@
#include "en_port.h"
-static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
-{
- int i;
-
- priv->port_stats.lro_aggregated = 0;
- priv->port_stats.lro_flushed = 0;
- priv->port_stats.lro_no_desc = 0;
-
- for (i = 0; i < priv->rx_ring_num; i++) {
- priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
- priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
- priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
- }
-}
-
static void
mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
{
@@ -112,7 +97,7 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
"tx_heartbeat_errors", "tx_window_errors",
/* port statistics */
- "lro_aggregated", "lro_flushed", "lro_no_desc", "tso_packets",
+ "tso_packets",
"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
"rx_csum_good", "rx_csum_none", "tx_chksum_offload",
@@ -125,6 +110,14 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
#define NUM_MAIN_STATS 21
#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
+static const char mlx4_en_test_names[][ETH_GSTRING_LEN] = {
+ "Interrupt Test",
+ "Link Test",
+ "Speed Test",
+ "Register Test",
+ "Loopback Test",
+};
+
static u32 mlx4_en_get_msglevel(struct net_device *dev)
{
return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
@@ -146,10 +139,15 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- if (sset != ETH_SS_STATS)
+ switch (sset) {
+ case ETH_SS_STATS:
+ return NUM_ALL_STATS +
+ (priv->tx_ring_num + priv->rx_ring_num) * 2;
+ case ETH_SS_TEST:
+ return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.loopback_support) * 2;
+ default:
return -EOPNOTSUPP;
-
- return NUM_ALL_STATS + (priv->tx_ring_num + priv->rx_ring_num) * 2;
+ }
}
static void mlx4_en_get_ethtool_stats(struct net_device *dev,
@@ -161,8 +159,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
spin_lock_bh(&priv->stats_lock);
- mlx4_en_update_lro_stats(priv);
-
for (i = 0; i < NUM_MAIN_STATS; i++)
data[index++] = ((unsigned long *) &priv->stats)[i];
for (i = 0; i < NUM_PORT_STATS; i++)
@@ -181,6 +177,12 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
}
+static void mlx4_en_self_test(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf)
+{
+ mlx4_en_ex_selftest(dev, &etest->flags, buf);
+}
+
static void mlx4_en_get_strings(struct net_device *dev,
uint32_t stringset, uint8_t *data)
{
@@ -188,44 +190,76 @@ static void mlx4_en_get_strings(struct net_device *dev,
int index = 0;
int i;
- if (stringset != ETH_SS_STATS)
- return;
-
- /* Add main counters */
- for (i = 0; i < NUM_MAIN_STATS; i++)
- strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]);
- for (i = 0; i < NUM_PORT_STATS; i++)
- strcpy(data + (index++) * ETH_GSTRING_LEN,
+ switch (stringset) {
+ case ETH_SS_TEST:
+ for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
+ strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
+ if (priv->mdev->dev->caps.loopback_support)
+ for (; i < MLX4_EN_NUM_SELF_TEST; i++)
+ strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
+ break;
+
+ case ETH_SS_STATS:
+ /* Add main counters */
+ for (i = 0; i < NUM_MAIN_STATS; i++)
+ strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]);
+ for (i = 0; i < NUM_PORT_STATS; i++)
+ strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[i + NUM_MAIN_STATS]);
- for (i = 0; i < priv->tx_ring_num; i++) {
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "tx%d_packets", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "tx%d_bytes", i);
- }
- for (i = 0; i < priv->rx_ring_num; i++) {
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_packets", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_bytes", i);
- }
- for (i = 0; i < NUM_PKT_STATS; i++)
- strcpy(data + (index++) * ETH_GSTRING_LEN,
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "tx%d_packets", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "tx%d_bytes", i);
+ }
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_packets", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_bytes", i);
+ }
+ for (i = 0; i < NUM_PKT_STATS; i++)
+ strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]);
+ break;
+ }
}
static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int trans_type;
+
cmd->autoneg = AUTONEG_DISABLE;
cmd->supported = SUPPORTED_10000baseT_Full;
- cmd->advertising = ADVERTISED_1000baseT_Full;
+ cmd->advertising = ADVERTISED_10000baseT_Full;
+
+ if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
+ return -ENOMEM;
+
+ trans_type = priv->port_state.transceiver;
if (netif_carrier_ok(dev)) {
- cmd->speed = SPEED_10000;
+ cmd->speed = priv->port_state.link_speed;
cmd->duplex = DUPLEX_FULL;
} else {
cmd->speed = -1;
cmd->duplex = -1;
}
+
+ if (trans_type > 0 && trans_type <= 0xC) {
+ cmd->port = PORT_FIBRE;
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->supported |= SUPPORTED_FIBRE;
+ cmd->advertising |= ADVERTISED_FIBRE;
+ } else if (trans_type == 0x80 || trans_type == 0) {
+ cmd->port = PORT_TP;
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->supported |= SUPPORTED_TP;
+ cmd->advertising |= ADVERTISED_TP;
+ } else {
+ cmd->port = -1;
+ cmd->transceiver = -1;
+ }
return 0;
}
@@ -343,8 +377,9 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
- if (rx_size == priv->prof->rx_ring_size &&
- tx_size == priv->prof->tx_ring_size)
+ if (rx_size == (priv->port_up ? priv->rx_ring[0].actual_size :
+ priv->rx_ring[0].size) &&
+ tx_size == priv->tx_ring[0].size)
return 0;
mutex_lock(&mdev->state_lock);
@@ -378,49 +413,13 @@ static void mlx4_en_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *param)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
memset(param, 0, sizeof(*param));
param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
- param->rx_pending = mdev->profile.prof[priv->port].rx_ring_size;
- param->tx_pending = mdev->profile.prof[priv->port].tx_ring_size;
-}
-
-static int mlx4_ethtool_op_set_flags(struct net_device *dev, u32 data)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
- int rc = 0;
- int changed = 0;
-
- if (data & ~ETH_FLAG_LRO)
- return -EOPNOTSUPP;
-
- if (data & ETH_FLAG_LRO) {
- if (mdev->profile.num_lro == 0)
- return -EOPNOTSUPP;
- if (!(dev->features & NETIF_F_LRO))
- changed = 1;
- } else if (dev->features & NETIF_F_LRO) {
- changed = 1;
- }
-
- if (changed) {
- if (netif_running(dev)) {
- mutex_lock(&mdev->state_lock);
- mlx4_en_stop_port(dev);
- }
- dev->features ^= NETIF_F_LRO;
- if (netif_running(dev)) {
- rc = mlx4_en_start_port(dev);
- if (rc)
- en_err(priv, "Failed to restart port\n");
- mutex_unlock(&mdev->state_lock);
- }
- }
-
- return rc;
+ param->rx_pending = priv->port_up ?
+ priv->rx_ring[0].actual_size : priv->rx_ring[0].size;
+ param->tx_pending = priv->tx_ring[0].size;
}
const struct ethtool_ops mlx4_en_ethtool_ops = {
@@ -441,6 +440,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_strings = mlx4_en_get_strings,
.get_sset_count = mlx4_en_get_sset_count,
.get_ethtool_stats = mlx4_en_get_ethtool_stats,
+ .self_test = mlx4_en_self_test,
.get_wol = mlx4_en_get_wol,
.get_msglevel = mlx4_en_get_msglevel,
.set_msglevel = mlx4_en_set_msglevel,
@@ -451,7 +451,6 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_ringparam = mlx4_en_get_ringparam,
.set_ringparam = mlx4_en_set_ringparam,
.get_flags = ethtool_op_get_flags,
- .set_flags = mlx4_ethtool_op_set_flags,
};
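
For ETH_SS_TEST, get_sset_count() and mlx4_en_get_strings() must agree entry-for-entry: the count drops the last two tests when the device lacks loopback support, and the string loop skips the same tail. A compact sketch of that contract (test names as in the patch; the count logic is illustrative):

#include <stdio.h>
#include <string.h>

#define ETH_GSTRING_LEN 32
#define NUM_SELF_TEST 5

static const char test_names[NUM_SELF_TEST][ETH_GSTRING_LEN] = {
	"Interrupt Test", "Link Test", "Speed Test",
	"Register Test", "Loopback Test",
};

/* Count and strings must stay in lockstep: the last two tests
 * exist only when the device can loop frames back to itself. */
static int sset_count(int loopback_support)
{
	return NUM_SELF_TEST - (loopback_support ? 0 : 2);
}

static void get_strings(int loopback_support, char *data)
{
	int n = sset_count(loopback_support);

	for (int i = 0; i < n; i++)
		strcpy(data + i * ETH_GSTRING_LEN, test_names[i]);
}

int main(void)
{
	char buf[NUM_SELF_TEST * ETH_GSTRING_LEN];

	get_strings(0, buf);	/* no loopback: three names */
	for (int i = 0; i < sset_count(0); i++)
		printf("%s\n", buf + i * ETH_GSTRING_LEN);
	return 0;
}

Keeping the two in lockstep matters because userspace sizes its buffers from the advertised count.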
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index 97934f1ec53a..143906417048 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -63,15 +63,12 @@ static const char mlx4_en_version[] =
*/
-/* Use a XOR rathern than Toeplitz hash function for RSS */
-MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS");
-
-/* RSS hash type mask - default to <saddr, daddr, sport, dport> */
-MLX4_EN_PARM_INT(rss_mask, 0xf, "RSS hash type bitmask");
-
-/* Number of LRO sessions per Rx ring (rounded up to a power of two) */
-MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
- "Number of LRO sessions per ring or disabled (0)");
+/* Enable RSS TCP traffic */
+MLX4_EN_PARM_INT(tcp_rss, 1,
+ "Enable RSS for incomming TCP traffic or disabled (0)");
+/* Enable RSS UDP traffic */
+MLX4_EN_PARM_INT(udp_rss, 1,
+ "Enable RSS for incomming UDP traffic or disabled (0)");
/* Priority pausing */
MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
@@ -107,9 +104,12 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
struct mlx4_en_profile *params = &mdev->profile;
int i;
- params->rss_xor = (rss_xor != 0);
- params->rss_mask = rss_mask & 0x1f;
- params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS);
+ params->tcp_rss = tcp_rss;
+ params->udp_rss = udp_rss;
+ if (params->udp_rss && !mdev->dev->caps.udp_rss) {
+ mlx4_warn(mdev, "UDP RSS is not supported on this device.\n");
+ params->udp_rss = 0;
+ }
for (i = 1; i <= MLX4_MAX_PORTS; i++) {
params->prof[i].rx_pause = 1;
params->prof[i].rx_ppp = pfcrx;
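
The replacement module parameters follow the usual request-then-clamp pattern: the user asks for UDP RSS, but the probe path silences the request when QUERY_DEV_CAP did not advertise it. A standalone sketch of that shape:

#include <stdio.h>

struct caps { int udp_rss; };
struct profile { int tcp_rss; int udp_rss; };

/* The module parameter asks for a feature; the probe path clamps
 * the request to what the capability word actually advertises. */
static void get_profile(struct profile *p, const struct caps *caps,
			int tcp_rss_param, int udp_rss_param)
{
	p->tcp_rss = tcp_rss_param;
	p->udp_rss = udp_rss_param;
	if (p->udp_rss && !caps->udp_rss) {
		fprintf(stderr, "UDP RSS is not supported on this device.\n");
		p->udp_rss = 0;
	}
}

int main(void)
{
	struct caps caps = { .udp_rss = 0 };	/* older silicon */
	struct profile prof;

	get_profile(&prof, &caps, 1, 1);
	printf("tcp_rss=%d udp_rss=%d\n", prof.tcp_rss, prof.udp_rss);
	return 0;
}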
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index a0d8a26f5a02..411bda581c04 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -109,7 +109,7 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
mutex_unlock(&mdev->state_lock);
}
-static u64 mlx4_en_mac_to_u64(u8 *addr)
+u64 mlx4_en_mac_to_u64(u8 *addr)
{
u64 mac = 0;
int i;
@@ -513,6 +513,10 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
}
+ if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
+ queue_work(mdev->workqueue, &priv->mac_task);
+ mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
+ }
mutex_unlock(&mdev->state_lock);
}
@@ -528,10 +532,10 @@ static void mlx4_en_linkstate(struct work_struct *work)
* report to system log */
if (priv->last_link_state != linkstate) {
if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
- en_dbg(LINK, priv, "Link Down\n");
+ en_info(priv, "Link Down\n");
netif_carrier_off(priv->dev);
} else {
- en_dbg(LINK, priv, "Link Up\n");
+ en_info(priv, "Link Up\n");
netif_carrier_on(priv->dev);
}
}
@@ -653,6 +657,7 @@ int mlx4_en_start_port(struct net_device *dev)
en_err(priv, "Failed setting port mac\n");
goto tx_err;
}
+ mdev->mac_removed[priv->port] = 0;
/* Init port */
en_dbg(HW, priv, "Initializing port\n");
@@ -704,12 +709,12 @@ void mlx4_en_stop_port(struct net_device *dev)
netif_tx_stop_all_queues(dev);
netif_tx_unlock_bh(dev);
- /* close port*/
+ /* Set port as not active */
priv->port_up = false;
- mlx4_CLOSE_PORT(mdev->dev, priv->port);
/* Unregister Mac address for the port */
mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
+ mdev->mac_removed[priv->port] = 1;
/* Free TX Rings */
for (i = 0; i < priv->tx_ring_num; i++) {
@@ -731,6 +736,9 @@ void mlx4_en_stop_port(struct net_device *dev)
msleep(1);
mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
}
+
+ /* close port*/
+ mlx4_CLOSE_PORT(mdev->dev, priv->port);
}
static void mlx4_en_restart(struct work_struct *work)
@@ -1023,9 +1031,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
/* Set defualt MAC */
dev->addr_len = ETH_ALEN;
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[ETH_ALEN - 1 - i] =
- (u8) (priv->mac >> (8 * i));
+ for (i = 0; i < ETH_ALEN; i++) {
+ dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
+ dev->perm_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
+ }
/*
* Set driver features
@@ -1038,8 +1047,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->features |= NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
- if (mdev->profile.num_lro)
- dev->features |= NETIF_F_LRO;
+ dev->features |= NETIF_F_GRO;
if (mdev->LSO_support) {
dev->features |= NETIF_F_TSO;
dev->features |= NETIF_F_TSO6;
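
mlx4_en_mac_to_u64() loses its static qualifier above so the loopback TX path can pack the destination MAC into the WQE. Its body is not shown in this hunk; the sketch below assumes the natural MSB-first packing, which is consistent with the high/low split visible later in mlx4_en_xmit():

#include <stdio.h>
#include <stdint.h>

#define ETH_ALEN 6

/* Assumed MSB-first packing for mlx4_en_mac_to_u64(); the split
 * below matches the loopback path in mlx4_en_xmit(). */
static uint64_t mac_to_u64(const uint8_t *addr)
{
	uint64_t mac = 0;

	for (int i = 0; i < ETH_ALEN; i++)
		mac = (mac << 8) | addr[i];
	return mac;
}

int main(void)
{
	uint8_t addr[ETH_ALEN] = { 0x00, 0x02, 0xc9, 0x12, 0x34, 0x56 };
	uint64_t mac = mac_to_u64(addr);
	/* The loopback xmit path splits the MAC into two WQE words: */
	uint32_t mac_h = (uint32_t)((mac & 0xffff00000000ULL) >> 16);
	uint32_t mac_l = (uint32_t)(mac & 0xffffffff);

	printf("mac=%#llx h=%#x l=%#x\n", (unsigned long long)mac, mac_h, mac_l);
	return 0;
}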
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
index a29abe845d2e..aa3ef2aee5bf 100644
--- a/drivers/net/mlx4/en_port.c
+++ b/drivers/net/mlx4/en_port.c
@@ -142,6 +142,38 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
return err;
}
+int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
+{
+ struct mlx4_en_query_port_context *qport_context;
+ struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
+ struct mlx4_en_port_state *state = &priv->port_state;
+ struct mlx4_cmd_mailbox *mailbox;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ memset(mailbox->buf, 0, sizeof(*qport_context));
+ err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
+ MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B);
+ if (err)
+ goto out;
+ qport_context = mailbox->buf;
+
+ /* This command is always accessed from Ethtool context
+ * already synchronized, no need for locking */
+ state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK);
+ if ((qport_context->link_speed & MLX4_EN_SPEED_MASK) ==
+ MLX4_EN_1G_SPEED)
+ state->link_speed = 1000;
+ else
+ state->link_speed = 10000;
+ state->transceiver = qport_context->transceiver;
+
+out:
+ mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+ return err;
+}
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
{
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h
index e6477f12beb5..f6511aa2b7df 100644
--- a/drivers/net/mlx4/en_port.h
+++ b/drivers/net/mlx4/en_port.h
@@ -84,6 +84,20 @@ enum {
MLX4_MCAST_ENABLE = 2,
};
+struct mlx4_en_query_port_context {
+ u8 link_up;
+#define MLX4_EN_LINK_UP_MASK 0x80
+ u8 reserved;
+ __be16 mtu;
+ u8 reserved2;
+ u8 link_speed;
+#define MLX4_EN_SPEED_MASK 0x3
+#define MLX4_EN_1G_SPEED 0x2
+ u16 reserved3[5];
+ __be64 mac;
+ u8 transceiver;
+};
+
struct mlx4_en_stat_out_mbox {
/* Received frames with a length of 64 octets */
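
The new mlx4_en_query_port_context mirrors the firmware mailbox layout, and mlx4_en_QUERY_PORT() reduces it to three fields. A simplified decode, with the reserved padding and big-endian fields of the real mailbox omitted and the values invented for illustration:

#include <stdio.h>
#include <stdint.h>

#define LINK_UP_MASK	0x80
#define SPEED_MASK	0x3
#define SPEED_1G	0x2

/* Simplified stand-in for mlx4_en_query_port_context. */
struct query_port_context {
	uint8_t link_up;
	uint8_t link_speed;
	uint8_t transceiver;
};

int main(void)
{
	struct query_port_context ctx = {
		.link_up = 0x80,	/* invented example values */
		.link_speed = 0x0,
		.transceiver = 0x7,
	};
	int link_state = !!(ctx.link_up & LINK_UP_MASK);
	int speed = ((ctx.link_speed & SPEED_MASK) == SPEED_1G) ? 1000 : 10000;

	printf("link=%d speed=%d Mb/s transceiver=%#x\n",
	       link_state, speed, ctx.transceiver);
	return 0;
}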
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 8e2fcb7103c3..570f2508fb30 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -42,18 +42,6 @@
#include "mlx4_en.h"
-static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
- void **ip_hdr, void **tcpudp_hdr,
- u64 *hdr_flags, void *priv)
-{
- *mac_hdr = page_address(frags->page) + frags->page_offset;
- *ip_hdr = *mac_hdr + ETH_HLEN;
- *tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
- *hdr_flags = LRO_IPV4 | LRO_TCP;
-
- return 0;
-}
-
static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
struct mlx4_en_rx_desc *rx_desc,
struct skb_frag_struct *skb_frags,
@@ -251,7 +239,6 @@ reduce_rings:
ring->prod--;
mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
}
- ring->size_mask = ring->actual_size - 1;
}
return 0;
@@ -313,28 +300,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
}
ring->buf = ring->wqres.buf.direct.buf;
- /* Configure lro mngr */
- memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
- ring->lro.dev = priv->dev;
- ring->lro.features = LRO_F_NAPI;
- ring->lro.frag_align_pad = NET_IP_ALIGN;
- ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
- ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
- ring->lro.max_desc = mdev->profile.num_lro;
- ring->lro.max_aggr = MAX_SKB_FRAGS;
- ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
- sizeof(struct net_lro_desc),
- GFP_KERNEL);
- if (!ring->lro.lro_arr) {
- en_err(priv, "Failed to allocate lro array\n");
- goto err_map;
- }
- ring->lro.get_frag_header = mlx4_en_get_frag_header;
-
return 0;
-err_map:
- mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq:
mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_ring:
@@ -389,6 +356,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
ring = &priv->rx_ring[ring_ind];
+ ring->size_mask = ring->actual_size - 1;
mlx4_en_update_rx_prod_db(ring);
}
@@ -412,7 +380,6 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
{
struct mlx4_en_dev *mdev = priv->mdev;
- kfree(ring->lro.lro_arr);
mlx4_en_unmap_buffer(&ring->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size + TXBB_SIZE);
vfree(ring->rx_info);
@@ -459,7 +426,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
goto fail;
/* Unmap buffer */
- pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
+ pci_unmap_single(mdev->pdev, dma, skb_frags_rx[nr].size,
PCI_DMA_FROMDEVICE);
}
/* Adjust size of last fragment to match actual length */
@@ -541,6 +508,21 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
return skb;
}
+static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
+{
+ int i;
+ int offset = ETH_HLEN;
+
+ for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
+ if (*(skb->data + offset) != (unsigned char) (i & 0xff))
+ goto out_loopback;
+ }
+ /* Loopback found */
+ priv->loopback_ok = 1;
+
+out_loopback:
+ dev_kfree_skb_any(skb);
+}
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
@@ -548,7 +530,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
struct mlx4_cqe *cqe;
struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
struct skb_frag_struct *skb_frags;
- struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
struct mlx4_en_rx_desc *rx_desc;
struct sk_buff *skb;
int index;
@@ -608,37 +589,35 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
* - TCP/IP (v4)
* - without IP options
* - not an IP fragment */
- if (mlx4_en_can_lro(cqe->status) &&
- dev->features & NETIF_F_LRO) {
+ if (dev->features & NETIF_F_GRO) {
+ struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
+ if (!gro_skb)
+ goto next;
nr = mlx4_en_complete_rx_desc(
priv, rx_desc,
- skb_frags, lro_frags,
+ skb_frags, skb_shinfo(gro_skb)->frags,
ring->page_alloc, length);
if (!nr)
goto next;
+ skb_shinfo(gro_skb)->nr_frags = nr;
+ gro_skb->len = length;
+ gro_skb->data_len = length;
+ gro_skb->truesize += length;
+ gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
+
if (priv->vlgrp && (cqe->vlan_my_qpn &
- cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK))) {
- lro_vlan_hwaccel_receive_frags(
- &ring->lro, lro_frags,
- length, length,
- priv->vlgrp,
- be16_to_cpu(cqe->sl_vid),
- NULL, 0);
- } else
- lro_receive_frags(&ring->lro,
- lro_frags,
- length,
- length,
- NULL, 0);
+ cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)))
+ vlan_gro_frags(&cq->napi, priv->vlgrp, be16_to_cpu(cqe->sl_vid));
+ else
+ napi_gro_frags(&cq->napi);
goto next;
}
/* LRO not possible, complete processing here */
ip_summed = CHECKSUM_UNNECESSARY;
- INC_PERF_COUNTER(priv->pstats.lro_misses);
} else {
ip_summed = CHECKSUM_NONE;
priv->port_stats.rx_chksum_none++;
@@ -655,6 +634,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
goto next;
}
+ if (unlikely(priv->validate_loopback)) {
+ validate_loopback(priv, skb);
+ goto next;
+ }
+
skb->ip_summed = ip_summed;
skb->protocol = eth_type_trans(skb, dev);
skb_record_rx_queue(skb, cq->ring);
@@ -674,14 +658,10 @@ next:
if (++polled == budget) {
/* We are here because we reached the NAPI budget -
* flush only pending LRO sessions */
- lro_flush_all(&ring->lro);
goto out;
}
}
- /* If CQ is empty flush all LRO sessions unconditionally */
- lro_flush_all(&ring->lro);
-
out:
AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
mlx4_cq_set_ci(&cq->mcq);
@@ -816,7 +796,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
qp->event = mlx4_en_sqp_event;
memset(context, 0, sizeof *context);
- mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 0, 0,
+ mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
qpn, ring->cqn, context);
context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
@@ -839,8 +819,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
struct mlx4_qp_context context;
struct mlx4_en_rss_context *rss_context;
void *ptr;
- int rss_xor = mdev->profile.rss_xor;
- u8 rss_mask = mdev->profile.rss_mask;
+ u8 rss_mask = 0x3f;
int i, qpn;
int err = 0;
int good_qps = 0;
@@ -886,9 +865,10 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
(rss_map->base_qpn));
rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
- rss_context->hash_fn = rss_xor & 0x3;
- rss_context->flags = rss_mask << 2;
+ rss_context->flags = rss_mask;
+ if (priv->mdev->profile.udp_rss)
+ rss_context->base_qpn_udp = rss_context->default_qpn;
err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
&rss_map->indir_qp, &rss_map->indir_state);
if (err)
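
Aside from the LRO-to-GRO conversion, the en_rx.c hunks also defer ring->size_mask until mlx4_en_activate_rx_rings(), once actual_size is final, so nothing indexes through a stale mask. The mask itself is the standard power-of-two ring idiom, where free-running counters wrap with a single AND instead of a modulo; a sketch:

#include <stdio.h>
#include <stdint.h>

/* Rings sized to a power of two let free-running producer/consumer
 * counters wrap with a single AND instead of a modulo. */
struct ring {
	uint32_t size;		/* power of two */
	uint32_t size_mask;	/* size - 1 */
	uint32_t prod, cons;	/* free-running counters */
};

static uint32_t ring_index(const struct ring *r, uint32_t counter)
{
	return counter & r->size_mask;
}

int main(void)
{
	struct ring r = { .size = 8, .size_mask = 7, .prod = 13, .cons = 9 };

	printf("prod slot %u, cons slot %u, in flight %u\n",
	       ring_index(&r, r.prod), ring_index(&r, r.cons),
	       r.prod - r.cons);
	return 0;
}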
diff --git a/drivers/net/mlx4/en_selftest.c b/drivers/net/mlx4/en_selftest.c
new file mode 100644
index 000000000000..43357d35616a
--- /dev/null
+++ b/drivers/net/mlx4/en_selftest.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/mlx4/driver.h>
+
+#include "mlx4_en.h"
+
+
+static int mlx4_en_test_registers(struct mlx4_en_priv *priv)
+{
+ return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
+ MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
+{
+ struct sk_buff *skb;
+ struct ethhdr *ethh;
+ unsigned char *packet;
+ unsigned int packet_size = MLX4_LOOPBACK_TEST_PAYLOAD;
+ unsigned int i;
+ int err;
+
+
+ /* build the pkt before xmit */
+ skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN);
+ if (!skb) {
+ en_err(priv, "-LOOPBACK_TEST_XMIT- failed to create skb for xmit\n");
+ return -ENOMEM;
+ }
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
+ packet = (unsigned char *)skb_put(skb, packet_size);
+ memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN);
+ memset(ethh->h_source, 0, ETH_ALEN);
+ ethh->h_proto = htons(ETH_P_ARP);
+ skb_set_mac_header(skb, 0);
+ for (i = 0; i < packet_size; ++i) /* fill our packet */
+ packet[i] = (unsigned char)(i & 0xff);
+
+ /* xmit the pkt */
+ err = mlx4_en_xmit(skb, priv->dev);
+ return err;
+}
+
+static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
+{
+ u32 loopback_ok = 0;
+ int i;
+
+
+ priv->loopback_ok = 0;
+ priv->validate_loopback = 1;
+
+ /* xmit */
+ if (mlx4_en_test_loopback_xmit(priv)) {
+ en_err(priv, "Transmitting loopback packet failed\n");
+ goto mlx4_en_test_loopback_exit;
+ }
+
+ /* polling for result */
+ for (i = 0; i < MLX4_EN_LOOPBACK_RETRIES; ++i) {
+ msleep(MLX4_EN_LOOPBACK_TIMEOUT);
+ if (priv->loopback_ok) {
+ loopback_ok = 1;
+ break;
+ }
+ }
+ if (!loopback_ok)
+ en_err(priv, "Loopback packet didn't arrive\n");
+
+mlx4_en_test_loopback_exit:
+
+ priv->validate_loopback = 0;
+ return (!loopback_ok);
+}
+
+
+static int mlx4_en_test_link(struct mlx4_en_priv *priv)
+{
+ if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
+ return -ENOMEM;
+ if (priv->port_state.link_state == 1)
+ return 0;
+ else
+ return 1;
+}
+
+static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
+{
+
+ if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
+ return -ENOMEM;
+
+ /* The device currently only supports 10G speed */
+ if (priv->port_state.link_speed != SPEED_10000)
+ return priv->port_state.link_speed;
+ return 0;
+}
+
+
+void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_tx_ring *tx_ring;
+ int i, carrier_ok;
+
+ memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
+
+ if (*flags & ETH_TEST_FL_OFFLINE) {
+ /* disable the interface */
+ carrier_ok = netif_carrier_ok(dev);
+
+ netif_carrier_off(dev);
+retry_tx:
+ /* Wait until all tx queues are empty.
+ * There should not be any additional incoming traffic
+ * since we turned the carrier off */
+ msleep(200);
+ for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) {
+ tx_ring = &priv->tx_ring[i];
+ if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
+ goto retry_tx;
+ }
+
+ if (priv->mdev->dev->caps.loopback_support) {
+ buf[3] = mlx4_en_test_registers(priv);
+ buf[4] = mlx4_en_test_loopback(priv);
+ }
+
+ if (carrier_ok)
+ netif_carrier_on(dev);
+
+ }
+ buf[0] = mlx4_test_interrupts(mdev->dev);
+ buf[1] = mlx4_en_test_link(priv);
+ buf[2] = mlx4_en_test_speed(priv);
+
+ for (i = 0; i < MLX4_EN_NUM_SELF_TEST; i++) {
+ if (buf[i])
+ *flags |= ETH_TEST_FL_FAILED;
+ }
+}
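
The offline half of the self-test drains the TX rings, optionally runs the register and loopback tests, and restores carrier; the loopback check is one transmit followed by bounded polling for a flag that the RX path sets in validate_loopback(). A toy model of that poll loop, with a fake completion standing in for the real RX interrupt:

#include <stdio.h>
#include <unistd.h>

#define LOOPBACK_RETRIES	5
#define LOOPBACK_TIMEOUT_MS	100

static volatile int loopback_ok;	/* set by the receive path */

/* Stand-in for the RX completion that validates the payload. */
static void fake_rx_completion(void)
{
	loopback_ok = 1;
}

/* Same shape as mlx4_en_test_loopback(): transmit once, then poll
 * the flag with a bounded number of sleeps. Returns 0 on success. */
static int test_loopback(void)
{
	loopback_ok = 0;
	fake_rx_completion();	/* pretend the frame came straight back */

	for (int i = 0; i < LOOPBACK_RETRIES; i++) {
		usleep(LOOPBACK_TIMEOUT_MS * 1000);
		if (loopback_ok)
			return 0;
	}
	return 1;
}

int main(void)
{
	printf("loopback %s\n", test_loopback() ? "failed" : "ok");
	return 0;
}

From userspace the suite is driven with `ethtool -t ethX offline`, or in online mode for the non-disruptive subset; each u64 in buf maps to one name from mlx4_en_test_names.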
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 580968f304eb..98dd620042a8 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -38,6 +38,7 @@
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
+#include <linux/tcp.h>
#include "mlx4_en.h"
@@ -600,6 +601,9 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
struct mlx4_wqe_data_seg *data;
struct skb_frag_struct *frag;
struct mlx4_en_tx_info *tx_info;
+ struct ethhdr *ethh;
+ u64 mac;
+ u32 mac_l, mac_h;
int tx_ind = 0;
int nr_txbb;
int desc_size;
@@ -612,6 +616,9 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
int lso_header_size;
void *fragptr;
+ if (!priv->port_up)
+ goto tx_drop;
+
real_size = get_real_size(skb, dev, &lso_header_size);
if (unlikely(!real_size))
goto tx_drop;
@@ -676,6 +683,19 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
priv->port_stats.tx_chksum_offload++;
}
+ if (unlikely(priv->validate_loopback)) {
+ /* Copy dst mac address to wqe */
+ skb_reset_mac_header(skb);
+ ethh = eth_hdr(skb);
+ if (ethh && ethh->h_dest) {
+ mac = mlx4_en_mac_to_u64(ethh->h_dest);
+ mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
+ mac_l = (u32) (mac & 0xffffffff);
+ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
+ tx_desc->ctrl.imm = cpu_to_be32(mac_l);
+ }
+ }
+
/* Handle LSO (TSO) packets */
if (lso_header_size) {
/* Mark opcode as LSO */
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 6d7b2bf210ce..552d0fce6f67 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -699,3 +699,47 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
kfree(priv->eq_table.uar_map);
}
+
+/* A test that verifies that we can accept interrupts on all
+ * the irq vectors of the device.
+ * Interrupts are checked using the NOP command.
+ */
+int mlx4_test_interrupts(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i;
+ int err;
+
+ err = mlx4_NOP(dev);
+ /* When not in MSI_X, there is only one irq to check */
+ if (!(dev->flags & MLX4_FLAG_MSI_X))
+ return err;
+
+ /* A loop over all completion vectors, for each vector we will check
+ * whether it works by mapping command completions to that vector
+ * and performing a NOP command
+ */
+ for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
+ /* Temporary use polling for command completions */
+ mlx4_cmd_use_polling(dev);
+
+ /* Map the new eq to handle all asynchronous events */
+ err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
+ priv->eq_table.eq[i].eqn);
+ if (err) {
+ mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
+ mlx4_cmd_use_events(dev);
+ break;
+ }
+
+ /* Go back to using events */
+ mlx4_cmd_use_events(dev);
+ err = mlx4_NOP(dev);
+ }
+
+ /* Return to default */
+ mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
+ priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_test_interrupts);
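
mlx4_test_interrupts() exercises each completion vector by remapping asynchronous events onto it and issuing a NOP command that must complete through that vector. A toy model of the control flow, with stubs standing in for mlx4_MAP_EQ() and mlx4_NOP():

#include <stdio.h>

#define NUM_COMP_VECTORS 4

/* Stubs standing in for mlx4_MAP_EQ() and mlx4_NOP(). */
static int map_eq(int vec)
{
	(void)vec;		/* pretend the remap succeeds */
	return 0;
}

static int nop(int vec)
{
	printf("NOP completed via vector %d\n", vec);
	return 0;
}

int main(void)
{
	int err = nop(0);	/* the default vector is checked first */

	/* Point completions at each vector in turn; a NOP must then
	 * complete through it or the vector is declared broken. */
	for (int i = 0; !err && i < NUM_COMP_VECTORS; i++) {
		err = map_eq(i);
		if (!err)
			err = nop(i);
	}
	map_eq(NUM_COMP_VECTORS);	/* restore the default mapping */

	printf("interrupt test %s\n", err ? "failed" : "passed");
	return 0;
}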
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 04f42ae1eda0..b716e1a1b298 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -141,6 +141,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
struct mlx4_cmd_mailbox *mailbox;
u32 *outbox;
u8 field;
+ u32 field32;
u16 size;
u16 stat_rate;
int err;
@@ -178,6 +179,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
+#define QUERY_DEV_CAP_UDP_RSS_OFFSET 0x42
+#define QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET 0x43
#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
@@ -268,6 +271,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->max_msg_sz = 1 << (field & 0x1f);
MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
dev_cap->stat_rate_support = stat_rate;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_UDP_RSS_OFFSET);
+ dev_cap->udp_rss = field & 0x1;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET);
+ dev_cap->loopback_support = field & 0x1;
MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
dev_cap->reserved_uars = field >> 4;
@@ -365,6 +372,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a
#define QUERY_PORT_MAX_VL_OFFSET 0x0b
#define QUERY_PORT_MAC_OFFSET 0x10
+#define QUERY_PORT_TRANS_VENDOR_OFFSET 0x18
+#define QUERY_PORT_WAVELENGTH_OFFSET 0x1c
+#define QUERY_PORT_TRANS_CODE_OFFSET 0x20
for (i = 1; i <= dev_cap->num_ports; ++i) {
err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
@@ -388,6 +398,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->log_max_vlans[i] = field >> 4;
MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
+ MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
+ dev_cap->trans_type[i] = field32 >> 24;
+ dev_cap->vendor_oui[i] = field32 & 0xffffff;
+ MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET);
+ MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET);
}
}
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index 526d7f30c041..65cc72eb899d 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -73,7 +73,13 @@ struct mlx4_dev_cap {
int max_pkeys[MLX4_MAX_PORTS + 1];
u64 def_mac[MLX4_MAX_PORTS + 1];
u16 eth_mtu[MLX4_MAX_PORTS + 1];
+ int trans_type[MLX4_MAX_PORTS + 1];
+ int vendor_oui[MLX4_MAX_PORTS + 1];
+ u16 wavelength[MLX4_MAX_PORTS + 1];
+ u64 trans_code[MLX4_MAX_PORTS + 1];
u16 stat_rate_support;
+ int udp_rss;
+ int loopback_support;
u32 flags;
int reserved_uars;
int uar_size;
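
The new capability reads follow the established QUERY_DEV_CAP pattern: MLX4_GET() pulls a fixed-offset field out of the mailbox and the caller masks it down to the interesting bits. A rough plain-C equivalent, using the offsets from the patch (the real macro also handles endian conversion for wider fields):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define UDP_RSS_OFFSET		0x42
#define ETH_UC_LOOPBACK_OFFSET	0x43

/* Byte-sized reads at fixed mailbox offsets, then mask down to
 * the capability bit, matching the MLX4_GET() accesses above. */
static uint8_t get_u8(const uint8_t *outbox, size_t off)
{
	uint8_t v;

	memcpy(&v, outbox + off, sizeof(v));
	return v;
}

int main(void)
{
	uint8_t outbox[0x100] = { 0 };	/* pretend firmware reply */

	outbox[UDP_RSS_OFFSET] = 0x01;
	outbox[ETH_UC_LOOPBACK_OFFSET] = 0x00;

	int udp_rss = get_u8(outbox, UDP_RSS_OFFSET) & 0x1;
	int loopback = get_u8(outbox, ETH_UC_LOOPBACK_OFFSET) & 0x1;

	printf("udp_rss=%d loopback_support=%d\n", udp_rss, loopback);
	return 0;
}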
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 5102ab1ac561..569fa3df381f 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -184,6 +184,10 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
dev->caps.def_mac[i] = dev_cap->def_mac[i];
dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
+ dev->caps.trans_type[i] = dev_cap->trans_type[i];
+ dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i];
+ dev->caps.wavelength[i] = dev_cap->wavelength[i];
+ dev->caps.trans_code[i] = dev_cap->trans_code[i];
}
dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
@@ -221,6 +225,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.bmme_flags = dev_cap->bmme_flags;
dev->caps.reserved_lkey = dev_cap->reserved_lkey;
dev->caps.stat_rate_support = dev_cap->stat_rate_support;
+ dev->caps.udp_rss = dev_cap->udp_rss;
+ dev->caps.loopback_support = dev_cap->loopback_support;
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
dev->caps.log_num_macs = log_num_mac;
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index 449210994ee9..1fc16ab7ad2f 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -38,19 +38,19 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
-#include <linux/inet_lro.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/cq.h>
#include <linux/mlx4/srq.h>
#include <linux/mlx4/doorbell.h>
+#include <linux/mlx4/cmd.h>
#include "en_port.h"
#define DRV_NAME "mlx4_en"
-#define DRV_VERSION "1.4.1.1"
-#define DRV_RELDATE "June 2009"
+#define DRV_VERSION "1.5.1.6"
+#define DRV_RELDATE "August 2010"
#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
@@ -61,7 +61,6 @@
#define MLX4_EN_PAGE_SHIFT 12
#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT)
-#define MAX_TX_RINGS 16
#define MAX_RX_RINGS 16
#define TXBB_SIZE 64
#define HEADROOM (2048 / TXBB_SIZE + 1)
@@ -107,6 +106,7 @@ enum {
#define MLX4_EN_SMALL_PKT_SIZE 64
#define MLX4_EN_NUM_TX_RINGS 8
#define MLX4_EN_NUM_PPP_RINGS 8
+#define MAX_TX_RINGS (MLX4_EN_NUM_TX_RINGS + MLX4_EN_NUM_PPP_RINGS)
#define MLX4_EN_DEF_TX_RING_SIZE 512
#define MLX4_EN_DEF_RX_RING_SIZE 1024
@@ -139,10 +139,14 @@ enum {
#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN)
#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN)
+#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)
#define MLX4_EN_MIN_MTU 46
#define ETH_BCAST 0xffffffffffffULL
+#define MLX4_EN_LOOPBACK_RETRIES 5
+#define MLX4_EN_LOOPBACK_TIMEOUT 100
+
#ifdef MLX4_EN_PERF_STAT
/* Number of samples to 'average' */
#define AVG_SIZE 128
@@ -249,7 +253,6 @@ struct mlx4_en_rx_desc {
struct mlx4_en_rx_ring {
struct mlx4_hwq_resources wqres;
struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
- struct net_lro_mgr lro;
u32 size ; /* number of Rx descs*/
u32 actual_size;
u32 size_mask;
@@ -313,7 +316,8 @@ struct mlx4_en_port_profile {
struct mlx4_en_profile {
int rss_xor;
- int num_lro;
+ int tcp_rss;
+ int udp_rss;
u8 rss_mask;
u32 active_ports;
u32 small_pkt_int;
@@ -337,6 +341,7 @@ struct mlx4_en_dev {
struct mlx4_mr mr;
u32 priv_pdn;
spinlock_t uar_lock;
+ u8 mac_removed[MLX4_MAX_PORTS + 1];
};
@@ -355,6 +360,13 @@ struct mlx4_en_rss_context {
u8 hash_fn;
u8 flags;
__be32 rss_key[10];
+ __be32 base_qpn_udp;
+};
+
+struct mlx4_en_port_state {
+ int link_state;
+ int link_speed;
+ int transceiver;
};
struct mlx4_en_pkt_stats {
@@ -365,9 +377,6 @@ struct mlx4_en_pkt_stats {
};
struct mlx4_en_port_stats {
- unsigned long lro_aggregated;
- unsigned long lro_flushed;
- unsigned long lro_no_desc;
unsigned long tso_packets;
unsigned long queue_stopped;
unsigned long wake_queue;
@@ -376,7 +385,7 @@ struct mlx4_en_port_stats {
unsigned long rx_chksum_good;
unsigned long rx_chksum_none;
unsigned long tx_chksum_offload;
-#define NUM_PORT_STATS 11
+#define NUM_PORT_STATS 8
};
struct mlx4_en_perf_stats {
@@ -405,6 +414,7 @@ struct mlx4_en_priv {
struct vlan_group *vlgrp;
struct net_device_stats stats;
struct net_device_stats ret_stats;
+ struct mlx4_en_port_state port_state;
spinlock_t stats_lock;
unsigned long last_moder_packets;
@@ -423,6 +433,8 @@ struct mlx4_en_priv {
u16 sample_interval;
u16 adaptive_rx_coal;
u32 msg_enable;
+ u32 loopback_ok;
+ u32 validate_loopback;
struct mlx4_hwq_resources res;
int link_state;
@@ -531,6 +543,11 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
u8 promisc);
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
+int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
+
+#define MLX4_EN_NUM_SELF_TEST 5
+void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
+u64 mlx4_en_mac_to_u64(u8 *addr);
/*
* Globals
@@ -555,6 +572,8 @@ do { \
en_print(KERN_WARNING, priv, format, ##arg)
#define en_err(priv, format, arg...) \
en_print(KERN_ERR, priv, format, ##arg)
+#define en_info(priv, format, arg...) \
+ en_print(KERN_INFO, priv, format, ##arg)
#define mlx4_err(mdev, format, arg...) \
pr_err("%s %s: " format, DRV_NAME, \
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index 5caf0115fa5b..e749f82865fe 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -85,7 +85,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
struct mlx4_resource tmp;
int i, j;
- profile = kzalloc(MLX4_RES_NUM * sizeof *profile, GFP_KERNEL);
+ profile = kcalloc(MLX4_RES_NUM, sizeof(*profile), GFP_KERNEL);
if (!profile)
return -ENOMEM;
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index d771d1650d60..24ab8a43c777 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -239,6 +239,7 @@ struct myri10ge_priv {
int watchdog_resets;
int watchdog_pause;
int pause;
+ bool fw_name_allocated;
char *fw_name;
char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
char *product_code_string;
@@ -271,6 +272,7 @@ MODULE_FIRMWARE("myri10ge_eth_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_ethp_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_eth_z8e.dat");
+/* Careful: must be accessed under kparam_block_sysfs_write */
static char *myri10ge_fw_name = NULL;
module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name");
@@ -376,6 +378,14 @@ static inline void put_be32(__be32 val, __be32 __iomem * p)
static struct net_device_stats *myri10ge_get_stats(struct net_device *dev);
+static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated)
+{
+ if (mgp->fw_name_allocated)
+ kfree(mgp->fw_name);
+ mgp->fw_name = name;
+ mgp->fw_name_allocated = allocated;
+}
+
static int
myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
struct myri10ge_cmd *data, int atomic)
@@ -747,7 +757,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt)
dev_warn(&mgp->pdev->dev, "via hotplug\n");
}
- mgp->fw_name = "adopted";
+ set_fw_name(mgp, "adopted", false);
mgp->tx_boundary = 2048;
myri10ge_dummy_rdma(mgp, 1);
status = myri10ge_get_firmware_capabilities(mgp);
@@ -3233,7 +3243,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
* load the optimized firmware (which assumes aligned PCIe
* completions) in order to see if it works on this host.
*/
- mgp->fw_name = myri10ge_fw_aligned;
+ set_fw_name(mgp, myri10ge_fw_aligned, false);
status = myri10ge_load_firmware(mgp, 1);
if (status != 0) {
goto abort;
@@ -3261,7 +3271,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
abort:
/* fall back to using the unaligned firmware */
mgp->tx_boundary = 2048;
- mgp->fw_name = myri10ge_fw_unaligned;
+ set_fw_name(mgp, myri10ge_fw_unaligned, false);
}
@@ -3284,7 +3294,7 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
dev_info(&mgp->pdev->dev, "PCIE x%d Link\n",
link_width);
mgp->tx_boundary = 4096;
- mgp->fw_name = myri10ge_fw_aligned;
+ set_fw_name(mgp, myri10ge_fw_aligned, false);
} else {
myri10ge_firmware_probe(mgp);
}
@@ -3293,22 +3303,29 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
dev_info(&mgp->pdev->dev,
"Assuming aligned completions (forced)\n");
mgp->tx_boundary = 4096;
- mgp->fw_name = myri10ge_fw_aligned;
+ set_fw_name(mgp, myri10ge_fw_aligned, false);
} else {
dev_info(&mgp->pdev->dev,
"Assuming unaligned completions (forced)\n");
mgp->tx_boundary = 2048;
- mgp->fw_name = myri10ge_fw_unaligned;
+ set_fw_name(mgp, myri10ge_fw_unaligned, false);
}
}
+
+ kparam_block_sysfs_write(myri10ge_fw_name);
if (myri10ge_fw_name != NULL) {
- overridden = 1;
- mgp->fw_name = myri10ge_fw_name;
+ char *fw_name = kstrdup(myri10ge_fw_name, GFP_KERNEL);
+ if (fw_name) {
+ overridden = 1;
+ set_fw_name(mgp, fw_name, true);
+ }
}
+ kparam_unblock_sysfs_write(myri10ge_fw_name);
+
if (mgp->board_number < MYRI10GE_MAX_BOARDS &&
myri10ge_fw_names[mgp->board_number] != NULL &&
strlen(myri10ge_fw_names[mgp->board_number])) {
- mgp->fw_name = myri10ge_fw_names[mgp->board_number];
+ set_fw_name(mgp, myri10ge_fw_names[mgp->board_number], false);
overridden = 1;
}
if (overridden)
@@ -3660,6 +3677,7 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
struct myri10ge_cmd cmd;
struct pci_dev *pdev = mgp->pdev;
char *old_fw;
+ bool old_allocated;
int i, status, ncpus, msix_cap;
mgp->num_slices = 1;
@@ -3672,17 +3690,23 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
/* try to load the slice aware rss firmware */
old_fw = mgp->fw_name;
+ old_allocated = mgp->fw_name_allocated;
+ /* don't free old_fw if we override it. */
+ mgp->fw_name_allocated = false;
+
if (myri10ge_fw_name != NULL) {
dev_info(&mgp->pdev->dev, "overriding rss firmware to %s\n",
myri10ge_fw_name);
- mgp->fw_name = myri10ge_fw_name;
+ set_fw_name(mgp, myri10ge_fw_name, false);
} else if (old_fw == myri10ge_fw_aligned)
- mgp->fw_name = myri10ge_fw_rss_aligned;
+ set_fw_name(mgp, myri10ge_fw_rss_aligned, false);
else
- mgp->fw_name = myri10ge_fw_rss_unaligned;
+ set_fw_name(mgp, myri10ge_fw_rss_unaligned, false);
status = myri10ge_load_firmware(mgp, 0);
if (status != 0) {
dev_info(&pdev->dev, "Rss firmware not found\n");
+ if (old_allocated)
+ kfree(old_fw);
return;
}
@@ -3729,8 +3753,8 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
* slices. We give up on MSI-X if we can only get a single
* vector. */
- mgp->msix_vectors = kzalloc(mgp->num_slices *
- sizeof(*mgp->msix_vectors), GFP_KERNEL);
+ mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors),
+ GFP_KERNEL);
if (mgp->msix_vectors == NULL)
goto disable_msix;
for (i = 0; i < mgp->num_slices; i++) {
@@ -3747,6 +3771,8 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
mgp->num_slices);
if (status == 0) {
pci_disable_msix(pdev);
+ if (old_allocated)
+ kfree(old_fw);
return;
}
if (status > 0)
@@ -3763,7 +3789,7 @@ disable_msix:
abort_with_fw:
mgp->num_slices = 1;
- mgp->fw_name = old_fw;
+ set_fw_name(mgp, old_fw, old_allocated);
myri10ge_load_firmware(mgp, 0);
}
@@ -3993,6 +4019,7 @@ abort_with_enabled:
pci_disable_device(pdev);
abort_with_netdev:
+ set_fw_name(mgp, NULL, false);
free_netdev(netdev);
return status;
}
@@ -4037,6 +4064,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
mgp->cmd, mgp->cmd_bus);
+ set_fw_name(mgp, NULL, false);
free_netdev(netdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
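
The myri10ge changes fix a leak-prone pattern: fw_name could point either at a string literal or at a kstrdup() copy of the module parameter, and nothing recorded which. set_fw_name() pairs the pointer with an ownership flag so each reassignment frees exactly the heap copies. A userspace sketch of the idiom (file names invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

/* Pair the pointer with an ownership flag so reassignment frees
 * exactly the heap copies and never a string literal. */
struct dev_state {
	char *fw_name;
	bool fw_name_allocated;
};

static void set_fw_name(struct dev_state *st, char *name, bool allocated)
{
	if (st->fw_name_allocated)
		free(st->fw_name);
	st->fw_name = name;
	st->fw_name_allocated = allocated;
}

int main(void)
{
	struct dev_state st = { NULL, false };

	set_fw_name(&st, "builtin_aligned.dat", false);	/* literal */
	set_fw_name(&st, strdup("user_override.dat"), true);	/* heap copy */
	printf("fw: %s\n", st.fw_name);
	set_fw_name(&st, NULL, false);	/* frees the strdup'd copy */
	return 0;
}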
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index 1a57c3da1f49..617f898ba5f0 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -926,7 +926,7 @@ static const struct net_device_ops myri_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit myri_sbus_probe(struct of_device *op, const struct of_device_id *match)
+static int __devinit myri_sbus_probe(struct platform_device *op, const struct of_device_id *match)
{
struct device_node *dp = op->dev.of_node;
static unsigned version_printed;
@@ -1079,7 +1079,7 @@ static int __devinit myri_sbus_probe(struct of_device *op, const struct of_devic
mp->dev = dev;
dev->watchdog_timeo = 5*HZ;
- dev->irq = op->irqs[0];
+ dev->irq = op->archdata.irqs[0];
dev->netdev_ops = &myri_ops;
/* Register interrupt handler now. */
@@ -1124,7 +1124,7 @@ err:
return -ENODEV;
}
-static int __devexit myri_sbus_remove(struct of_device *op)
+static int __devexit myri_sbus_remove(struct platform_device *op)
{
struct myri_eth *mp = dev_get_drvdata(&op->dev);
struct net_device *net_dev = mp->dev;
@@ -1172,12 +1172,12 @@ static struct of_platform_driver myri_sbus_driver = {
static int __init myri_sbus_init(void)
{
- return of_register_driver(&myri_sbus_driver, &of_bus_type);
+ return of_register_platform_driver(&myri_sbus_driver);
}
static void __exit myri_sbus_exit(void)
{
- of_unregister_driver(&myri_sbus_driver);
+ of_unregister_platform_driver(&myri_sbus_driver);
}
module_init(myri_sbus_init);
diff --git a/drivers/net/myri_sbus.h b/drivers/net/myri_sbus.h
index ff363e95d9cf..80a2fa5cf757 100644
--- a/drivers/net/myri_sbus.h
+++ b/drivers/net/myri_sbus.h
@@ -288,7 +288,7 @@ struct myri_eth {
struct myri_eeprom eeprom; /* Local copy of EEPROM. */
unsigned int reg_size; /* Size of register space. */
unsigned int shmem_base; /* Offset to shared ram. */
- struct of_device *myri_op; /* Our OF device struct. */
+ struct platform_device *myri_op; /* Our OF device struct. */
};
/* We use this to acquire receive skb's that we can DMA directly into. */
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index a6033d48b5cc..2fd39630b1e5 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -1570,7 +1570,7 @@ static int netdev_open(struct net_device *dev)
init_timer(&np->timer);
np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ);
np->timer.data = (unsigned long)dev;
- np->timer.function = &netdev_timer; /* timer handler */
+ np->timer.function = netdev_timer; /* timer handler */
add_timer(&np->timer);
return 0;
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index ffa1b9ce1cc5..6dca3574e355 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
#define _NETXEN_NIC_LINUX_MAJOR 4
#define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 73
-#define NETXEN_NIC_LINUX_VERSIONID "4.0.73"
+#define _NETXEN_NIC_LINUX_SUBVERSION 74
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.74"
#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index c865dda2adf1..cabae7bb1fc6 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -1805,8 +1805,6 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
netxen_ctx_msg msg = 0;
struct list_head *head;
- spin_lock(&rds_ring->lock);
-
producer = rds_ring->producer;
head = &rds_ring->free_list;
@@ -1853,8 +1851,6 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
NETXEN_RCV_PRODUCER_OFFSET), msg);
}
}
-
- spin_unlock(&rds_ring->lock);
}
static void
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index fd86e18604e6..73d314592230 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -2032,8 +2032,6 @@ struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
struct netxen_adapter *adapter = netdev_priv(netdev);
struct net_device_stats *stats = &netdev->stats;
- memset(stats, 0, sizeof(*stats));
-
stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
stats->tx_packets = adapter->stats.xmitfinished;
stats->rx_bytes = adapter->stats.rxbytes;
@@ -2133,9 +2131,16 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
#ifdef CONFIG_NET_POLL_CONTROLLER
static void netxen_nic_poll_controller(struct net_device *netdev)
{
+ int ring;
+ struct nx_host_sds_ring *sds_ring;
struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
disable_irq(adapter->irq);
- netxen_intr(adapter->irq, adapter);
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netxen_intr(adapter->irq, sds_ring);
+ }
enable_irq(adapter->irq);
}
#endif
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index b9b950845b0e..e36a83845a1c 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -28,10 +28,7 @@
#include <linux/slab.h>
#include <linux/io.h>
-
-#ifdef CONFIG_SPARC64
#include <linux/of_device.h>
-#endif
#include "niu.h"
@@ -3487,7 +3484,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
RCR_ENTRY_ERROR)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
} else if (!(val & RCR_ENTRY_MULTI))
append_size = len - skb->len;
@@ -4507,7 +4504,7 @@ static int niu_alloc_channels(struct niu *np)
np->dev->real_num_tx_queues = np->num_tx_rings;
- np->rx_rings = kzalloc(np->num_rx_rings * sizeof(struct rx_ring_info),
+ np->rx_rings = kcalloc(np->num_rx_rings, sizeof(struct rx_ring_info),
GFP_KERNEL);
err = -ENOMEM;
if (!np->rx_rings)
@@ -4541,7 +4538,7 @@ static int niu_alloc_channels(struct niu *np)
return err;
}
- np->tx_rings = kzalloc(np->num_tx_rings * sizeof(struct tx_ring_info),
+ np->tx_rings = kcalloc(np->num_tx_rings, sizeof(struct tx_ring_info),
GFP_KERNEL);
err = -ENOMEM;
if (!np->tx_rings)
@@ -7272,32 +7269,28 @@ static int niu_get_ethtool_tcam_all(struct niu *np,
struct niu_parent *parent = np->parent;
struct niu_tcam_entry *tp;
int i, idx, cnt;
- u16 n_entries;
unsigned long flags;
-
+ int ret = 0;
/* put the tcam size here */
nfc->data = tcam_get_size(np);
niu_lock_parent(np, flags);
- n_entries = nfc->rule_cnt;
for (cnt = 0, i = 0; i < nfc->data; i++) {
idx = tcam_get_index(np, i);
tp = &parent->tcam[idx];
if (!tp->valid)
continue;
+ if (cnt == nfc->rule_cnt) {
+ ret = -EMSGSIZE;
+ break;
+ }
rule_locs[cnt] = i;
cnt++;
}
niu_unlock_parent(np, flags);
- if (n_entries != cnt) {
- /* print warning, this should not happen */
- netdev_info(np->dev, "niu%d: In %s(): n_entries[%d] != cnt[%d]!!!\n",
- np->parent->index, __func__, n_entries, cnt);
- }
-
- return 0;
+ return ret;
}
static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
@@ -7469,10 +7462,12 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
if (fsp->flow_type == IP_USER_FLOW) {
int i;
int add_usr_cls = 0;
- int ipv6 = 0;
struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec;
struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec;
+ if (uspec->ip_ver != ETH_RX_NFC_IP4)
+ return -EINVAL;
+
niu_lock_parent(np, flags);
for (i = 0; i < NIU_L3_PROG_CLS; i++) {
@@ -7501,9 +7496,7 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
default:
break;
}
- if (uspec->ip_ver == ETH_RX_NFC_IP6)
- ipv6 = 1;
- ret = tcam_user_ip_class_set(np, class, ipv6,
+ ret = tcam_user_ip_class_set(np, class, 0,
uspec->proto,
uspec->tos,
umask->tos);
@@ -7560,16 +7553,7 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
ret = -EINVAL;
goto out;
case IP_USER_FLOW:
- if (fsp->h_u.usr_ip4_spec.ip_ver == ETH_RX_NFC_IP4) {
- niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table,
- class);
- } else {
- /* Not yet implemented */
- netdev_info(np->dev, "niu%d: In %s(): usr flow for IPv6 not implemented\n",
- parent->index, __func__);
- ret = -EINVAL;
- goto out;
- }
+ niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
break;
default:
netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
@@ -9106,7 +9090,7 @@ retry:
static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
{
#ifdef CONFIG_SPARC64
- struct of_device *op = np->op;
+ struct platform_device *op = np->op;
const u32 *int_prop;
int i;
@@ -9114,12 +9098,12 @@ static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
if (!int_prop)
return -ENODEV;
- for (i = 0; i < op->num_irqs; i++) {
+ for (i = 0; i < op->archdata.num_irqs; i++) {
ldg_num_map[i] = int_prop[i];
- np->ldg[i].irq = op->irqs[i];
+ np->ldg[i].irq = op->archdata.irqs[i];
}
- np->num_ldg = op->num_irqs;
+ np->num_ldg = op->archdata.num_irqs;
return 0;
#else
@@ -9691,7 +9675,7 @@ static void __devinit niu_driver_version(void)
static struct net_device * __devinit niu_alloc_and_init(
struct device *gen_dev, struct pci_dev *pdev,
- struct of_device *op, const struct niu_ops *ops,
+ struct platform_device *op, const struct niu_ops *ops,
u8 port)
{
struct net_device *dev;
@@ -10067,7 +10051,7 @@ static const struct niu_ops niu_phys_ops = {
.unmap_single = niu_phys_unmap_single,
};
-static int __devinit niu_of_probe(struct of_device *op,
+static int __devinit niu_of_probe(struct platform_device *op,
const struct of_device_id *match)
{
union niu_parent_id parent_id;
@@ -10182,7 +10166,7 @@ err_out:
return err;
}
-static int __devexit niu_of_remove(struct of_device *op)
+static int __devexit niu_of_remove(struct platform_device *op)
{
struct net_device *dev = dev_get_drvdata(&op->dev);
@@ -10249,14 +10233,14 @@ static int __init niu_init(void)
niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
#ifdef CONFIG_SPARC64
- err = of_register_driver(&niu_of_driver, &of_bus_type);
+ err = of_register_platform_driver(&niu_of_driver);
#endif
if (!err) {
err = pci_register_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
if (err)
- of_unregister_driver(&niu_of_driver);
+ of_unregister_platform_driver(&niu_of_driver);
#endif
}
@@ -10267,7 +10251,7 @@ static void __exit niu_exit(void)
{
pci_unregister_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
- of_unregister_driver(&niu_of_driver);
+ of_unregister_platform_driver(&niu_of_driver);
#endif
}
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index d6715465f35d..a41fa8ebe05f 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -3236,7 +3236,7 @@ struct niu_phy_ops {
int (*link_status)(struct niu *np, int *);
};
-struct of_device;
+struct platform_device;
struct niu {
void __iomem *regs;
struct net_device *dev;
@@ -3297,7 +3297,7 @@ struct niu {
struct niu_vpd vpd;
u32 eeprom_len;
- struct of_device *op;
+ struct platform_device *op;
void __iomem *vir_regs_1;
void __iomem *vir_regs_2;
};
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 5a3488f76b38..3bbd0aab17e8 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -923,7 +923,7 @@ static void rx_irq(struct net_device *ndev)
if ((extsts & 0x002a0000) && !(extsts & 0x00540000)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else {
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
}
skb->protocol = eth_type_trans(skb, ndev);
#ifdef NS83820_VLAN_ACCEL_SUPPORT
@@ -1246,7 +1246,6 @@ static int ns83820_get_settings(struct net_device *ndev,
{
struct ns83820 *dev = PRIV(ndev);
u32 cfg, tanar, tbicr;
- int have_optical = 0;
int fullduplex = 0;
/*
@@ -1267,25 +1266,25 @@ static int ns83820_get_settings(struct net_device *ndev,
tanar = readl(dev->base + TANAR);
tbicr = readl(dev->base + TBICR);
- if (dev->CFG_cache & CFG_TBI_EN) {
- /* we have an optical interface */
- have_optical = 1;
- fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0;
-
- } else {
- /* We have copper */
- fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0;
- }
+ fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0;
cmd->supported = SUPPORTED_Autoneg;
- /* we have optical interface */
if (dev->CFG_cache & CFG_TBI_EN) {
+ /* we have optical interface */
cmd->supported |= SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full |
SUPPORTED_FIBRE;
cmd->port = PORT_FIBRE;
- } /* TODO: else copper related support */
+ } else {
+ /* we have copper */
+ cmd->supported |= SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_MII;
+ cmd->port = PORT_MII;
+ }
cmd->duplex = fullduplex ? DUPLEX_FULL : DUPLEX_HALF;
switch (cfg / CFG_SPDSTS0 & 3) {
@@ -1299,7 +1298,8 @@ static int ns83820_get_settings(struct net_device *ndev,
cmd->speed = SPEED_10;
break;
}
- cmd->autoneg = (tbicr & TBICR_MR_AN_ENABLE) ? 1: 0;
+ cmd->autoneg = (tbicr & TBICR_MR_AN_ENABLE)
+ ? AUTONEG_ENABLE : AUTONEG_DISABLE;
return 0;
}
@@ -1405,6 +1405,13 @@ static const struct ethtool_ops ops = {
.get_link = ns83820_get_link
};
+static inline void ns83820_disable_interrupts(struct ns83820 *dev)
+{
+ writel(0, dev->base + IMR);
+ writel(0, dev->base + IER);
+ readl(dev->base + IER);
+}
+
/* this function is called in irq context from the ISR */
static void ns83820_mib_isr(struct ns83820 *dev)
{
@@ -1557,10 +1564,7 @@ static int ns83820_stop(struct net_device *ndev)
/* FIXME: protect against interrupt handler? */
del_timer_sync(&dev->tx_watchdog);
- /* disable interrupts */
- writel(0, dev->base + IMR);
- writel(0, dev->base + IER);
- readl(dev->base + IER);
+ ns83820_disable_interrupts(dev);
dev->rx_info.up = 0;
synchronize_irq(dev->pci_dev->irq);
@@ -2023,10 +2027,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev,
dev->tx_descs, (long)dev->tx_phy_descs,
dev->rx_info.descs, (long)dev->rx_info.phy_descs);
- /* disable interrupts */
- writel(0, dev->base + IMR);
- writel(0, dev->base + IER);
- readl(dev->base + IER);
+ ns83820_disable_interrupts(dev);
dev->IMR_cache = 0;
@@ -2250,9 +2251,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev,
return 0;
out_cleanup:
- writel(0, dev->base + IMR); /* paranoia */
- writel(0, dev->base + IER);
- readl(dev->base + IER);
+ ns83820_disable_interrupts(dev); /* paranoia */
out_free_irq:
rtnl_unlock();
free_irq(pci_dev->irq, ndev);
@@ -2277,9 +2276,7 @@ static void __devexit ns83820_remove_one(struct pci_dev *pci_dev)
if (!ndev) /* paranoia */
return;
- writel(0, dev->base + IMR); /* paranoia */
- writel(0, dev->base + IER);
- readl(dev->base + IER);
+ ns83820_disable_interrupts(dev); /* paranoia */
unregister_netdev(ndev);
free_irq(dev->pci_dev->irq, ndev);
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 8ab6ae0a6107..828e97cacdbf 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -808,7 +808,7 @@ static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx,
skb->csum = (macrx & XCT_MACRX_CSUM_M) >>
XCT_MACRX_CSUM_S;
} else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
packets++;
tot_bytes += len;
diff --git a/drivers/net/pasemi_mac_ethtool.c b/drivers/net/pasemi_mac_ethtool.c
index fefa79e34b95..4825959a0efe 100644
--- a/drivers/net/pasemi_mac_ethtool.c
+++ b/drivers/net/pasemi_mac_ethtool.c
@@ -90,21 +90,6 @@ pasemi_mac_ethtool_set_settings(struct net_device *netdev,
return phy_ethtool_sset(phydev, cmd);
}
-static void
-pasemi_mac_ethtool_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct pasemi_mac *mac;
- mac = netdev_priv(netdev);
-
- /* clear and fill out info */
- memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
- strncpy(drvinfo->driver, "pasemi_mac", 12);
- strcpy(drvinfo->version, "N/A");
- strcpy(drvinfo->fw_version, "N/A");
- strncpy(drvinfo->bus_info, pci_name(mac->pdev), 32);
-}
-
static u32
pasemi_mac_ethtool_get_msglevel(struct net_device *netdev)
{
@@ -164,7 +149,6 @@ static void pasemi_mac_get_strings(struct net_device *netdev, u32 stringset,
const struct ethtool_ops pasemi_mac_ethtool_ops = {
.get_settings = pasemi_mac_ethtool_get_settings,
.set_settings = pasemi_mac_ethtool_set_settings,
- .get_drvinfo = pasemi_mac_ethtool_get_drvinfo,
.get_msglevel = pasemi_mac_ethtool_get_msglevel,
.set_msglevel = pasemi_mac_ethtool_set_msglevel,
.get_link = ethtool_op_get_link,
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 56f3fc45dbaa..8dd03439d994 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -1125,7 +1125,7 @@ static int netdrv_open(struct net_device *dev)
init_timer(&tp->timer);
tp->timer.expires = jiffies + 3 * HZ;
tp->timer.data = (unsigned long) dev;
- tp->timer.function = &netdrv_timer;
+ tp->timer.function = netdrv_timer;
add_timer(&tp->timer);
DPRINTK("EXIT, returning 0\n");
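
The pci-skeleton hunk only drops a redundant '&': a function designator already decays to a function pointer, so both spellings assign the same value. A sketch with hypothetical example_* names, using the timer API of this era:

    static void example_timer_fn(unsigned long data)
    {
            /* periodic work would go here */
    }

    static void example_arm_timer(struct timer_list *t, unsigned long data)
    {
            init_timer(t);
            t->expires = jiffies + 3 * HZ;
            t->data = data;
            t->function = example_timer_fn; /* same as &example_timer_fn */
            add_timer(t);
    }
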
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 10ee106a1617..042f6777e6b9 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -69,6 +69,8 @@ earlier 3Com products.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
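
Defining pr_fmt before the first include makes every pr_*() call in the file prefix its message with the module name, which is why the literal "3c574_cs: " prefixes are dropped from the messages converted below. A minimal sketch (example_log is hypothetical):

    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
    #include <linux/kernel.h>

    static void example_log(void)
    {
            /* prints "3c574_cs: IO port conflict" when built as that
             * module; pr_fmt() supplies the prefix */
            pr_notice("IO port conflict\n");
    }
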
@@ -83,11 +85,9 @@ earlier 3Com products.
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
-#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/mii.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -239,7 +239,6 @@ static int el3_rx(struct net_device *dev, int worklimit);
static int el3_close(struct net_device *dev);
static void el3_tx_timeout(struct net_device *dev);
static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static const struct ethtool_ops netdev_ethtool_ops;
static void set_rx_mode(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
@@ -279,14 +278,13 @@ static int tc574_probe(struct pcmcia_device *link)
lp->p_dev = link;
spin_lock_init(&lp->window_lock);
- link->io.NumPorts1 = 32;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
+ link->resource[0]->end = 32;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 1;
dev->netdev_ops = &el3_netdev_ops;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
dev->watchdog_timeo = TX_TIMEOUT;
return tc574_config(link);
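
This probe hunk shows the io_req_t-to-resource mapping used by all the PCMCIA conversions in this series: BasePort1/NumPorts1/Attributes1 become resource[0]->start, resource[0]->end (window length at request time), and resource[0]->flags (data-path width), while IOAddrLines moves to link->io_lines. A sketch under those assumptions (example_setup_io is hypothetical):

    static void example_setup_io(struct pcmcia_device *link)
    {
            link->resource[0]->end = 32;            /* was io.NumPorts1 */
            link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
            link->io_lines = 16;                    /* was io.IOAddrLines */
    }
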
@@ -338,10 +336,11 @@ static int tc574_config(struct pcmcia_device *link)
dev_dbg(&link->dev, "3c574_config()\n");
- link->io.IOAddrLines = 16;
+ link->io_lines = 16;
+
for (i = j = 0; j < 0x400; j += 0x20) {
- link->io.BasePort1 = j ^ 0x300;
- i = pcmcia_request_io(link, &link->io);
+ link->resource[0]->start = j ^ 0x300;
+ i = pcmcia_request_io(link);
if (i == 0)
break;
}
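
The XOR in the probe loop orders the candidate bases: walking j through 0x000..0x3ff in 0x20 steps and XOR-ing with 0x300 yields 0x300..0x3e0 first, then 0x200.., 0x100.., 0x000.., so the traditional 3Com base is tried before the fallbacks. A standalone sketch (example_probe_io is hypothetical):

    static int example_probe_io(struct pcmcia_device *link)
    {
            int ret = -ENODEV, j;

            for (j = 0; j < 0x400; j += 0x20) {
                    link->resource[0]->start = j ^ 0x300;
                    ret = pcmcia_request_io(link);
                    if (ret == 0)
                            break;          /* window granted */
            }
            return ret;
    }
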
@@ -357,7 +356,7 @@ static int tc574_config(struct pcmcia_device *link)
goto failed;
dev->irq = link->irq;
- dev->base_addr = link->io.BasePort1;
+ dev->base_addr = link->resource[0]->start;
ioaddr = dev->base_addr;
@@ -376,8 +375,8 @@ static int tc574_config(struct pcmcia_device *link)
for (i = 0; i < 3; i++)
phys_addr[i] = htons(read_eeprom(ioaddr, i + 10));
if (phys_addr[0] == htons(0x6060)) {
- printk(KERN_NOTICE "3c574_cs: IO port conflict at 0x%03lx"
- "-0x%03lx\n", dev->base_addr, dev->base_addr+15);
+ pr_notice("IO port conflict at 0x%03lx-0x%03lx\n",
+ dev->base_addr, dev->base_addr+15);
goto failed;
}
}
@@ -391,7 +390,7 @@ static int tc574_config(struct pcmcia_device *link)
outw(2<<11, ioaddr + RunnerRdCtrl);
mcr = inb(ioaddr + 2);
outw(0<<11, ioaddr + RunnerRdCtrl);
- printk(KERN_INFO " ASIC rev %d,", mcr>>3);
+ pr_info(" ASIC rev %d,", mcr>>3);
EL3WINDOW(3);
config = inl(ioaddr + Wn3_Config);
lp->default_media = (config & Xcvr) >> Xcvr_shift;
@@ -428,7 +427,7 @@ static int tc574_config(struct pcmcia_device *link)
}
}
if (phy > 32) {
- printk(KERN_NOTICE " No MII transceivers found!\n");
+ pr_notice(" No MII transceivers found!\n");
goto failed;
}
i = mdio_read(ioaddr, lp->phys, 16) | 0x40;
@@ -444,18 +443,16 @@ static int tc574_config(struct pcmcia_device *link)
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
- printk(KERN_NOTICE "3c574_cs: register_netdev() failed\n");
+ pr_notice("register_netdev() failed\n");
goto failed;
}
- printk(KERN_INFO "%s: %s at io %#3lx, irq %d, "
- "hw_addr %pM.\n",
- dev->name, cardname, dev->base_addr, dev->irq,
- dev->dev_addr);
- printk(" %dK FIFO split %s Rx:Tx, %sMII interface.\n",
- 8 << config & Ram_size,
- ram_split[(config & Ram_split) >> Ram_split_shift],
- config & Autoselect ? "autoselect " : "");
+ netdev_info(dev, "%s at io %#3lx, irq %d, hw_addr %pM\n",
+ cardname, dev->base_addr, dev->irq, dev->dev_addr);
+ netdev_info(dev, " %dK FIFO split %s Rx:Tx, %sMII interface.\n",
+ 8 << config & Ram_size,
+ ram_split[(config & Ram_split) >> Ram_split_shift],
+ config & Autoselect ? "autoselect " : "");
return 0;
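
netdev_info() and friends emit the interface name themselves, replacing the hand-rolled printk(KERN_INFO "%s: ...", dev->name, ...) pattern throughout this series. A sketch showing the before/after equivalence (example_log_link is hypothetical; the new form also tags the parent device):

    static void example_log_link(struct net_device *dev)
    {
            printk(KERN_INFO "%s: link up\n", dev->name);   /* old style */
            netdev_info(dev, "link up\n");                  /* new style */
    }
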
@@ -502,14 +499,14 @@ static void dump_status(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
EL3WINDOW(1);
- printk(KERN_INFO " irq status %04x, rx status %04x, tx status "
- "%02x, tx free %04x\n", inw(ioaddr+EL3_STATUS),
- inw(ioaddr+RxStatus), inb(ioaddr+TxStatus),
- inw(ioaddr+TxFree));
+ netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x, tx free %04x\n",
+ inw(ioaddr+EL3_STATUS),
+ inw(ioaddr+RxStatus), inb(ioaddr+TxStatus),
+ inw(ioaddr+TxFree));
EL3WINDOW(4);
- printk(KERN_INFO " diagnostics: fifo %04x net %04x ethernet %04x"
- " media %04x\n", inw(ioaddr+0x04), inw(ioaddr+0x06),
- inw(ioaddr+0x08), inw(ioaddr+0x0a));
+ netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n",
+ inw(ioaddr+0x04), inw(ioaddr+0x06),
+ inw(ioaddr+0x08), inw(ioaddr+0x0a));
EL3WINDOW(1);
}
@@ -523,7 +520,7 @@ static void tc574_wait_for_completion(struct net_device *dev, int cmd)
while (--i > 0)
if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break;
if (i == 0)
- printk(KERN_NOTICE "%s: command 0x%04x did not complete!\n", dev->name, cmd);
+ netdev_notice(dev, "command 0x%04x did not complete!\n", cmd);
}
/* Read a word from the EEPROM using the regular EEPROM access register.
@@ -710,7 +707,7 @@ static int el3_open(struct net_device *dev)
netif_start_queue(dev);
tc574_reset(dev);
- lp->media.function = &media_check;
+ lp->media.function = media_check;
lp->media.data = (unsigned long) dev;
lp->media.expires = jiffies + HZ;
add_timer(&lp->media);
@@ -725,7 +722,7 @@ static void el3_tx_timeout(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
- printk(KERN_NOTICE "%s: Transmit timed out!\n", dev->name);
+ netdev_notice(dev, "Transmit timed out!\n");
dump_status(dev);
dev->stats.tx_errors++;
dev->trans_start = jiffies; /* prevent tx timeout */
@@ -848,8 +845,8 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
EL3WINDOW(4);
fifo_diag = inw(ioaddr + Wn4_FIFODiag);
EL3WINDOW(1);
- printk(KERN_NOTICE "%s: adapter failure, FIFO diagnostic"
- " register %04x.\n", dev->name, fifo_diag);
+ netdev_notice(dev, "adapter failure, FIFO diagnostic register %04x\n",
+ fifo_diag);
if (fifo_diag & 0x0400) {
/* Tx overrun */
tc574_wait_for_completion(dev, TxReset);
@@ -903,7 +900,7 @@ static void media_check(unsigned long arg)
this, we can limp along even if the interrupt is blocked */
if ((inw(ioaddr + EL3_STATUS) & IntLatch) && (inb(ioaddr + Timer) == 0xff)) {
if (!lp->fast_poll)
- printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+ netdev_info(dev, "interrupt(s) dropped!\n");
local_irq_save(flags);
el3_interrupt(dev->irq, dev);
@@ -926,23 +923,21 @@ static void media_check(unsigned long arg)
if (media != lp->media_status) {
if ((media ^ lp->media_status) & 0x0004)
- printk(KERN_INFO "%s: %s link beat\n", dev->name,
- (lp->media_status & 0x0004) ? "lost" : "found");
+ netdev_info(dev, "%s link beat\n",
+ (lp->media_status & 0x0004) ? "lost" : "found");
if ((media ^ lp->media_status) & 0x0020) {
lp->partner = 0;
if (lp->media_status & 0x0020) {
- printk(KERN_INFO "%s: autonegotiation restarted\n",
- dev->name);
+ netdev_info(dev, "autonegotiation restarted\n");
} else if (partner) {
partner &= lp->advertising;
lp->partner = partner;
- printk(KERN_INFO "%s: autonegotiation complete: "
- "%sbaseT-%cD selected\n", dev->name,
- ((partner & 0x0180) ? "100" : "10"),
- ((partner & 0x0140) ? 'F' : 'H'));
+ netdev_info(dev, "autonegotiation complete: "
+ "%dbaseT-%cD selected\n",
+ (partner & 0x0180) ? 100 : 10,
+ (partner & 0x0140) ? 'F' : 'H');
} else {
- printk(KERN_INFO "%s: link partner did not autonegotiate\n",
- dev->name);
+ netdev_info(dev, "link partner did not autonegotiate\n");
}
EL3WINDOW(3);
@@ -952,10 +947,9 @@ static void media_check(unsigned long arg)
}
if (media & 0x0010)
- printk(KERN_INFO "%s: remote fault detected\n",
- dev->name);
+ netdev_info(dev, "remote fault detected\n");
if (media & 0x0002)
- printk(KERN_INFO "%s: jabber detected\n", dev->name);
+ netdev_info(dev, "jabber detected\n");
lp->media_status = media;
}
spin_unlock_irqrestore(&lp->window_lock, flags);
@@ -1065,16 +1059,6 @@ static int el3_rx(struct net_device *dev, int worklimit)
return worklimit;
}
-static void netdev_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, "3c574_cs");
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
-};
-
/* Provide ioctl() calls to examine the MII xcvr state. */
static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index ce63c3773b4c..35562a395770 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -19,6 +19,8 @@
======================================================================*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "3c589_cs"
#define DRV_VERSION "1.162-ac"
@@ -41,7 +43,6 @@
#include <linux/bitops.h>
#include <linux/jiffies.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -214,8 +215,8 @@ static int tc589_probe(struct pcmcia_device *link)
lp->p_dev = link;
spin_lock_init(&lp->lock);
- link->io.NumPorts1 = 16;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
+ link->resource[0]->end = 16;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -265,7 +266,7 @@ static int tc589_config(struct pcmcia_device *link)
__be16 *phys_addr;
int ret, i, j, multi = 0, fifo;
unsigned int ioaddr;
- char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
+ static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
u8 *buf;
size_t len;
@@ -274,16 +275,16 @@ static int tc589_config(struct pcmcia_device *link)
phys_addr = (__be16 *)dev->dev_addr;
/* Is this a 3c562? */
if (link->manf_id != MANFID_3COM)
- printk(KERN_INFO "3c589_cs: hmmm, is this really a "
- "3Com card??\n");
+ dev_info(&link->dev, "hmmm, is this really a 3Com card??\n");
multi = (link->card_id == PRODID_3COM_3C562);
+ link->io_lines = 16;
+
/* For the 3c562, the base address must be xx00-xx7f */
- link->io.IOAddrLines = 16;
for (i = j = 0; j < 0x400; j += 0x10) {
if (multi && (j & 0x80)) continue;
- link->io.BasePort1 = j ^ 0x300;
- i = pcmcia_request_io(link, &link->io);
+ link->resource[0]->start = j ^ 0x300;
+ i = pcmcia_request_io(link);
if (i == 0)
break;
}
@@ -299,7 +300,7 @@ static int tc589_config(struct pcmcia_device *link)
goto failed;
dev->irq = link->irq;
- dev->base_addr = link->io.BasePort1;
+ dev->base_addr = link->resource[0]->start;
ioaddr = dev->base_addr;
EL3WINDOW(0);
@@ -315,8 +316,8 @@ static int tc589_config(struct pcmcia_device *link)
for (i = 0; i < 3; i++)
phys_addr[i] = htons(read_eeprom(ioaddr, i));
if (phys_addr[0] == htons(0x6060)) {
- printk(KERN_ERR "3c589_cs: IO port conflict at 0x%03lx"
- "-0x%03lx\n", dev->base_addr, dev->base_addr+15);
+ dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n",
+ dev->base_addr, dev->base_addr+15);
goto failed;
}
}
@@ -330,12 +331,12 @@ static int tc589_config(struct pcmcia_device *link)
if ((if_port >= 0) && (if_port <= 3))
dev->if_port = if_port;
else
- printk(KERN_ERR "3c589_cs: invalid if_port requested\n");
+ dev_err(&link->dev, "invalid if_port requested\n");
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
- printk(KERN_ERR "3c589_cs: register_netdev() failed\n");
+ dev_err(&link->dev, "register_netdev() failed\n");
goto failed;
}
@@ -537,7 +538,7 @@ static int el3_open(struct net_device *dev)
tc589_reset(dev);
init_timer(&lp->media);
- lp->media.function = &media_check;
+ lp->media.function = media_check;
lp->media.data = (unsigned long) dev;
lp->media.expires = jiffies + HZ;
add_timer(&lp->media);
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 33525bf2a3d3..3f61fde70d73 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -24,6 +24,8 @@
======================================================================*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -32,14 +34,12 @@
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
-#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include "../8390.h"
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ciscode.h>
@@ -87,7 +87,6 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
static struct net_device_stats *get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static void axnet_tx_timeout(struct net_device *dev);
-static const struct ethtool_ops netdev_ethtool_ops;
static irqreturn_t ei_irq_wrapper(int irq, void *dev_id);
static void ei_watchdog(u_long arg);
static void axnet_reset_8390(struct net_device *dev);
@@ -172,7 +171,6 @@ static int axnet_probe(struct pcmcia_device *link)
dev->netdev_ops = &axnet_netdev_ops;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
dev->watchdog_timeo = TX_TIMEOUT;
return axnet_config(link);
@@ -260,28 +258,30 @@ static int get_prom(struct pcmcia_device *link)
static int try_io_port(struct pcmcia_device *link)
{
int j, ret;
- if (link->io.NumPorts1 == 32) {
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+ link->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ link->resource[1]->flags &= ~IO_DATA_PATH_WIDTH;
+ if (link->resource[0]->end == 32) {
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
/* for master/slave multifunction cards */
- if (link->io.NumPorts2 > 0)
- link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+ if (link->resource[1]->end > 0)
+ link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
} else {
/* This should be two 16-port windows */
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- link->io.Attributes2 = IO_DATA_PATH_WIDTH_16;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ link->resource[1]->flags |= IO_DATA_PATH_WIDTH_16;
}
- if (link->io.BasePort1 == 0) {
- link->io.IOAddrLines = 16;
+ if (link->resource[0]->start == 0) {
for (j = 0; j < 0x400; j += 0x20) {
- link->io.BasePort1 = j ^ 0x300;
- link->io.BasePort2 = (j ^ 0x300) + 0x10;
- ret = pcmcia_request_io(link, &link->io);
+ link->resource[0]->start = j ^ 0x300;
+ link->resource[1]->start = (j ^ 0x300) + 0x10;
+ link->io_lines = 16;
+ ret = pcmcia_request_io(link);
if (ret == 0)
return ret;
}
return ret;
} else {
- return pcmcia_request_io(link, &link->io);
+ return pcmcia_request_io(link);
}
}
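
The two new flags &= ~IO_DATA_PATH_WIDTH lines at the top of try_io_port() matter because link->resource[] is persistent state, unlike the old scratch io_req_t: on a retry the previously OR-ed width bits would otherwise accumulate. A sketch of the pattern (example_set_width is hypothetical):

    static void example_set_width(struct resource *res, unsigned long width)
    {
            res->flags &= ~IO_DATA_PATH_WIDTH;      /* clear stale width */
            res->flags |= width;    /* e.g. IO_DATA_PATH_WIDTH_AUTO */
    }
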
@@ -302,15 +302,15 @@ static int axnet_configcheck(struct pcmcia_device *p_dev,
network function with window 0, and serial with window 1 */
if (io->nwin > 1) {
i = (io->win[1].len > io->win[0].len);
- p_dev->io.BasePort2 = io->win[1-i].base;
- p_dev->io.NumPorts2 = io->win[1-i].len;
+ p_dev->resource[1]->start = io->win[1-i].base;
+ p_dev->resource[1]->end = io->win[1-i].len;
} else {
- i = p_dev->io.NumPorts2 = 0;
+ i = p_dev->resource[1]->end = 0;
}
- p_dev->io.BasePort1 = io->win[i].base;
- p_dev->io.NumPorts1 = io->win[i].len;
- p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
- if (p_dev->io.NumPorts1 + p_dev->io.NumPorts2 >= 32)
+ p_dev->resource[0]->start = io->win[i].base;
+ p_dev->resource[0]->end = io->win[i].len;
+ p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+ if (p_dev->resource[0]->end + p_dev->resource[1]->end >= 32)
return try_io_port(p_dev);
return -ENODEV;
@@ -333,7 +333,7 @@ static int axnet_config(struct pcmcia_device *link)
if (!link->irq)
goto failed;
- if (link->io.NumPorts2 == 8) {
+ if (resource_size(link->resource[1]) == 8) {
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status = CCSR_AUDIO_ENA;
}
@@ -343,11 +343,11 @@ static int axnet_config(struct pcmcia_device *link)
goto failed;
dev->irq = link->irq;
- dev->base_addr = link->io.BasePort1;
+ dev->base_addr = link->resource[0]->start;
if (!get_prom(link)) {
- printk(KERN_NOTICE "axnet_cs: this is not an AX88190 card!\n");
- printk(KERN_NOTICE "axnet_cs: use pcnet_cs instead.\n");
+ pr_notice("this is not an AX88190 card!\n");
+ pr_notice("use pcnet_cs instead.\n");
goto failed;
}
@@ -356,10 +356,10 @@ static int axnet_config(struct pcmcia_device *link)
ei_status.tx_start_page = AXNET_START_PG;
ei_status.rx_start_page = AXNET_START_PG + TX_PAGES;
ei_status.stop_page = AXNET_STOP_PG;
- ei_status.reset_8390 = &axnet_reset_8390;
- ei_status.get_8390_hdr = &get_8390_hdr;
- ei_status.block_input = &block_input;
- ei_status.block_output = &block_output;
+ ei_status.reset_8390 = axnet_reset_8390;
+ ei_status.get_8390_hdr = get_8390_hdr;
+ ei_status.block_input = block_input;
+ ei_status.block_output = block_output;
if (inb(dev->base_addr + AXNET_TEST) != 0)
info->flags |= IS_AX88790;
@@ -379,8 +379,7 @@ static int axnet_config(struct pcmcia_device *link)
/* Maybe PHY is in power down mode. (PPD_SET = 1)
Bit 2 of CCSR is active low. */
if (i == 32) {
- conf_reg_t reg = { 0, CS_WRITE, CISREG_CCSR, 0x04 };
- pcmcia_access_configuration_register(link, &reg);
+ pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
for (i = 0; i < 32; i++) {
j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
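
The CCSR write above shows conf_reg_t plus pcmcia_access_configuration_register() collapsing into the direct byte accessors. A sketch of both directions (example_ccsr is hypothetical; the CISREG_* constants come from pcmcia/cisreg.h):

    static void example_ccsr(struct pcmcia_device *link)
    {
            u8 cor;

            pcmcia_read_config_byte(link, CISREG_COR, &cor);
            pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
    }
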
@@ -393,19 +392,18 @@ static int axnet_config(struct pcmcia_device *link)
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
- printk(KERN_NOTICE "axnet_cs: register_netdev() failed\n");
+ pr_notice("register_netdev() failed\n");
goto failed;
}
- printk(KERN_INFO "%s: Asix AX88%d90: io %#3lx, irq %d, "
- "hw_addr %pM\n",
- dev->name, ((info->flags & IS_AX88790) ? 7 : 1),
- dev->base_addr, dev->irq,
- dev->dev_addr);
+ netdev_info(dev, "Asix AX88%d90: io %#3lx, irq %d, hw_addr %pM\n",
+ ((info->flags & IS_AX88790) ? 7 : 1),
+ dev->base_addr, dev->irq, dev->dev_addr);
if (info->phy_id != -1) {
- dev_dbg(&link->dev, " MII transceiver at index %d, status %x.\n", info->phy_id, j);
+ netdev_dbg(dev, " MII transceiver at index %d, status %x\n",
+ info->phy_id, j);
} else {
- printk(KERN_NOTICE " No MII transceivers found!\n");
+ netdev_notice(dev, " No MII transceivers found!\n");
}
return 0;
@@ -532,7 +530,7 @@ static int axnet_open(struct net_device *dev)
info->link_status = 0x00;
init_timer(&info->watchdog);
- info->watchdog.function = &ei_watchdog;
+ info->watchdog.function = ei_watchdog;
info->watchdog.data = (u_long)dev;
info->watchdog.expires = jiffies + HZ;
add_timer(&info->watchdog);
@@ -585,8 +583,7 @@ static void axnet_reset_8390(struct net_device *dev)
outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */
if (i == 100)
- printk(KERN_ERR "%s: axnet_reset_8390() did not complete.\n",
- dev->name);
+ netdev_err(dev, "axnet_reset_8390() did not complete\n");
} /* axnet_reset_8390 */
@@ -613,7 +610,7 @@ static void ei_watchdog(u_long arg)
this, we can limp along even if the interrupt is blocked */
if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) {
if (!info->fast_poll)
- printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+ netdev_info(dev, "interrupt(s) dropped!\n");
ei_irq_wrapper(dev->irq, dev);
info->fast_poll = HZ;
}
@@ -628,7 +625,7 @@ static void ei_watchdog(u_long arg)
goto reschedule;
link = mdio_read(mii_addr, info->phy_id, 1);
if (!link || (link == 0xffff)) {
- printk(KERN_INFO "%s: MII is missing!\n", dev->name);
+ netdev_info(dev, "MII is missing!\n");
info->phy_id = -1;
goto reschedule;
}
@@ -636,18 +633,14 @@ static void ei_watchdog(u_long arg)
link &= 0x0004;
if (link != info->link_status) {
u_short p = mdio_read(mii_addr, info->phy_id, 5);
- printk(KERN_INFO "%s: %s link beat\n", dev->name,
- (link) ? "found" : "lost");
+ netdev_info(dev, "%s link beat\n", link ? "found" : "lost");
if (link) {
info->duplex_flag = (p & 0x0140) ? 0x80 : 0x00;
if (p)
- printk(KERN_INFO "%s: autonegotiation complete: "
- "%sbaseT-%cD selected\n", dev->name,
- ((p & 0x0180) ? "100" : "10"),
- ((p & 0x0140) ? 'F' : 'H'));
+ netdev_info(dev, "autonegotiation complete: %dbaseT-%cD selected\n",
+ (p & 0x0180) ? 100 : 10, (p & 0x0140) ? 'F' : 'H');
else
- printk(KERN_INFO "%s: link partner did not autonegotiate\n",
- dev->name);
+ netdev_info(dev, "link partner did not autonegotiate\n");
AX88190_init(dev, 1);
}
info->link_status = link;
@@ -658,16 +651,6 @@ reschedule:
add_timer(&info->watchdog);
}
-static void netdev_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, "axnet_cs");
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
-};
-
/*====================================================================*/
static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -855,9 +838,6 @@ module_exit(exit_axnet_cs);
*/
-static const char version_8390[] = KERN_INFO \
- "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@scyld.com)\n";
-
#include <linux/bitops.h>
#include <asm/irq.h>
#include <linux/fcntl.h>
@@ -1004,9 +984,11 @@ static void axnet_tx_timeout(struct net_device *dev)
isr = inb(e8390_base+EN0_ISR);
spin_unlock_irqrestore(&ei_local->page_lock, flags);
- printk(KERN_DEBUG "%s: Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
- dev->name, (txsr & ENTSR_ABT) ? "excess collisions." :
- (isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar);
+ netdev_printk(KERN_DEBUG, dev,
+ "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
+ (txsr & ENTSR_ABT) ? "excess collisions." :
+ (isr) ? "lost interrupt?" : "cable problem?",
+ txsr, isr, tickssofar);
if (!isr && !dev->stats.tx_packets)
{
@@ -1076,22 +1058,28 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
output_page = ei_local->tx_start_page;
ei_local->tx1 = send_length;
if (ei_debug && ei_local->tx2 > 0)
- printk(KERN_DEBUG "%s: idle transmitter tx2=%d, lasttx=%d, txing=%d.\n",
- dev->name, ei_local->tx2, ei_local->lasttx, ei_local->txing);
+ netdev_printk(KERN_DEBUG, dev,
+ "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
+ ei_local->tx2, ei_local->lasttx,
+ ei_local->txing);
}
else if (ei_local->tx2 == 0)
{
output_page = ei_local->tx_start_page + TX_PAGES/2;
ei_local->tx2 = send_length;
if (ei_debug && ei_local->tx1 > 0)
- printk(KERN_DEBUG "%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n",
- dev->name, ei_local->tx1, ei_local->lasttx, ei_local->txing);
+ netdev_printk(KERN_DEBUG, dev,
+ "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
+ ei_local->tx1, ei_local->lasttx,
+ ei_local->txing);
}
else
{ /* We should never get here. */
if (ei_debug)
- printk(KERN_DEBUG "%s: No Tx buffers free! tx1=%d tx2=%d last=%d\n",
- dev->name, ei_local->tx1, ei_local->tx2, ei_local->lasttx);
+ netdev_printk(KERN_DEBUG, dev,
+ "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
+ ei_local->tx1, ei_local->tx2,
+ ei_local->lasttx);
ei_local->irqlock = 0;
netif_stop_queue(dev);
outb_p(ENISR_ALL, e8390_base + EN0_IMR);
@@ -1179,23 +1167,26 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
spin_lock_irqsave(&ei_local->page_lock, flags);
- if (ei_local->irqlock)
- {
+ if (ei_local->irqlock) {
#if 1 /* This might just be an interrupt for a PCI device sharing this line */
+ const char *msg;
/* The "irqlock" check is only for testing. */
- printk(ei_local->irqlock
- ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n"
- : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n",
- dev->name, inb_p(e8390_base + EN0_ISR),
- inb_p(e8390_base + EN0_IMR));
+ if (ei_local->irqlock)
+ msg = "Interrupted while interrupts are masked!";
+ else
+ msg = "Reentering the interrupt handler!";
+ netdev_info(dev, "%s, isr=%#2x imr=%#2x\n",
+ msg,
+ inb_p(e8390_base + EN0_ISR),
+ inb_p(e8390_base + EN0_IMR));
#endif
spin_unlock_irqrestore(&ei_local->page_lock, flags);
return IRQ_NONE;
}
if (ei_debug > 3)
- printk(KERN_DEBUG "%s: interrupt(isr=%#2.2x).\n", dev->name,
- inb_p(e8390_base + EN0_ISR));
+ netdev_printk(KERN_DEBUG, dev, "interrupt(isr=%#2.2x)\n",
+ inb_p(e8390_base + EN0_ISR));
outb_p(0x00, e8390_base + EN0_ISR);
ei_local->irqlock = 1;
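
Selecting the message string before the call, as the irqlock hunk above does, keeps the format a literal and leaves one call site instead of a conditional choosing between two whole format strings. A sketch of the shape (example_report is hypothetical):

    static void example_report(struct net_device *dev, bool masked,
                               u8 isr, u8 imr)
    {
            const char *msg = masked
                    ? "Interrupted while interrupts are masked!"
                    : "Reentering the interrupt handler!";

            netdev_info(dev, "%s, isr=%#2x imr=%#2x\n", msg, isr, imr);
    }
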
@@ -1206,7 +1197,8 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
{
if (!netif_running(dev) || (interrupts == 0xff)) {
if (ei_debug > 1)
- printk(KERN_WARNING "%s: interrupt from stopped card\n", dev->name);
+ netdev_warn(dev,
+ "interrupt from stopped card\n");
outb_p(interrupts, e8390_base + EN0_ISR);
interrupts = 0;
break;
@@ -1249,11 +1241,12 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
{
/* 0xFF is valid for a card removal */
if(interrupts!=0xFF)
- printk(KERN_WARNING "%s: Too much work at interrupt, status %#2.2x\n",
- dev->name, interrupts);
+ netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n",
+ interrupts);
outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
} else {
- printk(KERN_WARNING "%s: unknown interrupt %#2x\n", dev->name, interrupts);
+ netdev_warn(dev, "unknown interrupt %#2x\n",
+ interrupts);
outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
}
}
@@ -1287,18 +1280,19 @@ static void ei_tx_err(struct net_device *dev)
unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
#ifdef VERBOSE_ERROR_DUMP
- printk(KERN_DEBUG "%s: transmitter error (%#2x): ", dev->name, txsr);
+ netdev_printk(KERN_DEBUG, dev,
+ "transmitter error (%#2x):", txsr);
if (txsr & ENTSR_ABT)
- printk("excess-collisions ");
+ pr_cont(" excess-collisions");
if (txsr & ENTSR_ND)
- printk("non-deferral ");
+ pr_cont(" non-deferral");
if (txsr & ENTSR_CRS)
- printk("lost-carrier ");
+ pr_cont(" lost-carrier");
if (txsr & ENTSR_FU)
- printk("FIFO-underrun ");
+ pr_cont(" FIFO-underrun");
if (txsr & ENTSR_CDH)
- printk("lost-heartbeat ");
- printk("\n");
+ pr_cont(" lost-heartbeat");
+ pr_cont("\n");
#endif
if (tx_was_aborted)
@@ -1335,8 +1329,9 @@ static void ei_tx_intr(struct net_device *dev)
if (ei_local->tx1 < 0)
{
if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
- printk(KERN_ERR "%s: bogus last_tx_buffer %d, tx1=%d.\n",
- ei_local->name, ei_local->lasttx, ei_local->tx1);
+ netdev_err(dev, "%s: bogus last_tx_buffer %d, tx1=%d\n",
+ ei_local->name, ei_local->lasttx,
+ ei_local->tx1);
ei_local->tx1 = 0;
if (ei_local->tx2 > 0)
{
@@ -1351,8 +1346,9 @@ static void ei_tx_intr(struct net_device *dev)
else if (ei_local->tx2 < 0)
{
if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
- printk("%s: bogus last_tx_buffer %d, tx2=%d.\n",
- ei_local->name, ei_local->lasttx, ei_local->tx2);
+ netdev_info(dev, "%s: bogus last_tx_buffer %d, tx2=%d\n",
+ ei_local->name, ei_local->lasttx,
+ ei_local->tx2);
ei_local->tx2 = 0;
if (ei_local->tx1 > 0)
{
@@ -1365,8 +1361,9 @@ static void ei_tx_intr(struct net_device *dev)
else
ei_local->lasttx = 10, ei_local->txing = 0;
}
-// else printk(KERN_WARNING "%s: unexpected TX-done interrupt, lasttx=%d.\n",
-// dev->name, ei_local->lasttx);
+// else
+// netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n",
+// ei_local->lasttx);
/* Minimize Tx latency: update the statistics after we restart TXing. */
if (status & ENTSR_COL)
@@ -1429,8 +1426,8 @@ static void ei_receive(struct net_device *dev)
is that some clones crash in roughly the same way.
*/
if (ei_debug > 0 && this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF))
- printk(KERN_ERR "%s: mismatched read page pointers %2x vs %2x.\n",
- dev->name, this_frame, ei_local->current_page);
+ netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
+ this_frame, ei_local->current_page);
if (this_frame == rxing_page) /* Read all the frames? */
break; /* Done for now */
@@ -1446,9 +1443,10 @@ static void ei_receive(struct net_device *dev)
if (pkt_len < 60 || pkt_len > 1518)
{
if (ei_debug)
- printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n",
- dev->name, rx_frame.count, rx_frame.status,
- rx_frame.next);
+ netdev_printk(KERN_DEBUG, dev,
+ "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
+ rx_frame.count, rx_frame.status,
+ rx_frame.next);
dev->stats.rx_errors++;
dev->stats.rx_length_errors++;
}
@@ -1460,8 +1458,9 @@ static void ei_receive(struct net_device *dev)
if (skb == NULL)
{
if (ei_debug > 1)
- printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n",
- dev->name, pkt_len);
+ netdev_printk(KERN_DEBUG, dev,
+ "Couldn't allocate a sk_buff of size %d\n",
+ pkt_len);
dev->stats.rx_dropped++;
break;
}
@@ -1481,9 +1480,10 @@ static void ei_receive(struct net_device *dev)
else
{
if (ei_debug)
- printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n",
- dev->name, rx_frame.status, rx_frame.next,
- rx_frame.count);
+ netdev_printk(KERN_DEBUG, dev,
+ "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
+ rx_frame.status, rx_frame.next,
+ rx_frame.count);
dev->stats.rx_errors++;
/* NB: The NIC counts CRC, frame and missed errors. */
if (pkt_stat & ENRSR_FO)
@@ -1493,8 +1493,8 @@ static void ei_receive(struct net_device *dev)
/* This _should_ never happen: it's here for avoiding bad clones. */
if (next_frame >= ei_local->stop_page) {
- printk("%s: next frame inconsistency, %#2x\n", dev->name,
- next_frame);
+ netdev_info(dev, "next frame inconsistency, %#2x\n",
+ next_frame);
next_frame = ei_local->rx_start_page;
}
ei_local->current_page = next_frame;
@@ -1529,7 +1529,7 @@ static void ei_rx_overrun(struct net_device *dev)
outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
if (ei_debug > 1)
- printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name);
+ netdev_printk(KERN_DEBUG, dev, "Receiver overrun\n");
dev->stats.rx_over_errors++;
/*
@@ -1726,7 +1726,7 @@ static void AX88190_init(struct net_device *dev, int startp)
{
outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
if(inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i])
- printk(KERN_ERR "Hw. address read/write mismap %d\n",i);
+ netdev_err(dev, "Hw. address read/write mismap %d\n", i);
}
outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
@@ -1763,8 +1763,7 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
if (inb_p(e8390_base) & E8390_TRANS)
{
- printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n",
- dev->name);
+ netdev_warn(dev, "trigger_send() called with the transmitter busy\n");
return;
}
outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c
index 5643f94541bc..f065c35cd4b7 100644
--- a/drivers/net/pcmcia/com20020_cs.c
+++ b/drivers/net/pcmcia/com20020_cs.c
@@ -43,7 +43,6 @@
#include <linux/arcdevice.h>
#include <linux/com20020.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
@@ -53,23 +52,23 @@
#define VERSION "arcnet: COM20020 PCMCIA support loaded.\n"
-#ifdef DEBUG
static void regdump(struct net_device *dev)
{
+#ifdef DEBUG
int ioaddr = dev->base_addr;
int count;
- printk("com20020 register dump:\n");
+ netdev_dbg(dev, "register dump:\n");
for (count = ioaddr; count < ioaddr + 16; count++)
{
if (!(count % 16))
- printk("\n%04X: ", count);
- printk("%02X ", inb(count));
+ pr_cont("%04X:", count);
+ pr_cont(" %02X", inb(count));
}
- printk("\n");
+ pr_cont("\n");
- printk("buffer0 dump:\n");
+ netdev_dbg(dev, "buffer0 dump:\n");
/* set up the address register */
count = 0;
outb((count >> 8) | RDDATAflag | AUTOINCflag, _ADDR_HI);
@@ -78,19 +77,15 @@ static void regdump(struct net_device *dev)
for (count = 0; count < 256+32; count++)
{
if (!(count % 16))
- printk("\n%04X: ", count);
+ pr_cont("%04X:", count);
/* copy the data */
- printk("%02X ", inb(_MEMDATA));
+ pr_cont(" %02X", inb(_MEMDATA));
}
- printk("\n");
+ pr_cont("\n");
+#endif
}
-#else
-
-static inline void regdump(struct net_device *dev) { }
-
-#endif
/*====================================================================*/
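
Moving the #ifdef DEBUG inside regdump() removes the empty stub twin, and the printk()s become one prefixed header plus pr_cont() continuations, which append to the current console line without starting a new record. A sketch of the dump idiom (example_hexdump is hypothetical):

    static void example_hexdump(struct net_device *dev, const u8 *buf, int len)
    {
            int i;

            netdev_dbg(dev, "buffer dump:\n");
            for (i = 0; i < len; i++) {
                    if (!(i % 16))
                            pr_cont("%s%04X:", i ? "\n" : "", i);
                    pr_cont(" %02X", buf[i]);
            }
            pr_cont("\n");
    }
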
@@ -159,9 +154,8 @@ static int com20020_probe(struct pcmcia_device *p_dev)
/* fill in our module parameters as defaults */
dev->dev_addr[0] = node;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.NumPorts1 = 16;
- p_dev->io.IOAddrLines = 16;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ p_dev->resource[0]->end = 16;
p_dev->conf.Attributes = CONF_ENABLE_IRQ;
p_dev->conf.IntType = INT_MEMORY_AND_IO;
@@ -246,20 +240,24 @@ static int com20020_config(struct pcmcia_device *link)
dev_dbg(&link->dev, "com20020_config\n");
- dev_dbg(&link->dev, "baseport1 is %Xh\n", link->io.BasePort1);
+ dev_dbg(&link->dev, "baseport1 is %Xh\n",
+ (unsigned int) link->resource[0]->start);
+
i = -ENODEV;
- if (!link->io.BasePort1)
+ link->io_lines = 16;
+
+ if (!link->resource[0]->start)
{
for (ioaddr = 0x100; ioaddr < 0x400; ioaddr += 0x10)
{
- link->io.BasePort1 = ioaddr;
- i = pcmcia_request_io(link, &link->io);
+ link->resource[0]->start = ioaddr;
+ i = pcmcia_request_io(link);
if (i == 0)
break;
}
}
else
- i = pcmcia_request_io(link, &link->io);
+ i = pcmcia_request_io(link);
if (i != 0)
{
@@ -267,7 +265,7 @@ static int com20020_config(struct pcmcia_device *link)
goto failed;
}
- ioaddr = dev->base_addr = link->io.BasePort1;
+ ioaddr = dev->base_addr = link->resource[0]->start;
dev_dbg(&link->dev, "got ioaddr %Xh\n", ioaddr);
dev_dbg(&link->dev, "request IRQ %d\n",
@@ -299,13 +297,13 @@ static int com20020_config(struct pcmcia_device *link)
i = com20020_found(dev, 0); /* calls register_netdev */
if (i != 0) {
- dev_printk(KERN_NOTICE, &link->dev,
- "com20020_cs: com20020_found() failed\n");
+ dev_notice(&link->dev,
+ "com20020_found() failed\n");
goto failed;
}
- dev_dbg(&link->dev,KERN_INFO "%s: port %#3lx, irq %d\n",
- dev->name, dev->base_addr, dev->irq);
+ netdev_dbg(dev, "port %#3lx, irq %d\n",
+ dev->base_addr, dev->irq);
return 0;
failed:
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 7c27c50211a5..8f26d548d1bb 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -28,6 +28,8 @@
======================================================================*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "fmvj18x_cs"
#define DRV_VERSION "2.9"
@@ -49,7 +51,6 @@
#include <linux/ioport.h>
#include <linux/crc32.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ciscode.h>
@@ -249,9 +250,8 @@ static int fmvj18x_probe(struct pcmcia_device *link)
lp->base = NULL;
/* The io structure describes IO port mapping */
- link->io.NumPorts1 = 32;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- link->io.IOAddrLines = 5;
+ link->resource[0]->end = 32;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
/* General socket configuration */
link->conf.Attributes = CONF_ENABLE_IRQ;
@@ -289,13 +289,13 @@ static int mfc_try_io_port(struct pcmcia_device *link)
{ 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
for (i = 0; i < 5; i++) {
- link->io.BasePort2 = serial_base[i];
- link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
- if (link->io.BasePort2 == 0) {
- link->io.NumPorts2 = 0;
- printk(KERN_NOTICE "fmvj18x_cs: out of resource for serial\n");
+ link->resource[1]->start = serial_base[i];
+ link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
+ if (link->resource[1]->start == 0) {
+ link->resource[1]->end = 0;
+ pr_notice("out of resource for serial\n");
}
- ret = pcmcia_request_io(link, &link->io);
+ ret = pcmcia_request_io(link);
if (ret == 0)
return ret;
}
@@ -311,12 +311,12 @@ static int ungermann_try_io_port(struct pcmcia_device *link)
0x380,0x3c0 only for ioport.
*/
for (ioaddr = 0x300; ioaddr < 0x3e0; ioaddr += 0x20) {
- link->io.BasePort1 = ioaddr;
- ret = pcmcia_request_io(link, &link->io);
+ link->resource[0]->start = ioaddr;
+ ret = pcmcia_request_io(link);
if (ret == 0) {
/* calculate ConfigIndex value */
link->conf.ConfigIndex =
- ((link->io.BasePort1 & 0x0f0) >> 3) | 0x22;
+ ((link->resource[0]->start & 0x0f0) >> 3) | 0x22;
return ret;
}
}
@@ -346,6 +346,8 @@ static int fmvj18x_config(struct pcmcia_device *link)
dev_dbg(&link->dev, "fmvj18x_config\n");
+ link->io_lines = 5;
+
len = pcmcia_get_tuple(link, CISTPL_FUNCE, &buf);
kfree(buf);
@@ -364,20 +366,20 @@ static int fmvj18x_config(struct pcmcia_device *link)
/* MultiFunction Card */
link->conf.ConfigBase = 0x800;
link->conf.ConfigIndex = 0x47;
- link->io.NumPorts2 = 8;
+ link->resource[1]->end = 8;
}
break;
case MANFID_NEC:
cardtype = NEC; /* MultiFunction Card */
link->conf.ConfigBase = 0x800;
link->conf.ConfigIndex = 0x47;
- link->io.NumPorts2 = 8;
+ link->resource[1]->end = 8;
break;
case MANFID_KME:
cardtype = KME; /* MultiFunction Card */
link->conf.ConfigBase = 0x800;
link->conf.ConfigIndex = 0x47;
- link->io.NumPorts2 = 8;
+ link->resource[1]->end = 8;
break;
case MANFID_CONTEC:
cardtype = CONTEC;
@@ -418,14 +420,14 @@ static int fmvj18x_config(struct pcmcia_device *link)
}
}
- if (link->io.NumPorts2 != 0) {
+ if (link->resource[1]->end != 0) {
ret = mfc_try_io_port(link);
if (ret != 0) goto failed;
} else if (cardtype == UNGERMANN) {
ret = ungermann_try_io_port(link);
if (ret != 0) goto failed;
} else {
- ret = pcmcia_request_io(link, &link->io);
+ ret = pcmcia_request_io(link);
if (ret)
goto failed;
}
@@ -437,9 +439,9 @@ static int fmvj18x_config(struct pcmcia_device *link)
goto failed;
dev->irq = link->irq;
- dev->base_addr = link->io.BasePort1;
+ dev->base_addr = link->resource[0]->start;
- if (link->io.BasePort2 != 0) {
+ if (resource_size(link->resource[1]) != 0) {
ret = fmvj18x_setup_mfc(link);
if (ret != 0) goto failed;
}
@@ -503,7 +505,7 @@ static int fmvj18x_config(struct pcmcia_device *link)
case XXX10304:
/* Read MACID from Buggy CIS */
if (fmvj18x_get_hwinfo(link, buggybuf) == -1) {
- printk(KERN_NOTICE "fmvj18x_cs: unable to read hardware net address.\n");
+ pr_notice("unable to read hardware net address\n");
goto failed;
}
for (i = 0 ; i < 6; i++) {
@@ -524,15 +526,14 @@ static int fmvj18x_config(struct pcmcia_device *link)
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
- printk(KERN_NOTICE "fmvj18x_cs: register_netdev() failed\n");
+ pr_notice("register_netdev() failed\n");
goto failed;
}
/* print current configuration */
- printk(KERN_INFO "%s: %s, sram %s, port %#3lx, irq %d, "
- "hw_addr %pM\n",
- dev->name, card_name, sram_config == 0 ? "4K TX*2" : "8K TX*2",
- dev->base_addr, dev->irq, dev->dev_addr);
+ netdev_info(dev, "%s, sram %s, port %#3lx, irq %d, hw_addr %pM\n",
+ card_name, sram_config == 0 ? "4K TX*2" : "8K TX*2",
+ dev->base_addr, dev->irq, dev->dev_addr);
return 0;
@@ -545,7 +546,6 @@ failed:
static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id)
{
win_req_t req;
- memreq_t mem;
u_char __iomem *base;
int i, j;
@@ -558,9 +558,7 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id)
return -1;
base = ioremap(req.Base, req.Size);
- mem.Page = 0;
- mem.CardOffset = 0;
- pcmcia_map_mem_page(link, link->win, &mem);
+ pcmcia_map_mem_page(link, link->win, 0);
/*
* MBH10304 CISTPL_FUNCE_LAN_NODE_ID format
@@ -594,7 +592,6 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id)
static int fmvj18x_setup_mfc(struct pcmcia_device *link)
{
win_req_t req;
- memreq_t mem;
int i;
struct net_device *dev = link->priv;
unsigned int ioaddr;
@@ -610,13 +607,11 @@ static int fmvj18x_setup_mfc(struct pcmcia_device *link)
lp->base = ioremap(req.Base, req.Size);
if (lp->base == NULL) {
- printk(KERN_NOTICE "fmvj18x_cs: ioremap failed\n");
+ netdev_notice(dev, "ioremap failed\n");
return -1;
}
- mem.Page = 0;
- mem.CardOffset = 0;
- i = pcmcia_map_mem_page(link, link->win, &mem);
+ i = pcmcia_map_mem_page(link, link->win, 0);
if (i != 0) {
iounmap(lp->base);
lp->base = NULL;
@@ -806,17 +801,16 @@ static void fjn_tx_timeout(struct net_device *dev)
struct local_info_t *lp = netdev_priv(dev);
unsigned int ioaddr = dev->base_addr;
- printk(KERN_NOTICE "%s: transmit timed out with status %04x, %s?\n",
- dev->name, htons(inw(ioaddr + TX_STATUS)),
- inb(ioaddr + TX_STATUS) & F_TMT_RDY
- ? "IRQ conflict" : "network cable problem");
- printk(KERN_NOTICE "%s: timeout registers: %04x %04x %04x "
- "%04x %04x %04x %04x %04x.\n",
- dev->name, htons(inw(ioaddr + 0)),
- htons(inw(ioaddr + 2)), htons(inw(ioaddr + 4)),
- htons(inw(ioaddr + 6)), htons(inw(ioaddr + 8)),
- htons(inw(ioaddr +10)), htons(inw(ioaddr +12)),
- htons(inw(ioaddr +14)));
+ netdev_notice(dev, "transmit timed out with status %04x, %s?\n",
+ htons(inw(ioaddr + TX_STATUS)),
+ inb(ioaddr + TX_STATUS) & F_TMT_RDY
+ ? "IRQ conflict" : "network cable problem");
+ netdev_notice(dev, "timeout registers: %04x %04x %04x "
+ "%04x %04x %04x %04x %04x.\n",
+ htons(inw(ioaddr + 0)), htons(inw(ioaddr + 2)),
+ htons(inw(ioaddr + 4)), htons(inw(ioaddr + 6)),
+ htons(inw(ioaddr + 8)), htons(inw(ioaddr + 10)),
+ htons(inw(ioaddr + 12)), htons(inw(ioaddr + 14)));
dev->stats.tx_errors++;
/* ToDo: We should try to restart the adaptor... */
local_irq_disable();
@@ -851,13 +845,13 @@ static netdev_tx_t fjn_start_xmit(struct sk_buff *skb,
unsigned char *buf = skb->data;
if (length > ETH_FRAME_LEN) {
- printk(KERN_NOTICE "%s: Attempting to send a large packet"
- " (%d bytes).\n", dev->name, length);
+ netdev_notice(dev, "Attempting to send a large packet (%d bytes)\n",
+ length);
return NETDEV_TX_BUSY;
}
- pr_debug("%s: Transmitting a packet of length %lu.\n",
- dev->name, (unsigned long)skb->len);
+ netdev_dbg(dev, "Transmitting a packet of length %lu\n",
+ (unsigned long)skb->len);
dev->stats.tx_bytes += skb->len;
/* Disable both interrupts. */
@@ -910,7 +904,7 @@ static void fjn_reset(struct net_device *dev)
unsigned int ioaddr = dev->base_addr;
int i;
- pr_debug("fjn_reset(%s) called.\n",dev->name);
+ netdev_dbg(dev, "fjn_reset() called\n");
/* Reset controller */
if( sram_config == 0 )
@@ -994,8 +988,8 @@ static void fjn_rx(struct net_device *dev)
while ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) {
u_short status = inw(ioaddr + DATAPORT);
- pr_debug("%s: Rxing packet mode %02x status %04x.\n",
- dev->name, inb(ioaddr + RX_MODE), status);
+ netdev_dbg(dev, "Rxing packet mode %02x status %04x.\n",
+ inb(ioaddr + RX_MODE), status);
#ifndef final_version
if (status == 0) {
outb(F_SKP_PKT, ioaddr + RX_SKIP);
@@ -1014,16 +1008,16 @@ static void fjn_rx(struct net_device *dev)
struct sk_buff *skb;
if (pkt_len > 1550) {
- printk(KERN_NOTICE "%s: The FMV-18x claimed a very "
- "large packet, size %d.\n", dev->name, pkt_len);
+ netdev_notice(dev, "The FMV-18x claimed a very large packet, size %d\n",
+ pkt_len);
outb(F_SKP_PKT, ioaddr + RX_SKIP);
dev->stats.rx_errors++;
break;
}
skb = dev_alloc_skb(pkt_len+2);
if (skb == NULL) {
- printk(KERN_NOTICE "%s: Memory squeeze, dropping "
- "packet (len %d).\n", dev->name, pkt_len);
+ netdev_notice(dev, "Memory squeeze, dropping packet (len %d)\n",
+ pkt_len);
outb(F_SKP_PKT, ioaddr + RX_SKIP);
dev->stats.rx_dropped++;
break;
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index 67ee9851a8ed..dc85282193bf 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -45,6 +45,8 @@
======================================================================*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ptrace.h>
@@ -52,12 +54,10 @@
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/module.h>
-#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/trdevice.h>
#include <linux/ibmtr.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
@@ -108,16 +108,6 @@ typedef struct ibmtr_dev_t {
struct tok_info *ti;
} ibmtr_dev_t;
-static void netdev_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, "ibmtr_cs");
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
-};
-
static irqreturn_t ibmtr_interrupt(int irq, void *dev_id) {
ibmtr_dev_t *info = dev_id;
struct net_device *dev = info->dev;
@@ -152,17 +142,14 @@ static int __devinit ibmtr_attach(struct pcmcia_device *link)
link->priv = info;
info->ti = netdev_priv(dev);
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- link->io.NumPorts1 = 4;
- link->io.IOAddrLines = 16;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ link->resource[0]->end = 4;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.Present = PRESENT_OPTION;
info->dev = dev;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
-
return ibmtr_config(link);
} /* ibmtr_attach */
@@ -213,26 +200,26 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
struct net_device *dev = info->dev;
struct tok_info *ti = netdev_priv(dev);
win_req_t req;
- memreq_t mem;
int i, ret;
dev_dbg(&link->dev, "ibmtr_config\n");
link->conf.ConfigIndex = 0x61;
+ link->io_lines = 16;
/* Determine if this is PRIMARY or ALTERNATE. */
/* Try PRIMARY card at 0xA20-0xA23 */
- link->io.BasePort1 = 0xA20;
- i = pcmcia_request_io(link, &link->io);
+ link->resource[0]->start = 0xA20;
+ i = pcmcia_request_io(link);
if (i != 0) {
/* Couldn't get 0xA20-0xA23. Try ALTERNATE at 0xA24-0xA27. */
- link->io.BasePort1 = 0xA24;
- ret = pcmcia_request_io(link, &link->io);
+ link->resource[0]->start = 0xA24;
+ ret = pcmcia_request_io(link);
if (ret)
goto failed;
}
- dev->base_addr = link->io.BasePort1;
+ dev->base_addr = link->resource[0]->start;
ret = pcmcia_request_exclusive_irq(link, ibmtr_interrupt);
if (ret)
@@ -251,9 +238,7 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
if (ret)
goto failed;
- mem.CardOffset = mmiobase;
- mem.Page = 0;
- ret = pcmcia_map_mem_page(link, link->win, &mem);
+ ret = pcmcia_map_mem_page(link, link->win, mmiobase);
if (ret)
goto failed;
ti->mmio = ioremap(req.Base, req.Size);
@@ -268,13 +253,11 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
if (ret)
goto failed;
- mem.CardOffset = srambase;
- mem.Page = 0;
- ret = pcmcia_map_mem_page(link, info->sram_win_handle, &mem);
+ ret = pcmcia_map_mem_page(link, info->sram_win_handle, srambase);
if (ret)
goto failed;
- ti->sram_base = mem.CardOffset >> 12;
+ ti->sram_base = srambase >> 12;
ti->sram_virt = ioremap(req.Base, req.Size);
ti->sram_phys = req.Base;
@@ -291,15 +274,14 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
i = ibmtr_probe_card(dev);
if (i != 0) {
- printk(KERN_NOTICE "ibmtr_cs: register_netdev() failed\n");
+ pr_notice("register_netdev() failed\n");
goto failed;
}
- printk(KERN_INFO
- "%s: port %#3lx, irq %d, mmio %#5lx, sram %#5lx, hwaddr=%pM\n",
- dev->name, dev->base_addr, dev->irq,
- (u_long)ti->mmio, (u_long)(ti->sram_base << 12),
- dev->dev_addr);
+ netdev_info(dev, "port %#3lx, irq %d, mmio %#5lx, sram %#5lx, hwaddr=%pM\n",
+ dev->base_addr, dev->irq,
+ (u_long)ti->mmio, (u_long)(ti->sram_base << 12),
+ dev->dev_addr);
return 0;
failed:
@@ -325,7 +307,6 @@ static void ibmtr_release(struct pcmcia_device *link)
if (link->win) {
struct tok_info *ti = netdev_priv(dev);
iounmap(ti->mmio);
- pcmcia_release_window(link, info->sram_win_handle);
}
pcmcia_disable_device(link);
}
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 9b63dec549cb..89cf63bb8c91 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -111,6 +111,8 @@ Log: nmclan_cs.c,v
---------------------------------------------------------------------------- */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "nmclan_cs"
#define DRV_VERSION "0.16"
@@ -146,7 +148,6 @@ Include Files
#include <linux/ioport.h>
#include <linux/bitops.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/cistpl.h>
@@ -459,9 +460,8 @@ static int nmclan_probe(struct pcmcia_device *link)
link->priv = dev;
spin_lock_init(&lp->bank_lock);
- link->io.NumPorts1 = 32;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- link->io.IOAddrLines = 5;
+ link->resource[0]->end = 32;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 1;
@@ -565,7 +565,7 @@ static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr)
/* Wait for reset bit to be cleared automatically after <= 200ns */;
if(++ct > 500)
{
- printk(KERN_ERR "mace: reset failed, card removed ?\n");
+ pr_err("reset failed, card removed?\n");
return -1;
}
udelay(1);
@@ -612,7 +612,7 @@ static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr)
{
if(++ ct > 500)
{
- printk(KERN_ERR "mace: ADDRCHG timeout, card removed ?\n");
+ pr_err("ADDRCHG timeout, card removed?\n");
return -1;
}
}
@@ -645,7 +645,8 @@ static int nmclan_config(struct pcmcia_device *link)
dev_dbg(&link->dev, "nmclan_config\n");
- ret = pcmcia_request_io(link, &link->io);
+ link->io_lines = 5;
+ ret = pcmcia_request_io(link);
if (ret)
goto failed;
ret = pcmcia_request_exclusive_irq(link, mace_interrupt);
@@ -656,7 +657,7 @@ static int nmclan_config(struct pcmcia_device *link)
goto failed;
dev->irq = link->irq;
- dev->base_addr = link->io.BasePort1;
+ dev->base_addr = link->resource[0]->start;
ioaddr = dev->base_addr;
@@ -679,8 +680,8 @@ static int nmclan_config(struct pcmcia_device *link)
dev_dbg(&link->dev, "nmclan_cs configured: mace id=%x %x\n",
sig[0], sig[1]);
} else {
- printk(KERN_NOTICE "nmclan_cs: mace id not found: %x %x should"
- " be 0x40 0x?9\n", sig[0], sig[1]);
+ pr_notice("mace id not found: %x %x should be 0x40 0x?9\n",
+ sig[0], sig[1]);
return -ENODEV;
}
}
@@ -692,20 +693,18 @@ static int nmclan_config(struct pcmcia_device *link)
if (if_port <= 2)
dev->if_port = if_port;
else
- printk(KERN_NOTICE "nmclan_cs: invalid if_port requested\n");
+ pr_notice("invalid if_port requested\n");
SET_NETDEV_DEV(dev, &link->dev);
i = register_netdev(dev);
if (i != 0) {
- printk(KERN_NOTICE "nmclan_cs: register_netdev() failed\n");
+ pr_notice("register_netdev() failed\n");
goto failed;
}
- printk(KERN_INFO "%s: nmclan: port %#3lx, irq %d, %s port,"
- " hw_addr %pM\n",
- dev->name, dev->base_addr, dev->irq, if_names[dev->if_port],
- dev->dev_addr);
+ netdev_info(dev, "nmclan: port %#3lx, irq %d, %s port, hw_addr %pM\n",
+ dev->base_addr, dev->irq, if_names[dev->if_port], dev->dev_addr);
return 0;
failed:
@@ -758,29 +757,20 @@ static void nmclan_reset(struct net_device *dev)
#if RESET_XILINX
struct pcmcia_device *link = &lp->link;
- conf_reg_t reg;
- u_long OrigCorValue;
+ u8 OrigCorValue;
/* Save original COR value */
- reg.Function = 0;
- reg.Action = CS_READ;
- reg.Offset = CISREG_COR;
- reg.Value = 0;
- pcmcia_access_configuration_register(link, &reg);
- OrigCorValue = reg.Value;
+ pcmcia_read_config_byte(link, CISREG_COR, &OrigCorValue);
/* Reset Xilinx */
- reg.Action = CS_WRITE;
- reg.Offset = CISREG_COR;
- dev_dbg(&link->dev, "nmclan_reset: OrigCorValue=0x%lX, resetting...\n",
+ dev_dbg(&link->dev, "nmclan_reset: OrigCorValue=0x%x, resetting...\n",
OrigCorValue);
- reg.Value = COR_SOFT_RESET;
- pcmcia_access_configuration_register(link, &reg);
+ pcmcia_write_config_byte(link, CISREG_COR, COR_SOFT_RESET);
/* Need to wait for 20 ms for PCMCIA to finish reset. */
/* Restore original COR configuration index */
- reg.Value = COR_LEVEL_REQ | (OrigCorValue & COR_CONFIG_MASK);
- pcmcia_access_configuration_register(link, &reg);
+ pcmcia_write_config_byte(link, CISREG_COR,
+ (COR_LEVEL_REQ | (OrigCorValue & COR_CONFIG_MASK)));
/* Xilinx is now completely reset along with the MACE chip. */
lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
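
The Xilinx reset above is the standard COR soft-reset sequence, now via the byte accessors: save the Card Configuration Register, pulse COR_SOFT_RESET, wait out the reset, then restore the saved configuration index. A sketch under those assumptions (example_cor_reset is hypothetical; mdelay(20) stands in for the driver's 20 ms wait):

    static void example_cor_reset(struct pcmcia_device *link)
    {
            u8 cor;

            pcmcia_read_config_byte(link, CISREG_COR, &cor);
            pcmcia_write_config_byte(link, CISREG_COR, COR_SOFT_RESET);
            mdelay(20);     /* PCMCIA needs ~20 ms to finish the reset */
            pcmcia_write_config_byte(link, CISREG_COR,
                                     COR_LEVEL_REQ | (cor & COR_CONFIG_MASK));
    }
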
@@ -808,8 +798,7 @@ static int mace_config(struct net_device *dev, struct ifmap *map)
if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
if (map->port <= 2) {
dev->if_port = map->port;
- printk(KERN_INFO "%s: switched to %s port\n", dev->name,
- if_names[dev->if_port]);
+ netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
} else
return -EINVAL;
}
@@ -888,12 +877,12 @@ static void mace_tx_timeout(struct net_device *dev)
mace_private *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
- printk(KERN_NOTICE "%s: transmit timed out -- ", dev->name);
+ netdev_notice(dev, "transmit timed out -- ");
#if RESET_ON_TIMEOUT
- printk("resetting card\n");
+ pr_cont("resetting card\n");
pcmcia_reset_card(link->socket);
#else /* #if RESET_ON_TIMEOUT */
- printk("NOT resetting card\n");
+ pr_cont("NOT resetting card\n");
#endif /* #if RESET_ON_TIMEOUT */
dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
@@ -975,22 +964,21 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
ioaddr = dev->base_addr;
if (lp->tx_irq_disabled) {
- printk(
- (lp->tx_irq_disabled?
- KERN_NOTICE "%s: Interrupt with tx_irq_disabled "
- "[isr=%02X, imr=%02X]\n":
- KERN_NOTICE "%s: Re-entering the interrupt handler "
- "[isr=%02X, imr=%02X]\n"),
- dev->name,
- inb(ioaddr + AM2150_MACE_BASE + MACE_IR),
- inb(ioaddr + AM2150_MACE_BASE + MACE_IMR)
- );
+ const char *msg;
+ if (lp->tx_irq_disabled)
+ msg = "Interrupt with tx_irq_disabled";
+ else
+ msg = "Re-entering the interrupt handler";
+ netdev_notice(dev, "%s [isr=%02X, imr=%02X]\n",
+ msg,
+ inb(ioaddr + AM2150_MACE_BASE + MACE_IR),
+ inb(ioaddr + AM2150_MACE_BASE + MACE_IMR));
/* WARNING: MACE_IR has been read! */
return IRQ_NONE;
}
if (!netif_device_present(dev)) {
- pr_debug("%s: interrupt from dead card\n", dev->name);
+ netdev_dbg(dev, "interrupt from dead card\n");
return IRQ_NONE;
}
@@ -1388,8 +1376,8 @@ static void BuildLAF(int *ladrf, int *adr)
printk(KERN_DEBUG " adr =%pM\n", adr);
printk(KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63] =", hashcode);
for (i = 0; i < 8; i++)
- printk(KERN_CONT " %02X", ladrf[i]);
- printk(KERN_CONT "\n");
+ pr_cont(" %02X", ladrf[i]);
+ pr_cont("\n");
#endif
} /* BuildLAF */
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index bfdef72c5d5e..e180832c278f 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -28,6 +28,8 @@
======================================================================*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -35,14 +37,12 @@
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/delay.h>
-#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/log2.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include "../8390.h"
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ciscode.h>
@@ -101,7 +101,6 @@ static void pcnet_release(struct pcmcia_device *link);
static int pcnet_open(struct net_device *dev);
static int pcnet_close(struct net_device *dev);
static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static const struct ethtool_ops netdev_ethtool_ops;
static irqreturn_t ei_irq_wrapper(int irq, void *dev_id);
static void ei_watchdog(u_long arg);
static void pcnet_reset_8390(struct net_device *dev);
@@ -113,8 +112,6 @@ static int setup_dma_config(struct pcmcia_device *link, int start_pg,
static void pcnet_detach(struct pcmcia_device *p_dev);
-static dev_info_t dev_info = "pcnet_cs";
-
/*====================================================================*/
typedef struct hw_info_t {
@@ -304,7 +301,6 @@ static hw_info_t *get_hwinfo(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
win_req_t req;
- memreq_t mem;
u_char __iomem *base, *virt;
int i, j;
@@ -317,10 +313,8 @@ static hw_info_t *get_hwinfo(struct pcmcia_device *link)
return NULL;
virt = ioremap(req.Base, req.Size);
- mem.Page = 0;
for (i = 0; i < NR_INFO; i++) {
- mem.CardOffset = hw_info[i].offset & ~(req.Size-1);
- pcmcia_map_mem_page(link, link->win, &mem);
+ pcmcia_map_mem_page(link, link->win, hw_info[i].offset & ~(req.Size-1));
base = &virt[hw_info[i].offset & (req.Size-1)];
if ((readb(base+0) == hw_info[i].a0) &&
(readb(base+2) == hw_info[i].a1) &&
@@ -440,8 +434,6 @@ static hw_info_t *get_ax88190(struct pcmcia_device *link)
dev->dev_addr[i] = j & 0xff;
dev->dev_addr[i+1] = j >> 8;
}
- printk(KERN_NOTICE "pcnet_cs: this is an AX88190 card!\n");
- printk(KERN_NOTICE "pcnet_cs: use axnet_cs instead.\n");
return NULL;
}
@@ -480,29 +472,31 @@ static hw_info_t *get_hwired(struct pcmcia_device *link)
static int try_io_port(struct pcmcia_device *link)
{
int j, ret;
- if (link->io.NumPorts1 == 32) {
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- if (link->io.NumPorts2 > 0) {
+ link->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ link->resource[1]->flags &= ~IO_DATA_PATH_WIDTH;
+ if (link->resource[0]->end == 32) {
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
+ if (link->resource[1]->end > 0) {
/* for master/slave multifunction cards */
- link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+ link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
}
} else {
/* This should be two 16-port windows */
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- link->io.Attributes2 = IO_DATA_PATH_WIDTH_16;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
+ link->resource[1]->flags |= IO_DATA_PATH_WIDTH_16;
}
- if (link->io.BasePort1 == 0) {
- link->io.IOAddrLines = 16;
+ if (link->resource[0]->start == 0) {
for (j = 0; j < 0x400; j += 0x20) {
- link->io.BasePort1 = j ^ 0x300;
- link->io.BasePort2 = (j ^ 0x300) + 0x10;
- ret = pcmcia_request_io(link, &link->io);
+ link->resource[0]->start = j ^ 0x300;
+ link->resource[1]->start = (j ^ 0x300) + 0x10;
+ link->io_lines = 16;
+ ret = pcmcia_request_io(link);
if (ret == 0)
return ret;
}
return ret;
} else {
- return pcmcia_request_io(link, &link->io);
+ return pcmcia_request_io(link);
}
}
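
try_io_port() shows the shape of the reworked I/O request API: instead of filling a separate io_req_t and passing it to pcmcia_request_io(), a driver now programs link->resource[0]/[1] (start, end, flags) plus link->io_lines and calls pcmcia_request_io(link) with no argument block. A minimal sketch of a fixed-port request under the new convention (values illustrative):

	link->resource[0]->start = 0x300;              /* preferred base */
	link->resource[0]->end = 16;                   /* number of ports */
	link->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
	link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
	link->io_lines = 16;                           /* decoded address lines */
	if (pcmcia_request_io(link) != 0)
		goto failed;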
@@ -523,18 +517,18 @@ static int pcnet_confcheck(struct pcmcia_device *p_dev,
network function with window 0, and serial with window 1 */
if (io->nwin > 1) {
i = (io->win[1].len > io->win[0].len);
- p_dev->io.BasePort2 = io->win[1-i].base;
- p_dev->io.NumPorts2 = io->win[1-i].len;
+ p_dev->resource[1]->start = io->win[1-i].base;
+ p_dev->resource[1]->end = io->win[1-i].len;
} else {
- i = p_dev->io.NumPorts2 = 0;
+ i = p_dev->resource[1]->end = 0;
}
*has_shmem = ((cfg->mem.nwin == 1) &&
(cfg->mem.win[0].len >= 0x4000));
- p_dev->io.BasePort1 = io->win[i].base;
- p_dev->io.NumPorts1 = io->win[i].len;
- p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
- if (p_dev->io.NumPorts1 + p_dev->io.NumPorts2 >= 32)
+ p_dev->resource[0]->start = io->win[i].base;
+ p_dev->resource[0]->end = io->win[i].len;
+ p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+ if (p_dev->resource[0]->end + p_dev->resource[1]->end >= 32)
return try_io_port(p_dev);
return 0;
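
pcnet_confcheck() is a pcmcia_loop_config() callback: the core walks the card's CIS configuration table and invokes the checker once per entry, keeping the first entry for which it returns 0. A skeleton with the signature used by the drivers in this series (the function name is hypothetical):

	static int my_confcheck(struct pcmcia_device *p_dev,
				cistpl_cftable_entry_t *cfg,
				cistpl_cftable_entry_t *dflt,
				unsigned int vcc, void *priv_data)
	{
		/* inspect cfg, program p_dev->resource[] accordingly,
		 * then try to claim the ports */
		return pcmcia_request_io(p_dev);
	}

	/* in the driver's config path: */
	if (pcmcia_loop_config(link, my_confcheck, NULL))
		goto failed;                           /* no entry was accepted */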
@@ -557,7 +551,7 @@ static int pcnet_config(struct pcmcia_device *link)
if (!link->irq)
goto failed;
- if (link->io.NumPorts2 == 8) {
+ if (resource_size(link->resource[1]) == 8) {
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status = CCSR_AUDIO_ENA;
}
@@ -569,20 +563,20 @@ static int pcnet_config(struct pcmcia_device *link)
if (ret)
goto failed;
dev->irq = link->irq;
- dev->base_addr = link->io.BasePort1;
+ dev->base_addr = link->resource[0]->start;
if (info->flags & HAS_MISC_REG) {
if ((if_port == 1) || (if_port == 2))
dev->if_port = if_port;
else
- printk(KERN_NOTICE "pcnet_cs: invalid if_port requested\n");
+ pr_notice("invalid if_port requested\n");
} else {
dev->if_port = 0;
}
if ((link->conf.ConfigBase == 0x03c0) &&
(link->manf_id == 0x149) && (link->card_id == 0xc1ab)) {
- printk(KERN_INFO "pcnet_cs: this is an AX88190 card!\n");
- printk(KERN_INFO "pcnet_cs: use axnet_cs instead.\n");
+ pr_notice("this is an AX88190 card!\n");
+ pr_notice("use axnet_cs instead.\n");
goto failed;
}
@@ -597,8 +591,8 @@ static int pcnet_config(struct pcmcia_device *link)
local_hw_info = get_hwired(link);
if (local_hw_info == NULL) {
- printk(KERN_NOTICE "pcnet_cs: unable to read hardware net"
- " address for io base %#3lx\n", dev->base_addr);
+ pr_notice("unable to read hardware net address for io base %#3lx\n",
+ dev->base_addr);
goto failed;
}
@@ -630,9 +624,7 @@ static int pcnet_config(struct pcmcia_device *link)
ei_status.name = "NE2000";
ei_status.word16 = 1;
- ei_status.reset_8390 = &pcnet_reset_8390;
-
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ ei_status.reset_8390 = pcnet_reset_8390;
if (info->flags & (IS_DL10019|IS_DL10022))
mii_phy_probe(dev);
@@ -640,25 +632,25 @@ static int pcnet_config(struct pcmcia_device *link)
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
- printk(KERN_NOTICE "pcnet_cs: register_netdev() failed\n");
+ pr_notice("register_netdev() failed\n");
goto failed;
}
if (info->flags & (IS_DL10019|IS_DL10022)) {
u_char id = inb(dev->base_addr + 0x1a);
- printk(KERN_INFO "%s: NE2000 (DL100%d rev %02x): ",
- dev->name, ((info->flags & IS_DL10022) ? 22 : 19), id);
+ netdev_info(dev, "NE2000 (DL100%d rev %02x): ",
+ (info->flags & IS_DL10022) ? 22 : 19, id);
if (info->pna_phy)
- printk("PNA, ");
+ pr_cont("PNA, ");
} else {
- printk(KERN_INFO "%s: NE2000 Compatible: ", dev->name);
+ netdev_info(dev, "NE2000 Compatible: ");
}
- printk("io %#3lx, irq %d,", dev->base_addr, dev->irq);
+ pr_cont("io %#3lx, irq %d,", dev->base_addr, dev->irq);
if (info->flags & USE_SHMEM)
- printk (" mem %#5lx,", dev->mem_start);
+ pr_cont(" mem %#5lx,", dev->mem_start);
if (info->flags & HAS_MISC_REG)
- printk(" %s xcvr,", if_names[dev->if_port]);
- printk(" hw_addr %pM\n", dev->dev_addr);
+ pr_cont(" %s xcvr,", if_names[dev->if_port]);
+ pr_cont(" hw_addr %pM\n", dev->dev_addr);
return 0;
failed:
@@ -932,7 +924,7 @@ static void mii_phy_probe(struct net_device *dev)
phyid = tmp << 16;
phyid |= mdio_read(mii_addr, i, MII_PHYID_REG2);
phyid &= MII_PHYID_REV_MASK;
- pr_debug("%s: MII at %d is 0x%08x\n", dev->name, i, phyid);
+ netdev_dbg(dev, "MII at %d is 0x%08x\n", i, phyid);
if (phyid == AM79C9XX_HOME_PHY) {
info->pna_phy = i;
} else if (phyid != AM79C9XX_ETH_PHY) {
@@ -956,7 +948,7 @@ static int pcnet_open(struct net_device *dev)
set_misc_reg(dev);
outb_p(0xFF, nic_base + EN0_ISR); /* Clear bogus intr. */
- ret = request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, dev_info, dev);
+ ret = request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, dev->name, dev);
if (ret)
return ret;
@@ -965,7 +957,7 @@ static int pcnet_open(struct net_device *dev)
info->phy_id = info->eth_phy;
info->link_status = 0x00;
init_timer(&info->watchdog);
- info->watchdog.function = &ei_watchdog;
+ info->watchdog.function = ei_watchdog;
info->watchdog.data = (u_long)dev;
info->watchdog.expires = jiffies + HZ;
add_timer(&info->watchdog);
@@ -1018,8 +1010,8 @@ static void pcnet_reset_8390(struct net_device *dev)
outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */
if (i == 100)
- printk(KERN_ERR "%s: pcnet_reset_8390() did not complete.\n",
- dev->name);
+ netdev_err(dev, "pcnet_reset_8390() did not complete.\n");
+
set_misc_reg(dev);
} /* pcnet_reset_8390 */
@@ -1035,8 +1027,7 @@ static int set_config(struct net_device *dev, struct ifmap *map)
else if ((map->port < 1) || (map->port > 2))
return -EINVAL;
dev->if_port = map->port;
- printk(KERN_INFO "%s: switched to %s port\n",
- dev->name, if_names[dev->if_port]);
+ netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
NS8390_init(dev, 1);
}
return 0;
@@ -1071,7 +1062,7 @@ static void ei_watchdog(u_long arg)
this, we can limp along even if the interrupt is blocked */
if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) {
if (!info->fast_poll)
- printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+ netdev_info(dev, "interrupt(s) dropped!\n");
ei_irq_wrapper(dev->irq, dev);
info->fast_poll = HZ;
}
@@ -1091,7 +1082,7 @@ static void ei_watchdog(u_long arg)
if (info->eth_phy) {
info->phy_id = info->eth_phy = 0;
} else {
- printk(KERN_INFO "%s: MII is missing!\n", dev->name);
+ netdev_info(dev, "MII is missing!\n");
info->flags &= ~HAS_MII;
}
goto reschedule;
@@ -1100,8 +1091,7 @@ static void ei_watchdog(u_long arg)
link &= 0x0004;
if (link != info->link_status) {
u_short p = mdio_read(mii_addr, info->phy_id, 5);
- printk(KERN_INFO "%s: %s link beat\n", dev->name,
- (link) ? "found" : "lost");
+ netdev_info(dev, "%s link beat\n", link ? "found" : "lost");
if (link && (info->flags & IS_DL10022)) {
/* Disable collision detection on full duplex links */
outb((p & 0x0140) ? 4 : 0, nic_base + DLINK_DIAG);
@@ -1112,13 +1102,12 @@ static void ei_watchdog(u_long arg)
if (link) {
if (info->phy_id == info->eth_phy) {
if (p)
- printk(KERN_INFO "%s: autonegotiation complete: "
- "%sbaseT-%cD selected\n", dev->name,
+ netdev_info(dev, "autonegotiation complete: "
+ "%sbaseT-%cD selected\n",
((p & 0x0180) ? "100" : "10"),
((p & 0x0140) ? 'F' : 'H'));
else
- printk(KERN_INFO "%s: link partner did not "
- "autonegotiate\n", dev->name);
+ netdev_info(dev, "link partner did not autonegotiate\n");
}
NS8390_init(dev, 1);
}
@@ -1131,7 +1120,7 @@ static void ei_watchdog(u_long arg)
/* isolate this MII and try flipping to the other one */
mdio_write(mii_addr, info->phy_id, 0, 0x0400);
info->phy_id ^= info->pna_phy ^ info->eth_phy;
- printk(KERN_INFO "%s: switched to %s transceiver\n", dev->name,
+ netdev_info(dev, "switched to %s transceiver\n",
(info->phy_id == info->eth_phy) ? "ethernet" : "PNA");
mdio_write(mii_addr, info->phy_id, 0,
(info->phy_id == info->eth_phy) ? 0x1000 : 0);
@@ -1147,18 +1136,6 @@ reschedule:
/*====================================================================*/
-static void netdev_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, "pcnet_cs");
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
-};
-
-/*====================================================================*/
-
static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
@@ -1191,9 +1168,9 @@ static void dma_get_8390_hdr(struct net_device *dev,
unsigned int nic_base = dev->base_addr;
if (ei_status.dmaing) {
- printk(KERN_NOTICE "%s: DMAing conflict in dma_block_input."
+ netdev_notice(dev, "DMAing conflict in dma_block_input."
"[DMAstat:%1x][irqlock:%1x]\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ ei_status.dmaing, ei_status.irqlock);
return;
}
@@ -1224,11 +1201,11 @@ static void dma_block_input(struct net_device *dev, int count,
char *buf = skb->data;
if ((ei_debug > 4) && (count != 4))
- pr_debug("%s: [bi=%d]\n", dev->name, count+4);
+ netdev_dbg(dev, "[bi=%d]\n", count+4);
if (ei_status.dmaing) {
- printk(KERN_NOTICE "%s: DMAing conflict in dma_block_input."
+ netdev_notice(dev, "DMAing conflict in dma_block_input."
"[DMAstat:%1x][irqlock:%1x]\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -1258,9 +1235,9 @@ static void dma_block_input(struct net_device *dev, int count,
break;
} while (--tries > 0);
if (tries <= 0)
- printk(KERN_NOTICE "%s: RX transfer address mismatch,"
+ netdev_notice(dev, "RX transfer address mismatch,"
"%#4.4x (expected) vs. %#4.4x (actual).\n",
- dev->name, ring_offset + xfer_count, addr);
+ ring_offset + xfer_count, addr);
}
#endif
outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
@@ -1281,7 +1258,7 @@ static void dma_block_output(struct net_device *dev, int count,
#ifdef PCMCIA_DEBUG
if (ei_debug > 4)
- printk(KERN_DEBUG "%s: [bo=%d]\n", dev->name, count);
+ netdev_dbg(dev, "[bo=%d]\n", count);
#endif
/* Round the count up for word writes. Do we need to do this?
@@ -1290,9 +1267,9 @@ static void dma_block_output(struct net_device *dev, int count,
if (count & 0x01)
count++;
if (ei_status.dmaing) {
- printk(KERN_NOTICE "%s: DMAing conflict in dma_block_output."
+ netdev_notice(dev, "DMAing conflict in dma_block_output."
"[DMAstat:%1x][irqlock:%1x]\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ ei_status.dmaing, ei_status.irqlock);
return;
}
ei_status.dmaing |= 0x01;
@@ -1329,9 +1306,9 @@ static void dma_block_output(struct net_device *dev, int count,
break;
} while (--tries > 0);
if (tries <= 0) {
- printk(KERN_NOTICE "%s: Tx packet transfer address mismatch,"
+ netdev_notice(dev, "Tx packet transfer address mismatch,"
"%#4.4x (expected) vs. %#4.4x (actual).\n",
- dev->name, (start_page << 8) + count, addr);
+ (start_page << 8) + count, addr);
if (retries++ == 0)
goto retry;
}
@@ -1340,8 +1317,7 @@ static void dma_block_output(struct net_device *dev, int count,
while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
if (time_after(jiffies, dma_start + PCNET_RDC_TIMEOUT)) {
- printk(KERN_NOTICE "%s: timeout waiting for Tx RDC.\n",
- dev->name);
+ netdev_notice(dev, "timeout waiting for Tx RDC.\n");
pcnet_reset_8390(dev);
NS8390_init(dev, 1);
break;
@@ -1365,9 +1341,9 @@ static int setup_dma_config(struct pcmcia_device *link, int start_pg,
ei_status.stop_page = stop_pg;
/* set up block i/o functions */
- ei_status.get_8390_hdr = &dma_get_8390_hdr;
- ei_status.block_input = &dma_block_input;
- ei_status.block_output = &dma_block_output;
+ ei_status.get_8390_hdr = dma_get_8390_hdr;
+ ei_status.block_input = dma_block_input;
+ ei_status.block_output = dma_block_output;
return 0;
}
@@ -1464,7 +1440,6 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
struct net_device *dev = link->priv;
pcnet_dev_t *info = PRIV(dev);
win_req_t req;
- memreq_t mem;
int i, window_size, offset, ret;
window_size = (stop_pg - start_pg) << 8;
@@ -1483,11 +1458,9 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
if (ret)
goto failed;
- mem.CardOffset = (start_pg << 8) + cm_offset;
- offset = mem.CardOffset % window_size;
- mem.CardOffset -= offset;
- mem.Page = 0;
- ret = pcmcia_map_mem_page(link, link->win, &mem);
+ offset = (start_pg << 8) + cm_offset;
+ offset -= offset % window_size;
+ ret = pcmcia_map_mem_page(link, link->win, offset);
if (ret)
goto failed;
@@ -1516,9 +1489,9 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
ei_status.stop_page = start_pg + ((req.Size - offset) >> 8);
/* set up block i/o functions */
- ei_status.get_8390_hdr = &shmem_get_8390_hdr;
- ei_status.block_input = &shmem_block_input;
- ei_status.block_output = &shmem_block_output;
+ ei_status.get_8390_hdr = shmem_get_8390_hdr;
+ ei_status.block_input = shmem_block_input;
+ ei_status.block_output = shmem_block_output;
info->flags |= USE_SHMEM;
return 0;
@@ -1644,6 +1617,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCETTX", 0x547e66dc, 0x6fc5459b),
PCMCIA_DEVICE_PROD_ID12("iPort", "10/100 Ethernet Card", 0x56c538d2, 0x11b0ffc0),
PCMCIA_DEVICE_PROD_ID12("KANSAI ELECTRIC CO.,LTD", "KLA-PCM/T", 0xb18dc3b4, 0xcc51a956),
+ PCMCIA_DEVICE_PROD_ID12("KENTRONICS", "KEP-230", 0xaf8144c9, 0x868f6616),
PCMCIA_DEVICE_PROD_ID12("KCI", "PE520 PCMCIA Ethernet Adapter", 0xa89b87d3, 0x1eb88e64),
PCMCIA_DEVICE_PROD_ID12("KINGMAX", "EN10T2T", 0x7bcb459a, 0xa5c81fa5),
PCMCIA_DEVICE_PROD_ID12("Kingston", "KNE-PC2", 0x1128e633, 0xce2a89b3),
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 307cd1721e91..3d1c549b7038 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -25,6 +25,8 @@
======================================================================*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -44,7 +46,6 @@
#include <linux/jiffies.h>
#include <linux/firmware.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -295,7 +296,7 @@ static const struct net_device_ops smc_netdev_ops = {
.ndo_tx_timeout = smc_tx_timeout,
.ndo_set_config = s9k_config,
.ndo_set_multicast_list = set_rx_mode,
- .ndo_do_ioctl = &smc_ioctl,
+ .ndo_do_ioctl = smc_ioctl,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -325,9 +326,8 @@ static int smc91c92_probe(struct pcmcia_device *link)
link->priv = dev;
spin_lock_init(&smc->lock);
- link->io.NumPorts1 = 16;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- link->io.IOAddrLines = 4;
+ link->resource[0]->end = 16;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -428,12 +428,13 @@ static int mhz_mfc_config_check(struct pcmcia_device *p_dev,
void *priv_data)
{
int k;
- p_dev->io.BasePort2 = cf->io.win[0].base;
+ p_dev->resource[1]->start = cf->io.win[0].base;
for (k = 0; k < 0x400; k += 0x10) {
if (k & 0x80)
continue;
- p_dev->io.BasePort1 = k ^ 0x300;
- if (!pcmcia_request_io(p_dev, &p_dev->io))
+ p_dev->resource[0]->start = k ^ 0x300;
+ p_dev->io_lines = 16;
+ if (!pcmcia_request_io(p_dev))
return 0;
}
return -ENODEV;
@@ -444,21 +445,20 @@ static int mhz_mfc_config(struct pcmcia_device *link)
struct net_device *dev = link->priv;
struct smc_private *smc = netdev_priv(dev);
win_req_t req;
- memreq_t mem;
+ unsigned int offset;
int i;
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status = CCSR_AUDIO_ENA;
- link->io.IOAddrLines = 16;
- link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
- link->io.NumPorts2 = 8;
+ link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
+ link->resource[1]->end = 8;
/* The Megahertz combo cards have modem-like CIS entries, so
we have to explicitly try a bunch of port combinations. */
if (pcmcia_loop_config(link, mhz_mfc_config_check, NULL))
return -ENODEV;
- dev->base_addr = link->io.BasePort1;
+ dev->base_addr = link->resource[0]->start;
/* Allocate a memory window, for accessing the ISR */
req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
@@ -469,11 +469,8 @@ static int mhz_mfc_config(struct pcmcia_device *link)
return -ENODEV;
smc->base = ioremap(req.Base, req.Size);
- mem.CardOffset = mem.Page = 0;
- if (smc->manfid == MANFID_MOTOROLA)
- mem.CardOffset = link->conf.ConfigBase;
- i = pcmcia_map_mem_page(link, link->win, &mem);
-
+ offset = (smc->manfid == MANFID_MOTOROLA) ? link->conf.ConfigBase : 0;
+ i = pcmcia_map_mem_page(link, link->win, offset);
if ((i == 0) &&
(smc->manfid == MANFID_MEGAHERTZ) &&
(smc->cardid == PRODID_MEGAHERTZ_EM3288))
@@ -546,7 +543,7 @@ static void mot_config(struct pcmcia_device *link)
struct net_device *dev = link->priv;
struct smc_private *smc = netdev_priv(dev);
unsigned int ioaddr = dev->base_addr;
- unsigned int iouart = link->io.BasePort2;
+ unsigned int iouart = link->resource[1]->start;
/* Set UART base address and force map with COR bit 1 */
writeb(iouart & 0xff, smc->base + MOT_UART + CISREG_IOBASE_0);
@@ -602,9 +599,9 @@ static int smc_configcheck(struct pcmcia_device *p_dev,
unsigned int vcc,
void *priv_data)
{
- p_dev->io.BasePort1 = cf->io.win[0].base;
- p_dev->io.IOAddrLines = cf->io.flags & CISTPL_IO_LINES_MASK;
- return pcmcia_request_io(p_dev, &p_dev->io);
+ p_dev->resource[0]->start = cf->io.win[0].base;
+ p_dev->io_lines = cf->io.flags & CISTPL_IO_LINES_MASK;
+ return pcmcia_request_io(p_dev);
}
static int smc_config(struct pcmcia_device *link)
@@ -612,10 +609,10 @@ static int smc_config(struct pcmcia_device *link)
struct net_device *dev = link->priv;
int i;
- link->io.NumPorts1 = 16;
+ link->resource[0]->end = 16;
i = pcmcia_loop_config(link, smc_configcheck, NULL);
if (!i)
- dev->base_addr = link->io.BasePort1;
+ dev->base_addr = link->resource[0]->start;
return i;
}
@@ -647,27 +644,27 @@ static int osi_config(struct pcmcia_device *link)
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status = CCSR_AUDIO_ENA;
- link->io.NumPorts1 = 64;
- link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
- link->io.NumPorts2 = 8;
- link->io.IOAddrLines = 16;
+ link->resource[0]->end = 64;
+ link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
+ link->resource[1]->end = 8;
/* Enable Hard Decode, LAN, Modem */
link->conf.ConfigIndex = 0x23;
+ link->io_lines = 16;
for (i = j = 0; j < 4; j++) {
- link->io.BasePort2 = com[j];
- i = pcmcia_request_io(link, &link->io);
+ link->resource[1]->start = com[j];
+ i = pcmcia_request_io(link);
if (i == 0)
break;
}
if (i != 0) {
/* Fallback: turn off hard decode */
link->conf.ConfigIndex = 0x03;
- link->io.NumPorts2 = 0;
- i = pcmcia_request_io(link, &link->io);
+ link->resource[1]->end = 0;
+ i = pcmcia_request_io(link);
}
- dev->base_addr = link->io.BasePort1 + 0x10;
+ dev->base_addr = link->resource[0]->start + 0x10;
return i;
}
@@ -684,7 +681,7 @@ static int osi_load_firmware(struct pcmcia_device *link)
/* Download the Seven of Diamonds firmware */
for (i = 0; i < fw->size; i++) {
- outb(fw->data[i], link->io.BasePort1 + 2);
+ outb(fw->data[i], link->resource[0]->start + 2);
udelay(50);
}
release_firmware(fw);
@@ -726,12 +723,12 @@ static int osi_setup(struct pcmcia_device *link, u_short manfid, u_short cardid)
return rc;
} else if (manfid == MANFID_OSITECH) {
/* Make sure both functions are powered up */
- set_bits(0x300, link->io.BasePort1 + OSITECH_AUI_PWR);
+ set_bits(0x300, link->resource[0]->start + OSITECH_AUI_PWR);
/* Now, turn on the interrupt for both card functions */
- set_bits(0x300, link->io.BasePort1 + OSITECH_RESET_ISR);
+ set_bits(0x300, link->resource[0]->start + OSITECH_RESET_ISR);
dev_dbg(&link->dev, "AUI/PWR: %4.4x RESET/ISR: %4.4x\n",
- inw(link->io.BasePort1 + OSITECH_AUI_PWR),
- inw(link->io.BasePort1 + OSITECH_RESET_ISR));
+ inw(link->resource[0]->start + OSITECH_AUI_PWR),
+ inw(link->resource[0]->start + OSITECH_RESET_ISR));
}
return 0;
}
@@ -804,7 +801,7 @@ static int check_sig(struct pcmcia_device *link)
}
/* Try setting bus width */
- width = (link->io.Attributes1 == IO_DATA_PATH_WIDTH_AUTO);
+ width = ((link->resource[0]->flags & IO_DATA_PATH_WIDTH) == IO_DATA_PATH_WIDTH_AUTO);
s = inb(ioaddr + CONFIG);
if (width)
s |= CFG_16BIT;
@@ -825,7 +822,7 @@ static int check_sig(struct pcmcia_device *link)
modconf_t mod = {
.Attributes = CONF_IO_CHANGE_WIDTH,
};
- printk(KERN_INFO "smc91c92_cs: using 8-bit IO window.\n");
+ pr_info("using 8-bit IO window\n");
smc91c92_suspend(link);
pcmcia_modify_configuration(link, &mod);
@@ -886,7 +883,7 @@ static int smc91c92_config(struct pcmcia_device *link)
if ((if_port >= 0) && (if_port <= 2))
dev->if_port = if_port;
else
- printk(KERN_NOTICE "smc91c92_cs: invalid if_port requested\n");
+ dev_notice(&link->dev, "invalid if_port requested\n");
switch (smc->manfid) {
case MANFID_OSITECH:
@@ -904,7 +901,7 @@ static int smc91c92_config(struct pcmcia_device *link)
}
if (i != 0) {
- printk(KERN_NOTICE "smc91c92_cs: Unable to find hardware address.\n");
+ dev_notice(&link->dev, "Unable to find hardware address.\n");
goto config_failed;
}
@@ -957,30 +954,28 @@ static int smc91c92_config(struct pcmcia_device *link)
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
- printk(KERN_ERR "smc91c92_cs: register_netdev() failed\n");
+ dev_err(&link->dev, "register_netdev() failed\n");
goto config_undo;
}
- printk(KERN_INFO "%s: smc91c%s rev %d: io %#3lx, irq %d, "
- "hw_addr %pM\n",
- dev->name, name, (rev & 0x0f), dev->base_addr, dev->irq,
- dev->dev_addr);
+ netdev_info(dev, "smc91c%s rev %d: io %#3lx, irq %d, hw_addr %pM\n",
+ name, (rev & 0x0f), dev->base_addr, dev->irq, dev->dev_addr);
if (rev > 0) {
if (mir & 0x3ff)
- printk(KERN_INFO " %lu byte", mir);
+ netdev_info(dev, " %lu byte", mir);
else
- printk(KERN_INFO " %lu kb", mir>>10);
- printk(" buffer, %s xcvr\n", (smc->cfg & CFG_MII_SELECT) ?
- "MII" : if_names[dev->if_port]);
+ netdev_info(dev, " %lu kb", mir>>10);
+ pr_cont(" buffer, %s xcvr\n",
+ (smc->cfg & CFG_MII_SELECT) ? "MII" : if_names[dev->if_port]);
}
if (smc->cfg & CFG_MII_SELECT) {
if (smc->mii_if.phy_id != -1) {
- dev_dbg(&link->dev, " MII transceiver at index %d, status %x.\n",
- smc->mii_if.phy_id, j);
+ netdev_dbg(dev, " MII transceiver at index %d, status %x\n",
+ smc->mii_if.phy_id, j);
} else {
- printk(KERN_NOTICE " No MII transceivers found!\n");
+ netdev_notice(dev, " No MII transceivers found!\n");
}
}
return 0;
@@ -1086,10 +1081,10 @@ static void smc_dump(struct net_device *dev)
save = inw(ioaddr + BANK_SELECT);
for (w = 0; w < 4; w++) {
SMC_SELECT_BANK(w);
- printk(KERN_DEBUG "bank %d: ", w);
+ netdev_printk(KERN_DEBUG, dev, "bank %d: ", w);
for (i = 0; i < 14; i += 2)
- printk(" %04x", inw(ioaddr + i));
- printk("\n");
+ pr_cont(" %04x", inw(ioaddr + i));
+ pr_cont("\n");
}
outw(save, ioaddr + BANK_SELECT);
}
@@ -1111,7 +1106,7 @@ static int smc_open(struct net_device *dev)
return -ENODEV;
/* Physical device present signature. */
if (check_sig(link) < 0) {
- printk("smc91c92_cs: Yikes! Bad chip signature!\n");
+ netdev_info(dev, "Yikes! Bad chip signature!\n");
return -ENODEV;
}
link->open++;
@@ -1122,7 +1117,7 @@ static int smc_open(struct net_device *dev)
smc_reset(dev);
init_timer(&smc->media);
- smc->media.function = &media_check;
+ smc->media.function = media_check;
smc->media.data = (u_long) dev;
smc->media.expires = jiffies + HZ;
add_timer(&smc->media);
@@ -1177,7 +1172,7 @@ static void smc_hardware_send_packet(struct net_device * dev)
u_char packet_no;
if (!skb) {
- printk(KERN_ERR "%s: In XMIT with no packet to send.\n", dev->name);
+ netdev_err(dev, "In XMIT with no packet to send\n");
return;
}
@@ -1185,8 +1180,8 @@ static void smc_hardware_send_packet(struct net_device * dev)
packet_no = inw(ioaddr + PNR_ARR) >> 8;
if (packet_no & 0x80) {
/* If not, there is a hardware problem! Likely an ejected card. */
- printk(KERN_WARNING "%s: 91c92 hardware Tx buffer allocation"
- " failed, status %#2.2x.\n", dev->name, packet_no);
+ netdev_warn(dev, "hardware Tx buffer allocation failed, status %#2.2x\n",
+ packet_no);
dev_kfree_skb_irq(skb);
smc->saved_skb = NULL;
netif_start_queue(dev);
@@ -1205,8 +1200,7 @@ static void smc_hardware_send_packet(struct net_device * dev)
u_char *buf = skb->data;
u_int length = skb->len; /* The chip will pad to ethernet min. */
- pr_debug("%s: Trying to xmit packet of length %d.\n",
- dev->name, length);
+ netdev_dbg(dev, "Trying to xmit packet of length %d\n", length);
/* send the packet length: +6 for status word, length, and ctl */
outw(0, ioaddr + DATA_1);
@@ -1238,9 +1232,8 @@ static void smc_tx_timeout(struct net_device *dev)
struct smc_private *smc = netdev_priv(dev);
unsigned int ioaddr = dev->base_addr;
- printk(KERN_NOTICE "%s: SMC91c92 transmit timed out, "
- "Tx_status %2.2x status %4.4x.\n",
- dev->name, inw(ioaddr)&0xff, inw(ioaddr + 2));
+ netdev_notice(dev, "transmit timed out, Tx_status %2.2x status %4.4x.\n",
+ inw(ioaddr)&0xff, inw(ioaddr + 2));
dev->stats.tx_errors++;
smc_reset(dev);
dev->trans_start = jiffies; /* prevent tx timeout */
@@ -1259,14 +1252,14 @@ static netdev_tx_t smc_start_xmit(struct sk_buff *skb,
netif_stop_queue(dev);
- pr_debug("%s: smc_start_xmit(length = %d) called,"
- " status %4.4x.\n", dev->name, skb->len, inw(ioaddr + 2));
+ netdev_dbg(dev, "smc_start_xmit(length = %d) called, status %04x\n",
+ skb->len, inw(ioaddr + 2));
if (smc->saved_skb) {
/* THIS SHOULD NEVER HAPPEN. */
dev->stats.tx_aborted_errors++;
- printk(KERN_DEBUG "%s: Internal error -- sent packet while busy.\n",
- dev->name);
+ netdev_printk(KERN_DEBUG, dev,
+ "Internal error -- sent packet while busy\n");
return NETDEV_TX_BUSY;
}
smc->saved_skb = skb;
@@ -1274,7 +1267,7 @@ static netdev_tx_t smc_start_xmit(struct sk_buff *skb,
num_pages = skb->len >> 8;
if (num_pages > 7) {
- printk(KERN_ERR "%s: Far too big packet error.\n", dev->name);
+ netdev_err(dev, "Far too big packet error: %d pages\n", num_pages);
dev_kfree_skb (skb);
smc->saved_skb = NULL;
dev->stats.tx_dropped++;
@@ -1344,8 +1337,7 @@ static void smc_tx_err(struct net_device * dev)
}
if (tx_status & TS_SUCCESS) {
- printk(KERN_NOTICE "%s: Successful packet caused error "
- "interrupt?\n", dev->name);
+ netdev_notice(dev, "Successful packet caused error interrupt?\n");
}
/* re-enable transmit */
SMC_SELECT_BANK(0);
@@ -1535,8 +1527,7 @@ static void smc_rx(struct net_device *dev)
/* Assertion: we are in Window 2. */
if (inw(ioaddr + FIFO_PORTS) & FP_RXEMPTY) {
- printk(KERN_ERR "%s: smc_rx() with nothing on Rx FIFO.\n",
- dev->name);
+ netdev_err(dev, "smc_rx() with nothing on Rx FIFO\n");
return;
}
@@ -1651,8 +1642,7 @@ static int s9k_config(struct net_device *dev, struct ifmap *map)
else if (map->port > 2)
return -EINVAL;
dev->if_port = map->port;
- printk(KERN_INFO "%s: switched to %s port\n",
- dev->name, if_names[dev->if_port]);
+ netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
smc_reset(dev);
}
return 0;
@@ -1803,7 +1793,7 @@ static void media_check(u_long arg)
this, we can limp along even if the interrupt is blocked */
if (smc->watchdog++ && ((i>>8) & i)) {
if (!smc->fast_poll)
- printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+ netdev_info(dev, "interrupt(s) dropped!\n");
local_irq_save(flags);
smc_interrupt(dev->irq, dev);
local_irq_restore(flags);
@@ -1827,7 +1817,7 @@ static void media_check(u_long arg)
SMC_SELECT_BANK(3);
link = mdio_read(dev, smc->mii_if.phy_id, 1);
if (!link || (link == 0xffff)) {
- printk(KERN_INFO "%s: MII is missing!\n", dev->name);
+ netdev_info(dev, "MII is missing!\n");
smc->mii_if.phy_id = -1;
goto reschedule;
}
@@ -1835,15 +1825,13 @@ static void media_check(u_long arg)
link &= 0x0004;
if (link != smc->link_status) {
u_short p = mdio_read(dev, smc->mii_if.phy_id, 5);
- printk(KERN_INFO "%s: %s link beat\n", dev->name,
- (link) ? "found" : "lost");
+ netdev_info(dev, "%s link beat\n", link ? "found" : "lost");
smc->duplex = (((p & 0x0100) || ((p & 0x1c0) == 0x40))
? TCR_FDUPLX : 0);
if (link) {
- printk(KERN_INFO "%s: autonegotiation complete: "
- "%sbaseT-%cD selected\n", dev->name,
- ((p & 0x0180) ? "100" : "10"),
- (smc->duplex ? 'F' : 'H'));
+ netdev_info(dev, "autonegotiation complete: "
+ "%dbaseT-%cD selected\n",
+ (p & 0x0180) ? 100 : 10, smc->duplex ? 'F' : 'H');
}
SMC_SELECT_BANK(0);
outw(inw(ioaddr + TCR) | smc->duplex, ioaddr + TCR);
@@ -1862,25 +1850,23 @@ static void media_check(u_long arg)
if (media != smc->media_status) {
if ((media & smc->media_status & 1) &&
((smc->media_status ^ media) & EPH_LINK_OK))
- printk(KERN_INFO "%s: %s link beat\n", dev->name,
- (smc->media_status & EPH_LINK_OK ? "lost" : "found"));
+ netdev_info(dev, "%s link beat\n",
+ smc->media_status & EPH_LINK_OK ? "lost" : "found");
else if ((media & smc->media_status & 2) &&
((smc->media_status ^ media) & EPH_16COL))
- printk(KERN_INFO "%s: coax cable %s\n", dev->name,
- (media & EPH_16COL ? "problem" : "ok"));
+ netdev_info(dev, "coax cable %s\n",
+ media & EPH_16COL ? "problem" : "ok");
if (dev->if_port == 0) {
if (media & 1) {
if (media & EPH_LINK_OK)
- printk(KERN_INFO "%s: flipped to 10baseT\n",
- dev->name);
+ netdev_info(dev, "flipped to 10baseT\n");
else
smc_set_xcvr(dev, 2);
} else {
if (media & EPH_16COL)
smc_set_xcvr(dev, 1);
else
- printk(KERN_INFO "%s: flipped to 10base2\n",
- dev->name);
+ netdev_info(dev, "flipped to 10base2\n");
}
}
smc->media_status = media;
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index b6c3644888cd..d858b5e4c4a7 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -63,6 +63,8 @@
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -82,7 +84,6 @@
#include <linux/bitops.h>
#include <linux/mii.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -211,13 +212,6 @@ enum xirc_cmd { /* Commands */
static const char *if_names[] = { "Auto", "10BaseT", "10Base2", "AUI", "100BaseT" };
-
-#define KDBG_XIRC KERN_DEBUG "xirc2ps_cs: "
-#define KERR_XIRC KERN_ERR "xirc2ps_cs: "
-#define KWRN_XIRC KERN_WARNING "xirc2ps_cs: "
-#define KNOT_XIRC KERN_NOTICE "xirc2ps_cs: "
-#define KINF_XIRC KERN_INFO "xirc2ps_cs: "
-
/* card types */
#define XIR_UNKNOWN 0 /* unknown: not supported */
#define XIR_CE 1 /* (prodid 1) different hardware: not supported */
@@ -351,26 +345,26 @@ PrintRegisters(struct net_device *dev)
if (pc_debug > 1) {
int i, page;
- printk(KDBG_XIRC "Register common: ");
+ printk(KERN_DEBUG pr_fmt("Register common: "));
for (i = 0; i < 8; i++)
- printk(" %2.2x", GetByte(i));
- printk("\n");
+ pr_cont(" %2.2x", GetByte(i));
+ pr_cont("\n");
for (page = 0; page <= 8; page++) {
- printk(KDBG_XIRC "Register page %2x: ", page);
+ printk(KERN_DEBUG pr_fmt("Register page %2x: "), page);
SelectPage(page);
for (i = 8; i < 16; i++)
- printk(" %2.2x", GetByte(i));
- printk("\n");
+ pr_cont(" %2.2x", GetByte(i));
+ pr_cont("\n");
}
for (page=0x40 ; page <= 0x5f; page++) {
if (page == 0x43 || (page >= 0x46 && page <= 0x4f) ||
(page >= 0x51 && page <=0x5e))
continue;
- printk(KDBG_XIRC "Register page %2x: ", page);
+ printk(KERN_DEBUG pr_fmt("Register page %2x: "), page);
SelectPage(page);
for (i = 8; i < 16; i++)
- printk(" %2.2x", GetByte(i));
- printk("\n");
+ pr_cont(" %2.2x", GetByte(i));
+ pr_cont("\n");
}
}
}
@@ -609,11 +603,11 @@ set_card_type(struct pcmcia_device *link)
local->modem = 0;
local->card_type = XIR_UNKNOWN;
if (!(prodid & 0x40)) {
- printk(KNOT_XIRC "Ooops: Not a creditcard\n");
+ pr_notice("Oops: Not a creditcard\n");
return 0;
}
if (!(mediaid & 0x01)) {
- printk(KNOT_XIRC "Not an Ethernet card\n");
+ pr_notice("Not an Ethernet card\n");
return 0;
}
if (mediaid & 0x10) {
@@ -644,12 +638,11 @@ set_card_type(struct pcmcia_device *link)
}
}
if (local->card_type == XIR_CE || local->card_type == XIR_CEM) {
- printk(KNOT_XIRC "Sorry, this is an old CE card\n");
+ pr_notice("Sorry, this is an old CE card\n");
return 0;
}
if (local->card_type == XIR_UNKNOWN)
- printk(KNOT_XIRC "unknown card (mediaid=%02x prodid=%02x)\n",
- mediaid, prodid);
+ pr_notice("unknown card (mediaid=%02x prodid=%02x)\n", mediaid, prodid);
return 1;
}
@@ -678,9 +671,9 @@ xirc2ps_config_modem(struct pcmcia_device *p_dev,
if (cf->io.nwin > 0 && (cf->io.win[0].base & 0xf) == 8) {
for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) {
- p_dev->io.BasePort2 = cf->io.win[0].base;
- p_dev->io.BasePort1 = ioaddr;
- if (!pcmcia_request_io(p_dev, &p_dev->io))
+ p_dev->resource[1]->start = cf->io.win[0].base;
+ p_dev->resource[0]->start = ioaddr;
+ if (!pcmcia_request_io(p_dev))
return 0;
}
}
@@ -697,11 +690,11 @@ xirc2ps_config_check(struct pcmcia_device *p_dev,
int *pass = priv_data;
if (cf->io.nwin > 0 && (cf->io.win[0].base & 0xf) == 8) {
- p_dev->io.BasePort2 = cf->io.win[0].base;
- p_dev->io.BasePort1 = p_dev->io.BasePort2
+ p_dev->resource[1]->start = cf->io.win[0].base;
+ p_dev->resource[0]->start = p_dev->resource[1]->start
+ (*pass ? (cf->index & 0x20 ? -24:8)
: (cf->index & 0x20 ? 8:-24));
- if (!pcmcia_request_io(p_dev, &p_dev->io))
+ if (!pcmcia_request_io(p_dev))
return 0;
}
return -ENODEV;
@@ -749,7 +742,7 @@ xirc2ps_config(struct pcmcia_device * link)
/* Is this a valid card */
if (link->has_manf_id == 0) {
- printk(KNOT_XIRC "manfid not found in CIS\n");
+ pr_notice("manfid not found in CIS\n");
goto failure;
}
@@ -771,14 +764,14 @@ xirc2ps_config(struct pcmcia_device * link)
local->manf_str = "Toshiba";
break;
default:
- printk(KNOT_XIRC "Unknown Card Manufacturer ID: 0x%04x\n",
- (unsigned)link->manf_id);
+ pr_notice("Unknown Card Manufacturer ID: 0x%04x\n",
+ (unsigned)link->manf_id);
goto failure;
}
dev_dbg(&link->dev, "found %s card\n", local->manf_str);
if (!set_card_type(link)) {
- printk(KNOT_XIRC "this card is not supported\n");
+ pr_notice("this card is not supported\n");
goto failure;
}
@@ -804,12 +797,12 @@ xirc2ps_config(struct pcmcia_device * link)
err = pcmcia_loop_tuple(link, CISTPL_FUNCE, pcmcia_get_mac_ce, dev);
if (err) {
- printk(KNOT_XIRC "node-id not found in CIS\n");
+ pr_notice("node-id not found in CIS\n");
goto failure;
}
- link->io.IOAddrLines =10;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
+ link->io_lines = 10;
if (local->modem) {
int pass;
@@ -817,16 +810,16 @@ xirc2ps_config(struct pcmcia_device * link)
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status |= CCSR_AUDIO_ENA;
}
- link->io.NumPorts2 = 8;
- link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+ link->resource[1]->end = 8;
+ link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
if (local->dingo) {
/* Take the Modem IO port from the CIS and scan for a free
* Ethernet port */
- link->io.NumPorts1 = 16; /* no Mako stuff anymore */
+ link->resource[0]->end = 16; /* no Mako stuff anymore */
if (!pcmcia_loop_config(link, xirc2ps_config_modem, NULL))
goto port_found;
} else {
- link->io.NumPorts1 = 18;
+ link->resource[0]->end = 18;
/* We do 2 passes here: The first one uses the regular mapping and
* the second tries again, thereby considering that the 32 ports are
* mirrored every 32 bytes. Actually we use a mirrored port for
@@ -839,16 +832,16 @@ xirc2ps_config(struct pcmcia_device * link)
* try to configure as Ethernet only.
* .... */
}
- printk(KNOT_XIRC "no ports available\n");
+ pr_notice("no ports available\n");
} else {
- link->io.NumPorts1 = 16;
+ link->resource[0]->end = 16;
for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) {
- link->io.BasePort1 = ioaddr;
- if (!(err=pcmcia_request_io(link, &link->io)))
+ link->resource[0]->start = ioaddr;
+ if (!(err = pcmcia_request_io(link)))
goto port_found;
}
- link->io.BasePort1 = 0; /* let CS decide */
- if ((err=pcmcia_request_io(link, &link->io)))
+ link->resource[0]->start = 0; /* let CS decide */
+ if ((err = pcmcia_request_io(link)))
goto config_error;
}
port_found:
@@ -870,24 +863,21 @@ xirc2ps_config(struct pcmcia_device * link)
goto config_error;
if (local->dingo) {
- conf_reg_t reg;
win_req_t req;
- memreq_t mem;
/* Reset the modem's BAR to the correct value
* This is necessary because in the RequestConfiguration call,
* the base address of the ethernet port (BasePort1) is written
* to the BAR registers of the modem.
*/
- reg.Action = CS_WRITE;
- reg.Offset = CISREG_IOBASE_0;
- reg.Value = link->io.BasePort2 & 0xff;
- if ((err = pcmcia_access_configuration_register(link, &reg)))
+ err = pcmcia_write_config_byte(link, CISREG_IOBASE_0,
+ (u8)(link->resource[1]->start & 0xff));
+ if (err)
goto config_error;
- reg.Action = CS_WRITE;
- reg.Offset = CISREG_IOBASE_1;
- reg.Value = (link->io.BasePort2 >> 8) & 0xff;
- if ((err = pcmcia_access_configuration_register(link, &reg)))
+
+ err = pcmcia_write_config_byte(link, CISREG_IOBASE_1,
+ (link->resource[1]->start >> 8) & 0xff);
+ if (err)
goto config_error;
/* There is no config entry for the Ethernet part which
@@ -901,40 +891,38 @@ xirc2ps_config(struct pcmcia_device * link)
goto config_error;
local->dingo_ccr = ioremap(req.Base,0x1000) + 0x0800;
- mem.CardOffset = 0x0;
- mem.Page = 0;
- if ((err = pcmcia_map_mem_page(link, link->win, &mem)))
+ if ((err = pcmcia_map_mem_page(link, link->win, 0)))
goto config_error;
/* Setup the CCRs; there are no infos in the CIS about the Ethernet
* part.
*/
writeb(0x47, local->dingo_ccr + CISREG_COR);
- ioaddr = link->io.BasePort1;
+ ioaddr = link->resource[0]->start;
writeb(ioaddr & 0xff , local->dingo_ccr + CISREG_IOBASE_0);
writeb((ioaddr >> 8)&0xff , local->dingo_ccr + CISREG_IOBASE_1);
#if 0
{
u_char tmp;
- printk(KERN_INFO "ECOR:");
+ pr_info("ECOR:");
for (i=0; i < 7; i++) {
tmp = readb(local->dingo_ccr + i*2);
- printk(" %02x", tmp);
+ pr_cont(" %02x", tmp);
}
- printk("\n");
- printk(KERN_INFO "DCOR:");
+ pr_cont("\n");
+ pr_info("DCOR:");
for (i=0; i < 4; i++) {
tmp = readb(local->dingo_ccr + 0x20 + i*2);
- printk(" %02x", tmp);
+ pr_cont(" %02x", tmp);
}
- printk("\n");
- printk(KERN_INFO "SCOR:");
+ pr_cont("\n");
+ pr_info("SCOR:");
for (i=0; i < 10; i++) {
tmp = readb(local->dingo_ccr + 0x40 + i*2);
- printk(" %02x", tmp);
+ pr_cont(" %02x", tmp);
}
- printk("\n");
+ pr_cont("\n");
}
#endif
@@ -953,11 +941,11 @@ xirc2ps_config(struct pcmcia_device * link)
(local->mohawk && if_port==4))
dev->if_port = if_port;
else
- printk(KNOT_XIRC "invalid if_port requested\n");
+ pr_notice("invalid if_port requested\n");
/* we can now register the device with the net subsystem */
dev->irq = link->irq;
- dev->base_addr = link->io.BasePort1;
+ dev->base_addr = link->resource[0]->start;
if (local->dingo)
do_reset(dev, 1); /* a kludge to make the cem56 work */
@@ -965,14 +953,14 @@ xirc2ps_config(struct pcmcia_device * link)
SET_NETDEV_DEV(dev, &link->dev);
if ((err=register_netdev(dev))) {
- printk(KNOT_XIRC "register_netdev() failed\n");
+ pr_notice("register_netdev() failed\n");
goto config_error;
}
/* give some infos about the hardware */
- printk(KERN_INFO "%s: %s: port %#3lx, irq %d, hwaddr %pM\n",
- dev->name, local->manf_str,(u_long)dev->base_addr, (int)dev->irq,
- dev->dev_addr);
+ netdev_info(dev, "%s: port %#3lx, irq %d, hwaddr %pM\n",
+ local->manf_str, (u_long)dev->base_addr, (int)dev->irq,
+ dev->dev_addr);
return 0;
@@ -1104,8 +1092,7 @@ xirc2ps_interrupt(int irq, void *dev_id)
skb = dev_alloc_skb(pktlen+3); /* 1 extra so we can use insw */
if (!skb) {
- printk(KNOT_XIRC "low memory, packet dropped (size=%u)\n",
- pktlen);
+ pr_notice("low memory, packet dropped (size=%u)\n", pktlen);
dev->stats.rx_dropped++;
} else { /* okay get the packet */
skb_reserve(skb, 2);
@@ -1274,7 +1261,7 @@ xirc_tx_timeout(struct net_device *dev)
{
local_info_t *lp = netdev_priv(dev);
dev->stats.tx_errors++;
- printk(KERN_NOTICE "%s: transmit timed out\n", dev->name);
+ netdev_notice(dev, "transmit timed out\n");
schedule_work(&lp->tx_timeout_task);
}
@@ -1441,8 +1428,7 @@ do_config(struct net_device *dev, struct ifmap *map)
local->probe_port = 0;
dev->if_port = map->port;
}
- printk(KERN_INFO "%s: switching to %s port\n",
- dev->name, if_names[dev->if_port]);
+ netdev_info(dev, "switching to %s port\n", if_names[dev->if_port]);
do_reset(dev,1); /* not the fine way :-) */
}
return 0;
@@ -1582,7 +1568,7 @@ do_reset(struct net_device *dev, int full)
{
SelectPage(0);
value = GetByte(XIRCREG_ESR); /* read the ESR */
- printk(KERN_DEBUG "%s: ESR is: %#02x\n", dev->name, value);
+ pr_debug("%s: ESR is: %#02x\n", dev->name, value);
}
#endif
@@ -1632,13 +1618,12 @@ do_reset(struct net_device *dev, int full)
if (full && local->mohawk && init_mii(dev)) {
if (dev->if_port == 4 || local->dingo || local->new_mii) {
- printk(KERN_INFO "%s: MII selected\n", dev->name);
+ netdev_info(dev, "MII selected\n");
SelectPage(2);
PutByte(XIRCREG2_MSR, GetByte(XIRCREG2_MSR) | 0x08);
msleep(20);
} else {
- printk(KERN_INFO "%s: MII detected; using 10mbs\n",
- dev->name);
+ netdev_info(dev, "MII detected; using 10mbs\n");
SelectPage(0x42);
if (dev->if_port == 2) /* enable 10Base2 */
PutByte(XIRCREG42_SWC1, 0xC0);
@@ -1683,8 +1668,8 @@ do_reset(struct net_device *dev, int full)
}
if (full)
- printk(KERN_INFO "%s: media %s, silicon revision %d\n",
- dev->name, if_names[dev->if_port], local->silicon);
+ netdev_info(dev, "media %s, silicon revision %d\n",
+ if_names[dev->if_port], local->silicon);
/* We should switch back to page 0 to avoid a bug in revision 0
* where regs with offset below 8 can't be read after an access
* to the MAC registers */
@@ -1726,8 +1711,7 @@ init_mii(struct net_device *dev)
control = mii_rd(ioaddr, 0, 0);
if (control & 0x0400) {
- printk(KERN_NOTICE "%s can't take PHY out of isolation mode\n",
- dev->name);
+ netdev_notice(dev, "can't take PHY out of isolation mode\n");
local->probe_port = 0;
return 0;
}
@@ -1745,8 +1729,7 @@ init_mii(struct net_device *dev)
}
if (!(status & 0x0020)) {
- printk(KERN_INFO "%s: autonegotiation failed;"
- " using 10mbs\n", dev->name);
+ netdev_info(dev, "autonegotiation failed; using 10mbs\n");
if (!local->new_mii) {
control = 0x0000;
mii_wr(ioaddr, 0, 0, control, 16);
@@ -1756,8 +1739,7 @@ init_mii(struct net_device *dev)
}
} else {
linkpartner = mii_rd(ioaddr, 0, 5);
- printk(KERN_INFO "%s: MII link partner: %04x\n",
- dev->name, linkpartner);
+ netdev_info(dev, "MII link partner: %04x\n", linkpartner);
if (linkpartner & 0x0080) {
dev->if_port = 4;
} else
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index a527e37728cd..eb799b36c86a 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -5,7 +5,7 @@
menuconfig PHYLIB
tristate "PHY Device support and infrastructure"
depends on !S390
- depends on NET_ETHERNET
+ depends on NETDEVICES
help
Ethernet controllers are usually attached to PHY
devices. This option provides infrastructure for
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index fc5fef2a8175..f62c7b717bc8 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -188,7 +188,7 @@ static int __devexit mdio_gpio_remove(struct platform_device *pdev)
#ifdef CONFIG_OF_GPIO
-static int __devinit mdio_ofgpio_probe(struct of_device *ofdev,
+static int __devinit mdio_ofgpio_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct mdio_gpio_platform_data *pdata;
@@ -224,7 +224,7 @@ out_free:
return -ENODEV;
}
-static int __devexit mdio_ofgpio_remove(struct of_device *ofdev)
+static int __devexit mdio_ofgpio_remove(struct platform_device *ofdev)
{
mdio_gpio_bus_destroy(&ofdev->dev);
kfree(ofdev->dev.platform_data);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 5130db8f5c4e..1bb16cb79433 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -301,7 +301,7 @@ EXPORT_SYMBOL(phy_ethtool_gset);
/**
* phy_mii_ioctl - generic PHY MII ioctl interface
* @phydev: the phy_device struct
- * @mii_data: MII ioctl data
+ * @ifr: &struct ifreq for socket ioctl's
* @cmd: ioctl cmd to execute
*
* Note that this function is currently incompatible with the
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index c0761197c07e..16ddc77313cb 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -466,6 +466,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
phydev->interface = interface;
+ phydev->state = PHY_READY;
+
/* Do initial configuration here, now that
* we have certain key parameters
* (dev_flags and interface) */
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index ec0349e84a8a..ca4df7f4cf21 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -995,8 +995,10 @@ plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
static void
plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
{
- const struct in_device *in_dev = dev->ip_ptr;
+ const struct in_device *in_dev;
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(dev);
if (in_dev) {
/* Any address will do - we take the first */
const struct in_ifaddr *ifa = in_dev->ifa_list;
@@ -1006,6 +1008,7 @@ plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
}
}
+ rcu_read_unlock();
}
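
The plip change is the standard RCU read-side pattern for IPv4 device data: dev->ip_ptr must not be dereferenced directly, because the in_device can be freed out from under a lockless reader. Sketched for kernel context:

	const struct in_device *in_dev;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);        /* NULL if no IPv4 config */
	if (in_dev) {
		/* in_dev (and its ifa_list) is only guaranteed stable
		 * inside this read-side critical section */
	}
	rcu_read_unlock();

plip_open() below uses __in_dev_get_rtnl() instead, which is legal because ndo_open runs with the RTNL lock held.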
static int
@@ -1088,7 +1091,8 @@ plip_open(struct net_device *dev)
when the device address isn't identical to the address of a
received frame, the kernel incorrectly drops it). */
- if ((in_dev=dev->ip_ptr) != NULL) {
+ in_dev=__in_dev_get_rtnl(dev);
+ if (in_dev) {
/* Any address will do - we take the first. We already
have the first two bytes filled with 0xfc, from
plip_init_dev(). */
@@ -1279,7 +1283,6 @@ static void plip_attach (struct parport *port)
if (!nl->pardev) {
printk(KERN_ERR "%s: parport_register failed\n", name);
goto err_free_dev;
- return;
}
plip_init_netdev(dev);
diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c
new file mode 100644
index 000000000000..761f0eced724
--- /dev/null
+++ b/drivers/net/pptp.c
@@ -0,0 +1,726 @@
+/*
+ * Point-to-Point Tunneling Protocol for Linux
+ *
+ * Authors: Dmitry Kozlov <xeb@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/ppp_channel.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_pppox.h>
+#include <linux/if_ppp.h>
+#include <linux/notifier.h>
+#include <linux/file.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/version.h>
+#include <linux/rcupdate.h>
+#include <linux/spinlock.h>
+
+#include <net/sock.h>
+#include <net/protocol.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/route.h>
+#include <net/gre.h>
+
+#include <linux/uaccess.h>
+
+#define PPTP_DRIVER_VERSION "0.8.5"
+
+#define MAX_CALLID 65535
+
+static DECLARE_BITMAP(callid_bitmap, MAX_CALLID + 1);
+static struct pppox_sock **callid_sock;
+
+static DEFINE_SPINLOCK(chan_lock);
+
+static struct proto pptp_sk_proto __read_mostly;
+static struct ppp_channel_ops pptp_chan_ops;
+static const struct proto_ops pptp_ops;
+
+#define PPP_LCP_ECHOREQ 0x09
+#define PPP_LCP_ECHOREP 0x0A
+#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
+
+#define MISSING_WINDOW 20
+#define WRAPPED(curseq, lastseq)\
+ ((((curseq) & 0xffffff00) == 0) &&\
+ (((lastseq) & 0xffffff00) == 0xffffff00))
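
WRAPPED(cur, last) declares a wrap-around when the newer counter sits in the lowest 256 values of the 32-bit space while the older one sits in the highest 256, i.e. the sequence counter rolled over between the two observations. A quick userspace check of the edge cases, assuming nothing beyond the macro itself:

	#include <assert.h>

	#define WRAPPED(curseq, lastseq)\
		((((curseq) & 0xffffff00) == 0) &&\
		(((lastseq) & 0xffffff00) == 0xffffff00))

	int main(void)
	{
		assert(WRAPPED(0x00000005u, 0xffffff80u));   /* rolled over */
		assert(!WRAPPED(0x00000105u, 0xffffff80u));  /* new value above the low band */
		assert(!WRAPPED(0x00000005u, 0x00000004u));  /* ordinary increment */
		return 0;
	}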
+
+#define PPTP_GRE_PROTO 0x880B
+#define PPTP_GRE_VER 0x1
+
+#define PPTP_GRE_FLAG_C 0x80
+#define PPTP_GRE_FLAG_R 0x40
+#define PPTP_GRE_FLAG_K 0x20
+#define PPTP_GRE_FLAG_S 0x10
+#define PPTP_GRE_FLAG_A 0x80
+
+#define PPTP_GRE_IS_C(f) ((f)&PPTP_GRE_FLAG_C)
+#define PPTP_GRE_IS_R(f) ((f)&PPTP_GRE_FLAG_R)
+#define PPTP_GRE_IS_K(f) ((f)&PPTP_GRE_FLAG_K)
+#define PPTP_GRE_IS_S(f) ((f)&PPTP_GRE_FLAG_S)
+#define PPTP_GRE_IS_A(f) ((f)&PPTP_GRE_FLAG_A)
+
+#define PPTP_HEADER_OVERHEAD (2+sizeof(struct pptp_gre_header))
+struct pptp_gre_header {
+ u8 flags;
+ u8 ver;
+ u16 protocol;
+ u16 payload_len;
+ u16 call_id;
+ u32 seq;
+ u32 ack;
+} __packed;
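
The struct describes the longest form of the header; on the wire the ack word is present only when the A flag is set (and seq only with S), which is why both pptp_xmit() and pptp_rcv_core() below subtract the size of the ack field when no acknowledgement rides along. The arithmetic, checked in userspace with an equivalent packed layout:

	#include <stdint.h>
	#include <stdio.h>

	struct pptp_gre_header {
		uint8_t flags, ver;
		uint16_t protocol, payload_len, call_id;
		uint32_t seq, ack;
	} __attribute__((packed));

	int main(void)
	{
		/* 16 bytes with seq+ack, 12 once the ack word is omitted */
		printf("full=%zu no-ack=%zu\n",
		       sizeof(struct pptp_gre_header),
		       sizeof(struct pptp_gre_header) - sizeof(uint32_t));
		return 0;
	}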
+
+static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr)
+{
+ struct pppox_sock *sock;
+ struct pptp_opt *opt;
+
+ rcu_read_lock();
+ sock = rcu_dereference(callid_sock[call_id]);
+ if (sock) {
+ opt = &sock->proto.pptp;
+ if (opt->dst_addr.sin_addr.s_addr != s_addr)
+ sock = NULL;
+ else
+ sock_hold(sk_pppox(sock));
+ }
+ rcu_read_unlock();
+
+ return sock;
+}
+
+static int lookup_chan_dst(u16 call_id, __be32 d_addr)
+{
+ struct pppox_sock *sock;
+ struct pptp_opt *opt;
+ int i;
+
+ rcu_read_lock();
+ for (i = find_next_bit(callid_bitmap, MAX_CALLID, 1); i < MAX_CALLID;
+ i = find_next_bit(callid_bitmap, MAX_CALLID, i + 1)) {
+ sock = rcu_dereference(callid_sock[i]);
+ if (!sock)
+ continue;
+ opt = &sock->proto.pptp;
+ if (opt->dst_addr.call_id == call_id &&
+ opt->dst_addr.sin_addr.s_addr == d_addr)
+ break;
+ }
+ rcu_read_unlock();
+
+ return i < MAX_CALLID;
+}
+
+static int add_chan(struct pppox_sock *sock)
+{
+ static int call_id;
+
+ spin_lock(&chan_lock);
+ if (!sock->proto.pptp.src_addr.call_id) {
+ call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
+ if (call_id == MAX_CALLID) {
+ call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
+ if (call_id == MAX_CALLID)
+ goto out_err;
+ }
+ sock->proto.pptp.src_addr.call_id = call_id;
+ } else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap))
+ goto out_err;
+
+ set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
+ rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock);
+ spin_unlock(&chan_lock);
+
+ return 0;
+
+out_err:
+ spin_unlock(&chan_lock);
+ return -1;
+}
+
+static void del_chan(struct pppox_sock *sock)
+{
+ spin_lock(&chan_lock);
+ clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
+ rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
+ spin_unlock(&chan_lock);
+ synchronize_rcu();
+}
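
add_chan(), del_chan() and lookup_chan() together form the usual RCU-protected lookup table: rcu_assign_pointer() publishes under chan_lock, rcu_dereference() reads under rcu_read_lock(), and del_chan() ends with synchronize_rcu() so that every reader that might still see the old pointer has left its critical section before the caller drops the socket. Condensed, the teardown ordering is (id standing in for the call ID being retired):

	/* writer side: unpublish, then wait out all readers */
	spin_lock(&chan_lock);
	rcu_assign_pointer(callid_sock[id], NULL);
	spin_unlock(&chan_lock);
	synchronize_rcu();     /* no rcu_read_lock() section can still hold it */
	/* only now is it safe to drop the final reference */

Note that lookup_chan() takes its sock_hold() inside the read-side section, which is what makes the reference it returns valid across this barrier.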
+
+static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+ struct sock *sk = (struct sock *) chan->private;
+ struct pppox_sock *po = pppox_sk(sk);
+ struct pptp_opt *opt = &po->proto.pptp;
+ struct pptp_gre_header *hdr;
+ unsigned int header_len = sizeof(*hdr);
+ int err = 0;
+ int islcp;
+ int len;
+ unsigned char *data;
+ __u32 seq_recv;
+
+
+ struct rtable *rt;
+ struct net_device *tdev;
+ struct iphdr *iph;
+ int max_headroom;
+
+ if (sk_pppox(po)->sk_state & PPPOX_DEAD)
+ goto tx_error;
+
+ {
+ struct flowi fl = { .oif = 0,
+ .nl_u = {
+ .ip4_u = {
+ .daddr = opt->dst_addr.sin_addr.s_addr,
+ .saddr = opt->src_addr.sin_addr.s_addr,
+ .tos = RT_TOS(0) } },
+ .proto = IPPROTO_GRE };
+ err = ip_route_output_key(&init_net, &rt, &fl);
+ if (err)
+ goto tx_error;
+ }
+ tdev = rt->dst.dev;
+
+ max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(*iph) + sizeof(*hdr) + 2;
+
+ if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
+ struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
+ if (!new_skb) {
+ ip_rt_put(rt);
+ goto tx_error;
+ }
+ if (skb->sk)
+ skb_set_owner_w(new_skb, skb->sk);
+ kfree_skb(skb);
+ skb = new_skb;
+ }
+
+ data = skb->data;
+ islcp = ((data[0] << 8) + data[1]) == PPP_LCP && 1 <= data[2] && data[2] <= 7;
+
+ /* compress protocol field */
+ if ((opt->ppp_flags & SC_COMP_PROT) && data[0] == 0 && !islcp)
+ skb_pull(skb, 1);
+
+ /* Put in the address/control bytes if necessary */
+ if ((opt->ppp_flags & SC_COMP_AC) == 0 || islcp) {
+ data = skb_push(skb, 2);
+ data[0] = PPP_ALLSTATIONS;
+ data[1] = PPP_UI;
+ }
+
+ len = skb->len;
+
+ seq_recv = opt->seq_recv;
+
+ if (opt->ack_sent == seq_recv)
+ header_len -= sizeof(hdr->ack);
+
+ /* Push down and install GRE header */
+ skb_push(skb, header_len);
+ hdr = (struct pptp_gre_header *)(skb->data);
+
+ hdr->flags = PPTP_GRE_FLAG_K;
+ hdr->ver = PPTP_GRE_VER;
+ hdr->protocol = htons(PPTP_GRE_PROTO);
+ hdr->call_id = htons(opt->dst_addr.call_id);
+
+ hdr->flags |= PPTP_GRE_FLAG_S;
+ hdr->seq = htonl(++opt->seq_sent);
+ if (opt->ack_sent != seq_recv) {
+ /* send ack with this message */
+ hdr->ver |= PPTP_GRE_FLAG_A;
+ hdr->ack = htonl(seq_recv);
+ opt->ack_sent = seq_recv;
+ }
+ hdr->payload_len = htons(len);
+
+ /* Push down and install the IP header. */
+
+ skb_reset_transport_header(skb);
+ skb_push(skb, sizeof(*iph));
+ skb_reset_network_header(skb);
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
+
+ iph = ip_hdr(skb);
+ iph->version = 4;
+ iph->ihl = sizeof(struct iphdr) >> 2;
+ if (ip_dont_fragment(sk, &rt->dst))
+ iph->frag_off = htons(IP_DF);
+ else
+ iph->frag_off = 0;
+ iph->protocol = IPPROTO_GRE;
+ iph->tos = 0;
+ iph->daddr = rt->rt_dst;
+ iph->saddr = rt->rt_src;
+ iph->ttl = dst_metric(&rt->dst, RTAX_HOPLIMIT);
+ iph->tot_len = htons(skb->len);
+
+ skb_dst_drop(skb);
+ skb_dst_set(skb, &rt->dst);
+
+ nf_reset(skb);
+
+ skb->ip_summed = CHECKSUM_NONE;
+ ip_select_ident(iph, &rt->dst, NULL);
+ ip_send_check(iph);
+
+ ip_local_out(skb);
+
+tx_error:
+ return 1;
+}
+
+static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
+{
+ struct pppox_sock *po = pppox_sk(sk);
+ struct pptp_opt *opt = &po->proto.pptp;
+ int headersize, payload_len, seq;
+ __u8 *payload;
+ struct pptp_gre_header *header;
+
+ if (!(sk->sk_state & PPPOX_CONNECTED)) {
+ if (sock_queue_rcv_skb(sk, skb))
+ goto drop;
+ return NET_RX_SUCCESS;
+ }
+
+ header = (struct pptp_gre_header *)(skb->data);
+
+ /* test if acknowledgement present */
+ if (PPTP_GRE_IS_A(header->ver)) {
+ __u32 ack = (PPTP_GRE_IS_S(header->flags)) ?
+ header->ack : header->seq; /* ack in different place if S = 0 */
+
+ ack = ntohl(ack);
+
+ if (ack > opt->ack_recv)
+ opt->ack_recv = ack;
+ /* also handle sequence number wrap-around */
+ if (WRAPPED(ack, opt->ack_recv))
+ opt->ack_recv = ack;
+ }
+
+ /* test if payload present */
+ if (!PPTP_GRE_IS_S(header->flags))
+ goto drop;
+
+ headersize = sizeof(*header);
+ payload_len = ntohs(header->payload_len);
+ seq = ntohl(header->seq);
+
+ /* no ack present? */
+ if (!PPTP_GRE_IS_A(header->ver))
+ headersize -= sizeof(header->ack);
+ /* check for incomplete packet (length smaller than expected) */
+ if (skb->len - headersize < payload_len)
+ goto drop;
+
+ payload = skb->data + headersize;
+ /* check for expected sequence number */
+ if (seq < opt->seq_recv + 1 || WRAPPED(opt->seq_recv, seq)) {
+ if ((payload[0] == PPP_ALLSTATIONS) && (payload[1] == PPP_UI) &&
+ (PPP_PROTOCOL(payload) == PPP_LCP) &&
+ ((payload[4] == PPP_LCP_ECHOREQ) || (payload[4] == PPP_LCP_ECHOREP)))
+ goto allow_packet;
+ } else {
+ opt->seq_recv = seq;
+allow_packet:
+ skb_pull(skb, headersize);
+
+ if (payload[0] == PPP_ALLSTATIONS && payload[1] == PPP_UI) {
+ /* chop off address/control */
+ if (skb->len < 3)
+ goto drop;
+ skb_pull(skb, 2);
+ }
+
+ if ((*skb->data) & 1) {
+ /* protocol is compressed */
+ skb_push(skb, 1)[0] = 0;
+ }
+
+ skb->ip_summed = CHECKSUM_NONE;
+		skb_set_network_header(skb, skb->head - skb->data);
+ ppp_input(&po->chan, skb);
+
+ return NET_RX_SUCCESS;
+ }
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
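+/*
+ * Note on the out-of-order test above (sketch; WRAPPED() is defined
+ * earlier in this file): "seq < seq_recv + 1" catches ordinary stale
+ * packets, while WRAPPED(seq_recv, seq) is expected to catch the stale
+ * case across 32-bit wrap-around -- e.g. seq_recv has already wrapped to
+ * 0x00000001 while a late packet still carries seq == 0xfffffffe.  Such
+ * packets are dropped, except that stale LCP echo requests/replies are
+ * still passed up so PPP keepalives survive reordering.
+ */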
+
+static int pptp_rcv(struct sk_buff *skb)
+{
+ struct pppox_sock *po;
+ struct pptp_gre_header *header;
+ struct iphdr *iph;
+
+ if (skb->pkt_type != PACKET_HOST)
+ goto drop;
+
+ if (!pskb_may_pull(skb, 12))
+ goto drop;
+
+ iph = ip_hdr(skb);
+
+ header = (struct pptp_gre_header *)skb->data;
+
+ if (ntohs(header->protocol) != PPTP_GRE_PROTO || /* PPTP-GRE protocol for PPTP */
+ PPTP_GRE_IS_C(header->flags) || /* flag C should be clear */
+ PPTP_GRE_IS_R(header->flags) || /* flag R should be clear */
+ !PPTP_GRE_IS_K(header->flags) || /* flag K should be set */
+ (header->flags&0xF) != 0) /* routing and recursion ctrl = 0 */
+ /* if invalid, discard this packet */
+ goto drop;
+
+ po = lookup_chan(htons(header->call_id), iph->saddr);
+ if (po) {
+ skb_dst_drop(skb);
+ nf_reset(skb);
+ return sk_receive_skb(sk_pppox(po), skb, 0);
+ }
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
+ int sockaddr_len)
+{
+ struct sock *sk = sock->sk;
+ struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
+ struct pppox_sock *po = pppox_sk(sk);
+ struct pptp_opt *opt = &po->proto.pptp;
+ int error = 0;
+
+ lock_sock(sk);
+
+ opt->src_addr = sp->sa_addr.pptp;
+	if (add_chan(po))
+		error = -EBUSY;
+
+ release_sock(sk);
+ return error;
+}
+
+static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ int sockaddr_len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
+ struct pppox_sock *po = pppox_sk(sk);
+ struct pptp_opt *opt = &po->proto.pptp;
+ struct rtable *rt;
+ int error = 0;
+
+ if (sp->sa_protocol != PX_PROTO_PPTP)
+ return -EINVAL;
+
+ if (lookup_chan_dst(sp->sa_addr.pptp.call_id, sp->sa_addr.pptp.sin_addr.s_addr))
+ return -EALREADY;
+
+ lock_sock(sk);
+ /* Check for already bound sockets */
+ if (sk->sk_state & PPPOX_CONNECTED) {
+ error = -EBUSY;
+ goto end;
+ }
+
+ /* Check for already disconnected sockets, on attempts to disconnect */
+ if (sk->sk_state & PPPOX_DEAD) {
+ error = -EALREADY;
+ goto end;
+ }
+
+ if (!opt->src_addr.sin_addr.s_addr || !sp->sa_addr.pptp.sin_addr.s_addr) {
+ error = -EINVAL;
+ goto end;
+ }
+
+ po->chan.private = sk;
+ po->chan.ops = &pptp_chan_ops;
+
+ {
+ struct flowi fl = {
+ .nl_u = {
+ .ip4_u = {
+ .daddr = opt->dst_addr.sin_addr.s_addr,
+ .saddr = opt->src_addr.sin_addr.s_addr,
+ .tos = RT_CONN_FLAGS(sk) } },
+ .proto = IPPROTO_GRE };
+ security_sk_classify_flow(sk, &fl);
+ if (ip_route_output_key(&init_net, &rt, &fl)) {
+ error = -EHOSTUNREACH;
+ goto end;
+ }
+ sk_setup_caps(sk, &rt->dst);
+ }
+ po->chan.mtu = dst_mtu(&rt->dst);
+ if (!po->chan.mtu)
+ po->chan.mtu = PPP_MTU;
+ ip_rt_put(rt);
+ po->chan.mtu -= PPTP_HEADER_OVERHEAD;
+
+ po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
+ error = ppp_register_channel(&po->chan);
+ if (error) {
+ pr_err("PPTP: failed to register PPP channel (%d)\n", error);
+ goto end;
+ }
+
+ opt->dst_addr = sp->sa_addr.pptp;
+ sk->sk_state = PPPOX_CONNECTED;
+
+ end:
+ release_sock(sk);
+ return error;
+}
+
+static int pptp_getname(struct socket *sock, struct sockaddr *uaddr,
+ int *usockaddr_len, int peer)
+{
+ int len = sizeof(struct sockaddr_pppox);
+ struct sockaddr_pppox sp;
+
+ sp.sa_family = AF_PPPOX;
+ sp.sa_protocol = PX_PROTO_PPTP;
+ sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr;
+
+ memcpy(uaddr, &sp, len);
+
+ *usockaddr_len = len;
+
+ return 0;
+}
+
+static int pptp_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ struct pppox_sock *po;
+ struct pptp_opt *opt;
+ int error = 0;
+
+ if (!sk)
+ return 0;
+
+ lock_sock(sk);
+
+ if (sock_flag(sk, SOCK_DEAD)) {
+ release_sock(sk);
+ return -EBADF;
+ }
+
+ po = pppox_sk(sk);
+ opt = &po->proto.pptp;
+ del_chan(po);
+
+ pppox_unbind_sock(sk);
+ sk->sk_state = PPPOX_DEAD;
+
+ sock_orphan(sk);
+ sock->sk = NULL;
+
+ release_sock(sk);
+ sock_put(sk);
+
+ return error;
+}
+
+static void pptp_sock_destruct(struct sock *sk)
+{
+ if (!(sk->sk_state & PPPOX_DEAD)) {
+ del_chan(pppox_sk(sk));
+ pppox_unbind_sock(sk);
+ }
+ skb_queue_purge(&sk->sk_receive_queue);
+}
+
+static int pptp_create(struct net *net, struct socket *sock)
+{
+ int error = -ENOMEM;
+ struct sock *sk;
+ struct pppox_sock *po;
+ struct pptp_opt *opt;
+
+ sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pptp_sk_proto);
+ if (!sk)
+ goto out;
+
+ sock_init_data(sock, sk);
+
+ sock->state = SS_UNCONNECTED;
+ sock->ops = &pptp_ops;
+
+ sk->sk_backlog_rcv = pptp_rcv_core;
+ sk->sk_state = PPPOX_NONE;
+ sk->sk_type = SOCK_STREAM;
+ sk->sk_family = PF_PPPOX;
+ sk->sk_protocol = PX_PROTO_PPTP;
+ sk->sk_destruct = pptp_sock_destruct;
+
+ po = pppox_sk(sk);
+ opt = &po->proto.pptp;
+
+ opt->seq_sent = 0; opt->seq_recv = 0;
+ opt->ack_recv = 0; opt->ack_sent = 0;
+
+ error = 0;
+out:
+ return error;
+}
+
+static int pptp_ppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
+ unsigned long arg)
+{
+ struct sock *sk = (struct sock *) chan->private;
+ struct pppox_sock *po = pppox_sk(sk);
+ struct pptp_opt *opt = &po->proto.pptp;
+ void __user *argp = (void __user *)arg;
+ int __user *p = argp;
+ int err, val;
+
+ err = -EFAULT;
+ switch (cmd) {
+ case PPPIOCGFLAGS:
+ val = opt->ppp_flags;
+ if (put_user(val, p))
+ break;
+ err = 0;
+ break;
+ case PPPIOCSFLAGS:
+ if (get_user(val, p))
+ break;
+ opt->ppp_flags = val & ~SC_RCV_BITS;
+ err = 0;
+ break;
+ default:
+ err = -ENOTTY;
+ }
+
+ return err;
+}
+
+static struct ppp_channel_ops pptp_chan_ops = {
+ .start_xmit = pptp_xmit,
+ .ioctl = pptp_ppp_ioctl,
+};
+
+static struct proto pptp_sk_proto __read_mostly = {
+ .name = "PPTP",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct pppox_sock),
+};
+
+static const struct proto_ops pptp_ops = {
+ .family = AF_PPPOX,
+ .owner = THIS_MODULE,
+ .release = pptp_release,
+ .bind = pptp_bind,
+ .connect = pptp_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = pptp_getname,
+ .poll = sock_no_poll,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = sock_no_setsockopt,
+ .getsockopt = sock_no_getsockopt,
+ .sendmsg = sock_no_sendmsg,
+ .recvmsg = sock_no_recvmsg,
+ .mmap = sock_no_mmap,
+ .ioctl = pppox_ioctl,
+};
+
+static struct pppox_proto pppox_pptp_proto = {
+ .create = pptp_create,
+ .owner = THIS_MODULE,
+};
+
+static struct gre_protocol gre_pptp_protocol = {
+ .handler = pptp_rcv,
+};
+
+static int __init pptp_init_module(void)
+{
+ int err = 0;
+ pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n");
+
+ callid_sock = __vmalloc((MAX_CALLID + 1) * sizeof(void *),
+ GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+ if (!callid_sock) {
+		pr_err("PPTP: can't allocate memory\n");
+ return -ENOMEM;
+ }
+
+ err = gre_add_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
+ if (err) {
+ pr_err("PPTP: can't add gre protocol\n");
+ goto out_mem_free;
+ }
+
+ err = proto_register(&pptp_sk_proto, 0);
+ if (err) {
+ pr_err("PPTP: can't register sk_proto\n");
+ goto out_gre_del_protocol;
+ }
+
+ err = register_pppox_proto(PX_PROTO_PPTP, &pppox_pptp_proto);
+ if (err) {
+ pr_err("PPTP: can't register pppox_proto\n");
+ goto out_unregister_sk_proto;
+ }
+
+ return 0;
+
+out_unregister_sk_proto:
+ proto_unregister(&pptp_sk_proto);
+out_gre_del_protocol:
+ gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
+out_mem_free:
+ vfree(callid_sock);
+
+ return err;
+}
+
+static void __exit pptp_exit_module(void)
+{
+ unregister_pppox_proto(PX_PROTO_PPTP);
+ proto_unregister(&pptp_sk_proto);
+ gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
+ vfree(callid_sock);
+}
+
+module_init(pptp_init_module);
+module_exit(pptp_exit_module);
+
+MODULE_DESCRIPTION("Point-to-Point Tunneling Protocol");
+MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 87d6b8f36304..5526ab4895e6 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -956,9 +956,9 @@ static void gelic_net_pass_skb_up(struct gelic_descr *descr,
(!(data_error & GELIC_DESCR_DATA_ERROR_CHK_MASK)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
} else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
/* update netdevice statistics */
netdev->stats.rx_packets++;
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
new file mode 100644
index 000000000000..75c2ff99d66d
--- /dev/null
+++ b/drivers/net/pxa168_eth.c
@@ -0,0 +1,1663 @@
+/*
+ * PXA168 ethernet driver.
+ * Most of the code is derived from mv643xx ethernet driver.
+ *
+ * Copyright (C) 2010 Marvell International Ltd.
+ * Sachin Sanap <ssanap@marvell.com>
+ * Philip Rakity <prakity@marvell.com>
+ * Mark Brown <markb@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/etherdevice.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/clk.h>
+#include <linux/phy.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/cacheflush.h>
+#include <linux/pxa168_eth.h>
+
+#define DRIVER_NAME "pxa168-eth"
+#define DRIVER_VERSION "0.3"
+
+/*
+ * Registers
+ */
+
+#define PHY_ADDRESS 0x0000
+#define SMI 0x0010
+#define PORT_CONFIG 0x0400
+#define PORT_CONFIG_EXT 0x0408
+#define PORT_COMMAND 0x0410
+#define PORT_STATUS 0x0418
+#define HTPR 0x0428
+#define SDMA_CONFIG 0x0440
+#define SDMA_CMD 0x0448
+#define INT_CAUSE 0x0450
+#define INT_W_CLEAR 0x0454
+#define INT_MASK 0x0458
+#define ETH_F_RX_DESC_0 0x0480
+#define ETH_C_RX_DESC_0 0x04A0
+#define ETH_C_TX_DESC_1 0x04E4
+
+/* smi register */
+#define SMI_BUSY (1 << 28) /* 0 - Write, 1 - Read */
+#define SMI_R_VALID (1 << 27) /* 0 - Write, 1 - Read */
+#define SMI_OP_W (0 << 26) /* Write operation */
+#define SMI_OP_R (1 << 26) /* Read operation */
+
+#define PHY_WAIT_ITERATIONS 10
+
+#define PXA168_ETH_PHY_ADDR_DEFAULT 0
+/* RX & TX descriptor command */
+#define BUF_OWNED_BY_DMA (1 << 31)
+
+/* RX descriptor status */
+#define RX_EN_INT (1 << 23)
+#define RX_FIRST_DESC (1 << 17)
+#define RX_LAST_DESC (1 << 16)
+#define RX_ERROR (1 << 15)
+
+/* TX descriptor command */
+#define TX_EN_INT (1 << 23)
+#define TX_GEN_CRC (1 << 22)
+#define TX_ZERO_PADDING (1 << 18)
+#define TX_FIRST_DESC (1 << 17)
+#define TX_LAST_DESC (1 << 16)
+#define TX_ERROR (1 << 15)
+
+/* SDMA_CMD */
+#define SDMA_CMD_AT (1 << 31)
+#define SDMA_CMD_TXDL (1 << 24)
+#define SDMA_CMD_TXDH (1 << 23)
+#define SDMA_CMD_AR (1 << 15)
+#define SDMA_CMD_ERD (1 << 7)
+
+/* Bit definitions of the Port Config Reg */
+#define PCR_HS (1 << 12)
+#define PCR_EN (1 << 7)
+#define PCR_PM (1 << 0)
+
+/* Bit definitions of the Port Config Extend Reg */
+#define PCXR_2BSM (1 << 28)
+#define PCXR_DSCP_EN (1 << 21)
+#define PCXR_MFL_1518 (0 << 14)
+#define PCXR_MFL_1536 (1 << 14)
+#define PCXR_MFL_2048 (2 << 14)
+#define PCXR_MFL_64K (3 << 14)
+#define PCXR_FLP (1 << 11)
+#define PCXR_PRIO_TX_OFF 3
+#define PCXR_TX_HIGH_PRI (7 << PCXR_PRIO_TX_OFF)
+
+/* Bit definitions of the SDMA Config Reg */
+#define SDCR_BSZ_OFF 12
+#define SDCR_BSZ8 (3 << SDCR_BSZ_OFF)
+#define SDCR_BSZ4 (2 << SDCR_BSZ_OFF)
+#define SDCR_BSZ2 (1 << SDCR_BSZ_OFF)
+#define SDCR_BSZ1 (0 << SDCR_BSZ_OFF)
+#define SDCR_BLMR (1 << 6)
+#define SDCR_BLMT (1 << 7)
+#define SDCR_RIFB (1 << 9)
+#define SDCR_RC_OFF 2
+#define SDCR_RC_MAX_RETRANS (0xf << SDCR_RC_OFF)
+
+/*
+ * Bit definitions of the Interrupt Cause Reg
+ * and Interrupt MASK Reg is the same
+ */
+#define ICR_RXBUF (1 << 0)
+#define ICR_TXBUF_H (1 << 2)
+#define ICR_TXBUF_L (1 << 3)
+#define ICR_TXEND_H (1 << 6)
+#define ICR_TXEND_L (1 << 7)
+#define ICR_RXERR (1 << 8)
+#define ICR_TXERR_H (1 << 10)
+#define ICR_TXERR_L (1 << 11)
+#define ICR_TX_UDR (1 << 13)
+#define ICR_MII_CH (1 << 28)
+
+#define ALL_INTS (ICR_TXBUF_H | ICR_TXBUF_L | ICR_TX_UDR |\
+ ICR_TXERR_H | ICR_TXERR_L |\
+ ICR_TXEND_H | ICR_TXEND_L |\
+ ICR_RXBUF | ICR_RXERR | ICR_MII_CH)
+
+#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
+
+#define NUM_RX_DESCS 64
+#define NUM_TX_DESCS 64
+
+#define HASH_ADD 0
+#define HASH_DELETE 1
+#define HASH_ADDR_TABLE_SIZE 0x4000 /* 16K (1/2K address - PCR_HS == 1) */
+#define HOP_NUMBER 12
+
+/* Bit definitions for Port status */
+#define PORT_SPEED_100 (1 << 0)
+#define FULL_DUPLEX (1 << 1)
+#define FLOW_CONTROL_ENABLED (1 << 2)
+#define LINK_UP (1 << 3)
+
+/* Bit definitions for work to be done */
+#define WORK_LINK (1 << 0)
+#define WORK_TX_DONE (1 << 1)
+
+/*
+ * Misc definitions.
+ */
+#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
+
+struct rx_desc {
+ u32 cmd_sts; /* Descriptor command status */
+ u16 byte_cnt; /* Descriptor buffer byte count */
+ u16 buf_size; /* Buffer size */
+ u32 buf_ptr; /* Descriptor buffer pointer */
+ u32 next_desc_ptr; /* Next descriptor pointer */
+};
+
+struct tx_desc {
+ u32 cmd_sts; /* Command/status field */
+ u16 reserved;
+ u16 byte_cnt; /* buffer byte count */
+ u32 buf_ptr; /* pointer to buffer for this descriptor */
+ u32 next_desc_ptr; /* Pointer to next descriptor */
+};
+
+struct pxa168_eth_private {
+ int port_num; /* User Ethernet port number */
+
+ int rx_resource_err; /* Rx ring resource error flag */
+
+ /* Next available and first returning Rx resource */
+ int rx_curr_desc_q, rx_used_desc_q;
+
+ /* Next available and first returning Tx resource */
+ int tx_curr_desc_q, tx_used_desc_q;
+
+ struct rx_desc *p_rx_desc_area;
+ dma_addr_t rx_desc_dma;
+ int rx_desc_area_size;
+ struct sk_buff **rx_skb;
+
+ struct tx_desc *p_tx_desc_area;
+ dma_addr_t tx_desc_dma;
+ int tx_desc_area_size;
+ struct sk_buff **tx_skb;
+
+ struct work_struct tx_timeout_task;
+
+ struct net_device *dev;
+ struct napi_struct napi;
+ u8 work_todo;
+ int skb_size;
+
+ struct net_device_stats stats;
+ /* Size of Tx Ring per queue */
+ int tx_ring_size;
+ /* Number of tx descriptors in use */
+ int tx_desc_count;
+ /* Size of Rx Ring per queue */
+ int rx_ring_size;
+ /* Number of rx descriptors in use */
+ int rx_desc_count;
+
+ /*
+ * Used in case RX Ring is empty, which can occur when
+ * system does not have resources (skb's)
+ */
+ struct timer_list timeout;
+ struct mii_bus *smi_bus;
+ struct phy_device *phy;
+
+ /* clock */
+ struct clk *clk;
+ struct pxa168_eth_platform_data *pd;
+ /*
+ * Ethernet controller base address.
+ */
+ void __iomem *base;
+
+ /* Pointer to the hardware address filter table */
+ void *htpr;
+ dma_addr_t htpr_dma;
+};
+
+struct addr_table_entry {
+ __le32 lo;
+ __le32 hi;
+};
+
+/* Bit fields of a Hash Table Entry */
+enum hash_table_entry {
+ HASH_ENTRY_VALID = 1,
+ SKIP = 2,
+ HASH_ENTRY_RECEIVE_DISCARD = 4,
+ HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
+};
+
+static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+static int pxa168_init_hw(struct pxa168_eth_private *pep);
+static void eth_port_reset(struct net_device *dev);
+static void eth_port_start(struct net_device *dev);
+static int pxa168_eth_open(struct net_device *dev);
+static int pxa168_eth_stop(struct net_device *dev);
+static int ethernet_phy_setup(struct net_device *dev);
+
+static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
+{
+ return readl(pep->base + offset);
+}
+
+static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
+{
+ writel(data, pep->base + offset);
+}
+
+static void abort_dma(struct pxa168_eth_private *pep)
+{
+ int delay;
+ int max_retries = 40;
+
+ do {
+ wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
+ udelay(100);
+
+ delay = 10;
+ while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
+ && delay-- > 0) {
+ udelay(10);
+ }
+ } while (max_retries-- > 0 && delay <= 0);
+
+	if (max_retries <= 0)
+		printk(KERN_ERR "%s: DMA stuck\n", __func__);
+}
+
+static int ethernet_phy_get(struct pxa168_eth_private *pep)
+{
+ unsigned int reg_data;
+
+ reg_data = rdl(pep, PHY_ADDRESS);
+
+ return (reg_data >> (5 * pep->port_num)) & 0x1f;
+}
+
+static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
+{
+ u32 reg_data;
+ int addr_shift = 5 * pep->port_num;
+
+ reg_data = rdl(pep, PHY_ADDRESS);
+ reg_data &= ~(0x1f << addr_shift);
+ reg_data |= (phy_addr & 0x1f) << addr_shift;
+ wrl(pep, PHY_ADDRESS, reg_data);
+}
+
+static void ethernet_phy_reset(struct pxa168_eth_private *pep)
+{
+ int data;
+
+ data = phy_read(pep->phy, MII_BMCR);
+ if (data < 0)
+ return;
+
+ data |= BMCR_RESET;
+ if (phy_write(pep->phy, MII_BMCR, data) < 0)
+ return;
+
+ do {
+ data = phy_read(pep->phy, MII_BMCR);
+ } while (data >= 0 && data & BMCR_RESET);
+}
+
+static void rxq_refill(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct rx_desc *p_used_rx_desc;
+ int used_rx_desc;
+
+ while (pep->rx_desc_count < pep->rx_ring_size) {
+ int size;
+
+ skb = dev_alloc_skb(pep->skb_size);
+ if (!skb)
+ break;
+ if (SKB_DMA_REALIGN)
+ skb_reserve(skb, SKB_DMA_REALIGN);
+ pep->rx_desc_count++;
+ /* Get 'used' Rx descriptor */
+ used_rx_desc = pep->rx_used_desc_q;
+ p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
+ size = skb->end - skb->data;
+ p_used_rx_desc->buf_ptr = dma_map_single(NULL,
+ skb->data,
+ size,
+ DMA_FROM_DEVICE);
+ p_used_rx_desc->buf_size = size;
+ pep->rx_skb[used_rx_desc] = skb;
+
+ /* Return the descriptor to DMA ownership */
+ wmb();
+ p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
+ wmb();
+
+ /* Move the used descriptor pointer to the next descriptor */
+ pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;
+
+ /* Any Rx return cancels the Rx resource error status */
+ pep->rx_resource_err = 0;
+
+ skb_reserve(skb, ETH_HW_IP_ALIGN);
+ }
+
+	/*
+	 * If the RX ring is empty of skbs, set a timer to try allocating
+	 * again at a later time.
+	 */
+ if (pep->rx_desc_count == 0) {
+ pep->timeout.expires = jiffies + (HZ / 10);
+ add_timer(&pep->timeout);
+ }
+}
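+/*
+ * Ownership hand-off sketch (illustrative): a descriptor must be fully
+ * written before DMA may claim it, hence the barrier pairing used above:
+ *
+ *	desc->buf_ptr  = ...;			// fill the descriptor
+ *	desc->buf_size = ...;
+ *	wmb();					// order fills before hand-off
+ *	desc->cmd_sts  = BUF_OWNED_BY_DMA | RX_EN_INT;
+ *	wmb();					// publish before hw sees it
+ *
+ * rxq_process() performs the mirror-image read: it loads cmd_sts, then
+ * rmb(), then trusts the rest of the descriptor.
+ */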
+
+static inline void rxq_refill_timer_wrapper(unsigned long data)
+{
+ struct pxa168_eth_private *pep = (void *)data;
+ napi_schedule(&pep->napi);
+}
+
+static inline u8 flip_8_bits(u8 x)
+{
+ return (((x) & 0x01) << 3) | (((x) & 0x02) << 1)
+ | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3)
+ | (((x) & 0x10) << 3) | (((x) & 0x20) << 1)
+ | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3);
+}
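+/*
+ * Worked example (comment only): flip_8_bits() reverses the bit order
+ * inside each nibble independently.  For x = 0xB2 = 1011 0010b, the low
+ * nibble 0010 becomes 0100 and the high nibble 1011 becomes 1101, so the
+ * result is 0xD4.
+ */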
+
+static void nibble_swap_every_byte(unsigned char *mac_addr)
+{
+ int i;
+ for (i = 0; i < ETH_ALEN; i++) {
+ mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) |
+ ((mac_addr[i] & 0xf0) >> 4);
+ }
+}
+
+static void inverse_every_nibble(unsigned char *mac_addr)
+{
+ int i;
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = flip_8_bits(mac_addr[i]);
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * Calculate the hash table index for the given MAC address.
+ * Inputs
+ * mac_addr_orig - MAC address.
+ * Outputs
+ * return the calculated entry.
+ */
+static u32 hash_function(unsigned char *mac_addr_orig)
+{
+ u32 hash_result;
+ u32 addr0;
+ u32 addr1;
+ u32 addr2;
+ u32 addr3;
+ unsigned char mac_addr[ETH_ALEN];
+
+	/* Make a copy of the MAC address since we are going to perform
+	 * bit operations on it
+	 */
+ memcpy(mac_addr, mac_addr_orig, ETH_ALEN);
+
+ nibble_swap_every_byte(mac_addr);
+ inverse_every_nibble(mac_addr);
+
+ addr0 = (mac_addr[5] >> 2) & 0x3f;
+ addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
+ addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
+ addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);
+
+ hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
+ hash_result = hash_result & 0x07ff;
+ return hash_result;
+}
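+/*
+ * Sketch of how the index is used (illustrative): the 11-bit result
+ * addresses one of 0x800 (2048) 8-byte entries, which matches the 16kB
+ * HASH_ADDR_TABLE_SIZE used for the 1/2kB hash mode (PCR_HS set):
+ *
+ *	struct addr_table_entry *e =
+ *		(struct addr_table_entry *)pep->htpr + hash_function(mac);
+ *
+ * The nibble swap and per-nibble bit reversal above exist purely to
+ * reproduce the hardware's bit ordering before the XOR folding.
+ */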
+
+/*
+ * ----------------------------------------------------------------------------
+ * This function will add/delete an entry in the address table.
+ * Inputs
+ * pep - ETHERNET port private data.
+ * mac_addr - MAC address.
+ * skip - if 1, skip this address. Used when deleting an entry which is
+ *        part of a chain in the hash table. We can't just delete the
+ *        entry since that would break the chain. We need to defragment
+ *        the tables from time to time.
+ * rd - 0 Discard packet upon match.
+ *    - 1 Receive packet upon match.
+ * Outputs
+ * address table entry is added/deleted.
+ * 0 if success.
+ * -ENOSPC if table full
+ */
+static int add_del_hash_entry(struct pxa168_eth_private *pep,
+ unsigned char *mac_addr,
+ u32 rd, u32 skip, int del)
+{
+ struct addr_table_entry *entry, *start;
+ u32 new_high;
+ u32 new_low;
+ u32 i;
+
+ new_low = (((mac_addr[1] >> 4) & 0xf) << 15)
+ | (((mac_addr[1] >> 0) & 0xf) << 11)
+ | (((mac_addr[0] >> 4) & 0xf) << 7)
+ | (((mac_addr[0] >> 0) & 0xf) << 3)
+ | (((mac_addr[3] >> 4) & 0x1) << 31)
+ | (((mac_addr[3] >> 0) & 0xf) << 27)
+ | (((mac_addr[2] >> 4) & 0xf) << 23)
+ | (((mac_addr[2] >> 0) & 0xf) << 19)
+ | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT)
+ | HASH_ENTRY_VALID;
+
+ new_high = (((mac_addr[5] >> 4) & 0xf) << 15)
+ | (((mac_addr[5] >> 0) & 0xf) << 11)
+ | (((mac_addr[4] >> 4) & 0xf) << 7)
+ | (((mac_addr[4] >> 0) & 0xf) << 3)
+ | (((mac_addr[3] >> 5) & 0x7) << 0);
+
+ /*
+ * Pick the appropriate table, start scanning for free/reusable
+ * entries at the index obtained by hashing the specified MAC address
+ */
+ start = (struct addr_table_entry *)(pep->htpr);
+ entry = start + hash_function(mac_addr);
+ for (i = 0; i < HOP_NUMBER; i++) {
+ if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
+ break;
+ } else {
+ /* if same address put in same position */
+ if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
+ (new_low & 0xfffffff8)) &&
+ (le32_to_cpu(entry->hi) == new_high)) {
+ break;
+ }
+ }
+ if (entry == start + 0x7ff)
+ entry = start;
+ else
+ entry++;
+ }
+
+ if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) &&
+ (le32_to_cpu(entry->hi) != new_high) && del)
+ return 0;
+
+ if (i == HOP_NUMBER) {
+ if (!del) {
+ printk(KERN_INFO "%s: table section is full, need to "
+ "move to 16kB implementation?\n",
+ __FILE__);
+ return -ENOSPC;
+ } else
+ return 0;
+ }
+
+ /*
+ * Update the selected entry
+ */
+ if (del) {
+ entry->hi = 0;
+ entry->lo = 0;
+ } else {
+ entry->hi = cpu_to_le32(new_high);
+ entry->lo = cpu_to_le32(new_low);
+ }
+
+ return 0;
+}
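+/*
+ * Collision handling note (illustrative): the table is open-addressed.
+ * Lookup starts at hash_function(mac_addr) and linearly probes at most
+ * HOP_NUMBER (12) slots, wrapping from entry 0x7ff back to entry 0, and
+ * stops at the first invalid slot or at a slot already holding the same
+ * address.  A full 12-slot chain with no match yields -ENOSPC on add.
+ */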
+
+/*
+ * ----------------------------------------------------------------------------
+ * Create an address table entry from the MAC address info
+ * found in the specified net_device struct
+ *
+ * Input : pointer to ethernet interface network device structure
+ * Output : N/A
+ */
+static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
+ unsigned char *oaddr,
+ unsigned char *addr)
+{
+ /* Delete old entry */
+ if (oaddr)
+ add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);
+ /* Add new entry */
+ add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
+}
+
+static int init_hash_table(struct pxa168_eth_private *pep)
+{
+	/*
+	 * Hardware expects the CPU to build a hash table based on a
+	 * predefined hash function and populate it based on hardware
+	 * addresses. The location of the hash table is identified by a
+	 * 32-bit pointer stored in the HTPR internal register. Two possible
+	 * sizes exist for the hash table: 8kB (256kB of DRAM required
+	 * (4 x 64 kB banks)) and 1/2kB (16kB of DRAM required
+	 * (4 x 4 kB banks)). We currently only support 1/2kB.
+	 */
+	/* TODO: Add support for the 8kB hash table and an alternative hash
+	 * function. The driver can dynamically switch to them if the 1/2kB
+	 * hash table is full.
+	 */
+ if (pep->htpr == NULL) {
+ pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
+ HASH_ADDR_TABLE_SIZE,
+ &pep->htpr_dma, GFP_KERNEL);
+ if (pep->htpr == NULL)
+ return -ENOMEM;
+ }
+ memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
+ wrl(pep, HTPR, pep->htpr_dma);
+ return 0;
+}
+
+static void pxa168_eth_set_rx_mode(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ u32 val;
+
+ val = rdl(pep, PORT_CONFIG);
+ if (dev->flags & IFF_PROMISC)
+ val |= PCR_PM;
+ else
+ val &= ~PCR_PM;
+ wrl(pep, PORT_CONFIG, val);
+
+ /*
+ * Remove the old list of MAC address and add dev->addr
+ * and multicast address.
+ */
+ memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
+ update_hash_table_mac_address(pep, NULL, dev->dev_addr);
+
+ netdev_for_each_mc_addr(ha, dev)
+ update_hash_table_mac_address(pep, NULL, ha->addr);
+}
+
+static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sa = addr;
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ unsigned char oldMac[ETH_ALEN];
+
+ if (!is_valid_ether_addr(sa->sa_data))
+ return -EINVAL;
+ memcpy(oldMac, dev->dev_addr, ETH_ALEN);
+ memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+ netif_addr_lock_bh(dev);
+ update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
+ netif_addr_unlock_bh(dev);
+ return 0;
+}
+
+static void eth_port_start(struct net_device *dev)
+{
+ unsigned int val = 0;
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ int tx_curr_desc, rx_curr_desc;
+
+ /* Perform PHY reset, if there is a PHY. */
+ if (pep->phy != NULL) {
+ struct ethtool_cmd cmd;
+
+ pxa168_get_settings(pep->dev, &cmd);
+ ethernet_phy_reset(pep);
+ pxa168_set_settings(pep->dev, &cmd);
+ }
+
+ /* Assignment of Tx CTRP of given queue */
+ tx_curr_desc = pep->tx_curr_desc_q;
+ wrl(pep, ETH_C_TX_DESC_1,
+ (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));
+
+ /* Assignment of Rx CRDP of given queue */
+ rx_curr_desc = pep->rx_curr_desc_q;
+ wrl(pep, ETH_C_RX_DESC_0,
+ (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
+
+ wrl(pep, ETH_F_RX_DESC_0,
+ (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
+
+ /* Clear all interrupts */
+ wrl(pep, INT_CAUSE, 0);
+
+ /* Enable all interrupts for receive, transmit and error. */
+ wrl(pep, INT_MASK, ALL_INTS);
+
+ val = rdl(pep, PORT_CONFIG);
+ val |= PCR_EN;
+ wrl(pep, PORT_CONFIG, val);
+
+ /* Start RX DMA engine */
+ val = rdl(pep, SDMA_CMD);
+ val |= SDMA_CMD_ERD;
+ wrl(pep, SDMA_CMD, val);
+}
+
+static void eth_port_reset(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ unsigned int val = 0;
+
+ /* Stop all interrupts for receive, transmit and error. */
+ wrl(pep, INT_MASK, 0);
+
+ /* Clear all interrupts */
+ wrl(pep, INT_CAUSE, 0);
+
+ /* Stop RX DMA */
+ val = rdl(pep, SDMA_CMD);
+ val &= ~SDMA_CMD_ERD; /* abort dma command */
+
+ /* Abort any transmit and receive operations and put DMA
+ * in idle state.
+ */
+ abort_dma(pep);
+
+ /* Disable port */
+ val = rdl(pep, PORT_CONFIG);
+ val &= ~PCR_EN;
+ wrl(pep, PORT_CONFIG, val);
+}
+
+/*
+ * txq_reclaim - Free the tx desc data for completed descriptors
+ * If force is non-zero, frees uncompleted descriptors as well
+ */
+static int txq_reclaim(struct net_device *dev, int force)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct tx_desc *desc;
+ u32 cmd_sts;
+ struct sk_buff *skb;
+ int tx_index;
+ dma_addr_t addr;
+ int count;
+ int released = 0;
+
+ netif_tx_lock(dev);
+
+ pep->work_todo &= ~WORK_TX_DONE;
+ while (pep->tx_desc_count > 0) {
+ tx_index = pep->tx_used_desc_q;
+ desc = &pep->p_tx_desc_area[tx_index];
+ cmd_sts = desc->cmd_sts;
+		if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
+			if (released == 0)
+				released = -1;
+			goto txq_reclaim_end;
+		}
+ }
+ pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
+ pep->tx_desc_count--;
+ addr = desc->buf_ptr;
+ count = desc->byte_cnt;
+ skb = pep->tx_skb[tx_index];
+ if (skb)
+ pep->tx_skb[tx_index] = NULL;
+
+ if (cmd_sts & TX_ERROR) {
+ if (net_ratelimit())
+ printk(KERN_ERR "%s: Error in TX\n", dev->name);
+ dev->stats.tx_errors++;
+ }
+ dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
+ if (skb)
+ dev_kfree_skb_irq(skb);
+ released++;
+ }
+txq_reclaim_end:
+ netif_tx_unlock(dev);
+ return released;
+}
+
+static void pxa168_eth_tx_timeout(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ printk(KERN_INFO "%s: TX timeout desc_count %d\n",
+ dev->name, pep->tx_desc_count);
+
+ schedule_work(&pep->tx_timeout_task);
+}
+
+static void pxa168_eth_tx_timeout_task(struct work_struct *work)
+{
+ struct pxa168_eth_private *pep = container_of(work,
+ struct pxa168_eth_private,
+ tx_timeout_task);
+ struct net_device *dev = pep->dev;
+ pxa168_eth_stop(dev);
+ pxa168_eth_open(dev);
+}
+
+static int rxq_process(struct net_device *dev, int budget)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ unsigned int received_packets = 0;
+ struct sk_buff *skb;
+
+ while (budget-- > 0) {
+ int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
+ struct rx_desc *rx_desc;
+ unsigned int cmd_sts;
+
+ /* Do not process Rx ring in case of Rx ring resource error */
+ if (pep->rx_resource_err)
+ break;
+ rx_curr_desc = pep->rx_curr_desc_q;
+ rx_used_desc = pep->rx_used_desc_q;
+ rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
+ cmd_sts = rx_desc->cmd_sts;
+ rmb();
+ if (cmd_sts & (BUF_OWNED_BY_DMA))
+ break;
+ skb = pep->rx_skb[rx_curr_desc];
+ pep->rx_skb[rx_curr_desc] = NULL;
+
+ rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
+ pep->rx_curr_desc_q = rx_next_curr_desc;
+
+ /* Rx descriptors exhausted. */
+ /* Set the Rx ring resource error flag */
+ if (rx_next_curr_desc == rx_used_desc)
+ pep->rx_resource_err = 1;
+ pep->rx_desc_count--;
+ dma_unmap_single(NULL, rx_desc->buf_ptr,
+ rx_desc->buf_size,
+ DMA_FROM_DEVICE);
+ received_packets++;
+ /*
+ * Update statistics.
+ * Note byte count includes 4 byte CRC count
+ */
+ stats->rx_packets++;
+ stats->rx_bytes += rx_desc->byte_cnt;
+		/*
+		 * If a packet was received without the first/last bits set,
+		 * or with the error summary bit set, the packet needs to be
+		 * dropped.
+		 */
+ if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
+ (RX_FIRST_DESC | RX_LAST_DESC))
+ || (cmd_sts & RX_ERROR)) {
+
+ stats->rx_dropped++;
+ if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
+ (RX_FIRST_DESC | RX_LAST_DESC)) {
+ if (net_ratelimit())
+ printk(KERN_ERR
+ "%s: Rx pkt on multiple desc\n",
+ dev->name);
+ }
+ if (cmd_sts & RX_ERROR)
+ stats->rx_errors++;
+ dev_kfree_skb_irq(skb);
+ } else {
+ /*
+ * The -4 is for the CRC in the trailer of the
+ * received packet
+ */
+ skb_put(skb, rx_desc->byte_cnt - 4);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_receive_skb(skb);
+ }
+ }
+ /* Fill RX ring with skb's */
+ rxq_refill(dev);
+ return received_packets;
+}
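+/*
+ * Note on rx_resource_err (illustrative): when the current descriptor
+ * pointer catches up with the used pointer the ring is exhausted; the
+ * flag stops further RX processing until rxq_refill() returns at least
+ * one descriptor to DMA ownership and clears it.  The -4 in skb_put()
+ * above strips the 4-byte CRC that the hardware leaves in the buffer.
+ */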
+
+static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
+ struct net_device *dev)
+{
+ u32 icr;
+ int ret = 0;
+
+ icr = rdl(pep, INT_CAUSE);
+ if (icr == 0)
+ return IRQ_NONE;
+
+ wrl(pep, INT_CAUSE, ~icr);
+ if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) {
+ pep->work_todo |= WORK_TX_DONE;
+ ret = 1;
+ }
+ if (icr & ICR_RXBUF)
+ ret = 1;
+ if (icr & ICR_MII_CH) {
+ pep->work_todo |= WORK_LINK;
+ ret = 1;
+ }
+ return ret;
+}
+
+static void handle_link_event(struct pxa168_eth_private *pep)
+{
+ struct net_device *dev = pep->dev;
+ u32 port_status;
+ int speed;
+ int duplex;
+ int fc;
+
+ port_status = rdl(pep, PORT_STATUS);
+ if (!(port_status & LINK_UP)) {
+ if (netif_carrier_ok(dev)) {
+ printk(KERN_INFO "%s: link down\n", dev->name);
+ netif_carrier_off(dev);
+ txq_reclaim(dev, 1);
+ }
+ return;
+ }
+ if (port_status & PORT_SPEED_100)
+ speed = 100;
+ else
+ speed = 10;
+
+ duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
+ fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
+ printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
+ "flow control %sabled\n", dev->name,
+ speed, duplex ? "full" : "half", fc ? "en" : "dis");
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+}
+
+static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ if (unlikely(!pxa168_eth_collect_events(pep, dev)))
+ return IRQ_NONE;
+ /* Disable interrupts */
+ wrl(pep, INT_MASK, 0);
+ napi_schedule(&pep->napi);
+ return IRQ_HANDLED;
+}
+
+static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
+{
+ int skb_size;
+
+ /*
+ * Reserve 2+14 bytes for an ethernet header (the hardware
+ * automatically prepends 2 bytes of dummy data to each
+ * received packet), 16 bytes for up to four VLAN tags, and
+ * 4 bytes for the trailing FCS -- 36 bytes total.
+ */
+ skb_size = pep->dev->mtu + 36;
+
+ /*
+ * Make sure that the skb size is a multiple of 8 bytes, as
+ * the lower three bits of the receive descriptor's buffer
+ * size field are ignored by the hardware.
+ */
+ pep->skb_size = (skb_size + 7) & ~7;
+
+ /*
+ * If NET_SKB_PAD is smaller than a cache line,
+ * netdev_alloc_skb() will cause skb->data to be misaligned
+ * to a cache line boundary. If this is the case, include
+ * some extra space to allow re-aligning the data area.
+ */
+ pep->skb_size += SKB_DMA_REALIGN;
+
+}
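+/*
+ * Worked example (comment only, assuming MTU 1500): 1500 + 36 = 1536,
+ * which is already a multiple of 8, so pep->skb_size becomes
+ * 1536 + SKB_DMA_REALIGN.  SKB_DMA_REALIGN is 0 whenever NET_SKB_PAD is
+ * a multiple of the cache line size, so on many configurations the final
+ * value is just 1536 and set_port_config_ext() selects PCXR_MFL_1536.
+ */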
+
+static int set_port_config_ext(struct pxa168_eth_private *pep)
+{
+ int skb_size;
+
+ pxa168_eth_recalc_skb_size(pep);
+ if (pep->skb_size <= 1518)
+ skb_size = PCXR_MFL_1518;
+ else if (pep->skb_size <= 1536)
+ skb_size = PCXR_MFL_1536;
+ else if (pep->skb_size <= 2048)
+ skb_size = PCXR_MFL_2048;
+ else
+ skb_size = PCXR_MFL_64K;
+
+ /* Extended Port Configuration */
+ wrl(pep,
+ PORT_CONFIG_EXT, PCXR_2BSM | /* Two byte prefix aligns IP hdr */
+ PCXR_DSCP_EN | /* Enable DSCP in IP */
+ skb_size | PCXR_FLP | /* do not force link pass */
+ PCXR_TX_HIGH_PRI); /* Transmit - high priority queue */
+
+ return 0;
+}
+
+static int pxa168_init_hw(struct pxa168_eth_private *pep)
+{
+ int err = 0;
+
+ /* Disable interrupts */
+ wrl(pep, INT_MASK, 0);
+ wrl(pep, INT_CAUSE, 0);
+ /* Write to ICR to clear interrupts. */
+ wrl(pep, INT_W_CLEAR, 0);
+ /* Abort any transmit and receive operations and put DMA
+ * in idle state.
+ */
+ abort_dma(pep);
+ /* Initialize address hash table */
+ err = init_hash_table(pep);
+ if (err)
+ return err;
+ /* SDMA configuration */
+ wrl(pep, SDMA_CONFIG, SDCR_BSZ8 | /* Burst size = 32 bytes */
+ SDCR_RIFB | /* Rx interrupt on frame */
+ SDCR_BLMT | /* Little endian transmit */
+ SDCR_BLMR | /* Little endian receive */
+ SDCR_RC_MAX_RETRANS); /* Max retransmit count */
+ /* Port Configuration */
+ wrl(pep, PORT_CONFIG, PCR_HS); /* Hash size is 1/2kb */
+ set_port_config_ext(pep);
+
+ return err;
+}
+
+static int rxq_init(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct rx_desc *p_rx_desc;
+ int size = 0, i = 0;
+ int rx_desc_num = pep->rx_ring_size;
+
+ /* Allocate RX skb rings */
+ pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
+ GFP_KERNEL);
+ if (!pep->rx_skb) {
+ printk(KERN_ERR "%s: Cannot alloc RX skb ring\n", dev->name);
+ return -ENOMEM;
+ }
+ /* Allocate RX ring */
+ pep->rx_desc_count = 0;
+ size = pep->rx_ring_size * sizeof(struct rx_desc);
+ pep->rx_desc_area_size = size;
+ pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+ &pep->rx_desc_dma, GFP_KERNEL);
+ if (!pep->p_rx_desc_area) {
+ printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n",
+ dev->name, size);
+ goto out;
+ }
+ memset((void *)pep->p_rx_desc_area, 0, size);
+ /* initialize the next_desc_ptr links in the Rx descriptors ring */
+ p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area;
+ for (i = 0; i < rx_desc_num; i++) {
+ p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
+ ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
+ }
+ /* Save Rx desc pointer to driver struct. */
+ pep->rx_curr_desc_q = 0;
+ pep->rx_used_desc_q = 0;
+ pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
+ return 0;
+out:
+ kfree(pep->rx_skb);
+ return -ENOMEM;
+}
+
+static void rxq_deinit(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ int curr;
+
+ /* Free preallocated skb's on RX rings */
+ for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
+ if (pep->rx_skb[curr]) {
+ dev_kfree_skb(pep->rx_skb[curr]);
+ pep->rx_desc_count--;
+ }
+ }
+	if (pep->rx_desc_count)
+		printk(KERN_ERR
+		       "Error in freeing Rx ring: %d skbs still allocated\n",
+		       pep->rx_desc_count);
+ /* Free RX ring */
+ if (pep->p_rx_desc_area)
+ dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
+ pep->p_rx_desc_area, pep->rx_desc_dma);
+ kfree(pep->rx_skb);
+}
+
+static int txq_init(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct tx_desc *p_tx_desc;
+ int size = 0, i = 0;
+ int tx_desc_num = pep->tx_ring_size;
+
+ pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
+ GFP_KERNEL);
+ if (!pep->tx_skb) {
+ printk(KERN_ERR "%s: Cannot alloc TX skb ring\n", dev->name);
+ return -ENOMEM;
+ }
+ /* Allocate TX ring */
+ pep->tx_desc_count = 0;
+ size = pep->tx_ring_size * sizeof(struct tx_desc);
+ pep->tx_desc_area_size = size;
+ pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+ &pep->tx_desc_dma, GFP_KERNEL);
+ if (!pep->p_tx_desc_area) {
+ printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
+ dev->name, size);
+ goto out;
+ }
+ memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
+ /* Initialize the next_desc_ptr links in the Tx descriptors ring */
+ p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area;
+ for (i = 0; i < tx_desc_num; i++) {
+ p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
+ ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
+ }
+ pep->tx_curr_desc_q = 0;
+ pep->tx_used_desc_q = 0;
+ pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
+ return 0;
+out:
+ kfree(pep->tx_skb);
+ return -ENOMEM;
+}
+
+static void txq_deinit(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ /* Free outstanding skb's on TX ring */
+ txq_reclaim(dev, 1);
+ BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);
+ /* Free TX ring */
+ if (pep->p_tx_desc_area)
+ dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,
+ pep->p_tx_desc_area, pep->tx_desc_dma);
+ kfree(pep->tx_skb);
+}
+
+static int pxa168_eth_open(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ int err;
+
+ err = request_irq(dev->irq, pxa168_eth_int_handler,
+ IRQF_DISABLED, dev->name, dev);
+ if (err) {
+ dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
+ return -EAGAIN;
+ }
+ pep->rx_resource_err = 0;
+ err = rxq_init(dev);
+ if (err != 0)
+ goto out_free_irq;
+ err = txq_init(dev);
+ if (err != 0)
+ goto out_free_rx_skb;
+ pep->rx_used_desc_q = 0;
+ pep->rx_curr_desc_q = 0;
+
+ /* Fill RX ring with skb's */
+ rxq_refill(dev);
+ pep->rx_used_desc_q = 0;
+ pep->rx_curr_desc_q = 0;
+ netif_carrier_off(dev);
+ eth_port_start(dev);
+ napi_enable(&pep->napi);
+ return 0;
+out_free_rx_skb:
+ rxq_deinit(dev);
+out_free_irq:
+ free_irq(dev->irq, dev);
+ return err;
+}
+
+static int pxa168_eth_stop(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ eth_port_reset(dev);
+
+ /* Disable interrupts */
+ wrl(pep, INT_MASK, 0);
+ wrl(pep, INT_CAUSE, 0);
+ /* Write to ICR to clear interrupts. */
+ wrl(pep, INT_W_CLEAR, 0);
+ napi_disable(&pep->napi);
+ del_timer_sync(&pep->timeout);
+ netif_carrier_off(dev);
+ free_irq(dev->irq, dev);
+ rxq_deinit(dev);
+ txq_deinit(dev);
+
+ return 0;
+}
+
+static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
+{
+ int retval;
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ if ((mtu > 9500) || (mtu < 68))
+ return -EINVAL;
+
+ dev->mtu = mtu;
+ retval = set_port_config_ext(pep);
+
+ if (!netif_running(dev))
+ return 0;
+
+ /*
+ * Stop and then re-open the interface. This will allocate RX
+ * skbs of the new MTU.
+ * There is a possible danger that the open will not succeed,
+ * due to memory being full.
+ */
+ pxa168_eth_stop(dev);
+ if (pxa168_eth_open(dev)) {
+ dev_printk(KERN_ERR, &dev->dev,
+ "fatal error on re-opening device after "
+ "MTU change\n");
+ }
+
+ return 0;
+}
+
+static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
+{
+ int tx_desc_curr;
+
+ tx_desc_curr = pep->tx_curr_desc_q;
+ pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;
+ BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);
+ pep->tx_desc_count++;
+
+ return tx_desc_curr;
+}
+
+static int pxa168_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct pxa168_eth_private *pep =
+ container_of(napi, struct pxa168_eth_private, napi);
+ struct net_device *dev = pep->dev;
+ int work_done = 0;
+
+ if (unlikely(pep->work_todo & WORK_LINK)) {
+ pep->work_todo &= ~(WORK_LINK);
+ handle_link_event(pep);
+ }
+	/*
+	 * We call txq_reclaim on every poll, since interrupts are disabled
+	 * while NAPI runs and we would otherwise miss the TX_DONE interrupt,
+	 * which is not latched in the interrupt status register.
+	 */
+ txq_reclaim(dev, 0);
+ if (netif_queue_stopped(dev)
+ && pep->tx_ring_size - pep->tx_desc_count > 1) {
+ netif_wake_queue(dev);
+ }
+ work_done = rxq_process(dev, budget);
+ if (work_done < budget) {
+ napi_complete(napi);
+ wrl(pep, INT_MASK, ALL_INTS);
+ }
+
+ return work_done;
+}
+
+static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct tx_desc *desc;
+ int tx_index;
+ int length;
+
+ tx_index = eth_alloc_tx_desc_index(pep);
+ desc = &pep->p_tx_desc_area[tx_index];
+ length = skb->len;
+ pep->tx_skb[tx_index] = skb;
+ desc->byte_cnt = length;
+ desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+ wmb();
+ desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
+ TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
+ wmb();
+ wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);
+
+ stats->tx_bytes += skb->len;
+ stats->tx_packets++;
+ dev->trans_start = jiffies;
+ if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
+ /* We handled the current skb, but now we are out of space.*/
+ netif_stop_queue(dev);
+ }
+
+ return NETDEV_TX_OK;
+}
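+/*
+ * TX hand-off sketch (illustrative): each packet uses exactly one
+ * descriptor, marked FIRST and LAST in a single cmd_sts write.  The wmb()
+ * before the ownership bit guarantees the DMA engine never sees
+ * BUF_OWNED_BY_DMA with a stale buf_ptr/byte_cnt, and the one after it
+ * orders the descriptor write before the SDMA_CMD doorbell that kicks
+ * the high-priority TX queue (SDMA_CMD_TXDH).
+ */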
+
+static int smi_wait_ready(struct pxa168_eth_private *pep)
+{
+ int i = 0;
+
+ /* wait for the SMI register to become available */
+ for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
+ if (i == PHY_WAIT_ITERATIONS)
+ return -ETIMEDOUT;
+ msleep(10);
+ }
+
+ return 0;
+}
+
+static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct pxa168_eth_private *pep = bus->priv;
+ int i = 0;
+ int val;
+
+ if (smi_wait_ready(pep)) {
+ printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
+ return -ETIMEDOUT;
+ }
+ wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
+ /* now wait for the data to be valid */
+ for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
+ if (i == PHY_WAIT_ITERATIONS) {
+ printk(KERN_WARNING
+ "pxa168_eth: SMI bus read not valid\n");
+ return -ENODEV;
+ }
+ msleep(10);
+ }
+
+ return val & 0xffff;
+}
+
+static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
+ u16 value)
+{
+ struct pxa168_eth_private *pep = bus->priv;
+
+ if (smi_wait_ready(pep)) {
+ printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
+ SMI_OP_W | (value & 0xffff));
+
+ if (smi_wait_ready(pep)) {
+ printk(KERN_ERR "pxa168_eth: SMI bus busy timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
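+/*
+ * SMI register encoding sketch (illustrative, inferred from the
+ * accessors above):
+ *
+ *	bit  28      SMI_BUSY      (set while an operation is in flight)
+ *	bit  27      SMI_R_VALID   (read data is valid)
+ *	bit  26      SMI_OP_R / SMI_OP_W
+ *	bits 25..21  regnum        (regnum << 21)
+ *	bits 20..16  phy_addr      (phy_addr << 16)
+ *	bits 15..0   data
+ *
+ * A read waits for !SMI_BUSY, writes the address/op word, then polls for
+ * SMI_R_VALID before taking the low 16 data bits; a write waits for
+ * !SMI_BUSY both before and after issuing the word.
+ */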
+
+static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ if (pep->phy != NULL)
+ return phy_mii_ioctl(pep->phy, ifr, cmd);
+
+ return -EOPNOTSUPP;
+}
+
+static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)
+{
+ struct mii_bus *bus = pep->smi_bus;
+ struct phy_device *phydev;
+ int start;
+ int num;
+ int i;
+
+ if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) {
+ /* Scan entire range */
+ start = ethernet_phy_get(pep);
+ num = 32;
+ } else {
+ /* Use phy addr specific to platform */
+ start = phy_addr & 0x1f;
+ num = 1;
+ }
+ phydev = NULL;
+ for (i = 0; i < num; i++) {
+ int addr = (start + i) & 0x1f;
+ if (bus->phy_map[addr] == NULL)
+ mdiobus_scan(bus, addr);
+
+ if (phydev == NULL) {
+ phydev = bus->phy_map[addr];
+ if (phydev != NULL)
+ ethernet_phy_set_addr(pep, addr);
+ }
+ }
+
+ return phydev;
+}
+
+static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex)
+{
+ struct phy_device *phy = pep->phy;
+ ethernet_phy_reset(pep);
+
+ phy_attach(pep->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_MII);
+
+ if (speed == 0) {
+ phy->autoneg = AUTONEG_ENABLE;
+ phy->speed = 0;
+ phy->duplex = 0;
+ phy->supported &= PHY_BASIC_FEATURES;
+ phy->advertising = phy->supported | ADVERTISED_Autoneg;
+ } else {
+ phy->autoneg = AUTONEG_DISABLE;
+ phy->advertising = 0;
+ phy->speed = speed;
+ phy->duplex = duplex;
+ }
+ phy_start_aneg(phy);
+}
+
+static int ethernet_phy_setup(struct net_device *dev)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ if (pep->pd->init)
+ pep->pd->init();
+ pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f);
+ if (pep->phy != NULL)
+ phy_init(pep, pep->pd->speed, pep->pd->duplex);
+ update_hash_table_mac_address(pep, NULL, dev->dev_addr);
+
+ return 0;
+}
+
+static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+ int err;
+
+ err = phy_read_status(pep->phy);
+ if (err == 0)
+ err = phy_ethtool_gset(pep->phy, cmd);
+
+ return err;
+}
+
+static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ return phy_ethtool_sset(pep->phy, cmd);
+}
+
+static void pxa168_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strncpy(info->driver, DRIVER_NAME, 32);
+ strncpy(info->version, DRIVER_VERSION, 32);
+ strncpy(info->fw_version, "N/A", 32);
+ strncpy(info->bus_info, "N/A", 32);
+}
+
+static u32 pxa168_get_link(struct net_device *dev)
+{
+ return !!netif_carrier_ok(dev);
+}
+
+static const struct ethtool_ops pxa168_ethtool_ops = {
+ .get_settings = pxa168_get_settings,
+ .set_settings = pxa168_set_settings,
+ .get_drvinfo = pxa168_get_drvinfo,
+ .get_link = pxa168_get_link,
+};
+
+static const struct net_device_ops pxa168_eth_netdev_ops = {
+ .ndo_open = pxa168_eth_open,
+ .ndo_stop = pxa168_eth_stop,
+ .ndo_start_xmit = pxa168_eth_start_xmit,
+ .ndo_set_rx_mode = pxa168_eth_set_rx_mode,
+ .ndo_set_mac_address = pxa168_eth_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = pxa168_eth_do_ioctl,
+ .ndo_change_mtu = pxa168_eth_change_mtu,
+ .ndo_tx_timeout = pxa168_eth_tx_timeout,
+};
+
+static int pxa168_eth_probe(struct platform_device *pdev)
+{
+ struct pxa168_eth_private *pep = NULL;
+ struct net_device *dev = NULL;
+ struct resource *res;
+ struct clk *clk;
+ int err;
+
+ printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");
+
+ clk = clk_get(&pdev->dev, "MFUCLK");
+ if (IS_ERR(clk)) {
+ printk(KERN_ERR "%s: Fast Ethernet failed to get clock\n",
+ DRIVER_NAME);
+ return -ENODEV;
+ }
+ clk_enable(clk);
+
+ dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_clk;
+ }
+
+ platform_set_drvdata(pdev, dev);
+ pep = netdev_priv(dev);
+ pep->dev = dev;
+ pep->clk = clk;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ err = -ENODEV;
+ goto err_netdev;
+ }
+ pep->base = ioremap(res->start, res->end - res->start + 1);
+ if (pep->base == NULL) {
+ err = -ENOMEM;
+ goto err_netdev;
+ }
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ BUG_ON(!res);
+ dev->irq = res->start;
+ dev->netdev_ops = &pxa168_eth_netdev_ops;
+ dev->watchdog_timeo = 2 * HZ;
+ dev->base_addr = 0;
+ SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops);
+
+ INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
+
+	printk(KERN_INFO "%s: Using random MAC address\n", DRIVER_NAME);
+ random_ether_addr(dev->dev_addr);
+
+ pep->pd = pdev->dev.platform_data;
+ pep->rx_ring_size = NUM_RX_DESCS;
+ if (pep->pd->rx_queue_size)
+ pep->rx_ring_size = pep->pd->rx_queue_size;
+
+ pep->tx_ring_size = NUM_TX_DESCS;
+ if (pep->pd->tx_queue_size)
+ pep->tx_ring_size = pep->pd->tx_queue_size;
+
+ pep->port_num = pep->pd->port_number;
+ /* Hardware supports only 3 ports */
+ BUG_ON(pep->port_num > 2);
+ netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);
+
+ memset(&pep->timeout, 0, sizeof(struct timer_list));
+ init_timer(&pep->timeout);
+ pep->timeout.function = rxq_refill_timer_wrapper;
+ pep->timeout.data = (unsigned long)pep;
+
+ pep->smi_bus = mdiobus_alloc();
+ if (pep->smi_bus == NULL) {
+ err = -ENOMEM;
+ goto err_base;
+ }
+ pep->smi_bus->priv = pep;
+ pep->smi_bus->name = "pxa168_eth smi";
+ pep->smi_bus->read = pxa168_smi_read;
+ pep->smi_bus->write = pxa168_smi_write;
+ snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
+ pep->smi_bus->parent = &pdev->dev;
+ pep->smi_bus->phy_mask = 0xffffffff;
+ err = mdiobus_register(pep->smi_bus);
+ if (err)
+ goto err_free_mdio;
+
+ pxa168_init_hw(pep);
+ err = ethernet_phy_setup(dev);
+ if (err)
+ goto err_mdiobus;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ err = register_netdev(dev);
+ if (err)
+ goto err_mdiobus;
+ return 0;
+
+err_mdiobus:
+ mdiobus_unregister(pep->smi_bus);
+err_free_mdio:
+ mdiobus_free(pep->smi_bus);
+err_base:
+ iounmap(pep->base);
+err_netdev:
+ free_netdev(dev);
+err_clk:
+ clk_disable(clk);
+ clk_put(clk);
+ return err;
+}
+
+static int pxa168_eth_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct pxa168_eth_private *pep = netdev_priv(dev);
+
+ if (pep->htpr) {
+ dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
+ pep->htpr, pep->htpr_dma);
+ pep->htpr = NULL;
+ }
+ if (pep->clk) {
+ clk_disable(pep->clk);
+ clk_put(pep->clk);
+ pep->clk = NULL;
+ }
+ if (pep->phy != NULL)
+ phy_detach(pep->phy);
+
+ iounmap(pep->base);
+ pep->base = NULL;
+ mdiobus_unregister(pep->smi_bus);
+ mdiobus_free(pep->smi_bus);
+ unregister_netdev(dev);
+ flush_scheduled_work();
+ free_netdev(dev);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static void pxa168_eth_shutdown(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ eth_port_reset(dev);
+}
+
+#ifdef CONFIG_PM
+static int pxa168_eth_resume(struct platform_device *pdev)
+{
+ return -ENOSYS;
+}
+
+static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return -ENOSYS;
+}
+
+#else
+#define pxa168_eth_resume NULL
+#define pxa168_eth_suspend NULL
+#endif
+
+static struct platform_driver pxa168_eth_driver = {
+ .probe = pxa168_eth_probe,
+ .remove = pxa168_eth_remove,
+ .shutdown = pxa168_eth_shutdown,
+ .resume = pxa168_eth_resume,
+ .suspend = pxa168_eth_suspend,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init pxa168_init_module(void)
+{
+ return platform_driver_register(&pxa168_eth_driver);
+}
+
+static void __exit pxa168_cleanup_module(void)
+{
+ platform_driver_unregister(&pxa168_eth_driver);
+}
+
+module_init(pxa168_init_module);
+module_exit(pxa168_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
+MODULE_ALIAS("platform:pxa168_eth");
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 6168a130f33f..7496ed2c34ab 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2029,7 +2029,7 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
dma_unmap_len(lrg_buf_cb2, maplen),
PCI_DMA_FROMDEVICE);
prefetch(skb->data);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, qdev->ndev);
netif_receive_skb(skb);
@@ -2076,7 +2076,7 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
PCI_DMA_FROMDEVICE);
prefetch(skb2->data);
- skb2->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb2);
if (qdev->device_id == QL3022_DEVICE_ID) {
/*
* Copy the ethhdr from first buffer to second. This
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 970389331bbc..714ddf461d73 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -51,9 +51,11 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 7
-#define QLCNIC_LINUX_VERSIONID "5.0.7"
+#define _QLCNIC_LINUX_SUBVERSION 10
+#define QLCNIC_LINUX_VERSIONID "5.0.10"
#define QLCNIC_DRV_IDC_VER 0x01
+#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
+ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
@@ -148,6 +150,7 @@
#define DEFAULT_RCV_DESCRIPTORS_1G 2048
#define DEFAULT_RCV_DESCRIPTORS_10G 4096
+#define MAX_RDS_RINGS 2
#define get_next_index(index, length) \
(((index) + 1) & ((length) - 1))
@@ -172,7 +175,7 @@
((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
- ((_desc)->flags_opcode = \
+ ((_desc)->flags_opcode |= \
cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
@@ -221,7 +224,8 @@ struct rcv_desc {
#define QLCNIC_LRO_DESC 0x12
/* for status field in status_desc */
-#define STATUS_CKSUM_OK (2)
+#define STATUS_CKSUM_LOOP 0
+#define STATUS_CKSUM_OK 2
/* owner bits of status_desc */
#define STATUS_OWNER_HOST (0x1ULL << 56)
@@ -555,6 +559,8 @@ struct qlcnic_recv_context {
#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026
#define QLCNIC_CDRP_CMD_SET_PORTMIRRORING 0x00000027
#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH 0x00000028
+#define QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG 0x00000029
+#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATS 0x0000002a
#define QLCNIC_RCODE_SUCCESS 0
#define QLCNIC_RCODE_TIMEOUT 17
@@ -717,6 +723,8 @@ struct qlcnic_cardrsp_tx_ctx {
#define QLCNIC_MAC_NOOP 0
#define QLCNIC_MAC_ADD 1
#define QLCNIC_MAC_DEL 2
+#define QLCNIC_MAC_VLAN_ADD 3
+#define QLCNIC_MAC_VLAN_DEL 4
struct qlcnic_mac_list_s {
struct list_head list;
@@ -893,9 +901,14 @@ struct qlcnic_mac_req {
#define QLCNIC_MSI_ENABLED 0x02
#define QLCNIC_MSIX_ENABLED 0x04
#define QLCNIC_LRO_ENABLED 0x08
+#define QLCNIC_LRO_DISABLED 0x00
#define QLCNIC_BRIDGE_ENABLED 0X10
#define QLCNIC_DIAG_ENABLED 0x20
#define QLCNIC_ESWITCH_ENABLED 0x40
+#define QLCNIC_ADAPTER_INITIALIZED 0x80
+#define QLCNIC_TAGGING_ENABLED 0x100
+#define QLCNIC_MACSPOOF 0x200
+#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
@@ -916,6 +929,22 @@ struct qlcnic_mac_req {
#define QLCNIC_INTERRUPT_TEST 1
#define QLCNIC_LOOPBACK_TEST 2
+#define QLCNIC_FILTER_AGE 80
+#define QLCNIC_LB_MAX_FILTERS 64
+
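+/* Loopback (LB) MAC-learning filter entry: a learned source MAC plus an
+ * optional VLAN id, hashed into adapter->fhash and aged out once it is
+ * older than QLCNIC_FILTER_AGE seconds. */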
+struct qlcnic_filter {
+ struct hlist_node fnode;
+ u8 faddr[ETH_ALEN];
+ u16 vlan_id;
+ unsigned long ftime;
+};
+
+struct qlcnic_filter_hash {
+ struct hlist_head *fhead;
+ u8 fnum;
+ u8 fmax;
+};
+
struct qlcnic_adapter {
struct qlcnic_hardware_context ahw;
@@ -924,6 +953,7 @@ struct qlcnic_adapter {
struct list_head mac_list;
spinlock_t tx_clean_lock;
+ spinlock_t mac_learn_lock;
u16 num_txd;
u16 num_rxd;
@@ -931,7 +961,6 @@ struct qlcnic_adapter {
u8 max_rds_rings;
u8 max_sds_rings;
- u8 driver_mismatch;
u8 msix_supported;
u8 rx_csum;
u8 portnum;
@@ -961,6 +990,7 @@ struct qlcnic_adapter {
u16 max_tx_ques;
u16 max_rx_ques;
u16 max_mtu;
+ u16 pvid;
u32 fw_hal_version;
u32 capabilities;
@@ -969,7 +999,7 @@ struct qlcnic_adapter {
u32 temp;
u32 int_vec_bit;
- u32 heartbit;
+ u32 heartbeat;
u8 max_mac_filters;
u8 dev_state;
@@ -983,6 +1013,7 @@ struct qlcnic_adapter {
u64 dev_rst_time;
+ struct vlan_group *vlgrp;
struct qlcnic_npar_info *npars;
struct qlcnic_eswitch *eswitch;
struct qlcnic_nic_template *nic_ops;
@@ -1003,6 +1034,8 @@ struct qlcnic_adapter {
struct qlcnic_nic_intr_coalesce coal;
+ struct qlcnic_filter_hash fhash;
+
unsigned long state;
__le32 file_prd_off; /*File fw product offset*/
u32 fw_version;
@@ -1042,7 +1075,7 @@ struct qlcnic_pci_info {
};
struct qlcnic_npar_info {
- u16 vlan_id;
+ u16 pvid;
u16 min_bw;
u16 max_bw;
u8 phy_port;
@@ -1050,11 +1083,13 @@ struct qlcnic_npar_info {
u8 active;
u8 enable_pm;
u8 dest_npar;
- u8 host_vlan_tag;
- u8 promisc_mode;
u8 discard_tagged;
- u8 mac_learning;
+ u8 mac_override;
+ u8 mac_anti_spoof;
+ u8 promisc_mode;
+ u8 offload_flags;
};
+
struct qlcnic_eswitch {
u8 port;
u8 active_vports;
@@ -1086,7 +1121,6 @@ struct qlcnic_eswitch {
#define IS_VALID_BW(bw) (bw >= MIN_BW && bw <= MAX_BW)
#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES)
#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES)
-#define IS_VALID_MODE(mode) (mode == 0 || mode == 1)
struct qlcnic_pci_func_cfg {
u16 func_type;
@@ -1118,12 +1152,41 @@ struct qlcnic_pm_func_cfg {
struct qlcnic_esw_func_cfg {
u16 vlan_id;
+ u8 op_mode;
+ u8 op_type;
u8 pci_func;
u8 host_vlan_tag;
u8 promisc_mode;
u8 discard_tagged;
- u8 mac_learning;
- u8 reserved;
+ u8 mac_override;
+ u8 mac_anti_spoof;
+ u8 offload_flags;
+ u8 reserved[5];
+};
+
+#define QLCNIC_STATS_VERSION 1
+#define QLCNIC_STATS_PORT 1
+#define QLCNIC_STATS_ESWITCH 2
+#define QLCNIC_QUERY_RX_COUNTER 0
+#define QLCNIC_QUERY_TX_COUNTER 1
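+/* Layout of the statistics block the firmware DMAs back for the
+ * GET_ESWITCH_STATS command; all fields are little-endian on the wire
+ * and byte-swapped by the caller. */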
+struct __qlcnic_esw_statistics {
+ __le16 context_id;
+ __le16 version;
+ __le16 size;
+ __le16 unused;
+ __le64 unicast_frames;
+ __le64 multicast_frames;
+ __le64 broadcast_frames;
+ __le64 dropped_frames;
+ __le64 errors;
+ __le64 local_frames;
+ __le64 numbytes;
+ __le64 rsvd[3];
+};
+
+struct qlcnic_esw_statistics {
+ struct __qlcnic_esw_statistics rx;
+ struct __qlcnic_esw_statistics tx;
};
int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val);
@@ -1171,6 +1234,8 @@ void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
int qlcnic_get_board_info(struct qlcnic_adapter *adapter);
int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
+void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter);
+void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter);
/* Functions from qlcnic_init.c */
int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
@@ -1199,7 +1264,7 @@ void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter);
void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
-int qlcnic_init_firmware(struct qlcnic_adapter *adapter);
+int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
void qlcnic_watchdog_task(struct work_struct *work);
void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
struct qlcnic_host_rds_ring *rds_ring);
@@ -1220,7 +1285,6 @@ int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring);
-int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u8 *mac);
void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter);
int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter);
void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *);
@@ -1249,9 +1313,16 @@ int qlcnic_get_eswitch_capabilities(struct qlcnic_adapter *, u8,
int qlcnic_get_eswitch_status(struct qlcnic_adapter *, u8,
struct qlcnic_eswitch *);
int qlcnic_toggle_eswitch(struct qlcnic_adapter *, u8, u8);
-int qlcnic_config_switch_port(struct qlcnic_adapter *, u8, int, u8, u8,
- u8, u8, u16);
+int qlcnic_config_switch_port(struct qlcnic_adapter *,
+ struct qlcnic_esw_func_cfg *);
+int qlcnic_get_eswitch_port_config(struct qlcnic_adapter *,
+ struct qlcnic_esw_func_cfg *);
int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8);
+int qlcnic_get_port_stats(struct qlcnic_adapter *, const u8, const u8,
+ struct __qlcnic_esw_statistics *);
+int qlcnic_get_eswitch_stats(struct qlcnic_adapter *, const u8, u8,
+ struct __qlcnic_esw_statistics *);
+int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8);
extern int qlcnic_config_tso;
/*
@@ -1280,6 +1351,8 @@ static const struct qlcnic_brdinfo qlcnic_boards[] = {
"3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"},
{0x1077, 0x8020, 0x1077, 0x20f,
"3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
+ {0x1077, 0x8020, 0x103c, 0x3733,
+ "NC523SFP 10Gb 2-port Flex-10 Server Adapter"},
{0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
};
@@ -1298,7 +1371,6 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
extern const struct ethtool_ops qlcnic_ethtool_ops;
struct qlcnic_nic_template {
- int (*get_mac_addr) (struct qlcnic_adapter *, u8*);
int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
int (*config_led) (struct qlcnic_adapter *, u32, u32);
int (*start_firmware) (struct qlcnic_adapter *);
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
index cc5d861d9a12..95a821e0b66f 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -813,9 +813,8 @@ int qlcnic_get_eswitch_capabilities(struct qlcnic_adapter *adapter, u8 port,
arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
eswitch->port = arg1 & 0xf;
- eswitch->active_vports = LSB(arg2);
- eswitch->max_ucast_filters = MSB(arg2);
- eswitch->max_active_vlans = LSB(MSW(arg2));
+ eswitch->max_ucast_filters = LSW(arg2);
+ eswitch->max_active_vlans = MSW(arg2) & 0xfff;
if (arg1 & BIT_6)
eswitch->flags |= QLCNIC_SWITCH_VLAN_FILTERING;
if (arg1 & BIT_7)
@@ -943,43 +942,271 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
return err;
}
-/* Configure eSwitch port */
-int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, u8 id,
- int vlan_tagging, u8 discard_tagged, u8 promsc_mode,
- u8 mac_learn, u8 pci_func, u16 vlan_id)
+int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
+ const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats)
+{
+ size_t stats_size = sizeof(struct __qlcnic_esw_statistics);
+ struct __qlcnic_esw_statistics *stats;
+ dma_addr_t stats_dma_t;
+ void *stats_addr;
+ u32 arg1;
+ int err;
+
+ if (esw_stats == NULL)
+ return -EINVAL;
+
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC &&
+ func != adapter->ahw.pci_func) {
+ dev_err(&adapter->pdev->dev,
+ "Not privilege to query stats for func=%d", func);
+ return -EIO;
+ }
+
+ stats_addr = pci_alloc_consistent(adapter->pdev, stats_size,
+ &stats_dma_t);
+ if (!stats_addr) {
+ dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+ memset(stats_addr, 0, stats_size);
+
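+ /* arg1 encodes the query: function id [7:0], stats version [11:8],
+ * port/eswitch selector [14:12], rx/tx [15], buffer size [31:16]. */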
+ arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
+ arg1 |= rx_tx << 15 | stats_size << 16;
+
+ err = qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ adapter->fw_hal_version,
+ arg1,
+ MSD(stats_dma_t),
+ LSD(stats_dma_t),
+ QLCNIC_CDRP_CMD_GET_ESWITCH_STATS);
+
+ if (!err) {
+ stats = (struct __qlcnic_esw_statistics *)stats_addr;
+ esw_stats->context_id = le16_to_cpu(stats->context_id);
+ esw_stats->version = le16_to_cpu(stats->version);
+ esw_stats->size = le16_to_cpu(stats->size);
+ esw_stats->multicast_frames =
+ le64_to_cpu(stats->multicast_frames);
+ esw_stats->broadcast_frames =
+ le64_to_cpu(stats->broadcast_frames);
+ esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames);
+ esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames);
+ esw_stats->local_frames = le64_to_cpu(stats->local_frames);
+ esw_stats->errors = le64_to_cpu(stats->errors);
+ esw_stats->numbytes = le64_to_cpu(stats->numbytes);
+ }
+
+ pci_free_consistent(adapter->pdev, stats_size, stats_addr,
+ stats_dma_t);
+ return err;
+}
+
+int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
+ const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats)
+{
+ struct __qlcnic_esw_statistics port_stats;
+ u8 i;
+ int ret = -EIO;
+
+ if (esw_stats == NULL)
+ return -EINVAL;
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+ return -EIO;
+ if (adapter->npars == NULL)
+ return -EIO;
+
+ memset(esw_stats, 0, sizeof(struct __qlcnic_esw_statistics));
+ esw_stats->context_id = eswitch;
+
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ if (adapter->npars[i].phy_port != eswitch)
+ continue;
+
+ memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics));
+ if (qlcnic_get_port_stats(adapter, i, rx_tx, &port_stats))
+ continue;
+
+ esw_stats->size = port_stats.size;
+ esw_stats->version = port_stats.version;
+ esw_stats->unicast_frames += port_stats.unicast_frames;
+ esw_stats->multicast_frames += port_stats.multicast_frames;
+ esw_stats->broadcast_frames += port_stats.broadcast_frames;
+ esw_stats->dropped_frames += port_stats.dropped_frames;
+ esw_stats->errors += port_stats.errors;
+ esw_stats->local_frames += port_stats.local_frames;
+ esw_stats->numbytes += port_stats.numbytes;
+
+ ret = 0;
+ }
+ return ret;
+}
+
+int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
+ const u8 port, const u8 rx_tx)
{
- int err = -EIO;
+
u32 arg1;
- struct qlcnic_eswitch *eswitch;
if (adapter->op_mode != QLCNIC_MGMT_FUNC)
- return err;
+ return -EIO;
- eswitch = &adapter->eswitch[id];
- if (!(eswitch->flags & QLCNIC_SWITCH_ENABLE))
+ if (func_esw == QLCNIC_STATS_PORT) {
+ if (port >= QLCNIC_MAX_PCI_FUNC)
+ goto err_ret;
+ } else if (func_esw == QLCNIC_STATS_ESWITCH) {
+ if (port >= QLCNIC_NIU_MAX_XG_PORTS)
+ goto err_ret;
+ } else {
+ goto err_ret;
+ }
+
+ if (rx_tx > QLCNIC_QUERY_TX_COUNTER)
+ goto err_ret;
+
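+ /* Same CDRP command as a stats query, but BIT_14 appears to request
+ * a clear of the counters, so no DMA buffer is supplied. */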
+ arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
+ arg1 |= BIT_14 | rx_tx << 15;
+
+ return qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ adapter->fw_hal_version,
+ arg1,
+ 0,
+ 0,
+ QLCNIC_CDRP_CMD_GET_ESWITCH_STATS);
+
+err_ret:
+ dev_err(&adapter->pdev->dev, "Invalid argument func_esw=%d port=%d"
+ "rx_ctx=%d\n", func_esw, port, rx_tx);
+ return -EIO;
+}
+
+static int
+__qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
+ u32 *arg1, u32 *arg2)
+{
+ int err;
+ u8 pci_func = *arg1 >> 8;
+ err = qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ adapter->fw_hal_version,
+ *arg1,
+ 0,
+ 0,
+ QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG);
+
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ *arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+ *arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
+ dev_info(&adapter->pdev->dev,
+ "eSwitch port config for pci func %d\n", pci_func);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get eswitch port config for pci func %d\n",
+ pci_func);
+ }
+ return err;
+}
+/* Configure eSwitch port
+ * op_mode = 0 for setting default port behavior
+ * op_mode = 1 for setting vlan id
+ * op_mode = 2 for deleting vlan id
+ * op_type = 0 for vlan_id
+ * op_type = 1 for port vlan_id
+ */
+int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
+{
+ int err = -EIO;
+ u32 arg1, arg2 = 0;
+ u8 pci_func;
+
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC)
return err;
+ pci_func = esw_cfg->pci_func;
+ arg1 = (adapter->npars[pci_func].phy_port & BIT_0);
+ arg1 |= (pci_func << 8);
- arg1 = eswitch->port | (discard_tagged ? BIT_4 : 0);
- arg1 |= (promsc_mode ? BIT_6 : 0) | (mac_learn ? BIT_7 : 0);
- arg1 |= pci_func << 8;
- if (vlan_tagging)
- arg1 |= BIT_5 | (vlan_id << 16);
+ if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
+ return err;
+ arg1 &= ~(0x0ff << 8);
+ arg1 |= (pci_func << 8);
+ arg1 &= ~(BIT_2 | BIT_3);
+ switch (esw_cfg->op_mode) {
+ case QLCNIC_PORT_DEFAULTS:
+ arg1 |= (BIT_4 | BIT_6 | BIT_7);
+ arg2 |= (BIT_0 | BIT_1);
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
+ arg2 |= (BIT_2 | BIT_3);
+ if (!(esw_cfg->discard_tagged))
+ arg1 &= ~BIT_4;
+ if (!(esw_cfg->promisc_mode))
+ arg1 &= ~BIT_6;
+ if (!(esw_cfg->mac_override))
+ arg1 &= ~BIT_7;
+ if (!(esw_cfg->mac_anti_spoof))
+ arg2 &= ~BIT_0;
+ if (!(esw_cfg->offload_flags & BIT_0))
+ arg2 &= ~(BIT_1 | BIT_2 | BIT_3);
+ if (!(esw_cfg->offload_flags & BIT_1))
+ arg2 &= ~BIT_2;
+ if (!(esw_cfg->offload_flags & BIT_2))
+ arg2 &= ~BIT_3;
+ break;
+ case QLCNIC_ADD_VLAN:
+ arg1 |= (BIT_2 | BIT_5);
+ arg1 |= (esw_cfg->vlan_id << 16);
+ break;
+ case QLCNIC_DEL_VLAN:
+ arg1 |= (BIT_3 | BIT_5);
+ arg1 &= ~(0x0ffff << 16);
+ break;
+ default:
+ return err;
+ }
err = qlcnic_issue_cmd(adapter,
adapter->ahw.pci_func,
adapter->fw_hal_version,
arg1,
- 0,
+ arg2,
0,
QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH);
if (err != QLCNIC_RCODE_SUCCESS) {
dev_err(&adapter->pdev->dev,
- "Failed to configure eswitch port%d\n", eswitch->port);
+ "Failed to configure eswitch pci func %d\n", pci_func);
} else {
dev_info(&adapter->pdev->dev,
- "Configured eSwitch for port %d\n", eswitch->port);
+ "Configured eSwitch for pci func %d\n", pci_func);
}
return err;
}
+
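+/* Read back a function's eSwitch config: arg1 carries the flag bits
+ * (BIT_4..BIT_7) and the access VLAN id in its upper half, arg2 the
+ * anti-spoof bit and the three offload bits. */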
+int
+qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
+{
+ u32 arg1, arg2;
+ u8 phy_port;
+ if (adapter->op_mode == QLCNIC_MGMT_FUNC)
+ phy_port = adapter->npars[esw_cfg->pci_func].phy_port;
+ else
+ phy_port = adapter->physical_port;
+ arg1 = phy_port;
+ arg1 |= (esw_cfg->pci_func << 8);
+ if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
+ return -EIO;
+
+ esw_cfg->discard_tagged = !!(arg1 & BIT_4);
+ esw_cfg->host_vlan_tag = !!(arg1 & BIT_5);
+ esw_cfg->promisc_mode = !!(arg1 & BIT_6);
+ esw_cfg->mac_override = !!(arg1 & BIT_7);
+ esw_cfg->vlan_id = LSW(arg1 >> 16);
+ esw_cfg->mac_anti_spoof = (arg2 & 0x1);
+ esw_cfg->offload_flags = ((arg2 >> 1) & 0x7);
+
+ return 0;
+}
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 9328d59e21e0..cb9463bd6b1e 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -99,7 +99,7 @@ static const u32 diag_registers[] = {
CRB_XG_STATE_P3,
CRB_FW_CAPABILITIES_1,
ISR_INT_STATE_REG,
- QLCNIC_CRB_DEV_REF_COUNT,
+ QLCNIC_CRB_DRV_ACTIVE,
QLCNIC_CRB_DEV_STATE,
QLCNIC_CRB_DRV_STATE,
QLCNIC_CRB_DRV_SCRATCH,
@@ -115,9 +115,13 @@ static const u32 diag_registers[] = {
-1
};
+#define QLCNIC_MGMT_API_VERSION 2
+#define QLCNIC_DEV_INFO_SIZE 1
+#define QLCNIC_ETHTOOL_REGS_VER 2
static int qlcnic_get_regs_len(struct net_device *dev)
{
- return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN;
+ return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN +
+ QLCNIC_DEV_INFO_SIZE + 1;
}
static int qlcnic_get_eeprom_len(struct net_device *dev)
@@ -342,10 +346,13 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
int ring, i = 0;
memset(p, 0, qlcnic_get_regs_len(dev));
- regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) |
- (adapter->pdev)->device;
+ regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) |
+ (adapter->ahw.revision_id << 16) | (adapter->pdev)->device;
- for (i = 0; diag_registers[i] != -1; i++)
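+ /* regs_buff[0..1] form a small pseudo-header: magic 0xcafe plus the
+ * dev-info word count, then the management API version. */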
+ regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff));
+ regs_buff[1] = QLCNIC_MGMT_API_VERSION;
+
+ for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[i] != -1; i++)
regs_buff[i] = QLCRD32(adapter, diag_registers[i]);
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
@@ -747,6 +754,14 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
{
memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN);
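+ /* Register and link tests now run for both online and offline
+ * requests; interrupt and loopback tests remain offline-only. */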
+ data[0] = qlcnic_reg_test(dev);
+ if (data[0])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ data[1] = (u64) qlcnic_test_link(dev);
+ if (data[1])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
data[2] = qlcnic_irq_test(dev);
if (data[2])
@@ -757,15 +772,6 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
eth_test->flags |= ETH_TEST_FL_FAILED;
}
-
- data[0] = qlcnic_reg_test(dev);
- if (data[0])
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
- /* link test */
- data[1] = (u64) qlcnic_test_link(dev);
- if (data[1])
- eth_test->flags |= ETH_TEST_FL_FAILED;
}
static void
@@ -805,6 +811,20 @@ qlcnic_get_ethtool_stats(struct net_device *dev,
}
}
+static int qlcnic_set_tx_csum(struct net_device *dev, u32 data)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if ((adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return -EOPNOTSUPP;
+ if (data)
+ dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ else
+ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+
+ return 0;
+
+}
static u32 qlcnic_get_tx_csum(struct net_device *dev)
{
return dev->features & NETIF_F_IP_CSUM;
@@ -819,7 +839,23 @@ static u32 qlcnic_get_rx_csum(struct net_device *dev)
static int qlcnic_set_rx_csum(struct net_device *dev, u32 data)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if ((adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return -EOPNOTSUPP;
+ if (data) {
+ adapter->rx_csum = 1;
+ return 0;
+ }
+
+ if (adapter->flags & QLCNIC_LRO_ENABLED) {
+ if (qlcnic_config_hw_lro(adapter, QLCNIC_LRO_DISABLED))
+ return -EIO;
+
+ dev->features &= ~NETIF_F_LRO;
+ qlcnic_send_lro_cleanup(adapter);
+ }
adapter->rx_csum = !!data;
+ dev_info(&adapter->pdev->dev, "disabling LRO as rx_csum is off\n");
return 0;
}
@@ -1002,6 +1038,15 @@ static int qlcnic_set_flags(struct net_device *netdev, u32 data)
if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO))
return -EINVAL;
+ if (!adapter->rx_csum) {
+ dev_info(&adapter->pdev->dev, "rx csum is off, "
+ "cannot toggle lro\n");
+ return -EINVAL;
+ }
+
+ if ((data & ETH_FLAG_LRO) && (adapter->flags & QLCNIC_LRO_ENABLED))
+ return 0;
+
if (data & ETH_FLAG_LRO) {
hw_lro = QLCNIC_LRO_ENABLED;
netdev->features |= NETIF_F_LRO;
@@ -1048,7 +1093,7 @@ const struct ethtool_ops qlcnic_ethtool_ops = {
.get_pauseparam = qlcnic_get_pauseparam,
.set_pauseparam = qlcnic_set_pauseparam,
.get_tx_csum = qlcnic_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_csum,
+ .set_tx_csum = qlcnic_set_tx_csum,
.set_sg = ethtool_op_set_sg,
.get_tso = qlcnic_get_tso,
.set_tso = qlcnic_set_tso,
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
index 15fc32070be3..716203e41dc7 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -698,7 +698,7 @@ enum {
#define QLCNIC_PEG_ALIVE_COUNTER (QLCNIC_CAM_RAM(0xb0))
#define QLCNIC_PEG_HALT_STATUS1 (QLCNIC_CAM_RAM(0xa8))
#define QLCNIC_PEG_HALT_STATUS2 (QLCNIC_CAM_RAM(0xac))
-#define QLCNIC_CRB_DEV_REF_COUNT (QLCNIC_CAM_RAM(0x138))
+#define QLCNIC_CRB_DRV_ACTIVE (QLCNIC_CAM_RAM(0x138))
#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140))
#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
@@ -718,8 +718,9 @@ enum {
#define QLCNIC_DEV_FAILED 0x6
#define QLCNIC_DEV_QUISCENT 0x7
-#define QLCNIC_DEV_NPAR_NOT_RDY 0
-#define QLCNIC_DEV_NPAR_RDY 1
+#define QLCNIC_DEV_NPAR_NON_OPER 0 /* NON Operational */
+#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */
+#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */
#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) &= (1 << (FN * 4)))
#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
@@ -744,6 +745,15 @@ enum {
#define FW_POLL_DELAY (1 * HZ)
#define FW_FAIL_THRESH 2
+#define QLCNIC_RESET_TIMEOUT_SECS 10
+#define QLCNIC_INIT_TIMEOUT_SECS 30
+#define QLCNIC_RCVPEG_CHECK_RETRY_COUNT 2000
+#define QLCNIC_RCVPEG_CHECK_DELAY 10
+#define QLCNIC_CMDPEG_CHECK_RETRY_COUNT 60
+#define QLCNIC_CMDPEG_CHECK_DELAY 500
+#define QLCNIC_HEARTBEAT_PERIOD_MSECS 200
+#define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 45
+
#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
@@ -770,6 +780,7 @@ struct qlcnic_legacy_intr_set {
#define QLCNIC_DRV_OP_MODE 0x1b2170
#define QLCNIC_MSIX_BASE 0x132110
#define QLCNIC_MAX_PCI_FUNC 8
+#define QLCNIC_MAX_VLAN_FILTERS 64
/* PCI function operational mode */
enum {
@@ -778,6 +789,12 @@ enum {
QLCNIC_NON_PRIV_FUNC = 2
};
+enum {
+ QLCNIC_PORT_DEFAULTS = 0,
+ QLCNIC_ADD_VLAN = 1,
+ QLCNIC_DEL_VLAN = 2
+};
+
#define QLC_DEV_DRV_DEFAULT 0x11111111
#define LSB(x) ((uint8_t)(x))
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index e08c8b0556a4..c198df90ff3c 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -297,8 +297,8 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
break;
if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
dev_err(&adapter->pdev->dev,
- "Failed to acquire sem=%d lock;reg_id=%d\n",
- sem, id_reg);
+ "Failed to acquire sem=%d lock; holdby=%d\n",
+ sem, id_reg ? QLCRD32(adapter, id_reg) : -1);
return -EIO;
}
msleep(1);
@@ -375,7 +375,7 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
static int
qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
- unsigned op)
+ u16 vlan_id, unsigned op)
{
struct qlcnic_nic_req req;
struct qlcnic_mac_req *mac_req;
@@ -391,6 +391,8 @@ qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
mac_req->op = op;
memcpy(mac_req->mac_addr, addr, 6);
+ req.words[1] = cpu_to_le64(vlan_id);
+
return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
}
@@ -415,7 +417,7 @@ static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, u8 *addr)
memcpy(cur->mac_addr, addr, ETH_ALEN);
if (qlcnic_sre_macaddr_change(adapter,
- cur->mac_addr, QLCNIC_MAC_ADD)) {
+ cur->mac_addr, 0, QLCNIC_MAC_ADD)) {
kfree(cur);
return -EIO;
}
@@ -485,12 +487,63 @@ void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
while (!list_empty(head)) {
cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
qlcnic_sre_macaddr_change(adapter,
- cur->mac_addr, QLCNIC_MAC_DEL);
+ cur->mac_addr, 0, QLCNIC_MAC_DEL);
list_del(&cur->list);
kfree(cur);
}
}
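+
+/* Age out learned filters: entries older than QLCNIC_FILTER_AGE seconds
+ * are removed from the firmware (MAC or MAC+VLAN delete) and freed. */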
+void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_filter *tmp_fil;
+ struct hlist_node *tmp_hnode, *n;
+ struct hlist_head *head;
+ int i;
+
+ for (i = 0; i < adapter->fhash.fmax; i++) {
+ head = &(adapter->fhash.fhead[i]);
+
+ hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ if (time_after(jiffies,
+ tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ)) {
+ qlcnic_sre_macaddr_change(adapter,
+ tmp_fil->faddr, tmp_fil->vlan_id,
+ tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
+ QLCNIC_MAC_DEL);
+ spin_lock_bh(&adapter->mac_learn_lock);
+ adapter->fhash.fnum--;
+ hlist_del(&tmp_fil->fnode);
+ spin_unlock_bh(&adapter->mac_learn_lock);
+ kfree(tmp_fil);
+ }
+ }
+ }
+}
+
+void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_filter *tmp_fil;
+ struct hlist_node *tmp_hnode, *n;
+ struct hlist_head *head;
+ int i;
+
+ for (i = 0; i < adapter->fhash.fmax; i++) {
+ head = &(adapter->fhash.fhead[i]);
+
+ hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr,
+ tmp_fil->vlan_id, tmp_fil->vlan_id ?
+ QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL);
+ spin_lock_bh(&adapter->mac_learn_lock);
+ adapter->fhash.fnum--;
+ hlist_del(&tmp_fil->fnode);
+ spin_unlock_bh(&adapter->mac_learn_lock);
+ kfree(tmp_fil);
+ }
+ }
+}
+
#define QLCNIC_CONFIG_INTR_COALESCE 3
/*
@@ -715,19 +768,6 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
return rc;
}
-int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u8 *mac)
-{
- u32 crbaddr;
- int pci_func = adapter->ahw.pci_func;
-
- crbaddr = CRB_MAC_BLOCK_START +
- (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));
-
- qlcnic_fetch_mac(adapter, crbaddr, crbaddr+4, pci_func & 1, mac);
-
- return 0;
-}
-
/*
* Changes the CRB window to the specified window.
*/
@@ -1245,4 +1285,5 @@ void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter)
mode = VPORT_MISS_MODE_ACCEPT_MULTI;
qlcnic_nic_set_promisc(adapter, mode);
+ msleep(1000);
}
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 75ba744b173c..e26fa9593311 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -25,6 +25,7 @@
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/if_vlan.h>
#include "qlcnic.h"
struct crb_addr_pair {
@@ -45,6 +46,9 @@ static void
qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
struct qlcnic_host_rds_ring *rds_ring);
+static int
+qlcnic_check_fw_heartbeat(struct qlcnic_adapter *adapter);
+
static void crb_addr_transform_setup(void)
{
crb_addr_transform(XDMA);
@@ -136,8 +140,6 @@ void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
- spin_lock(&rds_ring->lock);
-
INIT_LIST_HEAD(&rds_ring->free_list);
rx_buf = rds_ring->rx_buf_arr;
@@ -146,8 +148,6 @@ void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
&rds_ring->free_list);
rx_buf++;
}
-
- spin_unlock(&rds_ring->lock);
}
}
@@ -439,11 +439,14 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
u32 off;
struct pci_dev *pdev = adapter->pdev;
- /* resetall */
+ QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
+ QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
+
qlcnic_rom_lock(adapter);
QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
qlcnic_rom_unlock(adapter);
+ /* Init HW CRB block */
if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
qlcnic_rom_fast_read(adapter, 4, &n) != 0) {
dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n);
@@ -524,13 +527,10 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
}
kfree(buf);
- /* p2dn replyCount */
+ /* Initialize protocol process engine */
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e);
- /* disable_peg_cache 0 & 1*/
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8);
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8);
-
- /* peg_clr_all */
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0);
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0);
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0);
@@ -539,9 +539,87 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0);
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0);
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0);
+ msleep(1);
+ QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
+ QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
return 0;
}
+static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT;
+
+ do {
+ val = QLCRD32(adapter, CRB_CMDPEG_STATE);
+
+ switch (val) {
+ case PHAN_INITIALIZE_COMPLETE:
+ case PHAN_INITIALIZE_ACK:
+ return 0;
+ case PHAN_INITIALIZE_FAILED:
+ goto out_err;
+ default:
+ break;
+ }
+
+ msleep(QLCNIC_CMDPEG_CHECK_DELAY);
+
+ } while (--retries);
+
+ QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+
+out_err:
+ dev_err(&adapter->pdev->dev, "Command Peg initialization not "
+ "complete, state: 0x%x.\n", val);
+ return -EIO;
+}
+
+static int
+qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ int retries = QLCNIC_RCVPEG_CHECK_RETRY_COUNT;
+
+ do {
+ val = QLCRD32(adapter, CRB_RCVPEG_STATE);
+
+ if (val == PHAN_PEG_RCV_INITIALIZED)
+ return 0;
+
+ msleep(QLCNIC_RCVPEG_CHECK_DELAY);
+
+ } while (--retries);
+
+ if (!retries) {
+ dev_err(&adapter->pdev->dev, "Receive Peg initialization not "
+ "complete, state: 0x%x.\n", val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int
+qlcnic_check_fw_status(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ err = qlcnic_cmd_peg_ready(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_receive_peg_ready(adapter);
+ if (err)
+ return err;
+
+ QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
+
+ return err;
+}
+
int
qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
@@ -557,12 +635,12 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
}
adapter->physical_port = (val >> 2);
if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
- timeo = 30;
+ timeo = QLCNIC_INIT_TIMEOUT_SECS;
adapter->dev_init_timeo = timeo;
if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo))
- timeo = 10;
+ timeo = QLCNIC_RESET_TIMEOUT_SECS;
adapter->reset_ack_timeo = timeo;
@@ -906,54 +984,47 @@ qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24);
}
-int
-qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
+static void qlcnic_rom_lock_recovery(struct qlcnic_adapter *adapter)
{
- u32 count, old_count;
- u32 val, version, major, minor, build;
- int i, timeout;
-
- if (adapter->need_fw_reset)
- return 1;
+ if (qlcnic_pcie_sem_lock(adapter, 2, QLCNIC_ROM_LOCK_ID))
+ dev_info(&adapter->pdev->dev, "Resetting rom_lock\n");
- /* last attempt had failed */
- if (QLCRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
- return 1;
+ qlcnic_pcie_sem_unlock(adapter, 2);
+}
- old_count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
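+
+/* Firmware liveness check: the alive counter should advance while the
+ * firmware is running. Sample it, then re-sample up to 45 times at
+ * 200ms intervals; if it never moves the firmware needs a reset. */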
+static int
+qlcnic_check_fw_heartbeat(struct qlcnic_adapter *adapter)
+{
+ u32 heartbeat, ret = -EIO;
+ int retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
- for (i = 0; i < 10; i++) {
+ adapter->heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
- timeout = msleep_interruptible(200);
- if (timeout) {
- QLCWR32(adapter, CRB_CMDPEG_STATE,
- PHAN_INITIALIZE_FAILED);
- return -EINTR;
+ do {
+ msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS);
+ heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ if (heartbeat != adapter->heartbeat) {
+ ret = QLCNIC_RCODE_SUCCESS;
+ break;
}
+ } while (--retries);
- count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
- if (count != old_count)
- break;
- }
+ return ret;
+}
- /* firmware is dead */
- if (count == old_count)
+int
+qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_check_fw_heartbeat(adapter)) {
+ qlcnic_rom_lock_recovery(adapter);
return 1;
+ }
- /* check if we have got newer or different file firmware */
- if (adapter->fw) {
-
- val = qlcnic_get_fw_version(adapter);
-
- version = QLCNIC_DECODE_VERSION(val);
-
- major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
- minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
- build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+ if (adapter->need_fw_reset)
+ return 1;
- if (version > QLCNIC_VERSION_CODE(major, minor, build))
- return 1;
- }
+ if (adapter->fw)
+ return 1;
return 0;
}
@@ -1089,18 +1160,6 @@ qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
return -EINVAL;
}
- /* check if flashed firmware is newer */
- if (qlcnic_rom_fast_read(adapter,
- QLCNIC_FW_VERSION_OFFSET, (int *)&val))
- return -EIO;
-
- val = QLCNIC_DECODE_VERSION(val);
- if (val > ver) {
- dev_info(&pdev->dev, "%s: firmware is older than flash\n",
- fw_name[fw_type]);
- return -EINVAL;
- }
-
QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
return 0;
}
@@ -1162,78 +1221,6 @@ qlcnic_release_firmware(struct qlcnic_adapter *adapter)
adapter->fw = NULL;
}
-static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
-{
- u32 val;
- int retries = 60;
-
- do {
- val = QLCRD32(adapter, CRB_CMDPEG_STATE);
-
- switch (val) {
- case PHAN_INITIALIZE_COMPLETE:
- case PHAN_INITIALIZE_ACK:
- return 0;
- case PHAN_INITIALIZE_FAILED:
- goto out_err;
- default:
- break;
- }
-
- msleep(500);
-
- } while (--retries);
-
- QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
-
-out_err:
- dev_err(&adapter->pdev->dev, "Command Peg initialization not "
- "complete, state: 0x%x.\n", val);
- return -EIO;
-}
-
-static int
-qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
-{
- u32 val;
- int retries = 2000;
-
- do {
- val = QLCRD32(adapter, CRB_RCVPEG_STATE);
-
- if (val == PHAN_PEG_RCV_INITIALIZED)
- return 0;
-
- msleep(10);
-
- } while (--retries);
-
- if (!retries) {
- dev_err(&adapter->pdev->dev, "Receive Peg initialization not "
- "complete, state: 0x%x.\n", val);
- return -EIO;
- }
-
- return 0;
-}
-
-int qlcnic_init_firmware(struct qlcnic_adapter *adapter)
-{
- int err;
-
- err = qlcnic_cmd_peg_ready(adapter);
- if (err)
- return err;
-
- err = qlcnic_receive_peg_ready(adapter);
- if (err)
- return err;
-
- QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
-
- return err;
-}
-
static void
qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
struct qlcnic_fw_msg *msg)
@@ -1351,11 +1338,12 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
skb = buffer->skb;
- if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
+ if (likely(adapter->rx_csum && (cksum == STATUS_CKSUM_OK ||
+ cksum == STATUS_CKSUM_LOOP))) {
adapter->stats.csummed++;
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else {
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
}
skb->dev = adapter->netdev;
@@ -1365,6 +1353,31 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
return skb;
}
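+
+/* Strip an in-band 802.1Q header if present and validate the tag:
+ * frames carrying the port VLAN id (pvid) continue on the untagged
+ * path (tag forced to 0xffff), other tags are accepted only when
+ * tagging is enabled for this function. */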
+static int
+qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb,
+ u16 *vlan_tag)
+{
+ struct ethhdr *eth_hdr;
+
+ if (!__vlan_get_tag(skb, vlan_tag)) {
+ eth_hdr = (struct ethhdr *) skb->data;
+ memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
+ skb_pull(skb, VLAN_HLEN);
+ }
+ if (!adapter->pvid)
+ return 0;
+
+ if (*vlan_tag == adapter->pvid) {
+ /* Outer vlan tag. Packet should follow non-vlan path */
+ *vlan_tag = 0xffff;
+ return 0;
+ }
+ if (adapter->flags & QLCNIC_TAGGING_ENABLED)
+ return 0;
+
+ return -EINVAL;
+}
+
static struct qlcnic_rx_buffer *
qlcnic_process_rcv(struct qlcnic_adapter *adapter,
struct qlcnic_host_sds_ring *sds_ring,
@@ -1376,6 +1389,7 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
struct sk_buff *skb;
struct qlcnic_host_rds_ring *rds_ring;
int index, length, cksum, pkt_offset;
+ u16 vid = 0xffff;
if (unlikely(ring >= adapter->max_rds_rings))
return NULL;
@@ -1405,9 +1419,19 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
skb_pull(skb, pkt_offset);
skb->truesize = skb->len + sizeof(struct sk_buff);
+
+ if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+ adapter->stats.rxdropped++;
+ dev_kfree_skb(skb);
+ return buffer;
+ }
+
skb->protocol = eth_type_trans(skb, netdev);
- napi_gro_receive(&sds_ring->napi, skb);
+ if ((vid != 0xffff) && adapter->vlgrp)
+ vlan_gro_receive(&sds_ring->napi, adapter->vlgrp, vid, skb);
+ else
+ napi_gro_receive(&sds_ring->napi, skb);
adapter->stats.rx_pkts++;
adapter->stats.rxbytes += length;
@@ -1436,6 +1460,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
int index;
u16 lro_length, length, data_offset;
u32 seq_number;
+ u16 vid = 0xffff;
if (unlikely(ring > adapter->max_rds_rings))
return NULL;
@@ -1469,6 +1494,13 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
skb_pull(skb, l2_hdr_offset);
+
+ if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+ adapter->stats.rxdropped++;
+ dev_kfree_skb(skb);
+ return buffer;
+ }
+
skb->protocol = eth_type_trans(skb, netdev);
iph = (struct iphdr *)skb->data;
@@ -1483,7 +1515,10 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
length = skb->len;
- netif_receive_skb(skb);
+ if ((vid != 0xffff) && adapter->vlgrp)
+ vlan_hwaccel_receive_skb(skb, adapter->vlgrp, vid);
+ else
+ netif_receive_skb(skb);
adapter->stats.lro_pkts++;
adapter->stats.lrobytes += length;
@@ -1587,8 +1622,6 @@ qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
int producer, count = 0;
struct list_head *head;
- spin_lock(&rds_ring->lock);
-
producer = rds_ring->producer;
head = &rds_ring->free_list;
@@ -1618,7 +1651,6 @@ qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
writel((producer-1) & (rds_ring->num_desc-1),
rds_ring->crb_rcv_producer);
}
- spin_unlock(&rds_ring->lock);
}
static void
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index b9615bd745ea..17124f6e0a93 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -50,6 +50,10 @@ static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
/* Default to restricted 1G auto-neg mode */
static int wol_port_mode = 5;
+static int qlcnic_mac_learn;
+module_param(qlcnic_mac_learn, int, 0644);
+MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");
+
static int use_msi = 1;
module_param(use_msi, int, 0644);
MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
@@ -94,7 +98,7 @@ static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
-static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter);
+static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
@@ -103,13 +107,17 @@ static irqreturn_t qlcnic_msi_intr(int irq, void *data);
static irqreturn_t qlcnic_msix_intr(int irq, void *data);
static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
-static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
+static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
static int qlcnic_start_firmware(struct qlcnic_adapter *);
+static void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
+static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
+static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
+ struct qlcnic_esw_func_cfg *);
/* PCI Device ID Table */
#define ENTRY(device) \
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
@@ -320,7 +328,7 @@ qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
- if (adapter->nic_ops->get_mac_addr(adapter, mac_addr) != 0)
+ if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
return -EIO;
memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
@@ -341,6 +349,9 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct sockaddr *addr = p;
+ if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
+ return -EOPNOTSUPP;
+
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
@@ -360,6 +371,13 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
return 0;
}
+static void qlcnic_vlan_rx_register(struct net_device *netdev,
+ struct vlan_group *grp)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ adapter->vlgrp = grp;
+}
+
static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_open = qlcnic_open,
.ndo_stop = qlcnic_close,
@@ -370,20 +388,19 @@ static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_set_mac_address = qlcnic_set_mac,
.ndo_change_mtu = qlcnic_change_mtu,
.ndo_tx_timeout = qlcnic_tx_timeout,
+ .ndo_vlan_rx_register = qlcnic_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = qlcnic_poll_controller,
#endif
};
static struct qlcnic_nic_template qlcnic_ops = {
- .get_mac_addr = qlcnic_get_mac_address,
.config_bridged_mode = qlcnic_config_bridged_mode,
.config_led = qlcnic_config_led,
.start_firmware = qlcnic_start_firmware
};
static struct qlcnic_nic_template qlcnic_vf_ops = {
- .get_mac_addr = qlcnic_get_mac_address,
.config_bridged_mode = qlcnicvf_config_bridged_mode,
.config_led = qlcnicvf_config_led,
.start_firmware = qlcnicvf_start_firmware
@@ -473,48 +490,57 @@ qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
static int
qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
{
- struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC];
- int i, ret = 0, err;
+ struct qlcnic_pci_info *pci_info;
+ int i, ret = 0;
u8 pfn;
- if (!adapter->npars)
- adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
- QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
- if (!adapter->npars)
+ pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ if (!pci_info)
return -ENOMEM;
- if (!adapter->eswitch)
- adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
+ adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
+ QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
+ if (!adapter->npars) {
+ ret = -ENOMEM;
+ goto err_pci_info;
+ }
+
+ adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
if (!adapter->eswitch) {
- err = -ENOMEM;
- goto err_eswitch;
+ ret = -ENOMEM;
+ goto err_npars;
}
ret = qlcnic_get_pci_info(adapter, pci_info);
- if (!ret) {
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
- pfn = pci_info[i].id;
- if (pfn > QLCNIC_MAX_PCI_FUNC)
- return QL_STATUS_INVALID_PARAM;
- adapter->npars[pfn].active = pci_info[i].active;
- adapter->npars[pfn].type = pci_info[i].type;
- adapter->npars[pfn].phy_port = pci_info[i].default_port;
- adapter->npars[pfn].mac_learning = DEFAULT_MAC_LEARN;
- adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
- adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
- }
-
- for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
- adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
+ if (ret)
+ goto err_eswitch;
- return ret;
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ pfn = pci_info[i].id;
+ if (pfn >= QLCNIC_MAX_PCI_FUNC) {
+ ret = QL_STATUS_INVALID_PARAM;
+ goto err_eswitch;
+ }
+ adapter->npars[pfn].active = pci_info[i].active;
+ adapter->npars[pfn].type = pci_info[i].type;
+ adapter->npars[pfn].phy_port = pci_info[i].default_port;
+ adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
+ adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
}
+ for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
+ adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
+
+ kfree(pci_info);
+ return 0;
+
+err_eswitch:
kfree(adapter->eswitch);
adapter->eswitch = NULL;
-err_eswitch:
+err_npars:
kfree(adapter->npars);
+ adapter->npars = NULL;
+err_pci_info:
+ kfree(pci_info);
return ret;
}
@@ -529,12 +555,10 @@ qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
/* If other drivers are not in use set their privilege level */
- ref_count = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+ ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
ret = qlcnic_api_lock(adapter);
if (ret)
goto err_lock;
- if (QLC_DEV_CLR_REF_CNT(ref_count, adapter->ahw.pci_func))
- goto err_npar;
if (qlcnic_config_npars) {
for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
@@ -552,18 +576,16 @@ qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
adapter->ahw.pci_func));
}
writel(data, priv_op);
-err_npar:
qlcnic_api_unlock(adapter);
err_lock:
return ret;
}
-static u32
-qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
+static void
+qlcnic_check_vf(struct qlcnic_adapter *adapter)
{
void __iomem *msix_base_addr;
void __iomem *priv_op;
- struct qlcnic_info nic_info;
u32 func;
u32 msix_base;
u32 op_mode, priv_level;
@@ -578,20 +600,6 @@ qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
adapter->ahw.pci_func = func;
- if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
- adapter->capabilities = nic_info.capabilities;
-
- if (adapter->capabilities & BIT_6)
- adapter->flags |= QLCNIC_ESWITCH_ENABLED;
- else
- adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
- }
-
- if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
- adapter->nic_ops = &qlcnic_ops;
- return adapter->fw_hal_version;
- }
-
/* Determine function privilege level */
priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
op_mode = readl(priv_op);
@@ -600,37 +608,14 @@ qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
else
priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
- switch (priv_level) {
- case QLCNIC_MGMT_FUNC:
- adapter->op_mode = QLCNIC_MGMT_FUNC;
- adapter->nic_ops = &qlcnic_ops;
- qlcnic_init_pci_info(adapter);
- /* Set privilege level for other functions */
- qlcnic_set_function_modes(adapter);
- dev_info(&adapter->pdev->dev,
- "HAL Version: %d, Management function\n",
- adapter->fw_hal_version);
- break;
- case QLCNIC_PRIV_FUNC:
- adapter->op_mode = QLCNIC_PRIV_FUNC;
- dev_info(&adapter->pdev->dev,
- "HAL Version: %d, Privileged function\n",
- adapter->fw_hal_version);
- adapter->nic_ops = &qlcnic_ops;
- break;
- case QLCNIC_NON_PRIV_FUNC:
+ if (priv_level == QLCNIC_NON_PRIV_FUNC) {
adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
dev_info(&adapter->pdev->dev,
"HAL Version: %d Non Privileged function\n",
adapter->fw_hal_version);
adapter->nic_ops = &qlcnic_vf_ops;
- break;
- default:
- dev_info(&adapter->pdev->dev, "Unknown function mode: %d\n",
- priv_level);
- return 0;
- }
- return adapter->fw_hal_version;
+ } else {
+ adapter->nic_ops = &qlcnic_ops;
+ }
}
static int
@@ -663,10 +648,7 @@ qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
adapter->ahw.pci_base0 = mem_ptr0;
adapter->ahw.pci_len0 = pci_len0;
- if (!qlcnic_get_driver_mode(adapter)) {
- iounmap(adapter->ahw.pci_base0);
- return -EIO;
- }
+ qlcnic_check_vf(adapter);
adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));
@@ -701,25 +683,7 @@ static void
qlcnic_check_options(struct qlcnic_adapter *adapter)
{
u32 fw_major, fw_minor, fw_build;
- char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
- char serial_num[32];
- int i, offset, val;
- int *ptr32;
struct pci_dev *pdev = adapter->pdev;
- struct qlcnic_info nic_info;
- adapter->driver_mismatch = 0;
-
- ptr32 = (int *)&serial_num;
- offset = QLCNIC_FW_SERIAL_NUM_OFFSET;
- for (i = 0; i < 8; i++) {
- if (qlcnic_rom_fast_read(adapter, offset, &val) == -1) {
- dev_err(&pdev->dev, "error reading board info\n");
- adapter->driver_mismatch = 1;
- return;
- }
- ptr32[i] = cpu_to_le32(val);
- offset += sizeof(u32);
- }
fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
@@ -727,14 +691,6 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
- if (adapter->portnum == 0) {
- get_brd_name(adapter, brd_name);
-
- pr_info("%s: %s Board Chip rev 0x%x\n",
- module_name(THIS_MODULE),
- brd_name, adapter->ahw.revision_id);
- }
-
dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
fw_major, fw_minor, fw_build);
@@ -748,109 +704,333 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
}
- if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
- adapter->physical_port = nic_info.phys_port;
- adapter->switch_mode = nic_info.switch_mode;
- adapter->max_tx_ques = nic_info.max_tx_ques;
- adapter->max_rx_ques = nic_info.max_rx_ques;
- adapter->capabilities = nic_info.capabilities;
- adapter->max_mac_filters = nic_info.max_mac_filters;
- adapter->max_mtu = nic_info.max_mtu;
- }
-
adapter->msix_supported = !!use_msi_x;
adapter->rss_supported = !!use_msi_x;
adapter->num_txd = MAX_CMD_DESCRIPTORS;
- adapter->max_rds_rings = 2;
+ adapter->max_rds_rings = MAX_RDS_RINGS;
+}
+
+static int
+qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
+{
+ int err;
+ struct qlcnic_info nic_info;
+
+ err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func);
+ if (err)
+ return err;
+
+ adapter->physical_port = nic_info.phys_port;
+ adapter->switch_mode = nic_info.switch_mode;
+ adapter->max_tx_ques = nic_info.max_tx_ques;
+ adapter->max_rx_ques = nic_info.max_rx_ques;
+ adapter->capabilities = nic_info.capabilities;
+ adapter->max_mac_filters = nic_info.max_mac_filters;
+ adapter->max_mtu = nic_info.max_mtu;
+
+ if (adapter->capabilities & BIT_6)
+ adapter->flags |= QLCNIC_ESWITCH_ENABLED;
+ else
+ adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
+
+ return err;
+}
+
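+/* Cache the eSwitch VLAN policy locally: pvid feeds the rx tag check in
+ * qlcnic_check_rx_tagging(), and TAGGING_ENABLED mirrors the inverse of
+ * discard_tagged. */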
+static void
+qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
+{
+ if (esw_cfg->discard_tagged)
+ adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
+ else
+ adapter->flags |= QLCNIC_TAGGING_ENABLED;
+
+ if (esw_cfg->vlan_id)
+ adapter->pvid = esw_cfg->vlan_id;
+ else
+ adapter->pvid = 0;
+}
+
+static void
+qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
+{
+ adapter->flags &= ~QLCNIC_MACSPOOF;
+ adapter->flags &= ~QLCNIC_MAC_OVERRIDE_DISABLED;
+
+ if (esw_cfg->mac_anti_spoof)
+ adapter->flags |= QLCNIC_MACSPOOF;
+
+ if (!esw_cfg->mac_override)
+ adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;
+
+ qlcnic_set_netdev_features(adapter, esw_cfg);
+}
+
+static int
+qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_esw_func_cfg esw_cfg;
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return 0;
+
+ esw_cfg.pci_func = adapter->ahw.pci_func;
+ if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
+ return -EIO;
+ qlcnic_set_vlan_config(adapter, &esw_cfg);
+ qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
+
+ return 0;
+}
+
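+/* Translate the eSwitch offload bits into netdev features: BIT_0 gates
+ * checksum/SG (and rx csum), BIT_1 and BIT_2 gate TSO and TSO6. */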
+static void
+qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
+{
+ struct net_device *netdev = adapter->netdev;
+ unsigned long features, vlan_features;
+
+ features = (NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_GRO);
+ vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM);
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
+ features |= (NETIF_F_TSO | NETIF_F_TSO6);
+ vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
+ }
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+ features |= NETIF_F_LRO;
+
+ if (esw_cfg->offload_flags & BIT_0) {
+ netdev->features |= features;
+ adapter->rx_csum = 1;
+ if (!(esw_cfg->offload_flags & BIT_1))
+ netdev->features &= ~NETIF_F_TSO;
+ if (!(esw_cfg->offload_flags & BIT_2))
+ netdev->features &= ~NETIF_F_TSO6;
+ } else {
+ netdev->features &= ~features;
+ adapter->rx_csum = 0;
+ }
+
+ netdev->vlan_features = (features & vlan_features);
+}
+
+static int
+qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
+{
+ void __iomem *priv_op;
+ u32 op_mode, priv_level;
+ int err = 0;
+
+ err = qlcnic_initialize_nic(adapter);
+ if (err)
+ return err;
+
+ if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
+ return 0;
+
+ priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
+ op_mode = readl(priv_op);
+
+ if (op_mode == QLC_DEV_DRV_DEFAULT)
+ priv_level = QLCNIC_MGMT_FUNC;
+ else
+ priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
+
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+ if (priv_level == QLCNIC_MGMT_FUNC) {
+ adapter->op_mode = QLCNIC_MGMT_FUNC;
+ err = qlcnic_init_pci_info(adapter);
+ if (err)
+ return err;
+ /* Set privilege level for other functions */
+ qlcnic_set_function_modes(adapter);
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d, Management function\n",
+ adapter->fw_hal_version);
+ } else if (priv_level == QLCNIC_PRIV_FUNC) {
+ adapter->op_mode = QLCNIC_PRIV_FUNC;
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d, Privileged function\n",
+ adapter->fw_hal_version);
+ }
+ }
+
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+ return err;
+}
+
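+/* Give every NIC function sane eSwitch defaults on first start: checksum
+ * offload and MAC override on, TSO/TSO6 when the firmware supports TSO. */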
+static int
+qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_esw_func_cfg esw_cfg;
+ struct qlcnic_npar_info *npar;
+ u8 i;
+
+ if (adapter->need_fw_reset)
+ return 0;
+
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+ continue;
+ memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
+ esw_cfg.pci_func = i;
+ esw_cfg.offload_flags = BIT_0;
+ esw_cfg.mac_override = BIT_0;
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
+ esw_cfg.offload_flags |= (BIT_1 | BIT_2);
+ if (qlcnic_config_switch_port(adapter, &esw_cfg))
+ return -EIO;
+ npar = &adapter->npars[i];
+ npar->pvid = esw_cfg.vlan_id;
+ npar->mac_override = esw_cfg.mac_override;
+ npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
+ npar->discard_tagged = esw_cfg.discard_tagged;
+ npar->promisc_mode = esw_cfg.promisc_mode;
+ npar->offload_flags = esw_cfg.offload_flags;
+ }
+
+ return 0;
+}
+
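+/* Reapply a function's saved eSwitch settings after a firmware reset:
+ * first the port defaults, then re-add its port VLAN id. */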
+static int
+qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_npar_info *npar, int pci_func)
+{
+ struct qlcnic_esw_func_cfg esw_cfg;
+
+ memset(&esw_cfg, 0, sizeof(esw_cfg));
+ esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
+ esw_cfg.pci_func = pci_func;
+ esw_cfg.vlan_id = npar->pvid;
+ esw_cfg.mac_override = npar->mac_override;
+ esw_cfg.discard_tagged = npar->discard_tagged;
+ esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
+ esw_cfg.offload_flags = npar->offload_flags;
+ esw_cfg.promisc_mode = npar->promisc_mode;
+ if (qlcnic_config_switch_port(adapter, &esw_cfg))
+ return -EIO;
+
+ esw_cfg.op_mode = QLCNIC_ADD_VLAN;
+ if (qlcnic_config_switch_port(adapter, &esw_cfg))
+ return -EIO;
+
+ return 0;
}
static int
qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
{
- int i, err = 0;
+ int i, err;
struct qlcnic_npar_info *npar;
struct qlcnic_info nic_info;
- if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
- !adapter->need_fw_reset)
+ if (!adapter->need_fw_reset)
return 0;
- if (adapter->op_mode == QLCNIC_MGMT_FUNC) {
- /* Set the NPAR config data after FW reset */
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
- npar = &adapter->npars[i];
- if (npar->type != QLCNIC_TYPE_NIC)
- continue;
- err = qlcnic_get_nic_info(adapter, &nic_info, i);
- if (err)
- goto err_out;
- nic_info.min_tx_bw = npar->min_bw;
- nic_info.max_tx_bw = npar->max_bw;
- err = qlcnic_set_nic_info(adapter, &nic_info);
+ /* Set the NPAR config data after FW reset */
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ npar = &adapter->npars[i];
+ if (npar->type != QLCNIC_TYPE_NIC)
+ continue;
+ err = qlcnic_get_nic_info(adapter, &nic_info, i);
+ if (err)
+ return err;
+ nic_info.min_tx_bw = npar->min_bw;
+ nic_info.max_tx_bw = npar->max_bw;
+ err = qlcnic_set_nic_info(adapter, &nic_info);
+ if (err)
+ return err;
+
+ if (npar->enable_pm) {
+ err = qlcnic_config_port_mirroring(adapter,
+ npar->dest_npar, 1, i);
if (err)
- goto err_out;
+ return err;
+ }
+ err = qlcnic_reset_eswitch_config(adapter, npar, i);
+ if (err)
+ return err;
+ }
+ return 0;
+}
- if (npar->enable_pm) {
- err = qlcnic_config_port_mirroring(adapter,
- npar->dest_npar, 1, i);
- if (err)
- goto err_out;
+static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
+{
+ u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
+ u32 npar_state;
- }
- npar->mac_learning = DEFAULT_MAC_LEARN;
- npar->host_vlan_tag = 0;
- npar->promisc_mode = 0;
- npar->discard_tagged = 0;
- npar->vlan_id = 0;
- }
+ if (adapter->op_mode == QLCNIC_MGMT_FUNC)
+ return 0;
+
+ npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
+ msleep(1000);
+ npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
}
-err_out:
+ if (!npar_opt_timeo) {
+ dev_err(&adapter->pdev->dev,
+ "Waiting for NPAR state to opertional timeout\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int
+qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+ adapter->op_mode != QLCNIC_MGMT_FUNC)
+ return 0;
+
+ err = qlcnic_set_default_offload_settings(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_reset_npar_config(adapter);
+ if (err)
+ return err;
+
+ qlcnic_dev_set_npar_ready(adapter);
+
return err;
}
static int
qlcnic_start_firmware(struct qlcnic_adapter *adapter)
{
- int val, err, first_boot;
+ int err;
err = qlcnic_can_start_firmware(adapter);
if (err < 0)
return err;
else if (!err)
- goto wait_init;
-
- first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
- if (first_boot == 0x55555555)
- /* This is the first boot after power up */
- QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
+ goto check_fw_status;
if (load_fw_file)
qlcnic_request_firmware(adapter);
else {
- if (qlcnic_check_flash_fw_ver(adapter))
+ err = qlcnic_check_flash_fw_ver(adapter);
+ if (err)
goto err_out;
adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
}
err = qlcnic_need_fw_reset(adapter);
- if (err < 0)
- goto err_out;
if (err == 0)
- goto wait_init;
-
- if (first_boot != 0x55555555) {
- QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
- QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
- qlcnic_pinit_from_rom(adapter);
- msleep(1);
- }
-
- QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
- QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
+ goto check_fw_status;
+ err = qlcnic_pinit_from_rom(adapter);
+ if (err)
+ goto err_out;
qlcnic_set_port_mode(adapter);
err = qlcnic_load_firmware(adapter);
@@ -858,26 +1038,27 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
goto err_out;
qlcnic_release_firmware(adapter);
+ QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);
- val = (_QLCNIC_LINUX_MAJOR << 16)
- | ((_QLCNIC_LINUX_MINOR << 8))
- | (_QLCNIC_LINUX_SUBVERSION);
- QLCWR32(adapter, CRB_DRIVER_VERSION, val);
-
-wait_init:
- /* Handshake with the card before we register the devices. */
- err = qlcnic_init_firmware(adapter);
+check_fw_status:
+ err = qlcnic_check_fw_status(adapter);
if (err)
goto err_out;
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
qlcnic_idc_debug_info(adapter, 1);
- qlcnic_check_options(adapter);
- if (qlcnic_reset_npar_config(adapter))
+ err = qlcnic_check_eswitch_mode(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failed for eswitch\n");
+ goto err_out;
+ }
+ err = qlcnic_set_mgmt_operations(adapter);
+ if (err)
goto err_out;
- qlcnic_dev_set_npar_ready(adapter);
+ qlcnic_check_options(adapter);
adapter->need_fw_reset = 0;
qlcnic_release_firmware(adapter);
@@ -886,6 +1067,7 @@ wait_init:
err_out:
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
dev_err(&adapter->pdev->dev, "Device state set to failed\n");
+
qlcnic_release_firmware(adapter);
return err;
}
@@ -969,6 +1151,8 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
return 0;
+ if (qlcnic_set_eswitch_port_config(adapter))
+ return -EIO;
if (qlcnic_fw_create_ctx(adapter))
return -EIO;
@@ -988,7 +1172,7 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
qlcnic_config_intr_coalesce(adapter);
- if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+ if (netdev->features & NETIF_F_LRO)
qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
qlcnic_napi_enable(adapter);
@@ -1031,6 +1215,9 @@ __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
qlcnic_free_mac_list(adapter);
+ if (adapter->fhash.fnum)
+ qlcnic_delete_lb_filters(adapter);
+
qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
qlcnic_napi_disable(adapter);
@@ -1267,7 +1454,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM | NETIF_F_GRO);
+ NETIF_F_IPV6_CSUM | NETIF_F_GRO | NETIF_F_HW_VLAN_RX);
netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM);
@@ -1286,12 +1473,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
netdev->features |= NETIF_F_LRO;
-
netdev->irq = adapter->msix_entries[0].vector;
- if (qlcnic_read_mac_addr(adapter))
- dev_warn(&pdev->dev, "failed to read mac addr\n");
-
netif_carrier_off(netdev);
netif_stop_queue(netdev);
@@ -1328,6 +1511,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int err;
uint8_t revision_id;
uint8_t pci_using_dac;
+ char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
err = pci_enable_device(pdev);
if (err)
@@ -1385,10 +1569,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_iounmap;
}
- if (qlcnic_read_mac_addr(adapter))
- dev_warn(&pdev->dev, "failed to read mac addr\n");
-
- if (qlcnic_setup_idc_param(adapter))
+ err = qlcnic_setup_idc_param(adapter);
+ if (err)
goto err_out_iounmap;
err = adapter->nic_ops->start_firmware(adapter);
@@ -1397,6 +1579,17 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_decr_ref;
}
+ if (qlcnic_read_mac_addr(adapter))
+ dev_warn(&pdev->dev, "failed to read mac addr\n");
+
+ if (adapter->portnum == 0) {
+ get_brd_name(adapter, brd_name);
+
+ pr_info("%s: %s Board Chip rev 0x%x\n",
+ module_name(THIS_MODULE),
+ brd_name, adapter->ahw.revision_id);
+ }
+
qlcnic_clear_stats(adapter);
qlcnic_setup_intr(adapter);
@@ -1420,6 +1613,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
+ qlcnic_alloc_lb_filters_mem(adapter);
qlcnic_create_diag_entries(adapter);
return 0;
@@ -1428,7 +1622,7 @@ err_out_disable_msi:
qlcnic_teardown_intr(adapter);
err_out_decr_ref:
- qlcnic_clr_all_drv_state(adapter);
+ qlcnic_clr_all_drv_state(adapter, 0);
err_out_iounmap:
qlcnic_cleanup_pci_map(adapter);
@@ -1467,10 +1661,12 @@ static void __devexit qlcnic_remove(struct pci_dev *pdev)
if (adapter->eswitch != NULL)
kfree(adapter->eswitch);
- qlcnic_clr_all_drv_state(adapter);
+ qlcnic_clr_all_drv_state(adapter, 0);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ qlcnic_free_lb_filters_mem(adapter);
+
qlcnic_teardown_intr(adapter);
qlcnic_remove_diag_entries(adapter);
@@ -1499,7 +1695,7 @@ static int __qlcnic_shutdown(struct pci_dev *pdev)
if (netif_running(netdev))
qlcnic_down(adapter, netdev);
- qlcnic_clr_all_drv_state(adapter);
+ qlcnic_clr_all_drv_state(adapter, 0);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
@@ -1563,7 +1759,7 @@ qlcnic_resume(struct pci_dev *pdev)
if (err)
goto done;
- qlcnic_config_indev_addr(netdev, NETDEV_UP);
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
}
done:
netif_device_attach(netdev);
@@ -1577,9 +1773,6 @@ static int qlcnic_open(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int err;
- if (adapter->driver_mismatch)
- return -EIO;
-
err = qlcnic_attach(adapter);
if (err)
return err;
@@ -1609,6 +1802,119 @@ static int qlcnic_close(struct net_device *netdev)
}
static void
+qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
+{
+ void *head;
+ int i;
+
+ if (!qlcnic_mac_learn)
+ return;
+
+ spin_lock_init(&adapter->mac_learn_lock);
+
+ head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
+ GFP_KERNEL);
+ if (!head)
+ return;
+
+ adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
+ adapter->fhash.fhead = (struct hlist_head *)head;
+
+ for (i = 0; i < adapter->fhash.fmax; i++)
+ INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
+}
+
+static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fhash.fmax && adapter->fhash.fhead)
+ kfree(adapter->fhash.fhead);
+
+ adapter->fhash.fhead = NULL;
+ adapter->fhash.fmax = 0;
+}
+
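+/* Queue a MAC-learning request on the TX ring: build a qlcnic_nic_req
+ * descriptor carrying the learned source address and optional VLAN id. */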
+static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
+ u64 uaddr, u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
+{
+ struct cmd_desc_type0 *hwdesc;
+ struct qlcnic_nic_req *req;
+ struct qlcnic_mac_req *mac_req;
+ u32 producer;
+ u64 word;
+
+ producer = tx_ring->producer;
+ hwdesc = &tx_ring->desc_head[tx_ring->producer];
+
+ req = (struct qlcnic_nic_req *)hwdesc;
+ memset(req, 0, sizeof(struct qlcnic_nic_req));
+ req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
+
+ word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
+ req->req_hdr = cpu_to_le64(word);
+
+ mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
+ mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
+ memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
+
+ req->words[1] = cpu_to_le64(vlan_id);
+
+ tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
+}
+
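+/* Fold bits 16-18 and 40-42 of the MAC address into a 6-bit hash that
+ * indexes the loopback filter table. */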
+#define QLCNIC_MAC_HASH(MAC)\
+ ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
+
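+/* Learn the source MAC of an outgoing frame: if it is not already in the
+ * filter hash, program it into the adapter and track it for pruning. */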
+static void
+qlcnic_send_filter(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring,
+ struct cmd_desc_type0 *first_desc,
+ struct sk_buff *skb)
+{
+ struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+ struct qlcnic_filter *fil, *tmp_fil;
+ struct hlist_node *tmp_hnode, *n;
+ struct hlist_head *head;
+ u64 src_addr = 0;
+ u16 vlan_id = 0;
+ u8 hindex;
+
+ if (!compare_ether_addr(phdr->h_source, adapter->mac_addr))
+ return;
+
+ if (adapter->fhash.fnum >= adapter->fhash.fmax)
+ return;
+
+ /* Only NPAR capable devices support VLAN based learning */
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+ vlan_id = first_desc->vlan_TCI;
+ memcpy(&src_addr, phdr->h_source, ETH_ALEN);
+ hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
+ head = &(adapter->fhash.fhead[hindex]);
+
+ hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
+ tmp_fil->vlan_id == vlan_id) {
+ tmp_fil->ftime = jiffies;
+ return;
+ }
+ }
+
+ fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
+ if (!fil)
+ return;
+
+ qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
+
+ fil->ftime = jiffies;
+ fil->vlan_id = vlan_id;
+ memcpy(fil->faddr, &src_addr, ETH_ALEN);
+ spin_lock(&adapter->mac_learn_lock);
+ hlist_add_head(&(fil->fnode), head);
+ adapter->fhash.fnum++;
+ spin_unlock(&adapter->mac_learn_lock);
+}
+
+static void
qlcnic_tso_check(struct net_device *netdev,
struct qlcnic_host_tx_ring *tx_ring,
struct cmd_desc_type0 *first_desc,
@@ -1616,26 +1922,13 @@ qlcnic_tso_check(struct net_device *netdev,
{
u8 opcode = TX_ETHER_PKT;
__be16 protocol = skb->protocol;
- u16 flags = 0, vid = 0;
- int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
+ u16 flags = 0;
+ int copied, offset, copy_len, hdr_len = 0, tso = 0;
struct cmd_desc_type0 *hwdesc;
struct vlan_ethhdr *vh;
struct qlcnic_adapter *adapter = netdev_priv(netdev);
u32 producer = tx_ring->producer;
-
- if (protocol == cpu_to_be16(ETH_P_8021Q)) {
-
- vh = (struct vlan_ethhdr *)skb->data;
- protocol = vh->h_vlan_encapsulated_proto;
- flags = FLAGS_VLAN_TAGGED;
-
- } else if (vlan_tx_tag_present(skb)) {
-
- flags = FLAGS_VLAN_OOB;
- vid = vlan_tx_tag_get(skb);
- qlcnic_set_tx_vlan_tci(first_desc, vid);
- vlan_oob = 1;
- }
+ int vlan_oob = first_desc->flags_opcode & cpu_to_le16(FLAGS_VLAN_OOB);
if (*(skb->data) & BIT_0) {
flags |= BIT_0;
@@ -1706,7 +1999,7 @@ qlcnic_tso_check(struct net_device *netdev,
vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
skb_copy_from_linear_data(skb, vh, 12);
vh->h_vlan_proto = htons(ETH_P_8021Q);
- vh->h_vlan_TCI = htons(vid);
+ vh->h_vlan_TCI = htons(first_desc->vlan_TCI);
skb_copy_from_linear_data_offset(skb, 12,
(char *)vh + 16, copy_len - 16);
@@ -1786,11 +2079,47 @@ out_err:
return -ENOMEM;
}
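+/* Decide how the frame should be VLAN tagged: pick up an 802.1Q header or
+ * an out-of-band tag from the skb, then enforce the port PVID rules. */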
+static int
+qlcnic_check_tx_tagging(struct qlcnic_adapter *adapter,
+ struct sk_buff *skb,
+ struct cmd_desc_type0 *first_desc)
+{
+ u8 opcode = 0;
+ u16 flags = 0;
+ __be16 protocol = skb->protocol;
+ struct vlan_ethhdr *vh;
+
+ if (protocol == cpu_to_be16(ETH_P_8021Q)) {
+ vh = (struct vlan_ethhdr *)skb->data;
+ protocol = vh->h_vlan_encapsulated_proto;
+ flags = FLAGS_VLAN_TAGGED;
+ qlcnic_set_tx_vlan_tci(first_desc, ntohs(vh->h_vlan_TCI));
+ } else if (vlan_tx_tag_present(skb)) {
+ flags = FLAGS_VLAN_OOB;
+ qlcnic_set_tx_vlan_tci(first_desc, vlan_tx_tag_get(skb));
+ }
+ if (unlikely(adapter->pvid)) {
+ if (first_desc->vlan_TCI &&
+ !(adapter->flags & QLCNIC_TAGGING_ENABLED))
+ return -EIO;
+ if (first_desc->vlan_TCI &&
+ (adapter->flags & QLCNIC_TAGGING_ENABLED))
+ goto set_flags;
+
+ flags = FLAGS_VLAN_OOB;
+ qlcnic_set_tx_vlan_tci(first_desc, adapter->pvid);
+ }
+set_flags:
+ qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+ return 0;
+}
+
static inline void
qlcnic_clear_cmddesc(u64 *desc)
{
desc[0] = 0ULL;
desc[2] = 0ULL;
+ desc[7] = 0ULL;
}
netdev_tx_t
@@ -1802,6 +2131,7 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
struct qlcnic_skb_frag *buffrag;
struct cmd_desc_type0 *hwdesc, *first_desc;
struct pci_dev *pdev;
+ struct ethhdr *phdr;
int i, k;
u32 producer;
@@ -1813,6 +2143,13 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
+ if (adapter->flags & QLCNIC_MACSPOOF) {
+ phdr = (struct ethhdr *)skb->data;
+ if (compare_ether_addr(phdr->h_source,
+ adapter->mac_addr))
+ goto drop_packet;
+ }
+
frag_count = skb_shinfo(skb)->nr_frags + 1;
/* 4 fragments per cmd des */
@@ -1834,6 +2171,12 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
pdev = adapter->pdev;
+ first_desc = hwdesc = &tx_ring->desc_head[producer];
+ qlcnic_clear_cmddesc((u64 *)hwdesc);
+
+ if (qlcnic_check_tx_tagging(adapter, skb, first_desc))
+ goto drop_packet;
+
if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
adapter->stats.tx_dma_map_error++;
goto drop_packet;
@@ -1842,9 +2185,6 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
pbuf->skb = skb;
pbuf->frag_count = frag_count;
- first_desc = hwdesc = &tx_ring->desc_head[producer];
- qlcnic_clear_cmddesc((u64 *)hwdesc);
-
qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
qlcnic_set_tx_port(first_desc, adapter->portnum);
@@ -1883,6 +2223,9 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
+ if (qlcnic_mac_learn)
+ qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
+
qlcnic_update_cmd_producer(adapter, tx_ring);
adapter->stats.txbytes += skb->len;
@@ -1937,14 +2280,14 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
struct net_device *netdev = adapter->netdev;
if (adapter->ahw.linkup && !linkup) {
- dev_info(&netdev->dev, "NIC Link is down\n");
+ netdev_info(netdev, "NIC Link is down\n");
adapter->ahw.linkup = 0;
if (netif_running(netdev)) {
netif_carrier_off(netdev);
netif_stop_queue(netdev);
}
} else if (!adapter->ahw.linkup && linkup) {
- dev_info(&netdev->dev, "NIC Link is up\n");
+ netdev_info(netdev, "NIC Link is up\n");
adapter->ahw.linkup = 1;
if (netif_running(netdev)) {
netif_carrier_on(netdev);
@@ -1973,8 +2316,6 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct net_device_stats *stats = &netdev->stats;
- memset(stats, 0, sizeof(*stats));
-
stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
stats->tx_packets = adapter->stats.xmitfinished;
stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
@@ -2180,9 +2521,16 @@ static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev)
{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
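+ /* Service every status (SDS) ring while the IRQ is masked. */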
disable_irq(adapter->irq);
- qlcnic_intr(adapter->irq, adapter);
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ qlcnic_intr(adapter->irq, sds_ring);
+ }
enable_irq(adapter->irq);
}
#endif
@@ -2243,18 +2591,22 @@ qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
}
static void
-qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
+qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
{
u32 val;
if (qlcnic_api_lock(adapter))
goto err;
- val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
- QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
+ QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
- if (!(val & 0x11111111))
+ if (failed) {
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
+ dev_info(&adapter->pdev->dev,
+ "Device state set to Failed. Please Reboot\n");
+ } else if (!(val & 0x11111111))
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
@@ -2275,7 +2627,7 @@ qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
int act, state;
state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
- act = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+ act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
if (((state & 0x11111111) == (act & 0x11111111)) ||
((act & 0x11111111) == ((state >> 1) & 0x11111111)))
@@ -2310,10 +2662,10 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
if (qlcnic_api_lock(adapter))
return -1;
- val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
if (!(val & (1 << (portnum * 4)))) {
QLC_DEV_SET_REF_CNT(val, portnum);
- QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
+ QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
}
prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
@@ -2388,7 +2740,7 @@ qlcnic_fwinit_work(struct work_struct *work)
{
struct qlcnic_adapter *adapter = container_of(work,
struct qlcnic_adapter, fw_work.work);
- u32 dev_state = 0xf, npar_state;
+ u32 dev_state = 0xf;
if (qlcnic_api_lock(adapter))
goto err_ret;
@@ -2402,16 +2754,8 @@ qlcnic_fwinit_work(struct work_struct *work)
}
if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
- npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
- if (npar_state == QLCNIC_DEV_NPAR_RDY) {
- qlcnic_api_unlock(adapter);
- goto wait_npar;
- } else {
- qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
- FW_POLL_DELAY);
- qlcnic_api_unlock(adapter);
- return;
- }
+ qlcnic_api_unlock(adapter);
+ goto wait_npar;
}
if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
@@ -2448,6 +2792,7 @@ skip_ack_check:
if (!adapter->nic_ops->start_firmware(adapter)) {
qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
+ adapter->fw_wait_cnt = 0;
return;
}
goto err_ret;
@@ -2460,27 +2805,25 @@ wait_npar:
QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
switch (dev_state) {
- case QLCNIC_DEV_QUISCENT:
- case QLCNIC_DEV_NEED_QUISCENT:
- case QLCNIC_DEV_NEED_RESET:
- qlcnic_schedule_work(adapter,
- qlcnic_fwinit_work, FW_POLL_DELAY);
- return;
- case QLCNIC_DEV_FAILED:
- break;
-
- default:
+ case QLCNIC_DEV_READY:
if (!adapter->nic_ops->start_firmware(adapter)) {
qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
+ adapter->fw_wait_cnt = 0;
return;
}
+ case QLCNIC_DEV_FAILED:
+ break;
+ default:
+ qlcnic_schedule_work(adapter,
+ qlcnic_fwinit_work, FW_POLL_DELAY);
+ return;
}
err_ret:
dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
"fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
netif_device_attach(adapter->netdev);
- qlcnic_clr_all_drv_state(adapter);
+ qlcnic_clr_all_drv_state(adapter, 0);
}
static void
@@ -2516,8 +2859,23 @@ err_ret:
dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
status, adapter->temp);
netif_device_attach(netdev);
- qlcnic_clr_all_drv_state(adapter);
+ qlcnic_clr_all_drv_state(adapter, 1);
+}
+
+/* Transition NPAR state to non-operational */
+static void
+qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
+{
+ u32 state;
+
+ state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ if (state == QLCNIC_DEV_NPAR_NON_OPER)
+ return;
+ if (qlcnic_api_lock(adapter))
+ return;
+ QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
+ qlcnic_api_unlock(adapter);
}
/* Transition to RESET state from READY state only */
@@ -2538,6 +2896,7 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
qlcnic_idc_debug_info(adapter, 0);
}
+ QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
qlcnic_api_unlock(adapter);
}
@@ -2545,21 +2904,11 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
static void
qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
{
- u32 state;
-
- if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
- adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
- return;
if (qlcnic_api_lock(adapter))
return;
- state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
-
- if (state != QLCNIC_DEV_NPAR_RDY) {
- QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
- QLCNIC_DEV_NPAR_RDY);
- QLCDB(adapter, DRV, "NPAR READY state set\n");
- }
+ QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
+ QLCDB(adapter, DRV, "NPAR operational state set\n");
qlcnic_api_unlock(adapter);
}
@@ -2590,12 +2939,26 @@ qlcnic_attach_work(struct work_struct *work)
struct qlcnic_adapter *adapter = container_of(work,
struct qlcnic_adapter, fw_work.work);
struct net_device *netdev = adapter->netdev;
+ u32 npar_state;
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC) {
+ npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
+ qlcnic_clr_all_drv_state(adapter, 0);
+ else if (npar_state != QLCNIC_DEV_NPAR_OPER)
+ qlcnic_schedule_work(adapter, qlcnic_attach_work,
+ FW_POLL_DELAY);
+ else
+ goto attach;
+ QLCDB(adapter, DRV, "Waiting for NPAR state to operational\n");
+ return;
+ }
+attach:
if (netif_running(netdev)) {
if (qlcnic_up(adapter, netdev))
goto done;
- qlcnic_config_indev_addr(netdev, NETDEV_UP);
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
}
done:
@@ -2611,7 +2974,7 @@ done:
static int
qlcnic_check_health(struct qlcnic_adapter *adapter)
{
- u32 state = 0, heartbit;
+ u32 state = 0, heartbeat;
struct net_device *netdev = adapter->netdev;
if (qlcnic_check_temp(adapter))
@@ -2621,12 +2984,15 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
qlcnic_dev_request_reset(adapter);
state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT)
+ if (state == QLCNIC_DEV_NEED_RESET ||
+ state == QLCNIC_DEV_NEED_QUISCENT) {
+ qlcnic_set_npar_non_operational(adapter);
adapter->need_fw_reset = 1;
+ }
- heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
- if (heartbit != adapter->heartbit) {
- adapter->heartbit = heartbit;
+ heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ if (heartbeat != adapter->heartbeat) {
+ adapter->heartbeat = heartbeat;
adapter->fw_fail_cnt = 0;
if (adapter->need_fw_reset)
goto detach;
@@ -2677,6 +3043,9 @@ qlcnic_fw_poll_work(struct work_struct *work)
if (qlcnic_check_health(adapter))
return;
+ if (adapter->fhash.fnum)
+ qlcnic_prune_lb_filters(adapter);
+
reschedule:
qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
}
@@ -2723,7 +3092,7 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
if (qlcnic_api_lock(adapter))
return -EINVAL;
- if (first_func) {
+ if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
adapter->need_fw_reset = 1;
set_bit(__QLCNIC_START_FW, &adapter->state);
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
@@ -2741,7 +3110,7 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
if (netif_running(netdev)) {
err = qlcnic_attach(adapter);
if (err) {
- qlcnic_clr_all_drv_state(adapter);
+ qlcnic_clr_all_drv_state(adapter, 1);
clear_bit(__QLCNIC_AER, &adapter->state);
netif_device_attach(netdev);
return err;
@@ -2751,7 +3120,7 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
if (err)
goto done;
- qlcnic_config_indev_addr(netdev, NETDEV_UP);
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
}
done:
netif_device_attach(netdev);
@@ -2807,7 +3176,6 @@ static void qlcnic_io_resume(struct pci_dev *pdev)
FW_POLL_DELAY);
}
-
static int
qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
{
@@ -2817,8 +3185,20 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
if (err)
return err;
+ err = qlcnic_check_npar_operational(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_initialize_nic(adapter);
+ if (err)
+ return err;
+
qlcnic_check_options(adapter);
+ err = qlcnic_set_eswitch_port_config(adapter);
+ if (err)
+ return err;
+
adapter->need_fw_reset = 0;
return err;
@@ -3078,9 +3458,6 @@ validate_pm_config(struct qlcnic_adapter *adapter,
if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
return QL_STATUS_INVALID_PARAM;
- if (!IS_VALID_MODE(pm_cfg[i].action))
- return QL_STATUS_INVALID_PARAM;
-
s_esw_id = adapter->npars[src_pci_func].phy_port;
d_esw_id = adapter->npars[dest_pci_func].phy_port;
@@ -3114,7 +3491,7 @@ qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
return ret;
for (i = 0; i < count; i++) {
pci_func = pm_cfg[i].pci_func;
- action = pm_cfg[i].action;
+ action = !!pm_cfg[i].action;
id = adapter->npars[pci_func].phy_port;
ret = qlcnic_config_port_mirroring(adapter, id,
action, pci_func);
@@ -3125,7 +3502,7 @@ qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
for (i = 0; i < count; i++) {
pci_func = pm_cfg[i].pci_func;
id = adapter->npars[pci_func].phy_port;
- adapter->npars[pci_func].enable_pm = pm_cfg[i].action;
+ adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
adapter->npars[pci_func].dest_npar = id;
}
return size;
@@ -3157,30 +3534,45 @@ qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
static int
validate_esw_config(struct qlcnic_adapter *adapter,
- struct qlcnic_esw_func_cfg *esw_cfg, int count)
+ struct qlcnic_esw_func_cfg *esw_cfg, int count)
{
+ u32 op_mode;
u8 pci_func;
int i;
+ op_mode = readl(adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE);
+
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
if (pci_func >= QLCNIC_MAX_PCI_FUNC)
return QL_STATUS_INVALID_PARAM;
- if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
- return QL_STATUS_INVALID_PARAM;
+ if (adapter->op_mode == QLCNIC_MGMT_FUNC)
+ if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
+ return QL_STATUS_INVALID_PARAM;
- if (esw_cfg->host_vlan_tag == 1)
+ switch (esw_cfg[i].op_mode) {
+ case QLCNIC_PORT_DEFAULTS:
+ if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
+ QLCNIC_NON_PRIV_FUNC) {
+ esw_cfg[i].mac_anti_spoof = 0;
+ esw_cfg[i].mac_override = 1;
+ }
+ break;
+ case QLCNIC_ADD_VLAN:
if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
return QL_STATUS_INVALID_PARAM;
-
- if (!IS_VALID_MODE(esw_cfg[i].promisc_mode)
- || !IS_VALID_MODE(esw_cfg[i].host_vlan_tag)
- || !IS_VALID_MODE(esw_cfg[i].mac_learning)
- || !IS_VALID_MODE(esw_cfg[i].discard_tagged))
+ if (!esw_cfg[i].op_type)
+ return QL_STATUS_INVALID_PARAM;
+ break;
+ case QLCNIC_DEL_VLAN:
+ if (!esw_cfg[i].op_type)
+ return QL_STATUS_INVALID_PARAM;
+ break;
+ default:
return QL_STATUS_INVALID_PARAM;
+ }
}
-
return 0;
}
@@ -3191,8 +3583,9 @@ qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_esw_func_cfg *esw_cfg;
+ struct qlcnic_npar_info *npar;
int count, rem, i, ret;
- u8 id, pci_func;
+ u8 pci_func, op_mode = 0;
count = size / sizeof(struct qlcnic_esw_func_cfg);
rem = size % sizeof(struct qlcnic_esw_func_cfg);
@@ -3205,30 +3598,55 @@ qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
return ret;
for (i = 0; i < count; i++) {
- pci_func = esw_cfg[i].pci_func;
- id = adapter->npars[pci_func].phy_port;
- ret = qlcnic_config_switch_port(adapter, id,
- esw_cfg[i].host_vlan_tag,
- esw_cfg[i].discard_tagged,
- esw_cfg[i].promisc_mode,
- esw_cfg[i].mac_learning,
- esw_cfg[i].pci_func,
- esw_cfg[i].vlan_id);
- if (ret)
- return ret;
+ if (adapter->op_mode == QLCNIC_MGMT_FUNC)
+ if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
+ return QL_STATUS_INVALID_PARAM;
+
+ if (adapter->ahw.pci_func != esw_cfg[i].pci_func)
+ continue;
+
+ op_mode = esw_cfg[i].op_mode;
+ qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
+ esw_cfg[i].op_mode = op_mode;
+ esw_cfg[i].pci_func = adapter->ahw.pci_func;
+
+ switch (esw_cfg[i].op_mode) {
+ case QLCNIC_PORT_DEFAULTS:
+ qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
+ break;
+ case QLCNIC_ADD_VLAN:
+ qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
+ break;
+ case QLCNIC_DEL_VLAN:
+ esw_cfg[i].vlan_id = 0;
+ qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
+ break;
+ }
}
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+ goto out;
+
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
- adapter->npars[pci_func].promisc_mode = esw_cfg[i].promisc_mode;
- adapter->npars[pci_func].mac_learning = esw_cfg[i].mac_learning;
- adapter->npars[pci_func].vlan_id = esw_cfg[i].vlan_id;
- adapter->npars[pci_func].discard_tagged =
- esw_cfg[i].discard_tagged;
- adapter->npars[pci_func].host_vlan_tag =
- esw_cfg[i].host_vlan_tag;
+ npar = &adapter->npars[pci_func];
+ switch (esw_cfg[i].op_mode) {
+ case QLCNIC_PORT_DEFAULTS:
+ npar->promisc_mode = esw_cfg[i].promisc_mode;
+ npar->mac_override = esw_cfg[i].mac_override;
+ npar->offload_flags = esw_cfg[i].offload_flags;
+ npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
+ npar->discard_tagged = esw_cfg[i].discard_tagged;
+ break;
+ case QLCNIC_ADD_VLAN:
+ npar->pvid = esw_cfg[i].vlan_id;
+ break;
+ case QLCNIC_DEL_VLAN:
+ npar->pvid = 0;
+ break;
+ }
}
-
+out:
return size;
}
@@ -3239,7 +3657,7 @@ qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
- int i;
+ u8 i;
if (size != sizeof(esw_cfg))
return QL_STATUS_INVALID_PARAM;
@@ -3247,12 +3665,9 @@ qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
continue;
-
- esw_cfg[i].host_vlan_tag = adapter->npars[i].host_vlan_tag;
- esw_cfg[i].promisc_mode = adapter->npars[i].promisc_mode;
- esw_cfg[i].discard_tagged = adapter->npars[i].discard_tagged;
- esw_cfg[i].vlan_id = adapter->npars[i].vlan_id;
- esw_cfg[i].mac_learning = adapter->npars[i].mac_learning;
+ esw_cfg[i].pci_func = i;
+ if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
+ return QL_STATUS_INVALID_PARAM;
}
memcpy(buf, &esw_cfg, size);
@@ -3355,21 +3770,136 @@ qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
}
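+/* The sysfs attributes below expose per-function port statistics and
+ * per-eswitch statistics; their write handlers clear the counters. */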
static ssize_t
+qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_esw_statistics port_stats;
+ int ret;
+
+ if (size != sizeof(struct qlcnic_esw_statistics))
+ return QL_STATUS_INVALID_PARAM;
+
+ if (offset >= QLCNIC_MAX_PCI_FUNC)
+ return QL_STATUS_INVALID_PARAM;
+
+ memset(&port_stats, 0, size);
+ ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
+ &port_stats.rx);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
+ &port_stats.tx);
+ if (ret)
+ return ret;
+
+ memcpy(buf, &port_stats, size);
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_esw_statistics esw_stats;
+ int ret;
+
+ if (size != sizeof(struct qlcnic_esw_statistics))
+ return QL_STATUS_INVALID_PARAM;
+
+ if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
+ return QL_STATUS_INVALID_PARAM;
+
+ memset(&esw_stats, 0, size);
+ ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
+ &esw_stats.rx);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
+ &esw_stats.tx);
+ if (ret)
+ return ret;
+
+ memcpy(buf, &esw_stats, size);
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int ret;
+
+ if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
+ return QL_STATUS_INVALID_PARAM;
+
+ ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
+ QLCNIC_QUERY_RX_COUNTER);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
+ QLCNIC_QUERY_TX_COUNTER);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int ret;
+
+ if (offset >= QLCNIC_MAX_PCI_FUNC)
+ return QL_STATUS_INVALID_PARAM;
+
+ ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
+ QLCNIC_QUERY_RX_COUNTER);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
+ QLCNIC_QUERY_TX_COUNTER);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static ssize_t
qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
- struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC];
+ struct qlcnic_pci_info *pci_info;
int i, ret;
if (size != sizeof(pci_cfg))
return QL_STATUS_INVALID_PARAM;
+ pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ if (!pci_info)
+ return -ENOMEM;
+
ret = qlcnic_get_pci_info(adapter, pci_info);
- if (ret)
+ if (ret) {
+ kfree(pci_info);
return ret;
+ }
for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
pci_cfg[i].pci_func = pci_info[i].id;
@@ -3380,8 +3910,8 @@ qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
}
memcpy(buf, &pci_cfg, size);
+ kfree(pci_info);
return size;
-
}
static struct bin_attribute bin_attr_npar_config = {
.attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
@@ -3397,6 +3927,20 @@ static struct bin_attribute bin_attr_pci_config = {
.write = NULL,
};
+static struct bin_attribute bin_attr_port_stats = {
+ .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_get_port_stats,
+ .write = qlcnic_sysfs_clear_port_stats,
+};
+
+static struct bin_attribute bin_attr_esw_stats = {
+ .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_get_esw_stats,
+ .write = qlcnic_sysfs_clear_esw_stats,
+};
+
static struct bin_attribute bin_attr_esw_config = {
.attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
.size = 0,
@@ -3436,6 +3980,9 @@ qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
+ if (device_create_bin_file(dev, &bin_attr_port_stats))
+ dev_info(dev, "failed to create port stats sysfs entry");
+
if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
return;
if (device_create_file(dev, &dev_attr_diag_mode))
@@ -3444,18 +3991,20 @@ qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
dev_info(dev, "failed to create crb sysfs entry\n");
if (device_create_bin_file(dev, &bin_attr_mem))
dev_info(dev, "failed to create mem sysfs entry\n");
- if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
- adapter->op_mode != QLCNIC_MGMT_FUNC)
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return;
+ if (device_create_bin_file(dev, &bin_attr_esw_config))
+ dev_info(dev, "failed to create esw config sysfs entry");
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC)
return;
if (device_create_bin_file(dev, &bin_attr_pci_config))
dev_info(dev, "failed to create pci config sysfs entry");
if (device_create_bin_file(dev, &bin_attr_npar_config))
dev_info(dev, "failed to create npar config sysfs entry");
- if (device_create_bin_file(dev, &bin_attr_esw_config))
- dev_info(dev, "failed to create esw config sysfs entry");
if (device_create_bin_file(dev, &bin_attr_pm_config))
dev_info(dev, "failed to create pm config sysfs entry");
-
+ if (device_create_bin_file(dev, &bin_attr_esw_stats))
+ dev_info(dev, "failed to create eswitch stats sysfs entry");
}
static void
@@ -3463,18 +4012,22 @@ qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
+ device_remove_bin_file(dev, &bin_attr_port_stats);
+
if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
return;
device_remove_file(dev, &dev_attr_diag_mode);
device_remove_bin_file(dev, &bin_attr_crb);
device_remove_bin_file(dev, &bin_attr_mem);
- if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
- adapter->op_mode != QLCNIC_MGMT_FUNC)
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return;
+ device_remove_bin_file(dev, &bin_attr_esw_config);
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC)
return;
device_remove_bin_file(dev, &bin_attr_pci_config);
device_remove_bin_file(dev, &bin_attr_npar_config);
- device_remove_bin_file(dev, &bin_attr_esw_config);
device_remove_bin_file(dev, &bin_attr_pm_config);
+ device_remove_bin_file(dev, &bin_attr_esw_stats);
}
#ifdef CONFIG_INET
@@ -3482,10 +4035,10 @@ qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
static void
-qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
+qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
+ struct net_device *dev, unsigned long event)
{
struct in_device *indev;
- struct qlcnic_adapter *adapter = netdev_priv(dev);
indev = in_dev_get(dev);
if (!indev)
@@ -3509,6 +4062,27 @@ qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
in_dev_put(indev);
}
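+/* Replay the IP address configuration for the base netdev and for every
+ * device in its VLAN group, e.g. on resume or after a FW reset. */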
+static void
+qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct net_device *dev;
+ u16 vid;
+
+ qlcnic_config_indev_addr(adapter, netdev, event);
+
+ if (!adapter->vlgrp)
+ return;
+
+ for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+ dev = vlan_group_get_device(adapter->vlgrp, vid);
+ if (!dev)
+ continue;
+
+ qlcnic_config_indev_addr(adapter, dev, event);
+ }
+}
+
static int qlcnic_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -3535,7 +4109,7 @@ recheck:
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
goto done;
- qlcnic_config_indev_addr(dev, event);
+ qlcnic_config_indev_addr(adapter, dev, event);
done:
return NOTIFY_DONE;
}
@@ -3552,7 +4126,7 @@ qlcnic_inetaddr_event(struct notifier_block *this,
dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
recheck:
- if (dev == NULL || !netif_running(dev))
+ if (dev == NULL)
goto done;
if (dev->priv_flags & IFF_802_1Q_VLAN) {
@@ -3595,7 +4169,7 @@ static struct notifier_block qlcnic_inetaddr_cb = {
};
#else
static void
-qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
+qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
{ }
#endif
static struct pci_error_handlers qlcnic_err_handler = {
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 8d63f69b27d9..4ffebe83d883 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -1566,7 +1566,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
rx_ring->rx_packets++;
rx_ring->rx_bytes += skb->len;
skb->protocol = eth_type_trans(skb, ndev);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
if (qdev->rx_csum &&
!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
@@ -1676,7 +1676,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
rx_ring->rx_packets++;
rx_ring->rx_bytes += skb->len;
skb->protocol = eth_type_trans(skb, ndev);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
/* If rx checksum is on, and there are no
* csum or frame errors.
@@ -1996,7 +1996,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
}
skb->protocol = eth_type_trans(skb, ndev);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
/* If rx checksum is on, and there are no
* csum or frame errors.
@@ -2222,10 +2222,11 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
ql_update_cq(rx_ring);
prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
}
+ if (!net_rsp)
+ return 0;
ql_write_cq_idx(rx_ring);
tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
- if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
- net_rsp != NULL) {
+ if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
if (atomic_read(&tx_ring->queue_stopped) &&
(atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
/*
@@ -3888,11 +3889,8 @@ int ql_wol(struct ql_adapter *qdev)
return status;
}
-static int ql_adapter_down(struct ql_adapter *qdev)
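+/* Factored-out helper: synchronously cancel all delayed work items so the
+ * same sequence can be shared by the down, remove and EEH paths. */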
+static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
{
- int i, status = 0;
-
- ql_link_off(qdev);
/* Don't kill the reset worker thread if we
* are in the process of recovery.
@@ -3904,6 +3902,15 @@ static int ql_adapter_down(struct ql_adapter *qdev)
cancel_delayed_work_sync(&qdev->mpi_idc_work);
cancel_delayed_work_sync(&qdev->mpi_core_to_log);
cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
+}
+
+static int ql_adapter_down(struct ql_adapter *qdev)
+{
+ int i, status = 0;
+
+ ql_link_off(qdev);
+
+ ql_cancel_all_work_sync(qdev);
for (i = 0; i < qdev->rss_ring_count; i++)
napi_disable(&qdev->rx_ring[i].napi);
@@ -3919,12 +3926,12 @@ static int ql_adapter_down(struct ql_adapter *qdev)
for (i = 0; i < qdev->rss_ring_count; i++)
netif_napi_del(&qdev->rx_ring[i].napi);
- ql_free_rx_buffers(qdev);
-
status = ql_adapter_reset(qdev);
if (status)
netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
qdev->func);
+ ql_free_rx_buffers(qdev);
+
return status;
}
@@ -4726,6 +4733,7 @@ static void __devexit qlge_remove(struct pci_dev *pdev)
struct net_device *ndev = pci_get_drvdata(pdev);
struct ql_adapter *qdev = netdev_priv(ndev);
del_timer_sync(&qdev->timer);
+ ql_cancel_all_work_sync(qdev);
unregister_netdev(ndev);
ql_release_all(pdev);
pci_disable_device(pdev);
@@ -4745,13 +4753,7 @@ static void ql_eeh_close(struct net_device *ndev)
/* Disabling the timer */
del_timer_sync(&qdev->timer);
- if (test_bit(QL_ADAPTER_UP, &qdev->flags))
- cancel_delayed_work_sync(&qdev->asic_reset_work);
- cancel_delayed_work_sync(&qdev->mpi_reset_work);
- cancel_delayed_work_sync(&qdev->mpi_work);
- cancel_delayed_work_sync(&qdev->mpi_idc_work);
- cancel_delayed_work_sync(&qdev->mpi_core_to_log);
- cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
+ ql_cancel_all_work_sync(qdev);
for (i = 0; i < qdev->rss_ring_count; i++)
netif_napi_del(&qdev->rx_ring[i].napi);
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 142c381e1d73..63db065508f4 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -200,7 +200,7 @@ struct r6040_private {
int old_duplex;
};
-static char version[] __devinitdata = KERN_INFO DRV_NAME
+static char version[] __devinitdata = DRV_NAME
": RDC R6040 NAPI net driver,"
"version "DRV_VERSION " (" DRV_RELDATE ")";
@@ -224,7 +224,8 @@ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
}
/* Write a word of data to the PHY chip */
-static void r6040_phy_write(void __iomem *ioaddr, int phy_addr, int reg, u16 val)
+static void r6040_phy_write(void __iomem *ioaddr,
+ int phy_addr, int reg, u16 val)
{
int limit = 2048;
u16 cmd;
@@ -348,8 +349,8 @@ static int r6040_alloc_rxbufs(struct net_device *dev)
}
desc->skb_ptr = skb;
desc->buf = cpu_to_le32(pci_map_single(lp->pdev,
- desc->skb_ptr->data,
- MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));
+ desc->skb_ptr->data,
+ MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));
desc->status = DSC_OWNER_MAC;
desc = desc->vndescp;
} while (desc != lp->rx_ring);
@@ -491,12 +492,14 @@ static int r6040_close(struct net_device *dev)
/* Free Descriptor memory */
if (lp->rx_ring) {
- pci_free_consistent(pdev, RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma);
+ pci_free_consistent(pdev,
+ RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma);
lp->rx_ring = NULL;
}
if (lp->tx_ring) {
- pci_free_consistent(pdev, TX_DESC_SIZE, lp->tx_ring, lp->tx_ring_dma);
+ pci_free_consistent(pdev,
+ TX_DESC_SIZE, lp->tx_ring, lp->tx_ring_dma);
lp->tx_ring = NULL;
}
@@ -547,7 +550,7 @@ static int r6040_rx(struct net_device *dev, int limit)
}
goto next_descr;
}
-
+
/* Packet successfully received */
new_skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
if (!new_skb) {
@@ -556,13 +559,13 @@ static int r6040_rx(struct net_device *dev, int limit)
}
skb_ptr = descptr->skb_ptr;
skb_ptr->dev = priv->dev;
-
+
/* Do not count the CRC */
skb_put(skb_ptr, descptr->len - 4);
pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev);
-
+
/* Send to upper layer */
netif_receive_skb(skb_ptr);
dev->stats.rx_packets++;
@@ -710,8 +713,10 @@ static int r6040_up(struct net_device *dev)
return ret;
/* improve performance (by RDC guys) */
- r6040_phy_write(ioaddr, 30, 17, (r6040_phy_read(ioaddr, 30, 17) | 0x4000));
- r6040_phy_write(ioaddr, 30, 17, ~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000));
+ r6040_phy_write(ioaddr, 30, 17,
+ (r6040_phy_read(ioaddr, 30, 17) | 0x4000));
+ r6040_phy_write(ioaddr, 30, 17,
+ ~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000));
r6040_phy_write(ioaddr, 0, 19, 0x0000);
r6040_phy_write(ioaddr, 0, 30, 0x01F0);
@@ -751,7 +756,7 @@ static int r6040_open(struct net_device *dev)
ret = request_irq(dev->irq, r6040_interrupt,
IRQF_SHARED, dev->name, dev);
if (ret)
- return ret;
+ goto out;
/* Set MAC address */
r6040_mac_address(dev);
@@ -759,30 +764,37 @@ static int r6040_open(struct net_device *dev)
/* Allocate Descriptor memory */
lp->rx_ring =
pci_alloc_consistent(lp->pdev, RX_DESC_SIZE, &lp->rx_ring_dma);
- if (!lp->rx_ring)
- return -ENOMEM;
+ if (!lp->rx_ring) {
+ ret = -ENOMEM;
+ goto err_free_irq;
+ }
lp->tx_ring =
pci_alloc_consistent(lp->pdev, TX_DESC_SIZE, &lp->tx_ring_dma);
if (!lp->tx_ring) {
- pci_free_consistent(lp->pdev, RX_DESC_SIZE, lp->rx_ring,
- lp->rx_ring_dma);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_free_rx_ring;
}
ret = r6040_up(dev);
- if (ret) {
- pci_free_consistent(lp->pdev, TX_DESC_SIZE, lp->tx_ring,
- lp->tx_ring_dma);
- pci_free_consistent(lp->pdev, RX_DESC_SIZE, lp->rx_ring,
- lp->rx_ring_dma);
- return ret;
- }
+ if (ret)
+ goto err_free_tx_ring;
napi_enable(&lp->napi);
netif_start_queue(dev);
return 0;
+
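+/* Error unwinding in reverse order of allocation. */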
+err_free_tx_ring:
+ pci_free_consistent(lp->pdev, TX_DESC_SIZE, lp->tx_ring,
+ lp->tx_ring_dma);
+err_free_rx_ring:
+ pci_free_consistent(lp->pdev, RX_DESC_SIZE, lp->rx_ring,
+ lp->rx_ring_dma);
+err_free_irq:
+ free_irq(dev->irq, dev);
+out:
+ return ret;
}
static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
@@ -946,7 +958,7 @@ static const struct net_device_ops r6040_netdev_ops = {
.ndo_set_multicast_list = r6040_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_do_ioctl = r6040_ioctl,
.ndo_tx_timeout = r6040_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1039,7 +1051,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
u16 *adrp;
int i;
- printk("%s\n", version);
+ pr_info("%s\n", version);
err = pci_enable_device(pdev);
if (err)
@@ -1113,7 +1125,8 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
/* Some bootloaders/BIOSes do not initialize
* the MAC address; warn about that */
if (!(adrp[0] || adrp[1] || adrp[2])) {
- netdev_warn(dev, "MAC address not initialized, generating random\n");
+ netdev_warn(dev, "MAC address not initialized, "
+ "generating random\n");
random_ether_addr(dev->dev_addr);
}
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 35540411990d..54900332f12d 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1076,7 +1076,12 @@ static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
int ret;
if (vlgrp && (opts2 & RxVlanTag)) {
- __vlan_hwaccel_rx(skb, vlgrp, swab16(opts2 & 0xffff), polling);
+ u16 vtag = swab16(opts2 & 0xffff);
+
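+ /* In NAPI context hand the frame to GRO; otherwise fall back to the
+ * non-polling VLAN receive path. */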
+ if (likely(polling))
+ vlan_gro_receive(&tp->napi, vlgrp, vtag, skb);
+ else
+ __vlan_hwaccel_rx(skb, vlgrp, vtag, polling);
ret = 0;
} else
ret = -1;
@@ -3186,6 +3191,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_R8169_VLAN
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
+ dev->features |= NETIF_F_GRO;
tp->intr_mask = 0xffff;
tp->align = cfg->align;
@@ -3219,11 +3225,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
- if (pci_dev_run_wake(pdev)) {
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
- }
- pm_runtime_idle(&pdev->dev);
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_put_noidle(&pdev->dev);
out:
return rc;
@@ -3246,17 +3249,12 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- pm_runtime_get_sync(&pdev->dev);
-
flush_scheduled_work();
unregister_netdev(dev);
- if (pci_dev_run_wake(pdev)) {
- pm_runtime_disable(&pdev->dev);
- pm_runtime_set_suspended(&pdev->dev);
- }
- pm_runtime_put_noidle(&pdev->dev);
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_get_noresume(&pdev->dev);
/* restore original MAC address */
rtl_rar_set(tp, dev->perm_addr);
@@ -4458,9 +4456,8 @@ static inline int rtl8169_fragmented_frame(u32 status)
return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
}
-static inline void rtl8169_rx_csum(struct sk_buff *skb, struct RxDesc *desc)
+static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
{
- u32 opts1 = le32_to_cpu(desc->opts1);
u32 status = opts1 & RxProtoMask;
if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
@@ -4468,7 +4465,7 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, struct RxDesc *desc)
((status == RxProtoIP) && !(opts1 & IPFail)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
}
static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff,
@@ -4554,8 +4551,6 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
continue;
}
- rtl8169_rx_csum(skb, desc);
-
if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) {
pci_dma_sync_single_for_device(pdev, addr,
pkt_size, PCI_DMA_FROMDEVICE);
@@ -4566,12 +4561,13 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
tp->Rx_skbuff[entry] = NULL;
}
+ rtl8169_rx_csum(skb, status);
skb_put(skb, pkt_size);
skb->protocol = eth_type_trans(skb, dev);
if (rtl8169_rx_vlan_skb(tp, desc, skb, polling) < 0) {
if (likely(polling))
- netif_receive_skb(skb);
+ napi_gro_receive(&tp->napi, skb);
else
netif_rx(skb);
}
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index e26e107f93e0..e68c941926f1 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1245,7 +1245,7 @@ static int rr_open(struct net_device *dev)
init_timer(&rrpriv->timer);
rrpriv->timer.expires = RUN_AT(5*HZ); /* 5 sec. watchdog */
rrpriv->timer.data = (unsigned long)dev;
- rrpriv->timer.function = &rr_timer; /* timer handler */
+ rrpriv->timer.function = rr_timer; /* timer handler */
add_timer(&rrpriv->timer);
netif_start_queue(dev);
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 18bc5b718bbb..c70ad515383a 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -38,8 +38,6 @@
* Tx descriptors that can be associated with each corresponding FIFO.
* intr_type: This defines the type of interrupt. The values can be 0(INTA),
* 2(MSI_X). Default value is '2(MSI_X)'
- * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
- * Possible values '1' for enable '0' for disable. Default is '0'
* lro_max_pkts: This parameter defines the maximum number of packets that
* can be aggregated as a single large packet
* napi: This parameter is used to enable/disable NAPI (polling Rx)
@@ -90,7 +88,7 @@
#include "s2io.h"
#include "s2io-regs.h"
-#define DRV_VERSION "2.0.26.26"
+#define DRV_VERSION "2.0.26.27"
/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
@@ -496,8 +494,6 @@ S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */
-static unsigned int lro_enable = 1;
-module_param_named(lro, lro_enable, uint, 0);
/* Max pkts to be aggregated by LRO at one time. If not specified,
* aggregation happens until we hit the max IP pkt size (64K)
@@ -5124,8 +5120,6 @@ static void s2io_set_multicast(struct net_device *dev)
/* Create the new Rx filter list and update the same in H/W. */
i = 0;
netdev_for_each_mc_addr(ha, dev) {
- memcpy(sp->usr_addrs[i].addr, ha->addr,
- ETH_ALEN);
mac_addr = 0;
for (j = 0; j < ETH_ALEN; j++) {
mac_addr |= ha->addr[j];
@@ -6735,13 +6729,10 @@ static int s2io_ethtool_set_flags(struct net_device *dev, u32 data)
return -EINVAL;
if (data & ETH_FLAG_LRO) {
- if (lro_enable) {
- if (!(dev->features & NETIF_F_LRO)) {
- dev->features |= NETIF_F_LRO;
- changed = 1;
- }
- } else
- rc = -EINVAL;
+ if (!(dev->features & NETIF_F_LRO)) {
+ dev->features |= NETIF_F_LRO;
+ changed = 1;
+ }
} else if (dev->features & NETIF_F_LRO) {
dev->features &= ~NETIF_F_LRO;
changed = 1;
@@ -6750,7 +6741,6 @@ static int s2io_ethtool_set_flags(struct net_device *dev, u32 data)
if (changed && netif_running(dev)) {
s2io_stop_all_tx_queue(sp);
s2io_card_down(sp);
- sp->lro = !!(dev->features & NETIF_F_LRO);
rc = s2io_card_up(sp);
if (rc)
s2io_reset(sp);
@@ -7307,7 +7297,7 @@ static int s2io_card_up(struct s2io_nic *sp)
struct ring_info *ring = &mac_control->rings[i];
ring->mtu = dev->mtu;
- ring->lro = sp->lro;
+ ring->lro = !!(dev->features & NETIF_F_LRO);
ret = fill_rx_buffers(sp, ring, 1);
if (ret) {
DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
@@ -7341,7 +7331,7 @@ static int s2io_card_up(struct s2io_nic *sp)
/* Setting its receive mode */
s2io_set_multicast(dev);
- if (sp->lro) {
+ if (dev->features & NETIF_F_LRO) {
/* Initialize max aggregatable pkts per session based on MTU */
sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
/* Check if we can use (if specified) user provided value */
@@ -7613,10 +7603,10 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
* Packet with erroneous checksum, let the
* upper layers deal with it.
*/
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
}
} else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
swstats->mem_freed += skb->truesize;
send_up:
@@ -7911,7 +7901,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
else
sp->device_type = XFRAME_I_DEVICE;
- sp->lro = lro_enable;
/* Initialize some PCI/PCI-X fields of the NIC. */
s2io_init_pci(sp);
@@ -8047,8 +8036,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
dev->netdev_ops = &s2io_netdev_ops;
SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- if (lro_enable)
- dev->features |= NETIF_F_LRO;
+ dev->features |= NETIF_F_LRO;
dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
if (sp->high_dma_flag == true)
dev->features |= NETIF_F_HIGHDMA;
@@ -8283,9 +8271,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
dev->name);
}
- if (sp->lro)
- DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
- dev->name);
+ DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
+ dev->name);
if (ufo)
DBG_PRINT(ERR_DBG,
"%s: UDP Fragmentation Offload(UFO) enabled\n",
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 0af033533905..00b8614efe48 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -816,12 +816,6 @@ struct mac_info {
struct stat_block *stats_info; /* Logical address of the stat block */
};
-/* structure representing the user defined MAC addresses */
-struct usr_addr {
- char addr[ETH_ALEN];
- int usage_cnt;
-};
-
/* Default Tunable parameters of the NIC. */
#define DEFAULT_FIFO_0_LEN 4096
#define DEFAULT_FIFO_1_7_LEN 512
@@ -894,9 +888,7 @@ struct s2io_nic {
#define ALL_MULTI 2
#define MAX_ADDRS_SUPPORTED 64
- u16 usr_addr_count;
u16 mc_addr_count;
- struct usr_addr usr_addrs[256];
u16 m_cast_flg;
u16 all_multi_pos;
@@ -971,7 +963,6 @@ struct s2io_nic {
unsigned long clubbed_frms_cnt;
unsigned long sending_both;
- u8 lro;
u16 lro_max_aggr_per_sess;
volatile unsigned long state;
u64 general_int_mask;
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 8e6bd45b9f31..d8249d7653c6 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -1170,7 +1170,7 @@ again:
sb->ip_summed = CHECKSUM_UNNECESSARY;
/* don't need to set sb->csum */
} else {
- sb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(sb);
}
}
prefetch(sb->data);
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 8c4067af32b0..31b92f5f32cb 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -1251,16 +1251,6 @@ static int sc92031_ethtool_set_settings(struct net_device *dev,
return 0;
}
-static void sc92031_ethtool_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct sc92031_priv *priv = netdev_priv(dev);
- struct pci_dev *pdev = priv->pdev;
-
- strcpy(drvinfo->driver, SC92031_NAME);
- strcpy(drvinfo->bus_info, pci_name(pdev));
-}
-
static void sc92031_ethtool_get_wol(struct net_device *dev,
struct ethtool_wolinfo *wolinfo)
{
@@ -1382,7 +1372,6 @@ static void sc92031_ethtool_get_ethtool_stats(struct net_device *dev,
static const struct ethtool_ops sc92031_ethtool_ops = {
.get_settings = sc92031_ethtool_get_settings,
.set_settings = sc92031_ethtool_set_settings,
- .get_drvinfo = sc92031_ethtool_get_drvinfo,
.get_wol = sc92031_ethtool_get_wol,
.set_wol = sc92031_ethtool_set_wol,
.nway_reset = sc92031_ethtool_nway_reset,
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index ba674c5ca29e..f702f1fb63b6 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -114,7 +114,7 @@ static struct workqueue_struct *reset_workqueue;
* This is only used in MSI-X interrupt mode
*/
static unsigned int separate_tx_channels;
-module_param(separate_tx_channels, uint, 0644);
+module_param(separate_tx_channels, uint, 0444);
MODULE_PARM_DESC(separate_tx_channels,
"Use separate channels for TX and RX");
@@ -201,10 +201,13 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
* Utility functions and prototypes
*
*************************************************************************/
-static void efx_remove_channel(struct efx_channel *channel);
+
+static void efx_remove_channels(struct efx_nic *efx);
static void efx_remove_port(struct efx_nic *efx);
static void efx_fini_napi(struct efx_nic *efx);
-static void efx_fini_channels(struct efx_nic *efx);
+static void efx_fini_struct(struct efx_nic *efx);
+static void efx_start_all(struct efx_nic *efx);
+static void efx_stop_all(struct efx_nic *efx);
#define EFX_ASSERT_RESET_SERIALISED(efx) \
do { \
@@ -248,7 +251,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
efx_rx_strategy(channel);
- efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
+ efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
return spent;
}
@@ -334,6 +337,7 @@ void efx_process_channel_now(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
+ BUG_ON(channel->channel >= efx->n_channels);
BUG_ON(!channel->enabled);
/* Disable interrupts and wait for ISRs to complete */
@@ -347,7 +351,7 @@ void efx_process_channel_now(struct efx_channel *channel)
napi_disable(&channel->napi_str);
/* Poll the channel */
- efx_process_channel(channel, EFX_EVQ_SIZE);
+ efx_process_channel(channel, channel->eventq_mask + 1);
/* Ack the eventq. This may cause an interrupt to be generated
* when interrupts are re-enabled */
@@ -364,9 +368,18 @@ void efx_process_channel_now(struct efx_channel *channel)
*/
static int efx_probe_eventq(struct efx_channel *channel)
{
+ struct efx_nic *efx = channel->efx;
+ unsigned long entries;
+
netif_dbg(channel->efx, probe, channel->efx->net_dev,
"chan %d create event queue\n", channel->channel);
+ /* Build an event queue with room for one event per tx and rx buffer,
+ * plus some extra for link state events and MCDI completions. */
+ entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
+ EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
+ channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
+
return efx_nic_probe_eventq(channel);
}
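With the ring sizes now runtime-tunable, the event queue must be sized at probe time to cover the worst case rather than relying on a compile-time constant. A worked example of the sizing above, assuming the 1024-entry default rings:

	/* entries     = roundup_pow_of_two(1024 + 1024 + 128) = 4096	*/
	/* eventq_mask = max(4096UL, EFX_MIN_EVQ_SIZE) - 1      = 4095	*/

so the event ring keeps its power-of-two size, and eventq_mask replaces the old EFX_EVQ_MASK wherever a ring index is wrapped.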
@@ -403,6 +416,63 @@ static void efx_remove_eventq(struct efx_channel *channel)
*
*************************************************************************/
+/* Allocate and initialise a channel structure, optionally copying
+ * parameters (but not resources) from an old channel structure. */
+static struct efx_channel *
+efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
+{
+ struct efx_channel *channel;
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+ int j;
+
+ if (old_channel) {
+ channel = kmalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
+
+ *channel = *old_channel;
+
+ memset(&channel->eventq, 0, sizeof(channel->eventq));
+
+ rx_queue = &channel->rx_queue;
+ rx_queue->buffer = NULL;
+ memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
+
+ for (j = 0; j < EFX_TXQ_TYPES; j++) {
+ tx_queue = &channel->tx_queue[j];
+ if (tx_queue->channel)
+ tx_queue->channel = channel;
+ tx_queue->buffer = NULL;
+ memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
+ }
+ } else {
+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
+
+ channel->efx = efx;
+ channel->channel = i;
+
+ for (j = 0; j < EFX_TXQ_TYPES; j++) {
+ tx_queue = &channel->tx_queue[j];
+ tx_queue->efx = efx;
+ tx_queue->queue = i * EFX_TXQ_TYPES + j;
+ tx_queue->channel = channel;
+ }
+ }
+
+ spin_lock_init(&channel->tx_stop_lock);
+ atomic_set(&channel->tx_stop_count, 1);
+
+ rx_queue = &channel->rx_queue;
+ rx_queue->efx = efx;
+ setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
+ (unsigned long)rx_queue);
+
+ return channel;
+}
+
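efx_alloc_channel() copies parameters (irq, moderation settings) from the old channel but deliberately zeroes every resource pointer, so re-probing allocates fresh rings at the new size. A minimal sketch of the clone path (hypothetical local variable name):

	struct efx_channel *clone = efx_alloc_channel(efx, i, efx->channel[i]);
	/* clone->eventq, clone->rx_queue.rxd and clone->tx_queue[j].txd are
	 * zeroed; a later efx_probe_channels() allocates them at the new
	 * rxq_entries/txq_entries. */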
static int efx_probe_channel(struct efx_channel *channel)
{
struct efx_tx_queue *tx_queue;
@@ -459,11 +529,38 @@ static void efx_set_channel_names(struct efx_nic *efx)
number -= efx->n_rx_channels;
}
}
- snprintf(channel->name, sizeof(channel->name),
+ snprintf(efx->channel_name[channel->channel],
+ sizeof(efx->channel_name[0]),
"%s%s-%d", efx->name, type, number);
}
}
+static int efx_probe_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ int rc;
+
+ /* Restart special buffer allocation */
+ efx->next_buffer_table = 0;
+
+ efx_for_each_channel(channel, efx) {
+ rc = efx_probe_channel(channel);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to create channel %d\n",
+ channel->channel);
+ goto fail;
+ }
+ }
+ efx_set_channel_names(efx);
+
+ return 0;
+
+fail:
+ efx_remove_channels(efx);
+ return rc;
+}
+
/* Channels are shutdown and reinitialised whilst the NIC is running
* to propagate configuration changes (mtu, checksum offload), or
* to clear hardware error conditions
@@ -601,6 +698,75 @@ static void efx_remove_channel(struct efx_channel *channel)
efx_remove_eventq(channel);
}
+static void efx_remove_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_remove_channel(channel);
+}
+
+int
+efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
+{
+ struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
+ u32 old_rxq_entries, old_txq_entries;
+ unsigned i;
+ int rc;
+
+ efx_stop_all(efx);
+ efx_fini_channels(efx);
+
+ /* Clone channels */
+ memset(other_channel, 0, sizeof(other_channel));
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx_alloc_channel(efx, i, efx->channel[i]);
+ if (!channel) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ other_channel[i] = channel;
+ }
+
+ /* Swap entry counts and channel pointers */
+ old_rxq_entries = efx->rxq_entries;
+ old_txq_entries = efx->txq_entries;
+ efx->rxq_entries = rxq_entries;
+ efx->txq_entries = txq_entries;
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ efx->channel[i] = other_channel[i];
+ other_channel[i] = channel;
+ }
+
+ rc = efx_probe_channels(efx);
+ if (rc)
+ goto rollback;
+
+ /* Destroy old channels */
+ for (i = 0; i < efx->n_channels; i++)
+ efx_remove_channel(other_channel[i]);
+out:
+ /* Free unused channel structures */
+ for (i = 0; i < efx->n_channels; i++)
+ kfree(other_channel[i]);
+
+ efx_init_channels(efx);
+ efx_start_all(efx);
+ return rc;
+
+rollback:
+ /* Swap back */
+ efx->rxq_entries = old_rxq_entries;
+ efx->txq_entries = old_txq_entries;
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ efx->channel[i] = other_channel[i];
+ other_channel[i] = channel;
+ }
+ goto out;
+}
+
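efx_realloc_channels() above is effectively a transactional resize: the old channels stay allocated until the clones probe successfully, and the same out: path frees whichever set lost the swap. In outline (a sketch, not the verbatim flow):

	efx_stop_all(efx);
	efx_fini_channels(efx);
	/* 1. clone each channel at the new entry counts	*/
	/* 2. swap efx->channel[] plus rxq/txq_entries		*/
	/* 3. efx_probe_channels(); on failure swap back	*/
	/* 4. free the unused set, efx_init_channels(), restart	*/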
void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
@@ -761,7 +927,7 @@ static int efx_probe_port(struct efx_nic *efx)
/* Connect up MAC/PHY operations table */
rc = efx->type->probe_port(efx);
if (rc)
- goto err;
+ return rc;
/* Sanity check MAC address */
if (is_valid_ether_addr(efx->mac_address)) {
@@ -782,7 +948,7 @@ static int efx_probe_port(struct efx_nic *efx)
return 0;
err:
- efx_remove_port(efx);
+ efx->type->remove_port(efx);
return rc;
}
@@ -1050,7 +1216,8 @@ static void efx_probe_interrupts(struct efx_nic *efx)
efx->n_rx_channels = efx->n_channels;
}
for (i = 0; i < n_channels; i++)
- efx->channel[i].irq = xentries[i].vector;
+ efx_get_channel(efx, i)->irq =
+ xentries[i].vector;
} else {
/* Fall back to single channel MSI */
efx->interrupt_mode = EFX_INT_MODE_MSI;
@@ -1066,7 +1233,7 @@ static void efx_probe_interrupts(struct efx_nic *efx)
efx->n_tx_channels = 1;
rc = pci_enable_msi(efx->pci_dev);
if (rc == 0) {
- efx->channel[0].irq = efx->pci_dev->irq;
+ efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
} else {
netif_err(efx, drv, efx->net_dev,
"could not enable MSI\n");
@@ -1097,26 +1264,32 @@ static void efx_remove_interrupts(struct efx_nic *efx)
efx->legacy_irq = 0;
}
+struct efx_tx_queue *
+efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
+{
+ unsigned tx_channel_offset =
+ separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
+ EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
+ type >= EFX_TXQ_TYPES);
+ return &efx->channel[tx_channel_offset + index]->tx_queue[type];
+}
+
static void efx_set_channels(struct efx_nic *efx)
{
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
unsigned tx_channel_offset =
separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
+ /* Channel pointers were set in efx_init_struct() but we now
+ * need to clear them for TX queues in any RX-only channels. */
efx_for_each_channel(channel, efx) {
- if (channel->channel - tx_channel_offset < efx->n_tx_channels) {
- channel->tx_queue = &efx->tx_queue[
- (channel->channel - tx_channel_offset) *
- EFX_TXQ_TYPES];
+ if (channel->channel - tx_channel_offset >=
+ efx->n_tx_channels) {
efx_for_each_channel_tx_queue(tx_queue, channel)
- tx_queue->channel = channel;
+ tx_queue->channel = NULL;
}
}
-
- efx_for_each_rx_queue(rx_queue, efx)
- rx_queue->channel = &efx->channel[rx_queue->queue];
}
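With separate_tx_channels, the TX-capable channels are the last n_tx_channels of the set, and the cleared channel pointer is the sentinel that efx_channel_get_tx_queue() uses to report RX-only channels. A worked example under an assumed configuration:

	/* n_channels = 4, n_tx_channels = 2, separate_tx_channels = 1:
	 *   tx_channel_offset = 4 - 2 = 2
	 *   channels 0-1 are RX-only -> tx_queue[j].channel = NULL
	 *   channels 2-3 keep their TX queues */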
static int efx_probe_nic(struct efx_nic *efx)
@@ -1165,40 +1338,28 @@ static void efx_remove_nic(struct efx_nic *efx)
static int efx_probe_all(struct efx_nic *efx)
{
- struct efx_channel *channel;
int rc;
- /* Create NIC */
rc = efx_probe_nic(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
goto fail1;
}
- /* Create port */
rc = efx_probe_port(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev, "failed to create port\n");
goto fail2;
}
- /* Create channels */
- efx_for_each_channel(channel, efx) {
- rc = efx_probe_channel(channel);
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "failed to create channel %d\n",
- channel->channel);
- goto fail3;
- }
- }
- efx_set_channel_names(efx);
+ efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
+ rc = efx_probe_channels(efx);
+ if (rc)
+ goto fail3;
return 0;
fail3:
- efx_for_each_channel(channel, efx)
- efx_remove_channel(channel);
efx_remove_port(efx);
fail2:
efx_remove_nic(efx);
@@ -1328,10 +1489,7 @@ static void efx_stop_all(struct efx_nic *efx)
static void efx_remove_all(struct efx_nic *efx)
{
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- efx_remove_channel(channel);
+ efx_remove_channels(efx);
efx_remove_port(efx);
efx_remove_nic(efx);
}
@@ -1355,20 +1513,20 @@ static unsigned irq_mod_ticks(int usecs, int resolution)
void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
bool rx_adaptive)
{
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
+ struct efx_channel *channel;
unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION);
unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION);
EFX_ASSERT_RESET_SERIALISED(efx);
- efx_for_each_tx_queue(tx_queue, efx)
- tx_queue->channel->irq_moderation = tx_ticks;
-
efx->irq_rx_adaptive = rx_adaptive;
efx->irq_rx_moderation = rx_ticks;
- efx_for_each_rx_queue(rx_queue, efx)
- rx_queue->channel->irq_moderation = rx_ticks;
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_get_rx_queue(channel))
+ channel->irq_moderation = rx_ticks;
+ else if (efx_channel_get_tx_queue(channel, 0))
+ channel->irq_moderation = tx_ticks;
+ }
}
/**************************************************************************
@@ -1546,11 +1704,11 @@ static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, struc
stats->tx_packets = mac_stats->tx_packets;
stats->rx_bytes = mac_stats->rx_bytes;
stats->tx_bytes = mac_stats->tx_bytes;
+ stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
stats->multicast = mac_stats->rx_multicast;
stats->collisions = mac_stats->tx_collision;
stats->rx_length_errors = (mac_stats->rx_gtjumbo +
mac_stats->rx_length_error);
- stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
stats->rx_crc_errors = mac_stats->rx_bad;
stats->rx_frame_errors = mac_stats->rx_align_error;
stats->rx_fifo_errors = mac_stats->rx_overflow;
@@ -1767,6 +1925,7 @@ fail_registered:
static void efx_unregister_netdev(struct efx_nic *efx)
{
+ struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
if (!efx->net_dev)
@@ -1777,8 +1936,10 @@ static void efx_unregister_netdev(struct efx_nic *efx)
/* Free up any skbs still remaining. This has to happen before
* we try to unregister the netdev, as running their destructors
* may be needed to get the device ref. count to 0. */
- efx_for_each_tx_queue(tx_queue, efx)
- efx_release_tx_buffers(tx_queue);
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_release_tx_buffers(tx_queue);
+ }
if (efx_dev_registered(efx)) {
strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
@@ -2037,9 +2198,6 @@ static struct efx_phy_operations efx_dummy_phy_operations = {
static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
struct pci_dev *pci_dev, struct net_device *net_dev)
{
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
int i;
/* Initialise common structures */
@@ -2068,36 +2226,13 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
INIT_WORK(&efx->mac_work, efx_mac_work);
for (i = 0; i < EFX_MAX_CHANNELS; i++) {
- channel = &efx->channel[i];
- channel->efx = efx;
- channel->channel = i;
- channel->work_pending = false;
- spin_lock_init(&channel->tx_stop_lock);
- atomic_set(&channel->tx_stop_count, 1);
- }
- for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
- tx_queue = &efx->tx_queue[i];
- tx_queue->efx = efx;
- tx_queue->queue = i;
- tx_queue->buffer = NULL;
- tx_queue->channel = &efx->channel[0]; /* for safety */
- tx_queue->tso_headers_free = NULL;
- }
- for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
- rx_queue = &efx->rx_queue[i];
- rx_queue->efx = efx;
- rx_queue->queue = i;
- rx_queue->channel = &efx->channel[0]; /* for safety */
- rx_queue->buffer = NULL;
- setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
- (unsigned long)rx_queue);
+ efx->channel[i] = efx_alloc_channel(efx, i, NULL);
+ if (!efx->channel[i])
+ goto fail;
}
efx->type = type;
- /* As close as we can get to guaranteeing that we don't overflow */
- BUILD_BUG_ON(EFX_EVQ_SIZE < EFX_TXQ_SIZE + EFX_RXQ_SIZE);
-
EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
/* Higher numbered interrupt modes are less capable! */
@@ -2109,13 +2244,22 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
pci_name(pci_dev));
efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
if (!efx->workqueue)
- return -ENOMEM;
+ goto fail;
return 0;
+
+fail:
+ efx_fini_struct(efx);
+ return -ENOMEM;
}
static void efx_fini_struct(struct efx_nic *efx)
{
+ int i;
+
+ for (i = 0; i < EFX_MAX_CHANNELS; i++)
+ kfree(efx->channel[i]);
+
if (efx->workqueue) {
destroy_workqueue(efx->workqueue);
efx->workqueue = NULL;
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 060dc952a0fd..e783c0fedfd8 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -37,8 +37,6 @@ efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
extern void efx_stop_queue(struct efx_channel *channel);
extern void efx_wake_queue(struct efx_channel *channel);
-#define EFX_TXQ_SIZE 1024
-#define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1)
/* RX */
extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -53,13 +51,23 @@ extern void __efx_rx_packet(struct efx_channel *channel,
extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
unsigned int len, bool checksummed, bool discard);
extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
-#define EFX_RXQ_SIZE 1024
-#define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1)
+
+#define EFX_MAX_DMAQ_SIZE 4096UL
+#define EFX_DEFAULT_DMAQ_SIZE 1024UL
+#define EFX_MIN_DMAQ_SIZE 512UL
+
+#define EFX_MAX_EVQ_SIZE 16384UL
+#define EFX_MIN_EVQ_SIZE 512UL
+
+/* The smallest [rt]xq_entries that the driver supports. Callers of
+ * efx_wake_queue() assume that they can subsequently send at least one
+ * skb. Falcon/A1 may require up to three descriptors per skb_frag. */
+#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS))
/* Channels */
extern void efx_process_channel_now(struct efx_channel *channel);
-#define EFX_EVQ_SIZE 4096
-#define EFX_EVQ_MASK (EFX_EVQ_SIZE - 1)
+extern int
+efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
/* Ports */
extern int efx_reconfigure_port(struct efx_nic *efx);
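EFX_MIN_RING_SIZE budgets for two maximally fragmented skbs in flight, at up to three descriptors per fragment on Falcon/A1. A worked instance, assuming 4 KiB pages where MAX_SKB_FRAGS is 18:

	/* 2 * 3 * MAX_SKB_FRAGS = 2 * 3 * 18 = 108			*/
	/* roundup_pow_of_two(108) = 128 -> minimum ring of 128 entries	*/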
@@ -81,8 +89,6 @@ extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs,
int rx_usecs, bool rx_adaptive);
-extern int efx_request_power(struct efx_nic *efx, int mw, const char *name);
-extern void efx_hex_dump(const u8 *, unsigned int, const char *);
/* Dummy PHY ops for PHY drivers */
extern int efx_port_dummy_op_int(struct efx_nic *efx);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index fd19d6ab97a2..7f735d804801 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -328,9 +328,10 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
unsigned int test_index,
struct ethtool_string *strings, u64 *data)
{
+ struct efx_channel *channel = efx_get_channel(efx, 0);
struct efx_tx_queue *tx_queue;
- efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
efx_fill_test(test_index++, strings, data,
&lb_tests->tx_sent[tx_queue->queue],
EFX_TX_QUEUE_NAME(tx_queue),
@@ -673,15 +674,15 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
struct ethtool_coalesce *coalesce)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_tx_queue *tx_queue;
struct efx_channel *channel;
memset(coalesce, 0, sizeof(*coalesce));
/* Find lowest IRQ moderation across all used TX queues */
coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
- efx_for_each_tx_queue(tx_queue, efx) {
- channel = tx_queue->channel;
+ efx_for_each_channel(channel, efx) {
+ if (!efx_channel_get_tx_queue(channel, 0))
+ continue;
if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
if (channel->channel < efx->n_rx_channels)
coalesce->tx_coalesce_usecs_irq =
@@ -708,7 +709,6 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
unsigned tx_usecs, rx_usecs, adaptive;
if (coalesce->use_adaptive_tx_coalesce)
@@ -725,8 +725,9 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
adaptive = coalesce->use_adaptive_rx_coalesce;
/* If the channel is shared, only allow RX parameters to be set */
- efx_for_each_tx_queue(tx_queue, efx) {
- if ((tx_queue->channel->channel < efx->n_rx_channels) &&
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_get_rx_queue(channel) &&
+ efx_channel_get_tx_queue(channel, 0) &&
tx_usecs) {
netif_err(efx, drv, efx->net_dev, "Channel is shared. "
"Only RX coalescing may be set\n");
@@ -741,6 +742,42 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
return 0;
}
+static void efx_ethtool_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
+ ring->tx_max_pending = EFX_MAX_DMAQ_SIZE;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = efx->rxq_entries;
+ ring->tx_pending = efx->txq_entries;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int efx_ethtool_set_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
+ ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
+ ring->tx_pending > EFX_MAX_DMAQ_SIZE)
+ return -EINVAL;
+
+ if (ring->rx_pending < EFX_MIN_RING_SIZE ||
+ ring->tx_pending < EFX_MIN_RING_SIZE) {
+ netif_err(efx, drv, efx->net_dev,
+ "TX and RX queues cannot be smaller than %ld\n",
+ EFX_MIN_RING_SIZE);
+ return -EINVAL;
+ }
+
+ return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending);
+}
+
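The new ringparam hooks plug straight into the standard ethtool interface, so the channel-reallocation path can be exercised from userspace, e.g.:

	ethtool -g eth0                   # query current and maximum ring sizes
	ethtool -G eth0 rx 2048 tx 2048   # resize, triggering efx_realloc_channels()

(eth0 is a placeholder interface name; out-of-range values are rejected with -EINVAL as above.)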
static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *pause)
{
@@ -971,6 +1008,8 @@ const struct ethtool_ops efx_ethtool_ops = {
.set_eeprom = efx_ethtool_set_eeprom,
.get_coalesce = efx_ethtool_get_coalesce,
.set_coalesce = efx_ethtool_set_coalesce,
+ .get_ringparam = efx_ethtool_get_ringparam,
+ .set_ringparam = efx_ethtool_set_ringparam,
.get_pauseparam = efx_ethtool_get_pauseparam,
.set_pauseparam = efx_ethtool_set_pauseparam,
.get_rx_csum = efx_ethtool_get_rx_csum,
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 4f9d33f3cca1..b4d8efe67772 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -159,7 +159,6 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
efx_oword_t *int_ker = efx->irq_status.addr;
- struct efx_channel *channel;
int syserr;
int queues;
@@ -194,15 +193,10 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
wmb(); /* Ensure the vector is cleared before interrupt ack */
falcon_irq_ack_a1(efx);
- /* Schedule processing of any interrupting queues */
- channel = &efx->channel[0];
- while (queues) {
- if (queues & 0x01)
- efx_schedule_channel(channel);
- channel++;
- queues >>= 1;
- }
-
+ if (queues & 1)
+ efx_schedule_channel(efx_get_channel(efx, 0));
+ if (queues & 2)
+ efx_schedule_channel(efx_get_channel(efx, 1));
return IRQ_HANDLED;
}
/**************************************************************************
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 64e7caa4bbb5..152342dbff29 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -137,6 +137,7 @@ struct efx_tx_buffer {
* @channel: The associated channel
* @buffer: The software buffer ring
* @txd: The hardware descriptor ring
+ * @ptr_mask: The size of the ring minus 1.
* @flushed: Used when handling queue flushing
* @read_count: Current read pointer.
* This is the number of buffers that have been removed from both rings.
@@ -170,6 +171,7 @@ struct efx_tx_queue {
struct efx_nic *nic;
struct efx_tx_buffer *buffer;
struct efx_special_buffer txd;
+ unsigned int ptr_mask;
enum efx_flush_state flushed;
/* Members used mainly on the completion path */
@@ -225,10 +227,9 @@ struct efx_rx_page_state {
/**
* struct efx_rx_queue - An Efx RX queue
* @efx: The associated Efx NIC
- * @queue: DMA queue number
- * @channel: The associated channel
* @buffer: The software buffer ring
* @rxd: The hardware descriptor ring
+ * @ptr_mask: The size of the ring minus 1.
* @added_count: Number of buffers added to the receive queue.
* @notified_count: Number of buffers given to NIC (<= @added_count).
* @removed_count: Number of buffers removed from the receive queue.
@@ -240,9 +241,6 @@ struct efx_rx_page_state {
* @min_fill: RX descriptor minimum non-zero fill level.
* This records the minimum fill level observed when a ring
* refill was triggered.
- * @min_overfill: RX descriptor minimum overflow fill level.
- * This records the minimum fill level at which RX queue
- * overflow was observed. It should never be set.
* @alloc_page_count: RX allocation strategy counter.
* @alloc_skb_count: RX allocation strategy counter.
* @slow_fill: Timer used to defer efx_nic_generate_fill_event().
@@ -250,10 +248,9 @@ struct efx_rx_page_state {
*/
struct efx_rx_queue {
struct efx_nic *efx;
- int queue;
- struct efx_channel *channel;
struct efx_rx_buffer *buffer;
struct efx_special_buffer rxd;
+ unsigned int ptr_mask;
int added_count;
int notified_count;
@@ -302,7 +299,6 @@ enum efx_rx_alloc_method {
*
* @efx: Associated Efx NIC
* @channel: Channel instance number
- * @name: Name for channel and IRQ
* @enabled: Channel enabled indicator
* @irq: IRQ number (MSI and MSI-X only)
* @irq_moderation: IRQ moderation value (in hardware ticks)
@@ -311,6 +307,7 @@ enum efx_rx_alloc_method {
* @reset_work: Scheduled reset work thread
* @work_pending: Is work pending via NAPI?
* @eventq: Event queue buffer
+ * @eventq_mask: Event queue pointer mask
* @eventq_read_ptr: Event queue read pointer
* @last_eventq_read_ptr: Last event queue read pointer value.
* @magic_count: Event queue test event count
@@ -327,14 +324,14 @@ enum efx_rx_alloc_method {
* @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
* @n_rx_overlength: Count of RX_OVERLENGTH errors
* @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
- * @tx_queue: Pointer to first TX queue, or %NULL if not used for TX
+ * @rx_queue: RX queue for this channel
* @tx_stop_count: Core TX queue stop count
* @tx_stop_lock: Core TX queue stop lock
+ * @tx_queue: TX queues for this channel
*/
struct efx_channel {
struct efx_nic *efx;
int channel;
- char name[IFNAMSIZ + 6];
bool enabled;
int irq;
unsigned int irq_moderation;
@@ -342,6 +339,7 @@ struct efx_channel {
struct napi_struct napi_str;
bool work_pending;
struct efx_special_buffer eventq;
+ unsigned int eventq_mask;
unsigned int eventq_read_ptr;
unsigned int last_eventq_read_ptr;
unsigned int magic_count;
@@ -366,9 +364,12 @@ struct efx_channel {
struct efx_rx_buffer *rx_pkt;
bool rx_pkt_csummed;
- struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue rx_queue;
+
atomic_t tx_stop_count;
spinlock_t tx_stop_lock;
+
+ struct efx_tx_queue tx_queue[2];
};
enum efx_led_mode {
@@ -641,6 +642,9 @@ union efx_multicast_hash {
* @tx_queue: TX DMA queues
* @rx_queue: RX DMA queues
* @channel: Channels
+ * @channel_name: Names for channels and their IRQs
+ * @rxq_entries: Size of receive queues requested by user.
+ * @txq_entries: Size of transmit queues requested by user.
* @next_buffer_table: First available buffer table id
* @n_channels: Number of channels in use
* @n_rx_channels: Number of channels used for RX (= number of RX queues)
@@ -724,10 +728,11 @@ struct efx_nic {
enum nic_state state;
enum reset_type reset_pending;
- struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES];
- struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
- struct efx_channel channel[EFX_MAX_CHANNELS];
+ struct efx_channel *channel[EFX_MAX_CHANNELS];
+ char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6];
+ unsigned rxq_entries;
+ unsigned txq_entries;
unsigned next_buffer_table;
unsigned n_channels;
unsigned n_rx_channels;
@@ -909,39 +914,67 @@ struct efx_nic_type {
*
*************************************************************************/
+static inline struct efx_channel *
+efx_get_channel(struct efx_nic *efx, unsigned index)
+{
+ EFX_BUG_ON_PARANOID(index >= efx->n_channels);
+ return efx->channel[index];
+}
+
/* Iterate over all used channels */
#define efx_for_each_channel(_channel, _efx) \
- for (_channel = &((_efx)->channel[0]); \
- _channel < &((_efx)->channel[(_efx)->n_channels]); \
- _channel++)
-
-/* Iterate over all used TX queues */
-#define efx_for_each_tx_queue(_tx_queue, _efx) \
- for (_tx_queue = &((_efx)->tx_queue[0]); \
- _tx_queue < &((_efx)->tx_queue[EFX_TXQ_TYPES * \
- (_efx)->n_tx_channels]); \
- _tx_queue++)
+ for (_channel = (_efx)->channel[0]; \
+ _channel; \
+ _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \
+ (_efx)->channel[_channel->channel + 1] : NULL)
+
+extern struct efx_tx_queue *
+efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type);
+
+static inline struct efx_tx_queue *
+efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
+{
+ struct efx_tx_queue *tx_queue = channel->tx_queue;
+ EFX_BUG_ON_PARANOID(type >= EFX_TXQ_TYPES);
+ return tx_queue->channel ? tx_queue + type : NULL;
+}
/* Iterate over all TX queues belonging to a channel */
#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
- for (_tx_queue = (_channel)->tx_queue; \
+ for (_tx_queue = efx_channel_get_tx_queue(_channel, 0); \
_tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
_tx_queue++)
-/* Iterate over all used RX queues */
-#define efx_for_each_rx_queue(_rx_queue, _efx) \
- for (_rx_queue = &((_efx)->rx_queue[0]); \
- _rx_queue < &((_efx)->rx_queue[(_efx)->n_rx_channels]); \
- _rx_queue++)
+static inline struct efx_rx_queue *
+efx_get_rx_queue(struct efx_nic *efx, unsigned index)
+{
+ EFX_BUG_ON_PARANOID(index >= efx->n_rx_channels);
+ return &efx->channel[index]->rx_queue;
+}
+
+static inline struct efx_rx_queue *
+efx_channel_get_rx_queue(struct efx_channel *channel)
+{
+ return channel->channel < channel->efx->n_rx_channels ?
+ &channel->rx_queue : NULL;
+}
/* Iterate over all RX queues belonging to a channel */
#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
- for (_rx_queue = &((_channel)->efx->rx_queue[(_channel)->channel]); \
+ for (_rx_queue = efx_channel_get_rx_queue(_channel); \
_rx_queue; \
- _rx_queue = NULL) \
- if (_rx_queue->channel != (_channel)) \
- continue; \
- else
+ _rx_queue = NULL)
+
+static inline struct efx_channel *
+efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
+{
+ return container_of(rx_queue, struct efx_channel, rx_queue);
+}
+
+static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue)
+{
+ return efx_rx_queue_channel(rx_queue)->channel;
+}
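Because struct efx_rx_queue is now embedded in struct efx_channel rather than indexed through a separate array, the owning channel is recovered arithmetically instead of via the removed back-pointer. The container_of() above expands to roughly:

	(struct efx_channel *)((char *)rx_queue -
			       offsetof(struct efx_channel, rx_queue))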
/* Returns a pointer to the specified receive buffer in the RX
* descriptor queue.
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index f595d920c7c4..6c5c0cefa9d8 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -263,8 +263,8 @@ static int efx_alloc_special_buffer(struct efx_nic *efx,
{
len = ALIGN(len, EFX_BUF_SIZE);
- buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
- &buffer->dma_addr);
+ buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
+ &buffer->dma_addr, GFP_KERNEL);
if (!buffer->addr)
return -ENOMEM;
buffer->len = len;
@@ -301,8 +301,8 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
(u64)buffer->dma_addr, buffer->len,
buffer->addr, (u64)virt_to_phys(buffer->addr));
- pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
- buffer->dma_addr);
+ dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
+ buffer->dma_addr);
buffer->addr = NULL;
buffer->entries = 0;
}
@@ -356,7 +356,7 @@ static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
unsigned write_ptr;
efx_dword_t reg;
- write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
efx_writed_page(tx_queue->efx, &reg,
FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
@@ -377,7 +377,7 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
BUG_ON(tx_queue->write_count == tx_queue->insert_count);
do {
- write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
buffer = &tx_queue->buffer[write_ptr];
txd = efx_tx_desc(tx_queue, write_ptr);
++tx_queue->write_count;
@@ -398,10 +398,11 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
- BUILD_BUG_ON(EFX_TXQ_SIZE < 512 || EFX_TXQ_SIZE > 4096 ||
- EFX_TXQ_SIZE & EFX_TXQ_MASK);
+ unsigned entries;
+
+ entries = tx_queue->ptr_mask + 1;
return efx_alloc_special_buffer(efx, &tx_queue->txd,
- EFX_TXQ_SIZE * sizeof(efx_qword_t));
+ entries * sizeof(efx_qword_t));
}
void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
@@ -526,30 +527,32 @@ efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
*/
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
+ struct efx_nic *efx = rx_queue->efx;
efx_dword_t reg;
unsigned write_ptr;
while (rx_queue->notified_count != rx_queue->added_count) {
- efx_build_rx_desc(rx_queue,
- rx_queue->notified_count &
- EFX_RXQ_MASK);
+ efx_build_rx_desc(
+ rx_queue,
+ rx_queue->notified_count & rx_queue->ptr_mask);
++rx_queue->notified_count;
}
wmb();
- write_ptr = rx_queue->added_count & EFX_RXQ_MASK;
+ write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
- efx_writed_page(rx_queue->efx, &reg,
- FR_AZ_RX_DESC_UPD_DWORD_P0, rx_queue->queue);
+ efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
+ efx_rx_queue_index(rx_queue));
}
int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
- BUILD_BUG_ON(EFX_RXQ_SIZE < 512 || EFX_RXQ_SIZE > 4096 ||
- EFX_RXQ_SIZE & EFX_RXQ_MASK);
+ unsigned entries;
+
+ entries = rx_queue->ptr_mask + 1;
return efx_alloc_special_buffer(efx, &rx_queue->rxd,
- EFX_RXQ_SIZE * sizeof(efx_qword_t));
+ entries * sizeof(efx_qword_t));
}
void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
@@ -561,7 +564,7 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
netif_dbg(efx, hw, efx->net_dev,
"RX queue %d ring in special buffers %d-%d\n",
- rx_queue->queue, rx_queue->rxd.index,
+ efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
rx_queue->rxd.index + rx_queue->rxd.entries - 1);
rx_queue->flushed = FLUSH_NONE;
@@ -575,9 +578,10 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
FRF_AZ_RX_DESCQ_EVQ_ID,
- rx_queue->channel->channel,
+ efx_rx_queue_channel(rx_queue)->channel,
FRF_AZ_RX_DESCQ_OWNER_ID, 0,
- FRF_AZ_RX_DESCQ_LABEL, rx_queue->queue,
+ FRF_AZ_RX_DESCQ_LABEL,
+ efx_rx_queue_index(rx_queue),
FRF_AZ_RX_DESCQ_SIZE,
__ffs(rx_queue->rxd.entries),
FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
@@ -585,7 +589,7 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
FRF_AZ_RX_DESCQ_EN, 1);
efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
- rx_queue->queue);
+ efx_rx_queue_index(rx_queue));
}
static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
@@ -598,7 +602,8 @@ static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
/* Post a flush command */
EFX_POPULATE_OWORD_2(rx_flush_descq,
FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
- FRF_AZ_RX_FLUSH_DESCQ, rx_queue->queue);
+ FRF_AZ_RX_FLUSH_DESCQ,
+ efx_rx_queue_index(rx_queue));
efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}
@@ -613,7 +618,7 @@ void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
/* Remove RX descriptor ring from card */
EFX_ZERO_OWORD(rx_desc_ptr);
efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
- rx_queue->queue);
+ efx_rx_queue_index(rx_queue));
/* Unpin RX descriptor ring */
efx_fini_special_buffer(efx, &rx_queue->rxd);
@@ -680,15 +685,17 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
/* Transmit completion */
tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
- tx_queue = &efx->tx_queue[tx_ev_q_label];
+ tx_queue = efx_channel_get_tx_queue(
+ channel, tx_ev_q_label % EFX_TXQ_TYPES);
tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
- EFX_TXQ_MASK);
+ tx_queue->ptr_mask);
channel->irq_mod_score += tx_packets;
efx_xmit_done(tx_queue, tx_ev_desc_ptr);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
/* Rewrite the FIFO write pointer */
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
- tx_queue = &efx->tx_queue[tx_ev_q_label];
+ tx_queue = efx_channel_get_tx_queue(
+ channel, tx_ev_q_label % EFX_TXQ_TYPES);
if (efx_dev_registered(efx))
netif_tx_lock(efx->net_dev);
@@ -714,6 +721,7 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
bool *rx_ev_pkt_ok,
bool *discard)
{
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
struct efx_nic *efx = rx_queue->efx;
bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
@@ -746,14 +754,14 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
/* Count errors that are not in MAC stats. Ignore expected
* checksum errors during self-test. */
if (rx_ev_frm_trunc)
- ++rx_queue->channel->n_rx_frm_trunc;
+ ++channel->n_rx_frm_trunc;
else if (rx_ev_tobe_disc)
- ++rx_queue->channel->n_rx_tobe_disc;
+ ++channel->n_rx_tobe_disc;
else if (!efx->loopback_selftest) {
if (rx_ev_ip_hdr_chksum_err)
- ++rx_queue->channel->n_rx_ip_hdr_chksum_err;
+ ++channel->n_rx_ip_hdr_chksum_err;
else if (rx_ev_tcp_udp_chksum_err)
- ++rx_queue->channel->n_rx_tcp_udp_chksum_err;
+ ++channel->n_rx_tcp_udp_chksum_err;
}
/* The frame must be discarded if any of these are true. */
@@ -769,7 +777,7 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
netif_dbg(efx, rx_err, efx->net_dev,
" RX queue %d unexpected RX event "
EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
- rx_queue->queue, EFX_QWORD_VAL(*event),
+ efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
rx_ev_ip_hdr_chksum_err ?
" [IP_HDR_CHKSUM_ERR]" : "",
@@ -791,8 +799,8 @@ efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
struct efx_nic *efx = rx_queue->efx;
unsigned expected, dropped;
- expected = rx_queue->removed_count & EFX_RXQ_MASK;
- dropped = (index - expected) & EFX_RXQ_MASK;
+ expected = rx_queue->removed_count & rx_queue->ptr_mask;
+ dropped = (index - expected) & rx_queue->ptr_mask;
netif_info(efx, rx_err, efx->net_dev,
"dropped %d events (index=%d expected=%d)\n",
dropped, index, expected);
@@ -827,10 +835,10 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
channel->channel);
- rx_queue = &efx->rx_queue[channel->channel];
+ rx_queue = efx_channel_get_rx_queue(channel);
rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
- expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK;
+ expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
if (unlikely(rx_ev_desc_ptr != expected_ptr))
efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
@@ -879,7 +887,7 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
/* The queue must be empty, so we won't receive any rx
* events, so efx_process_channel() won't refill the
* queue. Refill it here */
- efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
+ efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
else
netif_dbg(efx, hw, efx->net_dev, "channel %d received "
"generated event "EFX_QWORD_FMT"\n",
@@ -997,6 +1005,7 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
int efx_nic_process_eventq(struct efx_channel *channel, int budget)
{
+ struct efx_nic *efx = channel->efx;
unsigned int read_ptr;
efx_qword_t event, *p_event;
int ev_code;
@@ -1021,7 +1030,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
EFX_SET_QWORD(*p_event);
/* Increment read pointer */
- read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
+ read_ptr = (read_ptr + 1) & channel->eventq_mask;
ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
@@ -1033,7 +1042,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
break;
case FSE_AZ_EV_CODE_TX_EV:
tx_packets += efx_handle_tx_event(channel, &event);
- if (tx_packets >= EFX_TXQ_SIZE) {
+ if (tx_packets > efx->txq_entries) {
spent = budget;
goto out;
}
@@ -1068,10 +1077,11 @@ out:
int efx_nic_probe_eventq(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
- BUILD_BUG_ON(EFX_EVQ_SIZE < 512 || EFX_EVQ_SIZE > 32768 ||
- EFX_EVQ_SIZE & EFX_EVQ_MASK);
+ unsigned entries;
+
+ entries = channel->eventq_mask + 1;
return efx_alloc_special_buffer(efx, &channel->eventq,
- EFX_EVQ_SIZE * sizeof(efx_qword_t));
+ entries * sizeof(efx_qword_t));
}
void efx_nic_init_eventq(struct efx_channel *channel)
@@ -1163,11 +1173,11 @@ void efx_nic_generate_fill_event(struct efx_channel *channel)
static void efx_poll_flush_events(struct efx_nic *efx)
{
- struct efx_channel *channel = &efx->channel[0];
+ struct efx_channel *channel = efx_get_channel(efx, 0);
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
unsigned int read_ptr = channel->eventq_read_ptr;
- unsigned int end_ptr = (read_ptr - 1) & EFX_EVQ_MASK;
+ unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask;
do {
efx_qword_t *event = efx_event(channel, read_ptr);
@@ -1185,7 +1195,9 @@ static void efx_poll_flush_events(struct efx_nic *efx)
ev_queue = EFX_QWORD_FIELD(*event,
FSF_AZ_DRIVER_EV_SUBDATA);
if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
- tx_queue = efx->tx_queue + ev_queue;
+ tx_queue = efx_get_tx_queue(
+ efx, ev_queue / EFX_TXQ_TYPES,
+ ev_queue % EFX_TXQ_TYPES);
tx_queue->flushed = FLUSH_DONE;
}
} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
@@ -1195,7 +1207,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
ev_failed = EFX_QWORD_FIELD(
*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
if (ev_queue < efx->n_rx_channels) {
- rx_queue = efx->rx_queue + ev_queue;
+ rx_queue = efx_get_rx_queue(efx, ev_queue);
rx_queue->flushed =
ev_failed ? FLUSH_FAILED : FLUSH_DONE;
}
@@ -1205,7 +1217,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
* it's ok to throw away every non-flush event */
EFX_SET_QWORD(*event);
- read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
+ read_ptr = (read_ptr + 1) & channel->eventq_mask;
} while (read_ptr != end_ptr);
channel->eventq_read_ptr = read_ptr;
@@ -1216,6 +1228,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
* serialise them */
int efx_nic_flush_queues(struct efx_nic *efx)
{
+ struct efx_channel *channel;
struct efx_rx_queue *rx_queue;
struct efx_tx_queue *tx_queue;
int i, tx_pending, rx_pending;
@@ -1224,29 +1237,35 @@ int efx_nic_flush_queues(struct efx_nic *efx)
efx->type->prepare_flush(efx);
/* Flush all tx queues in parallel */
- efx_for_each_tx_queue(tx_queue, efx)
- efx_flush_tx_queue(tx_queue);
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_flush_tx_queue(tx_queue);
+ }
/* The hardware supports four concurrent rx flushes, each of which may
* need to be retried if there is an outstanding descriptor fetch */
for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
rx_pending = tx_pending = 0;
- efx_for_each_rx_queue(rx_queue, efx) {
- if (rx_queue->flushed == FLUSH_PENDING)
- ++rx_pending;
- }
- efx_for_each_rx_queue(rx_queue, efx) {
- if (rx_pending == EFX_RX_FLUSH_COUNT)
- break;
- if (rx_queue->flushed == FLUSH_FAILED ||
- rx_queue->flushed == FLUSH_NONE) {
- efx_flush_rx_queue(rx_queue);
- ++rx_pending;
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ if (rx_queue->flushed == FLUSH_PENDING)
+ ++rx_pending;
}
}
- efx_for_each_tx_queue(tx_queue, efx) {
- if (tx_queue->flushed != FLUSH_DONE)
- ++tx_pending;
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ if (rx_pending == EFX_RX_FLUSH_COUNT)
+ break;
+ if (rx_queue->flushed == FLUSH_FAILED ||
+ rx_queue->flushed == FLUSH_NONE) {
+ efx_flush_rx_queue(rx_queue);
+ ++rx_pending;
+ }
+ }
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->flushed != FLUSH_DONE)
+ ++tx_pending;
+ }
}
if (rx_pending == 0 && tx_pending == 0)
@@ -1258,19 +1277,21 @@ int efx_nic_flush_queues(struct efx_nic *efx)
/* Mark the queues as all flushed. We're going to return failure
* leading to a reset, or fake up success anyway */
- efx_for_each_tx_queue(tx_queue, efx) {
- if (tx_queue->flushed != FLUSH_DONE)
- netif_err(efx, hw, efx->net_dev,
- "tx queue %d flush command timed out\n",
- tx_queue->queue);
- tx_queue->flushed = FLUSH_DONE;
- }
- efx_for_each_rx_queue(rx_queue, efx) {
- if (rx_queue->flushed != FLUSH_DONE)
- netif_err(efx, hw, efx->net_dev,
- "rx queue %d flush command timed out\n",
- rx_queue->queue);
- rx_queue->flushed = FLUSH_DONE;
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->flushed != FLUSH_DONE)
+ netif_err(efx, hw, efx->net_dev,
+ "tx queue %d flush command timed out\n",
+ tx_queue->queue);
+ tx_queue->flushed = FLUSH_DONE;
+ }
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ if (rx_queue->flushed != FLUSH_DONE)
+ netif_err(efx, hw, efx->net_dev,
+ "rx queue %d flush command timed out\n",
+ efx_rx_queue_index(rx_queue));
+ rx_queue->flushed = FLUSH_DONE;
+ }
}
return -ETIMEDOUT;
@@ -1457,7 +1478,7 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
*/
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
- struct efx_channel *channel = dev_id;
+ struct efx_channel *channel = *(struct efx_channel **)dev_id;
struct efx_nic *efx = channel->efx;
efx_oword_t *int_ker = efx->irq_status.addr;
int syserr;
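dev_id is now a pointer into efx->channel[] (a struct efx_channel **), so efx_realloc_channels() can swap channel structures without tearing down and re-requesting IRQs; the extra dereference fetches whichever structure is current:

	/* dev_id == &efx->channel[i]; one dereference yields the live channel */
	struct efx_channel *channel = *(struct efx_channel **)dev_id;

The request_irq()/free_irq() changes below pass &efx->channel[channel->channel] to match.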
@@ -1532,7 +1553,8 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
efx_for_each_channel(channel, efx) {
rc = request_irq(channel->irq, efx_msi_interrupt,
IRQF_PROBE_SHARED, /* Not shared */
- channel->name, channel);
+ efx->channel_name[channel->channel],
+ &efx->channel[channel->channel]);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed to hook IRQ %d\n", channel->irq);
@@ -1544,7 +1566,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
fail2:
efx_for_each_channel(channel, efx)
- free_irq(channel->irq, channel);
+ free_irq(channel->irq, &efx->channel[channel->channel]);
fail1:
return rc;
}
@@ -1557,7 +1579,7 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
/* Disable MSI/MSI-X interrupts */
efx_for_each_channel(channel, efx) {
if (channel->irq)
- free_irq(channel->irq, channel);
+ free_irq(channel->irq, &efx->channel[channel->channel]);
}
/* ACK legacy interrupt */
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 799c461ce7b8..6d0959b5158e 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -133,7 +133,7 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
unsigned index, count;
for (count = 0; count < EFX_RX_BATCH; ++count) {
- index = rx_queue->added_count & EFX_RXQ_MASK;
+ index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
@@ -208,7 +208,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
dma_addr += sizeof(struct efx_rx_page_state);
split:
- index = rx_queue->added_count & EFX_RXQ_MASK;
+ index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
rx_buf->skb = NULL;
@@ -285,7 +285,7 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
* we'd like to insert an additional descriptor whilst leaving
* EFX_RXD_HEAD_ROOM for the non-recycle path */
fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
- if (unlikely(fill_level >= EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM)) {
+ if (unlikely(fill_level > rx_queue->max_fill)) {
/* We could place "state" on a list, and drain the list in
* efx_fast_push_rx_descriptors(). For now, this will do. */
return;
@@ -294,7 +294,7 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
++state->refcnt;
get_page(rx_buf->page);
- index = rx_queue->added_count & EFX_RXQ_MASK;
+ index = rx_queue->added_count & rx_queue->ptr_mask;
new_buf = efx_rx_buffer(rx_queue, index);
new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
new_buf->skb = NULL;
@@ -311,7 +311,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf)
{
struct efx_nic *efx = channel->efx;
- struct efx_rx_queue *rx_queue = &efx->rx_queue[channel->channel];
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
struct efx_rx_buffer *new_buf;
unsigned index;
@@ -319,7 +319,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
page_count(rx_buf->page) == 1)
efx_resurrect_rx_buffer(rx_queue, rx_buf);
- index = rx_queue->added_count & EFX_RXQ_MASK;
+ index = rx_queue->added_count & rx_queue->ptr_mask;
new_buf = efx_rx_buffer(rx_queue, index);
memcpy(new_buf, rx_buf, sizeof(*new_buf));
@@ -341,13 +341,13 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
*/
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
- struct efx_channel *channel = rx_queue->channel;
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
unsigned fill_level;
int space, rc = 0;
/* Calculate current fill level, and exit if we don't need to fill */
fill_level = (rx_queue->added_count - rx_queue->removed_count);
- EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
+ EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
if (fill_level >= rx_queue->fast_fill_trigger)
goto out;
@@ -364,7 +364,8 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
"RX queue %d fast-filling descriptor ring from"
" level %d to level %d using %s allocation\n",
- rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
+ efx_rx_queue_index(rx_queue), fill_level,
+ rx_queue->fast_fill_limit,
channel->rx_alloc_push_pages ? "page" : "skb");
do {
@@ -382,7 +383,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
"RX queue %d fast-filled descriptor ring "
- "to level %d\n", rx_queue->queue,
+ "to level %d\n", efx_rx_queue_index(rx_queue),
rx_queue->added_count - rx_queue->removed_count);
out:
@@ -393,7 +394,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
void efx_rx_slow_fill(unsigned long context)
{
struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
- struct efx_channel *channel = rx_queue->channel;
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
/* Post an event to cause NAPI to run and refill the queue */
efx_nic_generate_fill_event(channel);
@@ -421,7 +422,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
netif_err(efx, rx_err, efx->net_dev,
" RX queue %d seriously overlength "
"RX event (0x%x > 0x%x+0x%x). Leaking\n",
- rx_queue->queue, len, max_len,
+ efx_rx_queue_index(rx_queue), len, max_len,
efx->type->rx_buffer_padding);
/* If this buffer was skb-allocated, then the meta
* data at the end of the skb will be trashed. So
@@ -434,10 +435,10 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
netif_err(efx, rx_err, efx->net_dev,
" RX queue %d overlength RX event "
"(0x%x > 0x%x)\n",
- rx_queue->queue, len, max_len);
+ efx_rx_queue_index(rx_queue), len, max_len);
}
- rx_queue->channel->n_rx_overlength++;
+ efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}
/* Pass a received packet up through the generic LRO stack
@@ -507,7 +508,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
unsigned int len, bool checksummed, bool discard)
{
struct efx_nic *efx = rx_queue->efx;
- struct efx_channel *channel = rx_queue->channel;
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
struct efx_rx_buffer *rx_buf;
bool leak_packet = false;
@@ -528,7 +529,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
netif_vdbg(efx, rx_status, efx->net_dev,
"RX queue %d received id %x at %llx+%x %s%s\n",
- rx_queue->queue, index,
+ efx_rx_queue_index(rx_queue), index,
(unsigned long long)rx_buf->dma_addr, len,
(checksummed ? " [SUMMED]" : ""),
(discard ? " [DISCARD]" : ""));
@@ -560,12 +561,11 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
*/
rx_buf->len = len;
out:
- if (rx_queue->channel->rx_pkt)
- __efx_rx_packet(rx_queue->channel,
- rx_queue->channel->rx_pkt,
- rx_queue->channel->rx_pkt_csummed);
- rx_queue->channel->rx_pkt = rx_buf;
- rx_queue->channel->rx_pkt_csummed = checksummed;
+ if (channel->rx_pkt)
+ __efx_rx_packet(channel,
+ channel->rx_pkt, channel->rx_pkt_csummed);
+ channel->rx_pkt = rx_buf;
+ channel->rx_pkt_csummed = checksummed;
}
/* Handle a received packet. Second half: Touches packet payload. */
@@ -615,7 +615,7 @@ void __efx_rx_packet(struct efx_channel *channel,
EFX_BUG_ON_PARANOID(!skb);
/* Set the SKB flags */
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
/* Pass the packet up */
netif_receive_skb(skb);
@@ -650,15 +650,22 @@ void efx_rx_strategy(struct efx_channel *channel)
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
- unsigned int rxq_size;
+ unsigned int entries;
int rc;
+ /* Create the smallest power-of-two aligned ring */
+ entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
+ EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+ rx_queue->ptr_mask = entries - 1;
+
netif_dbg(efx, probe, efx->net_dev,
- "creating RX queue %d\n", rx_queue->queue);
+ "creating RX queue %d size %#x mask %#x\n",
+ efx_rx_queue_index(rx_queue), efx->rxq_entries,
+ rx_queue->ptr_mask);
/* Allocate RX buffers */
- rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer);
- rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
+ rx_queue->buffer = kzalloc(entries * sizeof(*rx_queue->buffer),
+ GFP_KERNEL);
if (!rx_queue->buffer)
return -ENOMEM;
@@ -672,20 +679,20 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
+ struct efx_nic *efx = rx_queue->efx;
unsigned int max_fill, trigger, limit;
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
- "initialising RX queue %d\n", rx_queue->queue);
+ "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
/* Initialise ptr fields */
rx_queue->added_count = 0;
rx_queue->notified_count = 0;
rx_queue->removed_count = 0;
rx_queue->min_fill = -1U;
- rx_queue->min_overfill = -1U;
/* Initialise limit fields */
- max_fill = EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM;
+ max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
limit = max_fill * min(rx_refill_limit, 100U) / 100U;
@@ -703,14 +710,14 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
struct efx_rx_buffer *rx_buf;
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
- "shutting down RX queue %d\n", rx_queue->queue);
+ "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
del_timer_sync(&rx_queue->slow_fill);
efx_nic_fini_rx(rx_queue);
/* Release RX buffers NB start at index 0 not current HW ptr */
if (rx_queue->buffer) {
- for (i = 0; i <= EFX_RXQ_MASK; i++) {
+ for (i = 0; i <= rx_queue->ptr_mask; i++) {
rx_buf = efx_rx_buffer(rx_queue, i);
efx_fini_rx_buffer(rx_queue, rx_buf);
}
@@ -720,7 +727,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
- "destroying RX queue %d\n", rx_queue->queue);
+ "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
efx_nic_remove_rx(rx_queue);
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 85f015f005d5..da4473b71058 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -506,7 +506,7 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
for (i = 0; i < 3; i++) {
/* Determine how many packets to send */
- state->packet_count = EFX_TXQ_SIZE / 3;
+ state->packet_count = efx->txq_entries / 3;
state->packet_count = min(1 << (i << 2), state->packet_count);
state->skbs = kzalloc(sizeof(state->skbs[0]) *
state->packet_count, GFP_KERNEL);
@@ -567,7 +567,7 @@ static int efx_wait_for_link(struct efx_nic *efx)
efx->type->monitor(efx);
mutex_unlock(&efx->mac_lock);
} else {
- struct efx_channel *channel = &efx->channel[0];
+ struct efx_channel *channel = efx_get_channel(efx, 0);
if (channel->work_pending)
efx_process_channel_now(channel);
}
@@ -594,6 +594,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
{
enum efx_loopback_mode mode;
struct efx_loopback_state *state;
+ struct efx_channel *channel = efx_get_channel(efx, 0);
struct efx_tx_queue *tx_queue;
int rc = 0;
@@ -634,7 +635,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
}
/* Test both types of TX queue */
- efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
state->offload_csum = (tx_queue->queue &
EFX_TXQ_TYPE_OFFLOAD);
rc = efx_test_loopback(tx_queue,
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index 3fab030f8ab5..9f5368049694 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -450,7 +450,7 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
mac_stats->rx_bad_bytes);
MAC_STAT(rx_packets, RX_PKTS);
MAC_STAT(rx_good, RX_GOOD_PKTS);
- mac_stats->rx_bad = mac_stats->rx_packets - mac_stats->rx_good;
+ MAC_STAT(rx_bad, RX_BAD_FCS_PKTS);
MAC_STAT(rx_pause, RX_PAUSE_PKTS);
MAC_STAT(rx_control, RX_CONTROL_PKTS);
MAC_STAT(rx_unicast, RX_UNICAST_PKTS);
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index c6942da2c99a..11726989fe2d 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -28,7 +28,7 @@
* The tx_queue descriptor ring fill-level must fall below this value
* before we restart the netif queue
*/
-#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)
+#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
/* We need to be able to nest calls to netif_tx_stop_queue(), partly
* because of the 2 hardware queues associated with each core queue,
@@ -37,8 +37,9 @@
void efx_stop_queue(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
+ struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
- if (!channel->tx_queue)
+ if (!tx_queue)
return;
spin_lock_bh(&channel->tx_stop_lock);
@@ -46,9 +47,8 @@ void efx_stop_queue(struct efx_channel *channel)
atomic_inc(&channel->tx_stop_count);
netif_tx_stop_queue(
- netdev_get_tx_queue(
- efx->net_dev,
- channel->tx_queue->queue / EFX_TXQ_TYPES));
+ netdev_get_tx_queue(efx->net_dev,
+ tx_queue->queue / EFX_TXQ_TYPES));
spin_unlock_bh(&channel->tx_stop_lock);
}
@@ -57,8 +57,9 @@ void efx_stop_queue(struct efx_channel *channel)
void efx_wake_queue(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
+ struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
- if (!channel->tx_queue)
+ if (!tx_queue)
return;
local_bh_disable();
@@ -66,9 +67,8 @@ void efx_wake_queue(struct efx_channel *channel)
&channel->tx_stop_lock)) {
netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
netif_tx_wake_queue(
- netdev_get_tx_queue(
- efx->net_dev,
- channel->tx_queue->queue / EFX_TXQ_TYPES));
+ netdev_get_tx_queue(efx->net_dev,
+ tx_queue->queue / EFX_TXQ_TYPES));
spin_unlock(&channel->tx_stop_lock);
}
local_bh_enable();
@@ -207,7 +207,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
}
fill_level = tx_queue->insert_count - tx_queue->old_read_count;
- q_space = EFX_TXQ_MASK - 1 - fill_level;
+ q_space = efx->txq_entries - 1 - fill_level;
/* Map for DMA. Use pci_map_single rather than pci_map_page
* since this is more efficient on machines with sparse
@@ -244,14 +244,14 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
&tx_queue->read_count;
fill_level = (tx_queue->insert_count
- tx_queue->old_read_count);
- q_space = EFX_TXQ_MASK - 1 - fill_level;
+ q_space = efx->txq_entries - 1 - fill_level;
if (unlikely(q_space-- <= 0))
goto stop;
smp_mb();
--tx_queue->stopped;
}
- insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
+ insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
buffer = &tx_queue->buffer[insert_ptr];
efx_tsoh_free(tx_queue, buffer);
EFX_BUG_ON_PARANOID(buffer->tsoh);
@@ -320,7 +320,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
/* Work backwards until we hit the original insert pointer value */
while (tx_queue->insert_count != tx_queue->write_count) {
--tx_queue->insert_count;
- insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
+ insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
buffer = &tx_queue->buffer[insert_ptr];
efx_dequeue_buffer(tx_queue, buffer);
buffer->len = 0;
@@ -350,8 +350,8 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
struct efx_nic *efx = tx_queue->efx;
unsigned int stop_index, read_ptr;
- stop_index = (index + 1) & EFX_TXQ_MASK;
- read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
+ stop_index = (index + 1) & tx_queue->ptr_mask;
+ read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
while (read_ptr != stop_index) {
struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
@@ -368,7 +368,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
buffer->len = 0;
++tx_queue->read_count;
- read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
+ read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
}
}
@@ -390,9 +390,9 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
if (unlikely(efx->port_inhibited))
return NETDEV_TX_BUSY;
- tx_queue = &efx->tx_queue[EFX_TXQ_TYPES * skb_get_queue_mapping(skb)];
- if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
- tx_queue += EFX_TXQ_TYPE_OFFLOAD;
+ tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
+ skb->ip_summed == CHECKSUM_PARTIAL ?
+ EFX_TXQ_TYPE_OFFLOAD : 0);
return efx_enqueue_skb(tx_queue, skb);
}
@@ -402,7 +402,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
unsigned fill_level;
struct efx_nic *efx = tx_queue->efx;
- EFX_BUG_ON_PARANOID(index > EFX_TXQ_MASK);
+ EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
efx_dequeue_buffers(tx_queue, index);
@@ -412,7 +412,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
smp_mb();
if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
fill_level = tx_queue->insert_count - tx_queue->read_count;
- if (fill_level < EFX_TXQ_THRESHOLD) {
+ if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
/* Do this under netif_tx_lock(), to avoid racing
@@ -430,18 +430,24 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
- unsigned int txq_size;
+ unsigned int entries;
int i, rc;
- netif_dbg(efx, probe, efx->net_dev, "creating TX queue %d\n",
- tx_queue->queue);
+ /* Create the smallest power-of-two aligned ring */
+ entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
+ EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+ tx_queue->ptr_mask = entries - 1;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "creating TX queue %d size %#x mask %#x\n",
+ tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
/* Allocate software ring */
- txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer);
- tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
+ tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer),
+ GFP_KERNEL);
if (!tx_queue->buffer)
return -ENOMEM;
- for (i = 0; i <= EFX_TXQ_MASK; ++i)
+ for (i = 0; i <= tx_queue->ptr_mask; ++i)
tx_queue->buffer[i].continuation = true;
/* Allocate hardware ring */
@@ -481,7 +487,7 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
/* Free any buffers left in the ring */
while (tx_queue->read_count != tx_queue->write_count) {
- buffer = &tx_queue->buffer[tx_queue->read_count & EFX_TXQ_MASK];
+ buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
efx_dequeue_buffer(tx_queue, buffer);
buffer->continuation = true;
buffer->len = 0;
@@ -741,7 +747,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
fill_level = tx_queue->insert_count - tx_queue->old_read_count;
/* -1 as there is no way to represent all descriptors used */
- q_space = EFX_TXQ_MASK - 1 - fill_level;
+ q_space = efx->txq_entries - 1 - fill_level;
while (1) {
if (unlikely(q_space-- <= 0)) {
@@ -757,7 +763,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
*(volatile unsigned *)&tx_queue->read_count;
fill_level = (tx_queue->insert_count
- tx_queue->old_read_count);
- q_space = EFX_TXQ_MASK - 1 - fill_level;
+ q_space = efx->txq_entries - 1 - fill_level;
if (unlikely(q_space-- <= 0)) {
*final_buffer = NULL;
return 1;
@@ -766,13 +772,13 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
--tx_queue->stopped;
}
- insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
+ insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
buffer = &tx_queue->buffer[insert_ptr];
++tx_queue->insert_count;
EFX_BUG_ON_PARANOID(tx_queue->insert_count -
- tx_queue->read_count >
- EFX_TXQ_MASK);
+ tx_queue->read_count >=
+ efx->txq_entries);
efx_tsoh_free(tx_queue, buffer);
EFX_BUG_ON_PARANOID(buffer->len);
@@ -813,7 +819,7 @@ static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
{
struct efx_tx_buffer *buffer;
- buffer = &tx_queue->buffer[tx_queue->insert_count & EFX_TXQ_MASK];
+ buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
efx_tsoh_free(tx_queue, buffer);
EFX_BUG_ON_PARANOID(buffer->len);
EFX_BUG_ON_PARANOID(buffer->unmap_len);
@@ -838,7 +844,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
while (tx_queue->insert_count != tx_queue->write_count) {
--tx_queue->insert_count;
buffer = &tx_queue->buffer[tx_queue->insert_count &
- EFX_TXQ_MASK];
+ tx_queue->ptr_mask];
efx_tsoh_free(tx_queue, buffer);
EFX_BUG_ON_PARANOID(buffer->skb);
if (buffer->unmap_len) {
@@ -1168,7 +1174,7 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
unsigned i;
if (tx_queue->buffer) {
- for (i = 0; i <= EFX_TXQ_MASK; ++i)
+ for (i = 0; i <= tx_queue->ptr_mask; ++i)
efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
}
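
Throughout the tx.c hunks above, queue space is computed as txq_entries - 1 - fill_level, and the paranoid assertion is tightened from "> mask" to ">= entries" to match. The spare slot exists because the hardware only sees masked ring pointers, for which a completely full ring is indistinguishable from an empty one. A small illustrative sketch (not driver code):

#include <stdio.h>

int main(void)
{
        unsigned int entries = 8, mask = entries - 1;
        unsigned int read = 5;                    /* free-running counters */
        unsigned int write_empty = read;          /* 0 descriptors in flight */
        unsigned int write_full = read + entries; /* all 8 in flight */

        printf("empty: hw read=%u hw write=%u\n",
               read & mask, write_empty & mask);
        printf("full:  hw read=%u hw write=%u\n",
               read & mask, write_full & mask);   /* same values as empty! */
        /* Hence q_space = entries - 1 - fill_level in the hunks above. */
        return 0;
}
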
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index f5a9eb1df593..50259dfec583 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -798,7 +798,7 @@ static int sh_eth_rx(struct net_device *ndev)
skb->dev = ndev;
sh_eth_set_receive_align(skb);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
}
if (entry >= RX_RING_SIZE - 1)
@@ -1031,7 +1031,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
mdp->duplex = -1;
/* Try connect to PHY */
- phydev = phy_connect(ndev, phy_id, &sh_eth_adjust_link,
+ phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
0, PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
dev_err(&ndev->dev, "phy_connect failed\n");
@@ -1437,7 +1437,7 @@ static const struct net_device_ops sh_eth_netdev_ops = {
static int sh_eth_drv_probe(struct platform_device *pdev)
{
- int ret, i, devno = 0;
+ int ret, devno = 0;
struct resource *res;
struct net_device *ndev = NULL;
struct sh_eth_private *mdp;
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index bbbded76ff14..ffdd8591d4bc 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1042,7 +1042,7 @@ sis900_open(struct net_device *net_dev)
init_timer(&sis_priv->timer);
sis_priv->timer.expires = jiffies + HZ;
sis_priv->timer.data = (unsigned long)net_dev;
- sis_priv->timer.function = &sis900_timer;
+ sis_priv->timer.function = sis900_timer;
add_timer(&sis_priv->timer);
return 0;
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index 31b2dabf094c..8a12bd9d28ba 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -209,7 +209,7 @@ static int skfp_init_one(struct pci_dev *pdev,
void __iomem *mem;
int err;
- pr_debug(KERN_INFO "entering skfp_init_one\n");
+ pr_debug("entering skfp_init_one\n");
if (num_boards == 0)
printk("%s\n", boot_msg);
@@ -385,7 +385,7 @@ static int skfp_driver_init(struct net_device *dev)
skfddi_priv *bp = &smc->os;
int err = -EIO;
- pr_debug(KERN_INFO "entering skfp_driver_init\n");
+ pr_debug("entering skfp_driver_init\n");
// set the io address in private structures
bp->base_addr = dev->base_addr;
@@ -405,7 +405,7 @@ static int skfp_driver_init(struct net_device *dev)
// Determine the required size of the 'shared' memory area.
bp->SharedMemSize = mac_drv_check_space();
- pr_debug(KERN_INFO "Memory for HWM: %ld\n", bp->SharedMemSize);
+ pr_debug("Memory for HWM: %ld\n", bp->SharedMemSize);
if (bp->SharedMemSize > 0) {
bp->SharedMemSize += 16; // for descriptor alignment
@@ -429,13 +429,13 @@ static int skfp_driver_init(struct net_device *dev)
card_stop(smc); // Reset adapter.
- pr_debug(KERN_INFO "mac_drv_init()..\n");
+ pr_debug("mac_drv_init()..\n");
if (mac_drv_init(smc) != 0) {
- pr_debug(KERN_INFO "mac_drv_init() failed.\n");
+ pr_debug("mac_drv_init() failed\n");
goto fail;
}
read_address(smc, NULL);
- pr_debug(KERN_INFO "HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
+ pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
smt_reset_defaults(smc, 0);
@@ -485,7 +485,7 @@ static int skfp_open(struct net_device *dev)
struct s_smc *smc = netdev_priv(dev);
int err;
- pr_debug(KERN_INFO "entering skfp_open\n");
+ pr_debug("entering skfp_open\n");
/* Register IRQ - support shared interrupts by passing device ptr */
err = request_irq(dev->irq, skfp_interrupt, IRQF_SHARED,
dev->name, dev);
@@ -856,12 +856,12 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
/* Enable promiscuous mode, if necessary */
if (dev->flags & IFF_PROMISC) {
mac_drv_rx_mode(smc, RX_ENABLE_PROMISC);
- pr_debug(KERN_INFO "PROMISCUOUS MODE ENABLED\n");
+ pr_debug("PROMISCUOUS MODE ENABLED\n");
}
/* Else, update multicast address table */
else {
mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
- pr_debug(KERN_INFO "PROMISCUOUS MODE DISABLED\n");
+ pr_debug("PROMISCUOUS MODE DISABLED\n");
// Reset all MC addresses
mac_clear_multicast(smc);
@@ -869,7 +869,7 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI) {
mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
- pr_debug(KERN_INFO "ENABLE ALL MC ADDRESSES\n");
+ pr_debug("ENABLE ALL MC ADDRESSES\n");
} else if (!netdev_mc_empty(dev)) {
if (netdev_mc_count(dev) <= FPMAX_MULTICAST) {
/* use exact filtering */
@@ -880,18 +880,18 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
(struct fddi_addr *)ha->addr,
1);
- pr_debug(KERN_INFO "ENABLE MC ADDRESS: %pMF\n",
- ha->addr);
+ pr_debug("ENABLE MC ADDRESS: %pMF\n",
+ ha->addr);
}
} else { // more MC addresses than HW supports
mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
- pr_debug(KERN_INFO "ENABLE ALL MC ADDRESSES\n");
+ pr_debug("ENABLE ALL MC ADDRESSES\n");
}
} else { // no MC addresses
- pr_debug(KERN_INFO "DISABLE ALL MC ADDRESSES\n");
+ pr_debug("DISABLE ALL MC ADDRESSES\n");
}
/* Update adapter filters */
@@ -1045,7 +1045,7 @@ static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
struct s_smc *smc = netdev_priv(dev);
skfddi_priv *bp = &smc->os;
- pr_debug(KERN_INFO "skfp_send_pkt\n");
+ pr_debug("skfp_send_pkt\n");
/*
* Verify that incoming transmit request is OK
@@ -1114,13 +1114,13 @@ static void send_queued_packets(struct s_smc *smc)
int frame_status; // HWM tx frame status.
- pr_debug(KERN_INFO "send queued packets\n");
+ pr_debug("send queued packets\n");
for (;;) {
// send first buffer from queue
skb = skb_dequeue(&bp->SendSkbQueue);
if (!skb) {
- pr_debug(KERN_INFO "queue empty\n");
+ pr_debug("queue empty\n");
return;
} // queue empty !
@@ -1232,7 +1232,7 @@ static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr)
static void ResetAdapter(struct s_smc *smc)
{
- pr_debug(KERN_INFO "[fddi: ResetAdapter]\n");
+ pr_debug("[fddi: ResetAdapter]\n");
// Stop the adapter.
@@ -1278,7 +1278,7 @@ void llc_restart_tx(struct s_smc *smc)
{
skfddi_priv *bp = &smc->os;
- pr_debug(KERN_INFO "[llc_restart_tx]\n");
+ pr_debug("[llc_restart_tx]\n");
// Try to send queued packets
spin_unlock(&bp->DriverLock);
@@ -1308,7 +1308,7 @@ void *mac_drv_get_space(struct s_smc *smc, unsigned int size)
{
void *virt;
- pr_debug(KERN_INFO "mac_drv_get_space (%d bytes), ", size);
+ pr_debug("mac_drv_get_space (%d bytes), ", size);
virt = (void *) (smc->os.SharedMemAddr + smc->os.SharedMemHeap);
if ((smc->os.SharedMemHeap + size) > smc->os.SharedMemSize) {
@@ -1317,9 +1317,9 @@ void *mac_drv_get_space(struct s_smc *smc, unsigned int size)
}
smc->os.SharedMemHeap += size; // Move heap pointer.
- pr_debug(KERN_INFO "mac_drv_get_space end\n");
- pr_debug(KERN_INFO "virt addr: %lx\n", (ulong) virt);
- pr_debug(KERN_INFO "bus addr: %lx\n", (ulong)
+ pr_debug("mac_drv_get_space end\n");
+ pr_debug("virt addr: %lx\n", (ulong) virt);
+ pr_debug("bus addr: %lx\n", (ulong)
(smc->os.SharedMemDMA +
((char *) virt - (char *)smc->os.SharedMemAddr)));
return (virt);
@@ -1349,7 +1349,7 @@ void *mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size)
char *virt;
- pr_debug(KERN_INFO "mac_drv_get_desc_mem\n");
+ pr_debug("mac_drv_get_desc_mem\n");
// Descriptor memory must be aligned on 16-byte boundary.
@@ -1493,7 +1493,7 @@ void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd)
{
struct sk_buff *skb;
- pr_debug(KERN_INFO "entering mac_drv_tx_complete\n");
+ pr_debug("entering mac_drv_tx_complete\n");
// Check if this TxD points to a skb
if (!(skb = txd->txd_os.skb)) {
@@ -1513,7 +1513,7 @@ void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd)
// free the skb
dev_kfree_skb_irq(skb);
- pr_debug(KERN_INFO "leaving mac_drv_tx_complete\n");
+ pr_debug("leaving mac_drv_tx_complete\n");
} // mac_drv_tx_complete
@@ -1580,7 +1580,7 @@ void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
unsigned short ri;
u_int RifLength;
- pr_debug(KERN_INFO "entering mac_drv_rx_complete (len=%d)\n", len);
+ pr_debug("entering mac_drv_rx_complete (len=%d)\n", len);
if (frag_count != 1) { // This is not allowed to happen.
printk("fddi: Multi-fragment receive!\n");
@@ -1589,7 +1589,7 @@ void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
}
skb = rxd->rxd_os.skb;
if (!skb) {
- pr_debug(KERN_INFO "No skb in rxd\n");
+ pr_debug("No skb in rxd\n");
smc->os.MacStat.gen.rx_errors++;
goto RequeueRxd;
}
@@ -1619,7 +1619,7 @@ void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
else {
int n;
// goos: RIF removal has still to be tested
- pr_debug(KERN_INFO "RIF found\n");
+ pr_debug("RIF found\n");
// Get RIF length from Routing Control (RC) field.
cp = virt + FDDI_MAC_HDR_LEN; // Point behind MAC header.
@@ -1664,7 +1664,7 @@ void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
return;
RequeueRxd:
- pr_debug(KERN_INFO "Rx: re-queue RXD.\n");
+ pr_debug("Rx: re-queue RXD.\n");
mac_drv_requeue_rxd(smc, rxd, frag_count);
smc->os.MacStat.gen.rx_errors++; // Count receive packets
// not indicated.
@@ -1775,7 +1775,7 @@ void mac_drv_fill_rxd(struct s_smc *smc)
struct sk_buff *skb;
volatile struct s_smt_fp_rxd *rxd;
- pr_debug(KERN_INFO "entering mac_drv_fill_rxd\n");
+ pr_debug("entering mac_drv_fill_rxd\n");
// Walk through the list of free receive buffers, passing receive
// buffers to the HWM as long as RXDs are available.
@@ -1783,7 +1783,7 @@ void mac_drv_fill_rxd(struct s_smc *smc)
MaxFrameSize = smc->os.MaxFrameSize;
// Check if there is any RXD left.
while (HWM_GET_RX_FREE(smc) > 0) {
- pr_debug(KERN_INFO ".\n");
+ pr_debug(".\n");
rxd = HWM_GET_CURR_RXD(smc);
skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
@@ -1814,7 +1814,7 @@ void mac_drv_fill_rxd(struct s_smc *smc)
hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
FIRST_FRAG | LAST_FRAG);
}
- pr_debug(KERN_INFO "leaving mac_drv_fill_rxd\n");
+ pr_debug("leaving mac_drv_fill_rxd\n");
} // mac_drv_fill_rxd
@@ -2034,17 +2034,17 @@ void smt_stat_counter(struct s_smc *smc, int stat)
{
// BOOLEAN RingIsUp ;
- pr_debug(KERN_INFO "smt_stat_counter\n");
+ pr_debug("smt_stat_counter\n");
switch (stat) {
case 0:
- pr_debug(KERN_INFO "Ring operational change.\n");
+ pr_debug("Ring operational change.\n");
break;
case 1:
- pr_debug(KERN_INFO "Receive fifo overflow.\n");
+ pr_debug("Receive fifo overflow.\n");
smc->os.MacStat.gen.rx_errors++;
break;
default:
- pr_debug(KERN_INFO "Unknown status (%d).\n", stat);
+ pr_debug("Unknown status (%d).\n", stat);
break;
}
} // smt_stat_counter
@@ -2100,10 +2100,10 @@ void cfm_state_change(struct s_smc *smc, int c_state)
s = "SC11_C_WRAP_S";
break;
default:
- pr_debug(KERN_INFO "cfm_state_change: unknown %d\n", c_state);
+ pr_debug("cfm_state_change: unknown %d\n", c_state);
return;
}
- pr_debug(KERN_INFO "cfm_state_change: %s\n", s);
+ pr_debug("cfm_state_change: %s\n", s);
#endif // DRIVERDEBUG
} // cfm_state_change
@@ -2158,7 +2158,7 @@ void ecm_state_change(struct s_smc *smc, int e_state)
s = "unknown";
break;
}
- pr_debug(KERN_INFO "ecm_state_change: %s\n", s);
+ pr_debug("ecm_state_change: %s\n", s);
#endif //DRIVERDEBUG
} // ecm_state_change
@@ -2213,7 +2213,7 @@ void rmt_state_change(struct s_smc *smc, int r_state)
s = "unknown";
break;
}
- pr_debug(KERN_INFO "[rmt_state_change: %s]\n", s);
+ pr_debug("[rmt_state_change: %s]\n", s);
#endif // DRIVERDEBUG
} // rmt_state_change
@@ -2233,7 +2233,7 @@ void rmt_state_change(struct s_smc *smc, int r_state)
************************/
void drv_reset_indication(struct s_smc *smc)
{
- pr_debug(KERN_INFO "entering drv_reset_indication\n");
+ pr_debug("entering drv_reset_indication\n");
smc->os.ResetRequested = TRUE; // Set flag.
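
The skfp hunks above drop KERN_INFO from the pr_debug() calls: unlike printk(), pr_debug() supplies its own log level, so the old calls embedded KERN_INFO's level marker inside the message text. A simplified userspace sketch of the mechanism, modeled on this era's "<6>"-style prefix strings:

#include <stdio.h>

#define KERN_INFO "<6>"
/* Simplified stand-in for the kernel macro, which prepends its own level. */
#define pr_debug(fmt, ...) printf("<7>" fmt, ##__VA_ARGS__)

int main(void)
{
        /* Old style: KERN_INFO's "<6>" lands inside the message body. */
        pr_debug(KERN_INFO "entering skfp_init_one\n");
        /* Fixed style, as in the hunks above. */
        pr_debug("entering skfp_init_one\n");
        return 0;
}
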
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 40e5c46e7571..a8a63581d63d 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3178,8 +3178,7 @@ static int skge_poll(struct napi_struct *napi, int to_do)
skb = skge_rx_get(dev, e, control, rd->status, rd->csum2);
if (likely(skb)) {
- netif_receive_skb(skb);
-
+ napi_gro_receive(napi, skb);
++work_done;
}
}
@@ -3192,6 +3191,7 @@ static int skge_poll(struct napi_struct *napi, int to_do)
if (work_done < to_do) {
unsigned long flags;
+ napi_gro_flush(napi);
spin_lock_irqsave(&hw->hw_lock, flags);
__napi_complete(napi);
hw->intr_mask |= napimask[skge->port];
@@ -3849,6 +3849,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
skge->rx_csum = 1;
}
+ dev->features |= NETIF_F_GRO;
/* read the mac address */
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
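
The skge change above is the usual three-part GRO conversion (sky2 gains the same NETIF_F_GRO flag just below): completed skbs go to napi_gro_receive() instead of netif_receive_skb(), held packets are flushed before NAPI completion, and the feature flag is advertised. A sketch of the shape against the 2.6.36-era NAPI API; example_rx_get() is a placeholder, not a real function, and the real skge poll completes under its own lock:

static struct sk_buff *example_rx_get(struct napi_struct *napi); /* placeholder */

static int example_poll(struct napi_struct *napi, int budget)
{
        int work_done = 0;
        struct sk_buff *skb;

        while (work_done < budget && (skb = example_rx_get(napi)) != NULL) {
                /* Let GRO coalesce segments before the stack sees them. */
                napi_gro_receive(napi, skb);
                work_done++;
        }
        if (work_done < budget) {
                napi_gro_flush(napi);   /* push out any held packets... */
                napi_complete(napi);    /* ...before re-enabling interrupts */
        }
        return work_done;
}
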
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 194e5cf8c763..3ef9b67ac6e6 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -4581,7 +4581,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
sky2->port = port;
- dev->features |= NETIF_F_TSO | NETIF_F_IP_CSUM | NETIF_F_SG;
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG
+ | NETIF_F_TSO | NETIF_F_GRO;
if (highmem)
dev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index fa434fb8fb7c..38547a8938fe 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -271,7 +271,7 @@ static int sl_realloc_bufs(struct slip *sl, int mtu)
memcpy(sl->xbuff, sl->xhead, sl->xleft);
} else {
sl->xleft = 0;
- sl->tx_dropped++;
+ dev->stats.tx_dropped++;
}
}
sl->xhead = sl->xbuff;
@@ -281,7 +281,7 @@ static int sl_realloc_bufs(struct slip *sl, int mtu)
memcpy(sl->rbuff, rbuff, sl->rcount);
} else {
sl->rcount = 0;
- sl->rx_over_errors++;
+ dev->stats.rx_over_errors++;
set_bit(SLF_ERROR, &sl->flags);
}
}
@@ -319,6 +319,7 @@ static inline void sl_unlock(struct slip *sl)
/* Send one completely decapsulated IP datagram to the IP layer. */
static void sl_bump(struct slip *sl)
{
+ struct net_device *dev = sl->dev;
struct sk_buff *skb;
int count;
@@ -329,13 +330,13 @@ static void sl_bump(struct slip *sl)
if (c & SL_TYPE_COMPRESSED_TCP) {
/* ignore compressed packets when CSLIP is off */
if (!(sl->mode & SL_MODE_CSLIP)) {
- printk(KERN_WARNING "%s: compressed packet ignored\n", sl->dev->name);
+ printk(KERN_WARNING "%s: compressed packet ignored\n", dev->name);
return;
}
/* make sure we've reserved enough space for uncompress
to use */
if (count + 80 > sl->buffsize) {
- sl->rx_over_errors++;
+ dev->stats.rx_over_errors++;
return;
}
count = slhc_uncompress(sl->slcomp, sl->rbuff, count);
@@ -346,7 +347,7 @@ static void sl_bump(struct slip *sl)
/* turn on header compression */
sl->mode |= SL_MODE_CSLIP;
sl->mode &= ~SL_MODE_ADAPTIVE;
- printk(KERN_INFO "%s: header compression turned on\n", sl->dev->name);
+ printk(KERN_INFO "%s: header compression turned on\n", dev->name);
}
sl->rbuff[0] &= 0x4f;
if (slhc_remember(sl->slcomp, sl->rbuff, count) <= 0)
@@ -355,20 +356,20 @@ static void sl_bump(struct slip *sl)
}
#endif /* SL_INCLUDE_CSLIP */
- sl->rx_bytes += count;
+ dev->stats.rx_bytes += count;
skb = dev_alloc_skb(count);
if (skb == NULL) {
- printk(KERN_WARNING "%s: memory squeeze, dropping packet.\n", sl->dev->name);
- sl->rx_dropped++;
+ printk(KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name);
+ dev->stats.rx_dropped++;
return;
}
- skb->dev = sl->dev;
+ skb->dev = dev;
memcpy(skb_put(skb, count), sl->rbuff, count);
skb_reset_mac_header(skb);
skb->protocol = htons(ETH_P_IP);
netif_rx(skb);
- sl->rx_packets++;
+ dev->stats.rx_packets++;
}
/* Encapsulate one IP datagram and stuff into a TTY queue. */
@@ -379,7 +380,7 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
if (len > sl->mtu) { /* Sigh, shouldn't occur BUT ... */
printk(KERN_WARNING "%s: truncating oversized transmit packet!\n", sl->dev->name);
- sl->tx_dropped++;
+ sl->dev->stats.tx_dropped++;
sl_unlock(sl);
return;
}
@@ -433,7 +434,7 @@ static void slip_write_wakeup(struct tty_struct *tty)
if (sl->xleft <= 0) {
/* Now serial buffer is almost free & we can start
* transmission of another packet */
- sl->tx_packets++;
+ sl->dev->stats.tx_packets++;
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
sl_unlock(sl);
return;
@@ -496,7 +497,7 @@ sl_xmit(struct sk_buff *skb, struct net_device *dev)
}
sl_lock(sl);
- sl->tx_bytes += skb->len;
+ dev->stats.tx_bytes += skb->len;
sl_encaps(sl, skb->data, skb->len);
spin_unlock(&sl->lock);
@@ -558,39 +559,39 @@ static int sl_change_mtu(struct net_device *dev, int new_mtu)
/* Netdevice get statistics request */
-static struct net_device_stats *
-sl_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *
+sl_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
- static struct net_device_stats stats;
- struct slip *sl = netdev_priv(dev);
+ struct net_device_stats *devstats = &dev->stats;
+ unsigned long c_rx_dropped = 0;
#ifdef SL_INCLUDE_CSLIP
- struct slcompress *comp;
-#endif
+ unsigned long c_rx_fifo_errors = 0;
+ unsigned long c_tx_fifo_errors = 0;
+ unsigned long c_collisions = 0;
+ struct slip *sl = netdev_priv(dev);
+ struct slcompress *comp = sl->slcomp;
- memset(&stats, 0, sizeof(struct net_device_stats));
-
- stats.rx_packets = sl->rx_packets;
- stats.tx_packets = sl->tx_packets;
- stats.rx_bytes = sl->rx_bytes;
- stats.tx_bytes = sl->tx_bytes;
- stats.rx_dropped = sl->rx_dropped;
- stats.tx_dropped = sl->tx_dropped;
- stats.tx_errors = sl->tx_errors;
- stats.rx_errors = sl->rx_errors;
- stats.rx_over_errors = sl->rx_over_errors;
-#ifdef SL_INCLUDE_CSLIP
- stats.rx_fifo_errors = sl->rx_compressed;
- stats.tx_fifo_errors = sl->tx_compressed;
- stats.collisions = sl->tx_misses;
- comp = sl->slcomp;
if (comp) {
- stats.rx_fifo_errors += comp->sls_i_compressed;
- stats.rx_dropped += comp->sls_i_tossed;
- stats.tx_fifo_errors += comp->sls_o_compressed;
- stats.collisions += comp->sls_o_misses;
+ c_rx_fifo_errors = comp->sls_i_compressed;
+ c_rx_dropped = comp->sls_i_tossed;
+ c_tx_fifo_errors = comp->sls_o_compressed;
+ c_collisions = comp->sls_o_misses;
}
-#endif /* CONFIG_INET */
- return (&stats);
+ stats->rx_fifo_errors = sl->rx_compressed + c_rx_fifo_errors;
+ stats->tx_fifo_errors = sl->tx_compressed + c_tx_fifo_errors;
+ stats->collisions = sl->tx_misses + c_collisions;
+#endif
+ stats->rx_packets = devstats->rx_packets;
+ stats->tx_packets = devstats->tx_packets;
+ stats->rx_bytes = devstats->rx_bytes;
+ stats->tx_bytes = devstats->tx_bytes;
+ stats->rx_dropped = devstats->rx_dropped + c_rx_dropped;
+ stats->tx_dropped = devstats->tx_dropped;
+ stats->tx_errors = devstats->tx_errors;
+ stats->rx_errors = devstats->rx_errors;
+ stats->rx_over_errors = devstats->rx_over_errors;
+
+ return stats;
}
/* Netdevice register callback */
@@ -633,7 +634,7 @@ static const struct net_device_ops sl_netdev_ops = {
.ndo_open = sl_open,
.ndo_stop = sl_close,
.ndo_start_xmit = sl_xmit,
- .ndo_get_stats = sl_get_stats,
+ .ndo_get_stats64 = sl_get_stats64,
.ndo_change_mtu = sl_change_mtu,
.ndo_tx_timeout = sl_tx_timeout,
#ifdef CONFIG_SLIP_SMART
@@ -681,7 +682,7 @@ static void slip_receive_buf(struct tty_struct *tty, const unsigned char *cp,
while (count--) {
if (fp && *fp++) {
if (!test_and_set_bit(SLF_ERROR, &sl->flags))
- sl->rx_errors++;
+ sl->dev->stats.rx_errors++;
cp++;
continue;
}
@@ -981,7 +982,7 @@ static void slip_unesc(struct slip *sl, unsigned char s)
sl->rbuff[sl->rcount++] = s;
return;
}
- sl->rx_over_errors++;
+ sl->dev->stats.rx_over_errors++;
set_bit(SLF_ERROR, &sl->flags);
}
}
@@ -1057,7 +1058,7 @@ static void slip_unesc6(struct slip *sl, unsigned char s)
sl->rbuff[sl->rcount++] = c;
return;
}
- sl->rx_over_errors++;
+ sl->dev->stats.rx_over_errors++;
set_bit(SLF_ERROR, &sl->flags);
}
}
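
The slip conversion above deletes the driver-private counters (see the slip.h hunk below) in favour of the generic dev->stats fields plus an ndo_get_stats64 method, which hands userspace 64-bit counters. A minimal sketch of such a handler, assuming all counters live in dev->stats:

static struct rtnl_link_stats64 *
example_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
        const struct net_device_stats *devstats = &dev->stats;

        /* Widen the unsigned long core counters to u64 for userspace. */
        stats->rx_packets = devstats->rx_packets;
        stats->tx_packets = devstats->tx_packets;
        stats->rx_bytes   = devstats->rx_bytes;
        stats->tx_bytes   = devstats->tx_bytes;
        stats->rx_errors  = devstats->rx_errors;
        stats->tx_errors  = devstats->tx_errors;
        return stats;   /* this era's API returns the buffer it filled */
}
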
diff --git a/drivers/net/slip.h b/drivers/net/slip.h
index 9ea5c11287d2..914e958abbfc 100644
--- a/drivers/net/slip.h
+++ b/drivers/net/slip.h
@@ -67,15 +67,6 @@ struct slip {
int xleft; /* bytes left in XMIT queue */
/* SLIP interface statistics. */
- unsigned long rx_packets; /* inbound frames counter */
- unsigned long tx_packets; /* outbound frames counter */
- unsigned long rx_bytes; /* inbound byte counte */
- unsigned long tx_bytes; /* outbound byte counter */
- unsigned long rx_errors; /* Parity, etc. errors */
- unsigned long tx_errors; /* Planned stuff */
- unsigned long rx_dropped; /* No memory for skb */
- unsigned long tx_dropped; /* When MTU change */
- unsigned long rx_over_errors; /* Frame bigger than SLIP buf. */
#ifdef SL_INCLUDE_CSLIP
unsigned long tx_compressed;
unsigned long rx_compressed;
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 8d2772cc42f2..ee747919a766 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -83,43 +83,6 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
}
}
-#elif defined(CONFIG_REDWOOD_5) || defined(CONFIG_REDWOOD_6)
-
-/* We can only do 16-bit reads and writes in the static memory space. */
-#define SMC_CAN_USE_8BIT 0
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 0
-#define SMC_NOWAIT 1
-
-#define SMC_IO_SHIFT 0
-
-#define SMC_inw(a, r) in_be16((volatile u16 *)((a) + (r)))
-#define SMC_outw(v, a, r) out_be16((volatile u16 *)((a) + (r)), v)
-#define SMC_insw(a, r, p, l) \
- do { \
- unsigned long __port = (a) + (r); \
- u16 *__p = (u16 *)(p); \
- int __l = (l); \
- insw(__port, __p, __l); \
- while (__l > 0) { \
- *__p = swab16(*__p); \
- __p++; \
- __l--; \
- } \
- } while (0)
-#define SMC_outsw(a, r, p, l) \
- do { \
- unsigned long __port = (a) + (r); \
- u16 *__p = (u16 *)(p); \
- int __l = (l); \
- while (__l > 0) { \
- /* Believe it or not, the swab isn't needed. */ \
- outw( /* swab16 */ (*__p++), __port); \
- __l--; \
- } \
- } while (0)
-#define SMC_IRQ_FLAGS (0)
-
#elif defined(CONFIG_SA1100_PLEB)
/* We can only do 16-bit reads and writes in the static memory space. */
#define SMC_CAN_USE_8BIT 1
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 0909ae934ad0..13ddcd487200 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -1048,7 +1048,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
smsc911x_rx_readfifo(pdata, (unsigned int *)skb->head,
pktwords);
skb->protocol = eth_type_trans(skb, dev);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
netif_receive_skb(skb);
/* Update counters */
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 1636a34d95dd..cb6bcca9d541 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1000,9 +1000,9 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
!(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
} else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
if (data_status & SPIDER_NET_VLAN_PACKET) {
/* further enhancements: HW-accel VLAN
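
Here, as in sh_eth and smsc911x above, the open-coded skb->ip_summed = CHECKSUM_NONE on the receive path becomes skb_checksum_none_assert(). Freshly allocated skbs already carry CHECKSUM_NONE, so the helper does not rewrite the field at all; it is roughly the following (sketch of the 2.6.36-era definition), checking the invariant only in debug builds:

static inline void skb_checksum_none_assert(struct sk_buff *skb)
{
#ifdef DEBUG
        BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}
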
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index a42b6873370b..4adf12422787 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -148,7 +148,7 @@ static int full_duplex[MAX_UNITS] = {0, };
* This SUCKS.
* We need a much better method to determine if dma_addr_t is 64-bit.
*/
-#if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__alpha__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR))
+#if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__alpha__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) || (defined(__powerpc64__) || defined(CONFIG_PHYS_64BIT))
/* 64-bit dma_addr_t */
#define ADDR_64BITS /* This chip uses 64 bit addresses. */
#define netdrv_addr_t __le64
@@ -302,7 +302,7 @@ enum chipset {
};
static DEFINE_PCI_DEVICE_TABLE(starfire_pci_tbl) = {
- { 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 },
+ { PCI_VDEVICE(ADAPTEC, 0x6915), CH_6915 },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);
@@ -2078,11 +2078,7 @@ static int __init starfire_init (void)
printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
#endif
- /* we can do this test only at run-time... sigh */
- if (sizeof(dma_addr_t) != sizeof(netdrv_addr_t)) {
- printk("This driver has dma_addr_t issues, please send email to maintainer\n");
- return -ENODEV;
- }
+ BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(netdrv_addr_t));
return pci_register_driver(&starfire_driver);
}
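
The starfire change above promotes the dma_addr_t/netdrv_addr_t size check from a load-time printk-and-fail to a compile-time failure. A userspace sketch of the negative-array trick behind BUILD_BUG_ON(); the type names are illustrative stand-ins:

#include <stdint.h>

/* Fails to compile when cond is true: the array size goes negative. */
#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

typedef uint64_t dma_addr_like_t;       /* stand-in for dma_addr_t...   */
typedef uint64_t netdrv_addr_like_t;    /* ...and for the driver's type */

int main(void)
{
        /* Compiles only while the two sizes agree. */
        BUILD_BUG_ON(sizeof(dma_addr_like_t) != sizeof(netdrv_addr_like_t));
        return 0;
}
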
diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig
index eb63d44748a7..3c2af7c6a39b 100644
--- a/drivers/net/stmmac/Kconfig
+++ b/drivers/net/stmmac/Kconfig
@@ -3,10 +3,10 @@ config STMMAC_ETH
select MII
select PHYLIB
select CRC32
- depends on NETDEVICES && CPU_SUBTYPE_ST40
+ depends on NETDEVICES
help
This is the driver for the Ethernet IPs are built around a
- Synopsys IP Core and fully tested on the STMicroelectronics
+ Synopsys IP Core and only tested on the STMicroelectronics
platforms.
if STMMAC_ETH
@@ -32,6 +32,7 @@ config STMMAC_DUAL_MAC
config STMMAC_TIMER
bool "STMMAC Timer optimisation"
default n
+ depends on RTC_HCTOSYS_DEVICE
help
Use an external timer for mitigating the number of network
interrupts. Currently, for SH architectures, it is possible
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
index 66b9da0260fe..673ef86a063f 100644
--- a/drivers/net/stmmac/common.h
+++ b/drivers/net/stmmac/common.h
@@ -102,8 +102,6 @@ struct stmmac_extra_stats {
#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
-#define HW_CSUM 1
-#define NO_HW_CSUM 0
enum rx_frame_status { /* IPC status */
good_frame = 0,
discard_frame = 1,
@@ -167,7 +165,7 @@ struct stmmac_desc_ops {
int (*get_tx_ls) (struct dma_desc *p);
/* Return the transmit status looking at the TDES1 */
int (*tx_status) (void *data, struct stmmac_extra_stats *x,
- struct dma_desc *p, unsigned long ioaddr);
+ struct dma_desc *p, void __iomem *ioaddr);
/* Get the buffer size from the descriptor */
int (*get_tx_len) (struct dma_desc *p);
/* Handle extra events on specific interrupts hw dependent */
@@ -182,44 +180,46 @@ struct stmmac_desc_ops {
struct stmmac_dma_ops {
/* DMA core initialization */
- int (*init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
+ int (*init) (void __iomem *ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
/* Dump DMA registers */
- void (*dump_regs) (unsigned long ioaddr);
+ void (*dump_regs) (void __iomem *ioaddr);
/* Set tx/rx threshold in the csr6 register
* An invalid value enables the store-and-forward mode */
- void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode);
+ void (*dma_mode) (void __iomem *ioaddr, int txmode, int rxmode);
/* To track extra statistic (if supported) */
void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
- unsigned long ioaddr);
- void (*enable_dma_transmission) (unsigned long ioaddr);
- void (*enable_dma_irq) (unsigned long ioaddr);
- void (*disable_dma_irq) (unsigned long ioaddr);
- void (*start_tx) (unsigned long ioaddr);
- void (*stop_tx) (unsigned long ioaddr);
- void (*start_rx) (unsigned long ioaddr);
- void (*stop_rx) (unsigned long ioaddr);
- int (*dma_interrupt) (unsigned long ioaddr,
+ void __iomem *ioaddr);
+ void (*enable_dma_transmission) (void __iomem *ioaddr);
+ void (*enable_dma_irq) (void __iomem *ioaddr);
+ void (*disable_dma_irq) (void __iomem *ioaddr);
+ void (*start_tx) (void __iomem *ioaddr);
+ void (*stop_tx) (void __iomem *ioaddr);
+ void (*start_rx) (void __iomem *ioaddr);
+ void (*stop_rx) (void __iomem *ioaddr);
+ int (*dma_interrupt) (void __iomem *ioaddr,
struct stmmac_extra_stats *x);
};
struct stmmac_ops {
/* MAC core initialization */
- void (*core_init) (unsigned long ioaddr) ____cacheline_aligned;
+ void (*core_init) (void __iomem *ioaddr) ____cacheline_aligned;
+ /* Support checksum offload engine */
+ int (*rx_coe) (void __iomem *ioaddr);
/* Dump MAC registers */
- void (*dump_regs) (unsigned long ioaddr);
+ void (*dump_regs) (void __iomem *ioaddr);
/* Handle extra events on specific interrupts hw dependent */
- void (*host_irq_status) (unsigned long ioaddr);
+ void (*host_irq_status) (void __iomem *ioaddr);
/* Multicast filter setting */
void (*set_filter) (struct net_device *dev);
/* Flow control setting */
- void (*flow_ctrl) (unsigned long ioaddr, unsigned int duplex,
+ void (*flow_ctrl) (void __iomem *ioaddr, unsigned int duplex,
unsigned int fc, unsigned int pause_time);
/* Set power management mode (e.g. magic frame) */
- void (*pmt) (unsigned long ioaddr, unsigned long mode);
+ void (*pmt) (void __iomem *ioaddr, unsigned long mode);
/* Set/Get Unicast MAC addresses */
- void (*set_umac_addr) (unsigned long ioaddr, unsigned char *addr,
+ void (*set_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
unsigned int reg_n);
- void (*get_umac_addr) (unsigned long ioaddr, unsigned char *addr,
+ void (*get_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
unsigned int reg_n);
};
@@ -243,11 +243,11 @@ struct mac_device_info {
struct mac_link link;
};
-struct mac_device_info *dwmac1000_setup(unsigned long addr);
-struct mac_device_info *dwmac100_setup(unsigned long addr);
+struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr);
+struct mac_device_info *dwmac100_setup(void __iomem *ioaddr);
-extern void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
+extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
unsigned int high, unsigned int low);
-extern void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
+extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int high, unsigned int low);
-extern void dwmac_dma_flush_tx_fifo(unsigned long ioaddr);
+extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
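
The recurring theme of the stmmac hunks from here on: every MMIO base moves from unsigned long to void __iomem *. That is more than style; the __iomem address-space annotation lets sparse reject plain dereferences, funnelling all device access through readl()/writel(). A kernel-context sketch of the distinction (GMAC_VERSION is the register offset from dwmac1000.h):

static u32 example_read_version(void __iomem *ioaddr)
{
        /* OK: the typed accessor handles byte order and ordering rules. */
        return readl(ioaddr + GMAC_VERSION);
        /* A plain dereference such as
         *      *(u32 *)(ioaddr + GMAC_VERSION)
         * now draws a sparse address-space warning. */
}
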
diff --git a/drivers/net/stmmac/dwmac1000.h b/drivers/net/stmmac/dwmac1000.h
index 8b20b19971cb..81ee4fd04386 100644
--- a/drivers/net/stmmac/dwmac1000.h
+++ b/drivers/net/stmmac/dwmac1000.h
@@ -99,7 +99,7 @@ enum inter_frame_gap {
#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
- GMAC_CONTROL_IPC | GMAC_CONTROL_JE | GMAC_CONTROL_BE)
+ GMAC_CONTROL_JE | GMAC_CONTROL_BE)
/* GMAC Frame Filter defines */
#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c
index 2b2f5c8caf1c..c18c85993179 100644
--- a/drivers/net/stmmac/dwmac1000_core.c
+++ b/drivers/net/stmmac/dwmac1000_core.c
@@ -30,7 +30,7 @@
#include <linux/slab.h>
#include "dwmac1000.h"
-static void dwmac1000_core_init(unsigned long ioaddr)
+static void dwmac1000_core_init(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + GMAC_CONTROL);
value |= GMAC_CORE_INIT;
@@ -50,10 +50,22 @@ static void dwmac1000_core_init(unsigned long ioaddr)
#endif
}
-static void dwmac1000_dump_regs(unsigned long ioaddr)
+static int dwmac1000_rx_coe_supported(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + GMAC_CONTROL);
+
+ value |= GMAC_CONTROL_IPC;
+ writel(value, ioaddr + GMAC_CONTROL);
+
+ value = readl(ioaddr + GMAC_CONTROL);
+
+ return !!(value & GMAC_CONTROL_IPC);
+}
+
+static void dwmac1000_dump_regs(void __iomem *ioaddr)
{
int i;
- pr_info("\tDWMAC1000 regs (base addr = 0x%8x)\n", (unsigned int)ioaddr);
+ pr_info("\tDWMAC1000 regs (base addr = 0x%p)\n", ioaddr);
for (i = 0; i < 55; i++) {
int offset = i * 4;
@@ -62,14 +74,14 @@ static void dwmac1000_dump_regs(unsigned long ioaddr)
}
}
-static void dwmac1000_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
+static void dwmac1000_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int reg_n)
{
stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
GMAC_ADDR_LOW(reg_n));
}
-static void dwmac1000_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
+static void dwmac1000_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int reg_n)
{
stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
@@ -78,7 +90,7 @@ static void dwmac1000_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
static void dwmac1000_set_filter(struct net_device *dev)
{
- unsigned long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = (void __iomem *) dev->base_addr;
unsigned int value = 0;
CHIP_DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
@@ -139,7 +151,7 @@ static void dwmac1000_set_filter(struct net_device *dev)
readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
}
-static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
+static void dwmac1000_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
unsigned int fc, unsigned int pause_time)
{
unsigned int flow = 0;
@@ -162,7 +174,7 @@ static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
writel(flow, ioaddr + GMAC_FLOW_CTRL);
}
-static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode)
+static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode)
{
unsigned int pmt = 0;
@@ -178,7 +190,7 @@ static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode)
}
-static void dwmac1000_irq_status(unsigned long ioaddr)
+static void dwmac1000_irq_status(void __iomem *ioaddr)
{
u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
@@ -202,6 +214,7 @@ static void dwmac1000_irq_status(unsigned long ioaddr)
struct stmmac_ops dwmac1000_ops = {
.core_init = dwmac1000_core_init,
+ .rx_coe = dwmac1000_rx_coe_supported,
.dump_regs = dwmac1000_dump_regs,
.host_irq_status = dwmac1000_irq_status,
.set_filter = dwmac1000_set_filter,
@@ -211,7 +224,7 @@ struct stmmac_ops dwmac1000_ops = {
.get_umac_addr = dwmac1000_get_umac_addr,
};
-struct mac_device_info *dwmac1000_setup(unsigned long ioaddr)
+struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr)
{
struct mac_device_info *mac;
u32 uid = readl(ioaddr + GMAC_VERSION);
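
dwmac1000_rx_coe_supported() above probes for the checksum-offload engine by trying to set GMAC_CONTROL_IPC and reading it back; on cores synthesized without the feature the bit reads back clear. The same write-and-read-back probe, generalized as a sketch (reg and bit are illustrative; as in the driver, the probed bit is deliberately left set on success):

static int example_hw_bit_supported(void __iomem *ioaddr, u32 reg, u32 bit)
{
        u32 value = readl(ioaddr + reg);

        writel(value | bit, ioaddr + reg);
        value = readl(ioaddr + reg);
        return !!(value & bit);         /* reads back 0 if not wired up */
}
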
diff --git a/drivers/net/stmmac/dwmac1000_dma.c b/drivers/net/stmmac/dwmac1000_dma.c
index 415805057cb0..ce6163e39cd5 100644
--- a/drivers/net/stmmac/dwmac1000_dma.c
+++ b/drivers/net/stmmac/dwmac1000_dma.c
@@ -29,14 +29,22 @@
#include "dwmac1000.h"
#include "dwmac_dma.h"
-static int dwmac1000_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
+static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
u32 dma_rx)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
+ int limit;
+
/* DMA SW reset */
value |= DMA_BUS_MODE_SFT_RESET;
writel(value, ioaddr + DMA_BUS_MODE);
- do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
+ limit = 15000;
+ while (limit--) {
+ if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
+ break;
+ }
+ if (limit < 0)
+ return -EBUSY;
value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL |
((pbl << DMA_BUS_MODE_PBL_SHIFT) |
@@ -58,7 +66,7 @@ static int dwmac1000_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
return 0;
}
-static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
+static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
int rxmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -111,12 +119,12 @@ static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
/* Not yet implemented --- no RMON module */
static void dwmac1000_dma_diagnostic_fr(void *data,
- struct stmmac_extra_stats *x, unsigned long ioaddr)
+ struct stmmac_extra_stats *x, void __iomem *ioaddr)
{
return;
}
-static void dwmac1000_dump_dma_regs(unsigned long ioaddr)
+static void dwmac1000_dump_dma_regs(void __iomem *ioaddr)
{
int i;
pr_info(" DMA registers\n");
diff --git a/drivers/net/stmmac/dwmac100_core.c b/drivers/net/stmmac/dwmac100_core.c
index 2fb165fa2ba0..58a914b27003 100644
--- a/drivers/net/stmmac/dwmac100_core.c
+++ b/drivers/net/stmmac/dwmac100_core.c
@@ -31,7 +31,7 @@
#include <linux/crc32.h>
#include "dwmac100.h"
-static void dwmac100_core_init(unsigned long ioaddr)
+static void dwmac100_core_init(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + MAC_CONTROL);
@@ -42,12 +42,17 @@ static void dwmac100_core_init(unsigned long ioaddr)
#endif
}
-static void dwmac100_dump_mac_regs(unsigned long ioaddr)
+static int dwmac100_rx_coe_supported(void __iomem *ioaddr)
+{
+ return 0;
+}
+
+static void dwmac100_dump_mac_regs(void __iomem *ioaddr)
{
pr_info("\t----------------------------------------------\n"
- "\t DWMAC 100 CSR (base addr = 0x%8x)\n"
+ "\t DWMAC 100 CSR (base addr = 0x%p)\n"
"\t----------------------------------------------\n",
- (unsigned int)ioaddr);
+ ioaddr);
pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
readl(ioaddr + MAC_CONTROL));
pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
@@ -77,18 +82,18 @@ static void dwmac100_dump_mac_regs(unsigned long ioaddr)
MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
}
-static void dwmac100_irq_status(unsigned long ioaddr)
+static void dwmac100_irq_status(void __iomem *ioaddr)
{
return;
}
-static void dwmac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
+static void dwmac100_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int reg_n)
{
stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
}
-static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
+static void dwmac100_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int reg_n)
{
stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
@@ -96,7 +101,7 @@ static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
static void dwmac100_set_filter(struct net_device *dev)
{
- unsigned long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = (void __iomem *) dev->base_addr;
u32 value = readl(ioaddr + MAC_CONTROL);
if (dev->flags & IFF_PROMISC) {
@@ -145,7 +150,7 @@ static void dwmac100_set_filter(struct net_device *dev)
readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
}
-static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
+static void dwmac100_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
unsigned int fc, unsigned int pause_time)
{
unsigned int flow = MAC_FLOW_CTRL_ENABLE;
@@ -158,13 +163,14 @@ static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
/* No PMT module supported for this Ethernet Controller.
* Tested on ST platforms only.
*/
-static void dwmac100_pmt(unsigned long ioaddr, unsigned long mode)
+static void dwmac100_pmt(void __iomem *ioaddr, unsigned long mode)
{
return;
}
struct stmmac_ops dwmac100_ops = {
.core_init = dwmac100_core_init,
+ .rx_coe = dwmac100_rx_coe_supported,
.dump_regs = dwmac100_dump_mac_regs,
.host_irq_status = dwmac100_irq_status,
.set_filter = dwmac100_set_filter,
@@ -174,7 +180,7 @@ struct stmmac_ops dwmac100_ops = {
.get_umac_addr = dwmac100_get_umac_addr,
};
-struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
+struct mac_device_info *dwmac100_setup(void __iomem *ioaddr)
{
struct mac_device_info *mac;
diff --git a/drivers/net/stmmac/dwmac100_dma.c b/drivers/net/stmmac/dwmac100_dma.c
index 2fece7b72727..96aac93b789b 100644
--- a/drivers/net/stmmac/dwmac100_dma.c
+++ b/drivers/net/stmmac/dwmac100_dma.c
@@ -31,14 +31,22 @@
#include "dwmac100.h"
#include "dwmac_dma.h"
-static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
+static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
u32 dma_rx)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
+ int limit;
+
/* DMA SW reset */
value |= DMA_BUS_MODE_SFT_RESET;
writel(value, ioaddr + DMA_BUS_MODE);
- do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
+ limit = 15000;
+ while (limit--) {
+ if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
+ break;
+ }
+ if (limit < 0)
+ return -EBUSY;
/* Enable Application Access by writing to DMA CSR0 */
writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
@@ -58,7 +66,7 @@ static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
/* Store and Forward capability is not used at all..
* The transmit threshold can be programmed by
* setting the TTC bits in the DMA control register.*/
-static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
+static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode,
int rxmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -73,7 +81,7 @@ static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
writel(csr6, ioaddr + DMA_CONTROL);
}
-static void dwmac100_dump_dma_regs(unsigned long ioaddr)
+static void dwmac100_dump_dma_regs(void __iomem *ioaddr)
{
int i;
@@ -91,7 +99,7 @@ static void dwmac100_dump_dma_regs(unsigned long ioaddr)
/* DMA controller has two counters to track the number of
* the receive missed frames. */
static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
- unsigned long ioaddr)
+ void __iomem *ioaddr)
{
struct net_device_stats *stats = (struct net_device_stats *)data;
u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
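
Both DMA-init hunks above replace an unbounded do {} while (...SFT_RESET) spin with a bounded poll that returns -EBUSY, so a core held in reset or left unclocked fails the probe instead of hanging the boot. (The dwmac_dma_flush_tx_fifo() spin in dwmac_lib.c below keeps the unbounded form.) A sketch of the pattern using the patch's 15000-iteration bound:

static int example_soft_reset(void __iomem *ioaddr)
{
        int limit = 15000;              /* iteration bound from the patch */

        writel(readl(ioaddr + DMA_BUS_MODE) | DMA_BUS_MODE_SFT_RESET,
               ioaddr + DMA_BUS_MODE);
        while (limit--)
                if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
                        return 0;       /* reset completed */
        return -EBUSY;                  /* hardware never cleared the bit */
}
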
diff --git a/drivers/net/stmmac/dwmac_dma.h b/drivers/net/stmmac/dwmac_dma.h
index 7b815a1b7b8c..da3f5ccf83d3 100644
--- a/drivers/net/stmmac/dwmac_dma.h
+++ b/drivers/net/stmmac/dwmac_dma.h
@@ -97,12 +97,12 @@
#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
-extern void dwmac_enable_dma_transmission(unsigned long ioaddr);
-extern void dwmac_enable_dma_irq(unsigned long ioaddr);
-extern void dwmac_disable_dma_irq(unsigned long ioaddr);
-extern void dwmac_dma_start_tx(unsigned long ioaddr);
-extern void dwmac_dma_stop_tx(unsigned long ioaddr);
-extern void dwmac_dma_start_rx(unsigned long ioaddr);
-extern void dwmac_dma_stop_rx(unsigned long ioaddr);
-extern int dwmac_dma_interrupt(unsigned long ioaddr,
+extern void dwmac_enable_dma_transmission(void __iomem *ioaddr);
+extern void dwmac_enable_dma_irq(void __iomem *ioaddr);
+extern void dwmac_disable_dma_irq(void __iomem *ioaddr);
+extern void dwmac_dma_start_tx(void __iomem *ioaddr);
+extern void dwmac_dma_stop_tx(void __iomem *ioaddr);
+extern void dwmac_dma_start_rx(void __iomem *ioaddr);
+extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
+extern int dwmac_dma_interrupt(void __iomem *ioaddr,
struct stmmac_extra_stats *x);
diff --git a/drivers/net/stmmac/dwmac_lib.c b/drivers/net/stmmac/dwmac_lib.c
index a85415216ef4..d65fab1ba790 100644
--- a/drivers/net/stmmac/dwmac_lib.c
+++ b/drivers/net/stmmac/dwmac_lib.c
@@ -32,43 +32,43 @@
#endif
/* CSR1 enables the transmit DMA to check for new descriptor */
-void dwmac_enable_dma_transmission(unsigned long ioaddr)
+void dwmac_enable_dma_transmission(void __iomem *ioaddr)
{
writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
}
-void dwmac_enable_dma_irq(unsigned long ioaddr)
+void dwmac_enable_dma_irq(void __iomem *ioaddr)
{
writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
}
-void dwmac_disable_dma_irq(unsigned long ioaddr)
+void dwmac_disable_dma_irq(void __iomem *ioaddr)
{
writel(0, ioaddr + DMA_INTR_ENA);
}
-void dwmac_dma_start_tx(unsigned long ioaddr)
+void dwmac_dma_start_tx(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + DMA_CONTROL);
value |= DMA_CONTROL_ST;
writel(value, ioaddr + DMA_CONTROL);
}
-void dwmac_dma_stop_tx(unsigned long ioaddr)
+void dwmac_dma_stop_tx(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + DMA_CONTROL);
value &= ~DMA_CONTROL_ST;
writel(value, ioaddr + DMA_CONTROL);
}
-void dwmac_dma_start_rx(unsigned long ioaddr)
+void dwmac_dma_start_rx(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + DMA_CONTROL);
value |= DMA_CONTROL_SR;
writel(value, ioaddr + DMA_CONTROL);
}
-void dwmac_dma_stop_rx(unsigned long ioaddr)
+void dwmac_dma_stop_rx(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + DMA_CONTROL);
value &= ~DMA_CONTROL_SR;
@@ -145,7 +145,7 @@ static void show_rx_process_state(unsigned int status)
}
#endif
-int dwmac_dma_interrupt(unsigned long ioaddr,
+int dwmac_dma_interrupt(void __iomem *ioaddr,
struct stmmac_extra_stats *x)
{
int ret = 0;
@@ -219,7 +219,7 @@ int dwmac_dma_interrupt(unsigned long ioaddr,
return ret;
}
-void dwmac_dma_flush_tx_fifo(unsigned long ioaddr)
+void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
@@ -227,7 +227,7 @@ void dwmac_dma_flush_tx_fifo(unsigned long ioaddr)
do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
}
-void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
+void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
unsigned int high, unsigned int low)
{
unsigned long data;
@@ -238,7 +238,7 @@ void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
writel(data, ioaddr + low);
}
-void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
+void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int high, unsigned int low)
{
unsigned int hi_addr, lo_addr;
diff --git a/drivers/net/stmmac/enh_desc.c b/drivers/net/stmmac/enh_desc.c
index f612f986a7e1..5d1471d8f8f6 100644
--- a/drivers/net/stmmac/enh_desc.c
+++ b/drivers/net/stmmac/enh_desc.c
@@ -25,7 +25,7 @@
#include "common.h"
static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
- struct dma_desc *p, unsigned long ioaddr)
+ struct dma_desc *p, void __iomem *ioaddr)
{
int ret = 0;
struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -284,7 +284,7 @@ static void enh_desc_release_tx_desc(struct dma_desc *p)
{
int ter = p->des01.etx.end_ring;
- memset(p, 0, sizeof(struct dma_desc));
+ memset(p, 0, offsetof(struct dma_desc, des2));
p->des01.etx.end_ring = ter;
}
diff --git a/drivers/net/stmmac/norm_desc.c b/drivers/net/stmmac/norm_desc.c
index 31ad53643792..0dce90cb8124 100644
--- a/drivers/net/stmmac/norm_desc.c
+++ b/drivers/net/stmmac/norm_desc.c
@@ -25,7 +25,7 @@
#include "common.h"
static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
- struct dma_desc *p, unsigned long ioaddr)
+ struct dma_desc *p, void __iomem *ioaddr)
{
int ret = 0;
struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -174,22 +174,7 @@ static void ndesc_release_tx_desc(struct dma_desc *p)
{
int ter = p->des01.tx.end_ring;
- /* clean field used within the xmit */
- p->des01.tx.first_segment = 0;
- p->des01.tx.last_segment = 0;
- p->des01.tx.buffer1_size = 0;
-
- /* clean status reported */
- p->des01.tx.error_summary = 0;
- p->des01.tx.underflow_error = 0;
- p->des01.tx.no_carrier = 0;
- p->des01.tx.loss_carrier = 0;
- p->des01.tx.excessive_deferral = 0;
- p->des01.tx.excessive_collisions = 0;
- p->des01.tx.late_collision = 0;
- p->des01.tx.heartbeat_fail = 0;
- p->des01.tx.deferred = 0;
-
+ memset(p, 0, offsetof(struct dma_desc, des2));
/* set termination field */
p->des01.tx.end_ring = ter;
}
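
Both descriptor-release paths now clear everything before des2 with a single memset() instead of zeroing status bit-fields one by one; stopping at offsetof(..., des2) keeps the buffer and chain pointers intact, where the old enh_desc version wiped the whole descriptor, buffer pointers included. A runnable userspace sketch with an illustrative layout:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct dma_desc_like {                  /* illustrative layout only */
        unsigned int des0;              /* status bits */
        unsigned int des1;              /* control bits */
        unsigned int des2;              /* buffer address: must survive */
        unsigned int des3;              /* next descriptor / buffer 2 */
};

int main(void)
{
        struct dma_desc_like d = { 0xdeadbeef, 0xcafef00d,
                                   0x10000000, 0x20000000 };

        memset(&d, 0, offsetof(struct dma_desc_like, des2));
        printf("des0=%#x des1=%#x des2=%#x des3=%#x\n",
               d.des0, d.des1, d.des2, d.des3);
        /* prints: des0=0 des1=0 des2=0x10000000 des3=0x20000000 */
        return 0;
}
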
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index ebebc644b1b8..92154ff7d702 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -21,6 +21,7 @@
*******************************************************************************/
#define DRV_MODULE_VERSION "Apr_2010"
+#include <linux/platform_device.h>
#include <linux/stmmac.h>
#include "common.h"
@@ -50,10 +51,10 @@ struct stmmac_priv {
int is_gmac;
dma_addr_t dma_rx_phy;
unsigned int dma_rx_size;
- int rx_csum;
unsigned int dma_buf_sz;
struct device *device;
struct mac_device_info *hw;
+ void __iomem *ioaddr;
struct stmmac_extra_stats xstats;
struct napi_struct napi;
@@ -65,7 +66,7 @@ struct stmmac_priv {
int phy_mask;
int (*phy_reset) (void *priv);
void (*fix_mac_speed) (void *priv, unsigned int speed);
- void (*bus_setup)(unsigned long ioaddr);
+ void (*bus_setup)(void __iomem *ioaddr);
void *bsp_priv;
int phy_irq;
@@ -76,6 +77,7 @@ struct stmmac_priv {
unsigned int flow_ctrl;
unsigned int pause;
struct mii_bus *mii;
+ int mii_clk_csr;
u32 msg_enable;
spinlock_t lock;
@@ -89,6 +91,9 @@ struct stmmac_priv {
struct vlan_group *vlgrp;
#endif
int enh_desc;
+ int rx_coe;
+ int bugged_jumbo;
+ int no_csum_insertion;
};
#ifdef CONFIG_STM_DRIVERS
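The new ioaddr member caches the ioremap() cookie in the driver's private state, so the hot paths no longer round-trip it through dev->base_addr as an unsigned long. A sketch of the probe-side half of that pattern, assuming the single MEM resource this driver uses:

#include <linux/platform_device.h>
#include <linux/io.h>

static int demo_map_regs(struct platform_device *pdev,
			 struct stmmac_priv *priv)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	priv->ioaddr = ioremap(res->start, resource_size(res));
	if (!priv->ioaddr)
		return -ENOMEM;

	return 0;
}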
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index f080509923f0..b32c16ae55c6 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -177,21 +177,21 @@ void stmmac_ethtool_gregs(struct net_device *dev,
if (!priv->is_gmac) {
/* MAC registers */
for (i = 0; i < 12; i++)
- reg_space[i] = readl(dev->base_addr + (i * 4));
+ reg_space[i] = readl(priv->ioaddr + (i * 4));
/* DMA registers */
for (i = 0; i < 9; i++)
reg_space[i + 12] =
- readl(dev->base_addr + (DMA_BUS_MODE + (i * 4)));
- reg_space[22] = readl(dev->base_addr + DMA_CUR_TX_BUF_ADDR);
- reg_space[23] = readl(dev->base_addr + DMA_CUR_RX_BUF_ADDR);
+ readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
+ reg_space[22] = readl(priv->ioaddr + DMA_CUR_TX_BUF_ADDR);
+ reg_space[23] = readl(priv->ioaddr + DMA_CUR_RX_BUF_ADDR);
} else {
/* MAC registers */
for (i = 0; i < 55; i++)
- reg_space[i] = readl(dev->base_addr + (i * 4));
+ reg_space[i] = readl(priv->ioaddr + (i * 4));
/* DMA registers */
for (i = 0; i < 22; i++)
reg_space[i + 55] =
- readl(dev->base_addr + (DMA_BUS_MODE + (i * 4)));
+ readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
}
}
@@ -209,7 +209,7 @@ u32 stmmac_ethtool_get_rx_csum(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
- return priv->rx_csum;
+ return priv->rx_coe;
}
static void
@@ -263,11 +263,9 @@ stmmac_set_pauseparam(struct net_device *netdev,
cmd.phy_address = phy->addr;
ret = phy_ethtool_sset(phy, &cmd);
}
- } else {
- unsigned long ioaddr = netdev->base_addr;
- priv->hw->mac->flow_ctrl(ioaddr, phy->duplex,
+ } else
+ priv->hw->mac->flow_ctrl(priv->ioaddr, phy->duplex,
priv->flow_ctrl, priv->pause);
- }
spin_unlock(&priv->lock);
return ret;
}
@@ -276,12 +274,11 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *dummy, u64 *data)
{
struct stmmac_priv *priv = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
int i;
/* Update HW stats if supported */
priv->hw->dma->dma_diagnostic_fr(&dev->stats, (void *) &priv->xstats,
- ioaddr);
+ priv->ioaddr);
for (i = 0; i < STMMAC_STATS_LEN; i++) {
char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index bbb7951b9c4c..a908f7201aae 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -134,13 +134,6 @@ static int buf_sz = DMA_BUFFER_SIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");
-/* In case of Giga ETH, we can enable/disable the COE for the
- * transmit HW checksum computation.
- * Note that, if tx csum is off in HW, SG will be still supported. */
-static int tx_coe = HW_CSUM;
-module_param(tx_coe, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(tx_coe, "GMAC COE type 2 [on/off]");
-
static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
@@ -202,7 +195,6 @@ static void stmmac_adjust_link(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
struct phy_device *phydev = priv->phydev;
- unsigned long ioaddr = dev->base_addr;
unsigned long flags;
int new_state = 0;
unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
@@ -215,7 +207,7 @@ static void stmmac_adjust_link(struct net_device *dev)
spin_lock_irqsave(&priv->lock, flags);
if (phydev->link) {
- u32 ctrl = readl(ioaddr + MAC_CTRL_REG);
+ u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
/* Now we make sure that we can be in full duplex mode.
* If not, we operate in half-duplex mode. */
@@ -229,7 +221,7 @@ static void stmmac_adjust_link(struct net_device *dev)
}
/* Flow Control operation */
if (phydev->pause)
- priv->hw->mac->flow_ctrl(ioaddr, phydev->duplex,
+ priv->hw->mac->flow_ctrl(priv->ioaddr, phydev->duplex,
fc, pause_time);
if (phydev->speed != priv->speed) {
@@ -238,6 +230,9 @@ static void stmmac_adjust_link(struct net_device *dev)
case 1000:
if (likely(priv->is_gmac))
ctrl &= ~priv->hw->link.port;
+ if (likely(priv->fix_mac_speed))
+ priv->fix_mac_speed(priv->bsp_priv,
+ phydev->speed);
break;
case 100:
case 10:
@@ -265,7 +260,7 @@ static void stmmac_adjust_link(struct net_device *dev)
priv->speed = phydev->speed;
}
- writel(ctrl, ioaddr + MAC_CTRL_REG);
+ writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
if (!priv->oldlink) {
new_state = 1;
@@ -342,7 +337,7 @@ static int stmmac_init_phy(struct net_device *dev)
return 0;
}
-static inline void stmmac_mac_enable_rx(unsigned long ioaddr)
+static inline void stmmac_mac_enable_rx(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + MAC_CTRL_REG);
value |= MAC_RNABLE_RX;
@@ -350,7 +345,7 @@ static inline void stmmac_mac_enable_rx(unsigned long ioaddr)
writel(value, ioaddr + MAC_CTRL_REG);
}
-static inline void stmmac_mac_enable_tx(unsigned long ioaddr)
+static inline void stmmac_mac_enable_tx(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + MAC_CTRL_REG);
value |= MAC_ENABLE_TX;
@@ -358,14 +353,14 @@ static inline void stmmac_mac_enable_tx(unsigned long ioaddr)
writel(value, ioaddr + MAC_CTRL_REG);
}
-static inline void stmmac_mac_disable_rx(unsigned long ioaddr)
+static inline void stmmac_mac_disable_rx(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + MAC_CTRL_REG);
value &= ~MAC_RNABLE_RX;
writel(value, ioaddr + MAC_CTRL_REG);
}
-static inline void stmmac_mac_disable_tx(unsigned long ioaddr)
+static inline void stmmac_mac_disable_tx(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + MAC_CTRL_REG);
value &= ~MAC_ENABLE_TX;
@@ -567,29 +562,22 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
* stmmac_dma_operation_mode - HW DMA operation mode
* @priv : pointer to the private device structure.
* Description: it sets the DMA operation mode: tx/rx DMA thresholds
- * or Store-And-Forward capability. It also verifies the COE for the
- * transmission in case of Giga ETH.
+ * or Store-And-Forward capability.
*/
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
- if (!priv->is_gmac) {
- /* MAC 10/100 */
- priv->hw->dma->dma_mode(priv->dev->base_addr, tc, 0);
- priv->tx_coe = NO_HW_CSUM;
- } else {
- if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) {
- priv->hw->dma->dma_mode(priv->dev->base_addr,
- SF_DMA_MODE, SF_DMA_MODE);
- tc = SF_DMA_MODE;
- priv->tx_coe = HW_CSUM;
- } else {
- /* Checksum computation is performed in software. */
- priv->hw->dma->dma_mode(priv->dev->base_addr, tc,
- SF_DMA_MODE);
- priv->tx_coe = NO_HW_CSUM;
- }
- }
- tx_coe = priv->tx_coe;
+ if (likely((priv->tx_coe) && (!priv->no_csum_insertion))) {
+ /* In case of GMAC, SF mode has to be enabled
+ * to perform the TX COE. This depends on:
+ * 1) TX COE is actually supported;
+ * 2) there is no bugged Jumbo frame support
+ * that requires skipping csum insertion in the TDES.
+ */
+ priv->hw->dma->dma_mode(priv->ioaddr,
+ SF_DMA_MODE, SF_DMA_MODE);
+ tc = SF_DMA_MODE;
+ } else
+ priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
}
/**
@@ -600,7 +588,6 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
static void stmmac_tx(struct stmmac_priv *priv)
{
unsigned int txsize = priv->dma_tx_size;
- unsigned long ioaddr = priv->dev->base_addr;
while (priv->dirty_tx != priv->cur_tx) {
int last;
@@ -618,7 +605,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
int tx_error =
priv->hw->desc->tx_status(&priv->dev->stats,
&priv->xstats, p,
- ioaddr);
+ priv->ioaddr);
if (likely(tx_error == 0)) {
priv->dev->stats.tx_packets++;
priv->xstats.tx_pkt_n++;
@@ -674,7 +661,7 @@ static inline void stmmac_enable_irq(struct stmmac_priv *priv)
priv->tm->timer_start(tmrate);
else
#endif
- priv->hw->dma->enable_dma_irq(priv->dev->base_addr);
+ priv->hw->dma->enable_dma_irq(priv->ioaddr);
}
static inline void stmmac_disable_irq(struct stmmac_priv *priv)
@@ -684,7 +671,7 @@ static inline void stmmac_disable_irq(struct stmmac_priv *priv)
priv->tm->timer_stop();
else
#endif
- priv->hw->dma->disable_dma_irq(priv->dev->base_addr);
+ priv->hw->dma->disable_dma_irq(priv->ioaddr);
}
static int stmmac_has_work(struct stmmac_priv *priv)
@@ -739,14 +726,15 @@ static void stmmac_no_timer_stopped(void)
*/
static void stmmac_tx_err(struct stmmac_priv *priv)
{
+
netif_stop_queue(priv->dev);
- priv->hw->dma->stop_tx(priv->dev->base_addr);
+ priv->hw->dma->stop_tx(priv->ioaddr);
dma_free_tx_skbufs(priv);
priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
priv->dirty_tx = 0;
priv->cur_tx = 0;
- priv->hw->dma->start_tx(priv->dev->base_addr);
+ priv->hw->dma->start_tx(priv->ioaddr);
priv->dev->stats.tx_errors++;
netif_wake_queue(priv->dev);
@@ -755,11 +743,9 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
- unsigned long ioaddr = priv->dev->base_addr;
int status;
- status = priv->hw->dma->dma_interrupt(priv->dev->base_addr,
- &priv->xstats);
+ status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
if (likely(status == handle_tx_rx))
_stmmac_schedule(priv);
@@ -767,7 +753,7 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
/* Try to bump up the dma threshold on this failure */
if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
tc += 64;
- priv->hw->dma->dma_mode(ioaddr, tc, SF_DMA_MODE);
+ priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
priv->xstats.threshold = tc;
}
stmmac_tx_err(priv);
@@ -787,7 +773,6 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
static int stmmac_open(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
int ret;
/* Check that the MAC address is valid. If it's not, refuse
@@ -843,7 +828,8 @@ static int stmmac_open(struct net_device *dev)
init_dma_desc_rings(dev);
/* DMA initialization and SW reset */
- if (unlikely(priv->hw->dma->init(ioaddr, priv->pbl, priv->dma_tx_phy,
+ if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->pbl,
+ priv->dma_tx_phy,
priv->dma_rx_phy) < 0)) {
pr_err("%s: DMA initialization failed\n", __func__);
@@ -851,22 +837,28 @@ static int stmmac_open(struct net_device *dev)
}
/* Copy the MAC addr into the HW */
- priv->hw->mac->set_umac_addr(ioaddr, dev->dev_addr, 0);
+ priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
/* If required, perform hw setup of the bus. */
if (priv->bus_setup)
- priv->bus_setup(ioaddr);
+ priv->bus_setup(priv->ioaddr);
/* Initialize the MAC Core */
- priv->hw->mac->core_init(ioaddr);
+ priv->hw->mac->core_init(priv->ioaddr);
+
+ priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
+ if (priv->rx_coe)
+ pr_info("stmmac: Rx Checksum Offload Engine supported\n");
+ if (priv->tx_coe)
+ pr_info("\tTX Checksum insertion supported\n");
priv->shutdown = 0;
/* Initialise the MMC (if present) to disable all interrupts. */
- writel(0xffffffff, ioaddr + MMC_HIGH_INTR_MASK);
- writel(0xffffffff, ioaddr + MMC_LOW_INTR_MASK);
+ writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
+ writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
/* Enable the MAC Rx/Tx */
- stmmac_mac_enable_rx(ioaddr);
- stmmac_mac_enable_tx(ioaddr);
+ stmmac_mac_enable_rx(priv->ioaddr);
+ stmmac_mac_enable_tx(priv->ioaddr);
/* Set the HW DMA mode and the COE */
stmmac_dma_operation_mode(priv);
@@ -877,16 +869,16 @@ static int stmmac_open(struct net_device *dev)
/* Start the ball rolling... */
DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
- priv->hw->dma->start_tx(ioaddr);
- priv->hw->dma->start_rx(ioaddr);
+ priv->hw->dma->start_tx(priv->ioaddr);
+ priv->hw->dma->start_rx(priv->ioaddr);
#ifdef CONFIG_STMMAC_TIMER
priv->tm->timer_start(tmrate);
#endif
/* Dump DMA/MAC registers */
if (netif_msg_hw(priv)) {
- priv->hw->mac->dump_regs(ioaddr);
- priv->hw->dma->dump_regs(ioaddr);
+ priv->hw->mac->dump_regs(priv->ioaddr);
+ priv->hw->dma->dump_regs(priv->ioaddr);
}
if (priv->phydev)
@@ -930,15 +922,15 @@ static int stmmac_release(struct net_device *dev)
free_irq(dev->irq, dev);
/* Stop TX/RX DMA and clear the descriptors */
- priv->hw->dma->stop_tx(dev->base_addr);
- priv->hw->dma->stop_rx(dev->base_addr);
+ priv->hw->dma->stop_tx(priv->ioaddr);
+ priv->hw->dma->stop_rx(priv->ioaddr);
/* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv);
/* Disable the MAC core */
- stmmac_mac_disable_tx(dev->base_addr);
- stmmac_mac_disable_rx(dev->base_addr);
+ stmmac_mac_disable_tx(priv->ioaddr);
+ stmmac_mac_disable_rx(priv->ioaddr);
netif_carrier_off(dev);
@@ -1066,7 +1058,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return stmmac_sw_tso(priv, skb);
if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) {
- if (likely(priv->tx_coe == NO_HW_CSUM))
+ if (unlikely((!priv->tx_coe) || (priv->no_csum_insertion)))
skb_checksum_help(skb);
else
csum_insertion = 1;
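The reworked test falls back to skb_checksum_help() whenever hardware insertion is unavailable — either no TX COE at all, or insertion suppressed by the bugged-jumbo workaround. A minimal sketch of that contract, with hw_csum_ok standing in for the pair of driver flags:

#include <linux/skbuff.h>

static int demo_resolve_tx_csum(struct sk_buff *skb, bool hw_csum_ok)
{
	/* CHECKSUM_PARTIAL: the stack has left the checksum to us */
	if (skb->ip_summed == CHECKSUM_PARTIAL && !hw_csum_ok)
		return skb_checksum_help(skb);	/* compute in software */

	return 0;	/* otherwise the descriptor requests insertion */
}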
@@ -1140,7 +1132,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_bytes += skb->len;
- priv->hw->dma->enable_dma_transmission(dev->base_addr);
+ priv->hw->dma->enable_dma_transmission(priv->ioaddr);
return NETDEV_TX_OK;
}
@@ -1256,7 +1248,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
if (unlikely(status == csum_none)) {
/* always for the old mac 10/100 */
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
netif_receive_skb(skb);
} else {
skb->ip_summed = CHECKSUM_UNNECESSARY;
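skb_checksum_none_assert() replaces the open-coded store: a freshly allocated skb already carries CHECKSUM_NONE, so the helper merely documents that invariant and, in DEBUG builds, verifies it. Its approximate shape:

#include <linux/skbuff.h>

/* approximate shape of the helper from <linux/skbuff.h> */
static inline void demo_checksum_none_assert(struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}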
@@ -1390,6 +1382,15 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
}
+ /* Some GMAC devices have a bugged Jumbo frame support that
+ * needs to have the Tx COE disabled for oversized frames
+ * (due to limited buffer sizes). In this case we disable
+ * the TX csum insertion in the TDES and not use SF. */
+ if ((priv->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN))
+ priv->no_csum_insertion = 1;
+ else
+ priv->no_csum_insertion = 0;
+
dev->mtu = new_mtu;
return 0;
@@ -1405,11 +1406,9 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
return IRQ_NONE;
}
- if (priv->is_gmac) {
- unsigned long ioaddr = dev->base_addr;
+ if (priv->is_gmac)
/* To handle GMAC own interrupts */
- priv->hw->mac->host_irq_status(ioaddr);
- }
+ priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr);
stmmac_dma_interrupt(priv);
@@ -1512,9 +1511,6 @@ static int stmmac_probe(struct net_device *dev)
#endif
priv->msg_enable = netif_msg_init(debug, default_msg_level);
- if (priv->is_gmac)
- priv->rx_csum = 1;
-
if (flow_ctrl)
priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
@@ -1522,7 +1518,8 @@ static int stmmac_probe(struct net_device *dev)
netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
/* Get the MAC address */
- priv->hw->mac->get_umac_addr(dev->base_addr, dev->dev_addr, 0);
+ priv->hw->mac->get_umac_addr((void __iomem *) dev->base_addr,
+ dev->dev_addr, 0);
if (!is_valid_ether_addr(dev->dev_addr))
pr_warning("\tno valid MAC address;"
@@ -1552,14 +1549,13 @@ static int stmmac_probe(struct net_device *dev)
static int stmmac_mac_device_setup(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
struct mac_device_info *device;
if (priv->is_gmac)
- device = dwmac1000_setup(ioaddr);
+ device = dwmac1000_setup(priv->ioaddr);
else
- device = dwmac100_setup(ioaddr);
+ device = dwmac100_setup(priv->ioaddr);
if (!device)
return -ENOMEM;
@@ -1653,7 +1649,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
{
int ret = 0;
struct resource *res;
- unsigned int *addr = NULL;
+ void __iomem *addr = NULL;
struct net_device *ndev = NULL;
struct stmmac_priv *priv;
struct plat_stmmacenet_data *plat_dat;
@@ -1664,7 +1660,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
ret = -ENODEV;
goto out;
}
- pr_info("done!\n");
+ pr_info("\tdone!\n");
if (!request_mem_region(res->start, resource_size(res),
pdev->name)) {
@@ -1706,8 +1702,12 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
plat_dat = pdev->dev.platform_data;
priv->bus_id = plat_dat->bus_id;
priv->pbl = plat_dat->pbl; /* TLI */
+ priv->mii_clk_csr = plat_dat->clk_csr;
+ priv->tx_coe = plat_dat->tx_coe;
+ priv->bugged_jumbo = plat_dat->bugged_jumbo;
priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
priv->enh_desc = plat_dat->enh_desc;
+ priv->ioaddr = addr;
platform_set_drvdata(pdev, ndev);
@@ -1743,8 +1743,8 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
priv->bsp_priv = plat_dat->bsp_priv;
pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
- "\tIO base addr: 0x%08x)\n", ndev->name, pdev->name,
- pdev->id, ndev->irq, (unsigned int)addr);
+ "\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
+ pdev->id, ndev->irq, addr);
/* MDIO bus Registration */
pr_debug("\tMDIO bus (id: %d)...", priv->bus_id);
@@ -1779,11 +1779,11 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
pr_info("%s:\n\tremoving driver", __func__);
- priv->hw->dma->stop_rx(ndev->base_addr);
- priv->hw->dma->stop_tx(ndev->base_addr);
+ priv->hw->dma->stop_rx(priv->ioaddr);
+ priv->hw->dma->stop_tx(priv->ioaddr);
- stmmac_mac_disable_rx(ndev->base_addr);
- stmmac_mac_disable_tx(ndev->base_addr);
+ stmmac_mac_disable_rx(priv->ioaddr);
+ stmmac_mac_disable_tx(priv->ioaddr);
netif_carrier_off(ndev);
@@ -1792,7 +1792,7 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
unregister_netdev(ndev);
- iounmap((void *)ndev->base_addr);
+ iounmap(priv->ioaddr);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
@@ -1827,22 +1827,21 @@ static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
napi_disable(&priv->napi);
/* Stop TX/RX DMA */
- priv->hw->dma->stop_tx(dev->base_addr);
- priv->hw->dma->stop_rx(dev->base_addr);
+ priv->hw->dma->stop_tx(priv->ioaddr);
+ priv->hw->dma->stop_rx(priv->ioaddr);
/* Clear the Rx/Tx descriptors */
priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
dis_ic);
priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
- stmmac_mac_disable_tx(dev->base_addr);
+ stmmac_mac_disable_tx(priv->ioaddr);
if (device_may_wakeup(&(pdev->dev))) {
/* Enable Power down mode by programming the PMT regs */
if (priv->wolenabled == PMT_SUPPORTED)
- priv->hw->mac->pmt(dev->base_addr,
- priv->wolopts);
+ priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
} else {
- stmmac_mac_disable_rx(dev->base_addr);
+ stmmac_mac_disable_rx(priv->ioaddr);
}
} else {
priv->shutdown = 1;
@@ -1860,20 +1859,19 @@ static int stmmac_resume(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
if (!netif_running(dev))
return 0;
- spin_lock(&priv->lock);
-
if (priv->shutdown) {
/* Re-open the interface and re-init the MAC/DMA
- and the rings. */
+ and the rings (i.e. on the hibernation stage) */
stmmac_open(dev);
- goto out_resume;
+ return 0;
}
+ spin_lock(&priv->lock);
+
/* Power Down bit, into the PM register, is cleared
* automatically as soon as a magic packet or a Wake-up frame
* is received. Anyway, it's better to manually clear
@@ -1881,15 +1879,15 @@ static int stmmac_resume(struct platform_device *pdev)
* from other devices (e.g. serial console). */
if (device_may_wakeup(&(pdev->dev)))
if (priv->wolenabled == PMT_SUPPORTED)
- priv->hw->mac->pmt(dev->base_addr, 0);
+ priv->hw->mac->pmt(priv->ioaddr, 0);
netif_device_attach(dev);
/* Enable the MAC and DMA */
- stmmac_mac_enable_rx(ioaddr);
- stmmac_mac_enable_tx(ioaddr);
- priv->hw->dma->start_tx(ioaddr);
- priv->hw->dma->start_rx(ioaddr);
+ stmmac_mac_enable_rx(priv->ioaddr);
+ stmmac_mac_enable_tx(priv->ioaddr);
+ priv->hw->dma->start_tx(priv->ioaddr);
+ priv->hw->dma->start_rx(priv->ioaddr);
#ifdef CONFIG_STMMAC_TIMER
priv->tm->timer_start(tmrate);
@@ -1901,7 +1899,6 @@ static int stmmac_resume(struct platform_device *pdev)
netif_start_queue(dev);
-out_resume:
spin_unlock(&priv->lock);
return 0;
}
@@ -1969,8 +1966,6 @@ static int __init stmmac_cmdline_opt(char *str)
strict_strtoul(opt + 7, 0, (unsigned long *)&buf_sz);
else if (!strncmp(opt, "tc:", 3))
strict_strtoul(opt + 3, 0, (unsigned long *)&tc);
- else if (!strncmp(opt, "tx_coe:", 7))
- strict_strtoul(opt + 7, 0, (unsigned long *)&tx_coe);
else if (!strncmp(opt, "watchdog:", 9))
strict_strtoul(opt + 9, 0, (unsigned long *)&watchdog);
else if (!strncmp(opt, "flow_ctrl:", 10))
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
index 40b2c7929719..d7441616357d 100644
--- a/drivers/net/stmmac/stmmac_mdio.c
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -47,21 +47,20 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
{
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
- unsigned long ioaddr = ndev->base_addr;
unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data;
int data;
u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
((phyreg << 6) & (0x000007C0)));
- regValue |= MII_BUSY; /* in case of GMAC */
+ regValue |= MII_BUSY | ((priv->mii_clk_csr & 7) << 2);
- do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
- writel(regValue, ioaddr + mii_address);
- do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
+ do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
+ writel(regValue, priv->ioaddr + mii_address);
+ do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
/* Read the data from the MII data register */
- data = (int)readl(ioaddr + mii_data);
+ data = (int)readl(priv->ioaddr + mii_data);
return data;
}
@@ -79,7 +78,6 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
{
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
- unsigned long ioaddr = ndev->base_addr;
unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data;
@@ -87,17 +85,18 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
(((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
| MII_WRITE;
- value |= MII_BUSY;
+ value |= MII_BUSY | ((priv->mii_clk_csr & 7) << 2);
+
/* Wait until any existing MII operation is complete */
- do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
+ do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
/* Set the MII address register to write */
- writel(phydata, ioaddr + mii_data);
- writel(value, ioaddr + mii_address);
+ writel(phydata, priv->ioaddr + mii_data);
+ writel(value, priv->ioaddr + mii_address);
/* Wait until any existing MII operation is complete */
- do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
+ do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
return 0;
}
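Both MDIO accessors now fold the platform-supplied CSR clock range into the address word next to MII_BUSY, selecting the MDC divider appropriate to the bus clock. A sketch of the layout these masks imply — PHY address in bits 15:11, register in 10:6, clock range in 4:2 — with the busy bit passed in rather than assumed:

static inline unsigned int demo_mii_addr(int phyaddr, int phyreg,
					 int clk_csr, unsigned int busy)
{
	return ((phyaddr << 11) & 0x0000F800) |	/* PHY address */
	       ((phyreg << 6) & 0x000007C0) |	/* PHY register */
	       ((clk_csr & 7) << 2) |		/* MDC clock range */
	       busy;				/* MII_BUSY handshake */
}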
@@ -111,7 +110,6 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
{
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
- unsigned long ioaddr = ndev->base_addr;
unsigned int mii_address = priv->hw->mii.addr;
if (priv->phy_reset) {
@@ -123,7 +121,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
* It doesn't complete its reset until at least one clock cycle
* on MDC, so perform a dummy mdio read.
*/
- writel(0, ioaddr + mii_address);
+ writel(0, priv->ioaddr + mii_address);
return 0;
}
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 367e96f317d4..0a6a5ced3c1c 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -97,7 +97,7 @@ static int qec_global_reset(void __iomem *gregs)
static void qec_init(struct bigmac *bp)
{
- struct of_device *qec_op = bp->qec_op;
+ struct platform_device *qec_op = bp->qec_op;
void __iomem *gregs = bp->gregs;
u8 bsizes = bp->bigmac_bursts;
u32 regval;
@@ -617,7 +617,7 @@ static void bigmac_begin_auto_negotiation(struct bigmac *bp)
bp->timer_ticks = 0;
bp->bigmac_timer.expires = jiffies + (12 * HZ) / 10;
bp->bigmac_timer.data = (unsigned long) bp;
- bp->bigmac_timer.function = &bigmac_timer;
+ bp->bigmac_timer.function = bigmac_timer;
add_timer(&bp->bigmac_timer);
}
@@ -1083,8 +1083,8 @@ static const struct net_device_ops bigmac_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit bigmac_ether_init(struct of_device *op,
- struct of_device *qec_op)
+static int __devinit bigmac_ether_init(struct platform_device *op,
+ struct platform_device *qec_op)
{
static int version_printed;
struct net_device *dev;
@@ -1201,7 +1201,7 @@ static int __devinit bigmac_ether_init(struct of_device *op,
dev->watchdog_timeo = 5*HZ;
/* Finish net device registration. */
- dev->irq = bp->bigmac_op->irqs[0];
+ dev->irq = bp->bigmac_op->archdata.irqs[0];
dev->dma = 0;
if (register_netdev(dev)) {
@@ -1242,25 +1242,25 @@ fail_and_cleanup:
/* QEC can be the parent of either QuadEthernet or a BigMAC. We want
* the latter.
*/
-static int __devinit bigmac_sbus_probe(struct of_device *op,
+static int __devinit bigmac_sbus_probe(struct platform_device *op,
const struct of_device_id *match)
{
struct device *parent = op->dev.parent;
- struct of_device *qec_op;
+ struct platform_device *qec_op;
- qec_op = to_of_device(parent);
+ qec_op = to_platform_device(parent);
return bigmac_ether_init(op, qec_op);
}
-static int __devexit bigmac_sbus_remove(struct of_device *op)
+static int __devexit bigmac_sbus_remove(struct platform_device *op)
{
struct bigmac *bp = dev_get_drvdata(&op->dev);
struct device *parent = op->dev.parent;
struct net_device *net_dev = bp->dev;
- struct of_device *qec_op;
+ struct platform_device *qec_op;
- qec_op = to_of_device(parent);
+ qec_op = to_platform_device(parent);
unregister_netdev(net_dev);
@@ -1301,12 +1301,12 @@ static struct of_platform_driver bigmac_sbus_driver = {
static int __init bigmac_init(void)
{
- return of_register_driver(&bigmac_sbus_driver, &of_bus_type);
+ return of_register_platform_driver(&bigmac_sbus_driver);
}
static void __exit bigmac_exit(void)
{
- of_unregister_driver(&bigmac_sbus_driver);
+ of_unregister_platform_driver(&bigmac_sbus_driver);
}
module_init(bigmac_init);
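From here down, the SBUS drivers are moved from struct of_device to plain struct platform_device: IRQs come from dev.archdata instead of the old irqs[] array, parents are reached via to_platform_device(), and registration goes through of_register_platform_driver(). A minimal probe sketch under that model (demo_ether_init is a hypothetical helper):

#include <linux/init.h>
#include <linux/of_platform.h>

static int demo_ether_init(struct platform_device *op, int irq)
{
	/* device setup elided in this sketch */
	return 0;
}

static int __devinit demo_sbus_probe(struct platform_device *op,
				     const struct of_device_id *match)
{
	/* sparc keeps the interrupt list in archdata now */
	int irq = op->archdata.irqs[0];

	return demo_ether_init(op, irq);
}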
diff --git a/drivers/net/sunbmac.h b/drivers/net/sunbmac.h
index 8840bc0b840b..8db88945b889 100644
--- a/drivers/net/sunbmac.h
+++ b/drivers/net/sunbmac.h
@@ -329,8 +329,8 @@ struct bigmac {
unsigned int timer_ticks;
struct net_device_stats enet_stats;
- struct of_device *qec_op;
- struct of_device *bigmac_op;
+ struct platform_device *qec_op;
+ struct platform_device *bigmac_op;
struct net_device *dev;
};
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 2678588ea4b2..7dfdbee878e8 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -874,7 +874,7 @@ static int netdev_open(struct net_device *dev)
init_timer(&np->timer);
np->timer.expires = jiffies + 3*HZ;
np->timer.data = (unsigned long)dev;
- np->timer.function = &netdev_timer; /* timer handler */
+ np->timer.function = netdev_timer; /* timer handler */
add_timer(&np->timer);
/* Enable interrupts by setting the interrupt mask. */
@@ -1757,11 +1757,59 @@ static void __devexit sundance_remove1 (struct pci_dev *pdev)
}
}
+#ifdef CONFIG_PM
+
+static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pci_dev);
+
+ if (!netif_running(dev))
+ return 0;
+
+ netdev_close(dev);
+ netif_device_detach(dev);
+
+ pci_save_state(pci_dev);
+ pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
+
+ return 0;
+}
+
+static int sundance_resume(struct pci_dev *pci_dev)
+{
+ struct net_device *dev = pci_get_drvdata(pci_dev);
+ int err = 0;
+
+ if (!netif_running(dev))
+ return 0;
+
+ pci_set_power_state(pci_dev, PCI_D0);
+ pci_restore_state(pci_dev);
+
+ err = netdev_open(dev);
+ if (err) {
+ printk(KERN_ERR "%s: Can't resume interface!\n",
+ dev->name);
+ goto out;
+ }
+
+ netif_device_attach(dev);
+
+out:
+ return err;
+}
+
+#endif /* CONFIG_PM */
+
static struct pci_driver sundance_driver = {
.name = DRV_NAME,
.id_table = sundance_pci_tbl,
.probe = sundance_probe1,
.remove = __devexit_p(sundance_remove1),
+#ifdef CONFIG_PM
+ .suspend = sundance_suspend,
+ .resume = sundance_resume,
+#endif /* CONFIG_PM */
};
static int __init sundance_init(void)
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 434f9d735333..4ceb3cf6a9a9 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -31,6 +31,8 @@
* about when we can start taking interrupts or get xmit() called...
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -105,7 +107,6 @@ MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
MODULE_LICENSE("GPL");
#define GEM_MODULE_NAME "gem"
-#define PFX GEM_MODULE_NAME ": "
static DEFINE_PCI_DEVICE_TABLE(gem_pci_tbl) = {
{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
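The private PFX macro is retired in favour of a pr_fmt definition placed before the first include, so every pr_*() call in the file gains the module-name prefix automatically; the netdev_err()/netdev_info() conversions in the hunks below add the device name on top of that. Sketch of the mechanism (KBUILD_MODNAME is supplied by the build system):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must precede includes */
#include <linux/kernel.h>

static void demo_log(void)
{
	/* expands to printk(KERN_ERR KBUILD_MODNAME ": " ...) */
	pr_err("Cannot enable MMIO operation, aborting\n");
}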
@@ -262,8 +263,7 @@ static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_sta
gp->dev->name, pcs_istat);
if (!(pcs_istat & PCS_ISTAT_LSC)) {
- printk(KERN_ERR "%s: PCS irq but no link status change???\n",
- dev->name);
+ netdev_err(dev, "PCS irq but no link status change???\n");
return 0;
}
@@ -282,20 +282,16 @@ static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_sta
* when autoneg has completed.
*/
if (pcs_miistat & PCS_MIISTAT_RF)
- printk(KERN_INFO "%s: PCS AutoNEG complete, "
- "RemoteFault\n", dev->name);
+ netdev_info(dev, "PCS AutoNEG complete, RemoteFault\n");
else
- printk(KERN_INFO "%s: PCS AutoNEG complete.\n",
- dev->name);
+ netdev_info(dev, "PCS AutoNEG complete\n");
}
if (pcs_miistat & PCS_MIISTAT_LS) {
- printk(KERN_INFO "%s: PCS link is now up.\n",
- dev->name);
+ netdev_info(dev, "PCS link is now up\n");
netif_carrier_on(gp->dev);
} else {
- printk(KERN_INFO "%s: PCS link is now down.\n",
- dev->name);
+ netdev_info(dev, "PCS link is now down\n");
netif_carrier_off(gp->dev);
/* If this happens and the link timer is not running,
* reset so we re-negotiate.
@@ -323,14 +319,12 @@ static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
return 0;
if (txmac_stat & MAC_TXSTAT_URUN) {
- printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
- dev->name);
+ netdev_err(dev, "TX MAC xmit underrun\n");
gp->net_stats.tx_fifo_errors++;
}
if (txmac_stat & MAC_TXSTAT_MPE) {
- printk(KERN_ERR "%s: TX MAC max packet size error.\n",
- dev->name);
+ netdev_err(dev, "TX MAC max packet size error\n");
gp->net_stats.tx_errors++;
}
@@ -377,8 +371,7 @@ static int gem_rxmac_reset(struct gem *gp)
udelay(10);
}
if (limit == 5000) {
- printk(KERN_ERR "%s: RX MAC will not reset, resetting whole "
- "chip.\n", dev->name);
+ netdev_err(dev, "RX MAC will not reset, resetting whole chip\n");
return 1;
}
@@ -390,8 +383,7 @@ static int gem_rxmac_reset(struct gem *gp)
udelay(10);
}
if (limit == 5000) {
- printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
- "chip.\n", dev->name);
+ netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
return 1;
}
@@ -403,8 +395,7 @@ static int gem_rxmac_reset(struct gem *gp)
udelay(10);
}
if (limit == 5000) {
- printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
- "chip.\n", dev->name);
+ netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
return 1;
}
@@ -419,8 +410,7 @@ static int gem_rxmac_reset(struct gem *gp)
udelay(10);
}
if (limit == 5000) {
- printk(KERN_ERR "%s: RX reset command will not execute, resetting "
- "whole chip.\n", dev->name);
+ netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
return 1;
}
@@ -429,8 +419,7 @@ static int gem_rxmac_reset(struct gem *gp)
struct gem_rxd *rxd = &gp->init_block->rxd[i];
if (gp->rx_skbs[i] == NULL) {
- printk(KERN_ERR "%s: Parts of RX ring empty, resetting "
- "whole chip.\n", dev->name);
+ netdev_err(dev, "Parts of RX ring empty, resetting whole chip\n");
return 1;
}
@@ -479,8 +468,7 @@ static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
if (rxmac_stat & MAC_RXSTAT_OFLW) {
u32 smac = readl(gp->regs + MAC_SMACHINE);
- printk(KERN_ERR "%s: RX MAC fifo overflow smac[%08x].\n",
- dev->name, smac);
+ netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac);
gp->net_stats.rx_over_errors++;
gp->net_stats.rx_fifo_errors++;
@@ -542,19 +530,18 @@ static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_sta
if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
- printk(KERN_ERR "%s: PCI error [%04x] ",
- dev->name, pci_estat);
+ netdev_err(dev, "PCI error [%04x]", pci_estat);
if (pci_estat & GREG_PCIESTAT_BADACK)
- printk("<No ACK64# during ABS64 cycle> ");
+ pr_cont(" <No ACK64# during ABS64 cycle>");
if (pci_estat & GREG_PCIESTAT_DTRTO)
- printk("<Delayed transaction timeout> ");
+ pr_cont(" <Delayed transaction timeout>");
if (pci_estat & GREG_PCIESTAT_OTHER)
- printk("<other>");
- printk("\n");
+ pr_cont(" <other>");
+ pr_cont("\n");
} else {
pci_estat |= GREG_PCIESTAT_OTHER;
- printk(KERN_ERR "%s: PCI error\n", dev->name);
+ netdev_err(dev, "PCI error\n");
}
if (pci_estat & GREG_PCIESTAT_OTHER) {
@@ -565,26 +552,20 @@ static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_sta
*/
pci_read_config_word(gp->pdev, PCI_STATUS,
&pci_cfg_stat);
- printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
- dev->name, pci_cfg_stat);
+ netdev_err(dev, "Read PCI cfg space status [%04x]\n",
+ pci_cfg_stat);
if (pci_cfg_stat & PCI_STATUS_PARITY)
- printk(KERN_ERR "%s: PCI parity error detected.\n",
- dev->name);
+ netdev_err(dev, "PCI parity error detected\n");
if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
- printk(KERN_ERR "%s: PCI target abort.\n",
- dev->name);
+ netdev_err(dev, "PCI target abort\n");
if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
- printk(KERN_ERR "%s: PCI master acks target abort.\n",
- dev->name);
+ netdev_err(dev, "PCI master acks target abort\n");
if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
- printk(KERN_ERR "%s: PCI master abort.\n",
- dev->name);
+ netdev_err(dev, "PCI master abort\n");
if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
- printk(KERN_ERR "%s: PCI system error SERR#.\n",
- dev->name);
+ netdev_err(dev, "PCI system error SERR#\n");
if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
- printk(KERN_ERR "%s: PCI parity error.\n",
- dev->name);
+ netdev_err(dev, "PCI parity error\n");
/* Write the error bits back to clear them. */
pci_cfg_stat &= (PCI_STATUS_PARITY |
@@ -874,8 +855,7 @@ static int gem_rx(struct gem *gp, int work_to_do)
gp->rx_new = entry;
if (drops)
- printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
- gp->dev->name);
+ netdev_info(gp->dev, "Memory squeeze, deferring packet\n");
return work_done;
}
@@ -981,21 +961,19 @@ static void gem_tx_timeout(struct net_device *dev)
{
struct gem *gp = netdev_priv(dev);
- printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+ netdev_err(dev, "transmit timed out, resetting\n");
if (!gp->running) {
- printk("%s: hrm.. hw not running !\n", dev->name);
+ netdev_err(dev, "hrm.. hw not running !\n");
return;
}
- printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x]\n",
- dev->name,
- readl(gp->regs + TXDMA_CFG),
- readl(gp->regs + MAC_TXSTAT),
- readl(gp->regs + MAC_TXCFG));
- printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
- dev->name,
- readl(gp->regs + RXDMA_CFG),
- readl(gp->regs + MAC_RXSTAT),
- readl(gp->regs + MAC_RXCFG));
+ netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n",
+ readl(gp->regs + TXDMA_CFG),
+ readl(gp->regs + MAC_TXSTAT),
+ readl(gp->regs + MAC_TXCFG));
+ netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
+ readl(gp->regs + RXDMA_CFG),
+ readl(gp->regs + MAC_RXSTAT),
+ readl(gp->regs + MAC_RXCFG));
spin_lock_irq(&gp->lock);
spin_lock(&gp->tx_lock);
@@ -1048,8 +1026,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) {
netif_stop_queue(dev);
spin_unlock_irqrestore(&gp->tx_lock, flags);
- printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
- dev->name);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
@@ -1158,8 +1135,7 @@ static void gem_pcs_reset(struct gem *gp)
break;
}
if (limit < 0)
- printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
- gp->dev->name);
+ netdev_warn(gp->dev, "PCS reset bit would not clear\n");
}
static void gem_pcs_reinit_adv(struct gem *gp)
@@ -1230,7 +1206,7 @@ static void gem_reset(struct gem *gp)
} while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));
if (limit < 0)
- printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
+ netdev_err(gp->dev, "SW reset is ghetto\n");
if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
gem_pcs_reinit_adv(gp);
@@ -1395,9 +1371,8 @@ static int gem_set_link_modes(struct gem *gp)
speed = SPEED_1000;
}
- if (netif_msg_link(gp))
- printk(KERN_INFO "%s: Link is up at %d Mbps, %s-duplex.\n",
- gp->dev->name, speed, (full_duplex ? "full" : "half"));
+ netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n",
+ speed, (full_duplex ? "full" : "half"));
if (!gp->running)
return 0;
@@ -1451,15 +1426,13 @@ static int gem_set_link_modes(struct gem *gp)
if (netif_msg_link(gp)) {
if (pause) {
- printk(KERN_INFO "%s: Pause is enabled "
- "(rxfifo: %d off: %d on: %d)\n",
- gp->dev->name,
- gp->rx_fifo_sz,
- gp->rx_pause_off,
- gp->rx_pause_on);
+ netdev_info(gp->dev,
+ "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
+ gp->rx_fifo_sz,
+ gp->rx_pause_off,
+ gp->rx_pause_on);
} else {
- printk(KERN_INFO "%s: Pause is disabled\n",
- gp->dev->name);
+ netdev_info(gp->dev, "Pause is disabled\n");
}
}
@@ -1484,9 +1457,8 @@ static int gem_mdio_link_not_up(struct gem *gp)
{
switch (gp->lstate) {
case link_force_ret:
- if (netif_msg_link(gp))
- printk(KERN_INFO "%s: Autoneg failed again, keeping"
- " forced mode\n", gp->dev->name);
+ netif_info(gp, link, gp->dev,
+ "Autoneg failed again, keeping forced mode\n");
gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
gp->last_forced_speed, DUPLEX_HALF);
gp->timer_ticks = 5;
@@ -1499,9 +1471,7 @@ static int gem_mdio_link_not_up(struct gem *gp)
*/
if (gp->phy_mii.def->magic_aneg)
return 1;
- if (netif_msg_link(gp))
- printk(KERN_INFO "%s: switching to forced 100bt\n",
- gp->dev->name);
+ netif_info(gp, link, gp->dev, "switching to forced 100bt\n");
/* Try forced modes. */
gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100,
DUPLEX_HALF);
@@ -1517,9 +1487,8 @@ static int gem_mdio_link_not_up(struct gem *gp)
gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10,
DUPLEX_HALF);
gp->timer_ticks = 5;
- if (netif_msg_link(gp))
- printk(KERN_INFO "%s: switching to forced 10bt\n",
- gp->dev->name);
+ netif_info(gp, link, gp->dev,
+ "switching to forced 10bt\n");
return 0;
} else
return 1;
@@ -1574,8 +1543,8 @@ static void gem_link_timer(unsigned long data)
gp->last_forced_speed = gp->phy_mii.speed;
gp->timer_ticks = 5;
if (netif_msg_link(gp))
- printk(KERN_INFO "%s: Got link after fallback, retrying"
- " autoneg once...\n", gp->dev->name);
+ netdev_info(gp->dev,
+ "Got link after fallback, retrying autoneg once...\n");
gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
} else if (gp->lstate != link_up) {
gp->lstate = link_up;
@@ -1589,9 +1558,7 @@ static void gem_link_timer(unsigned long data)
*/
if (gp->lstate == link_up) {
gp->lstate = link_down;
- if (netif_msg_link(gp))
- printk(KERN_INFO "%s: Link down\n",
- gp->dev->name);
+ netif_info(gp, link, gp->dev, "Link down\n");
netif_carrier_off(gp->dev);
gp->reset_task_pending = 1;
schedule_work(&gp->reset_task);
@@ -1746,8 +1713,7 @@ static void gem_init_phy(struct gem *gp)
if (phy_read(gp, MII_BMCR) != 0xffff)
break;
if (i == 2)
- printk(KERN_WARNING "%s: GMAC PHY not responding !\n",
- gp->dev->name);
+ netdev_warn(gp->dev, "GMAC PHY not responding !\n");
}
}
@@ -2038,7 +2004,7 @@ static int gem_check_invariants(struct gem *gp)
* as this chip has no gigabit PHY.
*/
if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) {
- printk(KERN_ERR PFX "RIO GEM lacks MII phy, mif_cfg[%08x]\n",
+ pr_err("RIO GEM lacks MII phy, mif_cfg[%08x]\n",
mif_cfg);
return -1;
}
@@ -2078,7 +2044,7 @@ static int gem_check_invariants(struct gem *gp)
}
if (i == 32) {
if (pdev->device != PCI_DEVICE_ID_SUN_GEM) {
- printk(KERN_ERR PFX "RIO MII phy will not respond.\n");
+ pr_err("RIO MII phy will not respond\n");
return -1;
}
gp->phy_type = phy_serdes;
@@ -2093,7 +2059,7 @@ static int gem_check_invariants(struct gem *gp)
if (pdev->device == PCI_DEVICE_ID_SUN_GEM) {
if (gp->tx_fifo_sz != (9 * 1024) ||
gp->rx_fifo_sz != (20 * 1024)) {
- printk(KERN_ERR PFX "GEM has bogus fifo sizes tx(%d) rx(%d)\n",
+ pr_err("GEM has bogus fifo sizes tx(%d) rx(%d)\n",
gp->tx_fifo_sz, gp->rx_fifo_sz);
return -1;
}
@@ -2101,7 +2067,7 @@ static int gem_check_invariants(struct gem *gp)
} else {
if (gp->tx_fifo_sz != (2 * 1024) ||
gp->rx_fifo_sz != (2 * 1024)) {
- printk(KERN_ERR PFX "RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
+ pr_err("RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
gp->tx_fifo_sz, gp->rx_fifo_sz);
return -1;
}
@@ -2239,7 +2205,7 @@ static int gem_do_start(struct net_device *dev)
if (request_irq(gp->pdev->irq, gem_interrupt,
IRQF_SHARED, dev->name, (void *)dev)) {
- printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name);
+ netdev_err(dev, "failed to request irq !\n");
spin_lock_irqsave(&gp->lock, flags);
spin_lock(&gp->tx_lock);
@@ -2378,9 +2344,8 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
mutex_lock(&gp->pm_mutex);
- printk(KERN_INFO "%s: suspending, WakeOnLan %s\n",
- dev->name,
- (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled");
+ netdev_info(dev, "suspending, WakeOnLan %s\n",
+ (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled");
/* Keep the cell enabled during the entire operation */
spin_lock_irqsave(&gp->lock, flags);
@@ -2440,7 +2405,7 @@ static int gem_resume(struct pci_dev *pdev)
struct gem *gp = netdev_priv(dev);
unsigned long flags;
- printk(KERN_INFO "%s: resuming\n", dev->name);
+ netdev_info(dev, "resuming\n");
mutex_lock(&gp->pm_mutex);
@@ -2452,8 +2417,7 @@ static int gem_resume(struct pci_dev *pdev)
/* Make sure PCI access and bus master are enabled */
if (pci_enable_device(gp->pdev)) {
- printk(KERN_ERR "%s: Can't re-enable chip !\n",
- dev->name);
+ netdev_err(dev, "Can't re-enable chip !\n");
/* Put cell and forget it for now, it will be considered as
* still asleep, a new sleep cycle may bring it back
*/
@@ -2938,7 +2902,7 @@ static int __devinit gem_get_device_address(struct gem *gp)
addr = idprom->id_ethaddr;
#else
printk("\n");
- printk(KERN_ERR "%s: can't get mac-address\n", dev->name);
+ pr_err("%s: can't get mac-address\n", dev->name);
return -1;
#endif
}
@@ -3009,14 +2973,12 @@ static const struct net_device_ops gem_netdev_ops = {
static int __devinit gem_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- static int gem_version_printed = 0;
unsigned long gemreg_base, gemreg_len;
struct net_device *dev;
struct gem *gp;
int err, pci_using_dac;
- if (gem_version_printed++ == 0)
- printk(KERN_INFO "%s", version);
+ printk_once(KERN_INFO "%s", version);
/* Apple gmac note: during probe, the chip is powered up by
* the arch code to allow the code below to work (and to let
@@ -3026,8 +2988,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
*/
err = pci_enable_device(pdev);
if (err) {
- printk(KERN_ERR PFX "Cannot enable MMIO operation, "
- "aborting.\n");
+ pr_err("Cannot enable MMIO operation, aborting\n");
return err;
}
pci_set_master(pdev);
@@ -3048,8 +3009,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
} else {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- printk(KERN_ERR PFX "No usable DMA configuration, "
- "aborting.\n");
+ pr_err("No usable DMA configuration, aborting\n");
goto err_disable_device;
}
pci_using_dac = 0;
@@ -3059,15 +3019,14 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
gemreg_len = pci_resource_len(pdev, 0);
if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
- printk(KERN_ERR PFX "Cannot find proper PCI device "
- "base address, aborting.\n");
+ pr_err("Cannot find proper PCI device base address, aborting\n");
err = -ENODEV;
goto err_disable_device;
}
dev = alloc_etherdev(sizeof(*gp));
if (!dev) {
- printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+ pr_err("Etherdev alloc failed, aborting\n");
err = -ENOMEM;
goto err_disable_device;
}
@@ -3077,8 +3036,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
- printk(KERN_ERR PFX "Cannot obtain PCI resources, "
- "aborting.\n");
+ pr_err("Cannot obtain PCI resources, aborting\n");
goto err_out_free_netdev;
}
@@ -3104,8 +3062,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
gp->regs = ioremap(gemreg_base, gemreg_len);
if (!gp->regs) {
- printk(KERN_ERR PFX "Cannot map device registers, "
- "aborting.\n");
+ pr_err("Cannot map device registers, aborting\n");
err = -EIO;
goto err_out_free_res;
}
@@ -3150,8 +3107,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
pci_alloc_consistent(pdev, sizeof(struct gem_init_block),
&gp->gblock_dvma);
if (!gp->init_block) {
- printk(KERN_ERR PFX "Cannot allocate init block, "
- "aborting.\n");
+ pr_err("Cannot allocate init block, aborting\n");
err = -ENOMEM;
goto err_out_iounmap;
}
@@ -3180,19 +3136,18 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
/* Register with kernel */
if (register_netdev(dev)) {
- printk(KERN_ERR PFX "Cannot register net device, "
- "aborting.\n");
+ pr_err("Cannot register net device, aborting\n");
err = -ENOMEM;
goto err_out_free_consistent;
}
- printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n",
- dev->name, dev->dev_addr);
+ netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n",
+ dev->dev_addr);
if (gp->phy_type == phy_mii_mdio0 ||
gp->phy_type == phy_mii_mdio1)
- printk(KERN_INFO "%s: Found %s PHY\n", dev->name,
- gp->phy_mii.def ? gp->phy_mii.def->name : "no");
+ netdev_info(dev, "Found %s PHY\n",
+ gp->phy_mii.def ? gp->phy_mii.def->name : "no");
/* GEM can do it all... */
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX;
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index 78f8cee5fd74..4a4fac630337 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -1175,7 +1175,8 @@ int mii_phy_probe(struct mii_phy *phy, int mii_id)
/* Read ID and find matching entry */
id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2));
- printk(KERN_DEBUG "PHY ID: %x, addr: %x\n", id, mii_id);
+ printk(KERN_DEBUG KBUILD_MODNAME ": " "PHY ID: %x, addr: %x\n",
+ id, mii_id);
for (i=0; (def = mii_phy_table[i]) != NULL; i++)
if ((id & def->phy_id_mask) == def->phy_id)
break;
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 3d9650b8d38f..45f315ed1868 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -1409,7 +1409,7 @@ force_link:
hp->timer_ticks = 0;
hp->happy_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */
hp->happy_timer.data = (unsigned long) hp;
- hp->happy_timer.function = &happy_meal_timer;
+ hp->happy_timer.function = happy_meal_timer;
add_timer(&hp->happy_timer);
}
@@ -1591,7 +1591,7 @@ static int happy_meal_init(struct happy_meal *hp)
*/
#ifdef CONFIG_SBUS
if ((hp->happy_flags & HFLAG_PCI) == 0) {
- struct of_device *op = hp->happy_dev;
+ struct platform_device *op = hp->happy_dev;
if (sbus_can_dma_64bit()) {
sbus_set_sbus64(&op->dev,
hp->happy_bursts);
@@ -2480,7 +2480,7 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
#ifdef CONFIG_SBUS
else {
const struct linux_prom_registers *regs;
- struct of_device *op = hp->happy_dev;
+ struct platform_device *op = hp->happy_dev;
regs = of_get_property(op->dev.of_node, "regs", NULL);
if (regs)
sprintf(info->bus_info, "SBUS:%d",
@@ -2515,13 +2515,13 @@ static int hme_version_printed;
*
* Return NULL on failure.
*/
-static struct quattro * __devinit quattro_sbus_find(struct of_device *child)
+static struct quattro * __devinit quattro_sbus_find(struct platform_device *child)
{
struct device *parent = child->dev.parent;
- struct of_device *op;
+ struct platform_device *op;
struct quattro *qp;
- op = to_of_device(parent);
+ op = to_platform_device(parent);
qp = dev_get_drvdata(&op->dev);
if (qp)
return qp;
@@ -2551,7 +2551,7 @@ static int __init quattro_sbus_register_irqs(void)
struct quattro *qp;
for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
- struct of_device *op = qp->quattro_dev;
+ struct platform_device *op = qp->quattro_dev;
int err, qfe_slot, skip = 0;
for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
@@ -2561,7 +2561,7 @@ static int __init quattro_sbus_register_irqs(void)
if (skip)
continue;
- err = request_irq(op->irqs[0],
+ err = request_irq(op->archdata.irqs[0],
quattro_sbus_interrupt,
IRQF_SHARED, "Quattro",
qp);
@@ -2580,7 +2580,7 @@ static void quattro_sbus_free_irqs(void)
struct quattro *qp;
for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
- struct of_device *op = qp->quattro_dev;
+ struct platform_device *op = qp->quattro_dev;
int qfe_slot, skip = 0;
for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
@@ -2590,7 +2590,7 @@ static void quattro_sbus_free_irqs(void)
if (skip)
continue;
- free_irq(op->irqs[0], qp);
+ free_irq(op->archdata.irqs[0], qp);
}
}
#endif /* CONFIG_SBUS */
@@ -2639,7 +2639,7 @@ static const struct net_device_ops hme_netdev_ops = {
};
#ifdef CONFIG_SBUS
-static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
+static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
{
struct device_node *dp = op->dev.of_node, *sbus_dp;
struct quattro *qp = NULL;
@@ -2648,7 +2648,7 @@ static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
int i, qfe_slot = -1;
int err = -ENODEV;
- sbus_dp = to_of_device(op->dev.parent)->dev.of_node;
+ sbus_dp = op->dev.parent->of_node;
/* We can match PCI devices too, do not accept those here. */
if (strcmp(sbus_dp->name, "sbus"))
@@ -2790,7 +2790,7 @@ static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
/* Happy Meal can do it all... */
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
- dev->irq = op->irqs[0];
+ dev->irq = op->archdata.irqs[0];
#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
/* Hook up SBUS register/descriptor accessors. */
@@ -2808,7 +2808,8 @@ static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
happy_meal_set_initial_advertisement(hp);
spin_unlock_irq(&hp->happy_lock);
- if (register_netdev(hp->dev)) {
+ err = register_netdev(hp->dev);
+ if (err) {
printk(KERN_ERR "happymeal: Cannot register net device, "
"aborting.\n");
goto err_out_free_coherent;
@@ -3130,7 +3131,8 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
happy_meal_set_initial_advertisement(hp);
spin_unlock_irq(&hp->happy_lock);
- if (register_netdev(hp->dev)) {
+ err = register_netdev(hp->dev);
+ if (err) {
printk(KERN_ERR "happymeal(PCI): Cannot register net device, "
"aborting.\n");
goto err_out_iounmap;
@@ -3235,7 +3237,7 @@ static void happy_meal_pci_exit(void)
#endif
#ifdef CONFIG_SBUS
-static int __devinit hme_sbus_probe(struct of_device *op, const struct of_device_id *match)
+static int __devinit hme_sbus_probe(struct platform_device *op, const struct of_device_id *match)
{
struct device_node *dp = op->dev.of_node;
const char *model = of_get_property(dp, "model", NULL);
@@ -3247,7 +3249,7 @@ static int __devinit hme_sbus_probe(struct of_device *op, const struct of_device
return happy_meal_sbus_probe_one(op, is_qfe);
}
-static int __devexit hme_sbus_remove(struct of_device *op)
+static int __devexit hme_sbus_remove(struct platform_device *op)
{
struct happy_meal *hp = dev_get_drvdata(&op->dev);
struct net_device *net_dev = hp->dev;
@@ -3304,7 +3306,7 @@ static int __init happy_meal_sbus_init(void)
{
int err;
- err = of_register_driver(&hme_sbus_driver, &of_bus_type);
+ err = of_register_platform_driver(&hme_sbus_driver);
if (!err)
err = quattro_sbus_register_irqs();
@@ -3313,7 +3315,7 @@ static int __init happy_meal_sbus_init(void)
static void happy_meal_sbus_exit(void)
{
- of_unregister_driver(&hme_sbus_driver);
+ of_unregister_platform_driver(&hme_sbus_driver);
quattro_sbus_free_irqs();
while (qfe_sbus_list) {
diff --git a/drivers/net/sunhme.h b/drivers/net/sunhme.h
index efd2ca0fcad3..756b5bf3aa89 100644
--- a/drivers/net/sunhme.h
+++ b/drivers/net/sunhme.h
@@ -407,7 +407,7 @@ struct happy_meal {
void (*write_rxd)(struct happy_meal_rxd *, u32, u32);
#endif
- /* This is either an of_device or a pci_dev. */
+ /* This is either a platform_device or a pci_dev. */
void *happy_dev;
struct device *dma_dev;
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 7d9c33dd9d1a..2cf84e5968b2 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -250,7 +250,7 @@ struct lance_private {
int rx_new, tx_new;
int rx_old, tx_old;
- struct of_device *ledma; /* If set this points to ledma */
+ struct platform_device *ledma; /* If set this points to ledma */
char tpe; /* cable-selection is TPE */
char auto_select; /* cable-selection by carrier */
char burst_sizes; /* ledma SBus burst sizes */
@@ -265,8 +265,8 @@ struct lance_private {
char *name;
dma_addr_t init_block_dvma;
struct net_device *dev; /* Backpointer */
- struct of_device *op;
- struct of_device *lebuffer;
+ struct platform_device *op;
+ struct platform_device *lebuffer;
struct timer_list multicast_timer;
};
@@ -1272,7 +1272,7 @@ static void lance_free_hwresources(struct lance_private *lp)
if (lp->lregs)
of_iounmap(&lp->op->resource[0], lp->lregs, LANCE_REG_SIZE);
if (lp->dregs) {
- struct of_device *ledma = lp->ledma;
+ struct platform_device *ledma = lp->ledma;
of_iounmap(&ledma->resource[0], lp->dregs,
resource_size(&ledma->resource[0]));
@@ -1319,9 +1319,9 @@ static const struct net_device_ops sparc_lance_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit sparc_lance_probe_one(struct of_device *op,
- struct of_device *ledma,
- struct of_device *lebuffer)
+static int __devinit sparc_lance_probe_one(struct platform_device *op,
+ struct platform_device *ledma,
+ struct platform_device *lebuffer)
{
struct device_node *dp = op->dev.of_node;
static unsigned version_printed;
@@ -1474,7 +1474,7 @@ no_link_test:
dev->ethtool_ops = &sparc_lance_ethtool_ops;
dev->netdev_ops = &sparc_lance_ops;
- dev->irq = op->irqs[0];
+ dev->irq = op->archdata.irqs[0];
/* We cannot sleep if the chip is busy during a
* multicast list update event, because such events
@@ -1483,7 +1483,7 @@ no_link_test:
*/
init_timer(&lp->multicast_timer);
lp->multicast_timer.data = (unsigned long) dev;
- lp->multicast_timer.function = &lance_set_multicast_retry;
+ lp->multicast_timer.function = lance_set_multicast_retry;
if (register_netdev(dev)) {
printk(KERN_ERR "SunLance: Cannot register device.\n");
@@ -1503,9 +1503,9 @@ fail:
return -ENODEV;
}
-static int __devinit sunlance_sbus_probe(struct of_device *op, const struct of_device_id *match)
+static int __devinit sunlance_sbus_probe(struct platform_device *op, const struct of_device_id *match)
{
- struct of_device *parent = to_of_device(op->dev.parent);
+ struct platform_device *parent = to_platform_device(op->dev.parent);
struct device_node *parent_dp = parent->dev.of_node;
int err;
@@ -1519,7 +1519,7 @@ static int __devinit sunlance_sbus_probe(struct of_device *op, const struct of_d
return err;
}
-static int __devexit sunlance_sbus_remove(struct of_device *op)
+static int __devexit sunlance_sbus_remove(struct platform_device *op)
{
struct lance_private *lp = dev_get_drvdata(&op->dev);
struct net_device *net_dev = lp->dev;
@@ -1558,12 +1558,12 @@ static struct of_platform_driver sunlance_sbus_driver = {
/* Find all the lance cards on the system and initialize them */
static int __init sparc_lance_init(void)
{
- return of_register_driver(&sunlance_sbus_driver, &of_bus_type);
+ return of_register_platform_driver(&sunlance_sbus_driver);
}
static void __exit sparc_lance_exit(void)
{
- of_unregister_driver(&sunlance_sbus_driver);
+ of_unregister_platform_driver(&sunlance_sbus_driver);
}
module_init(sparc_lance_init);
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index 72b579c8d812..72e65d4666ef 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -689,7 +689,7 @@ static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
const struct linux_prom_registers *regs;
struct sunqe *qep = netdev_priv(dev);
- struct of_device *op;
+ struct platform_device *op;
strcpy(info->driver, "sunqe");
strcpy(info->version, "3.0");
@@ -720,7 +720,7 @@ static const struct ethtool_ops qe_ethtool_ops = {
};
/* This is only called once at boot time for each card probed. */
-static void qec_init_once(struct sunqec *qecp, struct of_device *op)
+static void qec_init_once(struct sunqec *qecp, struct platform_device *op)
{
u8 bsizes = qecp->qec_bursts;
@@ -770,9 +770,9 @@ static u8 __devinit qec_get_burst(struct device_node *dp)
return bsizes;
}
-static struct sunqec * __devinit get_qec(struct of_device *child)
+static struct sunqec * __devinit get_qec(struct platform_device *child)
{
- struct of_device *op = to_of_device(child->dev.parent);
+ struct platform_device *op = to_platform_device(child->dev.parent);
struct sunqec *qecp;
qecp = dev_get_drvdata(&op->dev);
@@ -803,7 +803,7 @@ static struct sunqec * __devinit get_qec(struct of_device *child)
qec_init_once(qecp, op);
- if (request_irq(op->irqs[0], qec_interrupt,
+ if (request_irq(op->archdata.irqs[0], qec_interrupt,
IRQF_SHARED, "qec", (void *) qecp)) {
printk(KERN_ERR "qec: Can't register irq.\n");
goto fail;
@@ -836,7 +836,7 @@ static const struct net_device_ops qec_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit qec_ether_init(struct of_device *op)
+static int __devinit qec_ether_init(struct platform_device *op)
{
static unsigned version_printed;
struct net_device *dev;
@@ -901,7 +901,7 @@ static int __devinit qec_ether_init(struct of_device *op)
SET_NETDEV_DEV(dev, &op->dev);
dev->watchdog_timeo = 5*HZ;
- dev->irq = op->irqs[0];
+ dev->irq = op->archdata.irqs[0];
dev->dma = 0;
dev->ethtool_ops = &qe_ethtool_ops;
dev->netdev_ops = &qec_ops;
@@ -941,12 +941,12 @@ fail:
return res;
}
-static int __devinit qec_sbus_probe(struct of_device *op, const struct of_device_id *match)
+static int __devinit qec_sbus_probe(struct platform_device *op, const struct of_device_id *match)
{
return qec_ether_init(op);
}
-static int __devexit qec_sbus_remove(struct of_device *op)
+static int __devexit qec_sbus_remove(struct platform_device *op)
{
struct sunqe *qp = dev_get_drvdata(&op->dev);
struct net_device *net_dev = qp->dev;
@@ -988,18 +988,18 @@ static struct of_platform_driver qec_sbus_driver = {
static int __init qec_init(void)
{
- return of_register_driver(&qec_sbus_driver, &of_bus_type);
+ return of_register_platform_driver(&qec_sbus_driver);
}
static void __exit qec_exit(void)
{
- of_unregister_driver(&qec_sbus_driver);
+ of_unregister_platform_driver(&qec_sbus_driver);
while (root_qec_dev) {
struct sunqec *next = root_qec_dev->next_module;
- struct of_device *op = root_qec_dev->op;
+ struct platform_device *op = root_qec_dev->op;
- free_irq(op->irqs[0], (void *) root_qec_dev);
+ free_irq(op->archdata.irqs[0], (void *) root_qec_dev);
of_iounmap(&op->resource[0], root_qec_dev->gregs,
GLOB_REG_SIZE);
kfree(root_qec_dev);
diff --git a/drivers/net/sunqe.h b/drivers/net/sunqe.h
index 5813a7b2faa5..581781b6b2fa 100644
--- a/drivers/net/sunqe.h
+++ b/drivers/net/sunqe.h
@@ -314,7 +314,7 @@ struct sunqec {
void __iomem *gregs; /* QEC Global Registers */
struct sunqe *qes[4]; /* Each child MACE */
unsigned int qec_bursts; /* Support burst sizes */
- struct of_device *op; /* QEC's OF device */
+ struct platform_device *op; /* QEC's OF device */
struct sunqec *next_module; /* List of all QECs in system */
};
@@ -342,7 +342,7 @@ struct sunqe {
__u32 buffers_dvma; /* DVMA visible address. */
struct sunqec *parent;
u8 mconfig; /* Base MACE mconfig value */
- struct of_device *op; /* QE's OF device struct */
+ struct platform_device *op; /* QE's OF device struct */
struct net_device *dev; /* QE's netdevice struct */
int channel; /* Who am I? */
};
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index d281a7b34701..bf3c762de620 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -3,6 +3,8 @@
* Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -20,7 +22,6 @@
#include "sunvnet.h"
#define DRV_MODULE_NAME "sunvnet"
-#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.0"
#define DRV_MODULE_RELDATE "June 25, 2007"
@@ -45,9 +46,9 @@ static int vnet_handle_unknown(struct vnet_port *port, void *arg)
{
struct vio_msg_tag *pkt = arg;
- printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
+ pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n",
pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
- printk(KERN_ERR PFX "Resetting connection.\n");
+ pr_err("Resetting connection\n");
ldc_disconnect(port->vio.lp);
@@ -400,8 +401,8 @@ static int vnet_rx(struct vnet_port *port, void *msgbuf)
if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
return 0;
if (unlikely(pkt->seq != dr->rcv_nxt)) {
- printk(KERN_ERR PFX "RX out of sequence seq[0x%llx] "
- "rcv_nxt[0x%llx]\n", pkt->seq, dr->rcv_nxt);
+ pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n",
+ pkt->seq, dr->rcv_nxt);
return 0;
}
@@ -464,8 +465,7 @@ static int handle_mcast(struct vnet_port *port, void *msgbuf)
struct vio_net_mcast_info *pkt = msgbuf;
if (pkt->tag.stype != VIO_SUBTYPE_ACK)
- printk(KERN_ERR PFX "%s: Got unexpected MCAST reply "
- "[%02x:%02x:%04x:%08x]\n",
+ pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n",
port->vp->dev->name,
pkt->tag.type,
pkt->tag.stype,
@@ -520,7 +520,7 @@ static void vnet_event(void *arg, int event)
}
if (unlikely(event != LDC_EVENT_DATA_READY)) {
- printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
+ pr_warning("Unexpected LDC event %d\n", event);
spin_unlock_irqrestore(&vio->lock, flags);
return;
}
@@ -662,8 +662,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
/* This is a hard error, log it. */
- printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
- "queue awake!\n", dev->name);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
dev->stats.tx_errors++;
}
spin_unlock_irqrestore(&port->vio.lock, flags);
@@ -696,8 +695,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
err = __vnet_tx_trigger(port);
if (unlikely(err < 0)) {
- printk(KERN_INFO PFX "%s: TX trigger error %d\n",
- dev->name, err);
+ netdev_info(dev, "TX trigger error %d\n", err);
d->hdr.state = VIO_DESC_FREE;
dev->stats.tx_carrier_errors++;
goto out_dropped_unlock;
@@ -952,12 +950,12 @@ static int __devinit vnet_port_alloc_tx_bufs(struct vnet_port *port)
err = -ENOMEM;
if (!buf) {
- printk(KERN_ERR "TX buffer allocation failure\n");
+ pr_err("TX buffer allocation failure\n");
goto err_out;
}
err = -EFAULT;
if ((unsigned long)buf & (8UL - 1)) {
- printk(KERN_ERR "TX buffer misaligned\n");
+ pr_err("TX buffer misaligned\n");
kfree(buf);
goto err_out;
}
@@ -1030,7 +1028,7 @@ static struct vnet * __devinit vnet_new(const u64 *local_mac)
dev = alloc_etherdev(sizeof(*vp));
if (!dev) {
- printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+ pr_err("Etherdev alloc failed, aborting\n");
return ERR_PTR(-ENOMEM);
}
@@ -1056,12 +1054,11 @@ static struct vnet * __devinit vnet_new(const u64 *local_mac)
err = register_netdev(dev);
if (err) {
- printk(KERN_ERR PFX "Cannot register net device, "
- "aborting.\n");
+ pr_err("Cannot register net device, aborting\n");
goto err_out_free_dev;
}
- printk(KERN_INFO "%s: Sun LDOM vnet %pM\n", dev->name, dev->dev_addr);
+ netdev_info(dev, "Sun LDOM vnet %pM\n", dev->dev_addr);
list_add(&vp->list, &vnet_list);
@@ -1133,10 +1130,7 @@ static struct vio_driver_ops vnet_vio_ops = {
static void __devinit print_version(void)
{
- static int version_printed;
-
- if (version_printed++ == 0)
- printk(KERN_INFO "%s", version);
+ printk_once(KERN_INFO "%s", version);
}
const char *remote_macaddr_prop = "remote-mac-address";
@@ -1157,7 +1151,7 @@ static int __devinit vnet_port_probe(struct vio_dev *vdev,
vp = vnet_find_parent(hp, vdev->mp);
if (IS_ERR(vp)) {
- printk(KERN_ERR PFX "Cannot find port parent vnet.\n");
+ pr_err("Cannot find port parent vnet\n");
err = PTR_ERR(vp);
goto err_out_put_mdesc;
}
@@ -1165,15 +1159,14 @@ static int __devinit vnet_port_probe(struct vio_dev *vdev,
rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
err = -ENODEV;
if (!rmac) {
- printk(KERN_ERR PFX "Port lacks %s property.\n",
- remote_macaddr_prop);
+ pr_err("Port lacks %s property\n", remote_macaddr_prop);
goto err_out_put_mdesc;
}
port = kzalloc(sizeof(*port), GFP_KERNEL);
err = -ENOMEM;
if (!port) {
- printk(KERN_ERR PFX "Cannot allocate vnet_port.\n");
+ pr_err("Cannot allocate vnet_port\n");
goto err_out_put_mdesc;
}
@@ -1214,9 +1207,8 @@ static int __devinit vnet_port_probe(struct vio_dev *vdev,
dev_set_drvdata(&vdev->dev, port);
- printk(KERN_INFO "%s: PORT ( remote-mac %pM%s )\n",
- vp->dev->name, port->raddr,
- switch_port ? " switch-port" : "");
+ pr_info("%s: PORT ( remote-mac %pM%s )\n",
+ vp->dev->name, port->raddr, switch_port ? " switch-port" : "");
vio_port_up(&port->vio);
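/*
 * Editor's sketch (illustration only): with the pr_fmt() define added at
 * the top of sunvnet.c, a call such as
 *
 *	pr_err("Resetting connection\n");
 *
 * expands to printk(KERN_ERR KBUILD_MODNAME ": " "Resetting connection\n"),
 * which is why the hand-rolled PFX prefix could be deleted above.
 */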
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 737df6032bbc..8b3dc1eb4015 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -92,7 +92,7 @@ static void bdx_rx_free(struct bdx_priv *priv);
static void bdx_tx_free(struct bdx_priv *priv);
/* Definitions needed by bdx_probe */
-static void bdx_ethtool_ops(struct net_device *netdev);
+static void bdx_set_ethtool_ops(struct net_device *netdev);
/*************************************************************************
* Print Info *
@@ -927,13 +927,6 @@ static void bdx_update_stats(struct bdx_priv *priv)
BDX_ASSERT((sizeof(struct bdx_stats) / sizeof(u64)) != i);
}
-static struct net_device_stats *bdx_get_stats(struct net_device *ndev)
-{
- struct bdx_priv *priv = netdev_priv(ndev);
- struct net_device_stats *net_stat = &priv->net_stats;
- return net_stat;
-}
-
static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
u16 rxd_vlan);
static void print_rxfd(struct rxf_desc *rxfd);
@@ -1220,6 +1213,7 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
{
+ struct net_device *ndev = priv->ndev;
struct sk_buff *skb, *skb2;
struct rxd_desc *rxdd;
struct rx_map *dm;
@@ -1273,7 +1267,7 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
if (unlikely(GET_RXD_ERR(rxd_val1))) {
DBG("rxd_err = 0x%x\n", GET_RXD_ERR(rxd_val1));
- priv->net_stats.rx_errors++;
+ ndev->stats.rx_errors++;
bdx_recycle_skb(priv, rxdd);
continue;
}
@@ -1300,15 +1294,16 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
bdx_rxdb_free_elem(db, rxdd->va_lo);
}
- priv->net_stats.rx_bytes += len;
+ ndev->stats.rx_bytes += len;
skb_put(skb, len);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->protocol = eth_type_trans(skb, priv->ndev);
+ skb->protocol = eth_type_trans(skb, ndev);
/* Non-IP packets aren't checksum-offloaded */
if (GET_RXD_PKT_ID(rxd_val1) == 0)
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
NETIF_RX_MUX(priv, rxd_val1, rxd_vlan, skb);
@@ -1316,7 +1311,7 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
break;
}
- priv->net_stats.rx_packets += done;
+ ndev->stats.rx_packets += done;
/* FIXME: do something to minimize pci accesses */
WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);
@@ -1712,8 +1707,8 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
#ifdef BDX_LLTX
ndev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
#endif
- priv->net_stats.tx_packets++;
- priv->net_stats.tx_bytes += skb->len;
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
if (priv->tx_level < BDX_MIN_TX_LEVEL) {
DBG("%s: %s: TX Q STOP level %d\n",
@@ -1888,7 +1883,6 @@ static const struct net_device_ops bdx_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = bdx_ioctl,
.ndo_set_multicast_list = bdx_setmulti,
- .ndo_get_stats = bdx_get_stats,
.ndo_change_mtu = bdx_change_mtu,
.ndo_set_mac_address = bdx_set_mac,
.ndo_vlan_rx_register = bdx_vlan_rx_register,
@@ -2012,7 +2006,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ndev->netdev_ops = &bdx_netdev_ops;
ndev->tx_queue_len = BDX_NDEV_TXQ_LEN;
- bdx_ethtool_ops(ndev); /* ethtool interface */
+ bdx_set_ethtool_ops(ndev); /* ethtool interface */
/* these fields are used for info purposes only
* so we can have them same for all ports of the board */
@@ -2417,10 +2411,10 @@ static void bdx_get_ethtool_stats(struct net_device *netdev,
}
/*
- * bdx_ethtool_ops - ethtool interface implementation
+ * bdx_set_ethtool_ops - ethtool interface implementation
* @netdev
*/
-static void bdx_ethtool_ops(struct net_device *netdev)
+static void bdx_set_ethtool_ops(struct net_device *netdev)
{
static const struct ethtool_ops bdx_ethtool_ops = {
.get_settings = bdx_get_settings,
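/*
 * Editor's note (rough sketch of the new helper, assuming this kernel's
 * skbuff.h): a freshly allocated skb already has
 * ip_summed == CHECKSUM_NONE, so skb_checksum_none_assert() only checks
 * that invariant under debug builds instead of redundantly storing it:
 *
 *	static inline void skb_checksum_none_assert(struct sk_buff *skb)
 *	{
 *	#ifdef DEBUG
 *		BUG_ON(skb->ip_summed != CHECKSUM_NONE);
 *	#endif
 *	}
 */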
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h
index 67e3b71bf705..b6ba8601e2b5 100644
--- a/drivers/net/tehuti.h
+++ b/drivers/net/tehuti.h
@@ -269,7 +269,6 @@ struct bdx_priv {
u32 msg_enable;
int stats_flag;
struct bdx_stats hw_stats;
- struct net_device_stats net_stats;
struct pci_dev *pdev;
struct pci_nic *nic;
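/*
 * Editor's note: every net_device already embeds a stats block, and the
 * core falls back to dev->stats when no .ndo_get_stats callback is
 * registered, so the private net_stats copy and the trivial
 * bdx_get_stats() getter removed above were redundant; counters are now
 * bumped directly, e.g.
 *
 *	ndev->stats.rx_errors++;
 */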
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index bc3af78a869f..e7a2ba8b20a2 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -1917,19 +1917,16 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
*/
static int tg3_phy_reset(struct tg3 *tp)
{
- u32 cpmuctrl;
- u32 phy_status;
+ u32 val, cpmuctrl;
int err;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
- u32 val;
-
val = tr32(GRC_MISC_CFG);
tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
udelay(40);
}
- err = tg3_readphy(tp, MII_BMSR, &phy_status);
- err |= tg3_readphy(tp, MII_BMSR, &phy_status);
+ err = tg3_readphy(tp, MII_BMSR, &val);
+ err |= tg3_readphy(tp, MII_BMSR, &val);
if (err != 0)
return -EBUSY;
@@ -1961,18 +1958,14 @@ static int tg3_phy_reset(struct tg3 *tp)
return err;
if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
- u32 phy;
-
- phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
- tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
+ val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
+ tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
tw32(TG3_CPMU_CTRL, cpmuctrl);
}
if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
- u32 val;
-
val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
CPMU_LSPD_1000MB_MACCLK_12_5) {
@@ -2028,23 +2021,19 @@ out:
/* Cannot do read-modify-write on 5401 */
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
- u32 phy_reg;
-
/* Set bit 14 with read-modify-write to preserve other bits */
if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
- !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
- tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
+ !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, val | 0x4000);
}
/* Set phy register 0x10 bit 0 to high fifo elasticity to support
* jumbo frames transmission.
*/
if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
- u32 phy_reg;
-
- if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
+ if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
tg3_writephy(tp, MII_TG3_EXT_CTRL,
- phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
+ val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
}
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
@@ -3060,7 +3049,7 @@ static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
int current_link_up;
- u32 bmsr, dummy;
+ u32 bmsr, val;
u32 lcl_adv, rmt_adv;
u16 current_speed;
u8 current_duplex;
@@ -3140,8 +3129,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
}
/* Clear pending interrupts... */
- tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
- tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
+ tg3_readphy(tp, MII_TG3_ISTAT, &val);
+ tg3_readphy(tp, MII_TG3_ISTAT, &val);
if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
@@ -3162,8 +3151,6 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
current_duplex = DUPLEX_INVALID;
if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
- u32 val;
-
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
if (!(val & (1 << 10))) {
@@ -3238,13 +3225,11 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
relink:
if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
- u32 tmp;
-
tg3_phy_copper_begin(tp);
- tg3_readphy(tp, MII_BMSR, &tmp);
- if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
- (tmp & BMSR_LSTATUS))
+ tg3_readphy(tp, MII_BMSR, &bmsr);
+ if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+ (bmsr & BMSR_LSTATUS))
current_link_up = 1;
}
@@ -4549,7 +4534,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
struct tg3 *tp = tnapi->tp;
struct tg3_rx_buffer_desc *src_desc, *dest_desc;
struct ring_info *src_map, *dest_map;
- struct tg3_rx_prodring_set *spr = &tp->prodring[0];
+ struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
int dest_idx;
switch (opaque_key) {
@@ -4619,7 +4604,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
u32 sw_idx = tnapi->rx_rcb_ptr;
u16 hw_idx;
int received;
- struct tg3_rx_prodring_set *tpr = tnapi->prodring;
+ struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
hw_idx = *(tnapi->rx_rcb_prod_idx);
/*
@@ -4644,13 +4629,13 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
if (opaque_key == RXD_OPAQUE_RING_STD) {
- ri = &tp->prodring[0].rx_std_buffers[desc_idx];
+ ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
dma_addr = dma_unmap_addr(ri, mapping);
skb = ri->skb;
post_ptr = &std_prod_idx;
rx_std_posted++;
} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
- ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
+ ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
dma_addr = dma_unmap_addr(ri, mapping);
skb = ri->skb;
post_ptr = &jmb_prod_idx;
@@ -4719,7 +4704,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
>> RXD_TCPCSUM_SHIFT) == 0xffff))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, tp->dev);
@@ -4981,14 +4966,14 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
work_done += tg3_rx(tnapi, budget - work_done);
if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
- struct tg3_rx_prodring_set *dpr = &tp->prodring[0];
+ struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
int i, err = 0;
u32 std_prod_idx = dpr->rx_std_prod_idx;
u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
for (i = 1; i < tp->irq_cnt; i++)
err |= tg3_rx_prodring_xfer(tp, dpr,
- tp->napi[i].prodring);
+ &tp->napi[i].prodring);
wmb();
@@ -5574,9 +5559,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
goto out_unlock;
}
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ if (skb_is_gso_v6(skb)) {
hdrlen = skb_headlen(skb) - ETH_HLEN;
- else {
+ } else {
struct iphdr *iph = ip_hdr(skb);
tcp_opt_len = tcp_optlen(skb);
@@ -5798,7 +5783,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
iph = ip_hdr(skb);
tcp_opt_len = tcp_optlen(skb);
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
+ if (skb_is_gso_v6(skb)) {
hdr_len = skb_headlen(skb) - ETH_HLEN;
} else {
u32 ip_tcp_len;
@@ -6057,7 +6042,7 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
{
int i;
- if (tpr != &tp->prodring[0]) {
+ if (tpr != &tp->napi[0].prodring) {
for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
i = (i + 1) % TG3_RX_RING_SIZE)
tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
@@ -6103,7 +6088,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
tpr->rx_jmb_cons_idx = 0;
tpr->rx_jmb_prod_idx = 0;
- if (tpr != &tp->prodring[0]) {
+ if (tpr != &tp->napi[0].prodring) {
memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE);
if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
memset(&tpr->rx_jmb_buffers[0], 0,
@@ -6253,7 +6238,7 @@ static void tg3_free_rings(struct tg3 *tp)
for (j = 0; j < tp->irq_cnt; j++) {
struct tg3_napi *tnapi = &tp->napi[j];
- tg3_rx_prodring_free(tp, &tp->prodring[j]);
+ tg3_rx_prodring_free(tp, &tnapi->prodring);
if (!tnapi->tx_buffers)
continue;
@@ -6325,7 +6310,7 @@ static int tg3_init_rings(struct tg3 *tp)
if (tnapi->rx_rcb)
memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
- if (tg3_rx_prodring_alloc(tp, &tp->prodring[i])) {
+ if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
tg3_free_rings(tp);
return -ENOMEM;
}
@@ -6361,6 +6346,8 @@ static void tg3_free_consistent(struct tg3 *tp)
tnapi->rx_rcb = NULL;
}
+ tg3_rx_prodring_fini(tp, &tnapi->prodring);
+
if (tnapi->hw_status) {
pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
tnapi->hw_status,
@@ -6374,9 +6361,6 @@ static void tg3_free_consistent(struct tg3 *tp)
tp->hw_stats, tp->stats_mapping);
tp->hw_stats = NULL;
}
-
- for (i = 0; i < tp->irq_cnt; i++)
- tg3_rx_prodring_fini(tp, &tp->prodring[i]);
}
/*
@@ -6387,11 +6371,6 @@ static int tg3_alloc_consistent(struct tg3 *tp)
{
int i;
- for (i = 0; i < tp->irq_cnt; i++) {
- if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
- goto err_out;
- }
-
tp->hw_stats = pci_alloc_consistent(tp->pdev,
sizeof(struct tg3_hw_stats),
&tp->stats_mapping);
@@ -6413,6 +6392,9 @@ static int tg3_alloc_consistent(struct tg3 *tp)
memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
sblk = tnapi->hw_status;
+ if (tg3_rx_prodring_init(tp, &tnapi->prodring))
+ goto err_out;
+
/* If multivector TSS is enabled, vector 0 does not handle
* tx interrupts. Don't allocate any resources for it.
*/
@@ -6452,8 +6434,6 @@ static int tg3_alloc_consistent(struct tg3 *tp)
break;
}
- tnapi->prodring = &tp->prodring[i];
-
/*
* If multivector RSS is enabled, vector 0 does not handle
* rx or tx interrupts. Don't allocate any resources for it.
@@ -6596,6 +6576,10 @@ static void tg3_ape_send_event(struct tg3 *tp, u32 event)
int i;
u32 apedata;
+ /* NCSI does not support APE events */
+ if (tp->tg3_flags3 & TG3_FLG3_APE_HAS_NCSI)
+ return;
+
apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
if (apedata != APE_SEG_SIG_MAGIC)
return;
@@ -6647,6 +6631,8 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
APE_HOST_BEHAV_NO_PHYLOCK);
+ tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
+ TG3_APE_HOST_DRVR_STATE_START);
event = APE_EVENT_STATUS_STATE_START;
break;
@@ -6658,6 +6644,16 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
*/
tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
+ if (device_may_wakeup(&tp->pdev->dev) &&
+ (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
+ tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
+ TG3_APE_HOST_WOL_SPEED_AUTO);
+ apedata = TG3_APE_HOST_DRVR_STATE_WOL;
+ } else
+ apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
+
+ tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
+
event = APE_EVENT_STATUS_STATE_UNLOAD;
break;
case RESET_KIND_SUSPEND:
@@ -7548,7 +7544,7 @@ static void tg3_rings_reset(struct tg3 *tp)
/* Zero mailbox registers. */
if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
- for (i = 1; i < TG3_IRQ_MAX_VECS; i++) {
+ for (i = 1; i < tp->irq_max; i++) {
tp->napi[i].tx_prod = 0;
tp->napi[i].tx_cons = 0;
if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
@@ -7631,7 +7627,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
u32 val, rdmac_mode;
int i, err, limit;
- struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
+ struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
tg3_disable_ints(tp);
@@ -8015,6 +8011,16 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
+ val = tr32(TG3_RDMA_RSRVCTRL_REG);
+ tw32(TG3_RDMA_RSRVCTRL_REG,
+ val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
+ }
+
/* Receive/send statistics. */
if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
val = tr32(RCVLPC_STATS_ENABLE);
@@ -8817,16 +8823,9 @@ static bool tg3_enable_msix(struct tg3 *tp)
tp->napi[i].irq_vec = msix_ent[i].vector;
tp->dev->real_num_tx_queues = 1;
- if (tp->irq_cnt > 1) {
+ if (tp->irq_cnt > 1)
tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
- tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
- tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
- }
- }
-
return true;
}
@@ -9867,7 +9866,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
tp->rx_pending = 63;
tp->rx_jumbo_pending = ering->rx_jumbo_pending;
- for (i = 0; i < TG3_IRQ_MAX_VECS; i++)
+ for (i = 0; i < tp->irq_max; i++)
tp->napi[i].tx_pending = ering->tx_pending;
if (netif_running(dev)) {
@@ -10608,7 +10607,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
int num_pkts, tx_len, rx_len, i, err;
struct tg3_rx_buffer_desc *desc;
struct tg3_napi *tnapi, *rnapi;
- struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
+ struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
tnapi = &tp->napi[0];
rnapi = &tp->napi[0];
@@ -12401,14 +12400,18 @@ skip_phy_reset:
static void __devinit tg3_read_vpd(struct tg3 *tp)
{
- u8 vpd_data[TG3_NVM_VPD_LEN];
+ u8 *vpd_data;
unsigned int block_end, rosize, len;
int j, i = 0;
u32 magic;
if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
tg3_nvram_read(tp, 0x0, &magic))
- goto out_not_found;
+ goto out_no_vpd;
+
+ vpd_data = kmalloc(TG3_NVM_VPD_LEN, GFP_KERNEL);
+ if (!vpd_data)
+ goto out_no_vpd;
if (magic == TG3_EEPROM_MAGIC) {
for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
@@ -12492,9 +12495,12 @@ partno:
memcpy(tp->board_part_number, &vpd_data[i], len);
- return;
-
out_not_found:
+ kfree(vpd_data);
+ if (!tp->board_part_number[0])
+ return;
+
+out_no_vpd:
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
strcpy(tp->board_part_number, "BCM95906");
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
@@ -12736,10 +12742,12 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp)
apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
- if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
+ if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
+ tp->tg3_flags3 |= TG3_FLG3_APE_HAS_NCSI;
fwtype = "NCSI";
- else
+ } else {
fwtype = "DASH";
+ }
vlen = strlen(tp->fw_ver);
@@ -13410,10 +13418,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (err)
return err;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
- return -ENOTSUPP;
-
/* Initialize data/descriptor byte/word swapping. */
val = tr32(GRC_MODE);
val &= GRC_MODE_HOST_STACKUP;
@@ -14442,7 +14446,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
}
if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
- tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
dev->netdev_ops = &tg3_netdev_ops;
else
@@ -14581,7 +14585,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
- for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
+ for (i = 0; i < tp->irq_max; i++) {
struct tg3_napi *tnapi = &tp->napi[i];
tnapi->tp = tp;
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 4937bd190964..44733e4a68a2 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -1302,7 +1302,11 @@
#define RDMAC_STATUS_FIFOURUN 0x00000080
#define RDMAC_STATUS_FIFOOREAD 0x00000100
#define RDMAC_STATUS_LNGREAD 0x00000200
-/* 0x4808 --> 0x4c00 unused */
+/* 0x4808 --> 0x4900 unused */
+
+#define TG3_RDMA_RSRVCTRL_REG 0x00004900
+#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004
+/* 0x4904 --> 0x4c00 unused */
/* Write DMA control registers */
#define WDMAC_MODE 0x00004c00
@@ -2176,7 +2180,7 @@
#define TG3_APE_HOST_SEG_SIG 0x4200
#define APE_HOST_SEG_SIG_MAGIC 0x484f5354
#define TG3_APE_HOST_SEG_LEN 0x4204
-#define APE_HOST_SEG_LEN_MAGIC 0x0000001c
+#define APE_HOST_SEG_LEN_MAGIC 0x00000020
#define TG3_APE_HOST_INIT_COUNT 0x4208
#define TG3_APE_HOST_DRIVER_ID 0x420c
#define APE_HOST_DRIVER_ID_LINUX 0xf0000000
@@ -2188,6 +2192,12 @@
#define APE_HOST_HEARTBEAT_INT_DISABLE 0
#define APE_HOST_HEARTBEAT_INT_5SEC 5000
#define TG3_APE_HOST_HEARTBEAT_COUNT 0x4218
+#define TG3_APE_HOST_DRVR_STATE 0x421c
+#define TG3_APE_HOST_DRVR_STATE_START 0x00000001
+#define TG3_APE_HOST_DRVR_STATE_UNLOAD 0x00000002
+#define TG3_APE_HOST_DRVR_STATE_WOL 0x00000003
+#define TG3_APE_HOST_WOL_SPEED 0x4224
+#define TG3_APE_HOST_WOL_SPEED_AUTO 0x00008000
#define TG3_APE_EVENT_STATUS 0x4300
@@ -2649,7 +2659,8 @@ struct tg3_rx_prodring_set {
dma_addr_t rx_jmb_mapping;
};
-#define TG3_IRQ_MAX_VECS 5
+#define TG3_IRQ_MAX_VECS_RSS 5
+#define TG3_IRQ_MAX_VECS TG3_IRQ_MAX_VECS_RSS
struct tg3_napi {
struct napi_struct napi ____cacheline_aligned;
@@ -2668,7 +2679,7 @@ struct tg3_napi {
u32 consmbox;
u32 rx_rcb_ptr;
u16 *rx_rcb_prod_idx;
- struct tg3_rx_prodring_set *prodring;
+ struct tg3_rx_prodring_set prodring;
struct tg3_rx_buffer_desc *rx_rcb;
struct tg3_tx_buffer_desc *tx_ring;
@@ -2755,8 +2766,6 @@ struct tg3 {
struct vlan_group *vlgrp;
#endif
- struct tg3_rx_prodring_set prodring[TG3_IRQ_MAX_VECS];
-
/* begin "everything else" cacheline(s) section */
struct rtnl_link_stats64 net_stats;
@@ -2850,6 +2859,7 @@ struct tg3 {
#define TG3_FLG3_USE_JUMBO_BDFLAG 0x00400000
#define TG3_FLG3_L1PLLPD_EN 0x00800000
#define TG3_FLG3_5717_PLUS 0x01000000
+#define TG3_FLG3_APE_HAS_NCSI 0x02000000
struct timer_list timer;
u16 timer_counter;
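/*
 * Editor's sketch: with tg3_rx_prodring_set embedded in tg3_napi, each
 * vector's RX producer ring is reached through its napi context,
 *
 *	struct tg3_rx_prodring_set *tpr = &tp->napi[i].prodring;
 *
 * replacing both the fixed tp->prodring[TG3_IRQ_MAX_VECS] array and the
 * per-vector pointer that used to be wired to it.
 */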
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index ccee3eddc5f4..0564ca05963d 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -393,7 +393,7 @@ TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type )
spin_unlock_irqrestore(&priv->lock, flags);
return;
}
- priv->timer.function = &TLan_Timer;
+ priv->timer.function = TLan_Timer;
if (!in_irq())
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1453,7 +1453,7 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
TLan_DioWrite8( dev->base_addr,
TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
if ( priv->timer.function == NULL ) {
- priv->timer.function = &TLan_Timer;
+ priv->timer.function = TLan_Timer;
priv->timer.data = (unsigned long) dev;
priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
priv->timerSetAt = jiffies;
@@ -1601,7 +1601,7 @@ drop_and_reuse:
TLan_DioWrite8( dev->base_addr,
TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
if ( priv->timer.function == NULL ) {
- priv->timer.function = &TLan_Timer;
+ priv->timer.function = TLan_Timer;
priv->timer.data = (unsigned long) dev;
priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
priv->timerSetAt = jiffies;
@@ -1897,7 +1897,7 @@ static void TLan_Timer( unsigned long data )
TLan_DioWrite8( dev->base_addr,
TLAN_LED_REG, TLAN_LED_LINK );
} else {
- priv->timer.function = &TLan_Timer;
+ priv->timer.function = TLan_Timer;
priv->timer.expires = priv->timerSetAt
+ TLAN_TIMER_ACT_DELAY;
spin_unlock_irqrestore(&priv->lock, flags);
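/*
 * Editor's note (plain C fact, not patch-specific): a function designator
 * decays to a pointer-to-function, so dropping the '&' is purely
 * cosmetic; both stores assign the same address:
 *
 *	priv->timer.function = &TLan_Timer;
 *	priv->timer.function = TLan_Timer;
 */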
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 435ef7d5470f..08182fde3dcd 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -1321,14 +1321,12 @@ static int tms380tr_reset_adapter(struct net_device *dev)
/* Clear CPHALT and start BUD */
SIFWRITEW(c, SIFACL);
- if (fw_entry)
- release_firmware(fw_entry);
+ release_firmware(fw_entry);
return (1);
}
} while(count == 0);
- if (fw_entry)
- release_firmware(fw_entry);
+ release_firmware(fw_entry);
printk(KERN_INFO "%s: Adapter Download Failed\n", dev->name);
return (-1);
}
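/*
 * Editor's note: release_firmware() tolerates a NULL argument, roughly
 *
 *	void release_firmware(const struct firmware *fw)
 *	{
 *		if (fw) {
 *			...
 *		}
 *	}
 *
 * so the "if (fw_entry)" guards removed above were redundant.
 */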
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 75a64c88cf7a..1d9fef74b39b 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -1448,7 +1448,7 @@ de4x5_sw_reset(struct net_device *dev)
status = -EIO;
}
- lp->tx_new = (++lp->tx_new) % lp->txRingSize;
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
lp->tx_old = lp->tx_new;
return status;
@@ -1506,7 +1506,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
lp->stats.tx_bytes += skb->len;
outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */
- lp->tx_new = (++lp->tx_new) % lp->txRingSize;
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
if (TX_BUFFS_AVAIL) {
netif_start_queue(dev); /* Another pkt may be queued */
@@ -1657,7 +1657,7 @@ de4x5_rx(struct net_device *dev)
}
/* Change buffer ownership for this frame, back to the adapter */
- for (;lp->rx_old!=entry;lp->rx_old=(++lp->rx_old)%lp->rxRingSize) {
+ for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old + 1)%lp->rxRingSize) {
lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN);
barrier();
}
@@ -1668,7 +1668,7 @@ de4x5_rx(struct net_device *dev)
/*
** Update entry information
*/
- lp->rx_new = (++lp->rx_new) % lp->rxRingSize;
+ lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
}
return 0;
@@ -1726,7 +1726,7 @@ de4x5_tx(struct net_device *dev)
}
/* Update all the pointers */
- lp->tx_old = (++lp->tx_old) % lp->txRingSize;
+ lp->tx_old = (lp->tx_old + 1) % lp->txRingSize;
}
/* Any resources available? */
@@ -1801,7 +1801,7 @@ de4x5_rx_ovfc(struct net_device *dev)
for (; (s32)le32_to_cpu(lp->rx_ring[lp->rx_new].status)>=0;) {
lp->rx_ring[lp->rx_new].status = cpu_to_le32(R_OWN);
- lp->rx_new = (++lp->rx_new % lp->rxRingSize);
+ lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
}
outl(omr, DE4X5_OMR);
@@ -1932,7 +1932,7 @@ set_multicast_list(struct net_device *dev)
load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
SETUP_FRAME_LEN, (struct sk_buff *)1);
- lp->tx_new = (++lp->tx_new) % lp->txRingSize;
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
dev->trans_start = jiffies; /* prevent tx timeout */
}
@@ -3568,7 +3568,7 @@ ping_media(struct net_device *dev, int msec)
lp->tmp = lp->tx_new; /* Remember the ring position */
load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1);
- lp->tx_new = (++lp->tx_new) % lp->txRingSize;
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
outl(POLL_DEMAND, DE4X5_TPD);
}
@@ -5417,7 +5417,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
/* Set up the descriptor and give ownership to the card */
load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
SETUP_FRAME_LEN, (struct sk_buff *)1);
- lp->tx_new = (++lp->tx_new) % lp->txRingSize;
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
netif_wake_queue(dev); /* Unlock the TX ring */
break;
@@ -5474,7 +5474,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
tmp.lval[6] = inl(DE4X5_STRR); j+=4;
tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
ioc->len = j;
- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
+ if (copy_to_user(ioc->data, tmp.lval, ioc->len)) return -EFAULT;
break;
#define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */
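/*
 * Editor's note: the replaced pattern modified lp->tx_new twice (once via
 * ++, once via the assignment) with no intervening sequence point, which
 * is undefined behaviour in C and something newer gcc versions warn about:
 *
 *	lp->tx_new = (++lp->tx_new) % lp->txRingSize;	(undefined)
 *	lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;	(well-defined)
 */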
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 0bc4f3030a80..a9f7d5d1a269 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -599,7 +599,7 @@ static int dmfe_open(struct DEVICE *dev)
init_timer(&db->timer);
db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
db->timer.data = (unsigned long)dev;
- db->timer.function = &dmfe_timer;
+ db->timer.function = dmfe_timer;
add_timer(&db->timer);
return 0;
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index 1faf7a4d7202..0013642903ee 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -180,21 +180,24 @@ int tulip_poll(struct napi_struct *napi, int budget)
dev_warn(&dev->dev,
"Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
status);
- tp->stats.rx_length_errors++;
- }
+ dev->stats.rx_length_errors++;
+ }
} else {
/* There was a fatal error. */
if (tulip_debug > 2)
printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
dev->name, status);
- tp->stats.rx_errors++; /* end of a packet.*/
- if (pkt_len > 1518 ||
- (status & RxDescRunt))
- tp->stats.rx_length_errors++;
-
- if (status & 0x0004) tp->stats.rx_frame_errors++;
- if (status & 0x0002) tp->stats.rx_crc_errors++;
- if (status & 0x0001) tp->stats.rx_fifo_errors++;
+ dev->stats.rx_errors++; /* end of a packet.*/
+ if (pkt_len > 1518 ||
+ (status & RxDescRunt))
+ dev->stats.rx_length_errors++;
+
+ if (status & 0x0004)
+ dev->stats.rx_frame_errors++;
+ if (status & 0x0002)
+ dev->stats.rx_crc_errors++;
+ if (status & 0x0001)
+ dev->stats.rx_fifo_errors++;
}
} else {
struct sk_buff *skb;
@@ -244,8 +247,8 @@ int tulip_poll(struct napi_struct *napi, int budget)
netif_receive_skb(skb);
- tp->stats.rx_packets++;
- tp->stats.rx_bytes += pkt_len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
}
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
received++;
@@ -404,20 +407,23 @@ static int tulip_rx(struct net_device *dev)
dev_warn(&dev->dev,
"Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
status);
- tp->stats.rx_length_errors++;
+ dev->stats.rx_length_errors++;
}
} else {
/* There was a fatal error. */
if (tulip_debug > 2)
printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
dev->name, status);
- tp->stats.rx_errors++; /* end of a packet.*/
+ dev->stats.rx_errors++; /* end of a packet.*/
if (pkt_len > 1518 ||
(status & RxDescRunt))
- tp->stats.rx_length_errors++;
- if (status & 0x0004) tp->stats.rx_frame_errors++;
- if (status & 0x0002) tp->stats.rx_crc_errors++;
- if (status & 0x0001) tp->stats.rx_fifo_errors++;
+ dev->stats.rx_length_errors++;
+ if (status & 0x0004)
+ dev->stats.rx_frame_errors++;
+ if (status & 0x0002)
+ dev->stats.rx_crc_errors++;
+ if (status & 0x0001)
+ dev->stats.rx_fifo_errors++;
}
} else {
struct sk_buff *skb;
@@ -467,8 +473,8 @@ static int tulip_rx(struct net_device *dev)
netif_rx(skb);
- tp->stats.rx_packets++;
- tp->stats.rx_bytes += pkt_len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
}
received++;
entry = (++tp->cur_rx) % RX_RING_SIZE;
@@ -602,18 +608,22 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
dev->name, status);
#endif
- tp->stats.tx_errors++;
- if (status & 0x4104) tp->stats.tx_aborted_errors++;
- if (status & 0x0C00) tp->stats.tx_carrier_errors++;
- if (status & 0x0200) tp->stats.tx_window_errors++;
- if (status & 0x0002) tp->stats.tx_fifo_errors++;
+ dev->stats.tx_errors++;
+ if (status & 0x4104)
+ dev->stats.tx_aborted_errors++;
+ if (status & 0x0C00)
+ dev->stats.tx_carrier_errors++;
+ if (status & 0x0200)
+ dev->stats.tx_window_errors++;
+ if (status & 0x0002)
+ dev->stats.tx_fifo_errors++;
if ((status & 0x0080) && tp->full_duplex == 0)
- tp->stats.tx_heartbeat_errors++;
+ dev->stats.tx_heartbeat_errors++;
} else {
- tp->stats.tx_bytes +=
+ dev->stats.tx_bytes +=
tp->tx_buffers[entry].skb->len;
- tp->stats.collisions += (status >> 3) & 15;
- tp->stats.tx_packets++;
+ dev->stats.collisions += (status >> 3) & 15;
+ dev->stats.tx_packets++;
}
pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
@@ -655,7 +665,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */
if (csr5 == 0xffffffff)
break;
- if (csr5 & TxJabber) tp->stats.tx_errors++;
+ if (csr5 & TxJabber)
+ dev->stats.tx_errors++;
if (csr5 & TxFIFOUnderflow) {
if ((tp->csr6 & 0xC000) != 0xC000)
tp->csr6 += 0x4000; /* Bump up the Tx threshold */
@@ -672,8 +683,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
}
}
if (csr5 & RxDied) { /* Missed a Rx frame. */
- tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
- tp->stats.rx_errors++;
+ dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
+ dev->stats.rx_errors++;
tulip_start_rxtx(tp);
}
/*
@@ -789,7 +800,7 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
#endif /* CONFIG_TULIP_NAPI */
if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
- tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
+ dev->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
}
if (tulip_debug > 4)
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index e525875ed67d..ed66a16711dc 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -417,7 +417,6 @@ struct tulip_private {
int revision;
int flags;
struct napi_struct napi;
- struct net_device_stats stats;
struct timer_list timer; /* Media selection timer. */
struct timer_list oom_timer; /* Out of memory timer. */
u32 mc_filter[2];
@@ -570,7 +569,7 @@ static inline void tulip_tx_timeout_complete(struct tulip_private *tp, void __io
/* Trigger an immediate transmit demand. */
iowrite32(0, ioaddr + CSR1);
- tp->stats.tx_errors++;
+ tp->dev->stats.tx_errors++;
}
#endif /* __NET_TULIP_H__ */
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 3a8d7efa2acf..2c39f2591216 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -725,7 +725,7 @@ static void tulip_clean_tx_ring(struct tulip_private *tp)
int status = le32_to_cpu(tp->tx_ring[entry].status);
if (status < 0) {
- tp->stats.tx_errors++; /* It wasn't Txed */
+ tp->dev->stats.tx_errors++; /* It wasn't Txed */
tp->tx_ring[entry].status = 0;
}
@@ -781,8 +781,8 @@ static void tulip_down (struct net_device *dev)
/* release any unconsumed transmit buffers */
tulip_clean_tx_ring(tp);
- if (ioread32 (ioaddr + CSR6) != 0xffffffff)
- tp->stats.rx_missed_errors += ioread32 (ioaddr + CSR8) & 0xffff;
+ if (ioread32(ioaddr + CSR6) != 0xffffffff)
+ dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
spin_unlock_irqrestore (&tp->lock, flags);
@@ -864,12 +864,12 @@ static struct net_device_stats *tulip_get_stats(struct net_device *dev)
spin_lock_irqsave (&tp->lock, flags);
- tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
+ dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
spin_unlock_irqrestore(&tp->lock, flags);
}
- return &tp->stats;
+ return &dev->stats;
}
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index 96de5829b940..1dc27a557275 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -480,7 +480,7 @@ static int uli526x_open(struct net_device *dev)
init_timer(&db->timer);
db->timer.expires = ULI526X_TIMER_WUT + HZ * 2;
db->timer.data = (unsigned long)dev;
- db->timer.function = &uli526x_timer;
+ db->timer.function = uli526x_timer;
add_timer(&db->timer);
return 0;
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 66d41cf8da29..f0b231035dee 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -662,7 +662,7 @@ static int netdev_open(struct net_device *dev)
init_timer(&np->timer);
np->timer.expires = jiffies + 1*HZ;
np->timer.data = (unsigned long)dev;
- np->timer.function = &netdev_timer; /* timer handler */
+ np->timer.function = netdev_timer; /* timer handler */
add_timer(&np->timer);
return 0;
out_err:
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index a439e93be22d..5a73752be2ca 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -29,7 +29,6 @@
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/init.h>
-#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
@@ -181,19 +180,6 @@ static void print_binary(unsigned int number)
}
#endif
-static void netdev_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- struct xircom_private *private = netdev_priv(dev);
-
- strcpy(info->driver, "xircom_cb");
- strcpy(info->bus_info, pci_name(private->pdev));
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
-};
-
static const struct net_device_ops netdev_ops = {
.ndo_open = xircom_open,
.ndo_stop = xircom_close,
@@ -279,7 +265,6 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
setup_descriptors(private);
dev->netdev_ops = &netdev_ops;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
pci_set_drvdata(pdev, dev);
if (register_netdev(dev)) {
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 2e50077ff450..5dfb39539b3e 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -962,36 +962,34 @@ typhoon_do_get_stats(struct typhoon *tp)
* The extra status reported would be a good candidate for
* ethtool_ops->get_{strings,stats}()
*/
- stats->tx_packets = le32_to_cpu(s->txPackets);
- stats->tx_bytes = le64_to_cpu(s->txBytes);
- stats->tx_errors = le32_to_cpu(s->txCarrierLost);
- stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
- stats->collisions = le32_to_cpu(s->txMultipleCollisions);
- stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
- stats->rx_bytes = le64_to_cpu(s->rxBytesGood);
- stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
+ stats->tx_packets = le32_to_cpu(s->txPackets) +
+ saved->tx_packets;
+ stats->tx_bytes = le64_to_cpu(s->txBytes) +
+ saved->tx_bytes;
+ stats->tx_errors = le32_to_cpu(s->txCarrierLost) +
+ saved->tx_errors;
+ stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost) +
+ saved->tx_carrier_errors;
+ stats->collisions = le32_to_cpu(s->txMultipleCollisions) +
+ saved->collisions;
+ stats->rx_packets = le32_to_cpu(s->rxPacketsGood) +
+ saved->rx_packets;
+ stats->rx_bytes = le64_to_cpu(s->rxBytesGood) +
+ saved->rx_bytes;
+ stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns) +
+ saved->rx_fifo_errors;
stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
- le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
- stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
- stats->rx_length_errors = le32_to_cpu(s->rxOversized);
+ le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors) +
+ saved->rx_errors;
+ stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors) +
+ saved->rx_crc_errors;
+ stats->rx_length_errors = le32_to_cpu(s->rxOversized) +
+ saved->rx_length_errors;
tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
SPEED_100 : SPEED_10;
tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
DUPLEX_FULL : DUPLEX_HALF;
- /* add in the saved statistics
- */
- stats->tx_packets += saved->tx_packets;
- stats->tx_bytes += saved->tx_bytes;
- stats->tx_errors += saved->tx_errors;
- stats->collisions += saved->collisions;
- stats->rx_packets += saved->rx_packets;
- stats->rx_bytes += saved->rx_bytes;
- stats->rx_fifo_errors += saved->rx_fifo_errors;
- stats->rx_errors += saved->rx_errors;
- stats->rx_crc_errors += saved->rx_crc_errors;
- stats->rx_length_errors += saved->rx_length_errors;
-
return 0;
}
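/*
 * Editor's note: folding the saved-> counters into each assignment, e.g.
 *
 *	stats->tx_packets = le32_to_cpu(s->txPackets) + saved->tx_packets;
 *
 * keeps one store per field and appears to also fix the deleted tail
 * block, which never added saved->tx_carrier_errors back in.
 */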
@@ -1762,7 +1760,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
(TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
new_skb->ip_summed = CHECKSUM_UNNECESSARY;
} else
- new_skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(new_skb);
spin_lock(&tp->state_lock);
if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 8d532f9b50d0..a4c3f5708246 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3601,7 +3601,7 @@ static void ucc_geth_timeout(struct net_device *dev)
#ifdef CONFIG_PM
-static int ucc_geth_suspend(struct of_device *ofdev, pm_message_t state)
+static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state)
{
struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
struct ucc_geth_private *ugeth = netdev_priv(ndev);
@@ -3629,7 +3629,7 @@ static int ucc_geth_suspend(struct of_device *ofdev, pm_message_t state)
return 0;
}
-static int ucc_geth_resume(struct of_device *ofdev)
+static int ucc_geth_resume(struct platform_device *ofdev)
{
struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
struct ucc_geth_private *ugeth = netdev_priv(ndev);
@@ -3732,7 +3732,7 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
#endif
};
-static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *match)
+static int ucc_geth_probe(struct platform_device* ofdev, const struct of_device_id *match)
{
struct device *device = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
@@ -3954,7 +3954,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
return 0;
}
-static int ucc_geth_remove(struct of_device* ofdev)
+static int ucc_geth_remove(struct platform_device* ofdev)
{
struct device *device = &ofdev->dev;
struct net_device *dev = dev_get_drvdata(device);
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index d7b7018a1de1..52ffabe6db0e 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -358,6 +358,14 @@ config USB_NET_ZAURUS
really need this non-conformant variant of CDC Ethernet (or in
some cases CDC MDLM) protocol, not "g_ether".
+config USB_NET_CX82310_ETH
+ tristate "Conexant CX82310 USB ethernet port"
+ depends on USB_USBNET
+ help
+ Choose this option if you're using a Conexant CX82310-based ADSL
+ router with a USB ethernet port. This driver is for routers only;
+ it will not work with ADSL modems (use the cxacru driver instead).
+
config USB_HSO
tristate "Option USB High Speed Mobile Devices"
depends on USB && RFKILL
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index b13a279663ba..a19b0259ae16 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -25,4 +25,5 @@ obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o
obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o
obj-$(CONFIG_USB_IPHETH) += ipheth.o
obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
+obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
new file mode 100644
index 000000000000..8969f124c18c
--- /dev/null
+++ b/drivers/net/usb/cx82310_eth.c
@@ -0,0 +1,346 @@
+/*
+ * Driver for USB ethernet port of Conexant CX82310-based ADSL routers
+ * Copyright (C) 2010 by Ondrej Zary
+ * some parts inspired by the cxacru driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/workqueue.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/usb/usbnet.h>
+
+enum cx82310_cmd {
+ CMD_START = 0x84, /* no effect? */
+ CMD_STOP = 0x85, /* no effect? */
+ CMD_GET_STATUS = 0x90, /* returns nothing? */
+ CMD_GET_MAC_ADDR = 0x91, /* read MAC address */
+ CMD_GET_LINK_STATUS = 0x92, /* not useful, link is always up */
+ CMD_ETHERNET_MODE = 0x99, /* unknown, needed during init */
+};
+
+enum cx82310_status {
+ STATUS_UNDEFINED,
+ STATUS_SUCCESS,
+ STATUS_ERROR,
+ STATUS_UNSUPPORTED,
+ STATUS_UNIMPLEMENTED,
+ STATUS_PARAMETER_ERROR,
+ STATUS_DBG_LOOPBACK,
+};
+
+#define CMD_PACKET_SIZE 64
+/* first command after power on can take around 8 seconds */
+#define CMD_TIMEOUT 15000
+#define CMD_REPLY_RETRY 5
+
+#define CX82310_MTU 1514
+#define CMD_EP 0x01
+
+/*
+ * execute control command
+ * - optionally send some data (command parameters)
+ * - optionally wait for the reply
+ * - optionally read some data from the reply
+ */
+static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
+ u8 *wdata, int wlen, u8 *rdata, int rlen)
+{
+ int actual_len, retries, ret;
+ struct usb_device *udev = dev->udev;
+ u8 *buf = kzalloc(CMD_PACKET_SIZE, GFP_KERNEL);
+
+ if (!buf)
+ return -ENOMEM;
+
+ /* create command packet */
+ buf[0] = cmd;
+ if (wdata)
+ memcpy(buf + 4, wdata, min_t(int, wlen, CMD_PACKET_SIZE - 4));
+
+ /* send command packet */
+ ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf,
+ CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT);
+ if (ret < 0) {
+ dev_err(&dev->udev->dev, "send command %#x: error %d\n",
+ cmd, ret);
+ goto end;
+ }
+
+ if (reply) {
+ /* wait for reply, retry if it's empty */
+ for (retries = 0; retries < CMD_REPLY_RETRY; retries++) {
+ ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, CMD_EP),
+ buf, CMD_PACKET_SIZE, &actual_len,
+ CMD_TIMEOUT);
+ if (ret < 0) {
+ dev_err(&dev->udev->dev,
+ "reply receive error %d\n", ret);
+ goto end;
+ }
+ if (actual_len > 0)
+ break;
+ }
+ if (actual_len == 0) {
+ dev_err(&dev->udev->dev, "no reply to command %#x\n",
+ cmd);
+ ret = -EIO;
+ goto end;
+ }
+ if (buf[0] != cmd) {
+ dev_err(&dev->udev->dev,
+ "got reply to command %#x, expected: %#x\n",
+ buf[0], cmd);
+ ret = -EIO;
+ goto end;
+ }
+ if (buf[1] != STATUS_SUCCESS) {
+ dev_err(&dev->udev->dev, "command %#x failed: %#x\n",
+ cmd, buf[1]);
+ ret = -EIO;
+ goto end;
+ }
+ if (rdata)
+ memcpy(rdata, buf + 4,
+ min_t(int, rlen, CMD_PACKET_SIZE - 4));
+ }
+end:
+ kfree(buf);
+ return ret;
+}
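+/*
+ * Editor's usage sketch (these mirror the real calls in cx82310_bind()
+ * below):
+ *
+ *	ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0);
+ *	ret = cx82310_cmd(dev, CMD_GET_MAC_ADDR, true, NULL, 0,
+ *			  dev->net->dev_addr, ETH_ALEN);
+ */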
+
+#define partial_len data[0] /* length of partial packet data */
+#define partial_rem data[1] /* remaining (missing) data length */
+#define partial_data data[2] /* partial packet data */
+
+static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ int ret;
+ char buf[15];
+ struct usb_device *udev = dev->udev;
+
+ /* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */
+ if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0
+ && strcmp(buf, "USB NET CARD")) {
+ dev_info(&udev->dev, "ignoring: probably an ADSL modem\n");
+ return -ENODEV;
+ }
+
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ return ret;
+
+ /*
+ * this must not include the ethernet header, as the device can send
+ * partial packets with no header (and sometimes even empty URBs)
+ */
+ dev->net->hard_header_len = 0;
+ /* we can send at most 1514 bytes of data (+ 2-byte header) per URB */
+ dev->hard_mtu = CX82310_MTU + 2;
+ /* we can receive URBs up to 4KB from the device */
+ dev->rx_urb_size = 4096;
+
+ dev->partial_data = (unsigned long) kmalloc(dev->hard_mtu, GFP_KERNEL);
+ if (!dev->partial_data)
+ return -ENOMEM;
+
+ /* enable ethernet mode (?) */
+ ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0);
+ if (ret) {
+ dev_err(&udev->dev, "unable to enable ethernet mode: %d\n",
+ ret);
+ goto err;
+ }
+
+ /* get the MAC address */
+ ret = cx82310_cmd(dev, CMD_GET_MAC_ADDR, true, NULL, 0,
+ dev->net->dev_addr, ETH_ALEN);
+ if (ret) {
+ dev_err(&udev->dev, "unable to read MAC address: %d\n", ret);
+ goto err;
+ }
+
+ /* start (does not seem to have any effect?) */
+ ret = cx82310_cmd(dev, CMD_START, false, NULL, 0, NULL, 0);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ kfree((void *)dev->partial_data);
+ return ret;
+}
+
+static void cx82310_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+ kfree((void *)dev->partial_data);
+}
+
+/*
+ * RX is NOT easy - we can receive multiple packets per skb, each preceded
+ * by a 2-byte packet length.
+ * The last packet might be incomplete (when it crosses the 4KB URB size),
+ * continuing in the next skb (without any header).
+ * If a packet has an odd length, there is one extra pad byte at the end
+ * (before the next packet or at the end of the URB).
+ */
+static int cx82310_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ int len;
+ struct sk_buff *skb2;
+
+ /*
+ * If the last skb ended with an incomplete packet, this skb contains
+ * end of that packet at the beginning.
+ */
+ if (dev->partial_rem) {
+ len = dev->partial_len + dev->partial_rem;
+ skb2 = alloc_skb(len, GFP_ATOMIC);
+ if (!skb2)
+ return 0;
+ skb_put(skb2, len);
+ memcpy(skb2->data, (void *)dev->partial_data,
+ dev->partial_len);
+ memcpy(skb2->data + dev->partial_len, skb->data,
+ dev->partial_rem);
+ usbnet_skb_return(dev, skb2);
+ skb_pull(skb, (dev->partial_rem + 1) & ~1);
+ dev->partial_rem = 0;
+ if (skb->len < 2)
+ return 1;
+ }
+
+ /* a skb can contain multiple packets */
+ while (skb->len > 1) {
+ /* first two bytes are packet length */
+ len = skb->data[0] | (skb->data[1] << 8);
+ skb_pull(skb, 2);
+
+ /* if last packet in the skb, let usbnet process it */
+ if (len == skb->len || len + 1 == skb->len) {
+ skb_trim(skb, len);
+ break;
+ }
+
+ if (len > CX82310_MTU) {
+ dev_err(&dev->udev->dev, "RX packet too long: %d B\n",
+ len);
+ return 0;
+ }
+
+ /* incomplete packet, save it for the next skb */
+ if (len > skb->len) {
+ dev->partial_len = skb->len;
+ dev->partial_rem = len - skb->len;
+ memcpy((void *)dev->partial_data, skb->data,
+ dev->partial_len);
+ skb_pull(skb, skb->len);
+ break;
+ }
+
+ skb2 = alloc_skb(len, GFP_ATOMIC);
+ if (!skb2)
+ return 0;
+ skb_put(skb2, len);
+ memcpy(skb2->data, skb->data, len);
+ /* process the packet */
+ usbnet_skb_return(dev, skb2);
+
+ skb_pull(skb, (len + 1) & ~1);
+ }
+
+ /* let usbnet process the last packet */
+ return 1;
+}
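
The framing decoded above is [len_lo][len_hi][payload][pad?], where odd-length payloads are followed by one pad byte so every length header starts on a 16-bit boundary. A self-contained sketch of that walk over a flat buffer (hypothetical userspace helper; the kernel version above additionally buffers frames that straddle URBs):

    #include <stdint.h>
    #include <stddef.h>

    /* sketch: count complete cx82310-style frames in buf[0..total) */
    static size_t walk_frames(const uint8_t *buf, size_t total)
    {
            size_t off = 0, frames = 0;

            while (off + 2 <= total) {
                    size_t len = buf[off] | (buf[off + 1] << 8);

                    off += 2;
                    if (len > total - off)
                            break;                 /* partial frame: rest arrives in the next URB */
                    /* deliver buf[off .. off+len) here */
                    off += (len + 1) & ~1;         /* skip payload plus odd-length pad byte */
                    frames++;
            }
            return frames;
    }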
+
+/* TX is easy, just add 2 bytes of length at the beginning */
+static struct sk_buff *cx82310_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ gfp_t flags)
+{
+ int len = skb->len;
+
+ if (skb_headroom(skb) < 2) {
+ struct sk_buff *skb2 = skb_copy_expand(skb, 2, 0, flags);
+ dev_kfree_skb_any(skb);
+ skb = skb2;
+ if (!skb)
+ return NULL;
+ }
+ skb_push(skb, 2);
+
+ skb->data[0] = len;
+ skb->data[1] = len >> 8;
+
+ return skb;
+}
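
The encode side mirrors the RX parser exactly; for a frame of length len the two header bytes are little-endian (sketch):

    hdr[0] = len & 0xff;            /* low byte, as written by tx_fixup */
    hdr[1] = (len >> 8) & 0xff;     /* high byte */
    /* decode, as in rx_fixup: len == hdr[0] | (hdr[1] << 8) */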
+
+
+static const struct driver_info cx82310_info = {
+ .description = "Conexant CX82310 USB ethernet",
+ .flags = FLAG_ETHER,
+ .bind = cx82310_bind,
+ .unbind = cx82310_unbind,
+ .rx_fixup = cx82310_rx_fixup,
+ .tx_fixup = cx82310_tx_fixup,
+};
+
+#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
+ USB_DEVICE_ID_MATCH_DEV_INFO, \
+ .idVendor = (vend), \
+ .idProduct = (prod), \
+ .bDeviceClass = (cl), \
+ .bDeviceSubClass = (sc), \
+ .bDeviceProtocol = (pr)
+
+static const struct usb_device_id products[] = {
+ {
+ USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0),
+ .driver_info = (unsigned long) &cx82310_info
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver cx82310_driver = {
+ .name = "cx82310_eth",
+ .id_table = products,
+ .probe = usbnet_probe,
+ .disconnect = usbnet_disconnect,
+ .suspend = usbnet_suspend,
+ .resume = usbnet_resume,
+};
+
+static int __init cx82310_init(void)
+{
+ return usb_register(&cx82310_driver);
+}
+module_init(cx82310_init);
+
+static void __exit cx82310_exit(void)
+{
+ usb_deregister(&cx82310_driver);
+}
+module_exit(cx82310_exit);
+
+MODULE_AUTHOR("Ondrej Zary");
+MODULE_DESCRIPTION("Conexant CX82310-based ADSL router USB ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 6efca66b8766..4f123f869bdc 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -843,16 +843,7 @@ static netdev_tx_t hso_net_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static void hso_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
-{
- struct hso_net *odev = netdev_priv(net);
-
- strncpy(info->driver, driver_name, ETHTOOL_BUSINFO_LEN);
- usb_make_path(odev->parent->usb, info->bus_info, sizeof info->bus_info);
-}
-
static const struct ethtool_ops ops = {
- .get_drvinfo = hso_get_drvinfo,
.get_link = ethtool_op_get_link
};
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 08e7b6abacdd..b2bcf99e6f08 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -58,6 +58,7 @@
#define USB_PRODUCT_IPHONE 0x1290
#define USB_PRODUCT_IPHONE_3G 0x1292
#define USB_PRODUCT_IPHONE_3GS 0x1294
+#define USB_PRODUCT_IPHONE_4 0x1297
#define IPHETH_USBINTF_CLASS 255
#define IPHETH_USBINTF_SUBCLASS 253
@@ -92,6 +93,10 @@ static struct usb_device_id ipheth_table[] = {
USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS,
IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
IPHETH_USBINTF_PROTO) },
+ { USB_DEVICE_AND_INTERFACE_INFO(
+ USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
{ }
};
MODULE_DEVICE_TABLE(usb, ipheth_table);
@@ -424,10 +429,6 @@ static const struct net_device_ops ipheth_netdev_ops = {
.ndo_get_stats = &ipheth_stats,
};
-static struct device_type ipheth_type = {
- .name = "wwan",
-};
-
static int ipheth_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -445,7 +446,7 @@ static int ipheth_probe(struct usb_interface *intf,
netdev->netdev_ops = &ipheth_netdev_ops;
netdev->watchdog_timeo = IPHETH_TX_TIMEOUT;
- strcpy(netdev->name, "wwan%d");
+ strcpy(netdev->name, "eth%d");
dev = netdev_priv(netdev);
dev->udev = udev;
@@ -495,7 +496,6 @@ static int ipheth_probe(struct usb_interface *intf,
SET_NETDEV_DEV(netdev, &intf->dev);
SET_ETHTOOL_OPS(netdev, &ops);
- SET_NETDEV_DEVTYPE(netdev, &ipheth_type);
retval = register_netdev(netdev);
if (retval) {
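
USB_DEVICE_AND_INTERFACE_INFO matches on vendor/product plus interface class/subclass/protocol, so supporting a new model that keeps the same tethering interface is one define plus one table entry; the rest of the hunk drops the wwan%d naming and devtype so the interface enumerates as a plain eth%d. Shape of a future entry (the 0x0000 PID is a placeholder, not a real device id):

    #define USB_PRODUCT_IPHONE_NEW 0x0000   /* hypothetical placeholder */
    { USB_DEVICE_AND_INTERFACE_INFO(
            USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_NEW,
            IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
            IPHETH_USBINTF_PROTO) },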
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 2b7b39cad1ce..5e98643a4a21 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -759,14 +759,6 @@ static int kaweth_close(struct net_device *net)
return 0;
}
-static void kaweth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
- struct kaweth_device *kaweth = netdev_priv(dev);
-
- strlcpy(info->driver, driver_name, sizeof(info->driver));
- usb_make_path(kaweth->dev, info->bus_info, sizeof (info->bus_info));
-}
-
static u32 kaweth_get_link(struct net_device *dev)
{
struct kaweth_device *kaweth = netdev_priv(dev);
@@ -775,7 +767,6 @@ static u32 kaweth_get_link(struct net_device *dev)
}
static const struct ethtool_ops ops = {
- .get_drvinfo = kaweth_get_drvinfo,
.get_link = kaweth_get_link
};
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 7f62e2dea28f..ca7fc9df1ccf 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
static void rx_complete (struct urb *urb);
-static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
+static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
struct sk_buff *skb;
struct skb_data *entry;
@@ -327,7 +327,7 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
usb_free_urb (urb);
- return;
+ return -ENOMEM;
}
skb_reserve (skb, NET_IP_ALIGN);
@@ -357,6 +357,9 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
netif_dbg(dev, ifdown, dev->net, "device gone\n");
netif_device_detach (dev->net);
break;
+ case -EHOSTUNREACH:
+ retval = -ENOLINK;
+ break;
default:
netif_dbg(dev, rx_err, dev->net,
"rx submit, %d\n", retval);
@@ -374,6 +377,7 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
dev_kfree_skb_any (skb);
usb_free_urb (urb);
}
+ return retval;
}
@@ -912,6 +916,7 @@ fail_halt:
/* tasklet could resubmit itself forever if memory is tight */
if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
struct urb *urb = NULL;
+ int resched = 1;
if (netif_running (dev->net))
urb = usb_alloc_urb (0, GFP_KERNEL);
@@ -922,10 +927,12 @@ fail_halt:
status = usb_autopm_get_interface(dev->intf);
if (status < 0)
goto fail_lowmem;
- rx_submit (dev, urb, GFP_KERNEL);
+ if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
+ resched = 0;
usb_autopm_put_interface(dev->intf);
fail_lowmem:
- tasklet_schedule (&dev->bh);
+ if (resched)
+ tasklet_schedule (&dev->bh);
}
}
@@ -1175,8 +1182,11 @@ static void usbnet_bh (unsigned long param)
// don't refill the queue all at once
for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
urb = usb_alloc_urb (0, GFP_ATOMIC);
- if (urb != NULL)
- rx_submit (dev, urb, GFP_ATOMIC);
+ if (urb != NULL) {
+ if (rx_submit (dev, urb, GFP_ATOMIC) ==
+ -ENOLINK)
+ return;
+ }
}
if (temp != dev->rxq.qlen)
netif_dbg(dev, link, dev->net,
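
rx_submit() now tells its callers why a submission failed: -EHOSTUNREACH (the interface is suspended or gone) is mapped to -ENOLINK so the worker and the bh tasklet stop rescheduling themselves, while transient errors such as -ENOMEM keep the existing retry behaviour. The caller contract in miniature (sketch):

    switch (rx_submit(dev, urb, GFP_ATOMIC)) {
    case 0:                 /* queued; rx_complete() fires later */
            break;
    case -ENOLINK:          /* device unreachable: do not reschedule */
            return;
    default:                /* transient failure: let the tasklet retry */
            break;
    }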
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index fd69095ef6e3..6884813b809c 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1954,7 +1954,7 @@ static int velocity_tx_srv(struct velocity_info *vptr)
*/
static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
{
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
if (rd->rdesc1.CSM & CSM_IPKT) {
if (rd->rdesc1.CSM & CSM_IPOK) {
@@ -2824,7 +2824,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
- NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM | NETIF_F_SG;
+ NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
ret = register_netdev(dev);
if (ret < 0)
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index f7b33ae7a703..b5e120b0074b 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1504,22 +1504,25 @@ struct velocity_info {
* addresses on this chain then we use the first - multi-IP WOL is not
* supported.
*
- * CHECK ME: locking
*/
static inline int velocity_get_ip(struct velocity_info *vptr)
{
- struct in_device *in_dev = (struct in_device *) vptr->dev->ip_ptr;
+ struct in_device *in_dev;
struct in_ifaddr *ifa;
+ int res = -ENOENT;
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(vptr->dev);
if (in_dev != NULL) {
ifa = (struct in_ifaddr *) in_dev->ifa_list;
if (ifa != NULL) {
memcpy(vptr->ip_addr, &ifa->ifa_address, 4);
- return 0;
+ res = 0;
}
}
- return -ENOENT;
+ rcu_read_unlock();
+ return res;
}
/**
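
This conversion answers the old "CHECK ME: locking" note: dev->ip_ptr may change under us, so it must be read through __in_dev_get_rcu() inside an RCU read-side section, and any needed address copied out before unlocking. The hdlc_cisco hunk below applies the same pattern. Generic shape (sketch, error handling trimmed):

    __be32 addr = 0;

    rcu_read_lock();
    in_dev = __in_dev_get_rcu(dev);
    if (in_dev && in_dev->ifa_list)
            addr = in_dev->ifa_list->ifa_address;   /* copy while protected */
    rcu_read_unlock();
    /* use only the copied value after unlock */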
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4598e9d2608f..bb6b67f6b0cc 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -705,19 +705,6 @@ static int virtnet_close(struct net_device *dev)
return 0;
}
-static void virtnet_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct virtnet_info *vi = netdev_priv(dev);
- struct virtio_device *vdev = vi->vdev;
-
- strncpy(drvinfo->driver, KBUILD_MODNAME, ARRAY_SIZE(drvinfo->driver));
- strncpy(drvinfo->version, "N/A", ARRAY_SIZE(drvinfo->version));
- strncpy(drvinfo->fw_version, "N/A", ARRAY_SIZE(drvinfo->fw_version));
- strncpy(drvinfo->bus_info, dev_name(&vdev->dev),
- ARRAY_SIZE(drvinfo->bus_info));
-}
-
static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
{
struct virtnet_info *vi = netdev_priv(dev);
@@ -830,7 +817,6 @@ static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
}
static const struct ethtool_ops virtnet_ethtool_ops = {
- .get_drvinfo = virtnet_get_drvinfo,
.set_tx_csum = virtnet_set_tx_csum,
.set_sg = ethtool_op_set_sg,
.set_tso = ethtool_op_set_tso,
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index abe0ff53daf3..198ce92af0c3 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1042,11 +1042,11 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
skb->csum = htons(gdesc->rcd.csum);
skb->ip_summed = CHECKSUM_PARTIAL;
} else {
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
}
}
} else {
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
}
}
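
skb_checksum_none_assert() replaces the explicit CHECKSUM_NONE stores here and in the via-velocity and vxge hunks. A freshly received skb already has ip_summed == CHECKSUM_NONE, so the helper only asserts that invariant on debug builds instead of rewriting the field; roughly:

    static inline void skb_checksum_none_assert(struct sk_buff *skb)
    {
    #ifdef DEBUG
            BUG_ON(skb->ip_summed != CHECKSUM_NONE);
    #endif
    }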
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index c7c5605b3728..5378b849f54f 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -501,7 +501,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
vxge_rx_complete(ring, skb, ext_info.vlan,
pkt_length, &ext_info);
@@ -2159,8 +2159,8 @@ start:
/* Alarm MSIX Vectors count */
vdev->intr_cnt++;
- vdev->entries = kzalloc(vdev->intr_cnt * sizeof(struct msix_entry),
- GFP_KERNEL);
+ vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry),
+ GFP_KERNEL);
if (!vdev->entries) {
vxge_debug_init(VXGE_ERR,
"%s: memory allocation failed",
@@ -2169,9 +2169,9 @@ start:
goto alloc_entries_failed;
}
- vdev->vxge_entries =
- kzalloc(vdev->intr_cnt * sizeof(struct vxge_msix_entry),
- GFP_KERNEL);
+ vdev->vxge_entries = kcalloc(vdev->intr_cnt,
+ sizeof(struct vxge_msix_entry),
+ GFP_KERNEL);
if (!vdev->vxge_entries) {
vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
VXGE_DRIVER_NAME);
@@ -2914,26 +2914,18 @@ static int vxge_change_mtu(struct net_device *dev, int new_mtu)
}
/**
- * vxge_get_stats
+ * vxge_get_stats64
* @dev: pointer to the device structure
+ * @stats: pointer to struct rtnl_link_stats64
*
- * Updates the device statistics structure. This function updates the device
- * statistics structure in the net_device structure and returns a pointer
- * to the same.
*/
-static struct net_device_stats *
-vxge_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *
+vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
- struct vxgedev *vdev;
- struct net_device_stats *net_stats;
+ struct vxgedev *vdev = netdev_priv(dev);
int k;
- vdev = netdev_priv(dev);
-
- net_stats = &vdev->stats.net_stats;
-
- memset(net_stats, 0, sizeof(struct net_device_stats));
-
+ /* net_stats already zeroed by caller */
for (k = 0; k < vdev->no_of_vpath; k++) {
net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms;
net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
@@ -3102,7 +3094,7 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
static const struct net_device_ops vxge_netdev_ops = {
.ndo_open = vxge_open,
.ndo_stop = vxge_close,
- .ndo_get_stats = vxge_get_stats,
+ .ndo_get_stats64 = vxge_get_stats64,
.ndo_start_xmit = vxge_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_multicast_list = vxge_set_multicast,
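
ndo_get_stats64 hands the driver a caller-owned, pre-zeroed struct rtnl_link_stats64, which is why the private net_device_stats copy can be deleted from vxge_sw_stats below: the handler just accumulates per-vpath counters into the supplied struct and returns the same pointer. Minimal shape of such a handler (sketch, hypothetical driver names):

    static struct rtnl_link_stats64 *
    foo_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *s)
    {
            struct foo_priv *p = netdev_priv(dev);
            int i;

            for (i = 0; i < p->nqueues; i++) {      /* s arrives zeroed */
                    s->rx_packets += p->q[i].rx_packets;
                    s->tx_packets += p->q[i].tx_packets;
            }
            return s;
    }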
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 2e3b064b8e4b..d4be07eaacd7 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -172,7 +172,6 @@ struct vxge_msix_entry {
struct vxge_sw_stats {
/* Network Stats (interface stats) */
- struct net_device_stats net_stats;
/* Tx */
u64 tx_frms;
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index 0bd898c94759..4ac85a09c5a6 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -264,7 +264,7 @@ static int c101_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
new_line.clock_type != CLOCK_TXFROMRX &&
new_line.clock_type != CLOCK_INT &&
new_line.clock_type != CLOCK_TXINT)
- return -EINVAL; /* No such clock setting */
+ return -EINVAL; /* No such clock setting */
if (new_line.loopback != 0 && new_line.loopback != 1)
return -EINVAL;
diff --git a/drivers/net/wan/cycx_drv.c b/drivers/net/wan/cycx_drv.c
index a5ddc6c8963e..164c3624ba89 100644
--- a/drivers/net/wan/cycx_drv.c
+++ b/drivers/net/wan/cycx_drv.c
@@ -73,7 +73,7 @@ static int reset_cyc2x(void __iomem *addr);
static int detect_cyc2x(void __iomem *addr);
/* Miscellaneous functions */
-static int get_option_index(long *optlist, long optval);
+static int get_option_index(const long *optlist, long optval);
static u16 checksum(u8 *buf, u32 len);
#define wait_cyc(addr) cycx_exec(addr + CMD_OFFSET)
@@ -81,23 +81,23 @@ static u16 checksum(u8 *buf, u32 len);
/* Global Data */
/* private data */
-static char modname[] = "cycx_drv";
-static char fullname[] = "Cyclom 2X Support Module";
-static char copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo "
+static const char modname[] = "cycx_drv";
+static const char fullname[] = "Cyclom 2X Support Module";
+static const char copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo "
"<acme@conectiva.com.br>";
/* Hardware configuration options.
* These are arrays of configuration options used by verification routines.
* The first element of each array is its size (i.e. number of options).
*/
-static long cyc2x_dpmbase_options[] = {
+static const long cyc2x_dpmbase_options[] = {
20,
0xA0000, 0xA4000, 0xA8000, 0xAC000, 0xB0000, 0xB4000, 0xB8000,
0xBC000, 0xC0000, 0xC4000, 0xC8000, 0xCC000, 0xD0000, 0xD4000,
0xD8000, 0xDC000, 0xE0000, 0xE4000, 0xE8000, 0xEC000
};
-static long cycx_2x_irq_options[] = { 7, 3, 5, 9, 10, 11, 12, 15 };
+static const long cycx_2x_irq_options[] = { 7, 3, 5, 9, 10, 11, 12, 15 };
/* Kernel Loadable Module Entry Points */
/* Module 'insert' entry point.
@@ -529,7 +529,7 @@ static int detect_cyc2x(void __iomem *addr)
/* Miscellaneous */
/* Get option's index into the options list.
* Return option's index (1 .. N) or zero if option is invalid. */
-static int get_option_index(long *optlist, long optval)
+static int get_option_index(const long *optlist, long optval)
{
int i = 1;
diff --git a/drivers/net/wan/cycx_main.c b/drivers/net/wan/cycx_main.c
index a0e8611ad8e8..859dba9b972e 100644
--- a/drivers/net/wan/cycx_main.c
+++ b/drivers/net/wan/cycx_main.c
@@ -81,9 +81,9 @@ static irqreturn_t cycx_isr(int irq, void *dev_id);
*/
/* private data */
-static char cycx_drvname[] = "cyclomx";
-static char cycx_fullname[] = "CYCLOM 2X(tm) Sync Card Driver";
-static char cycx_copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo "
+static const char cycx_drvname[] = "cyclomx";
+static const char cycx_fullname[] = "CYCLOM 2X(tm) Sync Card Driver";
+static const char cycx_copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo "
"<acme@conectiva.com.br>";
static int cycx_ncards = CONFIG_CYCX_CARDS;
static struct cycx_device *cycx_card_array; /* adapter data space */
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index ad7719fe6d0a..e050bd65e037 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -885,20 +885,21 @@ fst_rx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
* Receive a frame through the DMA
*/
static inline void
-fst_rx_dma(struct fst_card_info *card, unsigned char *skb,
- unsigned char *mem, int len)
+fst_rx_dma(struct fst_card_info *card, dma_addr_t skb,
+ dma_addr_t mem, int len)
{
/*
* This routine will setup the DMA and start it
*/
- dbg(DBG_RX, "In fst_rx_dma %p %p %d\n", skb, mem, len);
+ dbg(DBG_RX, "In fst_rx_dma %lx %lx %d\n",
+ (unsigned long) skb, (unsigned long) mem, len);
if (card->dmarx_in_progress) {
dbg(DBG_ASS, "In fst_rx_dma while dma in progress\n");
}
- outl((unsigned long) skb, card->pci_conf + DMAPADR0); /* Copy to here */
- outl((unsigned long) mem, card->pci_conf + DMALADR0); /* from here */
+ outl(skb, card->pci_conf + DMAPADR0); /* Copy to here */
+ outl(mem, card->pci_conf + DMALADR0); /* from here */
outl(len, card->pci_conf + DMASIZ0); /* for this length */
outl(0x00000000c, card->pci_conf + DMADPR0); /* In this direction */
@@ -1309,8 +1310,8 @@ fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port)
card->dma_port_rx = port;
card->dma_len_rx = len;
card->dma_rxpos = rxp;
- fst_rx_dma(card, (char *) card->rx_dma_handle_card,
- (char *) BUF_OFFSET(rxBuffer[pi][rxp][0]), len);
+ fst_rx_dma(card, card->rx_dma_handle_card,
+ BUF_OFFSET(rxBuffer[pi][rxp][0]), len);
}
if (rxp != port->rxpos) {
dbg(DBG_ASS, "About to increment rxpos by more than 1\n");
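
The farsync change stops laundering DMA bus addresses through char pointers: dma_addr_t may be wider than a pointer (64-bit dma_addr_t on a 32-bit PAE kernel, for instance), so the old pointer casts could truncate the handle or at least trigger cast warnings. Keeping the dma_addr_t type end to end means the value is only narrowed at the final 32-bit register write. In miniature (sketch):

    /* before: cast chain truncates/warns when dma_addr_t != pointer size */
    outl((unsigned long)(unsigned char *)dma_handle, port);
    /* after: handle stays dma_addr_t until the register write */
    outl(dma_handle, port);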
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index b38ffa149aba..b1e5e5b69c2a 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -191,7 +191,8 @@ static int cisco_rx(struct sk_buff *skb)
switch (ntohl (cisco_data->type)) {
case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */
- in_dev = dev->ip_ptr;
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(dev);
addr = 0;
mask = ~cpu_to_be32(0); /* is the mask correct? */
@@ -211,6 +212,7 @@ static int cisco_rx(struct sk_buff *skb)
cisco_keepalive_send(dev, CISCO_ADDR_REPLY,
addr, mask);
}
+ rcu_read_unlock();
dev_kfree_skb_any(skb);
return NET_RX_SUCCESS;
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 88e363033e23..6c571e198835 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -396,7 +396,7 @@ static void hss_config(struct port *port)
msg.cmd = PORT_CONFIG_WRITE;
msg.hss_port = port->id;
msg.index = HSS_CONFIG_TX_PCR;
- msg.data32 = PCR_FRM_SYNC_OUTPUT_RISING | PCR_MSB_ENDIAN |
+ msg.data32 = PCR_FRM_PULSE_DISABLED | PCR_MSB_ENDIAN |
PCR_TX_DATA_ENABLE | PCR_SOF_NO_FBIT;
if (port->clock_type == CLOCK_INT)
msg.data32 |= PCR_SYNC_CLK_DIR_OUTPUT;
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 4d4dc38c7290..7f5bb913c8b9 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -46,7 +46,7 @@
#include <net/x25device.h>
-static char bcast_addr[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+static const u8 bcast_addr[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
/* If this number is made larger, check that the temporary string buffer
* in lapbeth_new_device is large enough to store the probe device name.*/
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index e2c6f7f4f51c..43af85b8e45e 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1105,7 +1105,7 @@ static int lmc_open(struct net_device *dev)
init_timer (&sc->timer);
sc->timer.expires = jiffies + HZ;
sc->timer.data = (unsigned long) dev;
- sc->timer.function = &lmc_watchdog;
+ sc->timer.function = lmc_watchdog;
add_timer (&sc->timer);
lmc_trace(dev, "lmc_open out");
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index 5394b51bdb2f..7a3720f09ce3 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -282,7 +282,7 @@ static int n2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
new_line.clock_type != CLOCK_TXFROMRX &&
new_line.clock_type != CLOCK_INT &&
new_line.clock_type != CLOCK_TXINT)
- return -EINVAL; /* No such clock setting */
+ return -EINVAL; /* No such clock setting */
if (new_line.loopback != 0 && new_line.loopback != 1)
return -EINVAL;
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index c6aa66e5b52f..fbf1175a07f1 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -1,5 +1,5 @@
#define USE_PCI_CLOCK
-static char rcsid[] =
+static const char rcsid[] =
"Revision: 3.4.5 Date: 2002/03/07 ";
/*
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index e2cff64a446a..fd7375955e41 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -220,7 +220,7 @@ static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
new_line.clock_type != CLOCK_TXFROMRX &&
new_line.clock_type != CLOCK_INT &&
new_line.clock_type != CLOCK_TXINT)
- return -EINVAL; /* No such clock setting */
+ return -EINVAL; /* No such clock setting */
if (new_line.loopback != 0 && new_line.loopback != 1)
return -EINVAL;
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index fbf5e843d48c..93956861ea21 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -766,7 +766,7 @@ irqreturn_t z8530_interrupt(int irq, void *dev_id)
EXPORT_SYMBOL(z8530_interrupt);
-static char reg_init[16]=
+static const u8 reg_init[16]=
{
0,0,0,0,
0,0,0,0,
@@ -1206,7 +1206,7 @@ EXPORT_SYMBOL(z8530_sync_txdma_close);
* it exists...
*/
-static char *z8530_type_name[]={
+static const char *z8530_type_name[]={
"Z8530",
"Z85C30",
"Z85230"
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
index eb72c67699ab..f1549fff0edc 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/wd.c
@@ -342,10 +342,10 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
printk(" %s, IRQ %d, shared memory at %#lx-%#lx.\n",
model_name, dev->irq, dev->mem_start, dev->mem_end-1);
- ei_status.reset_8390 = &wd_reset_8390;
- ei_status.block_input = &wd_block_input;
- ei_status.block_output = &wd_block_output;
- ei_status.get_8390_hdr = &wd_get_8390_hdr;
+ ei_status.reset_8390 = wd_reset_8390;
+ ei_status.block_input = wd_block_input;
+ ei_status.block_output = wd_block_output;
+ ei_status.get_8390_hdr = wd_get_8390_hdr;
dev->netdev_ops = &wd_netdev_ops;
NS8390_init(dev, 0);
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index c7528e5794d1..924ed095dd99 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2723,9 +2723,8 @@ static int airo_networks_allocate(struct airo_info *ai)
if (ai->networks)
return 0;
- ai->networks =
- kzalloc(AIRO_MAX_NETWORK_COUNT * sizeof(BSSListElement),
- GFP_KERNEL);
+ ai->networks = kcalloc(AIRO_MAX_NETWORK_COUNT, sizeof(BSSListElement),
+ GFP_KERNEL);
if (!ai->networks) {
airo_print_warn("", "Out of memory allocating beacons");
return -ENOMEM;
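
These kzalloc(n * size) → kcalloc(n, size) conversions (also in the vxge, b43 and ipw2x00 hunks) are an overflow-safety cleanup: kcalloc rejects any n whose multiplication by size would wrap, returning NULL instead of silently allocating a short buffer, whereas the open-coded multiply has no guard. Roughly:

    static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
    {
            if (size != 0 && n > ULONG_MAX / size)
                    return NULL;            /* n * size would overflow */
            return kzalloc(n * size, flags);
    }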
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c
index 33bdc6a84e81..9a121a5b787c 100644
--- a/drivers/net/wireless/airo_cs.c
+++ b/drivers/net/wireless/airo_cs.c
@@ -32,7 +32,6 @@
#include <linux/timer.h>
#include <linux/netdevice.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -155,8 +154,6 @@ static int airo_cs_config_check(struct pcmcia_device *p_dev,
unsigned int vcc,
void *priv_data)
{
- win_req_t *req = priv_data;
-
if (cfg->index == 0)
return -ENODEV;
@@ -176,52 +173,25 @@ static int airo_cs_config_check(struct pcmcia_device *p_dev,
p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
- p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
+ p_dev->resource[0]->end = p_dev->resource[1]->end = 0;
if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- if (!(io->flags & CISTPL_IO_8BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- if (!(io->flags & CISTPL_IO_16BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.BasePort1 = io->win[0].base;
- p_dev->io.NumPorts1 = io->win[0].len;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |=
+ pcmcia_io_cfg_data_width(io->flags);
+ p_dev->resource[0]->start = io->win[0].base;
+ p_dev->resource[0]->end = io->win[0].len;
if (io->nwin > 1) {
- p_dev->io.Attributes2 = p_dev->io.Attributes1;
- p_dev->io.BasePort2 = io->win[1].base;
- p_dev->io.NumPorts2 = io->win[1].len;
+ p_dev->resource[1]->flags = p_dev->resource[0]->flags;
+ p_dev->resource[1]->start = io->win[1].base;
+ p_dev->resource[1]->end = io->win[1].len;
}
}
/* This reserves IO space but doesn't actually enable it */
- if (pcmcia_request_io(p_dev, &p_dev->io) != 0)
+ if (pcmcia_request_io(p_dev) != 0)
return -ENODEV;
- /*
- Now set up a common memory window, if needed. There is room
- in the struct pcmcia_device structure for one memory window handle,
- but if the base addresses need to be saved, or if multiple
- windows are needed, the info should go in the private data
- structure for this device.
-
- Note that the memory window base is a physical address, and
- needs to be mapped to virtual space with ioremap() before it
- is used.
- */
- if ((cfg->mem.nwin > 0) || (dflt->mem.nwin > 0)) {
- cistpl_mem_t *mem = (cfg->mem.nwin) ? &cfg->mem : &dflt->mem;
- memreq_t map;
- req->Attributes = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM;
- req->Base = mem->win[0].host_addr;
- req->Size = mem->win[0].len;
- req->AccessSpeed = 0;
- if (pcmcia_request_window(p_dev, req, &p_dev->win) != 0)
- return -ENODEV;
- map.Page = 0;
- map.CardOffset = mem->win[0].card_addr;
- if (pcmcia_map_mem_page(p_dev, p_dev->win, &map) != 0)
- return -ENODEV;
- }
/* If we got this far, we're cool! */
return 0;
}
@@ -230,17 +200,12 @@ static int airo_cs_config_check(struct pcmcia_device *p_dev,
static int airo_config(struct pcmcia_device *link)
{
local_info_t *dev;
- win_req_t *req;
int ret;
dev = link->priv;
dev_dbg(&link->dev, "airo_config\n");
- req = kzalloc(sizeof(win_req_t), GFP_KERNEL);
- if (!req)
- return -ENOMEM;
-
/*
* In this loop, we scan the CIS for configuration table
* entries, each of which describes a valid card
@@ -255,7 +220,7 @@ static int airo_config(struct pcmcia_device *link)
* and most client drivers will only use the CIS to fill in
* implementation-defined details.
*/
- ret = pcmcia_loop_config(link, airo_cs_config_check, req);
+ ret = pcmcia_loop_config(link, airo_cs_config_check, NULL);
if (ret)
goto failed;
@@ -272,7 +237,7 @@ static int airo_config(struct pcmcia_device *link)
goto failed;
((local_info_t *)link->priv)->eth_dev =
init_airo_card(link->irq,
- link->io.BasePort1, 1, &link->dev);
+ link->resource[0]->start, 1, &link->dev);
if (!((local_info_t *)link->priv)->eth_dev)
goto failed;
@@ -282,22 +247,15 @@ static int airo_config(struct pcmcia_device *link)
if (link->conf.Vpp)
printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
printk(", irq %d", link->irq);
- if (link->io.NumPorts1)
- printk(", io 0x%04x-0x%04x", link->io.BasePort1,
- link->io.BasePort1+link->io.NumPorts1-1);
- if (link->io.NumPorts2)
- printk(" & 0x%04x-0x%04x", link->io.BasePort2,
- link->io.BasePort2+link->io.NumPorts2-1);
- if (link->win)
- printk(", mem 0x%06lx-0x%06lx", req->Base,
- req->Base+req->Size-1);
+ if (link->resource[0])
+ printk(" & %pR", link->resource[0]);
+ if (link->resource[1])
+ printk(" & %pR", link->resource[1]);
printk("\n");
- kfree(req);
return 0;
failed:
airo_release(link);
- kfree(req);
return -ENODEV;
} /* airo_config */
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 504c6d648ecf..95072db0ec21 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -673,6 +673,10 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
PCI_DMA_TODEVICE);
rate = ieee80211_get_tx_rate(sc->hw, info);
+ if (!rate) {
+ ret = -EINVAL;
+ goto err_unmap;
+ }
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
flags |= AR5K_TXDESC_NOACK;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index e2ae98feb0be..c4182359bee4 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -797,7 +797,7 @@ static bool ar9300_uncompress_block(struct ath_hw *ah,
length = block[it+1];
length &= 0xff;
- if (length > 0 && spot >= 0 && spot+length < mdataSize) {
+ if (length > 0 && spot >= 0 && spot+length <= mdataSize) {
ath_print(common, ATH_DBG_EEPROM,
"Restore at %d: spot=%d "
"offset=%d length=%d\n",
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index e583421fcaa6..3030564a0f21 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -62,7 +62,7 @@
#define SD_NO_CTL 0xE0
#define NO_CTL 0xff
-#define CTL_MODE_M 7
+#define CTL_MODE_M 0xf
#define CTL_11A 0
#define CTL_11B 1
#define CTL_11G 2
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index a1c39526161a..345dd9721b41 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -31,7 +31,6 @@ enum ctl_group {
#define NO_CTL 0xff
#define SD_NO_CTL 0xE0
#define NO_CTL 0xff
-#define CTL_MODE_M 7
#define CTL_11A 0
#define CTL_11B 1
#define CTL_11G 2
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
index c2746fc7f2be..3b632161c106 100644
--- a/drivers/net/wireless/atmel_cs.c
+++ b/drivers/net/wireless/atmel_cs.c
@@ -42,7 +42,6 @@
#include <linux/moduleparam.h>
#include <linux/device.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -191,25 +190,23 @@ static int atmel_config_check(struct pcmcia_device *p_dev,
p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
- p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
+ p_dev->resource[0]->end = p_dev->resource[1]->end = 0;
if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- if (!(io->flags & CISTPL_IO_8BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- if (!(io->flags & CISTPL_IO_16BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.BasePort1 = io->win[0].base;
- p_dev->io.NumPorts1 = io->win[0].len;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |=
+ pcmcia_io_cfg_data_width(io->flags);
+ p_dev->resource[0]->start = io->win[0].base;
+ p_dev->resource[0]->end = io->win[0].len;
if (io->nwin > 1) {
- p_dev->io.Attributes2 = p_dev->io.Attributes1;
- p_dev->io.BasePort2 = io->win[1].base;
- p_dev->io.NumPorts2 = io->win[1].len;
+ p_dev->resource[1]->flags = p_dev->resource[0]->flags;
+ p_dev->resource[1]->start = io->win[1].base;
+ p_dev->resource[1]->end = io->win[1].len;
}
}
/* This reserves IO space but doesn't actually enable it */
- return pcmcia_request_io(p_dev, &p_dev->io);
+ return pcmcia_request_io(p_dev);
}
static int atmel_config(struct pcmcia_device *link)
@@ -254,7 +251,7 @@ static int atmel_config(struct pcmcia_device *link)
((local_info_t*)link->priv)->eth_dev =
init_atmel_card(link->irq,
- link->io.BasePort1,
+ link->resource[0]->start,
did ? did->driver_info : ATMEL_FW_TYPE_NONE,
&link->dev,
card_present,
diff --git a/drivers/net/wireless/b43/pcmcia.c b/drivers/net/wireless/b43/pcmcia.c
index 0e99b634267c..dfbc41d431ff 100644
--- a/drivers/net/wireless/b43/pcmcia.c
+++ b/drivers/net/wireless/b43/pcmcia.c
@@ -26,7 +26,6 @@
#include <linux/ssb/ssb.h>
#include <linux/slab.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ciscode.h>
@@ -65,7 +64,6 @@ static int __devinit b43_pcmcia_probe(struct pcmcia_device *dev)
{
struct ssb_bus *ssb;
win_req_t win;
- memreq_t mem;
int err = -ENOMEM;
int res = 0;
@@ -78,12 +76,7 @@ static int __devinit b43_pcmcia_probe(struct pcmcia_device *dev)
dev->conf.Attributes = CONF_ENABLE_IRQ;
dev->conf.IntType = INT_MEMORY_AND_IO;
- dev->io.BasePort2 = 0;
- dev->io.NumPorts2 = 0;
- dev->io.Attributes2 = 0;
-
- win.Attributes = WIN_ADDR_SPACE_MEM | WIN_MEMORY_TYPE_CM |
- WIN_ENABLE | WIN_DATA_WIDTH_16 |
+ win.Attributes = WIN_ENABLE | WIN_DATA_WIDTH_16 |
WIN_USE_WAIT;
win.Base = 0;
win.Size = SSB_CORE_SIZE;
@@ -92,9 +85,7 @@ static int __devinit b43_pcmcia_probe(struct pcmcia_device *dev)
if (res != 0)
goto err_kfree_ssb;
- mem.CardOffset = 0;
- mem.Page = 0;
- res = pcmcia_map_mem_page(dev, dev->win, &mem);
+ res = pcmcia_map_mem_page(dev, dev->win, 0);
if (res != 0)
goto err_disable;
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 5f0ce35bbe27..2466c0a52e5d 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -1183,7 +1183,7 @@ static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max,
len = bw << 1;
}
- samples = kzalloc(len * sizeof(struct b43_c32), GFP_KERNEL);
+ samples = kcalloc(len, sizeof(struct b43_c32), GFP_KERNEL);
if (!samples) {
b43err(dev->wl, "allocation for samples generation failed\n");
return 0;
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index 29b31a694b59..ba54d1b04d22 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -12,7 +12,6 @@
#include <linux/wireless.h>
#include <net/iw_handler.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -23,7 +22,7 @@
#include "hostap_wlan.h"
-static dev_info_t dev_info = "hostap_cs";
+static char *dev_info = "hostap_cs";
MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("Support for Intersil Prism2-based 802.11 wireless LAN "
@@ -225,27 +224,18 @@ static int prism2_pccard_card_present(local_info_t *local)
static void sandisk_set_iobase(local_info_t *local)
{
int res;
- conf_reg_t reg;
struct hostap_cs_priv *hw_priv = local->hw_priv;
- reg.Function = 0;
- reg.Action = CS_WRITE;
- reg.Offset = 0x10; /* 0x3f0 IO base 1 */
- reg.Value = hw_priv->link->io.BasePort1 & 0x00ff;
- res = pcmcia_access_configuration_register(hw_priv->link,
- &reg);
+ res = pcmcia_write_config_byte(hw_priv->link, 0x10,
+ hw_priv->link->resource[0]->start & 0x00ff);
if (res != 0) {
printk(KERN_DEBUG "Prism3 SanDisk - failed to set I/O base 0 -"
" res=%d\n", res);
}
udelay(10);
- reg.Function = 0;
- reg.Action = CS_WRITE;
- reg.Offset = 0x12; /* 0x3f2 IO base 2 */
- reg.Value = (hw_priv->link->io.BasePort1 & 0xff00) >> 8;
- res = pcmcia_access_configuration_register(hw_priv->link,
- &reg);
+ res = pcmcia_write_config_byte(hw_priv->link, 0x12,
+ (hw_priv->link->resource[0]->start >> 8) & 0x00ff);
if (res != 0) {
printk(KERN_DEBUG "Prism3 SanDisk - failed to set I/O base 1 -"
" res=%d\n", res);
@@ -271,12 +261,11 @@ static void sandisk_write_hcr(local_info_t *local, int hcr)
static int sandisk_enable_wireless(struct net_device *dev)
{
int res, ret = 0;
- conf_reg_t reg;
struct hostap_interface *iface = netdev_priv(dev);
local_info_t *local = iface->local;
struct hostap_cs_priv *hw_priv = local->hw_priv;
- if (hw_priv->link->io.NumPorts1 < 0x42) {
+ if (resource_size(hw_priv->link->resource[0]) < 0x42) {
/* Not enough ports to be SanDisk multi-function card */
ret = -ENODEV;
goto done;
@@ -298,12 +287,8 @@ static int sandisk_enable_wireless(struct net_device *dev)
" - using vendor-specific initialization\n", dev->name);
hw_priv->sandisk_connectplus = 1;
- reg.Function = 0;
- reg.Action = CS_WRITE;
- reg.Offset = CISREG_COR;
- reg.Value = COR_SOFT_RESET;
- res = pcmcia_access_configuration_register(hw_priv->link,
- &reg);
+ res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR,
+ COR_SOFT_RESET);
if (res != 0) {
printk(KERN_DEBUG "%s: SanDisk - COR sreset failed (%d)\n",
dev->name, res);
@@ -311,16 +296,13 @@ static int sandisk_enable_wireless(struct net_device *dev)
}
mdelay(5);
- reg.Function = 0;
- reg.Action = CS_WRITE;
- reg.Offset = CISREG_COR;
/*
* Do not enable interrupts here to avoid some bogus events. Interrupts
* will be enabled during the first cor_sreset call.
*/
- reg.Value = COR_LEVEL_REQ | 0x8 | COR_ADDR_DECODE | COR_FUNC_ENA;
- res = pcmcia_access_configuration_register(hw_priv->link,
- &reg);
+ res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR,
+ (COR_LEVEL_REQ | 0x8 | COR_ADDR_DECODE |
+ COR_FUNC_ENA));
if (res != 0) {
printk(KERN_DEBUG "%s: SanDisk - COR sreset failed (%d)\n",
dev->name, res);
@@ -343,30 +325,23 @@ done:
static void prism2_pccard_cor_sreset(local_info_t *local)
{
int res;
- conf_reg_t reg;
+ u8 val;
struct hostap_cs_priv *hw_priv = local->hw_priv;
if (!prism2_pccard_card_present(local))
return;
- reg.Function = 0;
- reg.Action = CS_READ;
- reg.Offset = CISREG_COR;
- reg.Value = 0;
- res = pcmcia_access_configuration_register(hw_priv->link,
- &reg);
+ res = pcmcia_read_config_byte(hw_priv->link, CISREG_COR, &val);
if (res != 0) {
printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 1 (%d)\n",
res);
return;
}
printk(KERN_DEBUG "prism2_pccard_cor_sreset: original COR %02x\n",
- reg.Value);
+ val);
- reg.Action = CS_WRITE;
- reg.Value |= COR_SOFT_RESET;
- res = pcmcia_access_configuration_register(hw_priv->link,
- &reg);
+ val |= COR_SOFT_RESET;
+ res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR, val);
if (res != 0) {
printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 2 (%d)\n",
res);
@@ -375,11 +350,10 @@ static void prism2_pccard_cor_sreset(local_info_t *local)
mdelay(hw_priv->sandisk_connectplus ? 5 : 2);
- reg.Value &= ~COR_SOFT_RESET;
+ val &= ~COR_SOFT_RESET;
if (hw_priv->sandisk_connectplus)
- reg.Value |= COR_IREQ_ENA;
- res = pcmcia_access_configuration_register(hw_priv->link,
- &reg);
+ val |= COR_IREQ_ENA;
+ res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR, val);
if (res != 0) {
printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 3 (%d)\n",
res);
@@ -396,8 +370,7 @@ static void prism2_pccard_cor_sreset(local_info_t *local)
static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
{
int res;
- conf_reg_t reg;
- int old_cor;
+ u8 old_cor;
struct hostap_cs_priv *hw_priv = local->hw_priv;
if (!prism2_pccard_card_present(local))
@@ -408,25 +381,17 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
return;
}
- reg.Function = 0;
- reg.Action = CS_READ;
- reg.Offset = CISREG_COR;
- reg.Value = 0;
- res = pcmcia_access_configuration_register(hw_priv->link,
- &reg);
+ res = pcmcia_read_config_byte(hw_priv->link, CISREG_COR, &old_cor);
if (res != 0) {
printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 1 "
"(%d)\n", res);
return;
}
printk(KERN_DEBUG "prism2_pccard_genesis_sreset: original COR %02x\n",
- reg.Value);
- old_cor = reg.Value;
+ old_cor);
- reg.Action = CS_WRITE;
- reg.Value |= COR_SOFT_RESET;
- res = pcmcia_access_configuration_register(hw_priv->link,
- &reg);
+ res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR,
+ old_cor | COR_SOFT_RESET);
if (res != 0) {
printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 2 "
"(%d)\n", res);
@@ -436,11 +401,7 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
mdelay(10);
/* Setup Genesis mode */
- reg.Action = CS_WRITE;
- reg.Value = hcr;
- reg.Offset = CISREG_CCSR;
- res = pcmcia_access_configuration_register(hw_priv->link,
- &reg);
+ res = pcmcia_write_config_byte(hw_priv->link, CISREG_CCSR, hcr);
if (res != 0) {
printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 3 "
"(%d)\n", res);
@@ -448,11 +409,8 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
}
mdelay(10);
- reg.Action = CS_WRITE;
- reg.Offset = CISREG_COR;
- reg.Value = old_cor & ~COR_SOFT_RESET;
- res = pcmcia_access_configuration_register(hw_priv->link,
- &reg);
+ res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR,
+ old_cor & ~COR_SOFT_RESET);
if (res != 0) {
printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 4 "
"(%d)\n", res);
@@ -561,30 +519,24 @@ static int prism2_config_check(struct pcmcia_device *p_dev,
PDEBUG(DEBUG_EXTRA, "IO window settings: cfg->io.nwin=%d "
"dflt->io.nwin=%d\n",
cfg->io.nwin, dflt->io.nwin);
- p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
+ p_dev->resource[0]->end = p_dev->resource[1]->end = 0;
if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- PDEBUG(DEBUG_EXTRA, "io->flags = 0x%04X, "
- "io.base=0x%04x, len=%d\n", io->flags,
- io->win[0].base, io->win[0].len);
- if (!(io->flags & CISTPL_IO_8BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- if (!(io->flags & CISTPL_IO_16BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.IOAddrLines = io->flags &
- CISTPL_IO_LINES_MASK;
- p_dev->io.BasePort1 = io->win[0].base;
- p_dev->io.NumPorts1 = io->win[0].len;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |=
+ pcmcia_io_cfg_data_width(io->flags);
+ p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+ p_dev->resource[0]->start = io->win[0].base;
+ p_dev->resource[0]->end = io->win[0].len;
if (io->nwin > 1) {
- p_dev->io.Attributes2 = p_dev->io.Attributes1;
- p_dev->io.BasePort2 = io->win[1].base;
- p_dev->io.NumPorts2 = io->win[1].len;
+ p_dev->resource[1]->flags = p_dev->resource[0]->flags;
+ p_dev->resource[1]->start = io->win[1].base;
+ p_dev->resource[1]->end = io->win[1].len;
}
}
/* This reserves IO space but doesn't actually enable it */
- return pcmcia_request_io(p_dev, &p_dev->io);
+ return pcmcia_request_io(p_dev);
}
static int prism2_config(struct pcmcia_device *link)
@@ -646,7 +598,7 @@ static int prism2_config(struct pcmcia_device *link)
goto failed_unlock;
dev->irq = link->irq;
- dev->base_addr = link->io.BasePort1;
+ dev->base_addr = link->resource[0]->start;
spin_unlock_irqrestore(&local->irq_init_lock, flags);
@@ -658,12 +610,10 @@ static int prism2_config(struct pcmcia_device *link)
link->conf.Vpp % 10);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
printk(", irq %d", link->irq);
- if (link->io.NumPorts1)
- printk(", io 0x%04x-0x%04x", link->io.BasePort1,
- link->io.BasePort1+link->io.NumPorts1-1);
- if (link->io.NumPorts2)
- printk(" & 0x%04x-0x%04x", link->io.BasePort2,
- link->io.BasePort2+link->io.NumPorts2-1);
+ if (link->resource[0])
+ printk(" & %pR", link->resource[0]);
+ if (link->resource[1])
+ printk(" & %pR", link->resource[1]);
printk("\n");
local->shutdown = 0;
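
The conf_reg_t / pcmcia_access_configuration_register boilerplate collapses into byte-granular helpers throughout these _cs hunks: pcmcia_read_config_byte() and pcmcia_write_config_byte() take the register offset directly. A COR soft reset then shrinks to (sketch, mirroring the sequence above):

    u8 cor;

    if (pcmcia_read_config_byte(link, CISREG_COR, &cor))
            return;
    pcmcia_write_config_byte(link, CISREG_COR, cor | COR_SOFT_RESET);
    mdelay(2);                              /* hold the reset briefly */
    pcmcia_write_config_byte(link, CISREG_COR, cor & ~COR_SOFT_RESET);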
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index ed69e60f86aa..61915f371416 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1921,9 +1921,9 @@ static int ipw2100_net_init(struct net_device *dev)
bg_band->band = IEEE80211_BAND_2GHZ;
bg_band->n_channels = geo->bg_channels;
- bg_band->channels =
- kzalloc(geo->bg_channels *
- sizeof(struct ieee80211_channel), GFP_KERNEL);
+ bg_band->channels = kcalloc(geo->bg_channels,
+ sizeof(struct ieee80211_channel),
+ GFP_KERNEL);
if (!bg_band->channels) {
ipw2100_down(priv);
return -ENOMEM;
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index cb2552a6777c..0f2508384c75 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -11467,9 +11467,9 @@ static int ipw_net_init(struct net_device *dev)
bg_band->band = IEEE80211_BAND_2GHZ;
bg_band->n_channels = geo->bg_channels;
- bg_band->channels =
- kzalloc(geo->bg_channels *
- sizeof(struct ieee80211_channel), GFP_KERNEL);
+ bg_band->channels = kcalloc(geo->bg_channels,
+ sizeof(struct ieee80211_channel),
+ GFP_KERNEL);
/* translate geo->bg to bg_band.channels */
for (i = 0; i < geo->bg_channels; i++) {
bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
@@ -11502,9 +11502,9 @@ static int ipw_net_init(struct net_device *dev)
a_band->band = IEEE80211_BAND_5GHZ;
a_band->n_channels = geo->a_channels;
- a_band->channels =
- kzalloc(geo->a_channels *
- sizeof(struct ieee80211_channel), GFP_KERNEL);
+ a_band->channels = kcalloc(geo->a_channels,
+ sizeof(struct ieee80211_channel),
+ GFP_KERNEL);
/* translate geo->bg to a_band.channels */
for (i = 0; i < geo->a_channels; i++) {
a_band->channels[i].band = IEEE80211_BAND_2GHZ;
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index cb3b855d949c..1bbdb14f7d76 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -6,6 +6,8 @@
*
*/
+#include <linux/sched.h>
+#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index a6fd70404c3d..e213a5dc049d 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -28,7 +28,6 @@
#include <linux/firmware.h>
#include <linux/netdevice.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
@@ -800,9 +799,9 @@ static int if_cs_ioprobe(struct pcmcia_device *p_dev,
unsigned int vcc,
void *priv_data)
{
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- p_dev->io.BasePort1 = cfg->io.win[0].base;
- p_dev->io.NumPorts1 = cfg->io.win[0].len;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
+ p_dev->resource[0]->start = cfg->io.win[0].base;
+ p_dev->resource[0]->end = cfg->io.win[0].len;
/* Do we need to allocate an interrupt? */
p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
@@ -814,7 +813,7 @@ static int if_cs_ioprobe(struct pcmcia_device *p_dev,
}
/* This reserves IO space but doesn't actually enable it */
- return pcmcia_request_io(p_dev, &p_dev->io);
+ return pcmcia_request_io(p_dev);
}
static int if_cs_probe(struct pcmcia_device *p_dev)
@@ -853,7 +852,8 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
goto out1;
/* Initialize io access */
- card->iobase = ioport_map(p_dev->io.BasePort1, p_dev->io.NumPorts1);
+ card->iobase = ioport_map(p_dev->resource[0]->start,
+ resource_size(p_dev->resource[0]));
if (!card->iobase) {
lbs_pr_err("error in ioport_map\n");
ret = -EIO;
@@ -872,9 +872,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
}
/* Finally, report what we've done */
- lbs_deb_cs("irq %d, io 0x%04x-0x%04x\n",
- p_dev->irq, p_dev->io.BasePort1,
- p_dev->io.BasePort1 + p_dev->io.NumPorts1 - 1);
+ lbs_deb_cs("irq %d, io %pR", p_dev->irq, p_dev->resource[0]);
/*
* Most of the libertas cards can do unaligned register access, but some
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 024e5ca7b7f3..296fd00a5129 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -121,8 +121,10 @@ struct if_sdio_card {
const char *helper;
const char *firmware;
+ bool helper_allocated;
+ bool firmware_allocated;
- u8 buffer[65536];
+ u8 buffer[65536] __attribute__((aligned(4)));
spinlock_t lock;
struct if_sdio_packet *packets;
@@ -1104,6 +1106,10 @@ free:
kfree(packet);
}
+ if (card->helper_allocated)
+ kfree(card->helper);
+ if (card->firmware_allocated)
+ kfree(card->firmware);
kfree(card);
goto out;
@@ -1154,6 +1160,10 @@ static void if_sdio_remove(struct sdio_func *func)
kfree(packet);
}
+ if (card->helper_allocated)
+ kfree(card->helper);
+ if (card->firmware_allocated)
+ kfree(card->firmware);
kfree(card);
lbs_deb_leave(LBS_DEB_SDIO);
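
Two independent fixes land in if_sdio: the 64 KiB transfer buffer gains 4-byte alignment for hosts that require aligned DMA, and the helper/firmware name pointers gain _allocated flags so teardown can distinguish heap copies (which must be kfree'd) from the const built-in defaults (which must not). The ownership-flag pattern in miniature (sketch, hypothetical names):

    if (user_fw_name) {
            card->firmware = kstrdup(user_fw_name, GFP_KERNEL); /* heap copy */
            card->firmware_allocated = true;
    } else {
            card->firmware = DEFAULT_FW_NAME;                   /* static string */
    }
    /* ... on remove or error: */
    if (card->firmware_allocated)
            kfree(card->firmware);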
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 238de10a4b57..e906616232a2 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -304,10 +304,13 @@ static int if_usb_probe(struct usb_interface *intf,
}
/* Upload firmware */
+ kparam_block_sysfs_write(fw_name);
if (__if_usb_prog_firmware(cardp, lbs_fw_name, BOOT_CMD_FW_BY_USB)) {
+ kparam_unblock_sysfs_write(fw_name);
lbs_deb_usbd(&udev->dev, "FW upload failed\n");
goto err_prog_firmware;
}
+ kparam_unblock_sysfs_write(fw_name);
if (!(priv = lbs_add_card(cardp, &udev->dev)))
goto err_prog_firmware;
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index 1cf01acef5f0..ba7d96584cb6 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -824,12 +824,15 @@ static int if_usb_prog_firmware(struct if_usb_card *cardp)
lbtf_deb_enter(LBTF_DEB_USB);
+ kparam_block_sysfs_write(fw_name);
ret = request_firmware(&cardp->fw, lbtf_fw_name, &cardp->udev->dev);
if (ret < 0) {
pr_err("request_firmware() failed with %#x\n", ret);
pr_err("firmware %s not found\n", lbtf_fw_name);
+ kparam_unblock_sysfs_write(fw_name);
goto done;
}
+ kparam_unblock_sysfs_write(fw_name);
if (check_fwfile_format(cardp->fw->data, cardp->fw->size))
goto release_fw;
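
fw_name is a writable module parameter, so without protection a concurrent write to /sys/module/.../parameters/fw_name could free or swap the string while request_firmware() is still reading it. kparam_block_sysfs_write() pins the parameter for the duration (sketch; fw_name is the parameter token, fw_name_var the hypothetical C variable behind it):

    kparam_block_sysfs_write(fw_name);      /* sysfs writers now blocked */
    ret = request_firmware(&fw, fw_name_var, dev);
    kparam_unblock_sysfs_write(fw_name);    /* writers may proceed */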
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index b16d5db52a4d..ef46a2d88539 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -17,7 +17,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -192,25 +191,23 @@ static int orinoco_cs_config_check(struct pcmcia_device *p_dev,
p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
- p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
+ p_dev->resource[0]->end = p_dev->resource[1]->end = 0;
if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- if (!(io->flags & CISTPL_IO_8BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- if (!(io->flags & CISTPL_IO_16BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
- p_dev->io.BasePort1 = io->win[0].base;
- p_dev->io.NumPorts1 = io->win[0].len;
+ p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |=
+ pcmcia_io_cfg_data_width(io->flags);
+ p_dev->resource[0]->start = io->win[0].base;
+ p_dev->resource[0]->end = io->win[0].len;
if (io->nwin > 1) {
- p_dev->io.Attributes2 = p_dev->io.Attributes1;
- p_dev->io.BasePort2 = io->win[1].base;
- p_dev->io.NumPorts2 = io->win[1].len;
+ p_dev->resource[1]->flags = p_dev->resource[0]->flags;
+ p_dev->resource[1]->start = io->win[1].base;
+ p_dev->resource[1]->end = io->win[1].len;
}
/* This reserves IO space but doesn't actually enable it */
- if (pcmcia_request_io(p_dev, &p_dev->io) != 0)
+ if (pcmcia_request_io(p_dev) != 0)
goto next_entry;
}
return 0;
@@ -258,7 +255,8 @@ orinoco_cs_config(struct pcmcia_device *link)
/* We initialize the hermes structure before completing PCMCIA
* configuration just in case the interrupt handler gets
* called. */
- mem = ioport_map(link->io.BasePort1, link->io.NumPorts1);
+ mem = ioport_map(link->resource[0]->start,
+ resource_size(link->resource[0]));
if (!mem)
goto failed;
@@ -280,7 +278,7 @@ orinoco_cs_config(struct pcmcia_device *link)
}
/* Register an interface with the stack */
- if (orinoco_if_add(priv, link->io.BasePort1,
+ if (orinoco_if_add(priv, link->resource[0]->start,
link->irq, NULL) != 0) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto failed;
diff --git a/drivers/net/wireless/orinoco/spectrum_cs.c b/drivers/net/wireless/orinoco/spectrum_cs.c
index b51a9adc80f6..873877e17e1b 100644
--- a/drivers/net/wireless/orinoco/spectrum_cs.c
+++ b/drivers/net/wireless/orinoco/spectrum_cs.c
@@ -25,7 +25,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -80,35 +79,27 @@ static int
spectrum_reset(struct pcmcia_device *link, int idle)
{
int ret;
- conf_reg_t reg;
- u_int save_cor;
+ u8 save_cor;
+ u8 ccsr;
/* Doing it if hardware is gone is guaranteed crash */
if (!pcmcia_dev_present(link))
return -ENODEV;
/* Save original COR value */
- reg.Function = 0;
- reg.Action = CS_READ;
- reg.Offset = CISREG_COR;
- ret = pcmcia_access_configuration_register(link, &reg);
+ ret = pcmcia_read_config_byte(link, CISREG_COR, &save_cor);
if (ret)
goto failed;
- save_cor = reg.Value;
/* Soft-Reset card */
- reg.Action = CS_WRITE;
- reg.Offset = CISREG_COR;
- reg.Value = (save_cor | COR_SOFT_RESET);
- ret = pcmcia_access_configuration_register(link, &reg);
+ ret = pcmcia_write_config_byte(link, CISREG_COR,
+ (save_cor | COR_SOFT_RESET));
if (ret)
goto failed;
udelay(1000);
/* Read CCSR */
- reg.Action = CS_READ;
- reg.Offset = CISREG_CCSR;
- ret = pcmcia_access_configuration_register(link, &reg);
+ ret = pcmcia_read_config_byte(link, CISREG_CCSR, &ccsr);
if (ret)
goto failed;
@@ -116,19 +107,15 @@ spectrum_reset(struct pcmcia_device *link, int idle)
* Start or stop the firmware. Memory width bit should be
* preserved from the value we've just read.
*/
- reg.Action = CS_WRITE;
- reg.Offset = CISREG_CCSR;
- reg.Value = (idle ? HCR_IDLE : HCR_RUN) | (reg.Value & HCR_MEM16);
- ret = pcmcia_access_configuration_register(link, &reg);
+ ccsr = (idle ? HCR_IDLE : HCR_RUN) | (ccsr & HCR_MEM16);
+ ret = pcmcia_write_config_byte(link, CISREG_CCSR, ccsr);
if (ret)
goto failed;
udelay(1000);
/* Restore original COR configuration index */
- reg.Action = CS_WRITE;
- reg.Offset = CISREG_COR;
- reg.Value = (save_cor & ~COR_SOFT_RESET);
- ret = pcmcia_access_configuration_register(link, &reg);
+ ret = pcmcia_write_config_byte(link, CISREG_COR,
+ (save_cor & ~COR_SOFT_RESET));
if (ret)
goto failed;
udelay(1000);
@@ -266,25 +253,23 @@ static int spectrum_cs_config_check(struct pcmcia_device *p_dev,
p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
- p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
+ p_dev->resource[0]->end = p_dev->resource[1]->end = 0;
if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- if (!(io->flags & CISTPL_IO_8BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- if (!(io->flags & CISTPL_IO_16BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
- p_dev->io.BasePort1 = io->win[0].base;
- p_dev->io.NumPorts1 = io->win[0].len;
+ p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
+ p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
+ p_dev->resource[0]->flags |=
+ pcmcia_io_cfg_data_width(io->flags);
+ p_dev->resource[0]->start = io->win[0].base;
+ p_dev->resource[0]->end = io->win[0].len;
if (io->nwin > 1) {
- p_dev->io.Attributes2 = p_dev->io.Attributes1;
- p_dev->io.BasePort2 = io->win[1].base;
- p_dev->io.NumPorts2 = io->win[1].len;
+ p_dev->resource[1]->flags = p_dev->resource[0]->flags;
+ p_dev->resource[1]->start = io->win[1].base;
+ p_dev->resource[1]->end = io->win[1].len;
}
/* This reserves IO space but doesn't actually enable it */
- if (pcmcia_request_io(p_dev, &p_dev->io) != 0)
+ if (pcmcia_request_io(p_dev) != 0)
goto next_entry;
}
return 0;
@@ -332,7 +317,8 @@ spectrum_cs_config(struct pcmcia_device *link)
/* We initialize the hermes structure before completing PCMCIA
* configuration just in case the interrupt handler gets
* called. */
- mem = ioport_map(link->io.BasePort1, link->io.NumPorts1);
+ mem = ioport_map(link->resource[0]->start,
+ resource_size(link->resource[0]));
if (!mem)
goto failed;
@@ -359,7 +345,7 @@ spectrum_cs_config(struct pcmcia_device *link)
}
/* Register an interface with the stack */
- if (orinoco_if_add(priv, link->io.BasePort1,
+ if (orinoco_if_add(priv, link->resource[0]->start,
link->irq, NULL) != 0) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto failed;
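
The spectrum_cs hunks above replace the old conf_reg_t interface, where every access to a CIS configuration register meant filling in Function/Action/Offset/Value and calling pcmcia_access_configuration_register(), with the direct byte accessors pcmcia_read_config_byte() and pcmcia_write_config_byte(). The io.BasePort1/NumPorts1 fields likewise give way to the generic struct resource array, so ioport_map() and orinoco_if_add() now take link->resource[0]->start and resource_size(). A minimal sketch of the new accessor pattern, under a hypothetical helper name, reusing CISREG_COR and COR_SOFT_RESET from <pcmcia/cisreg.h> as the driver does:

	/* Hypothetical helper sketching the pattern used in spectrum_reset()
	 * above: one read-modify-write replaces three conf_reg_t round trips. */
	static int cor_soft_reset(struct pcmcia_device *link)
	{
		u8 cor;
		int ret;

		ret = pcmcia_read_config_byte(link, CISREG_COR, &cor);
		if (ret)
			return ret;
		return pcmcia_write_config_byte(link, CISREG_COR,
						cor | COR_SOFT_RESET);
	}
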
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 8f67db487f57..76b2318a7dc7 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -446,7 +446,7 @@ static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb)
}
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
- (!payload->status))
+ !(payload->status & P54_TX_FAILED))
info->flags |= IEEE80211_TX_STAT_ACK;
if (payload->status & P54_TX_PSM_CANCELLED)
info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
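
The p54 change is a behavioral fix rather than an API cleanup: the old test (!payload->status) reported an ACK only when the firmware status was exactly zero, so any informational status bit, such as P54_TX_PSM_CANCELLED, also suppressed IEEE80211_TX_STAT_ACK. The new test masks for P54_TX_FAILED alone, and the PSM-cancelled case is still translated into IEEE80211_TX_STAT_TX_FILTERED on the following line.
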
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 390ccf6e08a3..5ca624a64c42 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -43,10 +43,8 @@
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/skbuff.h>
-#include <linux/ethtool.h>
#include <linux/ieee80211.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -81,8 +79,6 @@ static int ray_dev_config(struct net_device *dev, struct ifmap *map);
static struct net_device_stats *ray_get_stats(struct net_device *dev);
static int ray_dev_init(struct net_device *dev);
-static const struct ethtool_ops netdev_ethtool_ops;
-
static int ray_open(struct net_device *dev);
static netdev_tx_t ray_dev_start_xmit(struct sk_buff *skb,
struct net_device *dev);
@@ -315,9 +311,8 @@ static int ray_probe(struct pcmcia_device *p_dev)
local->finder = p_dev;
/* The io structure describes IO port mapping. None used here */
- p_dev->io.NumPorts1 = 0;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.IOAddrLines = 5;
+ p_dev->resource[0]->end = 0;
+ p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
/* General socket configuration */
p_dev->conf.Attributes = CONF_ENABLE_IRQ;
@@ -335,7 +330,6 @@ static int ray_probe(struct pcmcia_device *p_dev)
/* Raylink entries in the device structure */
dev->netdev_ops = &ray_netdev_ops;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
dev->wireless_handlers = &ray_handler_def;
#ifdef WIRELESS_SPY
local->wireless_data.spy_data = &local->spy_data;
@@ -394,7 +388,6 @@ static int ray_config(struct pcmcia_device *link)
int ret = 0;
int i;
win_req_t req;
- memreq_t mem;
struct net_device *dev = (struct net_device *)link->priv;
ray_dev_t *local = netdev_priv(dev);
@@ -431,9 +424,7 @@ static int ray_config(struct pcmcia_device *link)
ret = pcmcia_request_window(link, &req, &link->win);
if (ret)
goto failed;
- mem.CardOffset = 0x0000;
- mem.Page = 0;
- ret = pcmcia_map_mem_page(link, link->win, &mem);
+ ret = pcmcia_map_mem_page(link, link->win, 0);
if (ret)
goto failed;
local->sram = ioremap(req.Base, req.Size);
@@ -447,9 +438,7 @@ static int ray_config(struct pcmcia_device *link)
ret = pcmcia_request_window(link, &req, &local->rmem_handle);
if (ret)
goto failed;
- mem.CardOffset = 0x8000;
- mem.Page = 0;
- ret = pcmcia_map_mem_page(link, local->rmem_handle, &mem);
+ ret = pcmcia_map_mem_page(link, local->rmem_handle, 0x8000);
if (ret)
goto failed;
local->rmem = ioremap(req.Base, req.Size);
@@ -463,9 +452,7 @@ static int ray_config(struct pcmcia_device *link)
ret = pcmcia_request_window(link, &req, &local->amem_handle);
if (ret)
goto failed;
- mem.CardOffset = 0x0000;
- mem.Page = 0;
- ret = pcmcia_map_mem_page(link, local->amem_handle, &mem);
+ ret = pcmcia_map_mem_page(link, local->amem_handle, 0);
if (ret)
goto failed;
local->amem = ioremap(req.Base, req.Size);
@@ -617,7 +604,7 @@ static int dl_startup_params(struct net_device *dev)
/* Start kernel timer to wait for dl startup to complete. */
local->timer.expires = jiffies + HZ / 2;
local->timer.data = (long)local;
- local->timer.function = &verify_dl_startup;
+ local->timer.function = verify_dl_startup;
add_timer(&local->timer);
dev_dbg(&link->dev,
"ray_cs dl_startup_params started timer for verify_dl_startup\n");
@@ -793,7 +780,6 @@ static void ray_release(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
ray_dev_t *local = netdev_priv(dev);
- int i;
dev_dbg(&link->dev, "ray_release\n");
@@ -802,13 +788,6 @@ static void ray_release(struct pcmcia_device *link)
iounmap(local->sram);
iounmap(local->rmem);
iounmap(local->amem);
- /* Do bother checking to see if these succeed or not */
- i = pcmcia_release_window(link, local->amem_handle);
- if (i != 0)
- dev_dbg(&link->dev, "ReleaseWindow(local->amem) ret = %x\n", i);
- i = pcmcia_release_window(link, local->rmem_handle);
- if (i != 0)
- dev_dbg(&link->dev, "ReleaseWindow(local->rmem) ret = %x\n", i);
pcmcia_disable_device(link);
dev_dbg(&link->dev, "ray_release ending\n");
@@ -1079,18 +1058,6 @@ AP to AP 1 1 dest AP src AP dest source
}
} /* end encapsulate_frame */
-/*===========================================================================*/
-
-static void netdev_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, "ray_cs");
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
-};
-
/*====================================================================*/
/*------------------------------------------------------------------*/
@@ -2014,12 +1981,12 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
dev_dbg(&link->dev,
"ray_cs interrupt network \"%s\" start failed\n",
local->sparm.b4.a_current_ess_id);
- local->timer.function = &start_net;
+ local->timer.function = start_net;
} else {
dev_dbg(&link->dev,
"ray_cs interrupt network \"%s\" join failed\n",
local->sparm.b4.a_current_ess_id);
- local->timer.function = &join_net;
+ local->timer.function = join_net;
}
add_timer(&local->timer);
}
@@ -2487,9 +2454,9 @@ static void authenticate(ray_dev_t *local)
del_timer(&local->timer);
if (build_auth_frame(local, local->bss_id, OPEN_AUTH_REQUEST)) {
- local->timer.function = &join_net;
+ local->timer.function = join_net;
} else {
- local->timer.function = &authenticate_timeout;
+ local->timer.function = authenticate_timeout;
}
local->timer.expires = jiffies + HZ * 2;
local->timer.data = (long)local;
@@ -2574,7 +2541,7 @@ static void associate(ray_dev_t *local)
del_timer(&local->timer);
local->timer.expires = jiffies + HZ * 2;
local->timer.data = (long)local;
- local->timer.function = &join_net;
+ local->timer.function = join_net;
add_timer(&local->timer);
local->card_status = CARD_ASSOC_FAILED;
return;
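
Three independent cleanups land in ray_cs. The ethtool_ops whose only job was to strcpy() the driver name into get_drvinfo is deleted along with its forward declaration and the SET_ETHTOOL_OPS() call. memreq_t disappears: pcmcia_map_mem_page() now takes the card offset (0x0000 or 0x8000 here) as a plain argument instead of a two-field structure, and the explicit pcmcia_release_window() calls in ray_release() are dropped, as pcmcia_disable_device() takes care of releasing the windows. Finally, the redundant address-of operator on the timer callbacks (&verify_dl_startup, &start_net, &join_net, &authenticate_timeout) is removed; see the note after the yellowfin hunk at the end of this series, which makes the same change.
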
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 4b88909275a1..d49e830fa1da 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1494,7 +1494,7 @@ static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Create channel information array
*/
- info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
+ info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
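
The rt2x00 hunks here and below (rt2500pci, rt2500usb, rt2800lib, rt2x00debug, rt2x00queue, rt61pci, rt73usb, plus wl1271_scan) all make the same conversion: an open-coded kzalloc(n * size, ...) becomes kcalloc(n, size, ...). Both return zeroed memory, but the hand-written multiplication can silently wrap on overflow and hand back a buffer shorter than intended, whereas kcalloc() checks the product and returns NULL instead. Side by side, reusing the names from the hunk above:

	/* old: the multiplication can overflow before kzalloc() ever sees it */
	info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);

	/* new: kcalloc() fails cleanly if num_channels * sizeof(*info) wraps */
	info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
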
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 46ef692e404d..2214c3231727 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1809,7 +1809,7 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Create channel information array
*/
- info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
+ info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 5843a6d8b627..6e94356265b3 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1717,7 +1717,7 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Create channel information array
*/
- info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
+ info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index c7076deaecea..3bb67492d754 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -3151,7 +3151,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Create channel information array
*/
- info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
+ info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 54dc44bb415c..c1710b27ba70 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -333,7 +333,7 @@ static ssize_t rt2x00debug_read_queue_stats(struct file *file,
if (*offset)
return 0;
- data = kzalloc(lines * MAX_LINE_LENGTH, GFP_KERNEL);
+ data = kcalloc(lines, MAX_LINE_LENGTH, GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -380,7 +380,7 @@ static ssize_t rt2x00debug_read_crypto_stats(struct file *file,
loff_t *offset)
{
struct rt2x00debug_intf *intf = file->private_data;
- char *name[] = { "WEP64", "WEP128", "TKIP", "AES" };
+ static const char * const name[] = { "WEP64", "WEP128", "TKIP", "AES" };
char *data;
char *temp;
size_t size;
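
One more change rides along in rt2x00debug: the name[] table of cipher labels was an automatic array of pointers, rebuilt on the stack every time the crypto-stats file was read. Declaring it static const char * const lets both the pointers and the strings live in read-only data and drops the per-call initialization.
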
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 6d41599a090c..eede99939db9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -803,7 +803,7 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue,
* Allocate all queue entries.
*/
entry_size = sizeof(*entries) + qdesc->priv_size;
- entries = kzalloc(queue->limit * entry_size, GFP_KERNEL);
+ entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
if (!entries)
return -ENOMEM;
@@ -939,7 +939,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
*/
rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;
- queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL);
+ queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
if (!queue) {
ERROR(rt2x00dev, "Queue allocation failed.\n");
return -ENOMEM;
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 8940070fe2a2..97b3935f615b 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2660,7 +2660,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Create channel information array
*/
- info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
+ info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index cc6a72b09f46..e22f01c1818e 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2100,7 +2100,7 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Create channel information array
*/
- info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
+ info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
diff --git a/drivers/net/wireless/wl12xx/wl1271_scan.c b/drivers/net/wireless/wl12xx/wl1271_scan.c
index 9c80ba9b6be0..e4950c8e396e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_scan.c
+++ b/drivers/net/wireless/wl12xx/wl1271_scan.c
@@ -246,7 +246,7 @@ int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
wl->scan.req = req;
- wl->scan.scanned_ch = kzalloc(req->n_channels *
+ wl->scan.scanned_ch = kcalloc(req->n_channels,
sizeof(*wl->scan.scanned_ch),
GFP_KERNEL);
wl1271_scan_stm(wl);
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 376c6b964a9c..420e9e986a18 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -29,7 +29,6 @@
#include <linux/delay.h>
#include <linux/types.h>
-#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/in.h>
@@ -48,7 +47,6 @@
#include <net/iw_handler.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -89,13 +87,6 @@
static int wl3501_config(struct pcmcia_device *link);
static void wl3501_release(struct pcmcia_device *link);
-/*
- * The dev_info variable is the "key" that is used to match up this
- * device driver with appropriate cards, through the card configuration
- * database.
- */
-static dev_info_t wl3501_dev_info = "wl3501_cs";
-
static const struct {
int reg_domain;
int min, max, deflt;
@@ -1419,15 +1410,6 @@ static struct iw_statistics *wl3501_get_wireless_stats(struct net_device *dev)
return wstats;
}
-static void wl3501_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
- strlcpy(info->driver, wl3501_dev_info, sizeof(info->driver));
-}
-
-static const struct ethtool_ops ops = {
- .get_drvinfo = wl3501_get_drvinfo
-};
-
/**
* wl3501_detach - deletes a driver "instance"
* @link - FILL_IN
@@ -1892,9 +1874,8 @@ static int wl3501_probe(struct pcmcia_device *p_dev)
struct wl3501_card *this;
/* The io structure describes IO port mapping */
- p_dev->io.NumPorts1 = 16;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.IOAddrLines = 5;
+ p_dev->resource[0]->end = 16;
+ p_dev->resource[0]->flags = IO_DATA_PATH_WIDTH_8;
/* General socket configuration */
p_dev->conf.Attributes = CONF_ENABLE_IRQ;
@@ -1914,7 +1895,6 @@ static int wl3501_probe(struct pcmcia_device *p_dev)
this->p_dev = p_dev;
dev->wireless_data = &this->wireless_data;
dev->wireless_handlers = &wl3501_handler_def;
- SET_ETHTOOL_OPS(dev, &ops);
netif_stop_queue(dev);
p_dev->priv = dev;
@@ -1940,13 +1920,14 @@ static int wl3501_config(struct pcmcia_device *link)
/* Try allocating IO ports. This tries a few fixed addresses. If you
* want, you can also read the card's config table to pick addresses --
* see the serial driver for an example. */
+ link->io_lines = 5;
for (j = 0x280; j < 0x400; j += 0x20) {
/* The '^0x300' is so that we probe 0x300-0x3ff first, then
* 0x200-0x2ff, and so on, because this seems safer */
- link->io.BasePort1 = j;
- link->io.BasePort2 = link->io.BasePort1 + 0x10;
- i = pcmcia_request_io(link, &link->io);
+ link->resource[0]->start = j;
+ link->resource[1]->start = link->resource[0]->start + 0x10;
+ i = pcmcia_request_io(link);
if (i == 0)
break;
}
@@ -1968,7 +1949,7 @@ static int wl3501_config(struct pcmcia_device *link)
goto failed;
dev->irq = link->irq;
- dev->base_addr = link->io.BasePort1;
+ dev->base_addr = link->resource[0]->start;
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev)) {
printk(KERN_NOTICE "wl3501_cs: register_netdev() failed\n");
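
wl3501_cs follows the same conversion as spectrum_cs and ray_cs: the dead ethtool_ops and the unused wl3501_dev_info key are deleted, and the io structure gives way to resource[0]/resource[1]. Note the small semantic shuffle in the probe path: IOAddrLines used to be set once in wl3501_probe(), but with the resource-based API the equivalent link->io_lines = 5 is set in wl3501_config() just before the loop that probes fixed base addresses from 0x280 to 0x3ff, and pcmcia_request_io() now takes only the device pointer.
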
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index b50fedcef8ac..788a9bc1dbac 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1395,7 +1395,7 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
}
/* Common code used when first setting up, and when resuming. */
-static int talk_to_backend(struct xenbus_device *dev,
+static int talk_to_netback(struct xenbus_device *dev,
struct netfront_info *info)
{
const char *message;
@@ -1545,7 +1545,7 @@ static int xennet_connect(struct net_device *dev)
return -ENODEV;
}
- err = talk_to_backend(np->xbdev, np);
+ err = talk_to_netback(np->xbdev, np);
if (err)
return err;
@@ -1599,7 +1599,7 @@ static int xennet_connect(struct net_device *dev)
/**
* Callback received when the backend's state changes.
*/
-static void backend_changed(struct xenbus_device *dev,
+static void netback_changed(struct xenbus_device *dev,
enum xenbus_state backend_state)
{
struct netfront_info *np = dev_get_drvdata(&dev->dev);
@@ -1801,7 +1801,7 @@ static struct xenbus_driver netfront_driver = {
.probe = netfront_probe,
.remove = __devexit_p(xennet_remove),
.resume = netfront_resume,
- .otherend_changed = backend_changed,
+ .otherend_changed = netback_changed,
};
static int __init netif_init(void)
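
The xen-netfront changes are pure renames for clarity, presumably to mirror the naming on the backend side: the frontend talks to netback, so talk_to_backend() becomes talk_to_netback() and the otherend_changed callback backend_changed() becomes netback_changed(). No behavior changes.
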
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index d04c5b262050..f3f8be5a35fa 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -20,7 +20,7 @@
#include <linux/skbuff.h>
#include <linux/io.h>
#include <linux/slab.h>
-
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/of_mdio.h>
@@ -641,7 +641,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
skb_put(skb, len); /* Tell the skb how much data we got */
skb->protocol = eth_type_trans(skb, dev);
- skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
@@ -1086,7 +1086,7 @@ static void xemaclite_remove_ndev(struct net_device *ndev)
*
* Return: Value of the parameter if the parameter is found, or 0 otherwise
*/
-static bool get_bool(struct of_device *ofdev, const char *s)
+static bool get_bool(struct platform_device *ofdev, const char *s)
{
u32 *p = (u32 *)of_get_property(ofdev->dev.of_node, s, NULL);
@@ -1115,7 +1115,7 @@ static struct net_device_ops xemaclite_netdev_ops;
* Return: 0, if the driver is bound to the Emaclite device, or
* a negative error if there is failure.
*/
-static int __devinit xemaclite_of_probe(struct of_device *ofdev,
+static int __devinit xemaclite_of_probe(struct platform_device *ofdev,
const struct of_device_id *match)
{
struct resource r_irq; /* Interrupt resources */
@@ -1240,7 +1240,7 @@ error2:
*
* Return: 0, always.
*/
-static int __devexit xemaclite_of_remove(struct of_device *of_dev)
+static int __devexit xemaclite_of_remove(struct platform_device *of_dev)
{
struct device *dev = &of_dev->dev;
struct net_device *ndev = dev_get_drvdata(dev);
@@ -1269,6 +1269,16 @@ static int __devexit xemaclite_of_remove(struct of_device *of_dev)
return 0;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void
+xemaclite_poll_controller(struct net_device *ndev)
+{
+ disable_irq(ndev->irq);
+ xemaclite_interrupt(ndev->irq, ndev);
+ enable_irq(ndev->irq);
+}
+#endif
+
static struct net_device_ops xemaclite_netdev_ops = {
.ndo_open = xemaclite_open,
.ndo_stop = xemaclite_close,
@@ -1276,6 +1286,9 @@ static struct net_device_ops xemaclite_netdev_ops = {
.ndo_set_mac_address = xemaclite_set_mac_address,
.ndo_tx_timeout = xemaclite_tx_timeout,
.ndo_get_stats = xemaclite_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = xemaclite_poll_controller,
+#endif
};
/* Match table for OF platform binding */
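
xilinx_emaclite picks up four changes: <linux/of_address.h> is included explicitly rather than relied on via indirect includes, the driver is converted from the retired struct of_device to struct platform_device (the two became equivalent when the OF platform bus was folded into the platform bus), the open-coded skb->ip_summed = CHECKSUM_NONE becomes skb_checksum_none_assert(), and a netpoll hook is added that simply runs the interrupt handler with the IRQ disabled, the standard ndo_poll_controller pattern. skb_checksum_none_assert() documents that a freshly received buffer is expected to carry no checksum state; its implementation in <linux/skbuff.h> at the time was essentially the following (sketched from memory, so treat the exact guard as an assumption):

	/* debug-only check that the field already holds CHECKSUM_NONE */
	static inline void skb_checksum_none_assert(struct sk_buff *skb)
	{
	#ifdef DEBUG
		BUG_ON(skb->ip_summed != CHECKSUM_NONE);
	#endif
	}
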
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 4eb67aed68dd..cd1b3dcd61db 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -646,7 +646,7 @@ static int yellowfin_open(struct net_device *dev)
init_timer(&yp->timer);
yp->timer.expires = jiffies + 3*HZ;
yp->timer.data = (unsigned long)dev;
- yp->timer.function = &yellowfin_timer; /* timer handler */
+ yp->timer.function = yellowfin_timer; /* timer handler */
add_timer(&yp->timer);
return 0;
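
The yellowfin hunk repeats the timer cleanup seen in ray_cs: in C a function designator decays to a function pointer on its own, so the explicit address-of operator is redundant and the two forms below are interchangeable. Dropping the '&' matches the prevailing kernel style.

	/* equivalent assignments; the '&' adds nothing */
	yp->timer.function = &yellowfin_timer;
	yp->timer.function = yellowfin_timer;
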