Diffstat (limited to 'drivers/net/ethernet/hisilicon')
30 files changed, 4976 insertions, 1052 deletions
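Before the per-file diffs: the headline changes in this series are a new debugfs command interface (hns3_debugfs.c and hclge_debugfs.c), hardware GRO offload (NETIF_F_GRO_HW), FLR reset_prepare/reset_done hooks, and VF mailbox extensions (keep-alive, set MTU, query global queue id). As a quick orientation, here is a minimal userspace sketch of driving the new debugfs interface; it assumes debugfs is mounted at /sys/kernel/debug, that hns3_driver_name is "hns3", and it uses a made-up PCI address. Command output is printed with dev_info(), so results appear in the kernel log, not on the read side.

/* Hedged sketch: write a command to the per-device "cmd" file that
 * hns3_dbg_cmd_write() below parses. The PCI address is hypothetical.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/hns3/0000:7d:00.0/cmd";
	const char *cmd = "queue info 0\n";	/* or "help", "queue map", ... */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;	/* output lands in dmesg via dev_info() */
}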
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index 25152715396b..fee4664c9189 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -118,6 +118,7 @@ config HNS3_ENET tristate "Hisilicon HNS3 Ethernet Device Support" default m depends on 64BIT && PCI + depends on INET ---help--- This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08 family of SoCs. This module depends upon HNAE3 driver to access the HNAE3 diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index be268dcde8fa..f9a4e76c5a8b 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -915,10 +915,8 @@ static int hip04_mac_probe(struct platform_device *pdev) } ret = register_netdev(ndev); - if (ret) { - free_netdev(ndev); + if (ret) goto alloc_fail; - } return 0; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 28e907831b0e..c62378c07e70 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1163,6 +1163,7 @@ static void hns_nic_adjust_link(struct net_device *ndev) */ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, }; struct phy_device *phy_dev = h->phy_dev; int ret; @@ -1180,8 +1181,9 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h) if (unlikely(ret)) return -ENODEV; - phy_dev->supported &= h->if_support; - phy_dev->advertising = phy_dev->supported; + ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support); + linkmode_and(phy_dev->supported, phy_dev->supported, supported); + linkmode_copy(phy_dev->advertising, phy_dev->supported); if (h->phy_if == PHY_INTERFACE_MODE_XGMII) phy_dev->autoneg = false; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 774beda040a1..8e9b95871d30 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -624,7 +624,7 @@ static void hns_nic_self_test(struct net_device *ndev, clear_bit(NIC_STATE_TESTING, &priv->state); if (if_running) - (void)dev_open(ndev); + (void)dev_open(ndev, NULL); } /* Online tests aren't run; pass by default */ diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile index 002534f12b66..d01bf536eb86 100644 --- a/drivers/net/ethernet/hisilicon/hns3/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/Makefile @@ -9,6 +9,6 @@ obj-$(CONFIG_HNS3) += hns3vf/ obj-$(CONFIG_HNS3) += hnae3.o obj-$(CONFIG_HNS3_ENET) += hns3.o -hns3-objs = hns3_enet.o hns3_ethtool.o +hns3-objs = hns3_enet.o hns3_ethtool.o hns3_debugfs.o hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 038326cfda93..691d12174902 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -36,6 +36,10 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_BIND_FUNC_QUEUE, /* (VF -> PF) bind function and queue */ HCLGE_MBX_GET_LINK_STATUS, /* (VF -> PF) get link status */ HCLGE_MBX_QUEUE_RESET, /* (VF -> PF) reset queue */ + HCLGE_MBX_KEEP_ALIVE, /* (VF -> PF) send keep alive cmd */ + HCLGE_MBX_SET_ALIVE, /* (VF -> PF) set alive state */ + HCLGE_MBX_SET_MTU, /* (VF -> 
PF) set mtu */ + HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */ }; /* below are per-VF mac-vlan subcodes */ @@ -85,6 +89,12 @@ struct hclge_mbx_pf_to_vf_cmd { u16 msg[8]; }; +struct hclge_vf_rst_cmd { + u8 dest_vfid; + u8 vf_rst; + u8 rsv[22]; +}; + /* used by VF to store the received Async responses from PF */ struct hclgevf_mbx_arq_ring { #define HCLGE_MBX_MAX_ARQ_MSG_SIZE 8 diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 5b3b10479cd4..845d43d3a920 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -52,6 +52,7 @@ #define HNAE3_UNIC_CLIENT_INITED_B 0x4 #define HNAE3_ROCE_CLIENT_INITED_B 0x5 #define HNAE3_DEV_SUPPORT_FD_B 0x6 +#define HNAE3_DEV_SUPPORT_GRO_B 0x7 #define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\ BIT(HNAE3_DEV_SUPPORT_ROCE_B)) @@ -65,6 +66,9 @@ #define hnae3_dev_fd_supported(hdev) \ hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B) +#define hnae3_dev_gro_supported(hdev) \ + hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B) + #define ring_ptr_move_fw(ring, p) \ ((ring)->p = ((ring)->p + 1) % (ring)->desc_num) #define ring_ptr_move_bw(ring, p) \ @@ -124,14 +128,23 @@ enum hnae3_reset_notify_type { enum hnae3_reset_type { HNAE3_VF_RESET, + HNAE3_VF_FUNC_RESET, + HNAE3_VF_PF_FUNC_RESET, HNAE3_VF_FULL_RESET, + HNAE3_FLR_RESET, HNAE3_FUNC_RESET, HNAE3_CORE_RESET, HNAE3_GLOBAL_RESET, HNAE3_IMP_RESET, + HNAE3_UNKNOWN_RESET, HNAE3_NONE_RESET, }; +enum hnae3_flr_state { + HNAE3_FLR_DOWN, + HNAE3_FLR_DONE, +}; + struct hnae3_vector_info { u8 __iomem *io_addr; int vector; @@ -198,6 +211,10 @@ struct hnae3_ae_dev { * Enable the hardware * stop() * Disable the hardware + * start_client() + * Inform the hclge that client has been started + * stop_client() + * Inform the hclge that client has been stopped * get_status() * Get the carrier state of the back channel of the handle, 1 for ok, 0 for * non-ok @@ -293,17 +310,22 @@ struct hnae3_ae_dev { * Set vlan filter config of vf * enable_hw_strip_rxvtag() * Enable/disable hardware strip vlan tag of packets received + * set_gro_en + * Enable/disable HW GRO */ struct hnae3_ae_ops { int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev); void (*uninit_ae_dev)(struct hnae3_ae_dev *ae_dev); - + void (*flr_prepare)(struct hnae3_ae_dev *ae_dev); + void (*flr_done)(struct hnae3_ae_dev *ae_dev); int (*init_client_instance)(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev); void (*uninit_client_instance)(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev); int (*start)(struct hnae3_handle *handle); void (*stop)(struct hnae3_handle *handle); + int (*client_start)(struct hnae3_handle *handle); + void (*client_stop)(struct hnae3_handle *handle); int (*get_status)(struct hnae3_handle *handle); void (*get_ksettings_an_result)(struct hnae3_handle *handle, u8 *auto_neg, u32 *speed, u8 *duplex); @@ -432,10 +454,13 @@ struct hnae3_ae_ops { struct ethtool_rxnfc *cmd, u32 *rule_locs); int (*restore_fd_rules)(struct hnae3_handle *handle); void (*enable_fd)(struct hnae3_handle *handle, bool enable); - pci_ers_result_t (*process_hw_error)(struct hnae3_ae_dev *ae_dev); + int (*dbg_run_cmd)(struct hnae3_handle *handle, char *cmd_buf); + pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev); bool (*get_hw_reset_stat)(struct hnae3_handle *handle); bool (*ae_dev_resetting)(struct hnae3_handle *handle); unsigned long (*ae_dev_reset_cnt)(struct hnae3_handle *handle); + int 
(*set_gro_en)(struct hnae3_handle *handle, int enable); + u16 (*get_global_queue_id)(struct hnae3_handle *handle, u16 queue_id); }; struct hnae3_dcb_ops { @@ -544,6 +569,7 @@ struct hnae3_handle { u32 numa_node_mask; /* for multi-chip support */ u8 netdev_flags; + struct dentry *hnae3_dbgfs; }; #define hnae3_set_field(origin, mask, shift, val) \ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c new file mode 100644 index 000000000000..0de543faa5b1 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -0,0 +1,399 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2018-2019 Hisilicon Limited. */ + +#include <linux/debugfs.h> +#include <linux/device.h> + +#include "hnae3.h" +#include "hns3_enet.h" + +#define HNS3_DBG_READ_LEN 256 + +static struct dentry *hns3_dbgfs_root; + +static int hns3_dbg_queue_info(struct hnae3_handle *h, char *cmd_buf) +{ + struct hns3_nic_priv *priv = h->priv; + struct hns3_nic_ring_data *ring_data; + struct hns3_enet_ring *ring; + u32 base_add_l, base_add_h; + u32 queue_num, queue_max; + u32 value, i = 0; + int cnt; + + if (!priv->ring_data) { + dev_err(&h->pdev->dev, "ring_data is NULL\n"); + return -EFAULT; + } + + queue_max = h->kinfo.num_tqps; + cnt = kstrtouint(&cmd_buf[11], 0, &queue_num); + if (cnt) + queue_num = 0; + else + queue_max = queue_num + 1; + + dev_info(&h->pdev->dev, "queue info\n"); + + if (queue_num >= h->kinfo.num_tqps) { + dev_err(&h->pdev->dev, + "Queue number(%u) is out of range(%u)\n", queue_num, + h->kinfo.num_tqps - 1); + return -EINVAL; + } + + ring_data = priv->ring_data; + for (i = queue_num; i < queue_max; i++) { + /* Each cycle needs to determine whether the instance is reset, + * to prevent reference to invalid memory. And need to ensure + * that the following code is executed within 100ms. 
+ */ + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || + test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) + return -EPERM; + + ring = ring_data[(u32)(i + h->kinfo.num_tqps)].ring; + base_add_h = readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_BASEADDR_H_REG); + base_add_l = readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_BASEADDR_L_REG); + dev_info(&h->pdev->dev, "RX(%d) BASE ADD: 0x%08x%08x\n", i, + base_add_h, base_add_l); + + value = readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_BD_NUM_REG); + dev_info(&h->pdev->dev, "RX(%d) RING BD NUM: %u\n", i, value); + + value = readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_BD_LEN_REG); + dev_info(&h->pdev->dev, "RX(%d) RING BD LEN: %u\n", i, value); + + value = readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_TAIL_REG); + dev_info(&h->pdev->dev, "RX(%d) RING TAIL: %u\n", i, value); + + value = readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_HEAD_REG); + dev_info(&h->pdev->dev, "RX(%d) RING HEAD: %u\n", i, value); + + value = readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_FBDNUM_REG); + dev_info(&h->pdev->dev, "RX(%d) RING FBDNUM: %u\n", i, value); + + value = readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_PKTNUM_RECORD_REG); + dev_info(&h->pdev->dev, "RX(%d) RING PKTNUM: %u\n", i, value); + + ring = ring_data[i].ring; + base_add_h = readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_BASEADDR_H_REG); + base_add_l = readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_BASEADDR_L_REG); + dev_info(&h->pdev->dev, "TX(%d) BASE ADD: 0x%08x%08x\n", i, + base_add_h, base_add_l); + + value = readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_BD_NUM_REG); + dev_info(&h->pdev->dev, "TX(%d) RING BD NUM: %u\n", i, value); + + value = readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_TC_REG); + dev_info(&h->pdev->dev, "TX(%d) RING TC: %u\n", i, value); + + value = readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_TAIL_REG); + dev_info(&h->pdev->dev, "TX(%d) RING TAIL: %u\n", i, value); + + value = readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_HEAD_REG); + dev_info(&h->pdev->dev, "TX(%d) RING HEAD: %u\n", i, value); + + value = readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_FBDNUM_REG); + dev_info(&h->pdev->dev, "TX(%d) RING FBDNUM: %u\n", i, value); + + value = readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_OFFSET_REG); + dev_info(&h->pdev->dev, "TX(%d) RING OFFSET: %u\n", i, value); + + value = readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_PKTNUM_RECORD_REG); + dev_info(&h->pdev->dev, "TX(%d) RING PKTNUM: %u\n\n", i, + value); + } + + return 0; +} + +static int hns3_dbg_queue_map(struct hnae3_handle *h) +{ + struct hns3_nic_priv *priv = h->priv; + struct hns3_nic_ring_data *ring_data; + int i; + + if (!h->ae_algo->ops->get_global_queue_id) + return -EOPNOTSUPP; + + dev_info(&h->pdev->dev, "map info for queue id and vector id\n"); + dev_info(&h->pdev->dev, + "local queue id | global queue id | vector id\n"); + for (i = 0; i < h->kinfo.num_tqps; i++) { + u16 global_qid; + + global_qid = h->ae_algo->ops->get_global_queue_id(h, i); + ring_data = &priv->ring_data[i]; + if (!ring_data || !ring_data->ring || + !ring_data->ring->tqp_vector) + continue; + + dev_info(&h->pdev->dev, + " %4d %4d %4d\n", + i, global_qid, + ring_data->ring->tqp_vector->vector_irq); + } + + return 0; +} + +static int hns3_dbg_bd_info(struct hnae3_handle *h, char *cmd_buf) +{ + struct hns3_nic_priv *priv = h->priv; + struct hns3_nic_ring_data *ring_data; + struct hns3_desc 
*rx_desc, *tx_desc; + struct device *dev = &h->pdev->dev; + struct hns3_enet_ring *ring; + u32 tx_index, rx_index; + u32 q_num, value; + int cnt; + + cnt = sscanf(&cmd_buf[8], "%u %u", &q_num, &tx_index); + if (cnt == 2) { + rx_index = tx_index; + } else if (cnt != 1) { + dev_err(dev, "bd info: bad command string, cnt=%d\n", cnt); + return -EINVAL; + } + + if (q_num >= h->kinfo.num_tqps) { + dev_err(dev, "Queue number(%u) is out of range(%u)\n", q_num, + h->kinfo.num_tqps - 1); + return -EINVAL; + } + + ring_data = priv->ring_data; + ring = ring_data[q_num].ring; + value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG); + tx_index = (cnt == 1) ? value : tx_index; + + if (tx_index >= ring->desc_num) { + dev_err(dev, "bd index (%u) is out of range(%u)\n", tx_index, + ring->desc_num - 1); + return -EINVAL; + } + + tx_desc = &ring->desc[tx_index]; + dev_info(dev, "TX Queue Num: %u, BD Index: %u\n", q_num, tx_index); + dev_info(dev, "(TX) addr: 0x%llx\n", tx_desc->addr); + dev_info(dev, "(TX)vlan_tag: %u\n", tx_desc->tx.vlan_tag); + dev_info(dev, "(TX)send_size: %u\n", tx_desc->tx.send_size); + dev_info(dev, "(TX)vlan_tso: %u\n", tx_desc->tx.type_cs_vlan_tso); + dev_info(dev, "(TX)l2_len: %u\n", tx_desc->tx.l2_len); + dev_info(dev, "(TX)l3_len: %u\n", tx_desc->tx.l3_len); + dev_info(dev, "(TX)l4_len: %u\n", tx_desc->tx.l4_len); + dev_info(dev, "(TX)vlan_tag: %u\n", tx_desc->tx.outer_vlan_tag); + dev_info(dev, "(TX)tv: %u\n", tx_desc->tx.tv); + dev_info(dev, "(TX)vlan_msec: %u\n", tx_desc->tx.ol_type_vlan_msec); + dev_info(dev, "(TX)ol2_len: %u\n", tx_desc->tx.ol2_len); + dev_info(dev, "(TX)ol3_len: %u\n", tx_desc->tx.ol3_len); + dev_info(dev, "(TX)ol4_len: %u\n", tx_desc->tx.ol4_len); + dev_info(dev, "(TX)paylen: %u\n", tx_desc->tx.paylen); + dev_info(dev, "(TX)vld_ra_ri: %u\n", tx_desc->tx.bdtp_fe_sc_vld_ra_ri); + dev_info(dev, "(TX)mss: %u\n", tx_desc->tx.mss); + + ring = ring_data[q_num + h->kinfo.num_tqps].ring; + value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG); + rx_index = (cnt == 1) ? 
value : tx_index; + rx_desc = &ring->desc[rx_index]; + + dev_info(dev, "RX Queue Num: %u, BD Index: %u\n", q_num, rx_index); + dev_info(dev, "(RX)addr: 0x%llx\n", rx_desc->addr); + dev_info(dev, "(RX)pkt_len: %u\n", rx_desc->rx.pkt_len); + dev_info(dev, "(RX)size: %u\n", rx_desc->rx.size); + dev_info(dev, "(RX)rss_hash: %u\n", rx_desc->rx.rss_hash); + dev_info(dev, "(RX)fd_id: %u\n", rx_desc->rx.fd_id); + dev_info(dev, "(RX)vlan_tag: %u\n", rx_desc->rx.vlan_tag); + dev_info(dev, "(RX)o_dm_vlan_id_fb: %u\n", rx_desc->rx.o_dm_vlan_id_fb); + dev_info(dev, "(RX)ot_vlan_tag: %u\n", rx_desc->rx.ot_vlan_tag); + dev_info(dev, "(RX)bd_base_info: %u\n", rx_desc->rx.bd_base_info); + + return 0; +} + +static void hns3_dbg_help(struct hnae3_handle *h) +{ +#define HNS3_DBG_BUF_LEN 256 + + char printf_buf[HNS3_DBG_BUF_LEN]; + + dev_info(&h->pdev->dev, "available commands\n"); + dev_info(&h->pdev->dev, "queue info [number]\n"); + dev_info(&h->pdev->dev, "queue map\n"); + dev_info(&h->pdev->dev, "bd info [q_num] <bd index>\n"); + dev_info(&h->pdev->dev, "dump fd tcam\n"); + dev_info(&h->pdev->dev, "dump tc\n"); + dev_info(&h->pdev->dev, "dump tm map [q_num]\n"); + dev_info(&h->pdev->dev, "dump tm\n"); + dev_info(&h->pdev->dev, "dump qos pause cfg\n"); + dev_info(&h->pdev->dev, "dump qos pri map\n"); + dev_info(&h->pdev->dev, "dump qos buf cfg\n"); + dev_info(&h->pdev->dev, "dump mng tbl\n"); + + memset(printf_buf, 0, HNS3_DBG_BUF_LEN); + strncat(printf_buf, "dump reg [[bios common] [ssu <prt_id>]", + HNS3_DBG_BUF_LEN - 1); + strncat(printf_buf + strlen(printf_buf), + " [igu egu <prt_id>] [rpu <tc_queue_num>]", + HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1); + strncat(printf_buf + strlen(printf_buf), + " [rtc] [ppp] [rcb] [tqp <q_num>]]\n", + HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1); + dev_info(&h->pdev->dev, "%s", printf_buf); + + memset(printf_buf, 0, HNS3_DBG_BUF_LEN); + strncat(printf_buf, "dump reg dcb [port_id] [pri_id] [pg_id]", + HNS3_DBG_BUF_LEN - 1); + strncat(printf_buf + strlen(printf_buf), " [rq_id] [nq_id] [qset_id]\n", + HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1); + dev_info(&h->pdev->dev, "%s", printf_buf); +} + +static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + int uncopy_bytes; + char *buf; + int len; + + if (*ppos != 0) + return 0; + + if (count < HNS3_DBG_READ_LEN) + return -ENOSPC; + + buf = kzalloc(HNS3_DBG_READ_LEN, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + len = snprintf(buf, HNS3_DBG_READ_LEN, "%s\n", + "Please echo help to cmd to get help information"); + uncopy_bytes = copy_to_user(buffer, buf, len); + + kfree(buf); + + if (uncopy_bytes) + return -EFAULT; + + return (*ppos = len); +} + +static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct hnae3_handle *handle = filp->private_data; + struct hns3_nic_priv *priv = handle->priv; + char *cmd_buf, *cmd_buf_tmp; + int uncopied_bytes; + int ret = 0; + + if (*ppos != 0) + return 0; + + /* Judge if the instance is being reset. 
*/ + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || + test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) + return 0; + + cmd_buf = kzalloc(count + 1, GFP_KERNEL); + if (!cmd_buf) + return count; + + uncopied_bytes = copy_from_user(cmd_buf, buffer, count); + if (uncopied_bytes) { + kfree(cmd_buf); + return -EFAULT; + } + + cmd_buf[count] = '\0'; + + cmd_buf_tmp = strchr(cmd_buf, '\n'); + if (cmd_buf_tmp) { + *cmd_buf_tmp = '\0'; + count = cmd_buf_tmp - cmd_buf + 1; + } + + if (strncmp(cmd_buf, "help", 4) == 0) + hns3_dbg_help(handle); + else if (strncmp(cmd_buf, "queue info", 10) == 0) + ret = hns3_dbg_queue_info(handle, cmd_buf); + else if (strncmp(cmd_buf, "queue map", 9) == 0) + ret = hns3_dbg_queue_map(handle); + else if (strncmp(cmd_buf, "bd info", 7) == 0) + ret = hns3_dbg_bd_info(handle, cmd_buf); + else if (handle->ae_algo->ops->dbg_run_cmd) + ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf); + + if (ret) + hns3_dbg_help(handle); + + kfree(cmd_buf); + cmd_buf = NULL; + + return count; +} + +static const struct file_operations hns3_dbg_cmd_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = hns3_dbg_cmd_read, + .write = hns3_dbg_cmd_write, +}; + +void hns3_dbg_init(struct hnae3_handle *handle) +{ + const char *name = pci_name(handle->pdev); + struct dentry *pfile; + + handle->hnae3_dbgfs = debugfs_create_dir(name, hns3_dbgfs_root); + if (!handle->hnae3_dbgfs) + return; + + pfile = debugfs_create_file("cmd", 0600, handle->hnae3_dbgfs, handle, + &hns3_dbg_cmd_fops); + if (!pfile) { + debugfs_remove_recursive(handle->hnae3_dbgfs); + handle->hnae3_dbgfs = NULL; + dev_warn(&handle->pdev->dev, "create file for %s fail\n", + name); + } +} + +void hns3_dbg_uninit(struct hnae3_handle *handle) +{ + debugfs_remove_recursive(handle->hnae3_dbgfs); + handle->hnae3_dbgfs = NULL; +} + +void hns3_dbg_register_debugfs(const char *debugfs_dir_name) +{ + hns3_dbgfs_root = debugfs_create_dir(debugfs_dir_name, NULL); + if (!hns3_dbgfs_root) { + pr_warn("Register debugfs for %s fail\n", debugfs_dir_name); + return; + } +} + +void hns3_dbg_unregister_debugfs(void) +{ + debugfs_remove_recursive(hns3_dbgfs_root); + hns3_dbgfs_root = NULL; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 183fd8344b80..69142a381064 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -15,6 +15,7 @@ #include <linux/vermagic.h> #include <net/gre.h> #include <net/pkt_cls.h> +#include <net/tcp.h> #include <net/vxlan.h> #include "hnae3.h" @@ -415,9 +416,6 @@ static void hns3_nic_net_down(struct net_device *netdev) const struct hnae3_ae_ops *ops; int i; - if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) - return; - /* disable vectors */ for (i = 0; i < priv->vector_num; i++) hns3_vector_disable(&priv->tqp_vector[i]); @@ -439,6 +437,11 @@ static void hns3_nic_net_down(struct net_device *netdev) static int hns3_nic_net_stop(struct net_device *netdev) { + struct hns3_nic_priv *priv = netdev_priv(netdev); + + if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) + return 0; + netif_tx_stop_all_queues(netdev); netif_carrier_off(netdev); @@ -1343,6 +1346,15 @@ static int hns3_nic_set_features(struct net_device *netdev, priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; } + if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) { + if (features & NETIF_F_GRO_HW) + ret = h->ae_algo->ops->set_gro_en(h, true); + else + ret = h->ae_algo->ops->set_gro_en(h, false); + 
if (ret) + return ret; + } + if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) && h->ae_algo->ops->enable_vlan_filter) { if (features & NETIF_F_HW_VLAN_CTAG_FILTER) @@ -1561,18 +1573,11 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) { struct hnae3_handle *h = hns3_get_handle(netdev); - bool if_running = netif_running(netdev); int ret; if (!h->ae_algo->ops->set_mtu) return -EOPNOTSUPP; - /* if this was called with netdev up then bring netdevice down */ - if (if_running) { - (void)hns3_nic_net_stop(netdev); - msleep(100); - } - ret = h->ae_algo->ops->set_mtu(h, new_mtu); if (ret) netdev_err(netdev, "failed to change MTU in hardware %d\n", @@ -1580,10 +1585,6 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) else netdev->mtu = new_mtu; - /* if the netdev was running earlier, bring it up again */ - if (if_running && hns3_nic_net_open(netdev)) - ret = -EINVAL; - return ret; } @@ -1712,8 +1713,10 @@ static void hns3_disable_sriov(struct pci_dev *pdev) static void hns3_get_dev_capability(struct pci_dev *pdev, struct hnae3_ae_dev *ae_dev) { - if (pdev->revision >= 0x21) + if (pdev->revision >= 0x21) { hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1); + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1); + } } /* hns3_probe - Device initialization routine @@ -1825,8 +1828,8 @@ static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev, return PCI_ERS_RESULT_NONE; } - if (ae_dev->ops->process_hw_error) - ret = ae_dev->ops->process_hw_error(ae_dev); + if (ae_dev->ops->handle_hw_ras_error) + ret = ae_dev->ops->handle_hw_ras_error(ae_dev); else return PCI_ERS_RESULT_NONE; @@ -1849,9 +1852,29 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev) return PCI_ERS_RESULT_DISCONNECT; } +static void hns3_reset_prepare(struct pci_dev *pdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + dev_info(&pdev->dev, "hns3 flr prepare\n"); + if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare) + ae_dev->ops->flr_prepare(ae_dev); +} + +static void hns3_reset_done(struct pci_dev *pdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + dev_info(&pdev->dev, "hns3 flr done\n"); + if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done) + ae_dev->ops->flr_done(ae_dev); +} + static const struct pci_error_handlers hns3_err_handler = { .error_detected = hns3_error_detected, .slot_reset = hns3_slot_reset, + .reset_prepare = hns3_reset_prepare, + .reset_done = hns3_reset_done, }; static struct pci_driver hns3_driver = { @@ -1905,7 +1928,9 @@ static void hns3_set_default_feature(struct net_device *netdev) NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; if (pdev->revision >= 0x21) { - netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_GRO_HW; + netdev->features |= NETIF_F_GRO_HW; if (!(h->flags & HNAE3_SUPPORT_VF)) { netdev->hw_features |= NETIF_F_NTUPLE; @@ -2283,6 +2308,12 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, if (!(netdev->features & NETIF_F_RXCSUM)) return; + /* We MUST enable hardware checksum before enabling hardware GRO */ + if (skb_shinfo(skb)->gso_size) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + return; + } + /* check if hardware has done checksum */ if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B)) return; @@ -2326,6 +2357,9 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, static void hns3_rx_skb(struct 
hns3_enet_ring *ring, struct sk_buff *skb) { + if (skb_has_frag_list(skb)) + napi_gro_flush(&ring->tqp_vector->napi, false); + napi_gro_receive(&ring->tqp_vector->napi, skb); } @@ -2359,6 +2393,153 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, } } +static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length, + unsigned char *va) +{ +#define HNS3_NEED_ADD_FRAG 1 + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; + struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + struct sk_buff *skb; + + ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE); + skb = ring->skb; + if (unlikely(!skb)) { + netdev_err(netdev, "alloc rx skb fail\n"); + + u64_stats_update_begin(&ring->syncp); + ring->stats.sw_err_cnt++; + u64_stats_update_end(&ring->syncp); + + return -ENOMEM; + } + + prefetchw(skb->data); + + ring->pending_buf = 1; + ring->frag_num = 0; + ring->tail_skb = NULL; + if (length <= HNS3_RX_HEAD_SIZE) { + memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); + + /* We can reuse buffer as-is, just make sure it is local */ + if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) + desc_cb->reuse_flag = 1; + else /* This page cannot be reused so discard it */ + put_page(desc_cb->priv); + + ring_ptr_move_fw(ring, next_to_clean); + return 0; + } + u64_stats_update_begin(&ring->syncp); + ring->stats.seg_pkt_cnt++; + u64_stats_update_end(&ring->syncp); + + ring->pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE); + __skb_put(skb, ring->pull_len); + hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len, + desc_cb); + ring_ptr_move_fw(ring, next_to_clean); + + return HNS3_NEED_ADD_FRAG; +} + +static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc, + struct sk_buff **out_skb, bool pending) +{ + struct sk_buff *skb = *out_skb; + struct sk_buff *head_skb = *out_skb; + struct sk_buff *new_skb; + struct hns3_desc_cb *desc_cb; + struct hns3_desc *pre_desc; + u32 bd_base_info; + int pre_bd; + + /* if there is pending bd, the SW param next_to_clean has moved + * to next and the next is NULL + */ + if (pending) { + pre_bd = (ring->next_to_clean - 1 + ring->desc_num) % + ring->desc_num; + pre_desc = &ring->desc[pre_bd]; + bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info); + } else { + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + } + + while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) { + desc = &ring->desc[ring->next_to_clean]; + desc_cb = &ring->desc_cb[ring->next_to_clean]; + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + if (!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)) + return -ENXIO; + + if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) { + new_skb = napi_alloc_skb(&ring->tqp_vector->napi, + HNS3_RX_HEAD_SIZE); + if (unlikely(!new_skb)) { + netdev_err(ring->tqp->handle->kinfo.netdev, + "alloc rx skb frag fail\n"); + return -ENXIO; + } + ring->frag_num = 0; + + if (ring->tail_skb) { + ring->tail_skb->next = new_skb; + ring->tail_skb = new_skb; + } else { + skb_shinfo(skb)->frag_list = new_skb; + ring->tail_skb = new_skb; + } + } + + if (ring->tail_skb) { + head_skb->truesize += hnae3_buf_size(ring); + head_skb->data_len += le16_to_cpu(desc->rx.size); + head_skb->len += le16_to_cpu(desc->rx.size); + skb = ring->tail_skb; + } + + hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb); + ring_ptr_move_fw(ring, next_to_clean); + ring->pending_buf++; + } + + return 0; +} + +static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info, + u32 bd_base_info) +{ + u16 gro_count; + 
u32 l3_type; + + gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M, + HNS3_RXD_GRO_COUNT_S); + /* if there is no HW GRO, do not set gro params */ + if (!gro_count) + return; + + /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count + * to skb_shinfo(skb)->gso_segs + */ + NAPI_GRO_CB(skb)->count = gro_count; + + l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, + HNS3_RXD_L3ID_S); + if (l3_type == HNS3_L3_TYPE_IPV4) + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + else if (l3_type == HNS3_L3_TYPE_IPV6) + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; + else + return; + + skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info, + HNS3_RXD_GRO_SIZE_M, + HNS3_RXD_GRO_SIZE_S); + if (skb_shinfo(skb)->gso_size) + tcp_gro_complete(skb); +} + static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, struct sk_buff *skb) { @@ -2375,18 +2556,16 @@ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, } static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, - struct sk_buff **out_skb, int *out_bnum) + struct sk_buff **out_skb) { struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + struct sk_buff *skb = ring->skb; struct hns3_desc_cb *desc_cb; struct hns3_desc *desc; - struct sk_buff *skb; - unsigned char *va; u32 bd_base_info; - int pull_len; u32 l234info; int length; - int bnum; + int ret; desc = &ring->desc[ring->next_to_clean]; desc_cb = &ring->desc_cb[ring->next_to_clean]; @@ -2398,9 +2577,10 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, /* Check valid BD */ if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) - return -EFAULT; + return -ENXIO; - va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; + if (!skb) + ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; /* Prefetch first cache line of first page * Idea is to cache few bytes of the header of the packet. Our L1 Cache @@ -2409,62 +2589,42 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, * lines. In such a case, single fetch would suffice to cache in the * relevant part of the header. 
*/ - prefetch(va); + prefetch(ring->va); #if L1_CACHE_BYTES < 128 - prefetch(va + L1_CACHE_BYTES); + prefetch(ring->va + L1_CACHE_BYTES); #endif - skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi, - HNS3_RX_HEAD_SIZE); - if (unlikely(!skb)) { - netdev_err(netdev, "alloc rx skb fail\n"); - - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); - - return -ENOMEM; - } - - prefetchw(skb->data); - - bnum = 1; - if (length <= HNS3_RX_HEAD_SIZE) { - memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); + if (!skb) { + ret = hns3_alloc_skb(ring, length, ring->va); + *out_skb = skb = ring->skb; - /* We can reuse buffer as-is, just make sure it is local */ - if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) - desc_cb->reuse_flag = 1; - else /* This page cannot be reused so discard it */ - put_page(desc_cb->priv); + if (ret < 0) /* alloc buffer fail */ + return ret; + if (ret > 0) { /* need add frag */ + ret = hns3_add_frag(ring, desc, &skb, false); + if (ret) + return ret; - ring_ptr_move_fw(ring, next_to_clean); + /* As the head data may be changed when GRO enable, copy + * the head data in after other data rx completed + */ + memcpy(skb->data, ring->va, + ALIGN(ring->pull_len, sizeof(long))); + } } else { - u64_stats_update_begin(&ring->syncp); - ring->stats.seg_pkt_cnt++; - u64_stats_update_end(&ring->syncp); - - pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE); - - memcpy(__skb_put(skb, pull_len), va, - ALIGN(pull_len, sizeof(long))); - - hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb); - ring_ptr_move_fw(ring, next_to_clean); + ret = hns3_add_frag(ring, desc, &skb, true); + if (ret) + return ret; - while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) { - desc = &ring->desc[ring->next_to_clean]; - desc_cb = &ring->desc_cb[ring->next_to_clean]; - bd_base_info = le32_to_cpu(desc->rx.bd_base_info); - hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb); - ring_ptr_move_fw(ring, next_to_clean); - bnum++; - } + /* As the head data may be changed when GRO enable, copy + * the head data in after other data rx completed + */ + memcpy(skb->data, ring->va, + ALIGN(ring->pull_len, sizeof(long))); } - *out_bnum = bnum; - l234info = le32_to_cpu(desc->rx.l234_info); + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); /* Based on hw strategy, the tag offloaded will be stored at * ot_vlan_tag in two layer tag case, and stored at vlan_tag @@ -2514,7 +2674,11 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, ring->tqp_vector->rx_group.total_bytes += skb->len; + /* This is needed in order to enable forwarding support */ + hns3_set_gro_param(skb, l234info, bd_base_info); + hns3_rx_checksum(ring, skb, desc); + *out_skb = skb; hns3_set_rx_skb_rss_type(ring, skb); return 0; @@ -2527,9 +2691,9 @@ int hns3_clean_rx_ring( #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 struct net_device *netdev = ring->tqp->handle->kinfo.netdev; int recv_pkts, recv_bds, clean_count, err; - int unused_count = hns3_desc_unused(ring); - struct sk_buff *skb = NULL; - int num, bnum = 0; + int unused_count = hns3_desc_unused(ring) - ring->pending_buf; + struct sk_buff *skb = ring->skb; + int num; num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG); rmb(); /* Make sure num taken effect before the other data is touched */ @@ -2543,24 +2707,32 @@ int hns3_clean_rx_ring( hns3_nic_alloc_rx_buffers(ring, clean_count + unused_count); clean_count = 0; - unused_count = hns3_desc_unused(ring); + unused_count = hns3_desc_unused(ring) - + ring->pending_buf; 
} /* Poll one pkt */ - err = hns3_handle_rx_bd(ring, &skb, &bnum); + err = hns3_handle_rx_bd(ring, &skb); if (unlikely(!skb)) /* This fault cannot be repaired */ goto out; - recv_bds += bnum; - clean_count += bnum; - if (unlikely(err)) { /* Do jump the err */ - recv_pkts++; + if (err == -ENXIO) { /* Do not get FE for the packet */ + goto out; + } else if (unlikely(err)) { /* Do jump the err */ + recv_bds += ring->pending_buf; + clean_count += ring->pending_buf; + ring->skb = NULL; + ring->pending_buf = 0; continue; } /* Do update ip stack process */ skb->protocol = eth_type_trans(skb, netdev); rx_fn(ring, skb); + recv_bds += ring->pending_buf; + clean_count += ring->pending_buf; + ring->skb = NULL; + ring->pending_buf = 0; recv_pkts++; } @@ -2699,6 +2871,7 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) static int hns3_nic_common_poll(struct napi_struct *napi, int budget) { + struct hns3_nic_priv *priv = netdev_priv(napi->dev); struct hns3_enet_ring *ring; int rx_pkt_total = 0; @@ -2707,6 +2880,11 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget) bool clean_complete = true; int rx_budget; + if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { + napi_complete(napi); + return 0; + } + /* Since the actual Tx work is minimal, we can give the Tx a larger * budget and be more aggressive about cleaning up the Tx descriptors. */ @@ -2731,9 +2909,11 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget) if (!clean_complete) return budget; - napi_complete(napi); - hns3_update_new_int_gl(tqp_vector); - hns3_mask_vector_irq(tqp_vector, 1); + if (likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) && + napi_complete(napi)) { + hns3_update_new_int_gl(tqp_vector); + hns3_mask_vector_irq(tqp_vector, 1); + } return rx_pkt_total; } @@ -3349,6 +3529,22 @@ static void hns3_nic_set_priv_ops(struct net_device *netdev) priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; } +static int hns3_client_start(struct hnae3_handle *handle) +{ + if (!handle->ae_algo->ops->client_start) + return 0; + + return handle->ae_algo->ops->client_start(handle); +} + +static void hns3_client_stop(struct hnae3_handle *handle) +{ + if (!handle->ae_algo->ops->client_stop) + return; + + handle->ae_algo->ops->client_stop(handle); +} + static int hns3_client_init(struct hnae3_handle *handle) { struct pci_dev *pdev = handle->pdev; @@ -3416,10 +3612,18 @@ static int hns3_client_init(struct hnae3_handle *handle) goto out_reg_netdev_fail; } + ret = hns3_client_start(handle); + if (ret) { + dev_err(priv->dev, "hns3_client_start fail! 
ret=%d\n", ret); + goto out_reg_netdev_fail; + } + hns3_dcbnl_setup(handle); - /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */ - netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); + hns3_dbg_init(handle); + + /* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */ + netdev->max_mtu = HNS3_MAX_MTU; set_bit(HNS3_NIC_STATE_INITED, &priv->state); @@ -3444,6 +3648,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; + hns3_client_stop(handle); + hns3_remove_hw_addr(netdev); if (netdev->reg_state != NETREG_UNINITIALIZED) @@ -3472,6 +3678,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) hns3_put_ring_config(priv); + hns3_dbg_uninit(handle); + priv->ring_data = NULL; out_netdev_free: @@ -3808,7 +4016,8 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) /* Hardware table is only clear when pf resets */ if (!(handle->flags & HNAE3_SUPPORT_VF)) { ret = hns3_restore_vlan(netdev); - return ret; + if (ret) + return ret; } ret = hns3_restore_fd_rules(netdev); @@ -3818,21 +4027,31 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) /* Carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); + ret = hns3_nic_alloc_vector_data(priv); + if (ret) + return ret; + hns3_restore_coal(priv); ret = hns3_nic_init_vector_data(priv); if (ret) - return ret; + goto err_dealloc_vector; ret = hns3_init_all_ring(priv); - if (ret) { - hns3_nic_uninit_vector_data(priv); - priv->ring_data = NULL; - } + if (ret) + goto err_uninit_vector; set_bit(HNS3_NIC_STATE_INITED, &priv->state); return ret; + +err_uninit_vector: + hns3_nic_uninit_vector_data(priv); + priv->ring_data = NULL; +err_dealloc_vector: + hns3_nic_dealloc_vector_data(priv); + + return ret; } static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) @@ -3856,6 +4075,10 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) hns3_store_coal(priv); + ret = hns3_nic_dealloc_vector_data(priv); + if (ret) + netdev_err(netdev, "dealloc vector error\n"); + ret = hns3_uninit_all_ring(priv); if (ret) netdev_err(netdev, "uninit ring error\n"); @@ -4026,15 +4249,23 @@ static int __init hns3_init_module(void) INIT_LIST_HEAD(&client.node); + hns3_dbg_register_debugfs(hns3_driver_name); + ret = hnae3_register_client(&client); if (ret) - return ret; + goto err_reg_client; ret = pci_register_driver(&hns3_driver); if (ret) - hnae3_unregister_client(&client); + goto err_reg_driver; return ret; + +err_reg_driver: + hnae3_unregister_client(&client); +err_reg_client: + hns3_dbg_unregister_debugfs(); + return ret; } module_init(hns3_init_module); @@ -4046,6 +4277,7 @@ static void __exit hns3_exit_module(void) { pci_unregister_driver(&hns3_driver); hnae3_unregister_client(&client); + hns3_dbg_unregister_debugfs(); } module_exit(hns3_exit_module); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index cfd6a7109691..4e4b48ba78e3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -76,7 +76,10 @@ enum hns3_nic_state { #define HNS3_RING_MAX_PENDING 32768 #define HNS3_RING_MIN_PENDING 8 #define HNS3_RING_BD_MULTIPLE 8 -#define HNS3_MAX_MTU 9728 +/* max frame size of mac */ +#define HNS3_MAC_MAX_FRAME 9728 +#define HNS3_MAX_MTU \ + (HNS3_MAC_MAX_FRAME - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN)) #define HNS3_BD_SIZE_512_TYPE 0 #define 
HNS3_BD_SIZE_1024_TYPE 1 @@ -109,6 +112,10 @@ enum hns3_nic_state { #define HNS3_RXD_DOI_B 21 #define HNS3_RXD_OL3E_B 22 #define HNS3_RXD_OL4E_B 23 +#define HNS3_RXD_GRO_COUNT_S 24 +#define HNS3_RXD_GRO_COUNT_M (0x3f << HNS3_RXD_GRO_COUNT_S) +#define HNS3_RXD_GRO_FIXID_B 30 +#define HNS3_RXD_GRO_ECN_B 31 #define HNS3_RXD_ODMAC_S 0 #define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S) @@ -135,9 +142,8 @@ enum hns3_nic_state { #define HNS3_RXD_TSIND_S 12 #define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S) #define HNS3_RXD_LKBK_B 15 -#define HNS3_RXD_HDL_S 16 -#define HNS3_RXD_HDL_M (0x7ff << HNS3_RXD_HDL_S) -#define HNS3_RXD_HSIND_B 31 +#define HNS3_RXD_GRO_SIZE_S 16 +#define HNS3_RXD_GRO_SIZE_M (0x3ff << HNS3_RXD_GRO_SIZE_S) #define HNS3_TXD_L3T_S 0 #define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S) @@ -401,11 +407,19 @@ struct hns3_enet_ring { */ int next_to_clean; + int pull_len; /* head length for current packet */ + u32 frag_num; + unsigned char *va; /* first buffer address for current packet */ + u32 flag; /* ring attribute */ int irq_init_flag; int numa_node; cpumask_t affinity_mask; + + int pending_buf; + struct sk_buff *skb; + struct sk_buff *tail_skb; }; struct hns_queue; @@ -593,7 +607,11 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) static inline bool hns3_dev_ongoing_func_reset(struct hnae3_ae_dev *ae_dev) { - return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET)); + return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET || + ae_dev->reset_type == HNAE3_FLR_RESET || + ae_dev->reset_type == HNAE3_VF_FUNC_RESET || + ae_dev->reset_type == HNAE3_VF_FULL_RESET || + ae_dev->reset_type == HNAE3_VF_PF_FUNC_RESET)); } #define hns3_read_dev(a, reg) \ @@ -665,4 +683,8 @@ void hns3_dcbnl_setup(struct hnae3_handle *handle); static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {} #endif +void hns3_dbg_init(struct hnae3_handle *handle); +void hns3_dbg_uninit(struct hnae3_handle *handle); +void hns3_dbg_register_debugfs(const char *debugfs_dir_name); +void hns3_dbg_unregister_debugfs(void); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 4563638367ac..e678b6939da3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -821,7 +821,7 @@ static int hns3_set_ringparam(struct net_device *ndev, } if (if_running) - ret = dev_open(ndev); + ret = dev_open(ndev, NULL); return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile index 580e81743681..fffe8c1c45d3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile @@ -6,6 +6,6 @@ ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 obj-$(CONFIG_HNS3_HCLGE) += hclge.o -hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o +hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 690f62ed87dc..8af0cef5609b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -350,11 +350,20 @@ int hclge_cmd_init(struct hclge_dev *hdev) hdev->hw.cmq.crq.next_to_use = 0; hclge_cmd_init_regs(&hdev->hw); - 
clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); spin_unlock_bh(&hdev->hw.cmq.crq.lock); spin_unlock_bh(&hdev->hw.cmq.csq.lock); + clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + + /* Check if there is new reset pending, because the higher level + * reset may happen when lower level reset is being processed. + */ + if ((hclge_is_reset_pending(hdev))) { + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + return -EBUSY; + } + ret = hclge_cmd_query_firmware_version(&hdev->hw, &version); if (ret) { dev_err(&hdev->pdev->dev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 872cd4bdd70d..47717800e0b5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -86,11 +86,24 @@ enum hclge_opcode_type { HCLGE_OPC_QUERY_REG_NUM = 0x0040, HCLGE_OPC_QUERY_32_BIT_REG = 0x0041, HCLGE_OPC_QUERY_64_BIT_REG = 0x0042, + HCLGE_OPC_DFX_BD_NUM = 0x0043, + HCLGE_OPC_DFX_BIOS_COMMON_REG = 0x0044, + HCLGE_OPC_DFX_SSU_REG_0 = 0x0045, + HCLGE_OPC_DFX_SSU_REG_1 = 0x0046, + HCLGE_OPC_DFX_IGU_EGU_REG = 0x0047, + HCLGE_OPC_DFX_RPU_REG_0 = 0x0048, + HCLGE_OPC_DFX_RPU_REG_1 = 0x0049, + HCLGE_OPC_DFX_NCSI_REG = 0x004A, + HCLGE_OPC_DFX_RTC_REG = 0x004B, + HCLGE_OPC_DFX_PPP_REG = 0x004C, + HCLGE_OPC_DFX_RCB_REG = 0x004D, + HCLGE_OPC_DFX_TQP_REG = 0x004E, + HCLGE_OPC_DFX_SSU_REG_2 = 0x004F, + HCLGE_OPC_DFX_QUERY_CHIP_CAP = 0x0050, /* MAC command */ HCLGE_OPC_CONFIG_MAC_MODE = 0x0301, HCLGE_OPC_CONFIG_AN_MODE = 0x0304, - HCLGE_OPC_QUERY_AN_RESULT = 0x0306, HCLGE_OPC_QUERY_LINK_STATUS = 0x0307, HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308, HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309, @@ -126,6 +139,16 @@ enum hclge_opcode_type { HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813, HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814, HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815, + HCLGE_OPC_ETS_TC_WEIGHT = 0x0843, + HCLGE_OPC_QSET_DFX_STS = 0x0844, + HCLGE_OPC_PRI_DFX_STS = 0x0845, + HCLGE_OPC_PG_DFX_STS = 0x0846, + HCLGE_OPC_PORT_DFX_STS = 0x0847, + HCLGE_OPC_SCH_NQ_CNT = 0x0848, + HCLGE_OPC_SCH_RQ_CNT = 0x0849, + HCLGE_OPC_TM_INTERNAL_STS = 0x0850, + HCLGE_OPC_TM_INTERNAL_CNT = 0x0851, + HCLGE_OPC_TM_INTERNAL_STS_1 = 0x0852, /* Packet buffer allocate commands */ HCLGE_OPC_TX_BUFF_ALLOC = 0x0901, @@ -142,6 +165,7 @@ enum hclge_opcode_type { HCLGE_OPC_CFG_TX_QUEUE = 0x0B01, HCLGE_OPC_QUERY_TX_POINTER = 0x0B02, HCLGE_OPC_QUERY_TX_STATUS = 0x0B03, + HCLGE_OPC_TQP_TX_QUEUE_TC = 0x0B04, HCLGE_OPC_CFG_RX_QUEUE = 0x0B11, HCLGE_OPC_QUERY_RX_POINTER = 0x0B12, HCLGE_OPC_QUERY_RX_STATUS = 0x0B13, @@ -152,6 +176,7 @@ enum hclge_opcode_type { /* TSO command */ HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01, + HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10, /* RSS commands */ HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01, @@ -210,27 +235,34 @@ enum hclge_opcode_type { /* Led command */ HCLGE_OPC_LED_STATUS_CFG = 0xB000, + /* SFP command */ + HCLGE_OPC_SFP_GET_SPEED = 0x7104, + /* Error INT commands */ + HCLGE_MAC_COMMON_INT_EN = 0x030E, HCLGE_TM_SCH_ECC_INT_EN = 0x0829, - HCLGE_TM_SCH_ECC_ERR_RINT_CMD = 0x082d, - HCLGE_TM_SCH_ECC_ERR_RINT_CE = 0x082f, - HCLGE_TM_SCH_ECC_ERR_RINT_NFE = 0x0830, - HCLGE_TM_SCH_ECC_ERR_RINT_FE = 0x0831, - HCLGE_TM_SCH_MBIT_ECC_INFO_CMD = 0x0833, + HCLGE_SSU_ECC_INT_CMD = 0x0989, + HCLGE_SSU_COMMON_INT_CMD = 0x098C, + HCLGE_PPU_MPF_ECC_INT_CMD = 0x0B40, + HCLGE_PPU_MPF_OTHER_INT_CMD = 0x0B41, + HCLGE_PPU_PF_OTHER_INT_CMD = 0x0B42, HCLGE_COMMON_ECC_INT_CFG = 0x1505, - HCLGE_IGU_EGU_TNL_INT_QUERY = 0x1802, + 
HCLGE_QUERY_RAS_INT_STS_BD_NUM = 0x1510, + HCLGE_QUERY_CLEAR_MPF_RAS_INT = 0x1511, + HCLGE_QUERY_CLEAR_PF_RAS_INT = 0x1512, + HCLGE_QUERY_MSIX_INT_STS_BD_NUM = 0x1513, + HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514, + HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515, + HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580, + HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581, + HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584, HCLGE_IGU_EGU_TNL_INT_EN = 0x1803, - HCLGE_IGU_EGU_TNL_INT_CLR = 0x1804, - HCLGE_IGU_COMMON_INT_QUERY = 0x1805, HCLGE_IGU_COMMON_INT_EN = 0x1806, - HCLGE_IGU_COMMON_INT_CLR = 0x1807, HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14, - HCLGE_TM_QCN_MEM_INT_INFO_CMD = 0x1A17, HCLGE_PPP_CMD0_INT_CMD = 0x2100, HCLGE_PPP_CMD1_INT_CMD = 0x2101, - HCLGE_NCSI_INT_QUERY = 0x2400, + HCLGE_MAC_ETHERTYPE_IDX_RD = 0x2105, HCLGE_NCSI_INT_EN = 0x2401, - HCLGE_NCSI_INT_CLR = 0x2402, }; #define HCLGE_TQP_REG_OFFSET 0x80000 @@ -542,20 +574,6 @@ struct hclge_config_mac_speed_dup_cmd { u8 rsv[22]; }; -#define HCLGE_QUERY_SPEED_S 3 -#define HCLGE_QUERY_AN_B 0 -#define HCLGE_QUERY_DUPLEX_B 2 - -#define HCLGE_QUERY_SPEED_M GENMASK(4, 0) -#define HCLGE_QUERY_AN_M BIT(HCLGE_QUERY_AN_B) -#define HCLGE_QUERY_DUPLEX_M BIT(HCLGE_QUERY_DUPLEX_B) - -struct hclge_query_an_speed_dup_cmd { - u8 an_syn_dup_speed; - u8 pause; - u8 rsv[23]; -}; - #define HCLGE_RING_ID_MASK GENMASK(9, 0) #define HCLGE_TQP_ENABLE_B 0 @@ -572,6 +590,11 @@ struct hclge_config_auto_neg_cmd { u8 rsv[20]; }; +struct hclge_sfp_speed_cmd { + __le32 sfp_speed; + u32 rsv[5]; +}; + #define HCLGE_MAC_UPLINK_PORT 0x100 struct hclge_config_max_frm_size_cmd { @@ -746,6 +769,24 @@ struct hclge_cfg_tx_queue_pointer_cmd { u8 rsv[14]; }; +#pragma pack(1) +struct hclge_mac_ethertype_idx_rd_cmd { + u8 flags; + u8 resp_code; + __le16 vlan_tag; + u8 mac_add[6]; + __le16 index; + __le16 ethter_type; + __le16 egress_port; + __le16 egress_queue; + __le16 rev0; + u8 i_port_bitmap; + u8 i_port_direction; + u8 rev1[2]; +}; + +#pragma pack() + #define HCLGE_TSO_MSS_MIN_S 0 #define HCLGE_TSO_MSS_MIN_M GENMASK(13, 0) @@ -758,6 +799,12 @@ struct hclge_cfg_tso_status_cmd { u8 rsv[20]; }; +#define HCLGE_GRO_EN_B 0 +struct hclge_cfg_gro_status_cmd { + __le16 gro_en; + u8 rsv[22]; +}; + #define HCLGE_TSO_MSS_MIN 256 #define HCLGE_TSO_MSS_MAX 9668 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index e72f724123d7..f6323b2501dc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -35,7 +35,9 @@ static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev, } } - return hclge_tm_prio_tc_info_update(hdev, ets->prio_tc); + hclge_tm_prio_tc_info_update(hdev, ets->prio_tc); + + return 0; } static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev, @@ -70,25 +72,61 @@ static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets) return 0; } +static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc, + u8 *prio_tc) +{ + int i; + + if (num_tc > hdev->tc_max) { + dev_err(&hdev->pdev->dev, + "tc num checking failed, %u > tc_max(%u)\n", + num_tc, hdev->tc_max); + return -EINVAL; + } + + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { + if (prio_tc[i] >= num_tc) { + dev_err(&hdev->pdev->dev, + "prio_tc[%u] checking failed, %u >= num_tc(%u)\n", + i, prio_tc[i], num_tc); + return -EINVAL; + } + } + + for (i = 0; i < hdev->num_alloc_vport; i++) { + if (num_tc > hdev->vport[i].alloc_tqps) { + dev_err(&hdev->pdev->dev, + "allocated tqp(%u) checking failed, %u 
> tqp(%u)\n", + i, num_tc, hdev->vport[i].alloc_tqps); + return -EINVAL; + } + } + + return 0; +} + static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, u8 *tc, bool *changed) { bool has_ets_tc = false; u32 total_ets_bw = 0; u8 max_tc = 0; + int ret; u8 i; - for (i = 0; i < HNAE3_MAX_TC; i++) { - if (ets->prio_tc[i] >= hdev->tc_max || - i >= hdev->tc_max) - return -EINVAL; - + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i]) *changed = true; if (ets->prio_tc[i] > max_tc) max_tc = ets->prio_tc[i]; + } + ret = hclge_dcb_common_validate(hdev, max_tc + 1, ets->prio_tc); + if (ret) + return ret; + + for (i = 0; i < HNAE3_MAX_TC; i++) { switch (ets->tc_tsa[i]) { case IEEE_8021QAZ_TSA_STRICT: if (hdev->tm_info.tc_info[i].tc_sch_mode != @@ -184,9 +222,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets) if (ret) return ret; - ret = hclge_tm_schd_info_update(hdev, num_tc); - if (ret) - return ret; + hclge_tm_schd_info_update(hdev, num_tc); ret = hclge_ieee_ets_to_tm_info(hdev, ets); if (ret) @@ -305,20 +341,12 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc) if (hdev->flag & HCLGE_FLAG_DCB_ENABLE) return -EINVAL; - if (tc > hdev->tc_max) { - dev_err(&hdev->pdev->dev, - "setup tc failed, tc(%u) > tc_max(%u)\n", - tc, hdev->tc_max); - return -EINVAL; - } - - ret = hclge_tm_schd_info_update(hdev, tc); + ret = hclge_dcb_common_validate(hdev, tc, prio_tc); if (ret) - return ret; + return -EINVAL; - ret = hclge_tm_prio_tc_info_update(hdev, prio_tc); - if (ret) - return ret; + hclge_tm_schd_info_update(hdev, tc); + hclge_tm_prio_tc_info_update(hdev, prio_tc); ret = hclge_tm_init_hw(hdev); if (ret) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c new file mode 100644 index 000000000000..26d80504c730 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -0,0 +1,933 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2018-2019 Hisilicon Limited. 
*/ + +#include <linux/device.h> + +#include "hclge_debugfs.h" +#include "hclge_cmd.h" +#include "hclge_main.h" +#include "hclge_tm.h" +#include "hnae3.h" + +static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset) +{ + struct hclge_desc desc[4]; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true); + desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true); + desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true); + + ret = hclge_cmd_send(&hdev->hw, desc, 4); + if (ret != HCLGE_CMD_EXEC_SUCCESS) { + dev_err(&hdev->pdev->dev, + "get dfx bdnum fail, status is %d.\n", ret); + return ret; + } + + return (int)desc[offset / 6].data[offset % 6]; +} + +static int hclge_dbg_cmd_send(struct hclge_dev *hdev, + struct hclge_desc *desc_src, + int index, int bd_num, + enum hclge_opcode_type cmd) +{ + struct hclge_desc *desc = desc_src; + int ret, i; + + hclge_cmd_setup_basic_desc(desc, cmd, true); + desc->data[0] = cpu_to_le32(index); + + for (i = 1; i < bd_num; i++) { + desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc++; + hclge_cmd_setup_basic_desc(desc, cmd, true); + } + + ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num); + if (ret) { + dev_err(&hdev->pdev->dev, + "read reg cmd send fail, status is %d.\n", ret); + return ret; + } + + return ret; +} + +static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev, + struct hclge_dbg_dfx_message *dfx_message, + char *cmd_buf, int msg_num, int offset, + enum hclge_opcode_type cmd) +{ + struct hclge_desc *desc_src; + struct hclge_desc *desc; + int bd_num, buf_len; + int ret, i; + int index; + int max; + + ret = kstrtouint(cmd_buf, 10, &index); + index = (ret != 0) ? 0 : index; + + bd_num = hclge_dbg_get_dfx_bd_num(hdev, offset); + if (bd_num <= 0) + return; + + buf_len = sizeof(struct hclge_desc) * bd_num; + desc_src = kzalloc(buf_len, GFP_KERNEL); + if (!desc_src) { + dev_err(&hdev->pdev->dev, "call kzalloc failed\n"); + return; + } + + desc = desc_src; + ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, cmd); + if (ret != HCLGE_CMD_EXEC_SUCCESS) { + kfree(desc_src); + return; + } + + max = (bd_num * 6) <= msg_num ? (bd_num * 6) : msg_num; + + desc = desc_src; + for (i = 0; i < max; i++) { + (((i / 6) > 0) && ((i % 6) == 0)) ? 
desc++ : desc; + if (dfx_message->flag) + dev_info(&hdev->pdev->dev, "%s: 0x%x\n", + dfx_message->message, desc->data[i % 6]); + + dfx_message++; + } + + kfree(desc_src); +} + +static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *cmd_buf) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_dbg_bitmap_cmd *bitmap; + int rq_id, pri_id, qset_id; + int port_id, nq_id, pg_id; + struct hclge_desc desc[2]; + + int cnt, ret; + + cnt = sscanf(cmd_buf, "%i %i %i %i %i %i", + &port_id, &pri_id, &pg_id, &rq_id, &nq_id, &qset_id); + if (cnt != 6) { + dev_err(&hdev->pdev->dev, + "dump dcb: bad command parameter, cnt=%d\n", cnt); + return; + } + + ret = hclge_dbg_cmd_send(hdev, desc, qset_id, 1, + HCLGE_OPC_QSET_DFX_STS); + if (ret) + return; + + bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; + dev_info(dev, "roce_qset_mask: 0x%x\n", bitmap->bit0); + dev_info(dev, "nic_qs_mask: 0x%x\n", bitmap->bit1); + dev_info(dev, "qs_shaping_pass: 0x%x\n", bitmap->bit2); + dev_info(dev, "qs_bp_sts: 0x%x\n", bitmap->bit3); + + ret = hclge_dbg_cmd_send(hdev, desc, pri_id, 1, HCLGE_OPC_PRI_DFX_STS); + if (ret) + return; + + bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; + dev_info(dev, "pri_mask: 0x%x\n", bitmap->bit0); + dev_info(dev, "pri_cshaping_pass: 0x%x\n", bitmap->bit1); + dev_info(dev, "pri_pshaping_pass: 0x%x\n", bitmap->bit2); + + ret = hclge_dbg_cmd_send(hdev, desc, pg_id, 1, HCLGE_OPC_PG_DFX_STS); + if (ret) + return; + + bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; + dev_info(dev, "pg_mask: 0x%x\n", bitmap->bit0); + dev_info(dev, "pg_cshaping_pass: 0x%x\n", bitmap->bit1); + dev_info(dev, "pg_pshaping_pass: 0x%x\n", bitmap->bit2); + + ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, + HCLGE_OPC_PORT_DFX_STS); + if (ret) + return; + + bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; + dev_info(dev, "port_mask: 0x%x\n", bitmap->bit0); + dev_info(dev, "port_shaping_pass: 0x%x\n", bitmap->bit1); + + ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_NQ_CNT); + if (ret) + return; + + dev_info(dev, "sch_nq_cnt: 0x%x\n", desc[0].data[1]); + + ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_RQ_CNT); + if (ret) + return; + + dev_info(dev, "sch_rq_cnt: 0x%x\n", desc[0].data[1]); + + ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, HCLGE_OPC_TM_INTERNAL_STS); + if (ret) + return; + + dev_info(dev, "pri_bp: 0x%x\n", desc[0].data[1]); + dev_info(dev, "fifo_dfx_info: 0x%x\n", desc[0].data[2]); + dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n", desc[0].data[3]); + dev_info(dev, "tx_private_waterline: 0x%x\n", desc[0].data[4]); + dev_info(dev, "tm_bypass_en: 0x%x\n", desc[0].data[5]); + dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", desc[1].data[0]); + dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", desc[1].data[1]); + + ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, + HCLGE_OPC_TM_INTERNAL_CNT); + if (ret) + return; + + dev_info(dev, "SCH_NIC_NUM: 0x%x\n", desc[0].data[1]); + dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", desc[0].data[2]); + + ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, + HCLGE_OPC_TM_INTERNAL_STS_1); + if (ret) + return; + + dev_info(dev, "TC_MAP_SEL: 0x%x\n", desc[0].data[1]); + dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", desc[0].data[2]); + dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", desc[0].data[3]); + dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[4]); + dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[5]); +} + +static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, char *cmd_buf) +{ + int msg_num; + + if 
(strncmp(&cmd_buf[9], "bios common", 11) == 0) { + msg_num = sizeof(hclge_dbg_bios_common_reg) / + sizeof(struct hclge_dbg_dfx_message); + hclge_dbg_dump_reg_common(hdev, hclge_dbg_bios_common_reg, + &cmd_buf[21], msg_num, + HCLGE_DBG_DFX_BIOS_OFFSET, + HCLGE_OPC_DFX_BIOS_COMMON_REG); + } else if (strncmp(&cmd_buf[9], "ssu", 3) == 0) { + msg_num = sizeof(hclge_dbg_ssu_reg_0) / + sizeof(struct hclge_dbg_dfx_message); + hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_0, + &cmd_buf[13], msg_num, + HCLGE_DBG_DFX_SSU_0_OFFSET, + HCLGE_OPC_DFX_SSU_REG_0); + + msg_num = sizeof(hclge_dbg_ssu_reg_1) / + sizeof(struct hclge_dbg_dfx_message); + hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_1, + &cmd_buf[13], msg_num, + HCLGE_DBG_DFX_SSU_1_OFFSET, + HCLGE_OPC_DFX_SSU_REG_1); + + msg_num = sizeof(hclge_dbg_ssu_reg_2) / + sizeof(struct hclge_dbg_dfx_message); + hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_2, + &cmd_buf[13], msg_num, + HCLGE_DBG_DFX_SSU_2_OFFSET, + HCLGE_OPC_DFX_SSU_REG_2); + } else if (strncmp(&cmd_buf[9], "igu egu", 7) == 0) { + msg_num = sizeof(hclge_dbg_igu_egu_reg) / + sizeof(struct hclge_dbg_dfx_message); + hclge_dbg_dump_reg_common(hdev, hclge_dbg_igu_egu_reg, + &cmd_buf[17], msg_num, + HCLGE_DBG_DFX_IGU_OFFSET, + HCLGE_OPC_DFX_IGU_EGU_REG); + } else if (strncmp(&cmd_buf[9], "rpu", 3) == 0) { + msg_num = sizeof(hclge_dbg_rpu_reg_0) / + sizeof(struct hclge_dbg_dfx_message); + hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_0, + &cmd_buf[13], msg_num, + HCLGE_DBG_DFX_RPU_0_OFFSET, + HCLGE_OPC_DFX_RPU_REG_0); + + msg_num = sizeof(hclge_dbg_rpu_reg_1) / + sizeof(struct hclge_dbg_dfx_message); + hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_1, + &cmd_buf[13], msg_num, + HCLGE_DBG_DFX_RPU_1_OFFSET, + HCLGE_OPC_DFX_RPU_REG_1); + } else if (strncmp(&cmd_buf[9], "ncsi", 4) == 0) { + msg_num = sizeof(hclge_dbg_ncsi_reg) / + sizeof(struct hclge_dbg_dfx_message); + hclge_dbg_dump_reg_common(hdev, hclge_dbg_ncsi_reg, + &cmd_buf[14], msg_num, + HCLGE_DBG_DFX_NCSI_OFFSET, + HCLGE_OPC_DFX_NCSI_REG); + } else if (strncmp(&cmd_buf[9], "rtc", 3) == 0) { + msg_num = sizeof(hclge_dbg_rtc_reg) / + sizeof(struct hclge_dbg_dfx_message); + hclge_dbg_dump_reg_common(hdev, hclge_dbg_rtc_reg, + &cmd_buf[13], msg_num, + HCLGE_DBG_DFX_RTC_OFFSET, + HCLGE_OPC_DFX_RTC_REG); + } else if (strncmp(&cmd_buf[9], "ppp", 3) == 0) { + msg_num = sizeof(hclge_dbg_ppp_reg) / + sizeof(struct hclge_dbg_dfx_message); + hclge_dbg_dump_reg_common(hdev, hclge_dbg_ppp_reg, + &cmd_buf[13], msg_num, + HCLGE_DBG_DFX_PPP_OFFSET, + HCLGE_OPC_DFX_PPP_REG); + } else if (strncmp(&cmd_buf[9], "rcb", 3) == 0) { + msg_num = sizeof(hclge_dbg_rcb_reg) / + sizeof(struct hclge_dbg_dfx_message); + hclge_dbg_dump_reg_common(hdev, hclge_dbg_rcb_reg, + &cmd_buf[13], msg_num, + HCLGE_DBG_DFX_RCB_OFFSET, + HCLGE_OPC_DFX_RCB_REG); + } else if (strncmp(&cmd_buf[9], "tqp", 3) == 0) { + msg_num = sizeof(hclge_dbg_tqp_reg) / + sizeof(struct hclge_dbg_dfx_message); + hclge_dbg_dump_reg_common(hdev, hclge_dbg_tqp_reg, + &cmd_buf[13], msg_num, + HCLGE_DBG_DFX_TQP_OFFSET, + HCLGE_OPC_DFX_TQP_REG); + } else if (strncmp(&cmd_buf[9], "dcb", 3) == 0) { + hclge_dbg_dump_dcb(hdev, &cmd_buf[13]); + } else { + dev_info(&hdev->pdev->dev, "unknown command\n"); + return; + } +} + +static void hclge_title_idx_print(struct hclge_dev *hdev, bool flag, int index, + char *title_buf, char *true_buf, + char *false_buf) +{ + if (flag) + dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index, + true_buf); + else + dev_info(&hdev->pdev->dev, "%s(%d): %s\n", 
title_buf, index,
+			 false_buf);
+}
+
+static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
+{
+	struct hclge_ets_tc_weight_cmd *ets_weight;
+	struct hclge_desc desc;
+	int i, ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
+
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret) {
+		dev_err(&hdev->pdev->dev, "dump tc fail, status is %d.\n", ret);
+		return;
+	}
+
+	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
+
+	dev_info(&hdev->pdev->dev, "dump tc\n");
+	dev_info(&hdev->pdev->dev, "weight_offset: %u\n",
+		 ets_weight->weight_offset);
+
+	for (i = 0; i < HNAE3_MAX_TC; i++)
+		hclge_title_idx_print(hdev, ets_weight->tc_weight[i], i,
+				      "tc", "no sp mode", "sp mode");
+}
+
+static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
+{
+	struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
+	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
+	struct hclge_pg_shapping_cmd *pg_shap_cfg_cmd;
+	enum hclge_opcode_type cmd;
+	struct hclge_desc desc;
+	int ret;
+
+	cmd = HCLGE_OPC_TM_PG_C_SHAPPING;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_pg_cmd_send;
+
+	pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
+	dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n",
+		 pg_shap_cfg_cmd->pg_shapping_para);
+
+	cmd = HCLGE_OPC_TM_PG_P_SHAPPING;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_pg_cmd_send;
+
+	pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
+	dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n",
+		 pg_shap_cfg_cmd->pg_shapping_para);
+
+	cmd = HCLGE_OPC_TM_PORT_SHAPPING;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_pg_cmd_send;
+
+	port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n",
+		 port_shap_cfg_cmd->port_shapping_para);
+
+	cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_pg_cmd_send;
+
+	dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n", desc.data[0]);
+
+	cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_pg_cmd_send;
+
+	dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n", desc.data[0]);
+
+	cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_pg_cmd_send;
+
+	dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", desc.data[0]);
+
+	cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_pg_cmd_send;
+
+	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "BP_TO_QSET tc_id: %u\n",
+		 bp_to_qs_map_cmd->tc_id);
+	dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n",
+		 bp_to_qs_map_cmd->qs_group_id);
+	dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
+		 bp_to_qs_map_cmd->qs_bit_map);
+	return;
+
+err_tm_pg_cmd_send:
+	dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), status is %d\n",
+		cmd, ret);
+}
+
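+/* hclge_dbg_dump_tm() below walks the TM (traffic management) QoS
+ * hierarchy one config descriptor at a time: the PG->PRI, QS->PRI and
+ * NQ->QS links, then the DWRR weights and the C/P shapers, logging
+ * every field with dev_info() so the output lands in the kernel log.
+ * A minimal usage sketch, assuming the companion hns3 debugfs patch in
+ * this series exposes a per-device "cmd" file under
+ * /sys/kernel/debug/hns3/ (the PCI id below is only a placeholder):
+ *
+ *   echo "dump tm" > /sys/kernel/debug/hns3/0000:7d:00.0/cmd
+ *   dmesg | tail -n 30
+ */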
+static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
+{
+	struct hclge_priority_weight_cmd *priority_weight;
+	struct hclge_pg_to_pri_link_cmd *pg_to_pri_map;
+	struct hclge_qs_to_pri_link_cmd *qs_to_pri_map;
+	struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
+	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
+	struct hclge_pg_weight_cmd *pg_weight;
+	struct hclge_qs_weight_cmd *qs_weight;
+	enum hclge_opcode_type cmd;
+	struct hclge_desc desc;
+	int ret;
+
+	cmd = HCLGE_OPC_TM_PG_TO_PRI_LINK;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_cmd_send;
+
+	pg_to_pri_map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "dump tm\n");
+	dev_info(&hdev->pdev->dev, "PG_TO_PRI pg_id: %u\n",
+		 pg_to_pri_map->pg_id);
+	dev_info(&hdev->pdev->dev, "PG_TO_PRI map: 0x%x\n",
+		 pg_to_pri_map->pri_bit_map);
+
+	cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_cmd_send;
+
+	qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n",
+		 qs_to_pri_map->qs_id);
+	dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n",
+		 qs_to_pri_map->priority);
+	dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n",
+		 qs_to_pri_map->link_vld);
+
+	cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_cmd_send;
+
+	nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", nq_to_qs_map->nq_id);
+	dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: %u\n",
+		 nq_to_qs_map->qset_id);
+
+	cmd = HCLGE_OPC_TM_PG_WEIGHT;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_cmd_send;
+
+	pg_weight = (struct hclge_pg_weight_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "PG pg_id: %u\n", pg_weight->pg_id);
+	dev_info(&hdev->pdev->dev, "PG dwrr: %u\n", pg_weight->dwrr);
+
+	cmd = HCLGE_OPC_TM_QS_WEIGHT;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_cmd_send;
+
+	qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "QS qs_id: %u\n", qs_weight->qs_id);
+	dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr);
+
+	cmd = HCLGE_OPC_TM_PRI_WEIGHT;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_cmd_send;
+
+	priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "PRI pri_id: %u\n", priority_weight->pri_id);
+	dev_info(&hdev->pdev->dev, "PRI dwrr: %u\n", priority_weight->dwrr);
+
+	cmd = HCLGE_OPC_TM_PRI_C_SHAPPING;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_cmd_send;
+
+	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id);
+	dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n",
+		 shap_cfg_cmd->pri_shapping_para);
+
+	cmd = HCLGE_OPC_TM_PRI_P_SHAPPING;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_cmd_send;
+
+	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id);
+	dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n",
+		 shap_cfg_cmd->pri_shapping_para);
+
+	hclge_dbg_dump_tm_pg(hdev);
+
+	return;
+
+err_tm_cmd_send:
+	dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), status is %d\n",
+		cmd, ret);
+}
+
+static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *cmd_buf)
+{
+	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
+	struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
+	struct hclge_qs_to_pri_link_cmd *map;
+	struct hclge_tqp_tx_queue_tc_cmd *tc;
+	enum hclge_opcode_type cmd;
+	struct hclge_desc desc;
+	u32 qset_mapping[32];
+	unsigned int queue_id;
+	int tc_id, qset_id;
+	int pri_id, ret;
+	int group_id;
+	u32 i;
+
+	ret = kstrtouint(&cmd_buf[12], 10, &queue_id);
+	queue_id = (ret != 0) ? 0 : queue_id;
+
+	cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
+	nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	nq_to_qs_map->nq_id = cpu_to_le16(queue_id);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_map_cmd_send;
+	qset_id = le16_to_cpu(nq_to_qs_map->qset_id) & 0x3FF;
+
+	cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
+	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	map->qs_id = cpu_to_le16(qset_id);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_map_cmd_send;
+	pri_id = map->priority;
+
+	cmd = HCLGE_OPC_TQP_TX_QUEUE_TC;
+	tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	tc->queue_id = cpu_to_le16(queue_id);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_map_cmd_send;
+	tc_id = tc->tc_id & 0x7;
+
+	dev_info(&hdev->pdev->dev, "queue_id | qset_id | pri_id | tc_id\n");
+	dev_info(&hdev->pdev->dev, "%04d | %04d | %02d | %02d\n",
+		 queue_id, qset_id, pri_id, tc_id);
+
+	cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
+	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
+	for (group_id = 0; group_id < 32; group_id++) {
+		hclge_cmd_setup_basic_desc(&desc, cmd, true);
+		bp_to_qs_map_cmd->tc_id = tc_id;
+		bp_to_qs_map_cmd->qs_group_id = group_id;
+		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+		if (ret)
+			goto err_tm_map_cmd_send;
+
+		qset_mapping[group_id] =
+			le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map);
+	}
+
+	dev_info(&hdev->pdev->dev, "index | tm bp qset mapping:\n");
+
+	i = 0;
+	for (group_id = 0; group_id < 4; group_id++) {
+		dev_info(&hdev->pdev->dev,
+			 "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
+			 group_id * 256, qset_mapping[i + 7],
+			 qset_mapping[i + 6], qset_mapping[i + 5],
+			 qset_mapping[i + 4], qset_mapping[i + 3],
+			 qset_mapping[i + 2], qset_mapping[i + 1],
+			 qset_mapping[i]);
+		i += 8;
+	}
+
+	return;
+
+err_tm_map_cmd_send:
+	dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), status is %d\n",
+		cmd, ret);
+}
+
+static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
+{
+	struct hclge_cfg_pause_param_cmd *pause_param;
+	struct hclge_desc desc;
+	int ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
+
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"dump qos pause cfg fail, status is %d.\n", ret);
+		return;
+	}
+
+	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
+	dev_info(&hdev->pdev->dev, "dump qos pause cfg\n");
+	dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n",
+		 pause_param->pause_trans_gap);
+	dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n",
+		 pause_param->pause_trans_time);
+}
+
+static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
+{
+	struct hclge_qos_pri_map_cmd *pri_map;
+
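+	/* Judging by the packed bitfields of struct hclge_qos_pri_map_cmd
+	 * in hclge_debugfs.h, HCLGE_OPC_PRI_TO_TC_MAPPING returns the eight
+	 * 4-bit priority-to-TC mappings two per byte, followed by the VLAN
+	 * default priority, so each field can be printed directly below.
+	 */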
struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "dump qos pri map fail, status is %d.\n", ret); + return; + } + + pri_map = (struct hclge_qos_pri_map_cmd *)desc.data; + dev_info(&hdev->pdev->dev, "dump qos pri map\n"); + dev_info(&hdev->pdev->dev, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri); + dev_info(&hdev->pdev->dev, "pri_0_to_tc: 0x%x\n", pri_map->pri0_tc); + dev_info(&hdev->pdev->dev, "pri_1_to_tc: 0x%x\n", pri_map->pri1_tc); + dev_info(&hdev->pdev->dev, "pri_2_to_tc: 0x%x\n", pri_map->pri2_tc); + dev_info(&hdev->pdev->dev, "pri_3_to_tc: 0x%x\n", pri_map->pri3_tc); + dev_info(&hdev->pdev->dev, "pri_4_to_tc: 0x%x\n", pri_map->pri4_tc); + dev_info(&hdev->pdev->dev, "pri_5_to_tc: 0x%x\n", pri_map->pri5_tc); + dev_info(&hdev->pdev->dev, "pri_6_to_tc: 0x%x\n", pri_map->pri6_tc); + dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc); +} + +static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) +{ + struct hclge_tx_buff_alloc_cmd *tx_buf_cmd; + struct hclge_rx_priv_buff_cmd *rx_buf_cmd; + struct hclge_rx_priv_wl_buf *rx_priv_wl; + struct hclge_rx_com_wl *rx_packet_cnt; + struct hclge_rx_com_thrd *rx_com_thrd; + struct hclge_rx_com_wl *rx_com_wl; + enum hclge_opcode_type cmd; + struct hclge_desc desc[2]; + int i, ret; + + cmd = HCLGE_OPC_TX_BUFF_ALLOC; + hclge_cmd_setup_basic_desc(desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, desc, 1); + if (ret) + goto err_qos_cmd_send; + + dev_info(&hdev->pdev->dev, "dump qos buf cfg\n"); + + tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data; + for (i = 0; i < HCLGE_TC_NUM; i++) + dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i, + tx_buf_cmd->tx_pkt_buff[i]); + + cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC; + hclge_cmd_setup_basic_desc(desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, desc, 1); + if (ret) + goto err_qos_cmd_send; + + dev_info(&hdev->pdev->dev, "\n"); + rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data; + for (i = 0; i < HCLGE_TC_NUM; i++) + dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i, + rx_buf_cmd->buf_num[i]); + + dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n", + rx_buf_cmd->shared_buf); + + cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC; + hclge_cmd_setup_basic_desc(&desc[0], cmd, true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], cmd, true); + ret = hclge_cmd_send(&hdev->hw, desc, 2); + if (ret) + goto err_qos_cmd_send; + + dev_info(&hdev->pdev->dev, "\n"); + rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data; + for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) + dev_info(&hdev->pdev->dev, + "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i, + rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low); + + rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data; + for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) + dev_info(&hdev->pdev->dev, + "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i + 4, + rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low); + + cmd = HCLGE_OPC_RX_COM_THRD_ALLOC; + hclge_cmd_setup_basic_desc(&desc[0], cmd, true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], cmd, true); + ret = hclge_cmd_send(&hdev->hw, desc, 2); + if (ret) + goto err_qos_cmd_send; + + dev_info(&hdev->pdev->dev, "\n"); + rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data; + for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) + dev_info(&hdev->pdev->dev, + 
"rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i, + rx_com_thrd->com_thrd[i].high, + rx_com_thrd->com_thrd[i].low); + + rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data; + for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) + dev_info(&hdev->pdev->dev, + "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i + 4, + rx_com_thrd->com_thrd[i].high, + rx_com_thrd->com_thrd[i].low); + + cmd = HCLGE_OPC_RX_COM_WL_ALLOC; + hclge_cmd_setup_basic_desc(desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, desc, 1); + if (ret) + goto err_qos_cmd_send; + + rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data; + dev_info(&hdev->pdev->dev, "\n"); + dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n", + rx_com_wl->com_wl.high, rx_com_wl->com_wl.low); + + cmd = HCLGE_OPC_RX_GBL_PKT_CNT; + hclge_cmd_setup_basic_desc(desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, desc, 1); + if (ret) + goto err_qos_cmd_send; + + rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data; + dev_info(&hdev->pdev->dev, + "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n", + rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low); + + return; + +err_qos_cmd_send: + dev_err(&hdev->pdev->dev, + "dump qos buf cfg fail(0x%x), status is %d\n", cmd, ret); +} + +static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev) +{ + struct hclge_mac_ethertype_idx_rd_cmd *req0; + char printf_buf[HCLGE_DBG_BUF_LEN]; + struct hclge_desc desc; + int ret, i; + + dev_info(&hdev->pdev->dev, "mng tab:\n"); + memset(printf_buf, 0, HCLGE_DBG_BUF_LEN); + strncat(printf_buf, + "entry|mac_addr |mask|ether|mask|vlan|mask", + HCLGE_DBG_BUF_LEN - 1); + strncat(printf_buf + strlen(printf_buf), + "|i_map|i_dir|e_type|pf_id|vf_id|q_id|drop\n", + HCLGE_DBG_BUF_LEN - strlen(printf_buf) - 1); + + dev_info(&hdev->pdev->dev, "%s", printf_buf); + + for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) { + hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD, + true); + req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data; + req0->index = cpu_to_le16(i); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "call hclge_cmd_send fail, ret = %d\n", ret); + return; + } + + if (!req0->resp_code) + continue; + + memset(printf_buf, 0, HCLGE_DBG_BUF_LEN); + snprintf(printf_buf, HCLGE_DBG_BUF_LEN, + "%02u |%02x:%02x:%02x:%02x:%02x:%02x|", + req0->index, req0->mac_add[0], req0->mac_add[1], + req0->mac_add[2], req0->mac_add[3], req0->mac_add[4], + req0->mac_add[5]); + + snprintf(printf_buf + strlen(printf_buf), + HCLGE_DBG_BUF_LEN - strlen(printf_buf), + "%x |%04x |%x |%04x|%x |%02x |%02x |", + !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B), + req0->ethter_type, + !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B), + req0->vlan_tag & HCLGE_DBG_MNG_VLAN_TAG, + !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B), + req0->i_port_bitmap, req0->i_port_direction); + + snprintf(printf_buf + strlen(printf_buf), + HCLGE_DBG_BUF_LEN - strlen(printf_buf), + "%d |%d |%02d |%04d|%x\n", + !!(req0->egress_port & HCLGE_DBG_MNG_E_TYPE_B), + req0->egress_port & HCLGE_DBG_MNG_PF_ID, + (req0->egress_port >> 3) & HCLGE_DBG_MNG_VF_ID, + req0->egress_queue, + !!(req0->egress_port & HCLGE_DBG_MNG_DROP_B)); + + dev_info(&hdev->pdev->dev, "%s", printf_buf); + } +} + +static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage, + bool sel_x, u32 loc) +{ + struct hclge_fd_tcam_config_1_cmd *req1; + struct hclge_fd_tcam_config_2_cmd *req2; + struct hclge_fd_tcam_config_3_cmd *req3; + struct hclge_desc desc[3]; + int ret, i; + u32 *req; + + hclge_cmd_setup_basic_desc(&desc[0], 
HCLGE_OPC_FD_TCAM_OP, true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true); + desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true); + + req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; + req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; + req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; + + req1->stage = stage; + req1->xy_sel = sel_x ? 1 : 0; + req1->index = cpu_to_le32(loc); + + ret = hclge_cmd_send(&hdev->hw, desc, 3); + if (ret) + return; + + dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n", + sel_x ? "x" : "y", loc); + + req = (u32 *)req1->tcam_data; + for (i = 0; i < 2; i++) + dev_info(&hdev->pdev->dev, "%08x\n", *req++); + + req = (u32 *)req2->tcam_data; + for (i = 0; i < 6; i++) + dev_info(&hdev->pdev->dev, "%08x\n", *req++); + + req = (u32 *)req3->tcam_data; + for (i = 0; i < 5; i++) + dev_info(&hdev->pdev->dev, "%08x\n", *req++); +} + +static void hclge_dbg_fd_tcam(struct hclge_dev *hdev) +{ + u32 i; + + for (i = 0; i < hdev->fd_cfg.rule_num[0]; i++) { + hclge_dbg_fd_tcam_read(hdev, 0, true, i); + hclge_dbg_fd_tcam_read(hdev, 0, false, i); + } +} + +int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (strncmp(cmd_buf, "dump fd tcam", 12) == 0) { + hclge_dbg_fd_tcam(hdev); + } else if (strncmp(cmd_buf, "dump tc", 7) == 0) { + hclge_dbg_dump_tc(hdev); + } else if (strncmp(cmd_buf, "dump tm map", 11) == 0) { + hclge_dbg_dump_tm_map(hdev, cmd_buf); + } else if (strncmp(cmd_buf, "dump tm", 7) == 0) { + hclge_dbg_dump_tm(hdev); + } else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0) { + hclge_dbg_dump_qos_pause_cfg(hdev); + } else if (strncmp(cmd_buf, "dump qos pri map", 16) == 0) { + hclge_dbg_dump_qos_pri_map(hdev); + } else if (strncmp(cmd_buf, "dump qos buf cfg", 16) == 0) { + hclge_dbg_dump_qos_buf_cfg(hdev); + } else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) { + hclge_dbg_dump_mng_table(hdev); + } else if (strncmp(cmd_buf, "dump reg", 8) == 0) { + hclge_dbg_dump_reg_cmd(hdev, cmd_buf); + } else { + dev_info(&hdev->pdev->dev, "unknown command\n"); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h new file mode 100644 index 000000000000..d055fda41775 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h @@ -0,0 +1,713 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2018-2019 Hisilicon Limited. 
*/ + +#ifndef __HCLGE_DEBUGFS_H +#define __HCLGE_DEBUGFS_H + +#define HCLGE_DBG_BUF_LEN 256 +#define HCLGE_DBG_MNG_TBL_MAX 64 + +#define HCLGE_DBG_MNG_VLAN_MASK_B BIT(0) +#define HCLGE_DBG_MNG_MAC_MASK_B BIT(1) +#define HCLGE_DBG_MNG_ETHER_MASK_B BIT(2) +#define HCLGE_DBG_MNG_E_TYPE_B BIT(11) +#define HCLGE_DBG_MNG_DROP_B BIT(13) +#define HCLGE_DBG_MNG_VLAN_TAG 0x0FFF +#define HCLGE_DBG_MNG_PF_ID 0x0007 +#define HCLGE_DBG_MNG_VF_ID 0x00FF + +/* Get DFX BD number offset */ +#define HCLGE_DBG_DFX_BIOS_OFFSET 1 +#define HCLGE_DBG_DFX_SSU_0_OFFSET 2 +#define HCLGE_DBG_DFX_SSU_1_OFFSET 3 +#define HCLGE_DBG_DFX_IGU_OFFSET 4 +#define HCLGE_DBG_DFX_RPU_0_OFFSET 5 + +#define HCLGE_DBG_DFX_RPU_1_OFFSET 6 +#define HCLGE_DBG_DFX_NCSI_OFFSET 7 +#define HCLGE_DBG_DFX_RTC_OFFSET 8 +#define HCLGE_DBG_DFX_PPP_OFFSET 9 +#define HCLGE_DBG_DFX_RCB_OFFSET 10 +#define HCLGE_DBG_DFX_TQP_OFFSET 11 + +#define HCLGE_DBG_DFX_SSU_2_OFFSET 12 + +#pragma pack(1) + +struct hclge_qos_pri_map_cmd { + u8 pri0_tc : 4, + pri1_tc : 4; + u8 pri2_tc : 4, + pri3_tc : 4; + u8 pri4_tc : 4, + pri5_tc : 4; + u8 pri6_tc : 4, + pri7_tc : 4; + u8 vlan_pri : 4, + rev : 4; +}; + +struct hclge_dbg_bitmap_cmd { + union { + u8 bitmap; + struct { + u8 bit0 : 1, + bit1 : 1, + bit2 : 1, + bit3 : 1, + bit4 : 1, + bit5 : 1, + bit6 : 1, + bit7 : 1; + }; + }; +}; + +struct hclge_dbg_dfx_message { + int flag; + char message[60]; +}; + +#pragma pack() + +static struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = { + {false, "Reserved"}, + {true, "BP_CPU_STATE"}, + {true, "DFX_MSIX_INFO_NIC_0"}, + {true, "DFX_MSIX_INFO_NIC_1"}, + {true, "DFX_MSIX_INFO_NIC_2"}, + {true, "DFX_MSIX_INFO_NIC_3"}, + + {true, "DFX_MSIX_INFO_ROC_0"}, + {true, "DFX_MSIX_INFO_ROC_1"}, + {true, "DFX_MSIX_INFO_ROC_2"}, + {true, "DFX_MSIX_INFO_ROC_3"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = { + {false, "Reserved"}, + {true, "SSU_ETS_PORT_STATUS"}, + {true, "SSU_ETS_TCG_STATUS"}, + {false, "Reserved"}, + {false, "Reserved"}, + {true, "SSU_BP_STATUS_0"}, + + {true, "SSU_BP_STATUS_1"}, + {true, "SSU_BP_STATUS_2"}, + {true, "SSU_BP_STATUS_3"}, + {true, "SSU_BP_STATUS_4"}, + {true, "SSU_BP_STATUS_5"}, + {true, "SSU_MAC_TX_PFC_IND"}, + + {true, "MAC_SSU_RX_PFC_IND"}, + {true, "BTMP_AGEING_ST_B0"}, + {true, "BTMP_AGEING_ST_B1"}, + {true, "BTMP_AGEING_ST_B2"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "FULL_DROP_NUM"}, + {true, "PART_DROP_NUM"}, + {true, "PPP_KEY_DROP_NUM"}, + {true, "PPP_RLT_DROP_NUM"}, + {true, "LO_PRI_UNICAST_RLT_DROP_NUM"}, + {true, "HI_PRI_MULTICAST_RLT_DROP_NUM"}, + + {true, "LO_PRI_MULTICAST_RLT_DROP_NUM"}, + {true, "NCSI_PACKET_CURR_BUFFER_CNT"}, + {true, "BTMP_AGEING_RLS_CNT_BANK0"}, + {true, "BTMP_AGEING_RLS_CNT_BANK1"}, + {true, "BTMP_AGEING_RLS_CNT_BANK2"}, + {true, "SSU_MB_RD_RLT_DROP_CNT"}, + + {true, "SSU_PPP_MAC_KEY_NUM_L"}, + {true, "SSU_PPP_MAC_KEY_NUM_H"}, + {true, "SSU_PPP_HOST_KEY_NUM_L"}, + {true, "SSU_PPP_HOST_KEY_NUM_H"}, + {true, "PPP_SSU_MAC_RLT_NUM_L"}, + {true, "PPP_SSU_MAC_RLT_NUM_H"}, + + {true, "PPP_SSU_HOST_RLT_NUM_L"}, + {true, "PPP_SSU_HOST_RLT_NUM_H"}, + {true, "NCSI_RX_PACKET_IN_CNT_L"}, + {true, "NCSI_RX_PACKET_IN_CNT_H"}, + {true, "NCSI_TX_PACKET_OUT_CNT_L"}, + {true, "NCSI_TX_PACKET_OUT_CNT_H"}, + + {true, "SSU_KEY_DROP_NUM"}, + {true, "MB_UNCOPY_NUM"}, + {true, "RX_OQ_DROP_PKT_CNT"}, + {true, "TX_OQ_DROP_PKT_CNT"}, + {true, "BANK_UNBALANCE_DROP_CNT"}, + {true, "BANK_UNBALANCE_RX_DROP_CNT"}, + + {true, "NIC_L2_ERR_DROP_PKT_CNT"}, + 
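+	/* Each entry above and below names one 32-bit word of the SSU DFX
+	 * block in the order the firmware returns them; the dump code in
+	 * hclge_dbg_dump_reg_common() walks this table in lock-step with
+	 * the descriptor data and only prints entries whose .flag is true,
+	 * so the 'false' placeholders for reserved words keep the indexes
+	 * aligned and must not be removed.
+	 */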
{true, "ROC_L2_ERR_DROP_PKT_CNT"}, + {true, "NIC_L2_ERR_DROP_PKT_CNT_RX"}, + {true, "ROC_L2_ERR_DROP_PKT_CNT_RX"}, + {true, "RX_OQ_GLB_DROP_PKT_CNT"}, + {false, "Reserved"}, + + {true, "LO_PRI_UNICAST_CUR_CNT"}, + {true, "HI_PRI_MULTICAST_CUR_CNT"}, + {true, "LO_PRI_MULTICAST_CUR_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = { + {true, "prt_id"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_0"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_1"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_2"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_3"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_4"}, + + {true, "PACKET_TC_CURR_BUFFER_CNT_5"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_6"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_7"}, + {true, "PACKET_CURR_BUFFER_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "RX_PACKET_IN_CNT_L"}, + {true, "RX_PACKET_IN_CNT_H"}, + {true, "RX_PACKET_OUT_CNT_L"}, + {true, "RX_PACKET_OUT_CNT_H"}, + {true, "TX_PACKET_IN_CNT_L"}, + {true, "TX_PACKET_IN_CNT_H"}, + + {true, "TX_PACKET_OUT_CNT_L"}, + {true, "TX_PACKET_OUT_CNT_H"}, + {true, "ROC_RX_PACKET_IN_CNT_L"}, + {true, "ROC_RX_PACKET_IN_CNT_H"}, + {true, "ROC_TX_PACKET_OUT_CNT_L"}, + {true, "ROC_TX_PACKET_OUT_CNT_H"}, + + {true, "RX_PACKET_TC_IN_CNT_0_L"}, + {true, "RX_PACKET_TC_IN_CNT_0_H"}, + {true, "RX_PACKET_TC_IN_CNT_1_L"}, + {true, "RX_PACKET_TC_IN_CNT_1_H"}, + {true, "RX_PACKET_TC_IN_CNT_2_L"}, + {true, "RX_PACKET_TC_IN_CNT_2_H"}, + + {true, "RX_PACKET_TC_IN_CNT_3_L"}, + {true, "RX_PACKET_TC_IN_CNT_3_H"}, + {true, "RX_PACKET_TC_IN_CNT_4_L"}, + {true, "RX_PACKET_TC_IN_CNT_4_H"}, + {true, "RX_PACKET_TC_IN_CNT_5_L"}, + {true, "RX_PACKET_TC_IN_CNT_5_H"}, + + {true, "RX_PACKET_TC_IN_CNT_6_L"}, + {true, "RX_PACKET_TC_IN_CNT_6_H"}, + {true, "RX_PACKET_TC_IN_CNT_7_L"}, + {true, "RX_PACKET_TC_IN_CNT_7_H"}, + {true, "RX_PACKET_TC_OUT_CNT_0_L"}, + {true, "RX_PACKET_TC_OUT_CNT_0_H"}, + + {true, "RX_PACKET_TC_OUT_CNT_1_L"}, + {true, "RX_PACKET_TC_OUT_CNT_1_H"}, + {true, "RX_PACKET_TC_OUT_CNT_2_L"}, + {true, "RX_PACKET_TC_OUT_CNT_2_H"}, + {true, "RX_PACKET_TC_OUT_CNT_3_L"}, + {true, "RX_PACKET_TC_OUT_CNT_3_H"}, + + {true, "RX_PACKET_TC_OUT_CNT_4_L"}, + {true, "RX_PACKET_TC_OUT_CNT_4_H"}, + {true, "RX_PACKET_TC_OUT_CNT_5_L"}, + {true, "RX_PACKET_TC_OUT_CNT_5_H"}, + {true, "RX_PACKET_TC_OUT_CNT_6_L"}, + {true, "RX_PACKET_TC_OUT_CNT_6_H"}, + + {true, "RX_PACKET_TC_OUT_CNT_7_L"}, + {true, "RX_PACKET_TC_OUT_CNT_7_H"}, + {true, "TX_PACKET_TC_IN_CNT_0_L"}, + {true, "TX_PACKET_TC_IN_CNT_0_H"}, + {true, "TX_PACKET_TC_IN_CNT_1_L"}, + {true, "TX_PACKET_TC_IN_CNT_1_H"}, + + {true, "TX_PACKET_TC_IN_CNT_2_L"}, + {true, "TX_PACKET_TC_IN_CNT_2_H"}, + {true, "TX_PACKET_TC_IN_CNT_3_L"}, + {true, "TX_PACKET_TC_IN_CNT_3_H"}, + {true, "TX_PACKET_TC_IN_CNT_4_L"}, + {true, "TX_PACKET_TC_IN_CNT_4_H"}, + + {true, "TX_PACKET_TC_IN_CNT_5_L"}, + {true, "TX_PACKET_TC_IN_CNT_5_H"}, + {true, "TX_PACKET_TC_IN_CNT_6_L"}, + {true, "TX_PACKET_TC_IN_CNT_6_H"}, + {true, "TX_PACKET_TC_IN_CNT_7_L"}, + {true, "TX_PACKET_TC_IN_CNT_7_H"}, + + {true, "TX_PACKET_TC_OUT_CNT_0_L"}, + {true, "TX_PACKET_TC_OUT_CNT_0_H"}, + {true, "TX_PACKET_TC_OUT_CNT_1_L"}, + {true, "TX_PACKET_TC_OUT_CNT_1_H"}, + {true, "TX_PACKET_TC_OUT_CNT_2_L"}, + {true, "TX_PACKET_TC_OUT_CNT_2_H"}, + + {true, "TX_PACKET_TC_OUT_CNT_3_L"}, + {true, "TX_PACKET_TC_OUT_CNT_3_H"}, + {true, "TX_PACKET_TC_OUT_CNT_4_L"}, + {true, "TX_PACKET_TC_OUT_CNT_4_H"}, + {true, "TX_PACKET_TC_OUT_CNT_5_L"}, + {true, "TX_PACKET_TC_OUT_CNT_5_H"}, + + {true, 
"TX_PACKET_TC_OUT_CNT_6_L"}, + {true, "TX_PACKET_TC_OUT_CNT_6_H"}, + {true, "TX_PACKET_TC_OUT_CNT_7_L"}, + {true, "TX_PACKET_TC_OUT_CNT_7_H"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = { + {true, "OQ_INDEX"}, + {true, "QUEUE_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = { + {true, "prt_id"}, + {true, "IGU_RX_ERR_PKT"}, + {true, "IGU_RX_NO_SOF_PKT"}, + {true, "EGU_TX_1588_SHORT_PKT"}, + {true, "EGU_TX_1588_PKT"}, + {true, "EGU_TX_ERR_PKT"}, + + {true, "IGU_RX_OUT_L2_PKT"}, + {true, "IGU_RX_OUT_L3_PKT"}, + {true, "IGU_RX_OUT_L4_PKT"}, + {true, "IGU_RX_IN_L2_PKT"}, + {true, "IGU_RX_IN_L3_PKT"}, + {true, "IGU_RX_IN_L4_PKT"}, + + {true, "IGU_RX_EL3E_PKT"}, + {true, "IGU_RX_EL4E_PKT"}, + {true, "IGU_RX_L3E_PKT"}, + {true, "IGU_RX_L4E_PKT"}, + {true, "IGU_RX_ROCEE_PKT"}, + {true, "IGU_RX_OUT_UDP0_PKT"}, + + {true, "IGU_RX_IN_UDP0_PKT"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "IGU_RX_OVERSIZE_PKT_L"}, + {true, "IGU_RX_OVERSIZE_PKT_H"}, + {true, "IGU_RX_UNDERSIZE_PKT_L"}, + {true, "IGU_RX_UNDERSIZE_PKT_H"}, + {true, "IGU_RX_OUT_ALL_PKT_L"}, + {true, "IGU_RX_OUT_ALL_PKT_H"}, + + {true, "IGU_TX_OUT_ALL_PKT_L"}, + {true, "IGU_TX_OUT_ALL_PKT_H"}, + {true, "IGU_RX_UNI_PKT_L"}, + {true, "IGU_RX_UNI_PKT_H"}, + {true, "IGU_RX_MULTI_PKT_L"}, + {true, "IGU_RX_MULTI_PKT_H"}, + + {true, "IGU_RX_BROAD_PKT_L"}, + {true, "IGU_RX_BROAD_PKT_H"}, + {true, "EGU_TX_OUT_ALL_PKT_L"}, + {true, "EGU_TX_OUT_ALL_PKT_H"}, + {true, "EGU_TX_UNI_PKT_L"}, + {true, "EGU_TX_UNI_PKT_H"}, + + {true, "EGU_TX_MULTI_PKT_L"}, + {true, "EGU_TX_MULTI_PKT_H"}, + {true, "EGU_TX_BROAD_PKT_L"}, + {true, "EGU_TX_BROAD_PKT_H"}, + {true, "IGU_TX_KEY_NUM_L"}, + {true, "IGU_TX_KEY_NUM_H"}, + + {true, "IGU_RX_NON_TUN_PKT_L"}, + {true, "IGU_RX_NON_TUN_PKT_H"}, + {true, "IGU_RX_TUN_PKT_L"}, + {true, "IGU_RX_TUN_PKT_H"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = { + {true, "tc_queue_num"}, + {true, "FSM_DFX_ST0"}, + {true, "FSM_DFX_ST1"}, + {true, "RPU_RX_PKT_DROP_CNT"}, + {true, "BUF_WAIT_TIMEOUT"}, + {true, "BUF_WAIT_TIMEOUT_QID"}, +}; + +static struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = { + {false, "Reserved"}, + {true, "FIFO_DFX_ST0"}, + {true, "FIFO_DFX_ST1"}, + {true, "FIFO_DFX_ST2"}, + {true, "FIFO_DFX_ST3"}, + {true, "FIFO_DFX_ST4"}, + + {true, "FIFO_DFX_ST5"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = { + {false, "Reserved"}, + {true, "NCSI_EGU_TX_FIFO_STS"}, + {true, "NCSI_PAUSE_STATUS"}, + {true, "NCSI_RX_CTRL_DMAC_ERR_CNT"}, + {true, "NCSI_RX_CTRL_SMAC_ERR_CNT"}, + {true, "NCSI_RX_CTRL_CKS_ERR_CNT"}, + + {true, "NCSI_RX_CTRL_PKT_CNT"}, + {true, "NCSI_RX_PT_DMAC_ERR_CNT"}, + {true, "NCSI_RX_PT_SMAC_ERR_CNT"}, + {true, "NCSI_RX_PT_PKT_CNT"}, + {true, "NCSI_RX_FCS_ERR_CNT"}, + {true, "NCSI_TX_CTRL_DMAC_ERR_CNT"}, + + {true, "NCSI_TX_CTRL_SMAC_ERR_CNT"}, + {true, "NCSI_TX_CTRL_PKT_CNT"}, + {true, "NCSI_TX_PT_DMAC_ERR_CNT"}, + {true, "NCSI_TX_PT_SMAC_ERR_CNT"}, + {true, "NCSI_TX_PT_PKT_CNT"}, + {true, "NCSI_TX_PT_PKT_TRUNC_CNT"}, + + {true, "NCSI_TX_PT_PKT_ERR_CNT"}, + {true, "NCSI_TX_CTRL_PKT_ERR_CNT"}, + {true, "NCSI_RX_CTRL_PKT_TRUNC_CNT"}, + {true, 
"NCSI_RX_CTRL_PKT_CFLIT_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "NCSI_MAC_RX_OCTETS_OK"}, + {true, "NCSI_MAC_RX_OCTETS_BAD"}, + {true, "NCSI_MAC_RX_UC_PKTS"}, + {true, "NCSI_MAC_RX_MC_PKTS"}, + {true, "NCSI_MAC_RX_BC_PKTS"}, + {true, "NCSI_MAC_RX_PKTS_64OCTETS"}, + + {true, "NCSI_MAC_RX_PKTS_65TO127OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_128TO255OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_255TO511OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_512TO1023OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_1024TO1518OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_1519TOMAXOCTETS"}, + + {true, "NCSI_MAC_RX_FCS_ERRORS"}, + {true, "NCSI_MAC_RX_LONG_ERRORS"}, + {true, "NCSI_MAC_RX_JABBER_ERRORS"}, + {true, "NCSI_MAC_RX_RUNT_ERR_CNT"}, + {true, "NCSI_MAC_RX_SHORT_ERR_CNT"}, + {true, "NCSI_MAC_RX_FILT_PKT_CNT"}, + + {true, "NCSI_MAC_RX_OCTETS_TOTAL_FILT"}, + {true, "NCSI_MAC_TX_OCTETS_OK"}, + {true, "NCSI_MAC_TX_OCTETS_BAD"}, + {true, "NCSI_MAC_TX_UC_PKTS"}, + {true, "NCSI_MAC_TX_MC_PKTS"}, + {true, "NCSI_MAC_TX_BC_PKTS"}, + + {true, "NCSI_MAC_TX_PKTS_64OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_65TO127OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_128TO255OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_256TO511OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_512TO1023OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_1024TO1518OCTETS"}, + + {true, "NCSI_MAC_TX_PKTS_1519TOMAXOCTETS"}, + {true, "NCSI_MAC_TX_UNDERRUN"}, + {true, "NCSI_MAC_TX_CRC_ERROR"}, + {true, "NCSI_MAC_TX_PAUSE_FRAMES"}, + {true, "NCSI_MAC_RX_PAD_PKTS"}, + {true, "NCSI_MAC_RX_PAUSE_FRAMES"}, +}; + +static struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = { + {false, "Reserved"}, + {true, "LGE_IGU_AFIFO_DFX_0"}, + {true, "LGE_IGU_AFIFO_DFX_1"}, + {true, "LGE_IGU_AFIFO_DFX_2"}, + {true, "LGE_IGU_AFIFO_DFX_3"}, + {true, "LGE_IGU_AFIFO_DFX_4"}, + + {true, "LGE_IGU_AFIFO_DFX_5"}, + {true, "LGE_IGU_AFIFO_DFX_6"}, + {true, "LGE_IGU_AFIFO_DFX_7"}, + {true, "LGE_EGU_AFIFO_DFX_0"}, + {true, "LGE_EGU_AFIFO_DFX_1"}, + {true, "LGE_EGU_AFIFO_DFX_2"}, + + {true, "LGE_EGU_AFIFO_DFX_3"}, + {true, "LGE_EGU_AFIFO_DFX_4"}, + {true, "LGE_EGU_AFIFO_DFX_5"}, + {true, "LGE_EGU_AFIFO_DFX_6"}, + {true, "LGE_EGU_AFIFO_DFX_7"}, + {true, "CGE_IGU_AFIFO_DFX_0"}, + + {true, "CGE_IGU_AFIFO_DFX_1"}, + {true, "CGE_EGU_AFIFO_DFX_0"}, + {true, "CGE_EGU_AFIFO_DFX_1"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = { + {false, "Reserved"}, + {true, "DROP_FROM_PRT_PKT_CNT"}, + {true, "DROP_FROM_HOST_PKT_CNT"}, + {true, "DROP_TX_VLAN_PROC_CNT"}, + {true, "DROP_MNG_CNT"}, + {true, "DROP_FD_CNT"}, + + {true, "DROP_NO_DST_CNT"}, + {true, "DROP_MC_MBID_FULL_CNT"}, + {true, "DROP_SC_FILTERED"}, + {true, "PPP_MC_DROP_PKT_CNT"}, + {true, "DROP_PT_CNT"}, + {true, "DROP_MAC_ANTI_SPOOF_CNT"}, + + {true, "DROP_IG_VFV_CNT"}, + {true, "DROP_IG_PRTV_CNT"}, + {true, "DROP_CNM_PFC_PAUSE_CNT"}, + {true, "DROP_TORUS_TC_CNT"}, + {true, "DROP_TORUS_LPBK_CNT"}, + {true, "PPP_HFS_STS"}, + + {true, "PPP_MC_RSLT_STS"}, + {true, "PPP_P3U_STS"}, + {true, "PPP_RSLT_DESCR_STS"}, + {true, "PPP_UMV_STS_0"}, + {true, "PPP_UMV_STS_1"}, + {true, "PPP_VFV_STS"}, + + {true, "PPP_GRO_KEY_CNT"}, + {true, "PPP_GRO_INFO_CNT"}, + {true, "PPP_GRO_DROP_CNT"}, + {true, "PPP_GRO_OUT_CNT"}, + {true, "PPP_GRO_KEY_MATCH_DATA_CNT"}, + {true, "PPP_GRO_KEY_MATCH_TCAM_CNT"}, + + {true, "PPP_GRO_INFO_MATCH_CNT"}, + {true, "PPP_GRO_FREE_ENTRY_CNT"}, + {true, "PPP_GRO_INNER_DFX_SIGNAL"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "GET_RX_PKT_CNT_L"}, + {true, 
"GET_RX_PKT_CNT_H"}, + {true, "GET_TX_PKT_CNT_L"}, + {true, "GET_TX_PKT_CNT_H"}, + {true, "SEND_UC_PRT2HOST_PKT_CNT_L"}, + {true, "SEND_UC_PRT2HOST_PKT_CNT_H"}, + + {true, "SEND_UC_PRT2PRT_PKT_CNT_L"}, + {true, "SEND_UC_PRT2PRT_PKT_CNT_H"}, + {true, "SEND_UC_HOST2HOST_PKT_CNT_L"}, + {true, "SEND_UC_HOST2HOST_PKT_CNT_H"}, + {true, "SEND_UC_HOST2PRT_PKT_CNT_L"}, + {true, "SEND_UC_HOST2PRT_PKT_CNT_H"}, + + {true, "SEND_MC_FROM_PRT_CNT_L"}, + {true, "SEND_MC_FROM_PRT_CNT_H"}, + {true, "SEND_MC_FROM_HOST_CNT_L"}, + {true, "SEND_MC_FROM_HOST_CNT_H"}, + {true, "SSU_MC_RD_CNT_L"}, + {true, "SSU_MC_RD_CNT_H"}, + + {true, "SSU_MC_DROP_CNT_L"}, + {true, "SSU_MC_DROP_CNT_H"}, + {true, "SSU_MC_RD_PKT_CNT_L"}, + {true, "SSU_MC_RD_PKT_CNT_H"}, + {true, "PPP_MC_2HOST_PKT_CNT_L"}, + {true, "PPP_MC_2HOST_PKT_CNT_H"}, + + {true, "PPP_MC_2PRT_PKT_CNT_L"}, + {true, "PPP_MC_2PRT_PKT_CNT_H"}, + {true, "NTSNOS_PKT_CNT_L"}, + {true, "NTSNOS_PKT_CNT_H"}, + {true, "NTUP_PKT_CNT_L"}, + {true, "NTUP_PKT_CNT_H"}, + + {true, "NTLCL_PKT_CNT_L"}, + {true, "NTLCL_PKT_CNT_H"}, + {true, "NTTGT_PKT_CNT_L"}, + {true, "NTTGT_PKT_CNT_H"}, + {true, "RTNS_PKT_CNT_L"}, + {true, "RTNS_PKT_CNT_H"}, + + {true, "RTLPBK_PKT_CNT_L"}, + {true, "RTLPBK_PKT_CNT_H"}, + {true, "NR_PKT_CNT_L"}, + {true, "NR_PKT_CNT_H"}, + {true, "RR_PKT_CNT_L"}, + {true, "RR_PKT_CNT_H"}, + + {true, "MNG_TBL_HIT_CNT_L"}, + {true, "MNG_TBL_HIT_CNT_H"}, + {true, "FD_TBL_HIT_CNT_L"}, + {true, "FD_TBL_HIT_CNT_H"}, + {true, "FD_LKUP_CNT_L"}, + {true, "FD_LKUP_CNT_H"}, + + {true, "BC_HIT_CNT_L"}, + {true, "BC_HIT_CNT_H"}, + {true, "UM_TBL_UC_HIT_CNT_L"}, + {true, "UM_TBL_UC_HIT_CNT_H"}, + {true, "UM_TBL_MC_HIT_CNT_L"}, + {true, "UM_TBL_MC_HIT_CNT_H"}, + + {true, "UM_TBL_VMDQ1_HIT_CNT_L"}, + {true, "UM_TBL_VMDQ1_HIT_CNT_H"}, + {true, "MTA_TBL_HIT_CNT_L"}, + {true, "MTA_TBL_HIT_CNT_H"}, + {true, "FWD_BONDING_HIT_CNT_L"}, + {true, "FWD_BONDING_HIT_CNT_H"}, + + {true, "PROMIS_TBL_HIT_CNT_L"}, + {true, "PROMIS_TBL_HIT_CNT_H"}, + {true, "GET_TUNL_PKT_CNT_L"}, + {true, "GET_TUNL_PKT_CNT_H"}, + {true, "GET_BMC_PKT_CNT_L"}, + {true, "GET_BMC_PKT_CNT_H"}, + + {true, "SEND_UC_PRT2BMC_PKT_CNT_L"}, + {true, "SEND_UC_PRT2BMC_PKT_CNT_H"}, + {true, "SEND_UC_HOST2BMC_PKT_CNT_L"}, + {true, "SEND_UC_HOST2BMC_PKT_CNT_H"}, + {true, "SEND_UC_BMC2HOST_PKT_CNT_L"}, + {true, "SEND_UC_BMC2HOST_PKT_CNT_H"}, + + {true, "SEND_UC_BMC2PRT_PKT_CNT_L"}, + {true, "SEND_UC_BMC2PRT_PKT_CNT_H"}, + {true, "PPP_MC_2BMC_PKT_CNT_L"}, + {true, "PPP_MC_2BMC_PKT_CNT_H"}, + {true, "VLAN_MIRR_CNT_L"}, + {true, "VLAN_MIRR_CNT_H"}, + + {true, "IG_MIRR_CNT_L"}, + {true, "IG_MIRR_CNT_H"}, + {true, "EG_MIRR_CNT_L"}, + {true, "EG_MIRR_CNT_H"}, + {true, "RX_DEFAULT_HOST_HIT_CNT_L"}, + {true, "RX_DEFAULT_HOST_HIT_CNT_H"}, + + {true, "LAN_PAIR_CNT_L"}, + {true, "LAN_PAIR_CNT_H"}, + {true, "UM_TBL_MC_HIT_PKT_CNT_L"}, + {true, "UM_TBL_MC_HIT_PKT_CNT_H"}, + {true, "MTA_TBL_HIT_PKT_CNT_L"}, + {true, "MTA_TBL_HIT_PKT_CNT_H"}, + + {true, "PROMIS_TBL_HIT_PKT_CNT_L"}, + {true, "PROMIS_TBL_HIT_PKT_CNT_H"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = { + {false, "Reserved"}, + {true, "FSM_DFX_ST0"}, + {true, "FSM_DFX_ST1"}, + {true, "FSM_DFX_ST2"}, + {true, "FIFO_DFX_ST0"}, + {true, "FIFO_DFX_ST1"}, + + {true, "FIFO_DFX_ST2"}, + {true, "FIFO_DFX_ST3"}, + {true, "FIFO_DFX_ST4"}, + {true, "FIFO_DFX_ST5"}, + {true, "FIFO_DFX_ST6"}, + {true, "FIFO_DFX_ST7"}, + + {true, "FIFO_DFX_ST8"}, + {true, "FIFO_DFX_ST9"}, + {true, 
"FIFO_DFX_ST10"}, + {true, "FIFO_DFX_ST11"}, + {true, "Q_CREDIT_VLD_0"}, + {true, "Q_CREDIT_VLD_1"}, + + {true, "Q_CREDIT_VLD_2"}, + {true, "Q_CREDIT_VLD_3"}, + {true, "Q_CREDIT_VLD_4"}, + {true, "Q_CREDIT_VLD_5"}, + {true, "Q_CREDIT_VLD_6"}, + {true, "Q_CREDIT_VLD_7"}, + + {true, "Q_CREDIT_VLD_8"}, + {true, "Q_CREDIT_VLD_9"}, + {true, "Q_CREDIT_VLD_10"}, + {true, "Q_CREDIT_VLD_11"}, + {true, "Q_CREDIT_VLD_12"}, + {true, "Q_CREDIT_VLD_13"}, + + {true, "Q_CREDIT_VLD_14"}, + {true, "Q_CREDIT_VLD_15"}, + {true, "Q_CREDIT_VLD_16"}, + {true, "Q_CREDIT_VLD_17"}, + {true, "Q_CREDIT_VLD_18"}, + {true, "Q_CREDIT_VLD_19"}, + + {true, "Q_CREDIT_VLD_20"}, + {true, "Q_CREDIT_VLD_21"}, + {true, "Q_CREDIT_VLD_22"}, + {true, "Q_CREDIT_VLD_23"}, + {true, "Q_CREDIT_VLD_24"}, + {true, "Q_CREDIT_VLD_25"}, + + {true, "Q_CREDIT_VLD_26"}, + {true, "Q_CREDIT_VLD_27"}, + {true, "Q_CREDIT_VLD_28"}, + {true, "Q_CREDIT_VLD_29"}, + {true, "Q_CREDIT_VLD_30"}, + {true, "Q_CREDIT_VLD_31"}, + + {true, "GRO_BD_SERR_CNT"}, + {true, "GRO_CONTEXT_SERR_CNT"}, + {true, "RX_STASH_CFG_SERR_CNT"}, + {true, "AXI_RD_FBD_SERR_CNT"}, + {true, "GRO_BD_MERR_CNT"}, + {true, "GRO_CONTEXT_MERR_CNT"}, + + {true, "RX_STASH_CFG_MERR_CNT"}, + {true, "AXI_RD_FBD_MERR_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = { + {true, "q_num"}, + {true, "RCB_CFG_RX_RING_TAIL"}, + {true, "RCB_CFG_RX_RING_HEAD"}, + {true, "RCB_CFG_RX_RING_FBDNUM"}, + {true, "RCB_CFG_RX_RING_OFFSET"}, + {true, "RCB_CFG_RX_RING_FBDOFFSET"}, + + {true, "RCB_CFG_RX_RING_PKTNUM_RECORD"}, + {true, "RCB_CFG_TX_RING_TAIL"}, + {true, "RCB_CFG_TX_RING_HEAD"}, + {true, "RCB_CFG_TX_RING_FBDNUM"}, + {true, "RCB_CFG_TX_RING_OFFSET"}, + {true, "RCB_CFG_TX_RING_EBDNUM"}, +}; + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index 6da9e22d82d0..d0f654123b9b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -4,78 +4,39 @@ #include "hclge_err.h" static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = { - { .int_msk = BIT(0), .msg = "imp_itcm0_ecc_1bit_err" }, { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err" }, - { .int_msk = BIT(2), .msg = "imp_itcm1_ecc_1bit_err" }, { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err" }, - { .int_msk = BIT(4), .msg = "imp_itcm2_ecc_1bit_err" }, { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err" }, - { .int_msk = BIT(6), .msg = "imp_itcm3_ecc_1bit_err" }, { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err" }, - { .int_msk = BIT(8), .msg = "imp_dtcm0_mem0_ecc_1bit_err" }, { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err" }, - { .int_msk = BIT(10), .msg = "imp_dtcm0_mem1_ecc_1bit_err" }, { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err" }, - { .int_msk = BIT(12), .msg = "imp_dtcm1_mem0_ecc_1bit_err" }, { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err" }, - { .int_msk = BIT(14), .msg = "imp_dtcm1_mem1_ecc_1bit_err" }, { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err" }, - { /* sentinel */ } -}; - -static const struct hclge_hw_error hclge_imp_itcm4_ecc_int[] = { - { .int_msk = BIT(0), .msg = "imp_itcm4_ecc_1bit_err" }, - { .int_msk = BIT(1), .msg = "imp_itcm4_ecc_mbit_err" }, + { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err" }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = { - { .int_msk = 
BIT(0), .msg = "cmdq_nic_rx_depth_ecc_1bit_err" }, { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err" }, - { .int_msk = BIT(2), .msg = "cmdq_nic_tx_depth_ecc_1bit_err" }, { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err" }, - { .int_msk = BIT(4), .msg = "cmdq_nic_rx_tail_ecc_1bit_err" }, { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err" }, - { .int_msk = BIT(6), .msg = "cmdq_nic_tx_tail_ecc_1bit_err" }, { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err" }, - { .int_msk = BIT(8), .msg = "cmdq_nic_rx_head_ecc_1bit_err" }, { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err" }, - { .int_msk = BIT(10), .msg = "cmdq_nic_tx_head_ecc_1bit_err" }, { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err" }, - { .int_msk = BIT(12), .msg = "cmdq_nic_rx_addr_ecc_1bit_err" }, { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err" }, - { .int_msk = BIT(14), .msg = "cmdq_nic_tx_addr_ecc_1bit_err" }, { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err" }, - { /* sentinel */ } -}; - -static const struct hclge_hw_error hclge_cmdq_rocee_mem_ecc_int[] = { - { .int_msk = BIT(0), .msg = "cmdq_rocee_rx_depth_ecc_1bit_err" }, - { .int_msk = BIT(1), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err" }, - { .int_msk = BIT(2), .msg = "cmdq_rocee_tx_depth_ecc_1bit_err" }, - { .int_msk = BIT(3), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err" }, - { .int_msk = BIT(4), .msg = "cmdq_rocee_rx_tail_ecc_1bit_err" }, - { .int_msk = BIT(5), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err" }, - { .int_msk = BIT(6), .msg = "cmdq_rocee_tx_tail_ecc_1bit_err" }, - { .int_msk = BIT(7), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err" }, - { .int_msk = BIT(8), .msg = "cmdq_rocee_rx_head_ecc_1bit_err" }, - { .int_msk = BIT(9), .msg = "cmdq_rocee_rx_head_ecc_mbit_err" }, - { .int_msk = BIT(10), .msg = "cmdq_rocee_tx_head_ecc_1bit_err" }, - { .int_msk = BIT(11), .msg = "cmdq_rocee_tx_head_ecc_mbit_err" }, - { .int_msk = BIT(12), .msg = "cmdq_rocee_rx_addr_ecc_1bit_err" }, - { .int_msk = BIT(13), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err" }, - { .int_msk = BIT(14), .msg = "cmdq_rocee_tx_addr_ecc_1bit_err" }, - { .int_msk = BIT(15), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err" }, + { .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err" }, + { .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err" }, + { .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err" }, + { .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err" }, + { .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err" }, + { .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err" }, + { .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err" }, + { .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err" }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = { - { .int_msk = BIT(0), .msg = "tqp_int_cfg_even_ecc_1bit_err" }, - { .int_msk = BIT(1), .msg = "tqp_int_cfg_odd_ecc_1bit_err" }, - { .int_msk = BIT(2), .msg = "tqp_int_ctrl_even_ecc_1bit_err" }, - { .int_msk = BIT(3), .msg = "tqp_int_ctrl_odd_ecc_1bit_err" }, - { .int_msk = BIT(4), .msg = "tx_que_scan_int_ecc_1bit_err" }, - { .int_msk = BIT(5), .msg = "rx_que_scan_int_ecc_1bit_err" }, { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err" }, { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err" }, { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err" }, @@ -85,15 +46,19 @@ static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = { { /* sentinel */ } }; -static const struct hclge_hw_error 
hclge_igu_com_err_int[] = { +static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = { + { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err" }, + { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_igu_int[] = { { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err" }, - { .int_msk = BIT(1), .msg = "igu_rx_buf0_ecc_1bit_err" }, { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err" }, - { .int_msk = BIT(3), .msg = "igu_rx_buf1_ecc_1bit_err" }, { /* sentinel */ } }; -static const struct hclge_hw_error hclge_igu_egu_tnl_err_int[] = { +static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = { { .int_msk = BIT(0), .msg = "rx_buf_overflow" }, { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow" }, { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow" }, @@ -104,51 +69,11 @@ static const struct hclge_hw_error hclge_igu_egu_tnl_err_int[] = { }; static const struct hclge_hw_error hclge_ncsi_err_int[] = { - { .int_msk = BIT(0), .msg = "ncsi_tx_ecc_1bit_err" }, { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err" }, { /* sentinel */ } }; -static const struct hclge_hw_error hclge_ppp_mpf_int0[] = { - { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_1bit_err" }, - { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_1bit_err" }, - { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_1bit_err" }, - { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_1bit_err" }, - { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_1bit_err" }, - { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_1bit_err" }, - { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_1bit_err" }, - { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_1bit_err" }, - { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_1bit_err" }, - { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_1bit_err" }, - { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_1bit_err" }, - { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_1bit_err" }, - { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_1bit_err" }, - { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_1bit_err" }, - { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_1bit_err" }, - { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_1bit_err" }, - { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_1bit_err" }, - { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_1bit_err" }, - { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_1bit_err" }, - { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_1bit_err" }, - { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_1bit_err" }, - { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_1bit_err" }, - { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_1bit_err" }, - { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_1bit_err" }, - { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_1bit_err" }, - { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_1bit_err" }, - { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_1bit_err" }, - { .int_msk = BIT(27), - .msg = "flow_director_ad_mem0_ecc_1bit_err" }, - { .int_msk = BIT(28), - .msg = "flow_director_ad_mem1_ecc_1bit_err" }, - { .int_msk = BIT(29), - .msg = "rx_vlan_tag_memory_ecc_1bit_err" }, - { .int_msk = BIT(30), - .msg = "Tx_UP_mapping_config_mem_ecc_1bit_err" }, - { /* sentinel */ } -}; - -static const struct hclge_hw_error hclge_ppp_mpf_int1[] = { +static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = { { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err" }, { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err" }, { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err" }, @@ -187,23 +112,13 @@ static const 
struct hclge_hw_error hclge_ppp_mpf_int1[] = { { /* sentinel */ } }; -static const struct hclge_hw_error hclge_ppp_pf_int[] = { - { .int_msk = BIT(0), .msg = "Tx_vlan_tag_err" }, +static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = { + { .int_msk = BIT(0), .msg = "tx_vlan_tag_err" }, { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err" }, { /* sentinel */ } }; -static const struct hclge_hw_error hclge_ppp_mpf_int2[] = { - { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_1bit_err" }, - { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_1bit_err" }, - { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_1bit_err" }, - { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_1bit_err" }, - { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_1bit_err" }, - { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_1bit_err" }, - { /* sentinel */ } -}; - -static const struct hclge_hw_error hclge_ppp_mpf_int3[] = { +static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = { { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err" }, { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err" }, { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err" }, @@ -213,145 +128,248 @@ static const struct hclge_hw_error hclge_ppp_mpf_int3[] = { { /* sentinel */ } }; -struct hclge_tm_sch_ecc_info { - const char *name; -}; - -static const struct hclge_tm_sch_ecc_info hclge_tm_sch_ecc_err[7][15] = { - { - { .name = "QSET_QUEUE_CTRL:PRI_LEN TAB" }, - { .name = "QSET_QUEUE_CTRL:SPA_LEN TAB" }, - { .name = "QSET_QUEUE_CTRL:SPB_LEN TAB" }, - { .name = "QSET_QUEUE_CTRL:WRRA_LEN TAB" }, - { .name = "QSET_QUEUE_CTRL:WRRB_LEN TAB" }, - { .name = "QSET_QUEUE_CTRL:SPA_HPTR TAB" }, - { .name = "QSET_QUEUE_CTRL:SPB_HPTR TAB" }, - { .name = "QSET_QUEUE_CTRL:WRRA_HPTR TAB" }, - { .name = "QSET_QUEUE_CTRL:WRRB_HPTR TAB" }, - { .name = "QSET_QUEUE_CTRL:QS_LINKLIST TAB" }, - { .name = "QSET_QUEUE_CTRL:SPA_TPTR TAB" }, - { .name = "QSET_QUEUE_CTRL:SPB_TPTR TAB" }, - { .name = "QSET_QUEUE_CTRL:WRRA_TPTR TAB" }, - { .name = "QSET_QUEUE_CTRL:WRRB_TPTR TAB" }, - { .name = "QSET_QUEUE_CTRL:QS_DEFICITCNT TAB" }, - }, - { - { .name = "ROCE_QUEUE_CTRL:QS_LEN TAB" }, - { .name = "ROCE_QUEUE_CTRL:QS_TPTR TAB" }, - { .name = "ROCE_QUEUE_CTRL:QS_HPTR TAB" }, - { .name = "ROCE_QUEUE_CTRL:QLINKLIST TAB" }, - { .name = "ROCE_QUEUE_CTRL:QCLEN TAB" }, - }, - { - { .name = "NIC_QUEUE_CTRL:QS_LEN TAB" }, - { .name = "NIC_QUEUE_CTRL:QS_TPTR TAB" }, - { .name = "NIC_QUEUE_CTRL:QS_HPTR TAB" }, - { .name = "NIC_QUEUE_CTRL:QLINKLIST TAB" }, - { .name = "NIC_QUEUE_CTRL:QCLEN TAB" }, - }, - { - { .name = "RAM_CFG_CTRL:CSHAP TAB" }, - { .name = "RAM_CFG_CTRL:PSHAP TAB" }, - }, - { - { .name = "SHAPER_CTRL:PSHAP TAB" }, - }, - { - { .name = "MSCH_CTRL" }, - }, - { - { .name = "TOP_CTRL" }, - }, -}; - -static const struct hclge_hw_error hclge_tm_sch_err_int[] = { - { .int_msk = BIT(0), .msg = "tm_sch_ecc_1bit_err" }, +static const struct hclge_hw_error hclge_tm_sch_rint[] = { { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err" }, - { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_full_err" }, - { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_empty_err" }, - { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_full_err" }, - { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_empty_err" }, - { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_full_err" }, - { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_empty_err" }, - { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_full_err" }, - { .int_msk = 
BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_empty_err" }, - { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_full_err" }, - { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_empty_err" }, + { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err" }, + { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err" }, + { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err" }, + { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err" }, + { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err" }, + { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err" }, + { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err" }, + { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err" }, + { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err" }, + { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err" }, { .int_msk = BIT(12), - .msg = "tm_sch_port_shap_offset_fifo_wr_full_err" }, + .msg = "tm_sch_port_shap_offset_fifo_wr_err" }, { .int_msk = BIT(13), - .msg = "tm_sch_port_shap_offset_fifo_rd_empty_err" }, + .msg = "tm_sch_port_shap_offset_fifo_rd_err" }, { .int_msk = BIT(14), - .msg = "tm_sch_pg_pshap_offset_fifo_wr_full_err" }, + .msg = "tm_sch_pg_pshap_offset_fifo_wr_err" }, { .int_msk = BIT(15), - .msg = "tm_sch_pg_pshap_offset_fifo_rd_empty_err" }, + .msg = "tm_sch_pg_pshap_offset_fifo_rd_err" }, { .int_msk = BIT(16), - .msg = "tm_sch_pg_cshap_offset_fifo_wr_full_err" }, + .msg = "tm_sch_pg_cshap_offset_fifo_wr_err" }, { .int_msk = BIT(17), - .msg = "tm_sch_pg_cshap_offset_fifo_rd_empty_err" }, + .msg = "tm_sch_pg_cshap_offset_fifo_rd_err" }, { .int_msk = BIT(18), - .msg = "tm_sch_pri_pshap_offset_fifo_wr_full_err" }, + .msg = "tm_sch_pri_pshap_offset_fifo_wr_err" }, { .int_msk = BIT(19), - .msg = "tm_sch_pri_pshap_offset_fifo_rd_empty_err" }, + .msg = "tm_sch_pri_pshap_offset_fifo_rd_err" }, { .int_msk = BIT(20), - .msg = "tm_sch_pri_cshap_offset_fifo_wr_full_err" }, + .msg = "tm_sch_pri_cshap_offset_fifo_wr_err" }, { .int_msk = BIT(21), - .msg = "tm_sch_pri_cshap_offset_fifo_rd_empty_err" }, - { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_full_err" }, - { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_empty_err" }, - { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_full_err" }, - { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_empty_err" }, - { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_full_err" }, - { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_empty_err" }, - { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_full_err" }, - { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_empty_err" }, - { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_full_err" }, - { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_empty_err" }, + .msg = "tm_sch_pri_cshap_offset_fifo_rd_err" }, + { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err" }, + { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err" }, + { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err" }, + { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err" }, + { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err" }, + { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err" }, + { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err" }, + { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err" }, + { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err" }, + { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_qcn_fifo_rint[] = { + { .int_msk = BIT(0), .msg = 
"qcn_shap_gp0_sch_fifo_rd_err" }, + { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err" }, + { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err" }, + { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err" }, + { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err" }, + { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err" }, + { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err" }, + { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err" }, + { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err" }, + { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err" }, + { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err" }, + { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err" }, + { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err" }, + { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err" }, + { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err" }, + { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err" }, + { .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err" }, + { .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err" }, { /* sentinel */ } }; -static const struct hclge_hw_error hclge_qcn_ecc_err_int[] = { - { .int_msk = BIT(0), .msg = "qcn_byte_mem_ecc_1bit_err" }, +static const struct hclge_hw_error hclge_qcn_ecc_rint[] = { { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err" }, - { .int_msk = BIT(2), .msg = "qcn_time_mem_ecc_1bit_err" }, { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err" }, - { .int_msk = BIT(4), .msg = "qcn_fb_mem_ecc_1bit_err" }, { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err" }, - { .int_msk = BIT(6), .msg = "qcn_link_mem_ecc_1bit_err" }, { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err" }, - { .int_msk = BIT(8), .msg = "qcn_rate_mem_ecc_1bit_err" }, { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err" }, - { .int_msk = BIT(10), .msg = "qcn_tmplt_mem_ecc_1bit_err" }, { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err" }, - { .int_msk = BIT(12), .msg = "qcn_shap_cfg_mem_ecc_1bit_err" }, { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err" }, - { .int_msk = BIT(14), .msg = "qcn_gp0_barrel_mem_ecc_1bit_err" }, { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err" }, - { .int_msk = BIT(16), .msg = "qcn_gp1_barrel_mem_ecc_1bit_err" }, { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err" }, - { .int_msk = BIT(18), .msg = "qcn_gp2_barrel_mem_ecc_1bit_err" }, { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err" }, - { .int_msk = BIT(20), .msg = "qcn_gp3_barral_mem_ecc_1bit_err" }, { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err" }, { /* sentinel */ } }; -static void hclge_log_error(struct device *dev, - const struct hclge_hw_error *err_list, +static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = { + { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err" }, + { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err" }, + { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err" }, + { .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err" }, + { .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err" }, + { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err" }, + { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err" }, + { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = { + { .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err" }, + { .int_msk = BIT(14), .msg = 
"rpu_rx_pkt_bit33_ecc_mbit_err" }, + { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err" }, + { .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err" }, + { .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err" }, + { .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err" }, + { .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err" }, + { .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err" }, + { .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err" }, + { .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err" }, + { .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err" }, + { .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err" }, + { .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err" }, + { .int_msk = BIT(26), .msg = "rd_bus_err" }, + { .int_msk = BIT(27), .msg = "wr_bus_err" }, + { .int_msk = BIT(28), .msg = "reg_search_miss" }, + { .int_msk = BIT(29), .msg = "rx_q_search_miss" }, + { .int_msk = BIT(30), .msg = "ooo_ecc_err_detect" }, + { .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = { + { .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err" }, + { .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err" }, + { .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err" }, + { .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = { + { .int_msk = BIT(0), .msg = "over_8bd_no_fe" }, + { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err" }, + { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err" }, + { .int_msk = BIT(3), .msg = "tx_rd_fbd_poison" }, + { .int_msk = BIT(4), .msg = "rx_rd_ebd_poison" }, + { .int_msk = BIT(5), .msg = "buf_wait_timeout" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ssu_com_err_int[] = { + { .int_msk = BIT(0), .msg = "buf_sum_err" }, + { .int_msk = BIT(1), .msg = "ppp_mb_num_err" }, + { .int_msk = BIT(2), .msg = "ppp_mbid_err" }, + { .int_msk = BIT(3), .msg = "ppp_rlt_mac_err" }, + { .int_msk = BIT(4), .msg = "ppp_rlt_host_err" }, + { .int_msk = BIT(5), .msg = "cks_edit_position_err" }, + { .int_msk = BIT(6), .msg = "cks_edit_condition_err" }, + { .int_msk = BIT(7), .msg = "vlan_edit_condition_err" }, + { .int_msk = BIT(8), .msg = "vlan_num_ot_err" }, + { .int_msk = BIT(9), .msg = "vlan_num_in_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = { + { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" }, + { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port" }, + { .int_msk = BIT(2), .msg = "igu_pkt_without_key_port" }, + { .int_msk = BIT(3), .msg = "roc_eof_mis_match_port" }, + { .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port" }, + { .int_msk = BIT(5), .msg = "igu_eof_mis_match_port" }, + { .int_msk = BIT(6), .msg = "roc_sof_mis_match_port" }, + { .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port" }, + { .int_msk = BIT(8), .msg = "igu_sof_mis_match_port" }, + { .int_msk = BIT(11), .msg = "ets_rd_int_rx_port" }, + { .int_msk = BIT(12), .msg = "ets_wr_int_rx_port" }, + { .int_msk = BIT(13), .msg = "ets_rd_int_tx_port" }, + { .int_msk = BIT(14), .msg = "ets_wr_int_tx_port" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = { + { .int_msk = BIT(0), .msg = "ig_mac_inf_int" }, + { .int_msk = BIT(1), .msg = "ig_host_inf_int" }, + { .int_msk = BIT(2), .msg = "ig_roc_buf_int" }, + { .int_msk = BIT(3), .msg = "ig_host_data_fifo_int" }, + { 
.int_msk = BIT(4), .msg = "ig_host_key_fifo_int" }, + { .int_msk = BIT(5), .msg = "tx_qcn_fifo_int" }, + { .int_msk = BIT(6), .msg = "rx_qcn_fifo_int" }, + { .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int" }, + { .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int" }, + { .int_msk = BIT(9), .msg = "qm_eof_fifo_int" }, + { .int_msk = BIT(10), .msg = "mb_rlt_fifo_int" }, + { .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int" }, + { .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int" }, + { .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int" }, + { .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int" }, + { .int_msk = BIT(15), .msg = "host_cmd_fifo_int" }, + { .int_msk = BIT(16), .msg = "mac_cmd_fifo_int" }, + { .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int" }, + { .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int" }, + { .int_msk = BIT(19), .msg = "dup_bitmap_empty_int" }, + { .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int" }, + { .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int" }, + { .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int" }, + { .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = { + { .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg" }, + { .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg" }, + { .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg" }, + { .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = { + { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" }, + { .int_msk = BIT(9), .msg = "low_water_line_err_port" }, + { .int_msk = BIT(10), .msg = "hi_water_line_err_port" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = { + { .int_msk = 0, .msg = "rocee qmm ovf: sgid invalid err" }, + { .int_msk = 0x4, .msg = "rocee qmm ovf: sgid ovf err" }, + { .int_msk = 0x8, .msg = "rocee qmm ovf: smac invalid err" }, + { .int_msk = 0xC, .msg = "rocee qmm ovf: smac ovf err" }, + { .int_msk = 0x10, .msg = "rocee qmm ovf: cqc invalid err" }, + { .int_msk = 0x11, .msg = "rocee qmm ovf: cqc ovf err" }, + { .int_msk = 0x12, .msg = "rocee qmm ovf: cqc hopnum err" }, + { .int_msk = 0x13, .msg = "rocee qmm ovf: cqc ba0 err" }, + { .int_msk = 0x14, .msg = "rocee qmm ovf: srqc invalid err" }, + { .int_msk = 0x15, .msg = "rocee qmm ovf: srqc ovf err" }, + { .int_msk = 0x16, .msg = "rocee qmm ovf: srqc hopnum err" }, + { .int_msk = 0x17, .msg = "rocee qmm ovf: srqc ba0 err" }, + { .int_msk = 0x18, .msg = "rocee qmm ovf: mpt invalid err" }, + { .int_msk = 0x19, .msg = "rocee qmm ovf: mpt ovf err" }, + { .int_msk = 0x1A, .msg = "rocee qmm ovf: mpt hopnum err" }, + { .int_msk = 0x1B, .msg = "rocee qmm ovf: mpt ba0 err" }, + { .int_msk = 0x1C, .msg = "rocee qmm ovf: qpc invalid err" }, + { .int_msk = 0x1D, .msg = "rocee qmm ovf: qpc ovf err" }, + { .int_msk = 0x1E, .msg = "rocee qmm ovf: qpc hopnum err" }, + { .int_msk = 0x1F, .msg = "rocee qmm ovf: qpc ba0 err" }, + { /* sentinel */ } +}; + +static void hclge_log_error(struct device *dev, char *reg, + const struct hclge_hw_error *err, u32 err_sts) { - const struct hclge_hw_error *err; - int i = 0; - - while (err_list[i].msg) { - err = &err_list[i]; - if (!(err->int_msk & err_sts)) { - i++; - continue; - } - dev_warn(dev, "%s [error status=0x%x] found\n", - err->msg, err_sts); - i++; + while (err->msg) { + if (err->int_msk & err_sts) + dev_warn(dev, "%s %s found [error status=0x%x]\n", + reg, err->msg, err_sts); + err++; } } @@ 
-391,96 +409,44 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev, return ret; } -/* hclge_cmd_clear_error: clear the error status - * @hdev: pointer to struct hclge_dev - * @desc: descriptor for describing the command - * @desc_src: prefilled descriptor from the previous command for reusing - * @cmd: command opcode - * @flag: flag for extended command structure - * - * This function clear the error status in the hw register/s using command - */ -static int hclge_cmd_clear_error(struct hclge_dev *hdev, - struct hclge_desc *desc, - struct hclge_desc *desc_src, - u32 cmd, u16 flag) -{ - struct device *dev = &hdev->pdev->dev; - int num = 1; - int ret, i; - - if (cmd) { - hclge_cmd_setup_basic_desc(&desc[0], cmd, false); - if (flag) { - desc[0].flag |= cpu_to_le16(flag); - hclge_cmd_setup_basic_desc(&desc[1], cmd, false); - num = 2; - } - if (desc_src) { - for (i = 0; i < 6; i++) { - desc[0].data[i] = desc_src[0].data[i]; - if (flag) - desc[1].data[i] = desc_src[1].data[i]; - } - } - } else { - hclge_cmd_reuse_desc(&desc[0], false); - if (flag) { - desc[0].flag |= cpu_to_le16(flag); - hclge_cmd_reuse_desc(&desc[1], false); - num = 2; - } - } - ret = hclge_cmd_send(&hdev->hw, &desc[0], num); - if (ret) - dev_err(dev, "clear error cmd failed (%d)\n", ret); - - return ret; -} - -static int hclge_enable_common_error(struct hclge_dev *hdev, bool en) +static int hclge_config_common_hw_err_int(struct hclge_dev *hdev, bool en) { struct device *dev = &hdev->pdev->dev; struct hclge_desc desc[2]; int ret; + /* configure common error interrupts */ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false); desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false); if (en) { - /* enable COMMON error interrupts */ desc[0].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN); desc[0].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN | HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN); desc[0].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN); - desc[0].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN); + desc[0].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN | + HCLGE_MSIX_SRAM_ECC_ERR_INT_EN); desc[0].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN); - } else { - /* disable COMMON error interrupts */ - desc[0].data[0] = 0; - desc[0].data[2] = 0; - desc[0].data[3] = 0; - desc[0].data[4] = 0; - desc[0].data[5] = 0; } + desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK); desc[1].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK | HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK); desc[1].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK); - desc[1].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN_MASK); + desc[1].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN_MASK | + HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK); desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK); ret = hclge_cmd_send(&hdev->hw, &desc[0], 2); if (ret) dev_err(dev, - "failed(%d) to enable/disable COMMON err interrupts\n", - ret); + "fail(%d) to configure common err interrupts\n", ret); return ret; } -static int hclge_enable_ncsi_error(struct hclge_dev *hdev, bool en) +static int hclge_config_ncsi_hw_err_int(struct hclge_dev *hdev, bool en) { struct device *dev = &hdev->pdev->dev; struct hclge_desc desc; @@ -489,74 +455,65 @@ static int hclge_enable_ncsi_error(struct hclge_dev *hdev, bool en) if (hdev->pdev->revision < 0x21) return 0; - /* enable/disable NCSI error interrupts */ + /* configure NCSI error interrupts */ hclge_cmd_setup_basic_desc(&desc, 
HCLGE_NCSI_INT_EN, false); if (en) desc.data[0] = cpu_to_le32(HCLGE_NCSI_ERR_INT_EN); - else - desc.data[0] = 0; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) dev_err(dev, - "failed(%d) to enable/disable NCSI error interrupts\n", - ret); + "fail(%d) to configure NCSI error interrupts\n", ret); return ret; } -static int hclge_enable_igu_egu_error(struct hclge_dev *hdev, bool en) +static int hclge_config_igu_egu_hw_err_int(struct hclge_dev *hdev, bool en) { struct device *dev = &hdev->pdev->dev; struct hclge_desc desc; int ret; - /* enable/disable error interrupts */ + /* configure IGU,EGU error interrupts */ hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false); if (en) desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN); - else - desc.data[0] = 0; + desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(dev, - "failed(%d) to enable/disable IGU common interrupts\n", - ret); + "fail(%d) to configure IGU common interrupts\n", ret); return ret; } hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_EGU_TNL_INT_EN, false); if (en) desc.data[0] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN); - else - desc.data[0] = 0; + desc.data[1] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN_MASK); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(dev, - "failed(%d) to enable/disable IGU-EGU TNL interrupts\n", - ret); + "fail(%d) to configure IGU-EGU TNL interrupts\n", ret); return ret; } - ret = hclge_enable_ncsi_error(hdev, en); - if (ret) - dev_err(dev, "fail(%d) to en/disable err int\n", ret); + ret = hclge_config_ncsi_hw_err_int(hdev, en); return ret; } -static int hclge_enable_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd, +static int hclge_config_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd, bool en) { struct device *dev = &hdev->pdev->dev; struct hclge_desc desc[2]; int ret; - /* enable/disable PPP error interrupts */ + /* configure PPP error interrupts */ hclge_cmd_setup_basic_desc(&desc[0], cmd, false); desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); hclge_cmd_setup_basic_desc(&desc[1], cmd, false); @@ -567,24 +524,24 @@ static int hclge_enable_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd, cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN); desc[0].data[1] = cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN); - } else { - desc[0].data[0] = 0; - desc[0].data[1] = 0; + desc[0].data[4] = cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN); } + desc[1].data[0] = cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK); desc[1].data[1] = cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK); + if (hdev->pdev->revision >= 0x21) + desc[1].data[2] = + cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN_MASK); } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) { if (en) { desc[0].data[0] = cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN); desc[0].data[1] = cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN); - } else { - desc[0].data[0] = 0; - desc[0].data[1] = 0; } + desc[1].data[0] = cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK); desc[1].data[1] = @@ -593,491 +550,863 @@ static int hclge_enable_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd, ret = hclge_cmd_send(&hdev->hw, &desc[0], 2); if (ret) - dev_err(dev, - "failed(%d) to enable/disable PPP error interrupts\n", - ret); + dev_err(dev, "fail(%d) to configure PPP error intr\n", ret); return ret; } -static int hclge_enable_ppp_error(struct hclge_dev *hdev, bool en) +static int hclge_config_ppp_hw_err_int(struct hclge_dev *hdev, bool en) { - struct device *dev = &hdev->pdev->dev; int ret; - ret = hclge_enable_ppp_error_interrupt(hdev, 
HCLGE_PPP_CMD0_INT_CMD,
+	ret = hclge_config_ppp_error_interrupt(hdev, HCLGE_PPP_CMD0_INT_CMD,
 					    en);
-	if (ret) {
-		dev_err(dev,
-			"failed(%d) to enable/disable PPP error intr 0,1\n",
-			ret);
+	if (ret)
 		return ret;
-	}
 
-	ret = hclge_enable_ppp_error_interrupt(hdev, HCLGE_PPP_CMD1_INT_CMD,
+	ret = hclge_config_ppp_error_interrupt(hdev, HCLGE_PPP_CMD1_INT_CMD,
 					    en);
-	if (ret)
-		dev_err(dev,
-			"failed(%d) to enable/disable PPP error intr 2,3\n",
-			ret);
 
 	return ret;
 }
 
-int hclge_enable_tm_hw_error(struct hclge_dev *hdev, bool en)
+static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)
 {
 	struct device *dev = &hdev->pdev->dev;
 	struct hclge_desc desc;
 	int ret;
 
-	/* enable TM SCH hw errors */
+	/* configure TM SCH hw errors */
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_SCH_ECC_INT_EN, false);
 	if (en)
 		desc.data[0] = cpu_to_le32(HCLGE_TM_SCH_ECC_ERR_INT_EN);
-	else
-		desc.data[0] = 0;
 
 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 	if (ret) {
-		dev_err(dev, "failed(%d) to configure TM SCH errors\n", ret);
+		dev_err(dev, "fail(%d) to configure TM SCH errors\n", ret);
 		return ret;
 	}
 
-	/* enable TM QCN hw errors */
+	/* configure TM QCN hw errors */
 	ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG,
 				    0, 0, 0);
 	if (ret) {
-		dev_err(dev, "failed(%d) to read TM QCN CFG status\n", ret);
+		dev_err(dev, "fail(%d) to read TM QCN CFG status\n", ret);
 		return ret;
 	}
 
 	hclge_cmd_reuse_desc(&desc, false);
 	if (en)
 		desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN);
-	else
-		desc.data[1] = 0;
 
 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 	if (ret)
 		dev_err(dev,
-			"failed(%d) to configure TM QCN mem errors\n", ret);
+			"fail(%d) to configure TM QCN mem errors\n", ret);
 
 	return ret;
 }
 
-static void hclge_process_common_error(struct hclge_dev *hdev,
-				       enum hclge_err_int_type type)
+static int hclge_config_mac_err_int(struct hclge_dev *hdev, bool en)
 {
 	struct device *dev = &hdev->pdev->dev;
-	struct hclge_desc desc[2];
-	u32 err_sts;
+	struct hclge_desc desc;
 	int ret;
 
-	/* read err sts */
-	ret = hclge_cmd_query_error(hdev, &desc[0],
-				    HCLGE_COMMON_ECC_INT_CFG,
-				    HCLGE_CMD_FLAG_NEXT, 0, 0);
-	if (ret) {
+	/* configure MAC common error interrupts */
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_COMMON_INT_EN, false);
+	if (en)
+		desc.data[0] = cpu_to_le32(HCLGE_MAC_COMMON_ERR_INT_EN);
+
+	desc.data[1] = cpu_to_le32(HCLGE_MAC_COMMON_ERR_INT_EN_MASK);
+
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
 		dev_err(dev,
-			"failed(=%d) to query COMMON error interrupt status\n",
-			ret);
-		return;
-	}
+			"fail(%d) to configure MAC COMMON error intr\n", ret);
 
-	/* log err */
-	err_sts = (le32_to_cpu(desc[0].data[0])) & HCLGE_IMP_TCM_ECC_INT_MASK;
-	hclge_log_error(dev, &hclge_imp_tcm_ecc_int[0], err_sts);
+	return ret;
+}
 
-	err_sts = (le32_to_cpu(desc[0].data[1])) & HCLGE_CMDQ_ECC_INT_MASK;
-	hclge_log_error(dev, &hclge_cmdq_nic_mem_ecc_int[0], err_sts);
+static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
+					     bool en)
+{
+	struct device *dev = &hdev->pdev->dev;
+	struct hclge_desc desc[2];
+	int num = 1;
+	int ret;
 
-	err_sts = (le32_to_cpu(desc[0].data[1]) >> HCLGE_CMDQ_ROC_ECC_INT_SHIFT)
-		   & HCLGE_CMDQ_ECC_INT_MASK;
-	hclge_log_error(dev, &hclge_cmdq_rocee_mem_ecc_int[0], err_sts);
+	/* configure PPU error interrupts */
+	if (cmd == HCLGE_PPU_MPF_ECC_INT_CMD) {
+		hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
+		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+		hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
+		if (en) {
+			desc[0].data[0] =
+				cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN);
+			desc[0].data[1] =
+				cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN);
+			desc[1].data[3] =
+				cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN);
+			desc[1].data[4] =
+				cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN);
+		}
 
-	if ((le32_to_cpu(desc[0].data[3])) & BIT(0))
-		dev_warn(dev, "imp_rd_data_poison_err found\n");
+		desc[1].data[0] =
+			cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK);
+		desc[1].data[1] =
+			cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK);
+		desc[1].data[2] =
+			cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK);
+		desc[1].data[3] |=
+			cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK);
+		num = 2;
+	} else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) {
+		hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
+		if (en)
+			desc[0].data[0] =
+				cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2);
 
-	err_sts = (le32_to_cpu(desc[0].data[3]) >> HCLGE_TQP_ECC_INT_SHIFT) &
-		   HCLGE_TQP_ECC_INT_MASK;
-	hclge_log_error(dev, &hclge_tqp_int_ecc_int[0], err_sts);
+		desc[0].data[2] =
+			cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK);
+	} else if (cmd == HCLGE_PPU_PF_OTHER_INT_CMD) {
+		hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
+		if (en)
+			desc[0].data[0] =
+				cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN);
 
-	err_sts = (le32_to_cpu(desc[0].data[5])) &
-		   HCLGE_IMP_ITCM4_ECC_INT_MASK;
-	hclge_log_error(dev, &hclge_imp_itcm4_ecc_int[0], err_sts);
+		desc[0].data[2] =
+			cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK);
+	} else {
+		dev_err(dev, "Invalid cmd to configure PPU error interrupts\n");
+		return -EINVAL;
+	}
 
-	/* clear error interrupts */
-	desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_CLR_MASK);
-	desc[1].data[1] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_CLR_MASK |
-				HCLGE_CMDQ_ROCEE_ECC_CLR_MASK);
-	desc[1].data[3] = cpu_to_le32(HCLGE_TQP_IMP_ERR_CLR_MASK);
-	desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_CLR_MASK);
+	ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
 
-	ret = hclge_cmd_clear_error(hdev, &desc[0], NULL, 0,
-				    HCLGE_CMD_FLAG_NEXT);
-	if (ret)
-		dev_err(dev,
-			"failed(%d) to clear COMMON error interrupt status\n",
-			ret);
+	return ret;
 }
 
-static void hclge_process_ncsi_error(struct hclge_dev *hdev,
-				     enum hclge_err_int_type type)
+static int hclge_config_ppu_hw_err_int(struct hclge_dev *hdev, bool en)
 {
 	struct device *dev = &hdev->pdev->dev;
-	struct hclge_desc desc_rd;
-	struct hclge_desc desc_wr;
-	u32 err_sts;
 	int ret;
 
-	if (hdev->pdev->revision < 0x21)
-		return;
-
-	/* read NCSI error status */
-	ret = hclge_cmd_query_error(hdev, &desc_rd, HCLGE_NCSI_INT_QUERY,
-				    0, 1, HCLGE_NCSI_ERR_INT_TYPE);
+	ret = hclge_config_ppu_error_interrupts(hdev, HCLGE_PPU_MPF_ECC_INT_CMD,
+						en);
 	if (ret) {
-		dev_err(dev,
-			"failed(=%d) to query NCSI error interrupt status\n",
+		dev_err(dev, "fail(%d) to configure PPU MPF ECC error intr\n",
 			ret);
-		return;
+		return ret;
 	}
 
-	/* log err */
-	err_sts = le32_to_cpu(desc_rd.data[0]);
-	hclge_log_error(dev, &hclge_ncsi_err_int[0], err_sts);
+	ret = hclge_config_ppu_error_interrupts(hdev,
+						HCLGE_PPU_MPF_OTHER_INT_CMD,
+						en);
+	if (ret) {
+		dev_err(dev, "fail(%d) to configure PPU MPF other intr\n", ret);
+		return ret;
+	}
 
-	/* clear err int */
-	ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
-				    HCLGE_NCSI_INT_CLR, 0);
+	ret = hclge_config_ppu_error_interrupts(hdev,
+						HCLGE_PPU_PF_OTHER_INT_CMD, en);
 	if (ret)
-		dev_err(dev, "failed(=%d) to clear NCSI interrupt status\n",
+		dev_err(dev, "fail(%d) to configure PPU PF error interrupts\n",
 			ret);
+	return ret;
 }
 
-static void hclge_process_igu_egu_error(struct hclge_dev *hdev,
-					enum hclge_err_int_type int_type)
+static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
 {
 	struct device *dev = &hdev->pdev->dev;
-	struct hclge_desc desc_rd;
-	struct hclge_desc desc_wr;
-	u32 err_sts;
+	struct hclge_desc 
desc[2];
 	int ret;
 
-	/* read IGU common err sts */
-	ret = hclge_cmd_query_error(hdev, &desc_rd,
-				    HCLGE_IGU_COMMON_INT_QUERY,
-				    0, 1, int_type);
-	if (ret) {
-		dev_err(dev, "failed(=%d) to query IGU common int status\n",
-			ret);
-		return;
+	/* configure SSU ecc error interrupts */
+	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_ECC_INT_CMD, false);
+	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_ECC_INT_CMD, false);
+	if (en) {
+		desc[0].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN);
+		desc[0].data[1] =
+			cpu_to_le32(HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN);
+		desc[0].data[4] = cpu_to_le32(HCLGE_SSU_BIT32_ECC_ERR_INT_EN);
 	}
 
-	/* log err */
-	err_sts = le32_to_cpu(desc_rd.data[0]) &
-		   HCLGE_IGU_COM_INT_MASK;
-	hclge_log_error(dev, &hclge_igu_com_err_int[0], err_sts);
+	desc[1].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN_MASK);
+	desc[1].data[1] = cpu_to_le32(HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN_MASK);
+	desc[1].data[2] = cpu_to_le32(HCLGE_SSU_BIT32_ECC_ERR_INT_EN_MASK);
 
-	/* clear err int */
-	ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
-				    HCLGE_IGU_COMMON_INT_CLR, 0);
+	ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
 	if (ret) {
-		dev_err(dev, "failed(=%d) to clear IGU common int status\n",
-			ret);
-		return;
+		dev_err(dev,
+			"fail(%d) to configure SSU ECC error interrupt\n", ret);
+		return ret;
 	}
 
-	/* read IGU-EGU TNL err sts */
-	ret = hclge_cmd_query_error(hdev, &desc_rd,
-				    HCLGE_IGU_EGU_TNL_INT_QUERY,
-				    0, 1, int_type);
-	if (ret) {
-		dev_err(dev, "failed(=%d) to query IGU-EGU TNL int status\n",
-			ret);
-		return;
+	/* configure SSU common error interrupts */
+	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_COMMON_INT_CMD, false);
+	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_COMMON_INT_CMD, false);
+
+	if (en) {
+		if (hdev->pdev->revision >= 0x21)
+			desc[0].data[0] =
+				cpu_to_le32(HCLGE_SSU_COMMON_INT_EN);
+		else
+			desc[0].data[0] =
+				cpu_to_le32(HCLGE_SSU_COMMON_INT_EN & ~BIT(5));
+		desc[0].data[1] = cpu_to_le32(HCLGE_SSU_PORT_BASED_ERR_INT_EN);
+		desc[0].data[2] =
+			cpu_to_le32(HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN);
 	}
 
-	/* log err */
-	err_sts = le32_to_cpu(desc_rd.data[0]) &
-		   HCLGE_IGU_EGU_TNL_INT_MASK;
-	hclge_log_error(dev, &hclge_igu_egu_tnl_err_int[0], err_sts);
+	desc[1].data[0] = cpu_to_le32(HCLGE_SSU_COMMON_INT_EN_MASK |
+				      HCLGE_SSU_PORT_BASED_ERR_INT_EN_MASK);
+	desc[1].data[1] = cpu_to_le32(HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK);
 
-	/* clear err int */
-	ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
-				    HCLGE_IGU_EGU_TNL_INT_CLR, 0);
-	if (ret) {
-		dev_err(dev, "failed(=%d) to clear IGU-EGU TNL int status\n",
-			ret);
-		return;
-	}
+	ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
+	if (ret)
+		dev_err(dev,
+			"fail(%d) to configure SSU COMMON error intr\n", ret);
 
-	hclge_process_ncsi_error(hdev, HCLGE_ERR_INT_RAS_NFE);
+	return ret;
 }
 
-static int hclge_log_and_clear_ppp_error(struct hclge_dev *hdev, u32 cmd,
-					 enum hclge_err_int_type int_type)
+#define HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type) \
+	do { \
+		if (ae_dev->ops->set_default_reset_request) \
+			ae_dev->ops->set_default_reset_request(ae_dev, \
+							       reset_type); \
+	} while (0)
+
+/* hclge_handle_mpf_ras_error: handle all main PF RAS errors
+ * @hdev: pointer to struct hclge_dev
+ * @desc: descriptor for describing the command
+ * @num: number of extended command structures
+ *
+ * This function handles all the main PF RAS errors in the
+ * hw registers using commands.
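+ *
+ * Note that the same descriptors are reused afterwards, via
+ * hclge_cmd_reuse_desc(), to clear the error status once it has been logged.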
+ */ +static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, + struct hclge_desc *desc, + int num) { + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; struct device *dev = &hdev->pdev->dev; - const struct hclge_hw_error *hw_err_lst1, *hw_err_lst2, *hw_err_lst3; - struct hclge_desc desc[2]; - u32 err_sts; + __le32 *desc_data; + u32 status; int ret; - /* read PPP INT sts */ - ret = hclge_cmd_query_error(hdev, &desc[0], cmd, - HCLGE_CMD_FLAG_NEXT, 5, int_type); + /* query all main PF RAS errors */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_MPF_RAS_INT, + true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], num); if (ret) { - dev_err(dev, "failed(=%d) to query PPP interrupt status\n", - ret); - return -EIO; + dev_err(dev, "query all mpf ras int cmd failed (%d)\n", ret); + return ret; } - /* log error */ - if (cmd == HCLGE_PPP_CMD0_INT_CMD) { - hw_err_lst1 = &hclge_ppp_mpf_int0[0]; - hw_err_lst2 = &hclge_ppp_mpf_int1[0]; - hw_err_lst3 = &hclge_ppp_pf_int[0]; - } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) { - hw_err_lst1 = &hclge_ppp_mpf_int2[0]; - hw_err_lst2 = &hclge_ppp_mpf_int3[0]; - } else { - dev_err(dev, "invalid command(=%d)\n", cmd); - return -EINVAL; + /* log HNS common errors */ + status = le32_to_cpu(desc[0].data[0]); + if (status) { + hclge_log_error(dev, "IMP_TCM_ECC_INT_STS", + &hclge_imp_tcm_ecc_int[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); } - err_sts = le32_to_cpu(desc[0].data[2]); - if (err_sts) - hclge_log_error(dev, hw_err_lst1, err_sts); + status = le32_to_cpu(desc[0].data[1]); + if (status) { + hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS", + &hclge_cmdq_nic_mem_ecc_int[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + } - err_sts = le32_to_cpu(desc[0].data[3]); - if (err_sts) - hclge_log_error(dev, hw_err_lst2, err_sts); + if ((le32_to_cpu(desc[0].data[2])) & BIT(0)) { + dev_warn(dev, "imp_rd_data_poison_err found\n"); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + } - if (cmd == HCLGE_PPP_CMD0_INT_CMD) { - err_sts = (le32_to_cpu(desc[0].data[4]) >> 8) & 0x3; - if (err_sts) - hclge_log_error(dev, hw_err_lst3, err_sts); + status = le32_to_cpu(desc[0].data[3]); + if (status) { + hclge_log_error(dev, "TQP_INT_ECC_INT_STS", + &hclge_tqp_int_ecc_int[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); } - /* clear PPP INT */ - ret = hclge_cmd_clear_error(hdev, &desc[0], NULL, 0, - HCLGE_CMD_FLAG_NEXT); - if (ret) { - dev_err(dev, "failed(=%d) to clear PPP interrupt status\n", - ret); - return -EIO; + status = le32_to_cpu(desc[0].data[4]); + if (status) { + hclge_log_error(dev, "MSIX_ECC_INT_STS", + &hclge_msix_sram_ecc_int[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); } - return 0; + /* log SSU(Storage Switch Unit) errors */ + desc_data = (__le32 *)&desc[2]; + status = le32_to_cpu(*(desc_data + 2)); + if (status) { + dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_0 ssu_ecc_mbit_int[31:0]\n"); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + } + + status = le32_to_cpu(*(desc_data + 3)) & BIT(0); + if (status) { + dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_ecc_mbit_int[32]\n"); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + } + + status = le32_to_cpu(*(desc_data + 4)) & HCLGE_SSU_COMMON_ERR_INT_MASK; + if (status) { + hclge_log_error(dev, "SSU_COMMON_ERR_INT", + &hclge_ssu_com_err_int[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + } + + /* log IGU(Ingress Unit) errors */ + desc_data = (__le32 
*)&desc[3];
+	status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK;
+	if (status)
+		hclge_log_error(dev, "IGU_INT_STS",
+				&hclge_igu_int[0], status);
+
+	/* log PPP(Programmable Packet Process) errors */
+	desc_data = (__le32 *)&desc[4];
+	status = le32_to_cpu(*(desc_data + 1));
+	if (status)
+		hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
+				&hclge_ppp_mpf_abnormal_int_st1[0], status);
+
+	status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPP_MPF_INT_ST3_MASK;
+	if (status)
+		hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
+				&hclge_ppp_mpf_abnormal_int_st3[0], status);
+
+	/* log PPU(RCB) errors */
+	desc_data = (__le32 *)&desc[5];
+	status = le32_to_cpu(*(desc_data + 1));
+	if (status) {
+		dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST1 %s found\n",
+			 "rpu_rx_pkt_ecc_mbit_err");
+		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+	}
+
+	status = le32_to_cpu(*(desc_data + 2));
+	if (status) {
+		hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
+				&hclge_ppu_mpf_abnormal_int_st2[0], status);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+	}
+
+	status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPU_MPF_INT_ST3_MASK;
+	if (status) {
+		hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
+				&hclge_ppu_mpf_abnormal_int_st3[0], status);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+	}
+
+	/* log TM(Traffic Manager) errors */
+	desc_data = (__le32 *)&desc[6];
+	status = le32_to_cpu(*desc_data);
+	if (status) {
+		hclge_log_error(dev, "TM_SCH_RINT",
+				&hclge_tm_sch_rint[0], status);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+	}
+
+	/* log QCN(Quantized Congestion Control) errors */
+	desc_data = (__le32 *)&desc[7];
+	status = le32_to_cpu(*desc_data) & HCLGE_QCN_FIFO_INT_MASK;
+	if (status) {
+		hclge_log_error(dev, "QCN_FIFO_RINT",
+				&hclge_qcn_fifo_rint[0], status);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+	}
+
+	status = le32_to_cpu(*(desc_data + 1)) & HCLGE_QCN_ECC_INT_MASK;
+	if (status) {
+		hclge_log_error(dev, "QCN_ECC_RINT",
+				&hclge_qcn_ecc_rint[0], status);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+	}
+
+	/* log NCSI errors */
+	desc_data = (__le32 *)&desc[9];
+	status = le32_to_cpu(*desc_data) & HCLGE_NCSI_ECC_INT_MASK;
+	if (status) {
+		hclge_log_error(dev, "NCSI_ECC_INT_RPT",
+				&hclge_ncsi_err_int[0], status);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+	}
+
+	/* clear all main PF RAS errors */
+	hclge_cmd_reuse_desc(&desc[0], false);
+	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+
+	ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+	if (ret)
+		dev_err(dev, "clear all mpf ras int cmd failed (%d)\n", ret);
+
+	return ret;
 }
 
-static void hclge_process_ppp_error(struct hclge_dev *hdev,
-				    enum hclge_err_int_type int_type)
+/* hclge_handle_pf_ras_error: handle all PF RAS errors
+ * @hdev: pointer to struct hclge_dev
+ * @desc: descriptor for describing the command
+ * @num: number of extended command structures
+ *
+ * This function handles all the PF RAS errors in the
+ * hw registers using commands.
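+ *
+ * As in the main PF path, the descriptors are reused to clear the
+ * reported error status after it has been logged.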
+ */ +static int hclge_handle_pf_ras_error(struct hclge_dev *hdev, + struct hclge_desc *desc, + int num) { + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; struct device *dev = &hdev->pdev->dev; + __le32 *desc_data; + u32 status; int ret; - /* read PPP INT0,1 sts */ - ret = hclge_log_and_clear_ppp_error(hdev, HCLGE_PPP_CMD0_INT_CMD, - int_type); - if (ret < 0) { - dev_err(dev, "failed(=%d) to clear PPP interrupt 0,1 status\n", - ret); - return; + /* query all PF RAS errors */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_PF_RAS_INT, + true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + if (ret) { + dev_err(dev, "query all pf ras int cmd failed (%d)\n", ret); + return ret; } - /* read err PPP INT2,3 sts */ - ret = hclge_log_and_clear_ppp_error(hdev, HCLGE_PPP_CMD1_INT_CMD, - int_type); - if (ret < 0) - dev_err(dev, "failed(=%d) to clear PPP interrupt 2,3 status\n", - ret); + /* log SSU(Storage Switch Unit) errors */ + status = le32_to_cpu(desc[0].data[0]); + if (status) { + hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT", + &hclge_ssu_port_based_err_int[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + } + + status = le32_to_cpu(desc[0].data[1]); + if (status) { + hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT", + &hclge_ssu_fifo_overflow_int[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + } + + status = le32_to_cpu(desc[0].data[2]); + if (status) { + hclge_log_error(dev, "SSU_ETS_TCG_INT", + &hclge_ssu_ets_tcg_int[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + } + + /* log IGU(Ingress Unit) EGU(Egress Unit) TNL errors */ + desc_data = (__le32 *)&desc[1]; + status = le32_to_cpu(*desc_data) & HCLGE_IGU_EGU_TNL_INT_MASK; + if (status) + hclge_log_error(dev, "IGU_EGU_TNL_INT_STS", + &hclge_igu_egu_tnl_int[0], status); + + /* clear all PF RAS errors */ + hclge_cmd_reuse_desc(&desc[0], false); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + if (ret) + dev_err(dev, "clear all pf ras int cmd failed (%d)\n", ret); + + return ret; } -static void hclge_process_tm_sch_error(struct hclge_dev *hdev) +static int hclge_handle_all_ras_errors(struct hclge_dev *hdev) { struct device *dev = &hdev->pdev->dev; - const struct hclge_tm_sch_ecc_info *tm_sch_ecc_info; - struct hclge_desc desc; - u32 ecc_info; - u8 module_no; - u8 ram_no; + u32 mpf_bd_num, pf_bd_num, bd_num; + struct hclge_desc desc_bd; + struct hclge_desc *desc; int ret; - /* read TM scheduler errors */ - ret = hclge_cmd_query_error(hdev, &desc, - HCLGE_TM_SCH_MBIT_ECC_INFO_CMD, 0, 0, 0); + /* query the number of registers in the RAS int status */ + hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_RAS_INT_STS_BD_NUM, + true); + ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1); if (ret) { - dev_err(dev, "failed(%d) to read SCH mbit ECC err info\n", ret); - return; + dev_err(dev, "fail(%d) to query ras int status bd num\n", ret); + return ret; } - ecc_info = le32_to_cpu(desc.data[0]); + mpf_bd_num = le32_to_cpu(desc_bd.data[0]); + pf_bd_num = le32_to_cpu(desc_bd.data[1]); + bd_num = max_t(u32, mpf_bd_num, pf_bd_num); + + desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; - ret = hclge_cmd_query_error(hdev, &desc, - HCLGE_TM_SCH_ECC_ERR_RINT_CMD, 0, 0, 0); + /* handle all main PF RAS errors */ + ret = hclge_handle_mpf_ras_error(hdev, desc, mpf_bd_num); if (ret) { - dev_err(dev, "failed(%d) to read SCH ECC err status\n", ret); - 
return; + kfree(desc); + return ret; } + memset(desc, 0, bd_num * sizeof(struct hclge_desc)); - /* log TM scheduler errors */ - if (le32_to_cpu(desc.data[0])) { - hclge_log_error(dev, &hclge_tm_sch_err_int[0], - le32_to_cpu(desc.data[0])); - if (le32_to_cpu(desc.data[0]) & 0x2) { - module_no = (ecc_info >> 20) & 0xF; - ram_no = (ecc_info >> 16) & 0xF; - tm_sch_ecc_info = - &hclge_tm_sch_ecc_err[module_no][ram_no]; - dev_warn(dev, "ecc err module:ram=%s\n", - tm_sch_ecc_info->name); - dev_warn(dev, "ecc memory address = 0x%x\n", - ecc_info & 0xFFFF); + /* handle all PF RAS errors */ + ret = hclge_handle_pf_ras_error(hdev, desc, pf_bd_num); + kfree(desc); + + return ret; +} + +static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + int ret; + + /* read overflow error status */ + ret = hclge_cmd_query_error(hdev, &desc[0], + HCLGE_ROCEE_PF_RAS_INT_CMD, + 0, 0, 0); + if (ret) { + dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret); + return ret; + } + + /* log overflow error */ + if (le32_to_cpu(desc[0].data[0]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) { + const struct hclge_hw_error *err; + u32 err_sts; + + err = &hclge_rocee_qmm_ovf_err_int[0]; + err_sts = HCLGE_ROCEE_OVF_ERR_TYPE_MASK & + le32_to_cpu(desc[0].data[0]); + while (err->msg) { + if (err->int_msk == err_sts) { + dev_warn(dev, "%s [error status=0x%x] found\n", + err->msg, + le32_to_cpu(desc[0].data[0])); + break; + } + err++; } } - /* clear TM scheduler errors */ - ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0); - if (ret) { - dev_err(dev, "failed(%d) to clear TM SCH error status\n", ret); - return; + if (le32_to_cpu(desc[0].data[1]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) { + dev_warn(dev, "ROCEE TSP OVF [error status=0x%x] found\n", + le32_to_cpu(desc[0].data[1])); } - ret = hclge_cmd_query_error(hdev, &desc, - HCLGE_TM_SCH_ECC_ERR_RINT_CE, 0, 0, 0); - if (ret) { - dev_err(dev, "failed(%d) to read SCH CE status\n", ret); - return; + if (le32_to_cpu(desc[0].data[2]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) { + dev_warn(dev, "ROCEE SCC OVF [error status=0x%x] found\n", + le32_to_cpu(desc[0].data[2])); } - ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0); + return 0; +} + +static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) +{ + enum hnae3_reset_type reset_type = HNAE3_FUNC_RESET; + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + unsigned int status; + int ret; + + /* read RAS error interrupt status */ + ret = hclge_cmd_query_error(hdev, &desc[0], + HCLGE_QUERY_CLEAR_ROCEE_RAS_INT, + 0, 0, 0); if (ret) { - dev_err(dev, "failed(%d) to clear TM SCH CE status\n", ret); - return; + dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret); + /* reset everything for now */ + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + return ret; } - ret = hclge_cmd_query_error(hdev, &desc, - HCLGE_TM_SCH_ECC_ERR_RINT_NFE, 0, 0, 0); - if (ret) { - dev_err(dev, "failed(%d) to read SCH NFE status\n", ret); - return; + status = le32_to_cpu(desc[0].data[0]); + + if (status & HCLGE_ROCEE_RERR_INT_MASK) + dev_warn(dev, "ROCEE RAS AXI rresp error\n"); + + if (status & HCLGE_ROCEE_BERR_INT_MASK) + dev_warn(dev, "ROCEE RAS AXI bresp error\n"); + + if (status & HCLGE_ROCEE_ECC_INT_MASK) { + dev_warn(dev, "ROCEE RAS 2bit ECC error\n"); + reset_type = HNAE3_GLOBAL_RESET; } - ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0); - if (ret) { - dev_err(dev, "failed(%d) to clear TM SCH NFE 
status\n", ret);
-		return;
+	if (status & HCLGE_ROCEE_OVF_INT_MASK) {
+		ret = hclge_log_rocee_ovf_error(hdev);
+		if (ret) {
+			dev_err(dev, "failed(%d) to process ovf error\n", ret);
+			/* reset everything for now */
+			HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+			return ret;
+		}
 	}
 
-	ret = hclge_cmd_query_error(hdev, &desc,
-				    HCLGE_TM_SCH_ECC_ERR_RINT_FE, 0, 0, 0);
+	/* clear error status */
+	hclge_cmd_reuse_desc(&desc[0], false);
+	ret = hclge_cmd_send(&hdev->hw, &desc[0], 1);
 	if (ret) {
-		dev_err(dev, "failed(%d) to read SCH FE status\n", ret);
-		return;
+		dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret);
+		/* reset everything for now */
+		reset_type = HNAE3_GLOBAL_RESET;
 	}
 
-	ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
-	if (ret)
-		dev_err(dev, "failed(%d) to clear TM SCH FE status\n", ret);
+	HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type);
+
+	return ret;
 }
 
-static void hclge_process_tm_qcn_error(struct hclge_dev *hdev)
+static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
 {
 	struct device *dev = &hdev->pdev->dev;
 	struct hclge_desc desc;
 	int ret;
 
-	/* read QCN errors */
-	ret = hclge_cmd_query_error(hdev, &desc,
-				    HCLGE_TM_QCN_MEM_INT_INFO_CMD, 0, 0, 0);
-	if (ret) {
-		dev_err(dev, "failed(%d) to read QCN ECC err status\n", ret);
-		return;
-	}
+	if (hdev->pdev->revision < 0x21 || !hnae3_dev_roce_supported(hdev))
+		return 0;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_CONFIG_ROCEE_RAS_INT_EN, false);
+	if (en) {
+		/* enable ROCEE hw error interrupts */
+		desc.data[0] = cpu_to_le32(HCLGE_ROCEE_RAS_NFE_INT_EN);
+		desc.data[1] = cpu_to_le32(HCLGE_ROCEE_RAS_CE_INT_EN);
 
-	/* log QCN errors */
-	if (le32_to_cpu(desc.data[0]))
-		hclge_log_error(dev, &hclge_qcn_ecc_err_int[0],
-				le32_to_cpu(desc.data[0]));
+		hclge_log_and_clear_rocee_ras_error(hdev);
+	}
+	desc.data[2] = cpu_to_le32(HCLGE_ROCEE_RAS_NFE_INT_EN_MASK);
+	desc.data[3] = cpu_to_le32(HCLGE_ROCEE_RAS_CE_INT_EN_MASK);
 
-	/* clear QCN errors */
-	ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 	if (ret)
-		dev_err(dev, "failed(%d) to clear QCN error status\n", ret);
+		dev_err(dev, "failed(%d) to config ROCEE RAS interrupt\n", ret);
+
+	return ret;
 }
 
-static void hclge_process_tm_error(struct hclge_dev *hdev,
-				   enum hclge_err_int_type type)
+static int hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
 {
-	hclge_process_tm_sch_error(hdev);
-	hclge_process_tm_qcn_error(hdev);
+	struct hclge_dev *hdev = ae_dev->priv;
+
+	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+	    hdev->pdev->revision < 0x21)
+		return HNAE3_NONE_RESET;
+
+	return hclge_log_and_clear_rocee_ras_error(hdev);
 }
 
 static const struct hclge_hw_blk hw_blk[] = {
-	{ .msk = BIT(0), .name = "IGU_EGU",
-	  .enable_error = hclge_enable_igu_egu_error,
-	  .process_error = hclge_process_igu_egu_error, },
-	{ .msk = BIT(5), .name = "COMMON",
-	  .enable_error = hclge_enable_common_error,
-	  .process_error = hclge_process_common_error, },
-	{ .msk = BIT(4), .name = "TM",
-	  .enable_error = hclge_enable_tm_hw_error,
-	  .process_error = hclge_process_tm_error, },
-	{ .msk = BIT(1), .name = "PPP",
-	  .enable_error = hclge_enable_ppp_error,
-	  .process_error = hclge_process_ppp_error, },
+	{
+		.msk = BIT(0), .name = "IGU_EGU",
+		.config_err_int = hclge_config_igu_egu_hw_err_int,
+	},
+	{
+		.msk = BIT(1), .name = "PPP",
+		.config_err_int = hclge_config_ppp_hw_err_int,
+	},
+	{
+		.msk = BIT(2), .name = "SSU",
+		.config_err_int = hclge_config_ssu_hw_err_int,
+	},
+	{
+		.msk = BIT(3), .name = "PPU",
+		.config_err_int = hclge_config_ppu_hw_err_int,
+	},
+	{
+		.msk = BIT(4), .name = "TM",
+		.config_err_int = hclge_config_tm_hw_err_int,
+	},
+	{
+		.msk = BIT(5), .name = "COMMON",
+		.config_err_int = hclge_config_common_hw_err_int,
+	},
+	{
+		.msk = BIT(8), .name = "MAC",
+		.config_err_int = hclge_config_mac_err_int,
+	},
 	{ /* sentinel */ }
 };
 
 int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state)
 {
+	const struct hclge_hw_blk *module = hw_blk;
 	struct device *dev = &hdev->pdev->dev;
 	int ret = 0;
-	int i = 0;
 
-	while (hw_blk[i].name) {
-		if (!hw_blk[i].enable_error) {
-			i++;
-			continue;
-		}
-		ret = hw_blk[i].enable_error(hdev, state);
-		if (ret) {
-			dev_err(dev, "fail(%d) to en/disable err int\n", ret);
-			return ret;
+	while (module->name) {
+		if (module->config_err_int) {
+			ret = module->config_err_int(hdev, state);
+			if (ret)
+				return ret;
 		}
-		i++;
+		module++;
 	}
 
+	ret = hclge_config_rocee_ras_interrupt(hdev, state);
+	if (ret)
+		dev_err(dev, "fail(%d) to configure ROCEE err int\n", ret);
+
 	return ret;
 }
 
-pci_ers_result_t hclge_process_ras_hw_error(struct hnae3_ae_dev *ae_dev)
+pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
 {
 	struct hclge_dev *hdev = ae_dev->priv;
 	struct device *dev = &hdev->pdev->dev;
-	u32 sts, val;
-	int i = 0;
-
-	sts = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);
-
-	/* Processing Non-fatal errors */
-	if (sts & HCLGE_RAS_REG_NFE_MASK) {
-		val = (sts >> HCLGE_RAS_REG_NFE_SHIFT) & 0xFF;
-		i = 0;
-		while (hw_blk[i].name) {
-			if (!(hw_blk[i].msk & val)) {
-				i++;
-				continue;
-			}
-			dev_warn(dev, "%s ras non-fatal error identified\n",
-				 hw_blk[i].name);
-			if (hw_blk[i].process_error)
-				hw_blk[i].process_error(hdev,
-							HCLGE_ERR_INT_RAS_NFE);
-			i++;
-		}
+	u32 status;
+
+	status = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);
+
+	/* Handling Non-fatal HNS RAS errors */
+	if (status & HCLGE_RAS_REG_NFE_MASK) {
+		dev_warn(dev,
+			 "HNS Non-Fatal RAS error(status=0x%x) identified\n",
+			 status);
+		hclge_handle_all_ras_errors(hdev);
+	} else {
+		if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+		    hdev->pdev->revision < 0x21)
+			return PCI_ERS_RESULT_RECOVERED;
 	}
 
-	return PCI_ERS_RESULT_NEED_RESET;
+	if (status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
+		dev_warn(dev, "ROCEE uncorrected RAS error identified\n");
+		hclge_handle_rocee_ras_error(ae_dev);
+	}
+
+	if (status & HCLGE_RAS_REG_NFE_MASK ||
+	    status & HCLGE_RAS_REG_ROCEE_ERR_MASK)
+		return PCI_ERS_RESULT_NEED_RESET;
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
+			       unsigned long *reset_requests)
+{
+	struct device *dev = &hdev->pdev->dev;
+	u32 mpf_bd_num, pf_bd_num, bd_num;
+	struct hclge_desc desc_bd;
+	struct hclge_desc *desc;
+	__le32 *desc_data;
+	int ret = 0;
+	u32 status;
+
+	/* set default handling */
+	set_bit(HNAE3_FUNC_RESET, reset_requests);
+
+	/* query the number of bds for the MSIx int status */
+	hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_MSIX_INT_STS_BD_NUM,
+				   true);
+	ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
+	if (ret) {
+		dev_err(dev, "fail(%d) to query msix int status bd num\n",
+			ret);
+		/* reset everything for now */
+		set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+		return ret;
+	}
+
+	mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
+	pf_bd_num = le32_to_cpu(desc_bd.data[1]);
+	bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
+
+	desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+	if (!desc) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* query all main PF MSIx errors */
+	
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT, + true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num); + if (ret) { + dev_err(dev, "query all mpf msix int cmd failed (%d)\n", + ret); + /* reset everything for now */ + set_bit(HNAE3_GLOBAL_RESET, reset_requests); + goto msi_error; + } + + /* log MAC errors */ + desc_data = (__le32 *)&desc[1]; + status = le32_to_cpu(*desc_data); + if (status) { + hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R", + &hclge_mac_afifo_tnl_int[0], status); + set_bit(HNAE3_GLOBAL_RESET, reset_requests); + } + + /* log PPU(RCB) errors */ + desc_data = (__le32 *)&desc[5]; + status = le32_to_cpu(*(desc_data + 2)) & + HCLGE_PPU_MPF_INT_ST2_MSIX_MASK; + if (status) { + dev_warn(dev, + "PPU_MPF_ABNORMAL_INT_ST2[28:29], err_status(0x%x)\n", + status); + set_bit(HNAE3_CORE_RESET, reset_requests); + } + + /* clear all main PF MSIx errors */ + hclge_cmd_reuse_desc(&desc[0], false); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num); + if (ret) { + dev_err(dev, "clear all mpf msix int cmd failed (%d)\n", + ret); + /* reset everything for now */ + set_bit(HNAE3_GLOBAL_RESET, reset_requests); + goto msi_error; + } + + /* query all PF MSIx errors */ + memset(desc, 0, bd_num * sizeof(struct hclge_desc)); + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT, + true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num); + if (ret) { + dev_err(dev, "query all pf msix int cmd failed (%d)\n", + ret); + /* reset everything for now */ + set_bit(HNAE3_GLOBAL_RESET, reset_requests); + goto msi_error; + } + + /* log SSU PF errors */ + status = le32_to_cpu(desc[0].data[0]) & HCLGE_SSU_PORT_INT_MSIX_MASK; + if (status) { + hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT", + &hclge_ssu_port_based_pf_int[0], status); + set_bit(HNAE3_GLOBAL_RESET, reset_requests); + } + + /* read and log PPP PF errors */ + desc_data = (__le32 *)&desc[2]; + status = le32_to_cpu(*desc_data); + if (status) + hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0", + &hclge_ppp_pf_abnormal_int[0], status); + + /* PPU(RCB) PF errors */ + desc_data = (__le32 *)&desc[3]; + status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK; + if (status) + hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST", + &hclge_ppu_pf_abnormal_int[0], status); + + /* clear all PF MSIx errors */ + hclge_cmd_reuse_desc(&desc[0], false); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num); + if (ret) { + dev_err(dev, "clear all pf msix int cmd failed (%d)\n", + ret); + /* reset everything for now */ + set_bit(HNAE3_GLOBAL_RESET, reset_requests); + } + +msi_error: + kfree(desc); +out: + return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h index e0e3b5861495..51a7d4eb066a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h @@ -7,9 +7,11 @@ #include "hclge_main.h" #define HCLGE_RAS_PF_OTHER_INT_STS_REG 0x20B00 -#define HCLGE_RAS_REG_FE_MASK 0xFF #define HCLGE_RAS_REG_NFE_MASK 0xFF00 -#define HCLGE_RAS_REG_NFE_SHIFT 8 +#define HCLGE_RAS_REG_ROCEE_ERR_MASK 0x3000000 + +#define HCLGE_VECTOR0_PF_OTHER_INT_STS_REG 0x20800 +#define HCLGE_VECTOR0_REG_MSIX_MASK 0x1FF00 #define HCLGE_IMP_TCM_ECC_ERR_INT_EN 0xFFFF0000 #define 
HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK 0xFFFF0000 @@ -23,6 +25,8 @@ #define HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK 0x0100 #define HCLGE_TQP_ECC_ERR_INT_EN 0x0FFF #define HCLGE_TQP_ECC_ERR_INT_EN_MASK 0x0FFF +#define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK 0x0F000000 +#define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN 0x0F000000 #define HCLGE_IGU_ERR_INT_EN 0x0000066F #define HCLGE_IGU_ERR_INT_EN_MASK 0x000F #define HCLGE_IGU_TNL_ERR_INT_EN 0x0002AABF @@ -41,21 +45,55 @@ #define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF #define HCLGE_NCSI_ERR_INT_EN 0x3 #define HCLGE_NCSI_ERR_INT_TYPE 0x9 +#define HCLGE_MAC_COMMON_ERR_INT_EN GENMASK(7, 0) +#define HCLGE_MAC_COMMON_ERR_INT_EN_MASK GENMASK(7, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN GENMASK(31, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK GENMASK(31, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT1_EN GENMASK(31, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK GENMASK(31, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN 0x3FFF3FFF +#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK 0x3FFF3FFF +#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN2 0xB +#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK 0xB +#define HCLGE_PPU_MPF_ABNORMAL_INT3_EN GENMASK(7, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK GENMASK(23, 16) +#define HCLGE_PPU_PF_ABNORMAL_INT_EN GENMASK(5, 0) +#define HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK GENMASK(5, 0) +#define HCLGE_SSU_1BIT_ECC_ERR_INT_EN GENMASK(31, 0) +#define HCLGE_SSU_1BIT_ECC_ERR_INT_EN_MASK GENMASK(31, 0) +#define HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN GENMASK(31, 0) +#define HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN_MASK GENMASK(31, 0) +#define HCLGE_SSU_BIT32_ECC_ERR_INT_EN 0x0101 +#define HCLGE_SSU_BIT32_ECC_ERR_INT_EN_MASK 0x0101 +#define HCLGE_SSU_COMMON_INT_EN GENMASK(9, 0) +#define HCLGE_SSU_COMMON_INT_EN_MASK GENMASK(9, 0) +#define HCLGE_SSU_PORT_BASED_ERR_INT_EN 0x0BFF +#define HCLGE_SSU_PORT_BASED_ERR_INT_EN_MASK 0x0BFF0000 +#define HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN GENMASK(23, 0) +#define HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK GENMASK(23, 0) + +#define HCLGE_SSU_COMMON_ERR_INT_MASK GENMASK(9, 0) +#define HCLGE_SSU_PORT_INT_MSIX_MASK 0x7BFF +#define HCLGE_IGU_INT_MASK GENMASK(3, 0) +#define HCLGE_IGU_EGU_TNL_INT_MASK GENMASK(5, 0) +#define HCLGE_PPP_MPF_INT_ST3_MASK GENMASK(5, 0) +#define HCLGE_PPU_MPF_INT_ST3_MASK GENMASK(7, 0) +#define HCLGE_PPU_MPF_INT_ST2_MSIX_MASK GENMASK(29, 28) +#define HCLGE_PPU_PF_INT_MSIX_MASK 0x27 +#define HCLGE_QCN_FIFO_INT_MASK GENMASK(17, 0) +#define HCLGE_QCN_ECC_INT_MASK GENMASK(21, 0) +#define HCLGE_NCSI_ECC_INT_MASK GENMASK(1, 0) -#define HCLGE_IMP_TCM_ECC_INT_MASK 0xFFFF -#define HCLGE_IMP_ITCM4_ECC_INT_MASK 0x3 -#define HCLGE_CMDQ_ECC_INT_MASK 0xFFFF -#define HCLGE_CMDQ_ROC_ECC_INT_SHIFT 16 -#define HCLGE_TQP_ECC_INT_MASK 0xFFF -#define HCLGE_TQP_ECC_INT_SHIFT 16 -#define HCLGE_IMP_TCM_ECC_CLR_MASK 0xFFFF -#define HCLGE_IMP_ITCM4_ECC_CLR_MASK 0x3 -#define HCLGE_CMDQ_NIC_ECC_CLR_MASK 0xFFFF -#define HCLGE_CMDQ_ROCEE_ECC_CLR_MASK 0xFFFF0000 -#define HCLGE_TQP_IMP_ERR_CLR_MASK 0x0FFF0001 -#define HCLGE_IGU_COM_INT_MASK 0xF -#define HCLGE_IGU_EGU_TNL_INT_MASK 0x3F -#define HCLGE_PPP_PF_INT_MASK 0x100 +#define HCLGE_ROCEE_RAS_NFE_INT_EN 0xF +#define HCLGE_ROCEE_RAS_CE_INT_EN 0x1 +#define HCLGE_ROCEE_RAS_NFE_INT_EN_MASK 0xF +#define HCLGE_ROCEE_RAS_CE_INT_EN_MASK 0x1 +#define HCLGE_ROCEE_RERR_INT_MASK BIT(0) +#define HCLGE_ROCEE_BERR_INT_MASK BIT(1) +#define HCLGE_ROCEE_ECC_INT_MASK BIT(2) +#define HCLGE_ROCEE_OVF_INT_MASK BIT(3) +#define HCLGE_ROCEE_OVF_ERR_INT_MASK 0x10000 +#define HCLGE_ROCEE_OVF_ERR_TYPE_MASK 0x3F enum hclge_err_int_type { 
HCLGE_ERR_INT_MSIX = 0, @@ -67,9 +105,7 @@ enum hclge_err_int_type { struct hclge_hw_blk { u32 msk; const char *name; - int (*enable_error)(struct hclge_dev *hdev, bool en); - void (*process_error)(struct hclge_dev *hdev, - enum hclge_err_int_type type); + int (*config_err_int)(struct hclge_dev *hdev, bool en); }; struct hclge_hw_error { @@ -78,6 +114,7 @@ struct hclge_hw_error { }; int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state); -int hclge_enable_tm_hw_error(struct hclge_dev *hdev, bool en); -pci_ers_result_t hclge_process_ras_hw_error(struct hnae3_ae_dev *ae_dev); +pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev); +int hclge_handle_hw_msix_error(struct hclge_dev *hdev, + unsigned long *reset_requests); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 77980e54ad09..3fe08cf477f9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -26,7 +26,7 @@ #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f)) -static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu); +static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps); static int hclge_init_vlan_config(struct hclge_dev *hdev); static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, @@ -48,6 +48,62 @@ static const struct pci_device_id ae_algo_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl); +static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG, + HCLGE_CMDQ_TX_ADDR_H_REG, + HCLGE_CMDQ_TX_DEPTH_REG, + HCLGE_CMDQ_TX_TAIL_REG, + HCLGE_CMDQ_TX_HEAD_REG, + HCLGE_CMDQ_RX_ADDR_L_REG, + HCLGE_CMDQ_RX_ADDR_H_REG, + HCLGE_CMDQ_RX_DEPTH_REG, + HCLGE_CMDQ_RX_TAIL_REG, + HCLGE_CMDQ_RX_HEAD_REG, + HCLGE_VECTOR0_CMDQ_SRC_REG, + HCLGE_CMDQ_INTR_STS_REG, + HCLGE_CMDQ_INTR_EN_REG, + HCLGE_CMDQ_INTR_GEN_REG}; + +static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE, + HCLGE_VECTOR0_OTER_EN_REG, + HCLGE_MISC_RESET_STS_REG, + HCLGE_MISC_VECTOR_INT_STS, + HCLGE_GLOBAL_RESET_REG, + HCLGE_FUN_RST_ING, + HCLGE_GRO_EN_REG}; + +static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG, + HCLGE_RING_RX_ADDR_H_REG, + HCLGE_RING_RX_BD_NUM_REG, + HCLGE_RING_RX_BD_LENGTH_REG, + HCLGE_RING_RX_MERGE_EN_REG, + HCLGE_RING_RX_TAIL_REG, + HCLGE_RING_RX_HEAD_REG, + HCLGE_RING_RX_FBD_NUM_REG, + HCLGE_RING_RX_OFFSET_REG, + HCLGE_RING_RX_FBD_OFFSET_REG, + HCLGE_RING_RX_STASH_REG, + HCLGE_RING_RX_BD_ERR_REG, + HCLGE_RING_TX_ADDR_L_REG, + HCLGE_RING_TX_ADDR_H_REG, + HCLGE_RING_TX_BD_NUM_REG, + HCLGE_RING_TX_PRIORITY_REG, + HCLGE_RING_TX_TC_REG, + HCLGE_RING_TX_MERGE_EN_REG, + HCLGE_RING_TX_TAIL_REG, + HCLGE_RING_TX_HEAD_REG, + HCLGE_RING_TX_FBD_NUM_REG, + HCLGE_RING_TX_OFFSET_REG, + HCLGE_RING_TX_EBD_NUM_REG, + HCLGE_RING_TX_EBD_OFFSET_REG, + HCLGE_RING_TX_BD_ERR_REG, + HCLGE_RING_EN_REG}; + +static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG, + HCLGE_TQP_INTR_GL0_REG, + HCLGE_TQP_INTR_GL1_REG, + HCLGE_TQP_INTR_GL2_REG, + HCLGE_TQP_INTR_RL_REG}; + static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { "App Loopback test", "Serdes serial Loopback test", @@ -921,6 +977,28 @@ static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min, return hclge_cmd_send(&hdev->hw, &desc, 1); } +static int hclge_config_gro(struct hclge_dev 
*hdev, bool en) +{ + struct hclge_cfg_gro_status_cmd *req; + struct hclge_desc desc; + int ret; + + if (!hnae3_dev_gro_supported(hdev)) + return 0; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false); + req = (struct hclge_cfg_gro_status_cmd *)desc.data; + + req->gro_en = cpu_to_le16(en ? 1 : 0); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "GRO hardware config cmd failed, ret = %d\n", ret); + + return ret; +} + static int hclge_alloc_tqps(struct hclge_dev *hdev) { struct hclge_tqp *tqp; @@ -1144,6 +1222,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) for (i = 0; i < num_vport; i++) { vport->back = hdev; vport->vport_id = i; + vport->mps = HCLGE_MAC_DEFAULT_FRAME; if (i == 0) ret = hclge_vport_setup(vport, tqp_main_vport); @@ -1873,37 +1952,6 @@ static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, return hclge_cfg_mac_speed_dup(hdev, speed, duplex); } -static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed, - u8 *duplex) -{ - struct hclge_query_an_speed_dup_cmd *req; - struct hclge_desc desc; - int speed_tmp; - int ret; - - req = (struct hclge_query_an_speed_dup_cmd *)desc.data; - - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true); - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "mac speed/autoneg/duplex query cmd failed %d\n", - ret); - return ret; - } - - *duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B); - speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M, - HCLGE_QUERY_SPEED_S); - - ret = hclge_parse_speed(speed_tmp, speed); - if (ret) - dev_err(&hdev->pdev->dev, - "could not parse speed(=%d), %d\n", speed_tmp, ret); - - return ret; -} - static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) { struct hclge_config_auto_neg_cmd *req; @@ -1947,12 +1995,10 @@ static int hclge_get_autoneg(struct hnae3_handle *handle) static int hclge_mac_init(struct hclge_dev *hdev) { - struct hnae3_handle *handle = &hdev->vport[0].nic; - struct net_device *netdev = handle->kinfo.netdev; struct hclge_mac *mac = &hdev->hw.mac; - int mtu; int ret; + hdev->support_sfp_query = true; hdev->hw.mac.duplex = HCLGE_MAC_FULL; ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, hdev->hw.mac.duplex); @@ -1964,15 +2010,16 @@ static int hclge_mac_init(struct hclge_dev *hdev) mac->link = 0; - if (netdev) - mtu = netdev->mtu; - else - mtu = ETH_DATA_LEN; + ret = hclge_set_mac_mtu(hdev, hdev->mps); + if (ret) { + dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret); + return ret; + } - ret = hclge_set_mtu(handle, mtu); + ret = hclge_buffer_alloc(hdev); if (ret) dev_err(&hdev->pdev->dev, - "set mtu failed ret=%d\n", ret); + "allocate buffer fail, ret=%d\n", ret); return ret; } @@ -2061,34 +2108,58 @@ static void hclge_update_link_status(struct hclge_dev *hdev) } } +static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed) +{ + struct hclge_sfp_speed_cmd *resp = NULL; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true); + resp = (struct hclge_sfp_speed_cmd *)desc.data; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret == -EOPNOTSUPP) { + dev_warn(&hdev->pdev->dev, + "IMP do not support get SFP speed %d\n", ret); + return ret; + } else if (ret) { + dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret); + return ret; + } + + *speed = resp->sfp_speed; + + return 0; +} + static int hclge_update_speed_duplex(struct 
hclge_dev *hdev) { struct hclge_mac mac = hdev->hw.mac; - u8 duplex; int speed; int ret; - /* get the speed and duplex as autoneg'result from mac cmd when phy + /* get the speed from SFP cmd when phy * doesn't exit. */ - if (mac.phydev || !mac.autoneg) + if (mac.phydev) return 0; - ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex); - if (ret) { - dev_err(&hdev->pdev->dev, - "mac autoneg/speed/duplex query failed %d\n", ret); - return ret; - } + /* if IMP does not support get SFP/qSFP speed, return directly */ + if (!hdev->support_sfp_query) + return 0; - ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); - if (ret) { - dev_err(&hdev->pdev->dev, - "mac speed/duplex config failed %d\n", ret); + ret = hclge_get_sfp_speed(hdev, &speed); + if (ret == -EOPNOTSUPP) { + hdev->support_sfp_query = false; + return ret; + } else if (ret) { return ret; } - return 0; + if (speed == HCLGE_MAC_SPEED_UNKNOWN) + return 0; /* do nothing if no SFP */ + + /* must config full duplex for SFP */ + return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL); } static int hclge_update_speed_duplex_h(struct hnae3_handle *handle) @@ -2129,12 +2200,13 @@ static void hclge_service_complete(struct hclge_dev *hdev) static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) { - u32 rst_src_reg; - u32 cmdq_src_reg; + u32 rst_src_reg, cmdq_src_reg, msix_src_reg; /* fetch the events from their corresponding regs */ rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); + msix_src_reg = hclge_read_dev(&hdev->hw, + HCLGE_VECTOR0_PF_OTHER_INT_STS_REG); /* Assumption: If by any chance reset and mailbox events are reported * together then we will only process reset event in this go and will @@ -2144,6 +2216,14 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) */ /* check for vector0 reset event sources */ + if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) { + dev_info(&hdev->pdev->dev, "IMP reset interrupt\n"); + set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); + return HCLGE_VECTOR0_EVENT_RST; + } + if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) { dev_info(&hdev->pdev->dev, "global reset interrupt\n"); set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); @@ -2160,12 +2240,9 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) return HCLGE_VECTOR0_EVENT_RST; } - if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) { - dev_info(&hdev->pdev->dev, "IMP reset interrupt\n"); - set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); - *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); - return HCLGE_VECTOR0_EVENT_RST; - } + /* check for vector0 msix event source */ + if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) + return HCLGE_VECTOR0_EVENT_ERR; /* check for vector0 mailbox(=CMDQ RX) event source */ if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { @@ -2217,6 +2294,19 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data) /* vector 0 interrupt is shared with reset and mailbox source events.*/ switch (event_cause) { + case HCLGE_VECTOR0_EVENT_ERR: + /* we do not know what type of reset is required now. This could + * only be decided after we fetch the type of errors which + * caused this event. Therefore, we will do below for now: + * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we + * have defered type of reset to be used. + * 2. 
Schedule the reset service task. + * 3. When service task receives HNAE3_UNKNOWN_RESET type it + * will fetch the correct type of reset. This would be done + * by first decoding the types of errors. + */ + set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request); + /* fall through */ case HCLGE_VECTOR0_EVENT_RST: hclge_reset_task_schedule(hdev); break; @@ -2352,11 +2442,15 @@ static int hclge_notify_roce_client(struct hclge_dev *hdev, static int hclge_reset_wait(struct hclge_dev *hdev) { #define HCLGE_RESET_WATI_MS 100 -#define HCLGE_RESET_WAIT_CNT 5 +#define HCLGE_RESET_WAIT_CNT 200 u32 val, reg, reg_bit; u32 cnt = 0; switch (hdev->reset_type) { + case HNAE3_IMP_RESET: + reg = HCLGE_GLOBAL_RESET_REG; + reg_bit = HCLGE_IMP_RESET_BIT; + break; case HNAE3_GLOBAL_RESET: reg = HCLGE_GLOBAL_RESET_REG; reg_bit = HCLGE_GLOBAL_RESET_BIT; @@ -2369,6 +2463,8 @@ static int hclge_reset_wait(struct hclge_dev *hdev) reg = HCLGE_FUN_RST_ING; reg_bit = HCLGE_FUN_RST_ING_B; break; + case HNAE3_FLR_RESET: + break; default: dev_err(&hdev->pdev->dev, "Wait for unsupported reset type: %d\n", @@ -2376,6 +2472,20 @@ static int hclge_reset_wait(struct hclge_dev *hdev) return -EINVAL; } + if (hdev->reset_type == HNAE3_FLR_RESET) { + while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) && + cnt++ < HCLGE_RESET_WAIT_CNT) + msleep(HCLGE_RESET_WATI_MS); + + if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) { + dev_err(&hdev->pdev->dev, + "flr wait timeout: %d\n", cnt); + return -EBUSY; + } + + return 0; + } + val = hclge_read_dev(&hdev->hw, reg); while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { msleep(HCLGE_RESET_WATI_MS); @@ -2392,6 +2502,55 @@ static int hclge_reset_wait(struct hclge_dev *hdev) return 0; } +static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset) +{ + struct hclge_vf_rst_cmd *req; + struct hclge_desc desc; + + req = (struct hclge_vf_rst_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false); + req->dest_vfid = func_id; + + if (reset) + req->vf_rst = 0x1; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) +{ + int i; + + for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) { + struct hclge_vport *vport = &hdev->vport[i]; + int ret; + + /* Send cmd to set/clear VF's FUNC_RST_ING */ + ret = hclge_set_vf_rst(hdev, vport->vport_id, reset); + if (ret) { + dev_err(&hdev->pdev->dev, + "set vf(%d) rst failed %d!\n", + vport->vport_id, ret); + return ret; + } + + if (!reset) + continue; + + /* Inform VF to process the reset. + * hclge_inform_reset_assert_to_vf may fail if VF + * driver is not loaded.
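+ * If that happens, only the warning below is emitted and the
+ * loop simply moves on to the remaining VFs.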
+ */ + ret = hclge_inform_reset_assert_to_vf(vport); + if (ret) + dev_warn(&hdev->pdev->dev, + "inform reset to vf(%d) failed %d!\n", + vport->vport_id, ret); + } + + return 0; +} + int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) { struct hclge_desc desc; @@ -2434,6 +2593,12 @@ static void hclge_do_reset(struct hclge_dev *hdev) set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); hclge_reset_task_schedule(hdev); break; + case HNAE3_FLR_RESET: + dev_info(&pdev->dev, "FLR requested\n"); + /* schedule again to check later */ + set_bit(HNAE3_FLR_RESET, &hdev->reset_pending); + hclge_reset_task_schedule(hdev); + break; default: dev_warn(&pdev->dev, "Unsupported reset type: %d\n", hdev->reset_type); @@ -2446,6 +2611,23 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev, { enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; + /* first, resolve any unknown reset type to the known type(s) */ + if (test_bit(HNAE3_UNKNOWN_RESET, addr)) { + /* we will intentionally ignore any errors from this function + * as we will end up in *some* reset request in any case + */ + hclge_handle_hw_msix_error(hdev, addr); + clear_bit(HNAE3_UNKNOWN_RESET, addr); + /* We deferred the clearing of the error event which caused + * interrupt since it was not possible to do that in + * interrupt context (and this is the reason we introduced + * new UNKNOWN reset type). Now that the errors have been + * handled and cleared in hardware, we can safely enable + * interrupts. This is an exception to the norm. + */ + hclge_enable_vector(&hdev->misc_vector, true); + } + /* return the highest priority reset level amongst all */ if (test_bit(HNAE3_IMP_RESET, addr)) { rst_level = HNAE3_IMP_RESET; @@ -2465,6 +2647,9 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev, } else if (test_bit(HNAE3_FUNC_RESET, addr)) { rst_level = HNAE3_FUNC_RESET; clear_bit(HNAE3_FUNC_RESET, addr); + } else if (test_bit(HNAE3_FLR_RESET, addr)) { + rst_level = HNAE3_FLR_RESET; + clear_bit(HNAE3_FLR_RESET, addr); } return rst_level; @@ -2495,12 +2680,34 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev) hclge_enable_vector(&hdev->misc_vector, true); } +static int hclge_reset_prepare_down(struct hclge_dev *hdev) +{ + int ret = 0; + + switch (hdev->reset_type) { + case HNAE3_FUNC_RESET: + /* fall through */ + case HNAE3_FLR_RESET: + ret = hclge_set_all_vf_rst(hdev, true); + break; + default: + break; + } + + return ret; +} + static int hclge_reset_prepare_wait(struct hclge_dev *hdev) { + u32 reg_val; int ret = 0; switch (hdev->reset_type) { case HNAE3_FUNC_RESET: + /* There is no mechanism for PF to know if VF has stopped IO; + * for now, just wait 100 ms for VF to stop IO + */ + msleep(100); ret = hclge_func_reset_cmd(hdev, 0); if (ret) { dev_err(&hdev->pdev->dev, @@ -2515,6 +2722,19 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev) */ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); break; + case HNAE3_FLR_RESET: + /* There is no mechanism for PF to know if VF has stopped IO; + * for now, just wait 100 ms for VF to stop IO + */ + msleep(100); + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); + break; + case HNAE3_IMP_RESET: + reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); + hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, + BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val); + break; default: break; } @@ -2562,6 +2782,23 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout) return false; } +static int 
hclge_reset_prepare_up(struct hclge_dev *hdev) +{ + int ret = 0; + + switch (hdev->reset_type) { + case HNAE3_FUNC_RESET: + /* fall through */ + case HNAE3_FLR_RESET: + ret = hclge_set_all_vf_rst(hdev, false); + break; + default: + break; + } + + return ret; +} + static void hclge_reset(struct hclge_dev *hdev) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); @@ -2579,6 +2816,10 @@ static void hclge_reset(struct hclge_dev *hdev) if (ret) goto err_reset; + ret = hclge_reset_prepare_down(hdev); + if (ret) + goto err_reset; + rtnl_lock(); ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); if (ret) @@ -2614,6 +2855,10 @@ static void hclge_reset(struct hclge_dev *hdev) hclge_clear_reset_cause(hdev); + ret = hclge_reset_prepare_up(hdev); + if (ret) + goto err_reset_lock; + ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); if (ret) goto err_reset_lock; @@ -2752,6 +2997,23 @@ static void hclge_mailbox_service_task(struct work_struct *work) clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); } +static void hclge_update_vport_alive(struct hclge_dev *hdev) +{ + int i; + + /* start from vport 1 for PF is always alive */ + for (i = 1; i < hdev->num_alloc_vport; i++) { + struct hclge_vport *vport = &hdev->vport[i]; + + if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ)) + clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); + + /* If vf is not alive, set to default value */ + if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) + vport->mps = HCLGE_MAC_DEFAULT_FRAME; + } +} + static void hclge_service_task(struct work_struct *work) { struct hclge_dev *hdev = @@ -2764,6 +3026,7 @@ static void hclge_service_task(struct work_struct *work) hclge_update_speed_duplex(hdev); hclge_update_link_status(hdev); + hclge_update_vport_alive(hdev); hclge_service_complete(hdev); } @@ -5049,6 +5312,32 @@ static void hclge_ae_stop(struct hnae3_handle *handle) hclge_update_link_status(hdev); } +int hclge_vport_start(struct hclge_vport *vport) +{ + set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); + vport->last_active_jiffies = jiffies; + return 0; +} + +void hclge_vport_stop(struct hclge_vport *vport) +{ + clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); +} + +static int hclge_client_start(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_vport_start(vport); +} + +static void hclge_client_stop(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + hclge_vport_stop(vport); +} + static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, u16 cmdq_resp, u8 resp_code, enum hclge_mac_vlan_tbl_opcode op) @@ -6196,54 +6485,76 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) return hclge_set_vlan_rx_offload_cfg(vport); } -static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu) +static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps) { struct hclge_config_max_frm_size_cmd *req; struct hclge_desc desc; - int max_frm_size; - int ret; - - max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; - - if (max_frm_size < HCLGE_MAC_MIN_FRAME || - max_frm_size > HCLGE_MAC_MAX_FRAME) - return -EINVAL; - - max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME); hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); req = (struct hclge_config_max_frm_size_cmd *)desc.data; - req->max_frm_size = cpu_to_le16(max_frm_size); + req->max_frm_size = cpu_to_le16(new_mps); req->min_frm_size = HCLGE_MAC_MIN_FRAME; - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if 
(ret) - dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret); - else - hdev->mps = max_frm_size; - - return ret; + return hclge_cmd_send(&hdev->hw, &desc, 1); } static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) { struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_set_vport_mtu(vport, new_mtu); +} + +int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu) +{ struct hclge_dev *hdev = vport->back; - int ret; + int i, max_frm_size, ret = 0; + + max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN; + if (max_frm_size < HCLGE_MAC_MIN_FRAME || + max_frm_size > HCLGE_MAC_MAX_FRAME) + return -EINVAL; + + max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME); + mutex_lock(&hdev->vport_lock); + /* VF's mps must fit within hdev->mps */ + if (vport->vport_id && max_frm_size > hdev->mps) { + mutex_unlock(&hdev->vport_lock); + return -EINVAL; + } else if (vport->vport_id) { + vport->mps = max_frm_size; + mutex_unlock(&hdev->vport_lock); + return 0; + } + + /* PF's mps must be greater than VF's mps */ + for (i = 1; i < hdev->num_alloc_vport; i++) + if (max_frm_size < hdev->vport[i].mps) { + mutex_unlock(&hdev->vport_lock); + return -EINVAL; + } + + hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); - ret = hclge_set_mac_mtu(hdev, new_mtu); + ret = hclge_set_mac_mtu(hdev, max_frm_size); if (ret) { dev_err(&hdev->pdev->dev, "Change mtu fail, ret =%d\n", ret); - return ret; + goto out; } + hdev->mps = max_frm_size; + vport->mps = max_frm_size; + ret = hclge_buffer_alloc(hdev); if (ret) dev_err(&hdev->pdev->dev, "Allocate buffer fail, ret =%d\n", ret); +out: + hclge_notify_client(hdev, HNAE3_UP_CLIENT); + mutex_unlock(&hdev->vport_lock); return ret; } @@ -6291,8 +6602,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); } -static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, - u16 queue_id) +u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id) { struct hnae3_queue *queue; struct hclge_tqp *tqp; @@ -6443,7 +6753,7 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev) if (!phydev->link || !phydev->autoneg) return 0; - local_advertising = ethtool_adv_to_lcl_adv_t(phydev->advertising); + local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising); if (phydev->pause) remote_advertising = LPA_PAUSE_CAP; @@ -6815,6 +7125,34 @@ static void hclge_state_uninit(struct hclge_dev *hdev) cancel_work_sync(&hdev->mbx_service_task); } +static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev) +{ +#define HCLGE_FLR_WAIT_MS 100 +#define HCLGE_FLR_WAIT_CNT 50 + struct hclge_dev *hdev = ae_dev->priv; + int cnt = 0; + + clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state); + clear_bit(HNAE3_FLR_DONE, &hdev->flr_state); + set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request); + hclge_reset_event(hdev->pdev, NULL); + + while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) && + cnt++ < HCLGE_FLR_WAIT_CNT) + msleep(HCLGE_FLR_WAIT_MS); + + if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state)) + dev_err(&hdev->pdev->dev, + "flr wait down timeout: %d\n", cnt); +} + +static void hclge_flr_done(struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + + set_bit(HNAE3_FLR_DONE, &hdev->flr_state); +} + static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) { struct pci_dev *pdev = ae_dev->pdev; @@ -6832,6 +7170,9 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hdev->reset_type = HNAE3_NONE_RESET; hdev->reset_level = HNAE3_FUNC_RESET; 
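(Aside on the MTU-to-frame-size conversion used by hclge_set_vport_mtu() above and the hdev->mps default a few lines below: the driver reserves room for two VLAN tags on top of the Ethernet header and FCS. A minimal standalone sketch, assuming only the standard Ethernet constant values; mtu_to_max_frm_size() is a hypothetical helper name, not a driver function:)

#include <stdio.h>

#define ETH_HLEN            14    /* destination MAC + source MAC + EtherType */
#define ETH_FCS_LEN         4     /* frame check sequence */
#define VLAN_HLEN           4     /* one 802.1Q tag */
#define ETH_DATA_LEN        1500  /* default MTU */
#define HCLGE_MAC_MIN_FRAME 64
#define HCLGE_MAC_MAX_FRAME 9728

/* hypothetical helper mirroring the driver's arithmetic */
static int mtu_to_max_frm_size(int mtu)
{
	return mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
}

int main(void)
{
	int frm = mtu_to_max_frm_size(ETH_DATA_LEN);

	/* 1500 + 14 + 4 + 8 = 1526, i.e. HCLGE_MAC_DEFAULT_FRAME */
	printf("default max frame: %d\n", frm);
	printf("in range: %s\n",
	       (frm >= HCLGE_MAC_MIN_FRAME && frm <= HCLGE_MAC_MAX_FRAME) ?
	       "yes" : "no");
	return 0;
}
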
ae_dev->priv = hdev; + hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; + + mutex_init(&hdev->vport_lock); ret = hclge_pci_init(hdev); if (ret) { @@ -6923,6 +7264,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) goto err_mdiobus_unreg; } + ret = hclge_config_gro(hdev, true); + if (ret) + goto err_mdiobus_unreg; + ret = hclge_init_vlan_config(hdev); if (ret) { dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); @@ -6958,7 +7303,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) ret = hclge_hw_error_set_state(hdev, true); if (ret) { dev_err(&pdev->dev, - "hw error interrupts enable failed, ret =%d\n", ret); + "fail(%d) to enable hw error interrupts\n", ret); goto err_mdiobus_unreg; } @@ -7004,6 +7349,17 @@ static void hclge_stats_clear(struct hclge_dev *hdev) memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats)); } +static void hclge_reset_vport_state(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + hclge_vport_start(vport); + vport++; + } +} + static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) { struct hclge_dev *hdev = ae_dev->priv; @@ -7054,6 +7410,10 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } + ret = hclge_config_gro(hdev, true); + if (ret) + return ret; + ret = hclge_init_vlan_config(hdev); if (ret) { dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); @@ -7079,11 +7439,17 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } - /* Re-enable the TM hw error interrupts because - * they get disabled on core/global reset. + /* Re-enable the hw error interrupts because + * the interrupts get disabled on core/global reset. */ - if (hclge_enable_tm_hw_error(hdev, true)) - dev_err(&pdev->dev, "failed to enable TM hw error interrupts\n"); + ret = hclge_hw_error_set_state(hdev, true); + if (ret) { + dev_err(&pdev->dev, + "fail(%d) to re-enable HNS hw error interrupts\n", ret); + return ret; + } + + hclge_reset_vport_state(hdev); dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", HCLGE_DRIVER_NAME); @@ -7111,6 +7477,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_destroy_cmd_queue(&hdev->hw); hclge_misc_irq_uninit(hdev); hclge_pci_uninit(hdev); + mutex_destroy(&hdev->vport_lock); ae_dev->priv = NULL; } @@ -7364,8 +7731,15 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, return 0; } +#define MAX_SEPARATE_NUM 4 +#define SEPARATOR_VALUE 0xFFFFFFFF +#define REG_NUM_PER_LINE 4 +#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) + static int hclge_get_regs_len(struct hnae3_handle *handle) { + int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; + struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; u32 regs_num_32_bit, regs_num_64_bit; @@ -7378,15 +7752,25 @@ static int hclge_get_regs_len(struct hnae3_handle *handle) return -EOPNOTSUPP; } - return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64); + cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; + common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; + ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; + tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; + + return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps + + tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE + + regs_num_32_bit * sizeof(u32) + 
regs_num_64_bit * sizeof(u64); } static void hclge_get_regs(struct hnae3_handle *handle, u32 *version, void *data) { + struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; u32 regs_num_32_bit, regs_num_64_bit; + int i, j, reg_um, separator_num; + u32 *reg = data; int ret; *version = hdev->fw_version; @@ -7398,16 +7782,53 @@ static void hclge_get_regs(struct hnae3_handle *handle, u32 *version, return; } - ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data); + /* fetching per-PF registers values from PF PCIe register space */ + reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (i = 0; i < reg_um; i++) + *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + + reg_um = sizeof(common_reg_addr_list) / sizeof(u32); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (i = 0; i < reg_um; i++) + *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + + reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (j = 0; j < kinfo->num_tqps; j++) { + for (i = 0; i < reg_um; i++) + *reg++ = hclge_read_dev(&hdev->hw, + ring_reg_addr_list[i] + + 0x200 * j); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + } + + reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (j = 0; j < hdev->num_msi_used - 1; j++) { + for (i = 0; i < reg_um; i++) + *reg++ = hclge_read_dev(&hdev->hw, + tqp_intr_reg_addr_list[i] + + 4 * j); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + } + + /* fetching PF common registers values from firmware */ + ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg); if (ret) { dev_err(&hdev->pdev->dev, "Get 32 bit register failed, ret = %d.\n", ret); return; } - data = (u32 *)data + regs_num_32_bit; - ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, - data); + reg += regs_num_32_bit; + ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg); if (ret) dev_err(&hdev->pdev->dev, "Get 64 bit register failed, ret = %d.\n", ret); @@ -7470,9 +7891,19 @@ static void hclge_get_link_mode(struct hnae3_handle *handle, } } +static int hclge_gro_en(struct hnae3_handle *handle, int enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hclge_config_gro(hdev, enable); +} + static const struct hnae3_ae_ops hclge_ops = { .init_ae_dev = hclge_init_ae_dev, .uninit_ae_dev = hclge_uninit_ae_dev, + .flr_prepare = hclge_flr_prepare, + .flr_done = hclge_flr_done, .init_client_instance = hclge_init_client_instance, .uninit_client_instance = hclge_uninit_client_instance, .map_ring_to_vector = hclge_map_ring_to_vector, @@ -7483,6 +7914,8 @@ static const struct hnae3_ae_ops hclge_ops = { .set_loopback = hclge_set_loopback, .start = hclge_ae_start, .stop = hclge_ae_stop, + .client_start = hclge_client_start, + .client_stop = hclge_client_stop, .get_status = hclge_get_status, .get_ksettings_an_result = hclge_get_ksettings_an_result, .update_speed_duplex_h = hclge_update_speed_duplex_h, @@ -7535,10 +7968,13 @@ static const struct hnae3_ae_ops hclge_ops = { .get_fd_all_rules = hclge_get_all_rules, .restore_fd_rules = hclge_restore_fd_entries, .enable_fd = 
hclge_enable_fd, - .process_hw_error = hclge_process_ras_hw_error, + .dbg_run_cmd = hclge_dbg_run_cmd, + .handle_hw_ras_error = hclge_handle_hw_ras_error, .get_hw_reset_stat = hclge_get_hw_reset_stat, .ae_dev_resetting = hclge_ae_dev_resetting, .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt, + .set_gro_en = hclge_gro_en, + .get_global_queue_id = hclge_covert_handle_qid_global, }; static struct hnae3_ae_algo ae_algo = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 36f341368683..9571e228ab42 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -28,6 +28,62 @@ #define HCLGE_VECTOR_REG_OFFSET 0x4 #define HCLGE_VECTOR_VF_OFFSET 0x100000 +#define HCLGE_CMDQ_TX_ADDR_L_REG 0x27000 +#define HCLGE_CMDQ_TX_ADDR_H_REG 0x27004 +#define HCLGE_CMDQ_TX_DEPTH_REG 0x27008 +#define HCLGE_CMDQ_TX_TAIL_REG 0x27010 +#define HCLGE_CMDQ_TX_HEAD_REG 0x27014 +#define HCLGE_CMDQ_RX_ADDR_L_REG 0x27018 +#define HCLGE_CMDQ_RX_ADDR_H_REG 0x2701C +#define HCLGE_CMDQ_RX_DEPTH_REG 0x27020 +#define HCLGE_CMDQ_RX_TAIL_REG 0x27024 +#define HCLGE_CMDQ_RX_HEAD_REG 0x27028 +#define HCLGE_CMDQ_INTR_SRC_REG 0x27100 +#define HCLGE_CMDQ_INTR_STS_REG 0x27104 +#define HCLGE_CMDQ_INTR_EN_REG 0x27108 +#define HCLGE_CMDQ_INTR_GEN_REG 0x2710C + +/* bar registers for common func */ +#define HCLGE_VECTOR0_OTER_EN_REG 0x20600 +#define HCLGE_RAS_OTHER_STS_REG 0x20B00 +#define HCLGE_FUNC_RESET_STS_REG 0x20C00 +#define HCLGE_GRO_EN_REG 0x28000 + +/* bar registers for rcb */ +#define HCLGE_RING_RX_ADDR_L_REG 0x80000 +#define HCLGE_RING_RX_ADDR_H_REG 0x80004 +#define HCLGE_RING_RX_BD_NUM_REG 0x80008 +#define HCLGE_RING_RX_BD_LENGTH_REG 0x8000C +#define HCLGE_RING_RX_MERGE_EN_REG 0x80014 +#define HCLGE_RING_RX_TAIL_REG 0x80018 +#define HCLGE_RING_RX_HEAD_REG 0x8001C +#define HCLGE_RING_RX_FBD_NUM_REG 0x80020 +#define HCLGE_RING_RX_OFFSET_REG 0x80024 +#define HCLGE_RING_RX_FBD_OFFSET_REG 0x80028 +#define HCLGE_RING_RX_STASH_REG 0x80030 +#define HCLGE_RING_RX_BD_ERR_REG 0x80034 +#define HCLGE_RING_TX_ADDR_L_REG 0x80040 +#define HCLGE_RING_TX_ADDR_H_REG 0x80044 +#define HCLGE_RING_TX_BD_NUM_REG 0x80048 +#define HCLGE_RING_TX_PRIORITY_REG 0x8004C +#define HCLGE_RING_TX_TC_REG 0x80050 +#define HCLGE_RING_TX_MERGE_EN_REG 0x80054 +#define HCLGE_RING_TX_TAIL_REG 0x80058 +#define HCLGE_RING_TX_HEAD_REG 0x8005C +#define HCLGE_RING_TX_FBD_NUM_REG 0x80060 +#define HCLGE_RING_TX_OFFSET_REG 0x80064 +#define HCLGE_RING_TX_EBD_NUM_REG 0x80068 +#define HCLGE_RING_TX_EBD_OFFSET_REG 0x80070 +#define HCLGE_RING_TX_BD_ERR_REG 0x80074 +#define HCLGE_RING_EN_REG 0x80090 + +/* bar registers for tqp interrupt */ +#define HCLGE_TQP_INTR_CTRL_REG 0x20000 +#define HCLGE_TQP_INTR_GL0_REG 0x20100 +#define HCLGE_TQP_INTR_GL1_REG 0x20200 +#define HCLGE_TQP_INTR_GL2_REG 0x20300 +#define HCLGE_TQP_INTR_RL_REG 0x20900 + #define HCLGE_RSS_IND_TBL_SIZE 512 #define HCLGE_RSS_SET_BITMAP_MSK GENMASK(15, 0) #define HCLGE_RSS_KEY_SIZE 40 @@ -97,6 +153,7 @@ enum HLCGE_PORT_TYPE { #define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0) /* Reset related Registers */ +#define HCLGE_PF_OTHER_INT_REG 0x20600 #define HCLGE_MISC_RESET_STS_REG 0x20700 #define HCLGE_MISC_VECTOR_INT_STS 0x20800 #define HCLGE_GLOBAL_RESET_REG 0x20A00 @@ -116,8 +173,10 @@ enum HLCGE_PORT_TYPE { /* CMDQ register bits for RX event(=MBX event) */ #define HCLGE_VECTOR0_RX_CMDQ_INT_B 1 +#define HCLGE_VECTOR0_IMP_RESET_INT_B 1 + #define HCLGE_MAC_DEFAULT_FRAME \ - 
(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN) + (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN) #define HCLGE_MAC_MIN_FRAME 64 #define HCLGE_MAC_MAX_FRAME 9728 @@ -146,12 +205,14 @@ enum HCLGE_DEV_STATE { enum hclge_evt_cause { HCLGE_VECTOR0_EVENT_RST, HCLGE_VECTOR0_EVENT_MBX, + HCLGE_VECTOR0_EVENT_ERR, HCLGE_VECTOR0_EVENT_OTHER, }; #define HCLGE_MPF_ENBALE 1 enum HCLGE_MAC_SPEED { + HCLGE_MAC_SPEED_UNKNOWN = 0, /* unknown */ HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */ HCLGE_MAC_SPEED_100M = 100, /* 100 Mbps */ HCLGE_MAC_SPEED_1G = 1000, /* 1000 Mbps = 1 Gbps */ @@ -594,6 +655,7 @@ struct hclge_dev { struct hclge_misc_vector misc_vector; struct hclge_hw_stats hw_stats; unsigned long state; + unsigned long flr_state; unsigned long last_reset_time; enum hnae3_reset_type reset_type; @@ -620,6 +682,7 @@ struct hclge_dev { u8 hw_tc_map; u8 tc_num_last_time; enum hclge_fc_mode fc_mode_last_time; + u8 support_sfp_query; #define HCLGE_FLAG_TC_BASE_SCH_MODE 1 #define HCLGE_FLAG_VNET_BASE_SCH_MODE 2 @@ -674,6 +737,8 @@ struct hclge_dev { u32 pkt_buf_size; /* Total pf buf size for tx/rx */ u32 mps; /* Max packet size */ + /* vport_lock protect resource shared by vports */ + struct mutex vport_lock; struct hclge_vlan_type_cfg vlan_type_cfg; @@ -724,6 +789,11 @@ struct hclge_rss_tuple_cfg { u8 ipv6_fragment_en; }; +enum HCLGE_VPORT_STATE { + HCLGE_VPORT_STATE_ALIVE, + HCLGE_VPORT_STATE_MAX +}; + struct hclge_vport { u16 alloc_tqps; /* Allocated Tx/Rx queues */ @@ -749,6 +819,10 @@ struct hclge_vport { struct hclge_dev *back; /* Back reference to associated dev */ struct hnae3_handle nic; struct hnae3_handle roce; + + unsigned long state; + unsigned long last_active_jiffies; + u32 mps; /* Max packet size */ }; void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, @@ -775,6 +849,12 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue) return tqp->index; } +static inline bool hclge_is_reset_pending(struct hclge_dev *hdev) +{ + return !!hdev->reset_pending; +} + +int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport); int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex); int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, u16 vlan_id, bool is_kill); @@ -784,9 +864,15 @@ int hclge_buffer_alloc(struct hclge_dev *hdev); int hclge_rss_init_hw(struct hclge_dev *hdev); void hclge_rss_indir_init_cfg(struct hclge_dev *hdev); +int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport); void hclge_mbx_handler(struct hclge_dev *hdev); int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id); void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id); int hclge_cfg_flowctrl(struct hclge_dev *hdev); int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id); +int hclge_vport_start(struct hclge_vport *vport); +void hclge_vport_stop(struct hclge_vport *vport); +int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu); +int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf); +u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index f890022938d9..a1de451a85df 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -79,15 +79,26 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, return status; } -static int 
hclge_inform_reset_assert_to_vf(struct hclge_vport *vport) +int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport) { + struct hclge_dev *hdev = vport->back; + enum hnae3_reset_type reset_type; u8 msg_data[2]; u8 dest_vfid; dest_vfid = (u8)vport->vport_id; + if (hdev->reset_type == HNAE3_FUNC_RESET) + reset_type = HNAE3_VF_PF_FUNC_RESET; + else if (hdev->reset_type == HNAE3_FLR_RESET) + reset_type = HNAE3_VF_FULL_RESET; + else + return -EINVAL; + + memcpy(&msg_data[0], &reset_type, sizeof(u16)); + /* send this requested info to VF */ - return hclge_send_mbx_msg(vport, msg_data, sizeof(u8), + return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), HCLGE_MBX_ASSERTING_RESET, dest_vfid); } @@ -290,6 +301,21 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, return status; } +static int hclge_set_vf_alive(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + bool gen_resp) +{ + bool alive = !!mbx_req->msg[2]; + int ret = 0; + + if (alive) + ret = hclge_vport_start(vport); + else + hclge_vport_stop(vport); + + return ret; +} + static int hclge_get_vf_tcinfo(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req, bool gen_resp) @@ -363,24 +389,41 @@ static void hclge_reset_vf(struct hclge_vport *vport, int ret; dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %d!", - mbx_req->mbx_src_vfid); - - /* Acknowledge VF that PF is now about to assert the reset for the VF. - * On receiving this message VF will get into pending state and will - * start polling for the hardware reset completion status. - */ - ret = hclge_inform_reset_assert_to_vf(vport); - if (ret) { - dev_err(&hdev->pdev->dev, - "PF fail(%d) to inform VF(%d)of reset, reset failed!\n", - ret, vport->vport_id); - return; - } + vport->vport_id); - dev_warn(&hdev->pdev->dev, "PF is now resetting VF %d.\n", - mbx_req->mbx_src_vfid); - /* reset this virtual function */ - hclge_func_reset_cmd(hdev, mbx_req->mbx_src_vfid); + ret = hclge_func_reset_cmd(hdev, vport->vport_id); + hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0); +} + +static void hclge_vf_keep_alive(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + vport->last_active_jiffies = jiffies; +} + +static int hclge_set_vf_mtu(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + int ret; + u32 mtu; + + memcpy(&mtu, &mbx_req->msg[2], sizeof(mtu)); + ret = hclge_set_vport_mtu(vport, mtu); + + return hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0); +} + +static int hclge_get_queue_id_in_pf(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + u16 queue_id, qid_in_pf; + u8 resp_data[2]; + + memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id)); + qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id); + memcpy(resp_data, &qid_in_pf, sizeof(qid_in_pf)); + + return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, 2); } static bool hclge_cmd_crq_empty(struct hclge_hw *hw) @@ -460,6 +503,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev) "PF failed(%d) to config VF's VLAN\n", ret); break; + case HCLGE_MBX_SET_ALIVE: + ret = hclge_set_vf_alive(vport, req, false); + if (ret) + dev_err(&hdev->pdev->dev, + "PF failed(%d) to set VF's ALIVE\n", + ret); + break; case HCLGE_MBX_GET_QINFO: ret = hclge_get_vf_queue_info(vport, req, true); if (ret) @@ -487,6 +537,22 @@ void hclge_mbx_handler(struct hclge_dev *hdev) case HCLGE_MBX_RESET: hclge_reset_vf(vport, req); break; + case HCLGE_MBX_KEEP_ALIVE: + hclge_vf_keep_alive(vport, req); + break; + case 
HCLGE_MBX_SET_MTU: + ret = hclge_set_vf_mtu(vport, req); + if (ret) + dev_err(&hdev->pdev->dev, + "VF fail(%d) to set mtu\n", ret); + break; + case HCLGE_MBX_GET_QID_IN_PF: + ret = hclge_get_queue_id_in_pf(vport, req); + if (ret) + dev_err(&hdev->pdev->dev, + "PF failed(%d) to get qid for VF\n", + ret); + break; default: dev_err(&hdev->pdev->dev, "un-supported mailbox message, code = %d\n", diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 03018638f701..741cb3b9519d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -195,12 +195,13 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev) { struct net_device *netdev = hdev->vport[0].nic.netdev; struct phy_device *phydev = hdev->hw.mac.phydev; + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; int ret; if (!phydev) return 0; - phydev->supported &= ~SUPPORTED_FIBRE; + linkmode_clear_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported); ret = phy_connect_direct(netdev, phydev, hclge_mac_adjust_link, @@ -210,7 +211,15 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev) return ret; } - phydev->supported &= HCLGE_PHY_SUPPORTED_FEATURES; + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, mask); + linkmode_set_bit_array(phy_10_100_features_array, + ARRAY_SIZE(phy_10_100_features_array), + mask); + linkmode_set_bit_array(phy_gbit_features_array, + ARRAY_SIZE(phy_gbit_features_array), + mask); + linkmode_and(phydev->supported, phydev->supported, mask); phy_support_asym_pause(phydev); return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 494e562fe8c7..00458da67503 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -1259,15 +1259,13 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev) return 0; } -int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc) +void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc) { struct hclge_vport *vport = hdev->vport; struct hnae3_knic_private_info *kinfo; u32 i, k; for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { - if (prio_tc[i] >= hdev->tm_info.num_tc) - return -EINVAL; hdev->tm_info.prio_tc[i] = prio_tc[i]; for (k = 0; k < hdev->num_alloc_vport; k++) { @@ -1275,18 +1273,12 @@ int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc) kinfo->prio_tc[i] = prio_tc[i]; } } - return 0; } -int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) +void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) { u8 i, bit_map = 0; - for (i = 0; i < hdev->num_alloc_vport; i++) { - if (num_tc > hdev->vport[i].alloc_tqps) - return -EINVAL; - } - hdev->tm_info.num_tc = num_tc; for (i = 0; i < hdev->tm_info.num_tc; i++) @@ -1300,8 +1292,6 @@ int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) hdev->hw_tc_map = bit_map; hclge_tm_schd_info_init(hdev); - - return 0; } int hclge_tm_init_hw(struct hclge_dev *hdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index 25eef13a3e14..b6496a439304 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -40,6 +40,13 @@ struct hclge_nq_to_qs_link_cmd { __le16 qset_id; }; +struct hclge_tqp_tx_queue_tc_cmd { + __le16 
queue_id; + __le16 rsvd; + u8 tc_id; + u8 rev[3]; +}; + struct hclge_pg_weight_cmd { u8 pg_id; u8 dwrr; @@ -55,6 +62,12 @@ struct hclge_qs_weight_cmd { u8 dwrr; }; +struct hclge_ets_tc_weight_cmd { + u8 tc_weight[HNAE3_MAX_TC]; + u8 weight_offset; + u8 rsvd[15]; +}; + #define HCLGE_TM_SHAP_IR_B_MSK GENMASK(7, 0) #define HCLGE_TM_SHAP_IR_B_LSH 0 #define HCLGE_TM_SHAP_IR_U_MSK GENMASK(11, 8) @@ -131,8 +144,8 @@ struct hclge_port_shapping_cmd { int hclge_tm_schd_init(struct hclge_dev *hdev); int hclge_pause_setup_hw(struct hclge_dev *hdev); int hclge_tm_schd_mode_hw(struct hclge_dev *hdev); -int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc); -int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc); +void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc); +void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc); int hclge_tm_dwrr_cfg(struct hclge_dev *hdev); int hclge_tm_map_cfg(struct hclge_dev *hdev); int hclge_tm_init_hw(struct hclge_dev *hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index b917acf71ee6..d5765c8cf3a3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -189,7 +189,8 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num) spin_lock_bh(&hw->cmq.csq.lock); - if (num > hclgevf_ring_space(&hw->cmq.csq)) { + if (num > hclgevf_ring_space(&hw->cmq.csq) || + test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) { spin_unlock_bh(&hw->cmq.csq.lock); return -EBUSY; } @@ -338,6 +339,16 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev) spin_unlock_bh(&hdev->hw.cmq.crq.lock); spin_unlock_bh(&hdev->hw.cmq.csq.lock); + clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + + /* Check if there is new reset pending, because the higher level + * reset may happen when lower level reset is being processed. + */ + if (hclgevf_is_reset_pending(hdev)) { + set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + return -EBUSY; + } + /* get firmware version */ ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version); if (ret) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h index 090541de0c7d..47030b42341f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h @@ -87,6 +87,8 @@ enum hclgevf_opcode_type { HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03, HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13, HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20, + /* GRO command */ + HCLGEVF_OPC_GRO_GENERIC_CONFIG = 0x0C10, /* RSS cmd */ HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01, HCLGEVF_OPC_RSS_INPUT_TUPLE = 0x0D02, @@ -149,6 +151,12 @@ struct hclgevf_query_res_cmd { __le16 rsv[7]; }; +#define HCLGEVF_GRO_EN_B 0 +struct hclgevf_cfg_gro_status_cmd { + __le16 gro_en; + u8 rsv[22]; +}; + #define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4 #define HCLGEVF_RSS_HASH_KEY_OFFSET_B 4 #define HCLGEVF_RSS_HASH_KEY_NUM 16 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 29da4806c100..75327dcda3b0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -2,6 +2,7 @@ // Copyright (c) 2016-2017 Hisilicon Limited. 
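(Aside on the hclgevf_cfg_gro_status_cmd layout introduced above: the __le16 gro_en field plus 22 reserved bytes fill the command descriptor's data area exactly, and the enable flag is stored little-endian, as in req->gro_en = cpu_to_le16(en ? 1 : 0). A standalone sketch; the 24-byte payload size and the cpu_to_le16_sketch() stand-in are assumptions for illustration, not driver definitions:)

#include <stdint.h>
#include <stdio.h>

/* mirrors struct hclgevf_cfg_gro_status_cmd from the patch */
struct cfg_gro_status_cmd {
	uint16_t gro_en;   /* __le16 in the driver */
	uint8_t  rsv[22];
};

/* stand-in for the kernel's cpu_to_le16(); identity on little-endian hosts */
static uint16_t cpu_to_le16_sketch(uint16_t v)
{
	return v;
}

int main(void)
{
	uint32_t data[6] = { 0 };  /* assumed 6 x u32 = 24-byte desc.data area */
	struct cfg_gro_status_cmd *req = (struct cfg_gro_status_cmd *)data;

	_Static_assert(sizeof(struct cfg_gro_status_cmd) == sizeof(data),
		       "command must fill the payload exactly");

	req->gro_en = cpu_to_le16_sketch(1);  /* enable, as hclgevf_config_gro() does */
	printf("payload bytes: %zu\n", sizeof(*req));
	return 0;
}
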
#include <linux/etherdevice.h> +#include <linux/iopoll.h> #include <net/rtnetlink.h> #include "hclgevf_cmd.h" #include "hclgevf_main.h" @@ -10,8 +11,7 @@ #define HCLGEVF_NAME "hclgevf" -static int hclgevf_init_hdev(struct hclgevf_dev *hdev); -static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev); +static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); static struct hnae3_ae_algo ae_algovf; static const struct pci_device_id ae_algovf_pci_tbl[] = { @@ -23,6 +23,58 @@ static const struct pci_device_id ae_algovf_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); +static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG, + HCLGEVF_CMDQ_TX_ADDR_H_REG, + HCLGEVF_CMDQ_TX_DEPTH_REG, + HCLGEVF_CMDQ_TX_TAIL_REG, + HCLGEVF_CMDQ_TX_HEAD_REG, + HCLGEVF_CMDQ_RX_ADDR_L_REG, + HCLGEVF_CMDQ_RX_ADDR_H_REG, + HCLGEVF_CMDQ_RX_DEPTH_REG, + HCLGEVF_CMDQ_RX_TAIL_REG, + HCLGEVF_CMDQ_RX_HEAD_REG, + HCLGEVF_VECTOR0_CMDQ_SRC_REG, + HCLGEVF_CMDQ_INTR_STS_REG, + HCLGEVF_CMDQ_INTR_EN_REG, + HCLGEVF_CMDQ_INTR_GEN_REG}; + +static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE, + HCLGEVF_RST_ING, + HCLGEVF_GRO_EN_REG}; + +static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG, + HCLGEVF_RING_RX_ADDR_H_REG, + HCLGEVF_RING_RX_BD_NUM_REG, + HCLGEVF_RING_RX_BD_LENGTH_REG, + HCLGEVF_RING_RX_MERGE_EN_REG, + HCLGEVF_RING_RX_TAIL_REG, + HCLGEVF_RING_RX_HEAD_REG, + HCLGEVF_RING_RX_FBD_NUM_REG, + HCLGEVF_RING_RX_OFFSET_REG, + HCLGEVF_RING_RX_FBD_OFFSET_REG, + HCLGEVF_RING_RX_STASH_REG, + HCLGEVF_RING_RX_BD_ERR_REG, + HCLGEVF_RING_TX_ADDR_L_REG, + HCLGEVF_RING_TX_ADDR_H_REG, + HCLGEVF_RING_TX_BD_NUM_REG, + HCLGEVF_RING_TX_PRIORITY_REG, + HCLGEVF_RING_TX_TC_REG, + HCLGEVF_RING_TX_MERGE_EN_REG, + HCLGEVF_RING_TX_TAIL_REG, + HCLGEVF_RING_TX_HEAD_REG, + HCLGEVF_RING_TX_FBD_NUM_REG, + HCLGEVF_RING_TX_OFFSET_REG, + HCLGEVF_RING_TX_EBD_NUM_REG, + HCLGEVF_RING_TX_EBD_OFFSET_REG, + HCLGEVF_RING_TX_BD_ERR_REG, + HCLGEVF_RING_EN_REG}; + +static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, + HCLGEVF_TQP_INTR_GL0_REG, + HCLGEVF_TQP_INTR_GL1_REG, + HCLGEVF_TQP_INTR_GL2_REG, + HCLGEVF_TQP_INTR_RL_REG}; + static inline struct hclgevf_dev *hclgevf_ae_get_hdev( struct hnae3_handle *handle) { @@ -204,17 +256,28 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) return 0; } +static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + u8 msg_data[2], resp_data[2]; + u16 qid_in_pf = 0; + int ret; + + memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); + + ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data, + 2, true, resp_data, 2); + if (!ret) + qid_in_pf = *(u16 *)resp_data; + + return qid_in_pf; +} + static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) { struct hclgevf_tqp *tqp; int i; - /* if this is on going reset then we need to re-allocate the TPQs - * since we cannot assume we would get same number of TPQs back from PF - */ - if (hclgevf_dev_ongoing_reset(hdev)) - devm_kfree(&hdev->pdev->dev, hdev->htqp); - hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, sizeof(struct hclgevf_tqp), GFP_KERNEL); if (!hdev->htqp) @@ -258,12 +321,6 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev) new_tqps = kinfo->rss_size * kinfo->num_tc; kinfo->num_tqps = min(new_tqps, hdev->num_tqps); - /* if this is on going reset then we need to re-allocate the hnae queues - * as well since number of TPQs from PF might have changed. 
- */ - if (hclgevf_dev_ongoing_reset(hdev)) - devm_kfree(&hdev->pdev->dev, kinfo->tqp); - kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, sizeof(struct hnae3_queue *), GFP_KERNEL); if (!kinfo->tqp) @@ -868,6 +925,9 @@ static int hclgevf_unmap_ring_from_vector( struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); int ret, vector_id; + if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) + return 0; + vector_id = hclgevf_get_vector_index(hdev, vector); if (vector_id < 0) { dev_err(&handle->pdev->dev, @@ -1090,38 +1150,87 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) 2, true, NULL, 0); } +static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu, + sizeof(new_mtu), true, NULL, 0); +} + static int hclgevf_notify_client(struct hclgevf_dev *hdev, enum hnae3_reset_notify_type type) { struct hnae3_client *client = hdev->nic_client; struct hnae3_handle *handle = &hdev->nic; + int ret; if (!client->ops->reset_notify) return -EOPNOTSUPP; - return client->ops->reset_notify(handle, type); + ret = client->ops->reset_notify(handle, type); + if (ret) + dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", + type, ret); + + return ret; +} + +static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev) +{ + struct hclgevf_dev *hdev = ae_dev->priv; + + set_bit(HNAE3_FLR_DONE, &hdev->flr_state); +} + +static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev, + unsigned long delay_us, + unsigned long wait_cnt) +{ + unsigned long cnt = 0; + + while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) && + cnt++ < wait_cnt) + usleep_range(delay_us, delay_us * 2); + + if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) { + dev_err(&hdev->pdev->dev, + "flr wait timeout\n"); + return -ETIMEDOUT; + } + + return 0; } static int hclgevf_reset_wait(struct hclgevf_dev *hdev) { -#define HCLGEVF_RESET_WAIT_MS 500 -#define HCLGEVF_RESET_WAIT_CNT 20 - u32 val, cnt = 0; +#define HCLGEVF_RESET_WAIT_US 20000 +#define HCLGEVF_RESET_WAIT_CNT 2000 +#define HCLGEVF_RESET_WAIT_TIMEOUT_US \ + (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT) + + u32 val; + int ret; /* wait to check the hardware reset completion status */ - val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); - while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) && - (cnt < HCLGEVF_RESET_WAIT_CNT)) { - msleep(HCLGEVF_RESET_WAIT_MS); - val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); - cnt++; - } + val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); + dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val); + + if (hdev->reset_type == HNAE3_FLR_RESET) + return hclgevf_flr_poll_timeout(hdev, + HCLGEVF_RESET_WAIT_US, + HCLGEVF_RESET_WAIT_CNT); + + ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val, + !(val & HCLGEVF_RST_ING_BITS), + HCLGEVF_RESET_WAIT_US, + HCLGEVF_RESET_WAIT_TIMEOUT_US); /* hardware completion status should be available by this time */ - if (cnt >= HCLGEVF_RESET_WAIT_CNT) { - dev_warn(&hdev->pdev->dev, - "could'nt get reset done status from h/w, timeout!\n"); - return -EBUSY; + if (ret) { + dev_err(&hdev->pdev->dev, + "couldn't get reset done status from h/w, timeout!\n"); + return ret; } /* we will wait a bit more to let the reset of the stack complete. 
This @@ -1138,10 +1247,12 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev) int ret; /* uninitialize the nic client */ - hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); + ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); + if (ret) + return ret; /* re-initialize the hclge device */ - ret = hclgevf_init_hdev(hdev); + ret = hclgevf_reset_hdev(hdev); if (ret) { dev_err(&hdev->pdev->dev, "hclge device re-init failed, VF is disabled!\n"); @@ -1149,23 +1260,60 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev) } /* bring up the nic client again */ - hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); + ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); + if (ret) + return ret; return 0; } +static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) +{ + int ret = 0; + + switch (hdev->reset_type) { + case HNAE3_VF_FUNC_RESET: + ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL, + 0, true, NULL, sizeof(u8)); + break; + case HNAE3_FLR_RESET: + set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); + break; + default: + break; + } + + set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + + dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n", + hdev->reset_type, ret); + + return ret; +} + static int hclgevf_reset(struct hclgevf_dev *hdev) { + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); int ret; + /* Initialize ae_dev reset status as well, in case enet layer wants to + * know if device is undergoing reset + */ + ae_dev->reset_type = hdev->reset_type; hdev->reset_count++; rtnl_lock(); /* bring down the nic to stop any ongoing TX/RX */ - hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); + ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + goto err_reset_lock; rtnl_unlock(); + ret = hclgevf_reset_prepare_wait(hdev); + if (ret) + goto err_reset; + /* check if VF could successfully fetch the hardware reset completion * status from the hardware */ @@ -1175,42 +1323,37 @@ static int hclgevf_reset(struct hclgevf_dev *hdev) dev_err(&hdev->pdev->dev, "VF failed(=%d) to fetch H/W reset completion status\n", ret); - - dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n"); - rtnl_lock(); - hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); - - rtnl_unlock(); - return ret; + goto err_reset; } rtnl_lock(); /* now, re-initialize the nic client and ae device*/ ret = hclgevf_reset_stack(hdev); - if (ret) + if (ret) { dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); + goto err_reset_lock; + } /* bring up the nic to enable TX/RX again */ - hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); + ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); + if (ret) + goto err_reset_lock; rtnl_unlock(); return ret; -} - -static int hclgevf_do_reset(struct hclgevf_dev *hdev) -{ - int status; - u8 respmsg; - - status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL, - 0, false, &respmsg, sizeof(u8)); - if (status) - dev_err(&hdev->pdev->dev, - "VF reset request to PF failed(=%d)\n", status); +err_reset_lock: + rtnl_unlock(); +err_reset: + /* When VF reset failed, only the higher level reset asserted by PF + * can restore it, so re-initialize the command queue to receive + * this higher reset event. 
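+ * (hclgevf_cmd_init() re-checks reset_pending and keeps the command
+ * queue disabled when another, higher level reset is already queued.)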
+ */ + hclgevf_cmd_init(hdev); + dev_err(&hdev->pdev->dev, "failed to reset VF\n"); - return status; + return ret; } static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, @@ -1218,9 +1361,26 @@ static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, { enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; + /* return the highest priority reset level amongst all */ if (test_bit(HNAE3_VF_RESET, addr)) { rst_level = HNAE3_VF_RESET; clear_bit(HNAE3_VF_RESET, addr); + clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); + clear_bit(HNAE3_VF_FUNC_RESET, addr); + } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { + rst_level = HNAE3_VF_FULL_RESET; + clear_bit(HNAE3_VF_FULL_RESET, addr); + clear_bit(HNAE3_VF_FUNC_RESET, addr); + } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { + rst_level = HNAE3_VF_PF_FUNC_RESET; + clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); + clear_bit(HNAE3_VF_FUNC_RESET, addr); + } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { + rst_level = HNAE3_VF_FUNC_RESET; + clear_bit(HNAE3_VF_FUNC_RESET, addr); + } else if (test_bit(HNAE3_FLR_RESET, addr)) { + rst_level = HNAE3_FLR_RESET; + clear_bit(HNAE3_FLR_RESET, addr); } return rst_level; @@ -1229,16 +1389,17 @@ static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, static void hclgevf_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + struct hclgevf_dev *hdev = ae_dev->priv; dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); - if (!hdev->default_reset_request) + if (hdev->default_reset_request) hdev->reset_level = hclgevf_get_reset_level(hdev, &hdev->default_reset_request); else - hdev->reset_level = HNAE3_VF_RESET; + hdev->reset_level = HNAE3_VF_FUNC_RESET; /* reset of this VF requested */ set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); @@ -1255,6 +1416,27 @@ static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, set_bit(rst_type, &hdev->default_reset_request); } +static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev) +{ +#define HCLGEVF_FLR_WAIT_MS 100 +#define HCLGEVF_FLR_WAIT_CNT 50 + struct hclgevf_dev *hdev = ae_dev->priv; + int cnt = 0; + + clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state); + clear_bit(HNAE3_FLR_DONE, &hdev->flr_state); + set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request); + hclgevf_reset_event(hdev->pdev, NULL); + + while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) && + cnt++ < HCLGEVF_FLR_WAIT_CNT) + msleep(HCLGEVF_FLR_WAIT_MS); + + if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state)) + dev_err(&hdev->pdev->dev, + "flr wait down timeout: %d\n", cnt); +} + static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); @@ -1341,9 +1523,15 @@ static void hclgevf_reset_service_task(struct work_struct *work) */ hdev->reset_attempts = 0; - ret = hclgevf_reset(hdev); - if (ret) - dev_err(&hdev->pdev->dev, "VF stack reset failed.\n"); + hdev->last_reset_time = jiffies; + while ((hdev->reset_type = + hclgevf_get_reset_level(hdev, &hdev->reset_pending)) + != HNAE3_NONE_RESET) { + ret = hclgevf_reset(hdev); + if (ret) + dev_err(&hdev->pdev->dev, + "VF stack reset failed %d.\n", ret); + } } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state)) { /* we could be here when either of below happens: @@ -1372,19 +1560,17 @@ static void hclgevf_reset_service_task(struct work_struct *work) */ if (hdev->reset_attempts > 3) { 
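/* Note: the branches below implement the escalation policy: attempts
 * 1-3 re-pend the current reset_level and reschedule this task; from
 * the fourth failure the task pends HNAE3_VF_FULL_RESET instead, and
 * hclgevf_get_reset_level() clears the weaker pending bits when it
 * selects the full reset, so one stronger reset services them all.
 */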
/* prepare for full reset of stack + pcie interface */ - hdev->reset_level = HNAE3_VF_FULL_RESET; + set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); /* "defer" schedule the reset task again */ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); } else { hdev->reset_attempts++; - /* request PF for resetting this VF via mailbox */ - ret = hclgevf_do_reset(hdev); - if (ret) - dev_warn(&hdev->pdev->dev, - "VF rst fail, stack will call\n"); + set_bit(hdev->reset_level, &hdev->reset_pending); + set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); } + hclgevf_reset_task_schedule(hdev); } clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); @@ -1406,6 +1592,28 @@ static void hclgevf_mailbox_service_task(struct work_struct *work) clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); } +static void hclgevf_keep_alive_timer(struct timer_list *t) +{ + struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer); + + schedule_work(&hdev->keep_alive_task); + mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ); +} + +static void hclgevf_keep_alive_task(struct work_struct *work) +{ + struct hclgevf_dev *hdev; + u8 respmsg; + int ret; + + hdev = container_of(work, struct hclgevf_dev, keep_alive_task); + ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL, + 0, false, &respmsg, sizeof(u8)); + if (ret) + dev_err(&hdev->pdev->dev, + "VF sends keep alive cmd failed(=%d)\n", ret); +} + static void hclgevf_service_task(struct work_struct *work) { struct hclgevf_dev *hdev; @@ -1427,24 +1635,37 @@ static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr); } -static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval) +static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, + u32 *clearval) { - u32 cmdq_src_reg; + u32 cmdq_src_reg, rst_ing_reg; /* fetch the events from their corresponding regs */ cmdq_src_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG); + if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) { + rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); + dev_info(&hdev->pdev->dev, + "receive reset interrupt 0x%x!\n", rst_ing_reg); + set_bit(HNAE3_VF_RESET, &hdev->reset_pending); + set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); + set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B); + *clearval = cmdq_src_reg; + return HCLGEVF_VECTOR0_EVENT_RST; + } + /* check for vector0 mailbox(=CMDQ RX) event source */ if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); *clearval = cmdq_src_reg; - return true; + return HCLGEVF_VECTOR0_EVENT_MBX; } dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n"); - return false; + return HCLGEVF_VECTOR0_EVENT_OTHER; } static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) @@ -1454,19 +1675,28 @@ static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) { + enum hclgevf_evt_cause event_cause; struct hclgevf_dev *hdev = data; u32 clearval; hclgevf_enable_vector(&hdev->misc_vector, false); - if (!hclgevf_check_event_cause(hdev, &clearval)) - goto skip_sched; + event_cause = hclgevf_check_evt_cause(hdev, &clearval); - hclgevf_mbx_handler(hdev); - - hclgevf_clear_event_cause(hdev, clearval); + switch (event_cause) { + case HCLGEVF_VECTOR0_EVENT_RST: + hclgevf_reset_task_schedule(hdev); + 
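
/* The keep-alive pair added above follows a standard kernel split: the timer
 * callback only schedules work and re-arms itself, while the potentially
 * sleeping mailbox send runs in process context. A rough userspace analogue,
 * assuming pthreads in place of timer_list/workqueue; send_keep_alive() is a
 * stand-in, not a driver call.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool keep_alive_on = true;

static int send_keep_alive(void)
{
	return 0;	/* stand-in for the HCLGE_MBX_KEEP_ALIVE mailbox send */
}

static void *keep_alive_loop(void *arg)
{
	(void)arg;
	while (atomic_load(&keep_alive_on)) {
		if (send_keep_alive())
			fprintf(stderr, "keep alive send failed\n");
		sleep(2);	/* mirrors the 2 * HZ re-arm interval */
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, keep_alive_loop, NULL);
	sleep(5);			/* let a couple of rounds run */
	atomic_store(&keep_alive_on, false);
	pthread_join(t, NULL);
	return 0;
}
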
break; + case HCLGEVF_VECTOR0_EVENT_MBX: + hclgevf_mbx_handler(hdev); + break; + default: + break; + } -skip_sched: - hclgevf_enable_vector(&hdev->misc_vector, true); + if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) { + hclgevf_clear_event_cause(hdev, clearval); + hclgevf_enable_vector(&hdev->misc_vector, true); + } return IRQ_HANDLED; } @@ -1524,6 +1754,29 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) return 0; } +static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) +{ + struct hclgevf_cfg_gro_status_cmd *req; + struct hclgevf_desc desc; + int ret; + + if (!hnae3_dev_gro_supported(hdev)) + return 0; + + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG, + false); + req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; + + req->gro_en = cpu_to_le16(en ? 1 : 0); + + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "VF GRO hardware config cmd failed, ret = %d.\n", ret); + + return ret; +} + static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) { struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; @@ -1613,12 +1866,40 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle) hclgevf_update_link_status(hdev, 0); } -static void hclgevf_state_init(struct hclgevf_dev *hdev) +static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) { - /* if this is on going reset then skip this initialization */ - if (hclgevf_dev_ongoing_reset(hdev)) - return; + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + u8 msg_data; + msg_data = alive ? 1 : 0; + return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE, + 0, &msg_data, 1, false, NULL, 0); +} + +static int hclgevf_client_start(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ); + return hclgevf_set_alive(handle, true); +} + +static void hclgevf_client_stop(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int ret; + + ret = hclgevf_set_alive(handle, false); + if (ret) + dev_warn(&hdev->pdev->dev, + "%s failed %d\n", __func__, ret); + + del_timer_sync(&hdev->keep_alive_timer); + cancel_work_sync(&hdev->keep_alive_task); +} + +static void hclgevf_state_init(struct hclgevf_dev *hdev) +{ /* setup tasks for the MBX */ INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task); clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); @@ -1660,10 +1941,6 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev) int vectors; int i; - /* if this is on going reset then skip this initialization */ - if (hclgevf_dev_ongoing_reset(hdev)) - return 0; - if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) vectors = pci_alloc_irq_vectors(pdev, hdev->roce_base_msix_offset + 1, @@ -1702,6 +1979,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev) hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, sizeof(int), GFP_KERNEL); if (!hdev->vector_irq) { + devm_kfree(&pdev->dev, hdev->vector_status); pci_free_irq_vectors(pdev); return -ENOMEM; } @@ -1713,6 +1991,8 @@ static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) { struct pci_dev *pdev = hdev->pdev; + devm_kfree(&pdev->dev, hdev->vector_status); + devm_kfree(&pdev->dev, hdev->vector_irq); pci_free_irq_vectors(pdev); } @@ -1720,10 +2000,6 @@ static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) { int ret = 0; - /* if this is on going reset then skip this initialization */ - if (hclgevf_dev_ongoing_reset(hdev)) - return 0; - 
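
/* Besides wiring up GRO and the alive state, the init_msi hunk above fixes a
 * leak: when the second allocation fails, the first is now released before
 * bailing out. The general acquire-in-order/release-in-reverse shape, as a
 * compile-only sketch (the struct and helper are illustrative):
 */
#include <stdlib.h>

struct vectors {
	int *status;
	int *irq;
};

static int vectors_init(struct vectors *v, int n)
{
	v->status = calloc(n, sizeof(*v->status));
	if (!v->status)
		return -1;

	v->irq = calloc(n, sizeof(*v->irq));
	if (!v->irq) {
		free(v->status);	/* undo step 1 before failing */
		v->status = NULL;
		return -1;
	}
	return 0;
}
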
hclgevf_get_misc_vector(hdev); ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, @@ -1853,14 +2129,6 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev) struct hclgevf_hw *hw; int ret; - /* check if we need to skip initialization of pci. This will happen if - * device is undergoing VF reset. Otherwise, we would need to - * re-initialize pci interface again i.e. when device is not going - * through *any* reset or actually undergoing full reset. - */ - if (hclgevf_dev_ongoing_reset(hdev)) - return 0; - ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "failed to enable PCI device\n"); @@ -1949,17 +2217,86 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) return 0; } -static int hclgevf_init_hdev(struct hclgevf_dev *hdev) +static int hclgevf_pci_reset(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int ret = 0; + + if (hdev->reset_type == HNAE3_VF_FULL_RESET && + test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { + hclgevf_misc_irq_uninit(hdev); + hclgevf_uninit_msi(hdev); + clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); + } + + if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { + pci_set_master(pdev); + ret = hclgevf_init_msi(hdev); + if (ret) { + dev_err(&pdev->dev, + "failed(%d) to init MSI/MSI-X\n", ret); + return ret; + } + + ret = hclgevf_misc_irq_init(hdev); + if (ret) { + hclgevf_uninit_msi(hdev); + dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", + ret); + return ret; + } + + set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); + } + + return ret; +} + +static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) { struct pci_dev *pdev = hdev->pdev; int ret; - /* check if device is on-going full reset(i.e. pcie as well) */ - if (hclgevf_dev_ongoing_full_reset(hdev)) { - dev_warn(&pdev->dev, "device is going full reset\n"); - hclgevf_uninit_hdev(hdev); + ret = hclgevf_pci_reset(hdev); + if (ret) { + dev_err(&pdev->dev, "pci reset failed %d\n", ret); + return ret; } + ret = hclgevf_cmd_init(hdev); + if (ret) { + dev_err(&pdev->dev, "cmd failed %d\n", ret); + return ret; + } + + ret = hclgevf_rss_init_hw(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed(%d) to initialize RSS\n", ret); + return ret; + } + + ret = hclgevf_config_gro(hdev, true); + if (ret) + return ret; + + ret = hclgevf_init_vlan_config(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed(%d) to initialize VLAN config\n", ret); + return ret; + } + + dev_info(&hdev->pdev->dev, "Reset done\n"); + + return 0; +} + +static int hclgevf_init_hdev(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int ret; + ret = hclgevf_pci_init(hdev); if (ret) { dev_err(&pdev->dev, "PCI initialization failed\n"); @@ -1991,7 +2328,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) } hclgevf_state_init(hdev); - hdev->reset_level = HNAE3_VF_RESET; + hdev->reset_level = HNAE3_VF_FUNC_RESET; ret = hclgevf_misc_irq_init(hdev); if (ret) { @@ -2000,6 +2337,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_misc_irq_init; } + set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); + ret = hclgevf_configure(hdev); if (ret) { dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); @@ -2018,6 +2357,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_config; } + ret = hclgevf_config_gro(hdev, true); + if (ret) + goto err_config; + /* Initialize RSS for this VF */ ret = hclgevf_rss_init_hw(hdev); if (ret) { @@ -2047,21 +2390,27 @@ err_cmd_init: hclgevf_cmd_uninit(hdev); err_cmd_queue_init: 
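
/* hclgevf_pci_reset() above keys the whole decision off one flag,
 * HCLGEVF_STATE_IRQ_INITED: tear the vectors down only when a full reset
 * requires it, re-init only when they are (now) gone, which keeps the
 * function safe for both full and lighter resets. A compile-only sketch of
 * that guard, with hypothetical helpers in place of the MSI/IRQ calls:
 */
#include <stdbool.h>

struct dev_state {
	bool irq_inited;
	bool full_reset;
};

static int irq_setup(struct dev_state *d)
{
	d->irq_inited = true;
	return 0;
}

static void irq_teardown(struct dev_state *d)
{
	d->irq_inited = false;
}

static int pci_reset_sketch(struct dev_state *d)
{
	if (d->full_reset && d->irq_inited)
		irq_teardown(d);	/* only a full reset recycles IRQs */

	if (!d->irq_inited)
		return irq_setup(d);	/* bring them back up exactly once */

	return 0;
}
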
hclgevf_pci_uninit(hdev); + clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); return ret; } static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) { hclgevf_state_uninit(hdev); - hclgevf_misc_irq_uninit(hdev); + + if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { + hclgevf_misc_irq_uninit(hdev); + hclgevf_uninit_msi(hdev); + hclgevf_pci_uninit(hdev); + } + hclgevf_cmd_uninit(hdev); - hclgevf_uninit_msi(hdev); - hclgevf_pci_uninit(hdev); } static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) { struct pci_dev *pdev = ae_dev->pdev; + struct hclgevf_dev *hdev; int ret; ret = hclgevf_alloc_hdev(ae_dev); @@ -2071,10 +2420,16 @@ static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) } ret = hclgevf_init_hdev(ae_dev->priv); - if (ret) + if (ret) { dev_err(&pdev->dev, "hclge device initialization failed\n"); + return ret; + } - return ret; + hdev = ae_dev->priv; + timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0); + INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task); + + return 0; } static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) @@ -2151,6 +2506,13 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, hdev->hw.mac.duplex = duplex; } +static int hclgevf_gro_en(struct hnae3_handle *handle, int enable) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return hclgevf_config_gro(hdev, enable); +} + static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type) { @@ -2163,7 +2525,7 @@ static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); + return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); } static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) @@ -2180,13 +2542,83 @@ static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) return hdev->reset_count; } +#define MAX_SEPARATE_NUM 4 +#define SEPARATOR_VALUE 0xFFFFFFFF +#define REG_NUM_PER_LINE 4 +#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) + +static int hclgevf_get_regs_len(struct hnae3_handle *handle) +{ + int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; + common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; + ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; + tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; + + return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + + tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; +} + +static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, + void *data) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int i, j, reg_um, separator_num; + u32 *reg = data; + + *version = hdev->fw_version; + + /* fetching per-VF registers values from VF PCIe register space */ + reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (i = 0; i < reg_um; i++) + *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + + reg_um = sizeof(common_reg_addr_list) / sizeof(u32); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (i = 0; i < reg_um; i++) + *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); + for (i = 0; i < separator_num; i++) + *reg++ = 
SEPARATOR_VALUE; + + reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (j = 0; j < hdev->num_tqps; j++) { + for (i = 0; i < reg_um; i++) + *reg++ = hclgevf_read_dev(&hdev->hw, + ring_reg_addr_list[i] + + 0x200 * j); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + } + + reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (j = 0; j < hdev->num_msi_used - 1; j++) { + for (i = 0; i < reg_um; i++) + *reg++ = hclgevf_read_dev(&hdev->hw, + tqp_intr_reg_addr_list[i] + + 4 * j); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + } +} + static const struct hnae3_ae_ops hclgevf_ops = { .init_ae_dev = hclgevf_init_ae_dev, .uninit_ae_dev = hclgevf_uninit_ae_dev, + .flr_prepare = hclgevf_flr_prepare, + .flr_done = hclgevf_flr_done, .init_client_instance = hclgevf_init_client_instance, .uninit_client_instance = hclgevf_uninit_client_instance, .start = hclgevf_ae_start, .stop = hclgevf_ae_stop, + .client_start = hclgevf_client_start, + .client_stop = hclgevf_client_stop, .map_ring_to_vector = hclgevf_map_ring_to_vector, .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, .get_vector = hclgevf_get_vector, @@ -2217,12 +2649,17 @@ static const struct hnae3_ae_ops hclgevf_ops = { .set_default_reset_request = hclgevf_set_def_reset_request, .get_channels = hclgevf_get_channels, .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, + .get_regs_len = hclgevf_get_regs_len, + .get_regs = hclgevf_get_regs, .get_status = hclgevf_get_status, .get_ksettings_an_result = hclgevf_get_ksettings_an_result, .get_media_type = hclgevf_get_media_type, .get_hw_reset_stat = hclgevf_get_hw_reset_stat, .ae_dev_resetting = hclgevf_ae_dev_resetting, .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, + .set_gro_en = hclgevf_gro_en, + .set_mtu = hclgevf_set_mtu, + .get_global_queue_id = hclgevf_get_qid_global, }; static struct hnae3_ae_algo ae_algovf = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 0ea4c9b3ae0a..787bc06944e5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -27,15 +27,77 @@ #define HCLGEVF_VECTOR_REG_OFFSET 0x4 #define HCLGEVF_VECTOR_VF_OFFSET 0x100000 +/* bar registers for cmdq */ +#define HCLGEVF_CMDQ_TX_ADDR_L_REG 0x27000 +#define HCLGEVF_CMDQ_TX_ADDR_H_REG 0x27004 +#define HCLGEVF_CMDQ_TX_DEPTH_REG 0x27008 +#define HCLGEVF_CMDQ_TX_TAIL_REG 0x27010 +#define HCLGEVF_CMDQ_TX_HEAD_REG 0x27014 +#define HCLGEVF_CMDQ_RX_ADDR_L_REG 0x27018 +#define HCLGEVF_CMDQ_RX_ADDR_H_REG 0x2701C +#define HCLGEVF_CMDQ_RX_DEPTH_REG 0x27020 +#define HCLGEVF_CMDQ_RX_TAIL_REG 0x27024 +#define HCLGEVF_CMDQ_RX_HEAD_REG 0x27028 +#define HCLGEVF_CMDQ_INTR_SRC_REG 0x27100 +#define HCLGEVF_CMDQ_INTR_STS_REG 0x27104 +#define HCLGEVF_CMDQ_INTR_EN_REG 0x27108 +#define HCLGEVF_CMDQ_INTR_GEN_REG 0x2710C + +/* bar registers for common func */ +#define HCLGEVF_GRO_EN_REG 0x28000 + +/* bar registers for rcb */ +#define HCLGEVF_RING_RX_ADDR_L_REG 0x80000 +#define HCLGEVF_RING_RX_ADDR_H_REG 0x80004 +#define HCLGEVF_RING_RX_BD_NUM_REG 0x80008 +#define HCLGEVF_RING_RX_BD_LENGTH_REG 0x8000C +#define HCLGEVF_RING_RX_MERGE_EN_REG 0x80014 +#define HCLGEVF_RING_RX_TAIL_REG 0x80018 +#define HCLGEVF_RING_RX_HEAD_REG 0x8001C +#define HCLGEVF_RING_RX_FBD_NUM_REG 0x80020 +#define HCLGEVF_RING_RX_OFFSET_REG 
0x80024 +#define HCLGEVF_RING_RX_FBD_OFFSET_REG 0x80028 +#define HCLGEVF_RING_RX_STASH_REG 0x80030 +#define HCLGEVF_RING_RX_BD_ERR_REG 0x80034 +#define HCLGEVF_RING_TX_ADDR_L_REG 0x80040 +#define HCLGEVF_RING_TX_ADDR_H_REG 0x80044 +#define HCLGEVF_RING_TX_BD_NUM_REG 0x80048 +#define HCLGEVF_RING_TX_PRIORITY_REG 0x8004C +#define HCLGEVF_RING_TX_TC_REG 0x80050 +#define HCLGEVF_RING_TX_MERGE_EN_REG 0x80054 +#define HCLGEVF_RING_TX_TAIL_REG 0x80058 +#define HCLGEVF_RING_TX_HEAD_REG 0x8005C +#define HCLGEVF_RING_TX_FBD_NUM_REG 0x80060 +#define HCLGEVF_RING_TX_OFFSET_REG 0x80064 +#define HCLGEVF_RING_TX_EBD_NUM_REG 0x80068 +#define HCLGEVF_RING_TX_EBD_OFFSET_REG 0x80070 +#define HCLGEVF_RING_TX_BD_ERR_REG 0x80074 +#define HCLGEVF_RING_EN_REG 0x80090 + +/* bar registers for tqp interrupt */ +#define HCLGEVF_TQP_INTR_CTRL_REG 0x20000 +#define HCLGEVF_TQP_INTR_GL0_REG 0x20100 +#define HCLGEVF_TQP_INTR_GL1_REG 0x20200 +#define HCLGEVF_TQP_INTR_GL2_REG 0x20300 +#define HCLGEVF_TQP_INTR_RL_REG 0x20900 + /* Vector0 interrupt CMDQ event source register(RW) */ #define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100 /* CMDQ register bits for RX event(=MBX event) */ #define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1 +/* RST register bits for RESET event */ +#define HCLGEVF_VECTOR0_RST_INT_B 2 #define HCLGEVF_TQP_RESET_TRY_TIMES 10 /* Reset related Registers */ -#define HCLGEVF_FUN_RST_ING 0x20C00 -#define HCLGEVF_FUN_RST_ING_B 0 +#define HCLGEVF_RST_ING 0x20C00 +#define HCLGEVF_FUN_RST_ING_BIT BIT(0) +#define HCLGEVF_GLOBAL_RST_ING_BIT BIT(5) +#define HCLGEVF_CORE_RST_ING_BIT BIT(6) +#define HCLGEVF_IMP_RST_ING_BIT BIT(7) +#define HCLGEVF_RST_ING_BITS \ + (HCLGEVF_FUN_RST_ING_BIT | HCLGEVF_GLOBAL_RST_ING_BIT | \ + HCLGEVF_CORE_RST_ING_BIT | HCLGEVF_IMP_RST_ING_BIT) #define HCLGEVF_RSS_IND_TBL_SIZE 512 #define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff @@ -54,17 +116,25 @@ #define HCLGEVF_S_IP_BIT BIT(3) #define HCLGEVF_V_TAG_BIT BIT(4) +enum hclgevf_evt_cause { + HCLGEVF_VECTOR0_EVENT_RST, + HCLGEVF_VECTOR0_EVENT_MBX, + HCLGEVF_VECTOR0_EVENT_OTHER, +}; + /* states of hclgevf device & tasks */ enum hclgevf_states { /* device states */ HCLGEVF_STATE_DOWN, HCLGEVF_STATE_DISABLED, + HCLGEVF_STATE_IRQ_INITED, /* task states */ HCLGEVF_STATE_SERVICE_SCHED, HCLGEVF_STATE_RST_SERVICE_SCHED, HCLGEVF_STATE_RST_HANDLING, HCLGEVF_STATE_MBX_SERVICE_SCHED, HCLGEVF_STATE_MBX_HANDLING, + HCLGEVF_STATE_CMD_DISABLE, }; #define HCLGEVF_MPF_ENBALE 1 @@ -145,9 +215,12 @@ struct hclgevf_dev { struct hclgevf_misc_vector misc_vector; struct hclgevf_rss_cfg rss_cfg; unsigned long state; + unsigned long flr_state; unsigned long default_reset_request; unsigned long last_reset_time; enum hnae3_reset_type reset_level; + unsigned long reset_pending; + enum hnae3_reset_type reset_type; #define HCLGEVF_RESET_REQUESTED 0 #define HCLGEVF_RESET_PENDING 1 @@ -182,7 +255,9 @@ struct hclgevf_dev { struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */ struct timer_list service_timer; + struct timer_list keep_alive_timer; struct work_struct service_task; + struct work_struct keep_alive_task; struct work_struct rst_service_task; struct work_struct mbx_service_task; @@ -196,18 +271,9 @@ struct hclgevf_dev { u32 flag; }; -static inline bool hclgevf_dev_ongoing_reset(struct hclgevf_dev *hdev) -{ - return (hdev && - (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) && - (hdev->reset_level == HNAE3_VF_RESET)); -} - -static inline bool hclgevf_dev_ongoing_full_reset(struct hclgevf_dev *hdev) +static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev) { - return 
(hdev && - (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) && - (hdev->reset_level == HNAE3_VF_FULL_RESET)); + return !!hdev->reset_pending; } int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index 558e3b838676..ef9c8e6eca28 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -40,6 +40,9 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, } while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) { + if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) + return -EIO; + udelay(HCLGEVF_SLEEP_USCOEND); i++; } @@ -148,6 +151,11 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) crq = &hdev->hw.cmq.crq; while (!hclgevf_cmd_crq_empty(&hdev->hw)) { + if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) { + dev_info(&hdev->pdev->dev, "vf crq need init\n"); + return; + } + desc = &crq->desc[crq->next_to_use]; req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data; @@ -233,6 +241,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) { + enum hnae3_reset_type reset_type; u16 link_status; u16 *msg_q; u8 duplex; @@ -248,6 +257,12 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) /* process all the async queue messages */ while (tail != hdev->arq.head) { + if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) { + dev_info(&hdev->pdev->dev, + "vf crq need init in async\n"); + return; + } + msg_q = hdev->arq.msg_q[hdev->arq.head]; switch (msg_q[0]) { @@ -267,7 +282,8 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) * has been completely reset. After this stack should * eventually be re-initialized. */ - hdev->reset_level = HNAE3_VF_RESET; + reset_type = le16_to_cpu(msg_q[1]); + set_bit(reset_type, &hdev->reset_pending); set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); hclgevf_reset_task_schedule(hdev); |
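
/* All three mailbox paths in the hunk above gain the same guard: bail out of
 * any polling loop as soon as HCLGEVF_STATE_CMD_DISABLE is observed, because
 * the command queue is about to be reinitialised and a response can never
 * arrive. The loop shape, as a compile-only sketch with placeholder helpers
 * and illustrative error codes:
 */
#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool cmd_disabled;

static bool response_arrived(void)
{
	return false;	/* placeholder for the real mailbox poll */
}

static int wait_for_mbx_resp(int max_tries)
{
	for (int i = 0; i < max_tries; i++) {
		if (atomic_load(&cmd_disabled))
			return -EIO;	/* queue going away: give up early */
		if (response_arrived())
			return 0;
		/* the driver udelay()s here; busy-wait elided */
	}
	return -ETIMEDOUT;
}
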