Diffstat (limited to 'drivers/net/ethernet/stmicro/stmmac/stmmac_main.c')
-rw-r--r-- | drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 224
1 files changed, 148 insertions, 76 deletions
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index f113138df0d9..ff1cbfc834b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -388,9 +388,8 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
         /* Using PCS we cannot dial with the phy registers at this stage
          * so we do not support extra feature like EEE.
          */
-        if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
-            (priv->hw->pcs == STMMAC_PCS_TBI) ||
-            (priv->hw->pcs == STMMAC_PCS_RTBI))
+        if (priv->hw->pcs == STMMAC_PCS_TBI ||
+            priv->hw->pcs == STMMAC_PCS_RTBI)
                 return false;

         /* Check if MAC core supports the EEE feature. */
@@ -1090,6 +1089,8 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)

                 if (priv->extend_desc)
                         head_tx = (void *)tx_q->dma_etx;
+                else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+                        head_tx = (void *)tx_q->dma_entx;
                 else
                         head_tx = (void *)tx_q->dma_tx;

@@ -1163,13 +1164,19 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
         int i;

         /* Clear the TX descriptors */
-        for (i = 0; i < DMA_TX_SIZE; i++)
+        for (i = 0; i < DMA_TX_SIZE; i++) {
+                int last = (i == (DMA_TX_SIZE - 1));
+                struct dma_desc *p;
+
                 if (priv->extend_desc)
-                        stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
-                                        priv->mode, (i == DMA_TX_SIZE - 1));
+                        p = &tx_q->dma_etx[i].basic;
+                else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+                        p = &tx_q->dma_entx[i].basic;
                 else
-                        stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
-                                        priv->mode, (i == DMA_TX_SIZE - 1));
+                        p = &tx_q->dma_tx[i];
+
+                stmmac_init_tx_desc(priv, p, priv->mode, last);
+        }
 }

 /**
@@ -1383,7 +1390,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
                 if (priv->extend_desc)
                         stmmac_mode_init(priv, tx_q->dma_etx,
                                          tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
-                else
+                else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
                         stmmac_mode_init(priv, tx_q->dma_tx,
                                          tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
         }
@@ -1392,6 +1399,8 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
                         struct dma_desc *p;

                         if (priv->extend_desc)
                                 p = &((tx_q->dma_etx + i)->basic);
+                        else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+                                p = &((tx_q->dma_entx + i)->basic);
                         else
                                 p = tx_q->dma_tx + i;
@@ -1511,19 +1520,26 @@ static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
         /* Free TX queue resources */
         for (queue = 0; queue < tx_count; queue++) {
                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+                size_t size;
+                void *addr;

                 /* Release the DMA TX socket buffers */
                 dma_free_tx_skbufs(priv, queue);

-                /* Free DMA regions of consistent memory previously allocated */
-                if (!priv->extend_desc)
-                        dma_free_coherent(priv->device,
-                                          DMA_TX_SIZE * sizeof(struct dma_desc),
-                                          tx_q->dma_tx, tx_q->dma_tx_phy);
-                else
-                        dma_free_coherent(priv->device, DMA_TX_SIZE *
-                                          sizeof(struct dma_extended_desc),
-                                          tx_q->dma_etx, tx_q->dma_tx_phy);
+                if (priv->extend_desc) {
+                        size = sizeof(struct dma_extended_desc);
+                        addr = tx_q->dma_etx;
+                } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
+                        size = sizeof(struct dma_edesc);
+                        addr = tx_q->dma_entx;
+                } else {
+                        size = sizeof(struct dma_desc);
+                        addr = tx_q->dma_tx;
+                }
+
+                size *= DMA_TX_SIZE;
+
+                dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);

                 kfree(tx_q->tx_skbuff_dma);
                 kfree(tx_q->tx_skbuff);
@@ -1616,6 +1632,8 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
         /* TX queues buffers and DMA */
         for (queue = 0; queue < tx_count; queue++) {
                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+                size_t size;
+                void *addr;

                 tx_q->queue_index = queue;
                 tx_q->priv_data = priv;
@@ -1632,28 +1650,32 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
                 if (!tx_q->tx_skbuff)
                         goto err_dma;

-                if (priv->extend_desc) {
-                        tx_q->dma_etx = dma_alloc_coherent(priv->device,
-                                        DMA_TX_SIZE * sizeof(struct dma_extended_desc),
-                                        &tx_q->dma_tx_phy,
-                                        GFP_KERNEL);
-                        if (!tx_q->dma_etx)
-                                goto err_dma;
-                } else {
-                        tx_q->dma_tx = dma_alloc_coherent(priv->device,
-                                        DMA_TX_SIZE * sizeof(struct dma_desc),
-                                        &tx_q->dma_tx_phy,
-                                        GFP_KERNEL);
-                        if (!tx_q->dma_tx)
-                                goto err_dma;
-                }
+                if (priv->extend_desc)
+                        size = sizeof(struct dma_extended_desc);
+                else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+                        size = sizeof(struct dma_edesc);
+                else
+                        size = sizeof(struct dma_desc);
+
+                size *= DMA_TX_SIZE;
+
+                addr = dma_alloc_coherent(priv->device, size,
+                                          &tx_q->dma_tx_phy, GFP_KERNEL);
+                if (!addr)
+                        goto err_dma;
+
+                if (priv->extend_desc)
+                        tx_q->dma_etx = addr;
+                else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+                        tx_q->dma_entx = addr;
+                else
+                        tx_q->dma_tx = addr;
         }

         return 0;

 err_dma:
         free_dma_tx_desc_resources(priv);
-
         return ret;
 }
@@ -1885,6 +1907,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)

                 if (priv->extend_desc)
                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
+                else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+                        p = &tx_q->dma_entx[entry].basic;
                 else
                         p = tx_q->dma_tx + entry;

@@ -1983,19 +2007,12 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
 {
         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
-        int i;

         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));

         stmmac_stop_tx_dma(priv, chan);
         dma_free_tx_skbufs(priv, chan);
-        for (i = 0; i < DMA_TX_SIZE; i++)
-                if (priv->extend_desc)
-                        stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
-                                        priv->mode, (i == DMA_TX_SIZE - 1));
-                else
-                        stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
-                                        priv->mode, (i == DMA_TX_SIZE - 1));
+        stmmac_clear_tx_descriptors(priv, chan);
         tx_q->dirty_tx = 0;
         tx_q->cur_tx = 0;
         tx_q->mss = 0;
@@ -2632,6 +2649,14 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
         if (priv->dma_cap.vlins)
                 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);

+        /* TBS */
+        for (chan = 0; chan < tx_cnt; chan++) {
+                struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+                int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
+
+                stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
+        }
+
         /* Start the ball rolling... */
         stmmac_start_all_dma(priv);
@@ -2661,8 +2686,7 @@ static int stmmac_open(struct net_device *dev)
         u32 chan;
         int ret;

-        if (priv->hw->pcs != STMMAC_PCS_RGMII &&
-            priv->hw->pcs != STMMAC_PCS_TBI &&
+        if (priv->hw->pcs != STMMAC_PCS_TBI &&
             priv->hw->pcs != STMMAC_PCS_RTBI) {
                 ret = stmmac_init_phy(dev);
                 if (ret) {
@@ -2689,6 +2713,16 @@ static int stmmac_open(struct net_device *dev)

         priv->rx_copybreak = STMMAC_RX_COPYBREAK;

+        /* Earlier check for TBS */
+        for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
+                struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+                int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
+
+                tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
+                if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
+                        tx_q->tbs &= ~STMMAC_TBS_AVAIL;
+        }
+
         ret = alloc_dma_desc_resources(priv);
         if (ret < 0) {
                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
@@ -2837,7 +2871,11 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
                 return false;

         tag = skb_vlan_tag_get(skb);
-        p = tx_q->dma_tx + tx_q->cur_tx;
+        if (tx_q->tbs & STMMAC_TBS_AVAIL)
+                p = &tx_q->dma_entx[tx_q->cur_tx].basic;
+        else
+                p = &tx_q->dma_tx[tx_q->cur_tx];
+
         if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
                 return false;
@@ -2872,7 +2910,11 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
-                desc = tx_q->dma_tx + tx_q->cur_tx;
+
+                if (tx_q->tbs & STMMAC_TBS_AVAIL)
+                        desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
+                else
+                        desc = &tx_q->dma_tx[tx_q->cur_tx];

                 curr_addr = des + (total_len - tmp_len);
                 if (priv->dma_cap.addr64 <= 32)
@@ -2923,13 +2965,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 {
         struct dma_desc *desc, *first, *mss_desc = NULL;
         struct stmmac_priv *priv = netdev_priv(dev);
+        int desc_size, tmp_pay_len = 0, first_tx;
         int nfrags = skb_shinfo(skb)->nr_frags;
         u32 queue = skb_get_queue_mapping(skb);
         unsigned int first_entry, tx_packets;
-        int tmp_pay_len = 0, first_tx;
         struct stmmac_tx_queue *tx_q;
-        u8 proto_hdr_len, hdr;
         bool has_vlan, set_ic;
+        u8 proto_hdr_len, hdr;
         u32 pay_len, mss;
         dma_addr_t des;
         int i;
@@ -2966,7 +3008,11 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)

         /* set new MSS value if needed */
         if (mss != tx_q->mss) {
-                mss_desc = tx_q->dma_tx + tx_q->cur_tx;
+                if (tx_q->tbs & STMMAC_TBS_AVAIL)
+                        mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
+                else
+                        mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
+
                 stmmac_set_mss(priv, mss_desc, mss);
                 tx_q->mss = mss;
                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
@@ -2986,7 +3032,10 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
         first_entry = tx_q->cur_tx;
         WARN_ON(tx_q->tx_skbuff[first_entry]);

-        desc = tx_q->dma_tx + first_entry;
+        if (tx_q->tbs & STMMAC_TBS_AVAIL)
+                desc = &tx_q->dma_entx[first_entry].basic;
+        else
+                desc = &tx_q->dma_tx[first_entry];
         first = desc;

         if (has_vlan)
@@ -3058,7 +3107,11 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
                 set_ic = false;

         if (set_ic) {
-                desc = &tx_q->dma_tx[tx_q->cur_tx];
+                if (tx_q->tbs & STMMAC_TBS_AVAIL)
+                        desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
+                else
+                        desc = &tx_q->dma_tx[tx_q->cur_tx];
+
                 tx_q->tx_count_frames = 0;
                 stmmac_set_tx_ic(priv, desc);
                 priv->xstats.tx_set_ic_bit++;
@@ -3121,16 +3174,18 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
                         tx_q->cur_tx, first, nfrags);
-
-                stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
-
                 pr_info(">>> frame to be transmitted: ");
                 print_pkt(skb->data, skb_headlen(skb));
         }

         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

-        tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
+        if (tx_q->tbs & STMMAC_TBS_AVAIL)
+                desc_size = sizeof(struct dma_edesc);
+        else
+                desc_size = sizeof(struct dma_desc);
+
+        tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);

         stmmac_tx_timer_arm(priv, queue);
@@ -3160,10 +3215,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
         u32 queue = skb_get_queue_mapping(skb);
         int nfrags = skb_shinfo(skb)->nr_frags;
         int gso = skb_shinfo(skb)->gso_type;
+        struct dma_edesc *tbs_desc = NULL;
+        int entry, desc_size, first_tx;
         struct dma_desc *desc, *first;
         struct stmmac_tx_queue *tx_q;
         bool has_vlan, set_ic;
-        int entry, first_tx;
         dma_addr_t des;

         tx_q = &priv->tx_queue[queue];
@@ -3203,6 +3259,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)

         if (likely(priv->extend_desc))
                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+        else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+                desc = &tx_q->dma_entx[entry].basic;
         else
                 desc = tx_q->dma_tx + entry;
@@ -3232,6 +3290,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)

                 if (likely(priv->extend_desc))
                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+                else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+                        desc = &tx_q->dma_entx[entry].basic;
                 else
                         desc = tx_q->dma_tx + entry;
@@ -3278,6 +3338,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
         if (set_ic) {
                 if (likely(priv->extend_desc))
                         desc = &tx_q->dma_etx[entry].basic;
+                else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+                        desc = &tx_q->dma_entx[entry].basic;
                 else
                         desc = &tx_q->dma_tx[entry];
@@ -3295,20 +3357,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
         tx_q->cur_tx = entry;

         if (netif_msg_pktdata(priv)) {
-                void *tx_head;
-
                 netdev_dbg(priv->dev,
                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
                            entry, first, nfrags);

-                if (priv->extend_desc)
-                        tx_head = (void *)tx_q->dma_etx;
-                else
-                        tx_head = (void *)tx_q->dma_tx;
-
-                stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
-
                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
                 print_pkt(skb->data, skb->len);
         }
@@ -3354,12 +3407,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)

                 /* Prepare the first descriptor setting the OWN bit too */
                 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
-                                csum_insertion, priv->mode, 1, last_segment,
+                                csum_insertion, priv->mode, 0, last_segment,
                                 skb->len);
-        } else {
-                stmmac_set_tx_owner(priv, first);
         }

+        if (tx_q->tbs & STMMAC_TBS_EN) {
+                struct timespec64 ts = ns_to_timespec64(skb->tstamp);
+
+                tbs_desc = &tx_q->dma_entx[first_entry];
+                stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
+        }
+
+        stmmac_set_tx_owner(priv, first);
+
         /* The own bit must be the latest setting done when prepare the
          * descriptor and then barrier is needed to make sure that
          * all is coherent before granting the DMA engine.
@@ -3370,7 +3430,14 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)

         stmmac_enable_dma_transmission(priv, priv->ioaddr);

-        tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
+        if (likely(priv->extend_desc))
+                desc_size = sizeof(struct dma_extended_desc);
+        else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+                desc_size = sizeof(struct dma_edesc);
+        else
+                desc_size = sizeof(struct dma_desc);
+
+        tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);

         stmmac_tx_timer_arm(priv, queue);
@@ -4090,6 +4157,8 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
                 return stmmac_tc_setup_cbs(priv, priv, type_data);
         case TC_SETUP_QDISC_TAPRIO:
                 return stmmac_tc_setup_taprio(priv, priv, type_data);
+        case TC_SETUP_QDISC_ETF:
+                return stmmac_tc_setup_etf(priv, priv, type_data);
         default:
                 return -EOPNOTSUPP;
         }
@@ -4193,7 +4262,7 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
                         seq_printf(seq, "Extended descriptor ring:\n");
                         sysfs_display_ring((void *)tx_q->dma_etx,
                                            DMA_TX_SIZE, 1, seq);
-                } else {
+                } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
                         seq_printf(seq, "Descriptor ring:\n");
                         sysfs_display_ring((void *)tx_q->dma_tx,
                                            DMA_TX_SIZE, 0, seq);
@@ -4294,6 +4363,12 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
                    priv->dma_cap.l3l4fnum);
         seq_printf(seq, "\tARP Offloading: %s\n",
                    priv->dma_cap.arpoffsel ? "Y" : "N");
+        seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
+                   priv->dma_cap.estsel ? "Y" : "N");
+        seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
+                   priv->dma_cap.fpesel ? "Y" : "N");
+        seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
+                   priv->dma_cap.tbssel ? "Y" : "N");
         return 0;
 }
 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
@@ -4799,8 +4874,7 @@ int stmmac_dvr_probe(struct device *device,

         stmmac_check_pcs_mode(priv);

-        if (priv->hw->pcs != STMMAC_PCS_RGMII &&
-            priv->hw->pcs != STMMAC_PCS_TBI &&
+        if (priv->hw->pcs != STMMAC_PCS_TBI &&
             priv->hw->pcs != STMMAC_PCS_RTBI) {
                 /* MDIO bus Registration */
                 ret = stmmac_mdio_register(ndev);
@@ -4834,8 +4908,7 @@ int stmmac_dvr_probe(struct device *device,
 error_netdev_register:
         phylink_destroy(priv->phylink);
 error_phy_setup:
-        if (priv->hw->pcs != STMMAC_PCS_RGMII &&
-            priv->hw->pcs != STMMAC_PCS_TBI &&
+        if (priv->hw->pcs != STMMAC_PCS_TBI &&
             priv->hw->pcs != STMMAC_PCS_RTBI)
                 stmmac_mdio_unregister(ndev);
 error_mdio_register:
@@ -4880,8 +4953,7 @@ int stmmac_dvr_remove(struct device *dev)
         reset_control_assert(priv->plat->stmmac_rst);
         clk_disable_unprepare(priv->plat->pclk);
         clk_disable_unprepare(priv->plat->stmmac_clk);
-        if (priv->hw->pcs != STMMAC_PCS_RGMII &&
-            priv->hw->pcs != STMMAC_PCS_TBI &&
+        if (priv->hw->pcs != STMMAC_PCS_TBI &&
             priv->hw->pcs != STMMAC_PCS_RTBI)
                 stmmac_mdio_unregister(ndev);
         destroy_workqueue(priv->wq);
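Nearly every TX-path hunk above repeats one dispatch: a queue's ring now holds one of three descriptor layouts (extended descriptors, enhanced TBS descriptors, or basic descriptors), selected by priv->extend_desc and the tx_q->tbs flags, and the tail-pointer arithmetic must scale by the matching element size. The standalone C sketch below condenses that pattern for illustration only; the struct bodies, flag values, and the tx_queue type here are simplified stand-ins, not the driver's real definitions (those live in the stmmac headers alongside this patch).

/*
 * Standalone sketch (not driver code) of the descriptor-layout dispatch
 * this patch threads through the TX path.  The structs below are
 * simplified stand-ins for the real stmmac definitions; in the driver,
 * extended descriptors and TBS descriptors never coexist on one ring.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define STMMAC_TBS_AVAIL (1 << 0)  /* hardware can do TBS on this queue */
#define STMMAC_TBS_EN    (1 << 1)  /* TBS currently enabled (ETF offload) */

struct dma_desc { uint32_t des0, des1, des2, des3; };
struct dma_extended_desc { struct dma_desc basic; uint32_t des4, des5, des6, des7; };
struct dma_edesc { uint32_t des4, des5, des6, des7; struct dma_desc basic; };

struct tx_queue {
        int tbs;                        /* STMMAC_TBS_* flags */
        int extend_desc;
        struct dma_desc *dma_tx;
        struct dma_extended_desc *dma_etx;
        struct dma_edesc *dma_entx;
        uintptr_t dma_tx_phy;           /* DMA address of ring element 0 */
};

/* Fetch the basic descriptor for @entry, whichever layout the ring uses. */
static struct dma_desc *tx_desc(struct tx_queue *q, unsigned int entry)
{
        if (q->extend_desc)
                return &q->dma_etx[entry].basic;
        if (q->tbs & STMMAC_TBS_AVAIL)
                return &q->dma_entx[entry].basic;
        return &q->dma_tx[entry];
}

/*
 * The DMA tail pointer must advance by the element size of the ring
 * actually in use.  Scaling by sizeof(struct dma_desc) unconditionally,
 * as the old code did, would land mid-descriptor on a TBS ring; that is
 * why the patch computes desc_size before setting tx_tail_addr.
 */
static uintptr_t tx_tail_addr(struct tx_queue *q, unsigned int cur_tx)
{
        size_t desc_size;

        if (q->extend_desc)
                desc_size = sizeof(struct dma_extended_desc);
        else if (q->tbs & STMMAC_TBS_AVAIL)
                desc_size = sizeof(struct dma_edesc);
        else
                desc_size = sizeof(struct dma_desc);

        return q->dma_tx_phy + cur_tx * desc_size;
}

int main(void)
{
        struct dma_edesc ring[4] = { 0 };
        struct tx_queue q = {
                .tbs = STMMAC_TBS_AVAIL,
                .dma_entx = ring,
                .dma_tx_phy = 0x1000,
        };

        /* Entry 2 of a 32-byte TBS ring: 0x1000 + 2 * 32 = 0x1040,
         * not the 0x1020 a 16-byte basic descriptor would give. */
        printf("desc %p tail 0x%lx\n",
               (void *)tx_desc(&q, 2), (unsigned long)tx_tail_addr(&q, 2));
        return 0;
}

In the patch itself, stmmac_open() sets STMMAC_TBS_AVAIL per queue from platform data (clearing it again if stmmac_enable_tbs() fails), and stmmac_xmit() stamps a launch time from skb->tstamp into the enhanced descriptor only when the ETF offload path (TC_SETUP_QDISC_ETF) has enabled STMMAC_TBS_EN.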