Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe/ixgbe_main.c')
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 953
 1 file changed, 602 insertions(+), 351 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index fee1f2918ead..a7a430a7be2c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -54,6 +54,7 @@
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/vxlan.h>
#include "ixgbe.h"
#include "ixgbe_common.h"
@@ -71,7 +72,7 @@ char ixgbe_default_device_descr[] =
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
-#define DRV_VERSION "4.4.0-k"
+#define DRV_VERSION "5.0.0-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2016 Intel Corporation.";
@@ -85,6 +86,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_X550] = &ixgbe_X550_info,
[board_X550EM_x] = &ixgbe_X550EM_x_info,
[board_x550em_a] = &ixgbe_x550em_a_info,
+ [board_x550em_a_fw] = &ixgbe_x550em_a_fw_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
@@ -139,6 +141,8 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw },
/* required last entry */
{0, }
};
@@ -179,6 +183,7 @@ MODULE_VERSION(DRV_VERSION);
static struct workqueue_struct *ixgbe_wq;
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
+static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
u32 reg, u16 *value)
@@ -606,12 +611,11 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
pr_info("Device Name state "
- "trans_start last_rx\n");
- pr_info("%-15s %016lX %016lX %016lX\n",
+ "trans_start\n");
+ pr_info("%-15s %016lX %016lX\n",
netdev->name,
netdev->state,
- dev_trans_start(netdev),
- netdev->last_rx);
+ dev_trans_start(netdev));
}
/* Print Registers */
@@ -941,28 +945,6 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
}
}
-void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
- struct ixgbe_tx_buffer *tx_buffer)
-{
- if (tx_buffer->skb) {
- dev_kfree_skb_any(tx_buffer->skb);
- if (dma_unmap_len(tx_buffer, len))
- dma_unmap_single(ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- } else if (dma_unmap_len(tx_buffer, len)) {
- dma_unmap_page(ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- }
- tx_buffer->next_to_watch = NULL;
- tx_buffer->skb = NULL;
- dma_unmap_len_set(tx_buffer, len, 0);
- /* tx_buffer must be completely set up in the transmit path */
-}
-
static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -1194,7 +1176,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
DMA_TO_DEVICE);
/* clear tx_buffer data */
- tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0);
/* unmap remaining buffers */
@@ -1548,6 +1529,11 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
}
}
+static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
+{
+ return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0;
+}
+
static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *bi)
{
@@ -1566,8 +1552,10 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
}
/* map page for use */
- dma = dma_map_page(rx_ring->dev, page, 0,
- ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+ ixgbe_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IXGBE_RX_DMA_ATTR);
/*
* if mapping failed free memory back to system since
@@ -1582,7 +1570,8 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
bi->dma = dma;
bi->page = page;
- bi->page_offset = 0;
+ bi->page_offset = ixgbe_rx_offset(rx_ring);
+ bi->pagecnt_bias = 1;
return true;
}
@@ -1597,6 +1586,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *bi;
u16 i = rx_ring->next_to_use;
+ u16 bufsz;
/* nothing to do */
if (!cleaned_count)
@@ -1606,10 +1596,17 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
bi = &rx_ring->rx_buffer_info[i];
i -= rx_ring->count;
+ bufsz = ixgbe_rx_bufsz(rx_ring);
+
do {
if (!ixgbe_alloc_mapped_page(rx_ring, bi))
break;
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset, bufsz,
+ DMA_FROM_DEVICE);
+
/*
* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
@@ -1625,8 +1622,8 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
i -= rx_ring->count;
}
- /* clear the status bits for the next_to_use descriptor */
- rx_desc->wb.upper.status_error = 0;
+ /* clear the length for the next_to_use descriptor */
+ rx_desc->wb.upper.length = 0;
cleaned_count--;
} while (cleaned_count);
@@ -1716,11 +1713,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
struct sk_buff *skb)
{
- skb_mark_napi_id(skb, &q_vector->napi);
- if (ixgbe_qv_busy_polling(q_vector))
- netif_receive_skb(skb);
- else
- napi_gro_receive(&q_vector->napi, skb);
+ napi_gro_receive(&q_vector->napi, skb);
}
/**
@@ -1832,19 +1825,19 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
{
/* if the page was released unmap it, else just sync our portion */
if (unlikely(IXGBE_CB(skb)->page_released)) {
- dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
- ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
- IXGBE_CB(skb)->page_released = false;
+ dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
+ ixgbe_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IXGBE_RX_DMA_ATTR);
} else {
struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
dma_sync_single_range_for_cpu(rx_ring->dev,
IXGBE_CB(skb)->dma,
frag->page_offset,
- ixgbe_rx_bufsz(rx_ring),
+ skb_frag_size(frag),
DMA_FROM_DEVICE);
}
- IXGBE_CB(skb)->dma = 0;
}
/**
@@ -1880,7 +1873,7 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
}
/* place header in linear portion of buffer */
- if (skb_is_nonlinear(skb))
+ if (!skb_headlen(skb))
ixgbe_pull_tail(rx_ring, skb);
#ifdef IXGBE_FCOE
@@ -1915,14 +1908,14 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
nta++;
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
- /* transfer page from old buffer to new buffer */
- *new_buff = *old_buff;
-
- /* sync the buffer for use by the device */
- dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
- new_buff->page_offset,
- ixgbe_rx_bufsz(rx_ring),
- DMA_FROM_DEVICE);
+ /* Transfer page from old buffer to new buffer.
+ * Move each member individually to avoid possible store
+ * forwarding stalls and unnecessary copy of skb.
+ */
+ new_buff->dma = old_buff->dma;
+ new_buff->page = old_buff->page;
+ new_buff->page_offset = old_buff->page_offset;
+ new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
static inline bool ixgbe_page_is_reserved(struct page *page)
@@ -1930,6 +1923,43 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
+static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
+{
+ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+ struct page *page = rx_buffer->page;
+
+ /* avoid re-using remote pages */
+ if (unlikely(ixgbe_page_is_reserved(page)))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+ return false;
+#else
+ /* The last offset is a bit aggressive in that we assume the
+ * worst case of FCoE being enabled and using a 3K buffer.
+ * However this should have minimal impact as the 1K extra is
+ * still less than one buffer in size.
+ */
+#define IXGBE_LAST_OFFSET \
+ (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K)
+ if (rx_buffer->page_offset > IXGBE_LAST_OFFSET)
+ return false;
+#endif
+
+ /* If we have drained the page fragment pool we need to update
+ * the pagecnt_bias and page count so that we fully restock the
+ * number of references the driver holds.
+ */
+ if (unlikely(!pagecnt_bias)) {
+ page_ref_add(page, USHRT_MAX);
+ rx_buffer->pagecnt_bias = USHRT_MAX;
+ }
+
+ return true;
+}
+
/**
* ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
@@ -1945,144 +1975,172 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
* The function will then update the page offset if necessary and return
* true if the buffer can be reused by the adapter.
**/
-static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
+static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ unsigned int size)
{
- struct page *page = rx_buffer->page;
- unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
- unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
+ unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
- unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
- unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
- ixgbe_rx_bufsz(rx_ring);
+ unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+ SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
+ SKB_DATA_ALIGN(size);
#endif
-
- if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
-
- memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-
- /* page is not reserved, we can reuse buffer as-is */
- if (likely(!ixgbe_page_is_reserved(page)))
- return true;
-
- /* this page cannot be reused so discard it */
- __free_pages(page, ixgbe_rx_pg_order(rx_ring));
- return false;
- }
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
rx_buffer->page_offset, size, truesize);
-
- /* avoid re-using remote pages */
- if (unlikely(ixgbe_page_is_reserved(page)))
- return false;
-
#if (PAGE_SIZE < 8192)
- /* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
- return false;
-
- /* flip page offset to other buffer */
rx_buffer->page_offset ^= truesize;
#else
- /* move offset up to the next cache line */
rx_buffer->page_offset += truesize;
-
- if (rx_buffer->page_offset > last_offset)
- return false;
#endif
-
- /* Even if we own the page, we are not allowed to use atomic_set()
- * This would break get_page_unless_zero() users.
- */
- page_ref_inc(page);
-
- return true;
}
-static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
- union ixgbe_adv_rx_desc *rx_desc)
+static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff **skb,
+ const unsigned int size)
{
struct ixgbe_rx_buffer *rx_buffer;
- struct sk_buff *skb;
- struct page *page;
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
- page = rx_buffer->page;
- prefetchw(page);
+ prefetchw(rx_buffer->page);
+ *skb = rx_buffer->skb;
- skb = rx_buffer->skb;
+ /* Delay unmapping of the first packet. It carries the header
+ * information, HW may still access the header after the writeback.
+ * Only unmap it when EOP is reached
+ */
+ if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) {
+ if (!*skb)
+ goto skip_sync;
+ } else {
+ if (*skb)
+ ixgbe_dma_sync_frag(rx_ring, *skb);
+ }
- if (likely(!skb)) {
- void *page_addr = page_address(page) +
- rx_buffer->page_offset;
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
+skip_sync:
+ rx_buffer->pagecnt_bias--;
- /* prefetch first cache line of first page */
- prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES);
-#endif
+ return rx_buffer;
+}
- /* allocate a skb to store the frags */
- skb = napi_alloc_skb(&rx_ring->q_vector->napi,
- IXGBE_RX_HDR_SIZE);
- if (unlikely(!skb)) {
- rx_ring->rx_stats.alloc_rx_buff_failed++;
- return NULL;
+static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *rx_buffer,
+ struct sk_buff *skb)
+{
+ if (ixgbe_can_reuse_rx_page(rx_buffer)) {
+ /* hand second half of page back to the ring */
+ ixgbe_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
+ /* the page has been released from the ring */
+ IXGBE_CB(skb)->page_released = true;
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ ixgbe_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IXGBE_RX_DMA_ATTR);
}
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
+ }
- /*
- * we will be copying header into skb->data in
- * pskb_may_pull so it is in our interest to prefetch
- * it now to avoid a possible cache miss
- */
- prefetchw(skb->data);
+ /* clear contents of rx_buffer */
+ rx_buffer->page = NULL;
+ rx_buffer->skb = NULL;
+}
- /*
- * Delay unmapping of the first packet. It carries the
- * header information, HW may still access the header
- * after the writeback. Only unmap it when EOP is
- * reached
- */
- if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
- goto dma_sync;
+static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *rx_buffer,
+ union ixgbe_adv_rx_desc *rx_desc,
+ unsigned int size)
+{
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(size);
+#endif
+ struct sk_buff *skb;
- IXGBE_CB(skb)->dma = rx_buffer->dma;
- } else {
- if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
- ixgbe_dma_sync_frag(rx_ring, skb);
+ /* prefetch first cache line of first page */
+ prefetch(va);
+#if L1_CACHE_BYTES < 128
+ prefetch(va + L1_CACHE_BYTES);
+#endif
-dma_sync:
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- ixgbe_rx_bufsz(rx_ring),
- DMA_FROM_DEVICE);
+ /* allocate a skb to store the frags */
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE);
+ if (unlikely(!skb))
+ return NULL;
- rx_buffer->skb = NULL;
- }
+ if (size > IXGBE_RX_HDR_SIZE) {
+ if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
+ IXGBE_CB(skb)->dma = rx_buffer->dma;
- /* pull page into skb */
- if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
- /* hand second half of page back to the ring */
- ixgbe_reuse_rx_page(rx_ring, rx_buffer);
- } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
- /* the page has been released from the ring */
- IXGBE_CB(skb)->page_released = true;
+ skb_add_rx_frag(skb, 0, rx_buffer->page,
+ rx_buffer->page_offset,
+ size, truesize);
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
} else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buffer->dma,
- ixgbe_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE);
+ memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+ rx_buffer->pagecnt_bias++;
}
- /* clear contents of buffer_info */
- rx_buffer->page = NULL;
+ return skb;
+}
+
+static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *rx_buffer,
+ union ixgbe_adv_rx_desc *rx_desc,
+ unsigned int size)
+{
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(IXGBE_SKB_PAD + size);
+#endif
+ struct sk_buff *skb;
+
+ /* prefetch first cache line of first page */
+ prefetch(va);
+#if L1_CACHE_BYTES < 128
+ prefetch(va + L1_CACHE_BYTES);
+#endif
+
+ /* build an skb around the page buffer */
+ skb = build_skb(va - IXGBE_SKB_PAD, truesize);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, IXGBE_SKB_PAD);
+ __skb_put(skb, size);
+
+ /* record DMA address if this is the start of a chain of buffers */
+ if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
+ IXGBE_CB(skb)->dma = rx_buffer->dma;
+
+ /* update buffer offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
return skb;
}
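
The hunks above replace the old scheme of calling page_ref_inc() once per received fragment with a driver-owned pagecnt_bias that is only topped up in bulk (page_ref_add(page, USHRT_MAX)) once it drains. A rough stand-alone model of that bookkeeping, in user-space C with plain counters standing in for struct page and with illustrative names that are not part of the driver, looks like this:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct rx_buf_model {
	unsigned int page_refcount;	/* stands in for page_ref_count(page) */
	unsigned int pagecnt_bias;	/* references still owned by the driver */
};

/* hand one fragment of the page to the stack (rx_buffer->pagecnt_bias--) */
static void model_take_frag(struct rx_buf_model *b)
{
	b->pagecnt_bias--;
}

/* can the driver keep recycling this page? (PAGE_SIZE < 8192 case) */
static bool model_can_reuse(struct rx_buf_model *b)
{
	/* reusable only while nobody but the driver and the fragment just
	 * handed out still reference the page
	 */
	if (b->page_refcount - b->pagecnt_bias > 1)
		return false;

	/* restock the bias in bulk instead of one atomic inc per frame */
	if (b->pagecnt_bias == 0) {
		b->page_refcount += USHRT_MAX;	/* page_ref_add(page, USHRT_MAX) */
		b->pagecnt_bias = USHRT_MAX;
	}
	return true;
}

int main(void)
{
	struct rx_buf_model b = { .page_refcount = 1, .pagecnt_bias = 1 };

	model_take_frag(&b);
	printf("reusable after first fragment: %s\n",
	       model_can_reuse(&b) ? "yes" : "no");
	return 0;
}

The invariant the real code relies on is that page_ref_count(page) - pagecnt_bias counts references currently outside the driver (including the fragment just handed to the stack); ixgbe_can_reuse_rx_page() only lets the buffer be recycled while that difference is at most one.
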
@@ -2114,7 +2172,9 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbe_rx_buffer *rx_buffer;
struct sk_buff *skb;
+ unsigned int size;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
@@ -2123,8 +2183,8 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
}
rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
-
- if (!rx_desc->wb.upper.status_error)
+ size = le16_to_cpu(rx_desc->wb.upper.length);
+ if (!size)
break;
/* This memory barrier is needed to keep us from reading
@@ -2133,13 +2193,26 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
*/
dma_rmb();
+ rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
+
/* retrieve a buffer from the ring */
- skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
+ if (skb)
+ ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
+ else if (ring_uses_build_skb(rx_ring))
+ skb = ixgbe_build_skb(rx_ring, rx_buffer,
+ rx_desc, size);
+ else
+ skb = ixgbe_construct_skb(rx_ring, rx_buffer,
+ rx_desc, size);
/* exit if we failed to retrieve a buffer */
- if (!skb)
+ if (!skb) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ rx_buffer->pagecnt_bias++;
break;
+ }
+ ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
cleaned_count++;
/* place incomplete frames back on ring for completion */
@@ -2197,40 +2270,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
return total_rx_packets;
}
-#ifdef CONFIG_NET_RX_BUSY_POLL
-/* must be called with local_bh_disable()d */
-static int ixgbe_low_latency_recv(struct napi_struct *napi)
-{
- struct ixgbe_q_vector *q_vector =
- container_of(napi, struct ixgbe_q_vector, napi);
- struct ixgbe_adapter *adapter = q_vector->adapter;
- struct ixgbe_ring *ring;
- int found = 0;
-
- if (test_bit(__IXGBE_DOWN, &adapter->state))
- return LL_FLUSH_FAILED;
-
- if (!ixgbe_qv_lock_poll(q_vector))
- return LL_FLUSH_BUSY;
-
- ixgbe_for_each_ring(ring, q_vector->rx) {
- found = ixgbe_clean_rx_irq(q_vector, ring, 4);
-#ifdef BP_EXTENDED_STATS
- if (found)
- ring->stats.cleaned += found;
- else
- ring->stats.misses++;
-#endif
- if (found)
- break;
- }
-
- ixgbe_qv_unlock_poll(q_vector);
-
- return found;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
/**
* ixgbe_configure_msix - Configure MSI-X hardware
* @adapter: board private structure
@@ -2446,6 +2485,7 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 eicr = adapter->interrupt_event;
+ s32 rc;
if (test_bit(__IXGBE_DOWN, &adapter->state))
return;
@@ -2484,6 +2524,12 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
return;
break;
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ rc = hw->phy.ops.check_overtemp(hw);
+ if (rc != IXGBE_ERR_OVERTEMP)
+ return;
+ break;
default:
if (adapter->hw.mac.type >= ixgbe_mac_X540)
return;
@@ -2530,6 +2576,18 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
return;
}
return;
+ case ixgbe_mac_x550em_a:
+ if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) {
+ adapter->interrupt_event = eicr;
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
+ ixgbe_service_event_schedule(adapter);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
+ IXGBE_EICR_GPI_SDP0_X550EM_a);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR,
+ IXGBE_EICR_GPI_SDP0_X550EM_a);
+ }
+ return;
+ case ixgbe_mac_X550:
case ixgbe_mac_X540:
if (!(eicr & IXGBE_EICR_TS))
return;
@@ -2855,8 +2913,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
clean_complete = false;
}
- /* Exit if we are called by netpoll or busy polling is active */
- if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
+ /* Exit if we are called by netpoll */
+ if (budget <= 0)
return budget;
/* attempt to distribute budget to each queue fairly, but don't allow
@@ -2875,7 +2933,6 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
clean_complete = false;
}
- ixgbe_qv_unlock_napi(q_vector);
/* If all work not completed, return budget and keep polling */
if (!clean_complete)
return budget;
@@ -3070,6 +3127,9 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
return;
}
+ if (!adapter->msix_entries)
+ return;
+
for (vector = 0; vector < adapter->num_q_vectors; vector++) {
struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
struct msix_entry *entry = &adapter->msix_entries[vector];
@@ -3210,6 +3270,10 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
+ /* reinitialize tx_buffer_info */
+ memset(ring->tx_buffer_info, 0,
+ sizeof(struct ixgbe_tx_buffer) * ring->count);
+
/* enable queue */
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
@@ -3380,7 +3444,10 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
/* configure the packet buffer length */
- srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state))
+ srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ else
+ srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
/* configure descriptor type */
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
@@ -3407,6 +3474,21 @@ u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_store_key - Write the RSS key to HW
+ * @adapter: device handle
+ *
+ * Write the RSS key stored in adapter.rss_key to HW.
+ */
+void ixgbe_store_key(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ for (i = 0; i < 10; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);
+}
+
+/**
* ixgbe_store_reta - Write the RETA table to HW
* @adapter: device handle
*
@@ -3471,7 +3553,6 @@ static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
{
- struct ixgbe_hw *hw = &adapter->hw;
u32 i, j;
u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
@@ -3484,8 +3565,7 @@ static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
rss_i = 4;
/* Fill out hash function seeds */
- for (i = 0; i < 10; i++)
- IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);
+ ixgbe_store_key(adapter);
/* Fill out redirection table */
memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl));
@@ -3681,6 +3761,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
+ union ixgbe_adv_rx_desc *rx_desc;
u64 rdba = ring->dma;
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
@@ -3713,8 +3794,27 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
*/
rxdctl &= ~0x3FFFFF;
rxdctl |= 0x080420;
+#if (PAGE_SIZE < 8192)
+ } else {
+ rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
+ IXGBE_RXDCTL_RLPML_EN);
+
+ /* Limit the maximum frame size so we don't overrun the skb */
+ if (ring_uses_build_skb(ring) &&
+ !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
+ rxdctl |= IXGBE_MAX_FRAME_BUILD_SKB |
+ IXGBE_RXDCTL_RLPML_EN;
+#endif
}
+ /* initialize rx_buffer_info */
+ memset(ring->rx_buffer_info, 0,
+ sizeof(struct ixgbe_rx_buffer) * ring->count);
+
+ /* initialize Rx descriptor 0 */
+ rx_desc = IXGBE_RX_DESC(ring, 0);
+ rx_desc->wb.upper.length = 0;
+
/* enable receive descriptor ring */
rxdctl |= IXGBE_RXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
@@ -3851,10 +3951,31 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
*/
for (i = 0; i < adapter->num_rx_queues; i++) {
rx_ring = adapter->rx_ring[i];
+
+ clear_ring_rsc_enabled(rx_ring);
+ clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+ clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
set_ring_rsc_enabled(rx_ring);
- else
- clear_ring_rsc_enabled(rx_ring);
+
+ if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
+ set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+
+ clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+ if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
+ continue;
+
+ set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+
+#if (PAGE_SIZE < 8192)
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+ set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+
+ if ((max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
+ (max_frame > IXGBE_MAX_FRAME_BUILD_SKB))
+ set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+#endif
}
}
@@ -4555,23 +4676,16 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
{
int q_idx;
- for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
- ixgbe_qv_init_lock(adapter->q_vector[q_idx]);
+ for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
napi_enable(&adapter->q_vector[q_idx]->napi);
- }
}
static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
int q_idx;
- for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
+ for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
napi_disable(&adapter->q_vector[q_idx]->napi);
- while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) {
- pr_info("QV %d locked\n", q_idx);
- usleep_range(1000, 20000);
- }
- }
}
static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
@@ -4875,45 +4989,47 @@ static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
**/
static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
- struct device *dev = rx_ring->dev;
- unsigned long size;
- u16 i;
-
- /* ring already cleared, nothing to do */
- if (!rx_ring->rx_buffer_info)
- return;
+ u16 i = rx_ring->next_to_clean;
+ struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
/* Free all the Rx ring sk_buffs */
- for (i = 0; i < rx_ring->count; i++) {
- struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
-
+ while (i != rx_ring->next_to_alloc) {
if (rx_buffer->skb) {
struct sk_buff *skb = rx_buffer->skb;
if (IXGBE_CB(skb)->page_released)
- dma_unmap_page(dev,
- IXGBE_CB(skb)->dma,
- ixgbe_rx_bufsz(rx_ring),
- DMA_FROM_DEVICE);
+ dma_unmap_page_attrs(rx_ring->dev,
+ IXGBE_CB(skb)->dma,
+ ixgbe_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IXGBE_RX_DMA_ATTR);
dev_kfree_skb(skb);
- rx_buffer->skb = NULL;
}
- if (!rx_buffer->page)
- continue;
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ ixgbe_rx_bufsz(rx_ring),
+ DMA_FROM_DEVICE);
- dma_unmap_page(dev, rx_buffer->dma,
- ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
- __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));
+ /* free resources associated with mapping */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ ixgbe_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IXGBE_RX_DMA_ATTR);
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
- rx_buffer->page = NULL;
+ i++;
+ rx_buffer++;
+ if (i == rx_ring->count) {
+ i = 0;
+ rx_buffer = rx_ring->rx_buffer_info;
+ }
}
- size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
- memset(rx_ring->rx_buffer_info, 0, size);
-
- /* Zero out the descriptor ring */
- memset(rx_ring->desc, 0, rx_ring->size);
-
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -5012,24 +5128,23 @@ fwd_queue_err:
return err;
}
-static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
+static int ixgbe_upper_dev_walk(struct net_device *upper, void *data)
{
- struct net_device *upper;
- struct list_head *iter;
- int err;
-
- netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
- if (netif_is_macvlan(upper)) {
- struct macvlan_dev *dfwd = netdev_priv(upper);
- struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
+ if (netif_is_macvlan(upper)) {
+ struct macvlan_dev *dfwd = netdev_priv(upper);
+ struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
- if (dfwd->fwd_priv) {
- err = ixgbe_fwd_ring_up(upper, vadapter);
- if (err)
- continue;
- }
- }
+ if (dfwd->fwd_priv)
+ ixgbe_fwd_ring_up(upper, vadapter);
}
+
+ return 0;
+}
+
+static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
+{
+ netdev_walk_all_upper_dev_rcu(adapter->netdev,
+ ixgbe_upper_dev_walk, NULL);
}
static void ixgbe_configure(struct ixgbe_adapter *adapter)
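
Several hunks in this patch convert open-coded netdev_for_each_all_upper_dev_rcu() loops (here, and later in ixgbe_down(), the link watchdog, and handle_redirect_action()) into callbacks handed to netdev_walk_all_upper_dev_rcu(). The contract the driver relies on is simply "invoke the callback for every upper device, stop and propagate the first nonzero return". A tiny stand-alone model of that contract, with generic names invented for illustration:

#include <stdio.h>

struct fake_dev {
	const char *name;
	int is_macvlan;
};

/* walk an array of "upper devices", stopping at the first nonzero return */
static int walk_all_upper(struct fake_dev *devs, int n,
			  int (*fn)(struct fake_dev *upper, void *data),
			  void *data)
{
	for (int i = 0; i < n; i++) {
		int ret = fn(&devs[i], data);

		if (ret)
			return ret;
	}
	return 0;
}

/* callback in the style of ixgbe_upper_dev_walk()/get_macvlan_queue() */
static int count_macvlans(struct fake_dev *upper, void *data)
{
	if (upper->is_macvlan)
		(*(int *)data)++;
	return 0;	/* keep walking; returning nonzero would stop early */
}

int main(void)
{
	struct fake_dev uppers[] = { { "macvlan0", 1 }, { "vlan10", 0 } };
	int count = 0;

	walk_all_upper(uppers, 2, count_macvlans, &count);
	printf("macvlan uppers: %d\n", count);
	return 0;
}

The "stop on nonzero" behaviour is what get_macvlan_queue() further down uses: it returns 1 once the matching macvlan is found, and handle_redirect_action() treats the walker's nonzero return as its "found" signal.
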
@@ -5291,6 +5406,8 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
usleep_range(1000, 2000);
+ if (adapter->hw.phy.type == ixgbe_phy_fw)
+ ixgbe_watchdog_link_is_down(adapter);
ixgbe_down(adapter);
/*
* If SR-IOV enabled then wait a bit before bringing the adapter
@@ -5381,28 +5498,57 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
**/
static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
- struct ixgbe_tx_buffer *tx_buffer_info;
- unsigned long size;
- u16 i;
+ u16 i = tx_ring->next_to_clean;
+ struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
- /* ring already cleared, nothing to do */
- if (!tx_ring->tx_buffer_info)
- return;
+ while (i != tx_ring->next_to_use) {
+ union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
- /* Free all the Tx ring sk_buffs */
- for (i = 0; i < tx_ring->count; i++) {
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
- }
+ /* Free all the Tx ring sk_buffs */
+ dev_kfree_skb_any(tx_buffer->skb);
- netdev_tx_reset_queue(txring_txq(tx_ring));
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
- size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
- memset(tx_ring->tx_buffer_info, 0, size);
+ /* check for eop_desc to determine the end of the packet */
+ eop_desc = tx_buffer->next_to_watch;
+ tx_desc = IXGBE_TX_DESC(tx_ring, i);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IXGBE_TX_DESC(tx_ring, 0);
+ }
- /* Zero out the descriptor ring */
- memset(tx_ring->desc, 0, tx_ring->size);
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ }
+ }
+
+ /* reset BQL for queue */
+ netdev_tx_reset_queue(txring_txq(tx_ring));
+
+ /* reset next_to_use and next_to_clean */
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
}
@@ -5448,12 +5594,25 @@ static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
spin_unlock(&adapter->fdir_perfect_lock);
}
+static int ixgbe_disable_macvlan(struct net_device *upper, void *data)
+{
+ if (netif_is_macvlan(upper)) {
+ struct macvlan_dev *vlan = netdev_priv(upper);
+
+ if (vlan->fwd_priv) {
+ netif_tx_stop_all_queues(upper);
+ netif_carrier_off(upper);
+ netif_tx_disable(upper);
+ }
+ }
+
+ return 0;
+}
+
void ixgbe_down(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- struct net_device *upper;
- struct list_head *iter;
int i;
/* signal that we are down to the interrupt handler */
@@ -5477,17 +5636,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
netif_tx_disable(netdev);
/* disable any upper devices */
- netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
- if (netif_is_macvlan(upper)) {
- struct macvlan_dev *vlan = netdev_priv(upper);
-
- if (vlan->fwd_priv) {
- netif_tx_stop_all_queues(upper);
- netif_carrier_off(upper);
- netif_tx_disable(upper);
- }
- }
- }
+ netdev_walk_all_upper_dev_rcu(adapter->netdev,
+ ixgbe_disable_macvlan, NULL);
ixgbe_irq_disable(adapter);
@@ -5547,6 +5697,31 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_set_eee_capable - helper function to determine EEE support on X550
+ * @adapter: board private structure
+ */
+static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ if (!hw->phy.eee_speeds_supported)
+ break;
+ adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE;
+ if (!hw->phy.eee_speeds_advertised)
+ break;
+ adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
+ break;
+ default:
+ adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE;
+ adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
+ break;
+ }
+}
+
+/**
* ixgbe_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
@@ -5618,7 +5793,8 @@ static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
* Fields are initialized based on PCI device information and
* OS network device settings (MTU size).
**/
-static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
+static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
+ const struct ixgbe_info *ii)
{
struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
@@ -5634,6 +5810,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_device_id = pdev->subsystem_device;
+ /* get_invariants needs the device IDs */
+ ii->get_invariants(hw);
+
/* Set common capability flags and settings */
rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
adapter->ring_feature[RING_F_RSS].limit = rss;
@@ -5706,6 +5885,14 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_x550em_a:
adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE;
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
+ break;
+ default:
+ break;
+ }
/* fall through */
case ixgbe_mac_X550EM_x:
#ifdef CONFIG_IXGBE_DCB
@@ -5719,6 +5906,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
/* Fall Through */
case ixgbe_mac_X550:
+ if (hw->mac.type == ixgbe_mac_X550)
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
#ifdef CONFIG_IXGBE_DCA
adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
#endif
@@ -5805,9 +5994,9 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
if (tx_ring->q_vector)
ring_node = tx_ring->q_vector->numa_node;
- tx_ring->tx_buffer_info = vzalloc_node(size, ring_node);
+ tx_ring->tx_buffer_info = vmalloc_node(size, ring_node);
if (!tx_ring->tx_buffer_info)
- tx_ring->tx_buffer_info = vzalloc(size);
+ tx_ring->tx_buffer_info = vmalloc(size);
if (!tx_ring->tx_buffer_info)
goto err;
@@ -5889,9 +6078,9 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
if (rx_ring->q_vector)
ring_node = rx_ring->q_vector->numa_node;
- rx_ring->rx_buffer_info = vzalloc_node(size, ring_node);
+ rx_ring->rx_buffer_info = vmalloc_node(size, ring_node);
if (!rx_ring->rx_buffer_info)
- rx_ring->rx_buffer_info = vzalloc(size);
+ rx_ring->rx_buffer_info = vmalloc(size);
if (!rx_ring->rx_buffer_info)
goto err;
@@ -6049,11 +6238,6 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
-
- /* MTU < 68 is an error and causes problems on some kernels */
- if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
- return -EINVAL;
/*
* For 82599EB we cannot allow legacy VFs to enable their receive
@@ -6062,7 +6246,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
*/
if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
(adapter->hw.mac.type == ixgbe_mac_82599EB) &&
- (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
+ (new_mtu > ETH_DATA_LEN))
e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
@@ -6194,7 +6378,8 @@ int ixgbe_close(struct net_device *netdev)
ixgbe_ptp_stop(adapter);
- ixgbe_close_suspend(adapter);
+ if (netif_device_present(netdev))
+ ixgbe_close_suspend(adapter);
ixgbe_fdir_filter_exit(adapter);
@@ -6239,14 +6424,12 @@ static int ixgbe_resume(struct pci_dev *pdev)
if (!err && netif_running(netdev))
err = ixgbe_open(netdev);
- rtnl_unlock();
-
- if (err)
- return err;
- netif_device_attach(netdev);
+ if (!err)
+ netif_device_attach(netdev);
+ rtnl_unlock();
- return 0;
+ return err;
}
#endif /* CONFIG_PM */
@@ -6261,14 +6444,14 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
int retval = 0;
#endif
+ rtnl_lock();
netif_device_detach(netdev);
- rtnl_lock();
if (netif_running(netdev))
ixgbe_close_suspend(adapter);
- rtnl_unlock();
ixgbe_clear_interrupt_scheme(adapter);
+ rtnl_unlock();
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
@@ -6728,6 +6911,18 @@ static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
#endif
}
+static int ixgbe_enable_macvlan(struct net_device *upper, void *data)
+{
+ if (netif_is_macvlan(upper)) {
+ struct macvlan_dev *vlan = netdev_priv(upper);
+
+ if (vlan->fwd_priv)
+ netif_tx_wake_all_queues(upper);
+ }
+
+ return 0;
+}
+
/**
* ixgbe_watchdog_link_is_up - update netif_carrier status and
* print link up message
@@ -6737,8 +6932,6 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- struct net_device *upper;
- struct list_head *iter;
u32 link_speed = adapter->link_speed;
const char *speed_str;
bool flow_rx, flow_tx;
@@ -6792,6 +6985,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
case IXGBE_LINK_SPEED_100_FULL:
speed_str = "100 Mbps";
break;
+ case IXGBE_LINK_SPEED_10_FULL:
+ speed_str = "10 Mbps";
+ break;
default:
speed_str = "unknown speed";
break;
@@ -6809,14 +7005,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
/* enable any upper devices */
rtnl_lock();
- netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
- if (netif_is_macvlan(upper)) {
- struct macvlan_dev *vlan = netdev_priv(upper);
-
- if (vlan->fwd_priv)
- netif_tx_wake_all_queues(upper);
- }
- }
+ netdev_walk_all_upper_dev_rcu(adapter->netdev,
+ ixgbe_enable_macvlan, NULL);
rtnl_unlock();
/* update the default user priority for VFs */
@@ -7605,18 +7795,32 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
return;
dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n");
+ tx_buffer = &tx_ring->tx_buffer_info[i];
/* clear dma mappings for failed tx_buffer_info map */
- for (;;) {
+ while (tx_buffer != first) {
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ if (i--)
+ i += tx_ring->count;
tx_buffer = &tx_ring->tx_buffer_info[i];
- ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
- if (tx_buffer == first)
- break;
- if (i == 0)
- i = tx_ring->count;
- i--;
}
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ dev_kfree_skb_any(first->skb);
+ first->skb = NULL;
+
tx_ring->next_to_use = i;
}
@@ -7655,11 +7859,17 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
/* snag network header to get L4 type and address */
skb = first->skb;
hdr.network = skb_network_header(skb);
+ if (unlikely(hdr.network <= skb->data))
+ return;
if (skb->encapsulation &&
first->protocol == htons(ETH_P_IP) &&
- hdr.ipv4->protocol != IPPROTO_UDP) {
+ hdr.ipv4->protocol == IPPROTO_UDP) {
struct ixgbe_adapter *adapter = q_vector->adapter;
+ if (unlikely(skb_tail_pointer(skb) < hdr.network +
+ VXLAN_HEADROOM))
+ return;
+
/* verify the port is recognized as VXLAN */
if (adapter->vxlan_port &&
udp_hdr(skb)->dest == adapter->vxlan_port)
@@ -7670,6 +7880,12 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
hdr.network = skb_inner_network_header(skb);
}
+ /* Make sure we have at least [minimum IPv4 header + TCP]
+ * or [IPv6 header] bytes
+ */
+ if (unlikely(skb_tail_pointer(skb) < hdr.network + 40))
+ return;
+
/* Currently only IPv4/IPv6 with TCP is supported */
switch (hdr.ipv4->version) {
case IPVERSION:
@@ -7689,6 +7905,10 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
if (l4_proto != IPPROTO_TCP)
return;
+ if (unlikely(skb_tail_pointer(skb) < hdr.network +
+ hlen + sizeof(struct tcphdr)))
+ return;
+
th = (struct tcphdr *)(hdr.network + hlen);
/* skip this packet since the socket is closing */
@@ -8085,8 +8305,9 @@ static void ixgbe_netpoll(struct net_device *netdev)
}
#endif
-static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+
+static void ixgbe_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int i;
@@ -8124,13 +8345,13 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
}
}
rcu_read_unlock();
+
/* following stats updated by ixgbe_watchdog_task() */
stats->multicast = netdev->stats.multicast;
stats->rx_errors = netdev->stats.rx_errors;
stats->rx_length_errors = netdev->stats.rx_length_errors;
stats->rx_crc_errors = netdev->stats.rx_crc_errors;
stats->rx_missed_errors = netdev->stats.rx_missed_errors;
- return stats;
}
#ifdef CONFIG_IXGBE_DCB
@@ -8354,12 +8575,38 @@ static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
}
#ifdef CONFIG_NET_CLS_ACT
+struct upper_walk_data {
+ struct ixgbe_adapter *adapter;
+ u64 action;
+ int ifindex;
+ u8 queue;
+};
+
+static int get_macvlan_queue(struct net_device *upper, void *_data)
+{
+ if (netif_is_macvlan(upper)) {
+ struct macvlan_dev *dfwd = netdev_priv(upper);
+ struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
+ struct upper_walk_data *data = _data;
+ struct ixgbe_adapter *adapter = data->adapter;
+ int ifindex = data->ifindex;
+
+ if (vadapter && vadapter->netdev->ifindex == ifindex) {
+ data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
+ data->action = data->queue;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
u8 *queue, u64 *action)
{
unsigned int num_vfs = adapter->num_vfs, vf;
+ struct upper_walk_data data;
struct net_device *upper;
- struct list_head *iter;
/* redirect to a SRIOV VF */
for (vf = 0; vf < num_vfs; ++vf) {
@@ -8377,17 +8624,16 @@ static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
}
/* redirect to a offloaded macvlan netdev */
- netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
- if (netif_is_macvlan(upper)) {
- struct macvlan_dev *dfwd = netdev_priv(upper);
- struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
-
- if (vadapter && vadapter->netdev->ifindex == ifindex) {
- *queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
- *action = *queue;
- return 0;
- }
- }
+ data.adapter = adapter;
+ data.ifindex = ifindex;
+ data.action = 0;
+ data.queue = 0;
+ if (netdev_walk_all_upper_dev_rcu(adapter->netdev,
+ get_macvlan_queue, &data)) {
+ *action = data.action;
+ *queue = data.queue;
+
+ return 0;
}
return -EINVAL;
@@ -8414,7 +8660,7 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
}
/* Redirect to a VF or a offloaded macvlan */
- if (is_tcf_mirred_redirect(a)) {
+ if (is_tcf_mirred_egress_redirect(a)) {
int ifindex = tcf_mirred_ifindex(a);
err = handle_redirect_action(adapter, ifindex, queue,
@@ -9239,9 +9485,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbe_netpoll,
#endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
- .ndo_busy_poll = ixgbe_low_latency_recv,
-#endif
#ifdef IXGBE_FCOE
.ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
.ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
@@ -9481,6 +9724,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->mac.ops = *ii->mac_ops;
hw->mac.type = ii->mac;
hw->mvals = ii->mvals;
+ if (ii->link_ops)
+ hw->link.ops = *ii->link_ops;
/* EEPROM */
hw->eeprom.ops = *ii->eeprom_ops;
@@ -9504,10 +9749,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->phy.mdio.mdio_read = ixgbe_mdio_read;
hw->phy.mdio.mdio_write = ixgbe_mdio_write;
- ii->get_invariants(hw);
-
/* setup the private structure */
- err = ixgbe_sw_init(adapter);
+ err = ixgbe_sw_init(adapter, ii);
if (err)
goto err_sw_init;
@@ -9545,6 +9788,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->phy.reset_if_overtemp = true;
err = hw->mac.ops.reset_hw(hw);
hw->phy.reset_if_overtemp = false;
+ ixgbe_set_eee_capable(adapter);
if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
err = 0;
} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
@@ -9616,9 +9860,13 @@ skip_sriov:
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
+ /* MTU range: 68 - 9710 */
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
+
#ifdef CONFIG_IXGBE_DCB
if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
- netdev->dcbnl_ops = &dcbnl_ops;
+ netdev->dcbnl_ops = &ixgbe_dcbnl_ops;
#endif
#ifdef IXGBE_FCOE
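
With min_mtu and max_mtu set at probe time, MTU range checking moves out of ixgbe_change_mtu() and into the networking core, which is why the explicit 68/IXGBE_MAX_JUMBO_FRAME_SIZE check is deleted earlier in this patch. The bounds in the "MTU range: 68 - 9710" comment follow from the usual constants (ETH_MIN_MTU is 68, and IXGBE_MAX_JUMBO_FRAME_SIZE is 9728 in ixgbe_type.h); a stand-alone arithmetic check, with the constants redefined locally for illustration:

#include <assert.h>

#define ETH_HLEN                   14    /* Ethernet header */
#define ETH_FCS_LEN                4     /* frame check sequence */
#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728  /* largest supported on-wire frame */

int main(void)
{
	/* 9728 - (14 + 4) = 9710 bytes of L3 payload, i.e. netdev->max_mtu */
	assert(IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN) == 9710);
	return 0;
}
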
@@ -9778,8 +10026,9 @@ skip_sriov:
* since os does not support feature
*/
if (hw->mac.ops.set_fw_drv_ver)
- hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF,
- 0xFF);
+ hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF,
+ sizeof(ixgbe_driver_version) - 1,
+ ixgbe_driver_version);
/* add san mac addr to netdev */
ixgbe_add_sanmac_netdev(netdev);
@@ -10027,7 +10276,7 @@ skip_bad_vf_detection:
}
if (netif_running(netdev))
- ixgbe_down(adapter);
+ ixgbe_close_suspend(adapter);
if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
pci_disable_device(pdev);
@@ -10097,10 +10346,12 @@ static void ixgbe_io_resume(struct pci_dev *pdev)
}
#endif
+ rtnl_lock();
if (netif_running(netdev))
- ixgbe_up(adapter);
+ ixgbe_open(netdev);
netif_device_attach(netdev);
+ rtnl_unlock();
}
static const struct pci_error_handlers ixgbe_err_handler = {