author     Dmitry Torokhov <dmitry.torokhov@gmail.com>    2013-05-01 08:47:44 -0700
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>    2013-05-01 08:47:44 -0700
commit     bf61c8840efe60fd8f91446860b63338fb424158 (patch)
tree       7a71832407a4f0d6346db773343f4c3ae2257b19 /drivers/net/ethernet/intel/igb/igb_main.c
parent     5846115b30f3a881e542c8bfde59a699c1c13740 (diff)
parent     0c6a61657da78098472fd0eb71cc01f2387fa1bb (diff)
Merge branch 'next' into for-linus
Prepare first set of updates for 3.10 merge window.
Diffstat (limited to 'drivers/net/ethernet/intel/igb/igb_main.c')
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c  | 2287
1 file changed, 1555 insertions(+), 732 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index e1ceb37ef12e..4dbd62968c7a 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2012 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -57,18 +57,20 @@ #ifdef CONFIG_IGB_DCA #include <linux/dca.h> #endif +#include <linux/i2c.h> #include "igb.h" #define MAJ 4 -#define MIN 0 -#define BUILD 1 +#define MIN 1 +#define BUILD 2 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ __stringify(BUILD) "-k" char igb_driver_name[] = "igb"; char igb_driver_version[] = DRV_VERSION; static const char igb_driver_string[] = "Intel(R) Gigabit Ethernet Network Driver"; -static const char igb_copyright[] = "Copyright (c) 2007-2012 Intel Corporation."; +static const char igb_copyright[] = + "Copyright (c) 2007-2013 Intel Corporation."; static const struct e1000_info *igb_info_tbl[] = { [board_82575] = &e1000_82575_info, @@ -118,10 +120,11 @@ static void igb_free_all_tx_resources(struct igb_adapter *); static void igb_free_all_rx_resources(struct igb_adapter *); static void igb_setup_mrqc(struct igb_adapter *); static int igb_probe(struct pci_dev *, const struct pci_device_id *); -static void __devexit igb_remove(struct pci_dev *pdev); +static void igb_remove(struct pci_dev *pdev); static int igb_sw_init(struct igb_adapter *); static int igb_open(struct net_device *); static int igb_close(struct net_device *); +static void igb_configure(struct igb_adapter *); static void igb_configure_tx(struct igb_adapter *); static void igb_configure_rx(struct igb_adapter *); static void igb_clean_all_tx_rings(struct igb_adapter *); @@ -192,6 +195,7 @@ static const struct dev_pm_ops igb_pm_ops = { }; #endif static void igb_shutdown(struct pci_dev *); +static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs); #ifdef CONFIG_IGB_DCA static int igb_notify_dca(struct notifier_block *, unsigned long, void *); static struct notifier_block dca_notifier = { @@ -228,11 +232,12 @@ static struct pci_driver igb_driver = { .name = igb_driver_name, .id_table = igb_pci_tbl, .probe = igb_probe, - .remove = __devexit_p(igb_remove), + .remove = igb_remove, #ifdef CONFIG_PM .driver.pm = &igb_pm_ops, #endif .shutdown = igb_shutdown, + .sriov_configure = igb_pci_sriov_configure, .err_handler = &igb_err_handler }; @@ -534,31 +539,27 @@ rx_ring_summary: if (staterr & E1000_RXD_STAT_DD) { /* Descriptor Done */ - pr_info("%s[0x%03X] %016llX %016llX -------" - "--------- %p%s\n", "RWB", i, + pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n", + "RWB", i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), - buffer_info->skb, next_desc); + next_desc); } else { - pr_info("%s[0x%03X] %016llX %016llX %016llX" - " %p%s\n", "R ", i, + pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n", + "R ", i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), (u64)buffer_info->dma, - buffer_info->skb, next_desc); + next_desc); if (netif_msg_pktdata(adapter) && - buffer_info->dma && buffer_info->skb) { - print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, - 16, 1, buffer_info->skb->data, - IGB_RX_HDR_LEN, true); + buffer_info->dma && buffer_info->page) { print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 
1, page_address(buffer_info->page) + buffer_info->page_offset, - PAGE_SIZE/2, true); + IGB_RX_BUFSZ, true); } } } @@ -568,6 +569,91 @@ exit: return; } +/* igb_get_i2c_data - Reads the I2C SDA data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Returns the I2C data bit value + */ +static int igb_get_i2c_data(void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + return ((i2cctl & E1000_I2C_DATA_IN) != 0); +} + +/* igb_set_i2c_data - Sets the I2C data bit + * @data: pointer to hardware structure + * @state: I2C data value (0 or 1) to set + * + * Sets the I2C data bit + */ +static void igb_set_i2c_data(void *data, int state) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + if (state) + i2cctl |= E1000_I2C_DATA_OUT; + else + i2cctl &= ~E1000_I2C_DATA_OUT; + + i2cctl &= ~E1000_I2C_DATA_OE_N; + i2cctl |= E1000_I2C_CLK_OE_N; + wr32(E1000_I2CPARAMS, i2cctl); + wrfl(); + +} + +/* igb_set_i2c_clk - Sets the I2C SCL clock + * @data: pointer to hardware structure + * @state: state to set clock + * + * Sets the I2C clock line to state + */ +static void igb_set_i2c_clk(void *data, int state) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + if (state) { + i2cctl |= E1000_I2C_CLK_OUT; + i2cctl &= ~E1000_I2C_CLK_OE_N; + } else { + i2cctl &= ~E1000_I2C_CLK_OUT; + i2cctl &= ~E1000_I2C_CLK_OE_N; + } + wr32(E1000_I2CPARAMS, i2cctl); + wrfl(); +} + +/* igb_get_i2c_clk - Gets the I2C SCL clock state + * @data: pointer to hardware structure + * + * Gets the I2C clock state + */ +static int igb_get_i2c_clk(void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + return ((i2cctl & E1000_I2C_CLK_IN) != 0); +} + +static const struct i2c_algo_bit_data igb_i2c_algo = { + .setsda = igb_set_i2c_data, + .setscl = igb_set_i2c_clk, + .getsda = igb_get_i2c_data, + .getscl = igb_get_i2c_clk, + .udelay = 5, + .timeout = 20, +}; + /** * igb_get_hw_dev - return device * used by hardware layer to print debugging information @@ -656,80 +742,6 @@ static void igb_cache_ring_register(struct igb_adapter *adapter) } } -static void igb_free_queues(struct igb_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_tx_queues; i++) { - kfree(adapter->tx_ring[i]); - adapter->tx_ring[i] = NULL; - } - for (i = 0; i < adapter->num_rx_queues; i++) { - kfree(adapter->rx_ring[i]); - adapter->rx_ring[i] = NULL; - } - adapter->num_rx_queues = 0; - adapter->num_tx_queues = 0; -} - -/** - * igb_alloc_queues - Allocate memory for all rings - * @adapter: board private structure to initialize - * - * We allocate one ring per queue at run-time since we don't know the - * number of queues at compile-time. - **/ -static int igb_alloc_queues(struct igb_adapter *adapter) -{ - struct igb_ring *ring; - int i; - - for (i = 0; i < adapter->num_tx_queues; i++) { - ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL); - if (!ring) - goto err; - ring->count = adapter->tx_ring_count; - ring->queue_index = i; - ring->dev = &adapter->pdev->dev; - ring->netdev = adapter->netdev; - /* For 82575, context index must be unique per ring. 
*/ - if (adapter->hw.mac.type == e1000_82575) - set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags); - adapter->tx_ring[i] = ring; - } - - for (i = 0; i < adapter->num_rx_queues; i++) { - ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL); - if (!ring) - goto err; - ring->count = adapter->rx_ring_count; - ring->queue_index = i; - ring->dev = &adapter->pdev->dev; - ring->netdev = adapter->netdev; - /* set flag indicating ring supports SCTP checksum offload */ - if (adapter->hw.mac.type >= e1000_82576) - set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); - - /* - * On i350, i210, and i211, loopback VLAN packets - * have the tag byte-swapped. - * */ - if (adapter->hw.mac.type >= e1000_i350) - set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags); - - adapter->rx_ring[i] = ring; - } - - igb_cache_ring_register(adapter); - - return 0; - -err: - igb_free_queues(adapter); - - return -ENOMEM; -} - /** * igb_write_ivar - configure ivar for given MSI-X vector * @hw: pointer to the HW structure @@ -909,17 +921,18 @@ static int igb_request_msix(struct igb_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; - int i, err = 0, vector = 0; + int i, err = 0, vector = 0, free_vector = 0; err = request_irq(adapter->msix_entries[vector].vector, igb_msix_other, 0, netdev->name, adapter); if (err) - goto out; - vector++; + goto err_out; for (i = 0; i < adapter->num_q_vectors; i++) { struct igb_q_vector *q_vector = adapter->q_vector[i]; + vector++; + q_vector->itr_register = hw->hw_addr + E1000_EITR(vector); if (q_vector->rx.ring && q_vector->tx.ring) @@ -938,13 +951,22 @@ static int igb_request_msix(struct igb_adapter *adapter) igb_msix_ring, 0, q_vector->name, q_vector); if (err) - goto out; - vector++; + goto err_free; } igb_configure_msix(adapter); return 0; -out: + +err_free: + /* free already assigned IRQs */ + free_irq(adapter->msix_entries[free_vector++].vector, adapter); + + vector--; + for (i = 0; i < vector; i++) { + free_irq(adapter->msix_entries[free_vector++].vector, + adapter->q_vector[i]); + } +err_out: return err; } @@ -960,6 +982,35 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter) } /** + * igb_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) +{ + struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + + if (q_vector->tx.ring) + adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; + + if (q_vector->rx.ring) + adapter->tx_ring[q_vector->rx.ring->queue_index] = NULL; + + adapter->q_vector[v_idx] = NULL; + netif_napi_del(&q_vector->napi); + + /* + * ixgbe_get_stats64() might access the rings on this vector, + * we must wait a grace period before freeing it. 
+ */ + kfree_rcu(q_vector, rcu); +} + +/** * igb_free_q_vectors - Free memory allocated for interrupt vectors * @adapter: board private structure to initialize * @@ -969,17 +1020,14 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter) **/ static void igb_free_q_vectors(struct igb_adapter *adapter) { - int v_idx; + int v_idx = adapter->num_q_vectors; - for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { - struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; - adapter->q_vector[v_idx] = NULL; - if (!q_vector) - continue; - netif_napi_del(&q_vector->napi); - kfree(q_vector); - } + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; adapter->num_q_vectors = 0; + + while (v_idx--) + igb_free_q_vector(adapter, v_idx); } /** @@ -990,7 +1038,6 @@ static void igb_free_q_vectors(struct igb_adapter *adapter) */ static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) { - igb_free_queues(adapter); igb_free_q_vectors(adapter); igb_reset_interrupt_capability(adapter); } @@ -1001,11 +1048,14 @@ static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) * Attempt to configure interrupts using the best available * capabilities of the hardware and kernel. **/ -static int igb_set_interrupt_capability(struct igb_adapter *adapter) +static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix) { int err; int numvecs, i; + if (!msix) + goto msi_only; + /* Number of supported queues. */ adapter->num_rx_queues = adapter->rss_queues; if (adapter->vfs_allocated_count) @@ -1038,7 +1088,7 @@ static int igb_set_interrupt_capability(struct igb_adapter *adapter) adapter->msix_entries, numvecs); if (err == 0) - goto out; + return; igb_reset_interrupt_capability(adapter); @@ -1068,105 +1118,183 @@ msi_only: adapter->num_q_vectors = 1; if (!pci_enable_msi(adapter->pdev)) adapter->flags |= IGB_FLAG_HAS_MSI; -out: - /* Notify the stack of the (possibly) reduced queue counts. */ - rtnl_lock(); - netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); - err = netif_set_real_num_rx_queues(adapter->netdev, - adapter->num_rx_queues); - rtnl_unlock(); - return err; +} + +static void igb_add_ring(struct igb_ring *ring, + struct igb_ring_container *head) +{ + head->ring = ring; + head->count++; } /** - * igb_alloc_q_vectors - Allocate memory for interrupt vectors + * igb_alloc_q_vector - Allocate memory for a single interrupt vector * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate * - * We allocate one q_vector per queue interrupt. If allocation fails we - * return -ENOMEM. + * We allocate one q_vector. If allocation fails we return -ENOMEM. 
**/ -static int igb_alloc_q_vectors(struct igb_adapter *adapter) +static int igb_alloc_q_vector(struct igb_adapter *adapter, + int v_count, int v_idx, + int txr_count, int txr_idx, + int rxr_count, int rxr_idx) { struct igb_q_vector *q_vector; - struct e1000_hw *hw = &adapter->hw; - int v_idx; + struct igb_ring *ring; + int ring_count, size; - for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { - q_vector = kzalloc(sizeof(struct igb_q_vector), - GFP_KERNEL); - if (!q_vector) - goto err_out; - q_vector->adapter = adapter; - q_vector->itr_register = hw->hw_addr + E1000_EITR(0); - q_vector->itr_val = IGB_START_ITR; - netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64); - adapter->q_vector[v_idx] = q_vector; + /* igb only supports 1 Tx and/or 1 Rx queue per vector */ + if (txr_count > 1 || rxr_count > 1) + return -ENOMEM; + + ring_count = txr_count + rxr_count; + size = sizeof(struct igb_q_vector) + + (sizeof(struct igb_ring) * ring_count); + + /* allocate q_vector and rings */ + q_vector = kzalloc(size, GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, + igb_poll, 64); + + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + + /* initialize ITR configuration */ + q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0); + q_vector->itr_val = IGB_START_ITR; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + if (txr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + igb_add_ring(ring, &q_vector->tx); + + /* For 82575, context index must be unique per ring. */ + if (adapter->hw.mac.type == e1000_82575) + set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = txr_idx; + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* push pointer to next ring */ + ring++; } - return 0; + if (rxr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; -err_out: - igb_free_q_vectors(adapter); - return -ENOMEM; -} + /* configure backlink on ring */ + ring->q_vector = q_vector; -static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter, - int ring_idx, int v_idx) -{ - struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + /* update q_vector Rx values */ + igb_add_ring(ring, &q_vector->rx); - q_vector->rx.ring = adapter->rx_ring[ring_idx]; - q_vector->rx.ring->q_vector = q_vector; - q_vector->rx.count++; - q_vector->itr_val = adapter->rx_itr_setting; - if (q_vector->itr_val && q_vector->itr_val <= 3) - q_vector->itr_val = IGB_START_ITR; -} + /* set flag indicating ring supports SCTP checksum offload */ + if (adapter->hw.mac.type >= e1000_82576) + set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); -static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter, - int ring_idx, int v_idx) -{ - struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + /* + * On i350, i210, and i211, loopback VLAN packets + * have the tag byte-swapped. 
+ * */ + if (adapter->hw.mac.type >= e1000_i350) + set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags); - q_vector->tx.ring = adapter->tx_ring[ring_idx]; - q_vector->tx.ring->q_vector = q_vector; - q_vector->tx.count++; - q_vector->itr_val = adapter->tx_itr_setting; - q_vector->tx.work_limit = adapter->tx_work_limit; - if (q_vector->itr_val && q_vector->itr_val <= 3) - q_vector->itr_val = IGB_START_ITR; + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + ring->queue_index = rxr_idx; + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + } + + return 0; } + /** - * igb_map_ring_to_vector - maps allocated queues to vectors + * igb_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize * - * This function maps the recently allocated queues to vectors. + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. **/ -static int igb_map_ring_to_vector(struct igb_adapter *adapter) +static int igb_alloc_q_vectors(struct igb_adapter *adapter) { - int i; - int v_idx = 0; + int q_vectors = adapter->num_q_vectors; + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int err; - if ((adapter->num_q_vectors < adapter->num_rx_queues) || - (adapter->num_q_vectors < adapter->num_tx_queues)) - return -ENOMEM; + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = igb_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); - if (adapter->num_q_vectors >= - (adapter->num_rx_queues + adapter->num_tx_queues)) { - for (i = 0; i < adapter->num_rx_queues; i++) - igb_map_rx_ring_to_vector(adapter, i, v_idx++); - for (i = 0; i < adapter->num_tx_queues; i++) - igb_map_tx_ring_to_vector(adapter, i, v_idx++); - } else { - for (i = 0; i < adapter->num_rx_queues; i++) { - if (i < adapter->num_tx_queues) - igb_map_tx_ring_to_vector(adapter, i, v_idx); - igb_map_rx_ring_to_vector(adapter, i, v_idx++); + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; } - for (; i < adapter->num_tx_queues; i++) - igb_map_tx_ring_to_vector(adapter, i, v_idx++); } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + err = igb_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + igb_free_q_vector(adapter, v_idx); + + return -ENOMEM; } /** @@ -1174,14 +1302,12 @@ static int igb_map_ring_to_vector(struct igb_adapter *adapter) * * This function initializes the interrupts and allocates all of the queues. 
**/ -static int igb_init_interrupt_scheme(struct igb_adapter *adapter) +static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix) { struct pci_dev *pdev = adapter->pdev; int err; - err = igb_set_interrupt_capability(adapter); - if (err) - return err; + igb_set_interrupt_capability(adapter, msix); err = igb_alloc_q_vectors(adapter); if (err) { @@ -1189,24 +1315,10 @@ static int igb_init_interrupt_scheme(struct igb_adapter *adapter) goto err_alloc_q_vectors; } - err = igb_alloc_queues(adapter); - if (err) { - dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); - goto err_alloc_queues; - } - - err = igb_map_ring_to_vector(adapter); - if (err) { - dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n"); - goto err_map_queues; - } - + igb_cache_ring_register(adapter); return 0; -err_map_queues: - igb_free_queues(adapter); -err_alloc_queues: - igb_free_q_vectors(adapter); + err_alloc_q_vectors: igb_reset_interrupt_capability(adapter); return err; @@ -1229,29 +1341,17 @@ static int igb_request_irq(struct igb_adapter *adapter) if (!err) goto request_done; /* fall back to MSI */ - igb_clear_interrupt_scheme(adapter); - if (!pci_enable_msi(pdev)) - adapter->flags |= IGB_FLAG_HAS_MSI; igb_free_all_tx_resources(adapter); igb_free_all_rx_resources(adapter); - adapter->num_tx_queues = 1; - adapter->num_rx_queues = 1; - adapter->num_q_vectors = 1; - err = igb_alloc_q_vectors(adapter); - if (err) { - dev_err(&pdev->dev, - "Unable to allocate memory for vectors\n"); - goto request_done; - } - err = igb_alloc_queues(adapter); - if (err) { - dev_err(&pdev->dev, - "Unable to allocate memory for queues\n"); - igb_free_q_vectors(adapter); + + igb_clear_interrupt_scheme(adapter); + err = igb_init_interrupt_scheme(adapter, false); + if (err) goto request_done; - } + igb_setup_all_tx_resources(adapter); igb_setup_all_rx_resources(adapter); + igb_configure(adapter); } igb_assign_vector(adapter->q_vector[0], 0); @@ -1587,8 +1687,7 @@ void igb_reset(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; struct e1000_mac_info *mac = &hw->mac; struct e1000_fc_info *fc = &hw->fc; - u32 pba = 0, tx_space, min_tx_space, min_rx_space; - u16 hwm; + u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm; /* Repartition Pba for greater than 9k mtu * To take effect CTRL.RST is required. @@ -1663,7 +1762,7 @@ void igb_reset(struct igb_adapter *adapter) hwm = min(((pba << 10) * 9 / 10), ((pba << 10) - 2 * adapter->max_frame_size)); - fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */ + fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ fc->low_water = fc->high_water - 16; fc->pause_time = 0xFFFF; fc->send_xon = 1; @@ -1698,6 +1797,18 @@ void igb_reset(struct igb_adapter *adapter) igb_force_mac_fc(hw); igb_init_dmac(adapter, pba); +#ifdef CONFIG_IGB_HWMON + /* Re-initialize the thermal sensor on i350 devices. */ + if (!test_bit(__IGB_DOWN, &adapter->state)) { + if (mac->type == e1000_i350 && hw->bus.func == 0) { + /* If present, re-initialize the external thermal sensor + * interface. + */ + if (adapter->ets) + mac->ops.init_thermal_sensor_thresh(hw); + } + } +#endif if (!netif_running(adapter->netdev)) igb_power_down_link(adapter); @@ -1706,10 +1817,8 @@ void igb_reset(struct igb_adapter *adapter) /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); -#ifdef CONFIG_IGB_PTP /* Re-enable PTP, where applicable. 
*/ igb_ptp_reset(adapter); -#endif /* CONFIG_IGB_PTP */ igb_get_phy_info(hw); } @@ -1783,61 +1892,64 @@ static const struct net_device_ops igb_netdev_ops = { void igb_set_fw_version(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset; - u16 major, build, patch, fw_version; - u32 etrack_id; - - hw->nvm.ops.read(hw, 5, 1, &fw_version); - if (adapter->hw.mac.type != e1000_i211) { - hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh); - hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl); - etrack_id = (eeprom_verh << IGB_ETRACK_SHIFT) | eeprom_verl; - - /* combo image version needs to be found */ - hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); - if ((comb_offset != 0x0) && - (comb_offset != IGB_NVM_VER_INVALID)) { - hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset - + 1), 1, &comb_verh); - hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset), - 1, &comb_verl); - - /* Only display Option Rom if it exists and is valid */ - if ((comb_verh && comb_verl) && - ((comb_verh != IGB_NVM_VER_INVALID) && - (comb_verl != IGB_NVM_VER_INVALID))) { - major = comb_verl >> IGB_COMB_VER_SHFT; - build = (comb_verl << IGB_COMB_VER_SHFT) | - (comb_verh >> IGB_COMB_VER_SHFT); - patch = comb_verh & IGB_COMB_VER_MASK; - snprintf(adapter->fw_version, - sizeof(adapter->fw_version), - "%d.%d%d, 0x%08x, %d.%d.%d", - (fw_version & IGB_MAJOR_MASK) >> - IGB_MAJOR_SHIFT, - (fw_version & IGB_MINOR_MASK) >> - IGB_MINOR_SHIFT, - (fw_version & IGB_BUILD_MASK), - etrack_id, major, build, patch); - goto out; - } - } - snprintf(adapter->fw_version, sizeof(adapter->fw_version), - "%d.%d%d, 0x%08x", - (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT, - (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT, - (fw_version & IGB_BUILD_MASK), etrack_id); - } else { + struct e1000_fw_version fw; + + igb_get_fw_version(hw, &fw); + + switch (hw->mac.type) { + case e1000_i211: snprintf(adapter->fw_version, sizeof(adapter->fw_version), - "%d.%d%d", - (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT, - (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT, - (fw_version & IGB_BUILD_MASK)); + "%2d.%2d-%d", + fw.invm_major, fw.invm_minor, fw.invm_img_type); + break; + + default: + /* if option is rom valid, display its version too */ + if (fw.or_valid) { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d, 0x%08x, %d.%d.%d", + fw.eep_major, fw.eep_minor, fw.etrack_id, + fw.or_major, fw.or_build, fw.or_patch); + /* no option rom */ + } else { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d, 0x%08x", + fw.eep_major, fw.eep_minor, fw.etrack_id); + } + break; } -out: return; } +/* igb_init_i2c - Init I2C interface + * @adapter: pointer to adapter structure + * + */ +static s32 igb_init_i2c(struct igb_adapter *adapter) +{ + s32 status = E1000_SUCCESS; + + /* I2C interface supported on i350 devices */ + if (adapter->hw.mac.type != e1000_i350) + return E1000_SUCCESS; + + /* Initialize the i2c bus which is controlled by the registers. + * This bus will use the i2c_algo_bit structue that implements + * the protocol through toggling of the 4 bits in the register. 
+ */ + adapter->i2c_adap.owner = THIS_MODULE; + adapter->i2c_algo = igb_i2c_algo; + adapter->i2c_algo.data = adapter; + adapter->i2c_adap.algo_data = &adapter->i2c_algo; + adapter->i2c_adap.dev.parent = &adapter->pdev->dev; + strlcpy(adapter->i2c_adap.name, "igb BB", + sizeof(adapter->i2c_adap.name)); + status = i2c_bit_add_bus(&adapter->i2c_adap); + return status; +} + /** * igb_probe - Device Initialization Routine * @pdev: PCI device information struct @@ -1849,8 +1961,7 @@ out: * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. **/ -static int __devinit igb_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) +static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; struct igb_adapter *adapter; @@ -1861,7 +1972,6 @@ static int __devinit igb_probe(struct pci_dev *pdev, const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; unsigned long mmio_start, mmio_len; int err, pci_using_dac; - u16 eeprom_apme_mask = IGB_EEPROM_APME; u8 part_str[E1000_PBANUM_LENGTH]; /* Catch broken hardware that put the wrong VF device ID in @@ -2040,9 +2150,8 @@ static int __devinit igb_probe(struct pci_dev *pdev, dev_err(&pdev->dev, "NVM Read Error\n"); memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); - memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len); - if (!is_valid_ether_addr(netdev->perm_addr)) { + if (!is_valid_ether_addr(netdev->dev_addr)) { dev_err(&pdev->dev, "Invalid MAC Address\n"); err = -EIO; goto err_eeprom; @@ -2069,28 +2178,27 @@ static int __devinit igb_probe(struct pci_dev *pdev, igb_validate_mdi_setting(hw); - /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM, - * enable the ACPI Magic Packet filter - */ - + /* By default, support wake on port A */ if (hw->bus.func == 0) - hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); - else if (hw->mac.type >= e1000_82580) + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + + /* Check the NVM for wake support on non-port A ports */ + if (hw->mac.type >= e1000_82580) hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, &eeprom_data); else if (hw->bus.func == 1) hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); - if (eeprom_data & eeprom_apme_mask) - adapter->eeprom_wol |= E1000_WUFC_MAG; + if (eeprom_data & IGB_EEPROM_APME) + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; /* now that we have the eeprom settings, apply the special cases where * the eeprom may be wrong or the board simply won't support wake on * lan on a particular port */ switch (pdev->device) { case E1000_DEV_ID_82575GB_QUAD_COPPER: - adapter->eeprom_wol = 0; + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; break; case E1000_DEV_ID_82575EB_FIBER_SERDES: case E1000_DEV_ID_82576_FIBER: @@ -2098,28 +2206,49 @@ static int __devinit igb_probe(struct pci_dev *pdev, /* Wake events only supported on port A for dual fiber * regardless of eeprom setting */ if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) - adapter->eeprom_wol = 0; + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; break; case E1000_DEV_ID_82576_QUAD_COPPER: case E1000_DEV_ID_82576_QUAD_COPPER_ET2: /* if quad port adapter, disable WoL on all but port A */ if (global_quad_port_a != 0) - adapter->eeprom_wol = 0; + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; else adapter->flags |= IGB_FLAG_QUAD_PORT_A; /* Reset for multiple quad port adapters */ if (++global_quad_port_a == 4) global_quad_port_a = 0; break; + default: + /* If the device can't 
wake, don't set software support */ + if (!device_can_wakeup(&adapter->pdev->dev)) + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; } /* initialize the wol settings based on the eeprom settings */ - adapter->wol = adapter->eeprom_wol; - device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + if (adapter->flags & IGB_FLAG_WOL_SUPPORTED) + adapter->wol |= E1000_WUFC_MAG; + + /* Some vendors want WoL disabled by default, but still supported */ + if ((hw->mac.type == e1000_i350) && + (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) { + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + adapter->wol = 0; + } + + device_set_wakeup_enable(&adapter->pdev->dev, + adapter->flags & IGB_FLAG_WOL_SUPPORTED); /* reset the hardware with the new settings */ igb_reset(adapter); + /* Init the I2C interface */ + err = igb_init_i2c(adapter); + if (err) { + dev_err(&pdev->dev, "failed to init i2c interface\n"); + goto err_eeprom; + } + /* let the f/w know that the h/w is now under the control of the * driver. */ igb_get_hw_control(adapter); @@ -2140,11 +2269,29 @@ static int __devinit igb_probe(struct pci_dev *pdev, } #endif +#ifdef CONFIG_IGB_HWMON + /* Initialize the thermal sensor on i350 devices. */ + if (hw->mac.type == e1000_i350 && hw->bus.func == 0) { + u16 ets_word; -#ifdef CONFIG_IGB_PTP + /* + * Read the NVM to determine if this i350 device supports an + * external thermal sensor. + */ + hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word); + if (ets_word != 0x0000 && ets_word != 0xFFFF) + adapter->ets = true; + else + adapter->ets = false; + if (igb_sysfs_init(adapter)) + dev_err(&pdev->dev, + "failed to allocate sysfs resources\n"); + } else { + adapter->ets = false; + } +#endif /* do hw tstamp init after resetting */ igb_ptp_init(adapter); -#endif /* CONFIG_IGB_PTP */ dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); /* print bus type/speed/width info */ @@ -2183,6 +2330,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, err_register: igb_release_hw_control(adapter); + memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap)); err_eeprom: if (!igb_check_reset_block(hw)) igb_reset_phy(hw); @@ -2203,6 +2351,111 @@ err_dma: return err; } +#ifdef CONFIG_PCI_IOV +static int igb_disable_sriov(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* reclaim resources allocated to VFs */ + if (adapter->vf_data) { + /* disable iov and allow time for transactions to clear */ + if (igb_vfs_are_assigned(adapter)) { + dev_warn(&pdev->dev, + "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n"); + return -EPERM; + } else { + pci_disable_sriov(pdev); + msleep(500); + } + + kfree(adapter->vf_data); + adapter->vf_data = NULL; + adapter->vfs_allocated_count = 0; + wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); + wrfl(); + msleep(100); + dev_info(&pdev->dev, "IOV Disabled\n"); + + /* Re-enable DMA Coalescing flag since IOV is turned off */ + adapter->flags |= IGB_FLAG_DMAC; + } + + return 0; +} + +static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + int old_vfs = pci_num_vf(pdev); + int err = 0; + int i; + + if (!num_vfs) + goto out; + else if (old_vfs && old_vfs == num_vfs) + goto out; + else if (old_vfs && old_vfs != num_vfs) + err = igb_disable_sriov(pdev); + + if (err) + goto out; + + if (num_vfs > 7) { + err = 
-EPERM; + goto out; + } + + adapter->vfs_allocated_count = num_vfs; + + adapter->vf_data = kcalloc(adapter->vfs_allocated_count, + sizeof(struct vf_data_storage), GFP_KERNEL); + + /* if allocation failed then we do not support SR-IOV */ + if (!adapter->vf_data) { + adapter->vfs_allocated_count = 0; + dev_err(&pdev->dev, + "Unable to allocate memory for VF Data Storage\n"); + err = -ENOMEM; + goto out; + } + + err = pci_enable_sriov(pdev, adapter->vfs_allocated_count); + if (err) + goto err_out; + + dev_info(&pdev->dev, "%d VFs allocated\n", + adapter->vfs_allocated_count); + for (i = 0; i < adapter->vfs_allocated_count; i++) + igb_vf_configure(adapter, i); + + /* DMA Coalescing is not supported in IOV mode. */ + adapter->flags &= ~IGB_FLAG_DMAC; + goto out; + +err_out: + kfree(adapter->vf_data); + adapter->vf_data = NULL; + adapter->vfs_allocated_count = 0; +out: + return err; +} + +#endif +/* + * igb_remove_i2c - Cleanup I2C interface + * @adapter: pointer to adapter structure + * + */ +static void igb_remove_i2c(struct igb_adapter *adapter) +{ + + /* free the adapter bus structure */ + i2c_del_adapter(&adapter->i2c_adap); +} + /** * igb_remove - Device Removal Routine * @pdev: PCI device information struct @@ -2212,17 +2465,18 @@ err_dma: * Hot-Plug event, or because the driver is going to be removed from * memory. **/ -static void __devexit igb_remove(struct pci_dev *pdev) +static void igb_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; pm_runtime_get_noresume(&pdev->dev); -#ifdef CONFIG_IGB_PTP +#ifdef CONFIG_IGB_HWMON + igb_sysfs_exit(adapter); +#endif + igb_remove_i2c(adapter); igb_ptp_stop(adapter); -#endif /* CONFIG_IGB_PTP */ - /* * The watchdog timer may be rescheduled, so explicitly * disable watchdog from being rescheduled. @@ -2252,23 +2506,7 @@ static void __devexit igb_remove(struct pci_dev *pdev) igb_clear_interrupt_scheme(adapter); #ifdef CONFIG_PCI_IOV - /* reclaim resources allocated to VFs */ - if (adapter->vf_data) { - /* disable iov and allow time for transactions to clear */ - if (igb_vfs_are_assigned(adapter)) { - dev_info(&pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n"); - } else { - pci_disable_sriov(pdev); - msleep(500); - } - - kfree(adapter->vf_data); - adapter->vf_data = NULL; - wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); - wrfl(); - msleep(100); - dev_info(&pdev->dev, "IOV Disabled\n"); - } + igb_disable_sriov(pdev); #endif iounmap(hw->hw_addr); @@ -2294,108 +2532,27 @@ static void __devexit igb_remove(struct pci_dev *pdev) * mor expensive time wise to disable SR-IOV than it is to allocate and free * the memory for the VFs. **/ -static void __devinit igb_probe_vfs(struct igb_adapter * adapter) +static void igb_probe_vfs(struct igb_adapter *adapter) { #ifdef CONFIG_PCI_IOV struct pci_dev *pdev = adapter->pdev; struct e1000_hw *hw = &adapter->hw; - int old_vfs = pci_num_vf(adapter->pdev); - int i; /* Virtualization features not supported on i210 family. 
*/ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) return; - if (old_vfs) { - dev_info(&pdev->dev, "%d pre-allocated VFs found - override " - "max_vfs setting of %d\n", old_vfs, max_vfs); - adapter->vfs_allocated_count = old_vfs; - } - - if (!adapter->vfs_allocated_count) - return; - - adapter->vf_data = kcalloc(adapter->vfs_allocated_count, - sizeof(struct vf_data_storage), GFP_KERNEL); + igb_enable_sriov(pdev, max_vfs); + pci_sriov_set_totalvfs(pdev, 7); - /* if allocation failed then we do not support SR-IOV */ - if (!adapter->vf_data) { - adapter->vfs_allocated_count = 0; - dev_err(&pdev->dev, "Unable to allocate memory for VF " - "Data Storage\n"); - goto out; - } - - if (!old_vfs) { - if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) - goto err_out; - } - dev_info(&pdev->dev, "%d VFs allocated\n", - adapter->vfs_allocated_count); - for (i = 0; i < adapter->vfs_allocated_count; i++) - igb_vf_configure(adapter, i); - - /* DMA Coalescing is not supported in IOV mode. */ - adapter->flags &= ~IGB_FLAG_DMAC; - goto out; -err_out: - kfree(adapter->vf_data); - adapter->vf_data = NULL; - adapter->vfs_allocated_count = 0; -out: - return; #endif /* CONFIG_PCI_IOV */ } -/** - * igb_sw_init - Initialize general software structures (struct igb_adapter) - * @adapter: board private structure to initialize - * - * igb_sw_init initializes the Adapter private data structure. - * Fields are initialized based on PCI device information and - * OS network device settings (MTU size). - **/ -static int __devinit igb_sw_init(struct igb_adapter *adapter) +static void igb_init_queue_configuration(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; u32 max_rss_queues; - pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); - - /* set default ring sizes */ - adapter->tx_ring_count = IGB_DEFAULT_TXD; - adapter->rx_ring_count = IGB_DEFAULT_RXD; - - /* set default ITR values */ - adapter->rx_itr_setting = IGB_DEFAULT_ITR; - adapter->tx_itr_setting = IGB_DEFAULT_ITR; - - /* set default work limits */ - adapter->tx_work_limit = IGB_DEFAULT_TX_WORK; - - adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + - VLAN_HLEN; - adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; - - spin_lock_init(&adapter->stats64_lock); -#ifdef CONFIG_PCI_IOV - switch (hw->mac.type) { - case e1000_82576: - case e1000_i350: - if (max_vfs > 7) { - dev_warn(&pdev->dev, - "Maximum of 7 VFs per PF, using max\n"); - adapter->vfs_allocated_count = 7; - } else - adapter->vfs_allocated_count = max_vfs; - break; - default: - break; - } -#endif /* CONFIG_PCI_IOV */ - /* Determine the maximum number of RSS queues supported. */ switch (hw->mac.type) { case e1000_i211: @@ -2454,14 +2611,67 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter) adapter->flags |= IGB_FLAG_QUEUE_PAIRS; break; } +} + +/** + * igb_sw_init - Initialize general software structures (struct igb_adapter) + * @adapter: board private structure to initialize + * + * igb_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
+ **/ +static int igb_sw_init(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); + + /* set default ring sizes */ + adapter->tx_ring_count = IGB_DEFAULT_TXD; + adapter->rx_ring_count = IGB_DEFAULT_RXD; + + /* set default ITR values */ + adapter->rx_itr_setting = IGB_DEFAULT_ITR; + adapter->tx_itr_setting = IGB_DEFAULT_ITR; + + /* set default work limits */ + adapter->tx_work_limit = IGB_DEFAULT_TX_WORK; + + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + + VLAN_HLEN; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + spin_lock_init(&adapter->stats64_lock); +#ifdef CONFIG_PCI_IOV + switch (hw->mac.type) { + case e1000_82576: + case e1000_i350: + if (max_vfs > 7) { + dev_warn(&pdev->dev, + "Maximum of 7 VFs per PF, using max\n"); + adapter->vfs_allocated_count = 7; + } else + adapter->vfs_allocated_count = max_vfs; + if (adapter->vfs_allocated_count) + dev_warn(&pdev->dev, + "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n"); + break; + default: + break; + } +#endif /* CONFIG_PCI_IOV */ + + igb_init_queue_configuration(adapter); /* Setup and initialize a copy of the hw vlan table array */ - adapter->shadow_vfta = kzalloc(sizeof(u32) * - E1000_VLAN_FILTER_TBL_SIZE, - GFP_ATOMIC); + adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), + GFP_ATOMIC); /* This call may decrease the number of queues */ - if (igb_init_interrupt_scheme(adapter)) { + if (igb_init_interrupt_scheme(adapter, true)) { dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); return -ENOMEM; } @@ -2531,6 +2741,17 @@ static int __igb_open(struct net_device *netdev, bool resuming) if (err) goto err_req_irq; + /* Notify the stack of the actual queue counts. 
*/ + err = netif_set_real_num_tx_queues(adapter->netdev, + adapter->num_tx_queues); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(adapter->netdev, + adapter->num_rx_queues); + if (err) + goto err_set_queues; + /* From here on the code is the same as igb_up() */ clear_bit(__IGB_DOWN, &adapter->state); @@ -2560,6 +2781,8 @@ static int __igb_open(struct net_device *netdev, bool resuming) return 0; +err_set_queues: + igb_free_irq(adapter); err_req_irq: igb_release_hw_control(adapter); igb_power_down_link(adapter); @@ -2637,10 +2860,8 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring) tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); tx_ring->size = ALIGN(tx_ring->size, 4096); - tx_ring->desc = dma_alloc_coherent(dev, - tx_ring->size, - &tx_ring->dma, - GFP_KERNEL); + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); if (!tx_ring->desc) goto err; @@ -2777,18 +2998,16 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring) if (!rx_ring->rx_buffer_info) goto err; - /* Round up to nearest 4K */ rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); - rx_ring->desc = dma_alloc_coherent(dev, - rx_ring->size, - &rx_ring->dma, - GFP_KERNEL); + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); if (!rx_ring->desc) goto err; + rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; @@ -2893,18 +3112,21 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) /* Don't need to set TUOFL or IPOFL, they default to 1 */ wr32(E1000_RXCSUM, rxcsum); - /* - * Generate RSS hash based on TCP port numbers and/or - * IPv4/v6 src and dst addresses since UDP cannot be - * hashed reliably due to IP fragmentation - */ + /* Generate RSS hash based on packet types, TCP/UDP + * port numbers and/or IPv4/v6 src and dst addresses + */ mrqc = E1000_MRQC_RSS_FIELD_IPV4 | E1000_MRQC_RSS_FIELD_IPV4_TCP | E1000_MRQC_RSS_FIELD_IPV6 | E1000_MRQC_RSS_FIELD_IPV6_TCP | E1000_MRQC_RSS_FIELD_IPV6_TCP_EX; + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP; + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP; + /* If VMDq is enabled then we set the appropriate mode for that, else * we default to RSS so that an RSS hash is calculated per packet even * if we are only using one queue */ @@ -3106,16 +3328,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, /* set descriptor configuration */ srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; -#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384 - srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT; -#else - srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT; -#endif - srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; -#ifdef CONFIG_IGB_PTP + srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; if (hw->mac.type >= e1000_82580) srrctl |= E1000_SRRCTL_TIMESTAMP; -#endif /* CONFIG_IGB_PTP */ /* Only set Drop Enable if we are supporting multiple queues */ if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1) srrctl |= E1000_SRRCTL_DROP_EN; @@ -3134,6 +3350,20 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, wr32(E1000_RXDCTL(reg_idx), rxdctl); } +static void igb_set_rx_buffer_len(struct igb_adapter *adapter, + struct igb_ring *rx_ring) +{ +#define IGB_MAX_BUILD_SKB_SIZE \ + (SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \ + (NET_SKB_PAD + 
NET_IP_ALIGN + IGB_TS_HDR_LEN)) + + /* set build_skb flag */ + if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE) + set_ring_build_skb_enabled(rx_ring); + else + clear_ring_build_skb_enabled(rx_ring); +} + /** * igb_configure_rx - Configure receive Unit after Reset * @adapter: board private structure @@ -3153,8 +3383,11 @@ static void igb_configure_rx(struct igb_adapter *adapter) /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ - for (i = 0; i < adapter->num_rx_queues; i++) - igb_configure_rx_ring(adapter, adapter->rx_ring[i]); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *rx_ring = adapter->rx_ring[i]; + igb_set_rx_buffer_len(adapter, rx_ring); + igb_configure_rx_ring(adapter, rx_ring); + } } /** @@ -3305,36 +3538,27 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring) unsigned long size; u16 i; + if (rx_ring->skb) + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + if (!rx_ring->rx_buffer_info) return; /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; - if (buffer_info->dma) { - dma_unmap_single(rx_ring->dev, - buffer_info->dma, - IGB_RX_HDR_LEN, - DMA_FROM_DEVICE); - buffer_info->dma = 0; - } - if (buffer_info->skb) { - dev_kfree_skb(buffer_info->skb); - buffer_info->skb = NULL; - } - if (buffer_info->page_dma) { - dma_unmap_page(rx_ring->dev, - buffer_info->page_dma, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - buffer_info->page_dma = 0; - } - if (buffer_info->page) { - put_page(buffer_info->page); - buffer_info->page = NULL; - buffer_info->page_offset = 0; - } + if (!buffer_info->page) + continue; + + dma_unmap_page(rx_ring->dev, + buffer_info->dma, + PAGE_SIZE, + DMA_FROM_DEVICE); + __free_page(buffer_info->page); + + buffer_info->page = NULL; } size = sizeof(struct igb_rx_buffer) * rx_ring->count; @@ -3343,6 +3567,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring) /* Zero out the descriptor ring */ memset(rx_ring->desc, 0, rx_ring->size); + rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; } @@ -3779,6 +4004,7 @@ static void igb_watchdog_task(struct work_struct *work) } igb_spoof_check(adapter); + igb_ptp_rx_hang(adapter); /* Reset the timer */ if (!test_bit(__IGB_DOWN, &adapter->state)) @@ -4028,6 +4254,9 @@ static int igb_tso(struct igb_ring *tx_ring, u32 vlan_macip_lens, type_tucmd; u32 mss_l4len_idx, l4len; + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + if (!skb_is_gso(skb)) return 0; @@ -4148,26 +4377,32 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); } -static __le32 igb_tx_cmd_type(u32 tx_flags) +#define IGB_SET_FLAG(_input, _flag, _result) \ + ((_flag <= _result) ? 
\ + ((u32)(_input & _flag) * (_result / _flag)) : \ + ((u32)(_input & _flag) / (_flag / _result))) + +static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) { /* set type for advanced descriptor with frame checksum insertion */ - __le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA | - E1000_ADVTXD_DCMD_IFCS | - E1000_ADVTXD_DCMD_DEXT); + u32 cmd_type = E1000_ADVTXD_DTYP_DATA | + E1000_ADVTXD_DCMD_DEXT | + E1000_ADVTXD_DCMD_IFCS; /* set HW vlan bit if vlan is present */ - if (tx_flags & IGB_TX_FLAGS_VLAN) - cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE); + cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN, + (E1000_ADVTXD_DCMD_VLE)); + + /* set segmentation bits for TSO */ + cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO, + (E1000_ADVTXD_DCMD_TSE)); -#ifdef CONFIG_IGB_PTP /* set timestamp bit if present */ - if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) - cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP); -#endif /* CONFIG_IGB_PTP */ + cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP, + (E1000_ADVTXD_MAC_TSTAMP)); - /* set segmentation bits for TSO */ - if (tx_flags & IGB_TX_FLAGS_TSO) - cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE); + /* insert frame checksum */ + cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS); return cmd_type; } @@ -4178,30 +4413,23 @@ static void igb_tx_olinfo_status(struct igb_ring *tx_ring, { u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT; - /* 82575 requires a unique index per ring if any offload is enabled */ - if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) && - test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + /* 82575 requires a unique index per ring */ + if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) olinfo_status |= tx_ring->reg_idx << 4; /* insert L4 checksum */ - if (tx_flags & IGB_TX_FLAGS_CSUM) { - olinfo_status |= E1000_TXD_POPTS_TXSM << 8; + olinfo_status |= IGB_SET_FLAG(tx_flags, + IGB_TX_FLAGS_CSUM, + (E1000_TXD_POPTS_TXSM << 8)); - /* insert IPv4 checksum */ - if (tx_flags & IGB_TX_FLAGS_IPV4) - olinfo_status |= E1000_TXD_POPTS_IXSM << 8; - } + /* insert IPv4 checksum */ + olinfo_status |= IGB_SET_FLAG(tx_flags, + IGB_TX_FLAGS_IPV4, + (E1000_TXD_POPTS_IXSM << 8)); tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); } -/* - * The largest size we can write to the descriptor is 65535. In order to - * maintain a power of two alignment we have to limit ourselves to 32K. 
- */ -#define IGB_MAX_TXD_PWR 15 -#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR) - static void igb_tx_map(struct igb_ring *tx_ring, struct igb_tx_buffer *first, const u8 hdr_len) @@ -4209,33 +4437,37 @@ static void igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb = first->skb; struct igb_tx_buffer *tx_buffer; union e1000_adv_tx_desc *tx_desc; + struct skb_frag_struct *frag; dma_addr_t dma; - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; - unsigned int data_len = skb->data_len; - unsigned int size = skb_headlen(skb); - unsigned int paylen = skb->len - hdr_len; - __le32 cmd_type; + unsigned int data_len, size; u32 tx_flags = first->tx_flags; + u32 cmd_type = igb_tx_cmd_type(skb, tx_flags); u16 i = tx_ring->next_to_use; tx_desc = IGB_TX_DESC(tx_ring, i); - igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen); - cmd_type = igb_tx_cmd_type(tx_flags); + igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); - if (dma_mapping_error(tx_ring->dev, dma)) - goto dma_error; - /* record length, and DMA address */ - dma_unmap_len_set(first, len, size); - dma_unmap_addr_set(first, dma, dma); - tx_desc->read.buffer_addr = cpu_to_le64(dma); + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); - for (;;) { while (unlikely(size > IGB_MAX_DATA_PER_TXD)) { tx_desc->read.cmd_type_len = - cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD); + cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD); i++; tx_desc++; @@ -4243,18 +4475,18 @@ static void igb_tx_map(struct igb_ring *tx_ring, tx_desc = IGB_TX_DESC(tx_ring, 0); i = 0; } + tx_desc->read.olinfo_status = 0; dma += IGB_MAX_DATA_PER_TXD; size -= IGB_MAX_DATA_PER_TXD; - tx_desc->read.olinfo_status = 0; tx_desc->read.buffer_addr = cpu_to_le64(dma); } if (likely(!data_len)) break; - tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size); + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); i++; tx_desc++; @@ -4262,32 +4494,22 @@ static void igb_tx_map(struct igb_ring *tx_ring, tx_desc = IGB_TX_DESC(tx_ring, 0); i = 0; } + tx_desc->read.olinfo_status = 0; size = skb_frag_size(frag); data_len -= size; dma = skb_frag_dma_map(tx_ring->dev, frag, 0, - size, DMA_TO_DEVICE); - if (dma_mapping_error(tx_ring->dev, dma)) - goto dma_error; + size, DMA_TO_DEVICE); tx_buffer = &tx_ring->tx_buffer_info[i]; - dma_unmap_len_set(tx_buffer, len, size); - dma_unmap_addr_set(tx_buffer, dma, dma); - - tx_desc->read.olinfo_status = 0; - tx_desc->read.buffer_addr = cpu_to_le64(dma); - - frag++; } - netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); - /* write last descriptor with RS and EOP bits */ - cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD); - if (unlikely(skb->no_fcs)) - cmd_type &= ~(cpu_to_le32(E1000_ADVTXD_DCMD_IFCS)); - tx_desc->read.cmd_type_len = cmd_type; + cmd_type |= size | IGB_TXD_DCMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); /* set the timestamp */ first->time_stamp = jiffies; @@ -4372,21 +4594,29 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, struct igb_ring 
*tx_ring) { -#ifdef CONFIG_IGB_PTP struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); -#endif /* CONFIG_IGB_PTP */ struct igb_tx_buffer *first; int tso; u32 tx_flags = 0; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); __be16 protocol = vlan_get_protocol(skb); u8 hdr_len = 0; - /* need: 1 descriptor per page, + /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD, * + 2 desc gap to keep tail from touching head, - * + 1 desc for skb->data, * + 1 desc for context descriptor, - * otherwise try next time */ - if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) { + * otherwise try next time + */ + if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) { + unsigned short f; + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + } else { + count += skb_shinfo(skb)->nr_frags; + } + + if (igb_maybe_stop_tx(tx_ring, count + 3)) { /* this is a hard error */ return NETDEV_TX_BUSY; } @@ -4397,17 +4627,18 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, first->bytecount = skb->len; first->gso_segs = 1; -#ifdef CONFIG_IGB_PTP + skb_tx_timestamp(skb); + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && !(adapter->ptp_tx_skb))) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_flags |= IGB_TX_FLAGS_TSTAMP; adapter->ptp_tx_skb = skb_get(skb); + adapter->ptp_tx_start = jiffies; if (adapter->hw.mac.type == e1000_82576) schedule_work(&adapter->ptp_tx_work); } -#endif /* CONFIG_IGB_PTP */ if (vlan_tx_tag_present(skb)) { tx_flags |= IGB_TX_FLAGS_VLAN; @@ -4427,7 +4658,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, igb_tx_map(tx_ring, first, hdr_len); /* Make sure there is space in the ring for the next send. */ - igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4); + igb_maybe_stop_tx(tx_ring, DESC_NEEDED); return NETDEV_TX_OK; @@ -4467,10 +4698,11 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, * The minimum packet size with TCTL.PSP set is 17 so pad the skb * in order to meet this minimum size requirement. */ - if (skb->len < 17) { - if (skb_padto(skb, 17)) + if (unlikely(skb->len < 17)) { + if (skb_pad(skb, 17 - skb->len)) return NETDEV_TX_OK; skb->len = 17; + skb_set_tail_pointer(skb, 17); } return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb)); @@ -4800,7 +5032,6 @@ static irqreturn_t igb_msix_other(int irq, void *data) mod_timer(&adapter->watchdog_timer, jiffies + 1); } -#ifdef CONFIG_IGB_PTP if (icr & E1000_ICR_TS) { u32 tsicr = rd32(E1000_TSICR); @@ -4811,7 +5042,6 @@ static irqreturn_t igb_msix_other(int irq, void *data) schedule_work(&adapter->ptp_tx_work); } } -#endif /* CONFIG_IGB_PTP */ wr32(E1000_EIMS, adapter->eims_other); @@ -4851,45 +5081,63 @@ static irqreturn_t igb_msix_ring(int irq, void *data) } #ifdef CONFIG_IGB_DCA +static void igb_update_tx_dca(struct igb_adapter *adapter, + struct igb_ring *tx_ring, + int cpu) +{ + struct e1000_hw *hw = &adapter->hw; + u32 txctrl = dca3_get_tag(tx_ring->dev, cpu); + + if (hw->mac.type != e1000_82575) + txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT; + + /* + * We can enable relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. 
+ */ + txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN | + E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_DESC_DCA_EN; + + wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl); +} + +static void igb_update_rx_dca(struct igb_adapter *adapter, + struct igb_ring *rx_ring, + int cpu) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu); + + if (hw->mac.type != e1000_82575) + rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT; + + /* + * We can enable relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. + */ + rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN | + E1000_DCA_RXCTRL_DESC_DCA_EN; + + wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl); +} + static void igb_update_dca(struct igb_q_vector *q_vector) { struct igb_adapter *adapter = q_vector->adapter; - struct e1000_hw *hw = &adapter->hw; int cpu = get_cpu(); if (q_vector->cpu == cpu) goto out_no_update; - if (q_vector->tx.ring) { - int q = q_vector->tx.ring->reg_idx; - u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q)); - if (hw->mac.type == e1000_82575) { - dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK; - dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); - } else { - dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576; - dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << - E1000_DCA_TXCTRL_CPUID_SHIFT; - } - dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN; - wr32(E1000_DCA_TXCTRL(q), dca_txctrl); - } - if (q_vector->rx.ring) { - int q = q_vector->rx.ring->reg_idx; - u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q)); - if (hw->mac.type == e1000_82575) { - dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK; - dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); - } else { - dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576; - dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << - E1000_DCA_RXCTRL_CPUID_SHIFT; - } - dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN; - dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN; - dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN; - wr32(E1000_DCA_RXCTRL(q), dca_rxctrl); - } + if (q_vector->tx.ring) + igb_update_tx_dca(adapter, q_vector->tx.ring, cpu); + + if (q_vector->rx.ring) + igb_update_rx_dca(adapter, q_vector->rx.ring, cpu); + q_vector->cpu = cpu; out_no_update: put_cpu(); @@ -4964,7 +5212,7 @@ static int igb_vf_configure(struct igb_adapter *adapter, int vf) { unsigned char mac_addr[ETH_ALEN]; - eth_random_addr(mac_addr); + eth_zero_addr(mac_addr); igb_set_vf_mac(adapter, vf, mac_addr); return 0; @@ -5317,9 +5565,9 @@ static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) { unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; - /* generate a new mac address as we were hotplug removed/added */ + /* clear mac address as we were hotplug removed/added */ if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC)) - eth_random_addr(vf_mac); + eth_zero_addr(vf_mac); /* process remaining reset events */ igb_vf_reset(adapter, vf); @@ -5545,7 +5793,6 @@ static irqreturn_t igb_intr_msi(int irq, void *data) mod_timer(&adapter->watchdog_timer, jiffies + 1); } -#ifdef CONFIG_IGB_PTP if (icr & E1000_ICR_TS) { u32 tsicr = rd32(E1000_TSICR); @@ -5556,7 +5803,6 @@ static irqreturn_t igb_intr_msi(int irq, void *data) schedule_work(&adapter->ptp_tx_work); } } -#endif /* CONFIG_IGB_PTP */ napi_schedule(&q_vector->napi); @@ -5599,7 +5845,6 @@ static irqreturn_t igb_intr(int irq, void *data) mod_timer(&adapter->watchdog_timer, jiffies + 1); } -#ifdef CONFIG_IGB_PTP if (icr & E1000_ICR_TS) { u32 tsicr = 
rd32(E1000_TSICR); @@ -5610,7 +5855,6 @@ static irqreturn_t igb_intr(int irq, void *data) schedule_work(&adapter->ptp_tx_work); } } -#endif /* CONFIG_IGB_PTP */ napi_schedule(&q_vector->napi); @@ -5702,7 +5946,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) break; /* prevent any other reads prior to eop_desc */ - rmb(); + read_barrier_depends(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) @@ -5818,9 +6062,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) } } +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && - igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) { + igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. */ @@ -5840,6 +6085,257 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) return !!budget; } +/** + * igb_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void igb_reuse_rx_page(struct igb_ring *rx_ring, + struct igb_rx_buffer *old_buff) +{ + struct igb_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer)); + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma, + old_buff->page_offset, + IGB_RX_BUFSZ, + DMA_FROM_DEVICE); +} + +static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, + struct page *page, + unsigned int truesize) +{ + /* avoid re-using remote pages */ + if (unlikely(page_to_nid(page) != numa_node_id())) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= IGB_RX_BUFSZ; + + /* since we are the only owner of the page and we need to + * increment it, just set the value to 2 in order to avoid + * an unnecessary locked operation + */ + atomic_set(&page->_count, 2); +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ)) + return false; + + /* bump ref count on page before it is given to the stack */ + get_page(page); +#endif + + return true; +} + +/** + * igb_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @rx_desc: descriptor containing length of buffer written by hardware + * @skb: sk_buff to place the data into + * + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. 
+ **/ +static bool igb_add_rx_frag(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct page *page = rx_buffer->page; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); +#if (PAGE_SIZE < 8192) + unsigned int truesize = IGB_RX_BUFSZ; +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); +#endif + + if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) { + unsigned char *va = page_address(page) + rx_buffer->page_offset; + + if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { + igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); + va += IGB_TS_HDR_LEN; + size -= IGB_TS_HDR_LEN; + } + + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* we can reuse buffer as-is, just make sure it is local */ + if (likely(page_to_nid(page) == numa_node_id())) + return true; + + /* this page cannot be reused so discard it */ + put_page(page); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buffer->page_offset, size, truesize); + + return igb_can_reuse_rx_page(rx_buffer, page, truesize); +} + +static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc) +{ + struct igb_rx_buffer *rx_buffer; + struct sk_buff *skb; + struct page *page; + void *page_addr; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); +#if (PAGE_SIZE < 8192) + unsigned int truesize = IGB_RX_BUFSZ; +#else + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(NET_SKB_PAD + + NET_IP_ALIGN + + size); +#endif + + /* If we spanned a buffer we have a huge mess so test for it */ + BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP))); + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + page_addr = page_address(page) + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN); +#endif + + /* build an skb to around the page buffer */ + skb = build_skb(page_addr, truesize); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_failed++; + return NULL; + } + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + IGB_RX_BUFSZ, + DMA_FROM_DEVICE); + + /* update pointers within the skb to store the data */ + skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); + __skb_put(skb, size); + + /* pull timestamp out of packet data */ + if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { + igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb); + __skb_pull(skb, IGB_TS_HDR_LEN); + } + + if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) { + /* hand second half of page back to the ring */ + igb_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->dma = 0; + rx_buffer->page = NULL; + + return skb; +} + +static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct igb_rx_buffer *rx_buffer; + struct page *page; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + + page = rx_buffer->page; + prefetchw(page); + + if (likely(!skb)) { + void 
*page_addr = page_address(page) + + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + IGB_RX_HDR_LEN); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_failed++; + return NULL; + } + + /* + * we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + } + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + IGB_RX_BUFSZ, + DMA_FROM_DEVICE); + + /* pull page into skb */ + if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + igb_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; + + return skb; +} + static inline void igb_rx_checksum(struct igb_ring *ring, union e1000_adv_rx_desc *rx_desc, struct sk_buff *skb) @@ -5889,147 +6385,350 @@ static inline void igb_rx_hash(struct igb_ring *ring, skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); } -static void igb_rx_vlan(struct igb_ring *ring, - union e1000_adv_rx_desc *rx_desc, - struct sk_buff *skb) +/** + * igb_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool igb_is_non_eop(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(IGB_RX_DESC(rx_ring, ntc)); + + if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP))) + return false; + + return true; +} + +/** + * igb_get_headlen - determine size of header for LRO/GRO + * @data: pointer to the start of the headers + * @max_len: total length of section to find headers in + * + * This function is meant to determine the length of headers that will + * be recognized by hardware for LRO, and GRO offloads. The main + * motivation of doing this is to only perform one pull for IPv4 TCP + * packets so that we can do basic things like calculating the gso_size + * based on the average data per packet. 
+ **/ +static unsigned int igb_get_headlen(unsigned char *data, + unsigned int max_len) +{ + union { + unsigned char *network; + /* l2 headers */ + struct ethhdr *eth; + struct vlan_hdr *vlan; + /* l3 headers */ + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + } hdr; + __be16 protocol; + u8 nexthdr = 0; /* default to not TCP */ + u8 hlen; + + /* this should never happen, but better safe than sorry */ + if (max_len < ETH_HLEN) + return max_len; + + /* initialize network frame pointer */ + hdr.network = data; + + /* set first protocol and move network header forward */ + protocol = hdr.eth->h_proto; + hdr.network += ETH_HLEN; + + /* handle any vlan tag if present */ + if (protocol == __constant_htons(ETH_P_8021Q)) { + if ((hdr.network - data) > (max_len - VLAN_HLEN)) + return max_len; + + protocol = hdr.vlan->h_vlan_encapsulated_proto; + hdr.network += VLAN_HLEN; + } + + /* handle L3 protocols */ + if (protocol == __constant_htons(ETH_P_IP)) { + if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) + return max_len; + + /* access ihl as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[0] & 0x0F) << 2; + + /* verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct iphdr)) + return hdr.network - data; + + /* record next protocol if header is present */ + if (!hdr.ipv4->frag_off) + nexthdr = hdr.ipv4->protocol; + } else if (protocol == __constant_htons(ETH_P_IPV6)) { + if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) + return max_len; + + /* record next protocol */ + nexthdr = hdr.ipv6->nexthdr; + hlen = sizeof(struct ipv6hdr); + } else { + return hdr.network - data; + } + + /* relocate pointer to start of L4 header */ + hdr.network += hlen; + + /* finally sort out TCP */ + if (nexthdr == IPPROTO_TCP) { + if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) + return max_len; + + /* access doff as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[12] & 0xF0) >> 2; + + /* verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct tcphdr)) + return hdr.network - data; + + hdr.network += hlen; + } else if (nexthdr == IPPROTO_UDP) { + if ((hdr.network - data) > (max_len - sizeof(struct udphdr))) + return max_len; + + hdr.network += sizeof(struct udphdr); + } + + /* + * If everything has gone correctly hdr.network should be the + * data section of the packet and will be the end of the header. + * If not then it probably represents the end of the last recognized + * header. + */ + if ((hdr.network - data) < max_len) + return hdr.network - data; + else + return max_len; +} + +/** + * igb_pull_tail - igb specific version of skb_pull_tail + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being adjusted + * + * This function is an igb specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. 
+ */ +static void igb_pull_tail(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + /* + * it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { + /* retrieve timestamp from buffer */ + igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); + + /* update pointers to remove timestamp header */ + skb_frag_size_sub(frag, IGB_TS_HDR_LEN); + frag->page_offset += IGB_TS_HDR_LEN; + skb->data_len -= IGB_TS_HDR_LEN; + skb->len -= IGB_TS_HDR_LEN; + + /* move va to start of packet data */ + va += IGB_TS_HDR_LEN; + } + + /* + * we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +/** + * igb_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. + **/ +static bool igb_cleanup_headers(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) { - if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { + + if (unlikely((igb_test_staterr(rx_desc, + E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) { + struct net_device *netdev = rx_ring->netdev; + if (!(netdev->features & NETIF_F_RXALL)) { + dev_kfree_skb_any(skb); + return true; + } + } + + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + igb_pull_tail(rx_ring, rx_desc, skb); + + /* if skb_pad returns an error the skb was freed */ + if (unlikely(skb->len < 60)) { + int pad_len = 60 - skb->len; + + if (skb_pad(skb, pad_len)) + return true; + __skb_put(skb, pad_len); + } + + return false; +} + +/** + * igb_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, timestamp, protocol, and + * other fields within the skb. 
+ **/ +static void igb_process_skb_fields(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *dev = rx_ring->netdev; + + igb_rx_hash(rx_ring, rx_desc, skb); + + igb_rx_checksum(rx_ring, rx_desc, skb); + + igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb); + + if ((dev->features & NETIF_F_HW_VLAN_RX) && + igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { u16 vid; if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) && - test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags)) + test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) vid = be16_to_cpu(rx_desc->wb.upper.vlan); else vid = le16_to_cpu(rx_desc->wb.upper.vlan); __vlan_hwaccel_put_tag(skb, vid); } -} -static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc) -{ - /* HW will not DMA in data larger than the given buffer, even if it - * parses the (NFS, of course) header to be larger. In that case, it - * fills the header buffer and spills the rest into the page. - */ - u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) & - E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; - if (hlen > IGB_RX_HDR_LEN) - hlen = IGB_RX_HDR_LEN; - return hlen; + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); } -static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget) +static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) { struct igb_ring *rx_ring = q_vector->rx.ring; - union e1000_adv_rx_desc *rx_desc; - const int current_node = numa_node_id(); + struct sk_buff *skb = rx_ring->skb; unsigned int total_bytes = 0, total_packets = 0; u16 cleaned_count = igb_desc_unused(rx_ring); - u16 i = rx_ring->next_to_clean; - rx_desc = IGB_RX_DESC(rx_ring, i); - - while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) { - struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; - struct sk_buff *skb = buffer_info->skb; - union e1000_adv_rx_desc *next_rxd; + do { + union e1000_adv_rx_desc *rx_desc; - buffer_info->skb = NULL; - prefetch(skb->data); + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IGB_RX_BUFFER_WRITE) { + igb_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } - i++; - if (i == rx_ring->count) - i = 0; + rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean); - next_rxd = IGB_RX_DESC(rx_ring, i); - prefetch(next_rxd); + if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) + break; - /* - * This memory barrier is needed to keep us from reading + /* This memory barrier is needed to keep us from reading * any other fields out of the rx_desc until we know the * RXD_STAT_DD bit is set */ rmb(); - if (!skb_is_nonlinear(skb)) { - __skb_put(skb, igb_get_hlen(rx_desc)); - dma_unmap_single(rx_ring->dev, buffer_info->dma, - IGB_RX_HDR_LEN, - DMA_FROM_DEVICE); - buffer_info->dma = 0; - } - - if (rx_desc->wb.upper.length) { - u16 length = le16_to_cpu(rx_desc->wb.upper.length); - - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - buffer_info->page, - buffer_info->page_offset, - length); - - skb->len += length; - skb->data_len += length; - skb->truesize += PAGE_SIZE / 2; + /* retrieve a buffer from the ring */ + if (ring_uses_build_skb(rx_ring)) + skb = igb_build_rx_buffer(rx_ring, rx_desc); + else + skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb); - if ((page_count(buffer_info->page) != 1) || - (page_to_nid(buffer_info->page) != current_node)) - buffer_info->page = NULL; - else - get_page(buffer_info->page); + /* exit 
if we failed to retrieve a buffer */ + if (!skb) + break; - dma_unmap_page(rx_ring->dev, buffer_info->page_dma, - PAGE_SIZE / 2, DMA_FROM_DEVICE); - buffer_info->page_dma = 0; - } + cleaned_count++; - if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) { - struct igb_rx_buffer *next_buffer; - next_buffer = &rx_ring->rx_buffer_info[i]; - buffer_info->skb = next_buffer->skb; - buffer_info->dma = next_buffer->dma; - next_buffer->skb = skb; - next_buffer->dma = 0; - goto next_desc; - } + /* fetch next buffer in frame if non-eop */ + if (igb_is_non_eop(rx_ring, rx_desc)) + continue; - if (unlikely((igb_test_staterr(rx_desc, - E1000_RXDEXT_ERR_FRAME_ERR_MASK)) - && !(rx_ring->netdev->features & NETIF_F_RXALL))) { - dev_kfree_skb_any(skb); - goto next_desc; + /* verify the packet layout is correct */ + if (igb_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + continue; } -#ifdef CONFIG_IGB_PTP - igb_ptp_rx_hwtstamp(q_vector, rx_desc, skb); -#endif /* CONFIG_IGB_PTP */ - igb_rx_hash(rx_ring, rx_desc, skb); - igb_rx_checksum(rx_ring, rx_desc, skb); - igb_rx_vlan(rx_ring, rx_desc, skb); - + /* probably a little skewed due to removing CRC */ total_bytes += skb->len; - total_packets++; - skb->protocol = eth_type_trans(skb, rx_ring->netdev); + /* populate checksum, timestamp, VLAN, and protocol */ + igb_process_skb_fields(rx_ring, rx_desc, skb); napi_gro_receive(&q_vector->napi, skb); - budget--; -next_desc: - if (!budget) - break; + /* reset skb pointer */ + skb = NULL; - cleaned_count++; - /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= IGB_RX_BUFFER_WRITE) { - igb_alloc_rx_buffers(rx_ring, cleaned_count); - cleaned_count = 0; - } + /* update budget accounting */ + total_packets++; + } while (likely(total_packets < budget)); - /* use prefetched values */ - rx_desc = next_rxd; - } + /* place incomplete frames back on ring for completion */ + rx_ring->skb = skb; - rx_ring->next_to_clean = i; u64_stats_update_begin(&rx_ring->rx_syncp); rx_ring->rx_stats.packets += total_packets; rx_ring->rx_stats.bytes += total_bytes; @@ -6040,74 +6739,53 @@ next_desc: if (cleaned_count) igb_alloc_rx_buffers(rx_ring, cleaned_count); - return !!budget; + return (total_packets < budget); } -static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring, - struct igb_rx_buffer *bi) +static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, + struct igb_rx_buffer *bi) { - struct sk_buff *skb = bi->skb; - dma_addr_t dma = bi->dma; + struct page *page = bi->page; + dma_addr_t dma; - if (dma) + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) return true; - if (likely(!skb)) { - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - IGB_RX_HDR_LEN); - bi->skb = skb; - if (!skb) { - rx_ring->rx_stats.alloc_failed++; - return false; - } - - /* initialize skb for ring */ - skb_record_rx_queue(skb, rx_ring->queue_index); + /* alloc new page for storage */ + page = __skb_alloc_page(GFP_ATOMIC | __GFP_COLD, NULL); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_failed++; + return false; } - dma = dma_map_single(rx_ring->dev, skb->data, - IGB_RX_HDR_LEN, DMA_FROM_DEVICE); + /* map page for use */ + dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ if (dma_mapping_error(rx_ring->dev, dma)) { + __free_page(page); + rx_ring->rx_stats.alloc_failed++; return false; } bi->dma = dma; + bi->page = page; + 
bi->page_offset = 0; + return true; } -static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, - struct igb_rx_buffer *bi) +static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring) { - struct page *page = bi->page; - dma_addr_t page_dma = bi->page_dma; - unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2); - - if (page_dma) - return true; - - if (!page) { - page = __skb_alloc_page(GFP_ATOMIC, bi->skb); - bi->page = page; - if (unlikely(!page)) { - rx_ring->rx_stats.alloc_failed++; - return false; - } - } - - page_dma = dma_map_page(rx_ring->dev, page, - page_offset, PAGE_SIZE / 2, - DMA_FROM_DEVICE); - - if (dma_mapping_error(rx_ring->dev, page_dma)) { - rx_ring->rx_stats.alloc_failed++; - return false; - } - - bi->page_dma = page_dma; - bi->page_offset = page_offset; - return true; + if (ring_uses_build_skb(rx_ring)) + return NET_SKB_PAD + NET_IP_ALIGN; + else + return 0; } /** @@ -6120,22 +6798,25 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) struct igb_rx_buffer *bi; u16 i = rx_ring->next_to_use; + /* nothing to do */ + if (!cleaned_count) + return; + rx_desc = IGB_RX_DESC(rx_ring, i); bi = &rx_ring->rx_buffer_info[i]; i -= rx_ring->count; - while (cleaned_count--) { - if (!igb_alloc_mapped_skb(rx_ring, bi)) - break; - - /* Refresh the desc even if buffer_addrs didn't change - * because each write-back erases this info. */ - rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); - + do { if (!igb_alloc_mapped_page(rx_ring, bi)) break; - rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); + /* + * Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + + bi->page_offset + + igb_rx_offset(rx_ring)); rx_desc++; bi++; @@ -6148,17 +6829,25 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) /* clear the hdr_addr for the next_to_use descriptor */ rx_desc->read.hdr_addr = 0; - } + + cleaned_count--; + } while (cleaned_count); i += rx_ring->count; if (rx_ring->next_to_use != i) { + /* record the next descriptor to use */ rx_ring->next_to_use = i; - /* Force memory writes to complete before letting h/w + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* + * Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, - * such as IA-64). */ + * such as IA-64). 
+ */ wmb(); writel(i, rx_ring->tail); } @@ -6207,10 +6896,8 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) case SIOCGMIIREG: case SIOCSMIIREG: return igb_mii_ioctl(netdev, ifr, cmd); -#ifdef CONFIG_IGB_PTP case SIOCSHWTSTAMP: return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd); -#endif /* CONFIG_IGB_PTP */ default: return -EOPNOTSUPP; } @@ -6478,7 +7165,7 @@ static int igb_resume(struct device *dev) pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); - if (igb_init_interrupt_scheme(adapter)) { + if (igb_init_interrupt_scheme(adapter, true)) { dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); return -ENOMEM; } @@ -6492,7 +7179,9 @@ static int igb_resume(struct device *dev) wr32(E1000_WUS, ~0); if (netdev->flags & IFF_UP) { + rtnl_lock(); err = __igb_open(netdev, true); + rtnl_unlock(); if (err) return err; } @@ -6553,6 +7242,72 @@ static void igb_shutdown(struct pci_dev *pdev) } } +#ifdef CONFIG_PCI_IOV +static int igb_sriov_reinit(struct pci_dev *dev) +{ + struct net_device *netdev = pci_get_drvdata(dev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + + rtnl_lock(); + + if (netif_running(netdev)) + igb_close(netdev); + + igb_clear_interrupt_scheme(adapter); + + igb_init_queue_configuration(adapter); + + if (igb_init_interrupt_scheme(adapter, true)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + if (netif_running(netdev)) + igb_open(netdev); + + rtnl_unlock(); + + return 0; +} + +static int igb_pci_disable_sriov(struct pci_dev *dev) +{ + int err = igb_disable_sriov(dev); + + if (!err) + err = igb_sriov_reinit(dev); + + return err; +} + +static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs) +{ + int err = igb_enable_sriov(dev, num_vfs); + + if (err) + goto out; + + err = igb_sriov_reinit(dev); + if (!err) + return num_vfs; + +out: + return err; +} + +#endif +static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ +#ifdef CONFIG_PCI_IOV + if (num_vfs == 0) + return igb_pci_disable_sriov(dev); + else + return igb_pci_enable_sriov(dev, num_vfs); +#endif + return 0; +} + #ifdef CONFIG_NET_POLL_CONTROLLER /* * Polling 'interrupt' - used by things like netconsole to send skbs @@ -6958,4 +7713,72 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) } } +/* igb_read_i2c_byte - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: device address + * @data: value read + * + * Performs byte read operation over I2C interface at + * a specified device address. 
+ */
+s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+		       u8 dev_addr, u8 *data)
+{
+	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
+	struct i2c_client *this_client = adapter->i2c_client;
+	s32 status;
+	u16 swfw_mask = 0;
+
+	if (!this_client)
+		return E1000_ERR_I2C;
+
+	swfw_mask = E1000_SWFW_PHY0_SM;
+
+	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
+	    != E1000_SUCCESS)
+		return E1000_ERR_SWFW_SYNC;
+
+	status = i2c_smbus_read_byte_data(this_client, byte_offset);
+	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+	if (status < 0)
+		return E1000_ERR_I2C;
+	else {
+		*data = status;
+		return E1000_SUCCESS;
+	}
+}
+
+/* igb_write_i2c_byte - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: device address
+ * @data: value to write
+ *
+ * Performs byte write operation over I2C interface at
+ * a specified device address.
+ */
+s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+		       u8 dev_addr, u8 data)
+{
+	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
+	struct i2c_client *this_client = adapter->i2c_client;
+	s32 status;
+	u16 swfw_mask = E1000_SWFW_PHY0_SM;
+
+	if (!this_client)
+		return E1000_ERR_I2C;
+
+	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS)
+		return E1000_ERR_SWFW_SYNC;
+	status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
+	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+	if (status)
+		return E1000_ERR_I2C;
+	else
+		return E1000_SUCCESS;
+
+}
 /* igb_main.c */
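The sketches that follow illustrate the techniques this patch relies on; all of them are standalone userspace C with assumed constants and hypothetical helpers, not driver code. The reworked igb_tx_map()/igb_xmit_frame_ring() pair budgets one descriptor per 32 KB chunk of every buffer before touching the ring. Here IGB_MAX_TXD_PWR, IGB_MAX_DATA_PER_TXD and TXD_USE_COUNT are mirrored locally with assumed values rather than taken from igb.h, and igb_desc_count() is a made-up helper.

#include <stdio.h>

#define IGB_MAX_TXD_PWR      15
#define IGB_MAX_DATA_PER_TXD (1u << IGB_MAX_TXD_PWR) /* 32 KB per descriptor */
#define DIV_ROUND_UP(n, d)   (((n) + (d) - 1) / (d))
#define TXD_USE_COUNT(s)     DIV_ROUND_UP((s), IGB_MAX_DATA_PER_TXD)

/* Worst-case descriptor count for one frame: one or more for the linear
 * part plus one per 32 KB chunk of each fragment, as counted in
 * igb_xmit_frame_ring() before igb_maybe_stop_tx() is consulted. */
static unsigned int igb_desc_count(unsigned int headlen,
				   const unsigned int *frag_sizes,
				   unsigned int nr_frags)
{
	unsigned int count = TXD_USE_COUNT(headlen);
	unsigned int f;

	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(frag_sizes[f]);
	return count;
}

int main(void)
{
	unsigned int frags[2] = { 65536, 1000 };

	/* 256-byte head -> 1, 64 KB frag -> 2, 1000-byte frag -> 1 */
	printf("descriptors: %u\n", igb_desc_count(256, frags, 2));
	return 0;
}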
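The tx_map hunks replace cmd_type | cpu_to_le32(size) with cpu_to_le32(cmd_type ^ size). That works because the buffer length occupies descriptor bits the command/type word never sets, so OR and XOR produce the same encoding; the field layout below is assumed for illustration only.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DTYP_DATA_BITS 0x00300000u /* assumed command/type bits, high half */
#define LEN_MASK       0x0000FFFFu /* buffer length lives in the low bits */

int main(void)
{
	uint32_t cmd_type = DTYP_DATA_BITS;
	uint32_t size = 1514;

	/* no bit overlap, so OR and XOR build the same descriptor word */
	assert((cmd_type & LEN_MASK) == 0);
	assert((cmd_type | size) == (cmd_type ^ size));
	printf("cmd_type_len = 0x%08x\n", (unsigned)(cmd_type ^ size));
	return 0;
}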
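igb_xmit_frame() now pads runt frames up to 17 bytes (the minimum with TCTL.PSP set, per the comment in the diff) and fixes up the length and tail pointer by hand. A buffer-level sketch of the same adjustment, with pad_runt() as a hypothetical stand-in for skb_pad():

#include <stdio.h>
#include <string.h>

#define IGB_MIN_TX_LEN 17 /* minimum frame length with TCTL.PSP, per the diff */

static size_t pad_runt(unsigned char *buf, size_t len, size_t cap)
{
	if (len >= IGB_MIN_TX_LEN)
		return len;
	if (cap < IGB_MIN_TX_LEN)
		return 0; /* mirror skb_pad() failure: caller drops the frame */
	memset(buf + len, 0, IGB_MIN_TX_LEN - len);
	return IGB_MIN_TX_LEN;
}

int main(void)
{
	unsigned char buf[32] = "hello";

	printf("padded len: %zu\n", pad_runt(buf, 5, sizeof(buf)));
	return 0;
}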
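igb_clean_tx_irq() now wakes a stopped queue only once TX_WAKE_THRESHOLD (twice DESC_NEEDED) descriptors are free, giving the stop/wake pair hysteresis so the queue does not bounce. DESC_NEEDED below is an assumed stand-in, not the driver's definition.

#include <stdbool.h>
#include <stdio.h>

#define DESC_NEEDED       21 /* assumption: roughly MAX_SKB_FRAGS + 4 */
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)

struct ring { unsigned int unused; bool stopped; };

static bool maybe_stop(struct ring *r, unsigned int need)
{
	if (r->unused >= need)
		return false;
	r->stopped = true; /* netif_stop_subqueue() in the driver */
	return true;
}

static void clean_irq(struct ring *r, unsigned int freed)
{
	r->unused += freed;
	if (r->stopped && r->unused >= TX_WAKE_THRESHOLD)
		r->stopped = false; /* netif_wake_subqueue() in the driver */
}

int main(void)
{
	struct ring r = { .unused = 10, .stopped = false };

	maybe_stop(&r, DESC_NEEDED); /* 10 < 21: queue stops */
	clean_irq(&r, 20);           /* 30 < 42: still stopped */
	clean_irq(&r, 20);           /* 50 >= 42: queue wakes */
	printf("stopped=%d, unused=%u\n", r.stopped, r.unused);
	return 0;
}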
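igb_can_reuse_rx_page() recycles a receive page on 4 KB-page systems by flipping the buffer offset between the page's two halves whenever the driver is the sole owner of the page. IGB_RX_BUFSZ = 2048 is assumed here.

#include <stdbool.h>
#include <stdio.h>

#define IGB_RX_BUFSZ 2048u /* assumption: half of a 4 KB page */

static bool reuse_small_page(unsigned int *offset, int page_refs)
{
	if (page_refs != 1)
		return false; /* the stack still holds the other half */
	*offset ^= IGB_RX_BUFSZ; /* flip 0 <-> 2048 */
	return true;
}

int main(void)
{
	unsigned int off = 0;

	reuse_small_page(&off, 1);
	printf("offset after first flip: %u\n", off); /* 2048 */
	reuse_small_page(&off, 1);
	printf("offset after second flip: %u\n", off); /* back to 0 */
	return 0;
}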
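igb_add_rx_frag() copies small frames into the skb's linear area with the memcpy length rounded up to sizeof(long), so the copy can run word-at-a-time while the logical frame length stays exact. A sketch with a hypothetical ALIGN_UP macro:

#include <stdio.h>
#include <string.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	char src[64] = "small frame payload";
	char dst[64];
	size_t size = 19;                           /* exact frame length */
	size_t copy = ALIGN_UP(size, sizeof(long)); /* 24 on LP64 */

	memcpy(dst, src, copy); /* a few padding bytes ride along */
	printf("logical len %zu, copied %zu\n", size, copy);
	return 0;
}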
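The rewritten RX path (igb_is_non_eop(), igb_reuse_rx_page(), the alloc loop) advances and wraps ring indices with a compare rather than a modulo. A minimal sketch of that wrap:

#include <stdint.h>
#include <stdio.h>

static uint16_t ring_advance(uint16_t idx, uint16_t count)
{
	idx++;
	return (idx < count) ? idx : 0; /* wrap without a division */
}

int main(void)
{
	const uint16_t count = 256;
	uint16_t ntc = 254;

	ntc = ring_advance(ntc, count); /* 255 */
	ntc = ring_advance(ntc, count); /* wraps to 0 */
	printf("next_to_clean = %u\n", ntc);
	return 0;
}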
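igb_get_headlen() walks Ethernet/VLAN/IP/TCP headers reading the length fields as raw bytes, so platforms sensitive to unaligned access (the ia64 notes in the code) stay safe. A greatly simplified untagged IPv4/TCP version, just enough to show the byte-wise ihl/doff extraction; the driver handles VLAN, IPv6 and UDP as well.

#include <stdio.h>

/* Untagged IPv4/TCP only; returns how many bytes of header to pull. */
static unsigned int headlen_ipv4_tcp(const unsigned char *data,
				     unsigned int max_len)
{
	unsigned int off = 14; /* ETH_HLEN */
	unsigned int ihl, doff;

	if (max_len < off + 20)
		return max_len;
	ihl = (data[off] & 0x0F) << 2; /* IHL, read as a raw byte */
	if (ihl < 20)
		return off;
	off += ihl;
	if (max_len < off + 20)
		return max_len;
	doff = (data[off + 12] & 0xF0) >> 2; /* TCP data offset, raw byte */
	if (doff < 20)
		return off;
	return off + doff;
}

int main(void)
{
	unsigned char frame[64] = {0};

	frame[14] = 0x45;           /* IPv4, IHL = 5 words */
	frame[14 + 20 + 12] = 0x50; /* TCP doff = 5 words */
	printf("header bytes: %u\n", headlen_ipv4_tcp(frame, sizeof(frame)));
	return 0;
}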
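When ring_uses_build_skb() is true, igb_alloc_rx_buffers() programs the descriptor's packet address past the headroom build_skb() will later need, via igb_rx_offset(). The NET_SKB_PAD and NET_IP_ALIGN values below are typical defaults, not read from this tree.

#include <stdint.h>
#include <stdio.h>

#define NET_SKB_PAD  64u /* typical values, assumed for illustration */
#define NET_IP_ALIGN 2u

static uint64_t rx_desc_pkt_addr(uint64_t dma, uint32_t page_offset,
				 int uses_build_skb)
{
	uint32_t off = uses_build_skb ? NET_SKB_PAD + NET_IP_ALIGN : 0;

	return dma + page_offset + off;
}

int main(void)
{
	printf("pkt_addr = 0x%llx\n",
	       (unsigned long long)rx_desc_pkt_addr(0x100000, 2048, 1));
	return 0;
}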
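The new igb_read_i2c_byte() funnels the combined return of i2c_smbus_read_byte_data() (a negative errno or the byte itself) into the driver's s32 status plus an out-parameter. A userspace sketch of that mapping, with a hypothetical fake_smbus_read() stub and placeholder status values:

#include <stdint.h>
#include <stdio.h>

#define E1000_SUCCESS 0
#define E1000_ERR_I2C 10 /* placeholder status codes, values assumed */

static int fake_smbus_read(uint8_t offset) /* hypothetical stub */
{
	return (offset == 0x00) ? 0x42 : -5; /* byte read, or -EIO */
}

static int32_t read_i2c_byte(uint8_t offset, uint8_t *data)
{
	int status = fake_smbus_read(offset);

	if (status < 0)
		return E1000_ERR_I2C; /* negative errno becomes a driver error */
	*data = (uint8_t)status;      /* otherwise the low byte is the value */
	return E1000_SUCCESS;
}

int main(void)
{
	uint8_t v = 0;

	if (read_i2c_byte(0x00, &v) == E1000_SUCCESS)
		printf("read 0x%02x\n", v);
	return 0;
}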