Diffstat (limited to 'drivers/net/ethernet/intel/i40evf/i40evf_main.c')
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c  |  311
1 file changed, 212 insertions(+), 99 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 7c213a347909..7b2a4eba92e2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -46,7 +46,7 @@ static const char i40evf_driver_string[] =
#define DRV_VERSION_MAJOR 3
#define DRV_VERSION_MINOR 0
-#define DRV_VERSION_BUILD 0
+#define DRV_VERSION_BUILD 1
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -430,57 +430,26 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
* group the rings as "efficiently" as possible. You would add new
* mapping configurations in here.
**/
-static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
+static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
{
+ int rings_remaining = adapter->num_active_queues;
+ int ridx = 0, vidx = 0;
int q_vectors;
- int v_start = 0;
- int rxr_idx = 0, txr_idx = 0;
- int rxr_remaining = adapter->num_active_queues;
- int txr_remaining = adapter->num_active_queues;
- int i, j;
- int rqpv, tqpv;
- int err = 0;
q_vectors = adapter->num_msix_vectors - NONQ_VECS;
- /* The ideal configuration...
- * We have enough vectors to map one per queue.
- */
- if (q_vectors >= (rxr_remaining * 2)) {
- for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
- i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx);
+ for (; ridx < rings_remaining; ridx++) {
+ i40evf_map_vector_to_rxq(adapter, vidx, ridx);
+ i40evf_map_vector_to_txq(adapter, vidx, ridx);
- for (; txr_idx < txr_remaining; v_start++, txr_idx++)
- i40evf_map_vector_to_txq(adapter, v_start, txr_idx);
- goto out;
- }
-
- /* If we don't have enough vectors for a 1-to-1
- * mapping, we'll have to group them so there are
- * multiple queues per vector.
- * Re-adjusting *qpv takes care of the remainder.
- */
- for (i = v_start; i < q_vectors; i++) {
- rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
- for (j = 0; j < rqpv; j++) {
- i40evf_map_vector_to_rxq(adapter, i, rxr_idx);
- rxr_idx++;
- rxr_remaining--;
- }
- }
- for (i = v_start; i < q_vectors; i++) {
- tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
- for (j = 0; j < tqpv; j++) {
- i40evf_map_vector_to_txq(adapter, i, txr_idx);
- txr_idx++;
- txr_remaining--;
- }
+ /* In the case where we have more queues than vectors, continue
+ * round-robin on vectors until all queues are mapped.
+ */
+ if (++vidx >= q_vectors)
+ vidx = 0;
}
-out:
adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
-
- return err;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
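The rewrite above collapses the old two-pass rx/tx split into a single round-robin pass. A minimal, runnable sketch of the same assignment logic (plain C; map_vector_to_ring() is a hypothetical stand-in for the driver's i40evf_map_vector_to_rxq()/..._txq() pair):

    #include <stdio.h>

    /* Hypothetical stand-in for the driver's per-ring mapping helpers. */
    static void map_vector_to_ring(int vidx, int ridx)
    {
            printf("ring %d -> vector %d\n", ridx, vidx);
    }

    int main(void)
    {
            int rings_remaining = 8, q_vectors = 3;
            int ridx, vidx = 0;

            for (ridx = 0; ridx < rings_remaining; ridx++) {
                    map_vector_to_ring(vidx, ridx);
                    /* More queues than vectors: wrap and keep going. */
                    if (++vidx >= q_vectors)
                            vidx = 0;
            }
            return 0;
    }

With 8 rings and 3 vectors this maps rings {0, 3, 6} to vector 0, {1, 4, 7} to vector 1, and {2, 5} to vector 2. Since the loop cannot fail, the function now returns void.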
@@ -520,7 +489,7 @@ static void i40evf_irq_affinity_notify(struct irq_affinity_notify *notify,
struct i40e_q_vector *q_vector =
container_of(notify, struct i40e_q_vector, affinity_notify);
- q_vector->affinity_mask = *mask;
+ cpumask_copy(&q_vector->affinity_mask, mask);
}
/**
@@ -543,9 +512,10 @@ static void i40evf_irq_affinity_release(struct kref *ref) {}
static int
i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
{
- int vector, err, q_vectors;
- int rx_int_idx = 0, tx_int_idx = 0;
- int irq_num;
+ unsigned int vector, q_vectors;
+ unsigned int rx_int_idx = 0, tx_int_idx = 0;
+ int irq_num, err;
+ int cpu;
i40evf_irq_disable(adapter);
/* Decrement for Other and TCP Timer vectors */
@@ -556,18 +526,15 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
if (q_vector->tx.ring && q_vector->rx.ring) {
- snprintf(q_vector->name, sizeof(q_vector->name) - 1,
- "i40evf-%s-%s-%d", basename,
- "TxRx", rx_int_idx++);
+ snprintf(q_vector->name, sizeof(q_vector->name),
+ "i40evf-%s-TxRx-%d", basename, rx_int_idx++);
tx_int_idx++;
} else if (q_vector->rx.ring) {
- snprintf(q_vector->name, sizeof(q_vector->name) - 1,
- "i40evf-%s-%s-%d", basename,
- "rx", rx_int_idx++);
+ snprintf(q_vector->name, sizeof(q_vector->name),
+ "i40evf-%s-rx-%d", basename, rx_int_idx++);
} else if (q_vector->tx.ring) {
- snprintf(q_vector->name, sizeof(q_vector->name) - 1,
- "i40evf-%s-%s-%d", basename,
- "tx", tx_int_idx++);
+ snprintf(q_vector->name, sizeof(q_vector->name),
+ "i40evf-%s-tx-%d", basename, tx_int_idx++);
} else {
/* skip this unused q_vector */
continue;
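Dropping the "- 1" from the size argument is safe because snprintf() always NUL-terminates within the buffer it is given; reserving an extra byte only shortened the usable IRQ name. A small userspace illustration of the truncation behavior:

    #include <stdio.h>

    int main(void)
    {
            char name[8];

            /* snprintf() writes at most sizeof(name) - 1 characters
             * and always NUL-terminates, so truncation is safe here.
             */
            snprintf(name, sizeof(name), "i40evf-%s-rx-%d", "eth0", 0);
            printf("%s\n", name);   /* prints "i40evf-" */
            return 0;
    }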
@@ -587,8 +554,12 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
q_vector->affinity_notify.release =
i40evf_irq_affinity_release;
irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
- /* assign the mask for this irq */
- irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
+ /* Spread the IRQ affinity hints across online CPUs. Note that
+ * get_cpu_mask returns a mask with a permanent lifetime so
+ * it's safe to use as a hint for irq_set_affinity_hint.
+ */
+ cpu = cpumask_local_spread(q_vector->v_idx, -1);
+ irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
}
return 0;
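As the new comment says, get_cpu_mask() points into a static table, so the hint passed to irq_set_affinity_hint() stays valid for the IRQ's lifetime. Condensed, the new per-vector spreading looks like this (fragment; the request_irq() and notifier setup from this function are elided):

    for (vector = 0; vector < q_vectors; vector++) {
            irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

            /* cpumask_local_spread(i, -1) returns the i-th online CPU
             * (NUMA-aware if a node id is passed instead of -1), so
             * successive vectors land on successive CPUs.
             */
            cpu = cpumask_local_spread(vector, -1);
            irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
    }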
@@ -909,6 +880,8 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
list_add_tail(&f->list, &adapter->mac_filter_list);
f->add = true;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ } else {
+ f->remove = false;
}
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
@@ -1143,6 +1116,7 @@ void i40evf_down(struct i40evf_adapter *adapter)
}
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}
/**
@@ -1217,9 +1191,18 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
{
int i, num_active_queues;
- num_active_queues = min_t(int,
- adapter->vsi_res->num_queue_pairs,
- (int)(num_online_cpus()));
+ /* If we're in reset reallocating queues, we don't yet know for
+ * certain that the PF gave us the number of queues we asked for,
+ * but we'll assume it did. We'll confirm once basic reset finishes
+ * and we start negotiating config with the PF.
+ */
+ if (adapter->num_req_queues)
+ num_active_queues = adapter->num_req_queues;
+ else
+ num_active_queues = min_t(int,
+ adapter->vsi_res->num_queue_pairs,
+ (int)(num_online_cpus()));
+
adapter->tx_rings = kcalloc(num_active_queues,
sizeof(struct i40e_ring), GFP_KERNEL);
@@ -1240,8 +1223,8 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
tx_ring->netdev = adapter->netdev;
tx_ring->dev = &adapter->pdev->dev;
tx_ring->count = adapter->tx_desc_count;
- tx_ring->tx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF);
- if (adapter->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
+ tx_ring->tx_itr_setting = I40E_ITR_TX_DEF;
+ if (adapter->flags & I40EVF_FLAG_WB_ON_ITR_CAPABLE)
tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;
rx_ring = &adapter->rx_rings[i];
@@ -1249,7 +1232,7 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
rx_ring->netdev = adapter->netdev;
rx_ring->dev = &adapter->pdev->dev;
rx_ring->count = adapter->rx_desc_count;
- rx_ring->rx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF);
+ rx_ring->rx_itr_setting = I40E_ITR_RX_DEF;
}
adapter->num_active_queues = num_active_queues;
@@ -1417,7 +1400,7 @@ static int i40evf_init_rss(struct i40evf_adapter *adapter)
if (!RSS_PF(adapter)) {
/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
- if (adapter->vf_res->vf_offload_flags &
+ if (adapter->vf_res->vf_cap_flags &
VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
else
@@ -1458,6 +1441,7 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
q_vector->adapter = adapter;
q_vector->vsi = &adapter->vsi;
q_vector->v_idx = q_idx;
+ cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
netif_napi_add(adapter->netdev, &q_vector->napi,
i40evf_napi_poll, NAPI_POLL_WEIGHT);
}
@@ -1567,12 +1551,53 @@ static void i40evf_free_rss(struct i40evf_adapter *adapter)
}
/**
+ * i40evf_reinit_interrupt_scheme - Reallocate queues and vectors
+ * @adapter: board private structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40evf_reinit_interrupt_scheme(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ if (netif_running(netdev))
+ i40evf_free_traffic_irqs(adapter);
+ i40evf_free_misc_irq(adapter);
+ i40evf_reset_interrupt_capability(adapter);
+ i40evf_free_q_vectors(adapter);
+ i40evf_free_queues(adapter);
+
+ err = i40evf_init_interrupt_scheme(adapter);
+ if (err)
+ goto err;
+
+ netif_tx_stop_all_queues(netdev);
+
+ err = i40evf_request_misc_irq(adapter);
+ if (err)
+ goto err;
+
+ set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
+
+ i40evf_map_rings_to_vectors(adapter);
+
+ if (RSS_AQ(adapter))
+ adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
+ else
+ err = i40evf_init_rss(adapter);
+err:
+ return err;
+}
+
+/**
* i40evf_watchdog_timer - Periodic call-back timer
* @t: pointer to the timer_list embedded in the adapter
**/
-static void i40evf_watchdog_timer(unsigned long data)
+static void i40evf_watchdog_timer(struct timer_list *t)
{
- struct i40evf_adapter *adapter = (struct i40evf_adapter *)data;
+ struct i40evf_adapter *adapter = from_timer(adapter, t,
+ watchdog_timer);
schedule_work(&adapter->watchdog_task);
/* timer will be rescheduled in watchdog task */
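This follows the tree-wide timer API conversion: the callback now takes the struct timer_list pointer itself, and from_timer() is simply container_of() over the embedded timer field, so the call above expands to roughly:

    struct i40evf_adapter *adapter =
            container_of(t, struct i40evf_adapter, watchdog_timer);

which replaces the old unsigned long cast of the timer's .data member (see the init_timer() removal later in this patch).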
@@ -1678,6 +1703,16 @@ static void i40evf_watchdog_task(struct work_struct *work)
goto watchdog_done;
}
+ if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
+ i40evf_enable_vlan_stripping(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
+ i40evf_disable_vlan_stripping(adapter);
+ goto watchdog_done;
+ }
+
if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) {
i40evf_configure_queues(adapter);
goto watchdog_done;
@@ -1794,6 +1829,7 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
adapter->state = __I40EVF_DOWN;
+ wake_up(&adapter->down_waitqueue);
dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
}
@@ -1877,7 +1913,7 @@ static void i40evf_reset_task(struct work_struct *work)
}
continue_reset:
- if (netif_running(adapter->netdev)) {
+ if (netif_running(netdev)) {
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
adapter->link_up = false;
@@ -1901,8 +1937,15 @@ continue_reset:
if (err)
dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
err);
+ adapter->aq_required = 0;
+
+ if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) {
+ err = i40evf_reinit_interrupt_scheme(adapter);
+ if (err)
+ goto reset_err;
+ }
- adapter->aq_required = I40EVF_FLAG_AQ_GET_CONFIG;
+ adapter->aq_required |= I40EVF_FLAG_AQ_GET_CONFIG;
adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
/* re-add all MAC filters */
@@ -1932,6 +1975,15 @@ continue_reset:
if (err)
goto reset_err;
+ if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) {
+ err = i40evf_request_traffic_irqs(adapter,
+ netdev->name);
+ if (err)
+ goto reset_err;
+
+ adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
+ }
+
i40evf_configure(adapter);
i40evf_up_complete(adapter);
@@ -1939,12 +1991,13 @@ continue_reset:
i40evf_irq_enable(adapter, true);
} else {
adapter->state = __I40EVF_DOWN;
+ wake_up(&adapter->down_waitqueue);
}
return;
reset_err:
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
- i40evf_close(adapter->netdev);
+ i40evf_close(netdev);
}
/**
@@ -1957,8 +2010,8 @@ static void i40evf_adminq_task(struct work_struct *work)
container_of(work, struct i40evf_adapter, adminq_task);
struct i40e_hw *hw = &adapter->hw;
struct i40e_arq_event_info event;
- struct virtchnl_msg *v_msg;
- i40e_status ret;
+ enum virtchnl_ops v_op;
+ i40e_status ret, v_ret;
u32 val, oldval;
u16 pending;
@@ -1970,15 +2023,15 @@ static void i40evf_adminq_task(struct work_struct *work)
if (!event.msg_buf)
goto out;
- v_msg = (struct virtchnl_msg *)&event.desc;
do {
ret = i40evf_clean_arq_element(hw, &event, &pending);
- if (ret || !v_msg->v_opcode)
+ v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
+ v_ret = (i40e_status)le32_to_cpu(event.desc.cookie_low);
+
+ if (ret || !v_op)
break; /* No event to process or error cleaning ARQ */
- i40evf_virtchnl_completion(adapter, v_msg->v_opcode,
- (i40e_status)v_msg->v_retval,
- event.msg_buf,
+ i40evf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
event.msg_len);
if (pending != 0)
memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
@@ -2057,6 +2110,11 @@ static void i40evf_client_task(struct work_struct *work)
adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
goto out;
}
+ if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
+ i40evf_notify_client_l2_params(&adapter->vsi);
+ adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
+ goto out;
+ }
if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) {
i40evf_notify_client_close(&adapter->vsi, false);
adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
@@ -2065,11 +2123,6 @@ static void i40evf_client_task(struct work_struct *work)
if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) {
i40evf_notify_client_open(&adapter->vsi);
adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN;
- goto out;
- }
- if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
- i40evf_notify_client_l2_params(&adapter->vsi);
- adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
}
out:
clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
@@ -2238,6 +2291,7 @@ err_setup_tx:
static int i40evf_close(struct net_device *netdev)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int status;
if (adapter->state <= __I40EVF_DOWN_PENDING)
return 0;
@@ -2255,7 +2309,18 @@ static int i40evf_close(struct net_device *netdev)
* still active and can DMA into memory. Resources are cleared in
* i40evf_virtchnl_completion() after we get confirmation from the PF
* driver that the rings have been stopped.
+ *
+ * Also, we wait for state to transition to __I40EVF_DOWN before
+ * returning. State change occurs in i40evf_virtchnl_completion() after
+ * VF resources are released (which occurs after the PF driver has
+ * processed and responded to our admin queue commands).
*/
+
+ status = wait_event_timeout(adapter->down_waitqueue,
+ adapter->state == __I40EVF_DOWN,
+ msecs_to_jiffies(200));
+ if (!status)
+ netdev_warn(netdev, "Device resources not yet released\n");
return 0;
}
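The down_waitqueue handshake added here is spread across three hunks of this patch; condensed (fragment, driver context elided):

    /* probe: */
    init_waitqueue_head(&adapter->down_waitqueue);

    /* wherever the state machine reaches __I40EVF_DOWN: */
    adapter->state = __I40EVF_DOWN;
    wake_up(&adapter->down_waitqueue);

    /* close: wait_event_timeout() returns 0 only when the 200 ms
     * expire with the condition still false, hence the warning
     * path above.
     */
    status = wait_event_timeout(adapter->down_waitqueue,
                                adapter->state == __I40EVF_DOWN,
                                msecs_to_jiffies(200));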
@@ -2282,6 +2347,28 @@ static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
+ * i40evf_set_features - set the netdev feature flags
+ * @netdev: ptr to the netdev being adjusted
+ * @features: the feature set that the stack is suggesting
+ * Note: expects to be called while under rtnl_lock()
+ **/
+static int i40evf_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ if (!VLAN_ALLOWED(adapter))
+ return -EINVAL;
+
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
+ else
+ adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
+
+ return 0;
+}
+
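Note that i40evf_set_features() never talks to the PF directly; like the driver's other ndo callbacks it only records a request bit, and the watchdog task later turns pending bits into virtchnl messages, one per pass. Condensed from the two hunks in this patch:

    /* ndo_set_features: record the request */
    adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;

    /* watchdog task: issue one admin queue command per pass */
    if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
            i40evf_enable_vlan_stripping(adapter);
            goto watchdog_done;
    }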
+/**
* i40evf_features_check - Validate encapsulated packet conforms to limits
* @skb: skb buff
* @netdev: This physical port's netdev
@@ -2339,10 +2426,6 @@ out_err:
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
-#define I40EVF_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_TX |\
- NETIF_F_HW_VLAN_CTAG_RX |\
- NETIF_F_HW_VLAN_CTAG_FILTER)
-
/**
* i40evf_fix_features - fix up the netdev feature bits
* @netdev: our net device
@@ -2355,9 +2438,11 @@ static netdev_features_t i40evf_fix_features(struct net_device *netdev,
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- features &= ~I40EVF_VLAN_FEATURES;
- if (adapter->vf_res->vf_offload_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
- features |= I40EVF_VLAN_FEATURES;
+ if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
+ features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER);
+
return features;
}
@@ -2374,6 +2459,7 @@ static const struct net_device_ops i40evf_netdev_ops = {
.ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid,
.ndo_features_check = i40evf_features_check,
.ndo_fix_features = i40evf_fix_features,
+ .ndo_set_features = i40evf_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = i40evf_netpoll,
#endif
@@ -2411,9 +2497,9 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
int i40evf_process_config(struct i40evf_adapter *adapter)
{
struct virtchnl_vf_resource *vfres = adapter->vf_res;
+ int i, num_req_queues = adapter->num_req_queues;
struct net_device *netdev = adapter->netdev;
struct i40e_vsi *vsi = &adapter->vsi;
- int i;
netdev_features_t hw_enc_features;
netdev_features_t hw_features;
@@ -2427,6 +2513,23 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
return -ENODEV;
}
+ if (num_req_queues &&
+ num_req_queues != adapter->vsi_res->num_queue_pairs) {
+ /* Problem. The PF gave us a different number of queues than
+ * we had negotiated in our request. We need a reset to try to
+ * get back to a working state.
+ */
+ dev_err(&adapter->pdev->dev,
+ "Requested %d queues, but PF only gave us %d.\n",
+ num_req_queues,
+ adapter->vsi_res->num_queue_pairs);
+ adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
+ i40evf_schedule_reset(adapter);
+ return -ENODEV;
+ }
+ adapter->num_req_queues = 0;
+
hw_enc_features = NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM |
@@ -2443,7 +2546,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
/* advertise to stack only if offloads for encapsulated packets is
* supported
*/
- if (vfres->vf_offload_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
+ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM |
@@ -2453,7 +2556,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
NETIF_F_GSO_PARTIAL |
0;
- if (!(vfres->vf_offload_flags &
+ if (!(vfres->vf_cap_flags &
VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
netdev->gso_partial_features |=
NETIF_F_GSO_UDP_TUNNEL_CSUM;
@@ -2470,9 +2573,17 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
*/
hw_features = hw_enc_features;
+ /* Enable VLAN features if supported */
+ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
+ hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX);
+
netdev->hw_features |= hw_features;
- netdev->features |= hw_features | I40EVF_VLAN_FEATURES;
+ netdev->features |= hw_features;
+
+ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
adapter->vsi.id = adapter->vsi_res->vsi_id;
@@ -2481,7 +2592,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
vsi->netdev = adapter->netdev;
vsi->qs_handle = adapter->vsi_res->qset_handle;
- if (vfres->vf_offload_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
adapter->rss_key_size = vfres->rss_key_size;
adapter->rss_lut_size = vfres->rss_lut_size;
} else {
@@ -2625,7 +2736,7 @@ static void i40evf_init_task(struct work_struct *work)
/* MTU range: 68 - 9710 */
netdev->min_mtu = ETH_MIN_MTU;
- netdev->max_mtu = I40E_MAX_RXBUFFER - (ETH_HLEN + ETH_FCS_LEN);
+ netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
@@ -2638,9 +2749,7 @@ static void i40evf_init_task(struct work_struct *work)
ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
}
- init_timer(&adapter->watchdog_timer);
- adapter->watchdog_timer.function = &i40evf_watchdog_timer;
- adapter->watchdog_timer.data = (unsigned long)adapter;
+ timer_setup(&adapter->watchdog_timer, i40evf_watchdog_timer, 0);
mod_timer(&adapter->watchdog_timer, jiffies + 1);
adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
@@ -2649,7 +2758,7 @@ static void i40evf_init_task(struct work_struct *work)
if (err)
goto err_sw_init;
i40evf_map_rings_to_vectors(adapter);
- if (adapter->vf_res->vf_offload_flags &
+ if (adapter->vf_res->vf_cap_flags &
VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;
@@ -2683,6 +2792,7 @@ static void i40evf_init_task(struct work_struct *work)
adapter->state = __I40EVF_DOWN;
set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
i40evf_misc_irq_enable(adapter);
+ wake_up(&adapter->down_waitqueue);
adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
@@ -2844,6 +2954,9 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
schedule_delayed_work(&adapter->init_task,
msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
+ /* Set up the wait queue for signaling the transition to down state */
+ init_waitqueue_head(&adapter->down_waitqueue);
+
return 0;
err_ioremap: