Diffstat (limited to 'drivers/net/ethernet/sfc/rx.c')
-rw-r--r--	drivers/net/ethernet/sfc/rx.c	35
1 file changed, 13 insertions, 22 deletions
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 02b0b5272c14..42443f434569 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -335,7 +335,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
 	/* Calculate current fill level, and exit if we don't need to fill */
 	fill_level = (rx_queue->added_count - rx_queue->removed_count);
-	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
+	EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
 	if (fill_level >= rx_queue->fast_fill_trigger)
 		goto out;
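
Note: the EFX_*_PARANOID checks are the driver's debug-only consistency asserts. Replacing EFX_BUG_ON_PARANOID with EFX_WARN_ON_ONCE_PARANOID turns a fatal BUG() into a one-shot warning, so a tripped check logs a stack trace instead of halting the machine. A minimal sketch of how such macros are typically shaped (an assumption, not copied from the driver's net_driver.h):

	#ifdef DEBUG
	#define EFX_WARN_ON_ONCE_PARANOID(x)	WARN_ON_ONCE(x)	/* warn once, keep running */
	#define EFX_WARN_ON_PARANOID(x)		WARN_ON(x)	/* warn every time */
	#else
	#define EFX_WARN_ON_ONCE_PARANOID(x)	do {} while (0)	/* compiled out */
	#define EFX_WARN_ON_PARANOID(x)		do {} while (0)
	#endif
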
@@ -347,7 +347,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
 	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
 	space = rx_queue->max_fill - fill_level;
-	EFX_BUG_ON_PARANOID(space < batch_size);
+	EFX_WARN_ON_ONCE_PARANOID(space < batch_size);
 	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
 		   "RX queue %d fast-filling descriptor ring from"
@@ -400,21 +400,10 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
 	 */
 	rx_buf->flags |= EFX_RX_PKT_DISCARD;
-	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
-		if (net_ratelimit())
-			netif_err(efx, rx_err, efx->net_dev,
-				  " RX queue %d seriously overlength "
-				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
-				  efx_rx_queue_index(rx_queue), len, max_len,
-				  efx->type->rx_buffer_padding);
-		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
-	} else {
-		if (net_ratelimit())
-			netif_err(efx, rx_err, efx->net_dev,
-				  " RX queue %d overlength RX event "
-				  "(0x%x > 0x%x)\n",
-				  efx_rx_queue_index(rx_queue), len, max_len);
-	}
+	if (net_ratelimit())
+		netif_err(efx, rx_err, efx->net_dev,
+			  "RX queue %d overlength RX event (%#x > %#x)\n",
+			  efx_rx_queue_index(rx_queue), len, max_len);
 	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
 }
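
Note: this hunk collapses two rate-limited error paths (the EFX_WORKAROUND_8071 "seriously overlength" branch, which also scheduled an RX recovery reset, and the plain overlength branch) into a single message. It also swaps the explicit "0x%x" format for "%#x"; the two print identically for non-zero values, but the # flag suppresses the prefix when the value is zero. A small illustration (hypothetical values, not driver code):

	/* Both lines print "len 0x40 max 0x20"; for the value 0,
	 * "%#x" prints just "0" while "0x%x" prints "0x0".
	 */
	printk(KERN_ERR "len %#x max %#x\n", 0x40, 0x20);
	printk(KERN_ERR "len 0x%x max 0x%x\n", 0x40, 0x20);
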
@@ -445,6 +434,7 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
 			 PKT_HASH_TYPE_L3);
 	skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
 			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
+	skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
 	for (;;) {
 		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
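
Note: skb->csum_level qualifies CHECKSUM_UNNECESSARY for encapsulated traffic: level 0 means only the outermost checksum was validated by hardware, level 1 means one inner (e.g. tunnelled) checksum was validated as well. The double negation collapses the flag bit to exactly 0 or 1, which is what the narrow csum_level field in struct sk_buff expects. A standalone illustration (EFX_RX_PKT_CSUM_LEVEL's real value lives elsewhere in the driver; 0x40 here is made up):

	unsigned int flags = 0x40;		/* pretend EFX_RX_PKT_CSUM_LEVEL == 0x40 */
	unsigned int level = !!(flags & 0x40);	/* yields 1, not 0x40 */
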
@@ -486,7 +476,7 @@ static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
 		return NULL;
 	}
-	EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
+	EFX_WARN_ON_ONCE_PARANOID(rx_buf->len < hdr_len);
 	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
 	       efx->rx_prefix_size + hdr_len);
@@ -632,8 +622,10 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
 	/* Set the SKB flags */
 	skb_checksum_none_assert(skb);
-	if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
+	if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
+	}
 	efx_rx_skb_attach_timestamp(channel, skb);
@@ -676,8 +668,7 @@ void __efx_rx_packet(struct efx_channel *channel)
 	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
 		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
-	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb &&
-	    !efx_channel_busy_polling(channel))
+	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
 		efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
 	else
 		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
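
Note: with the per-packet busy-polling test removed, any EFX_RX_PKT_TCP buffer now takes the GRO path whenever the channel type has no special receive_skb handler; busy polling is presumably handled by core NAPI rather than gated in the driver. For reference, a minimal sketch of frag-based GRO delivery of the kind efx_rx_packet_gro builds on (illustrative only, not the driver's code):

	static void example_gro_deliver(struct napi_struct *napi, struct page *page,
					unsigned int offset, unsigned int len)
	{
		struct sk_buff *skb = napi_get_frags(napi);	/* per-NAPI skb for frag RX */

		if (unlikely(!skb))
			return;
		skb_fill_page_desc(skb, 0, page, offset, len);	/* attach the page fragment */
		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
		napi_gro_frags(napi);				/* hand off to GRO */
	}
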
@@ -693,7 +684,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
 	/* Create the smallest power-of-two aligned ring */
 	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
-	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
 	rx_queue->ptr_mask = entries - 1;
 	netif_dbg(efx, probe, efx->net_dev,
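
Note: rounding the ring size up to a power of two lets the driver keep a ptr_mask and wrap descriptor indices with a single AND instead of a modulo. Illustrative arithmetic (values made up):

	unsigned int entries = roundup_pow_of_two(768);	/* -> 1024 */
	unsigned int ptr_mask = entries - 1;		/* -> 0x3ff */
	unsigned int index = 1030 & ptr_mask;		/* -> 6, wraps without '%' */
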