Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c        59
-rw-r--r--  net/core/neighbour.c   6
-rw-r--r--  net/core/skbuff.c     47
3 files changed, 46 insertions, 66 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 1e0a1847c3bb..09cb3f6dc40c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3300,18 +3300,18 @@ ncls:
&& !skb_pfmemalloc_protocol(skb))
goto drop;
- rx_handler = rcu_dereference(skb->dev->rx_handler);
if (vlan_tx_tag_present(skb)) {
if (pt_prev) {
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = NULL;
}
- if (vlan_do_receive(&skb, !rx_handler))
+ if (vlan_do_receive(&skb))
goto another_round;
else if (unlikely(!skb))
goto unlock;
}
+ rx_handler = rcu_dereference(skb->dev->rx_handler);
if (rx_handler) {
if (pt_prev) {
ret = deliver_skb(skb, pt_prev, orig_dev);
@@ -3331,6 +3331,9 @@ ncls:
}
}
+ if (vlan_tx_nonzero_tag_present(skb))
+ skb->pkt_type = PACKET_OTHERHOST;
+
/* deliver only exact match when indicated */
null_or_dev = deliver_exact ? skb->dev : NULL;
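
The two hunks above reorder rx_handler lookup and VLAN processing: vlan_do_receive() no longer needs to know whether an rx_handler is attached, because the rcu_dereference() now happens only after the VLAN tag has been handled. The new vlan_tx_nonzero_tag_present() check then marks frames that still carry a non-priority VLAN tag (i.e. no local vlan device claimed them) as PACKET_OTHERHOST, so they are not delivered to protocol handlers or taps. The helper's definition is not part of this hunk; a plausible sketch, assuming the existing vlan_tx_tag_present() accessor and VLAN_VID_MASK, would be roughly:

    /* sketch, not shown in this diff: tag present and VID bits nonzero */
    #define vlan_tx_nonzero_tag_present(__skb) \
            (vlan_tx_tag_present(__skb) && \
             ((__skb)->vlan_tci & VLAN_VID_MASK))

A priority-tagged frame (VID 0) keeps its normal pkt_type, since only the PCP bits are set and the VID mask comes out zero.
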
@@ -3471,17 +3474,31 @@ out:
return netif_receive_skb(skb);
}
-inline void napi_gro_flush(struct napi_struct *napi)
+/* napi->gro_list contains packets ordered by age.
+ * youngest packets at the head of it.
+ * Complete skbs in reverse order to reduce latencies.
+ */
+void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
- struct sk_buff *skb, *next;
+ struct sk_buff *skb, *prev = NULL;
- for (skb = napi->gro_list; skb; skb = next) {
- next = skb->next;
+ /* scan list and build reverse chain */
+ for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
+ skb->prev = prev;
+ prev = skb;
+ }
+
+ for (skb = prev; skb; skb = prev) {
skb->next = NULL;
+
+ if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
+ return;
+
+ prev = skb->prev;
napi_gro_complete(skb);
+ napi->gro_count--;
}
- napi->gro_count = 0;
napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);
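
napi_gro_flush() now takes a flush_old flag and completes gro_list from its tail: the list is kept youngest-first, so threading a temporary reverse chain through skb->prev lets the oldest packets complete first, and the early return on age == jiffies retains packets queued during the current jiffy. Each visited skb has its ->next cleared before the age check, which detaches the already-completed older packets, so an early return still leaves a consistent, shorter gro_list with its head unchanged. The two call patterns introduced by this patch:

    napi_gro_flush(napi, false);      /* napi_complete(): flush everything */
    napi_gro_flush(napi, HZ >= 1000); /* net_rx_action(): may keep young skbs */
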
@@ -3542,6 +3559,7 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
napi->gro_count++;
NAPI_GRO_CB(skb)->count = 1;
+ NAPI_GRO_CB(skb)->age = jiffies;
skb_shinfo(skb)->gso_size = skb_gro_len(skb);
skb->next = napi->gro_list;
napi->gro_list = skb;
@@ -3631,20 +3649,22 @@ gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
}
EXPORT_SYMBOL(napi_skb_finish);
-void skb_gro_reset_offset(struct sk_buff *skb)
+static void skb_gro_reset_offset(struct sk_buff *skb)
{
+ const struct skb_shared_info *pinfo = skb_shinfo(skb);
+ const skb_frag_t *frag0 = &pinfo->frags[0];
+
NAPI_GRO_CB(skb)->data_offset = 0;
NAPI_GRO_CB(skb)->frag0 = NULL;
NAPI_GRO_CB(skb)->frag0_len = 0;
if (skb->mac_header == skb->tail &&
- !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
- NAPI_GRO_CB(skb)->frag0 =
- skb_frag_address(&skb_shinfo(skb)->frags[0]);
- NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]);
+ pinfo->nr_frags &&
+ !PageHighMem(skb_frag_page(frag0))) {
+ NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
+ NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
}
}
-EXPORT_SYMBOL(skb_gro_reset_offset);
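
skb_gro_reset_offset() becomes static (after this change all of its callers live in dev.c, so the export is dropped) and gains a pinfo->nr_frags check: a purely linear skb has zero fragments, and frags[0] would otherwise be read uninitialized before the PageHighMem() test. The guard pattern, in isolation:

    /* sketch: frags[] entries are only valid up to nr_frags - 1 */
    if (pinfo->nr_frags && !PageHighMem(skb_frag_page(&pinfo->frags[0])))
            use_frag0(&pinfo->frags[0]); /* hypothetical consumer */
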
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
@@ -3876,7 +3896,7 @@ void napi_complete(struct napi_struct *n)
if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
return;
- napi_gro_flush(n);
+ napi_gro_flush(n, false);
local_irq_save(flags);
__napi_complete(n);
local_irq_restore(flags);
@@ -3981,8 +4001,17 @@ static void net_rx_action(struct softirq_action *h)
local_irq_enable();
napi_complete(n);
local_irq_disable();
- } else
+ } else {
+ if (n->gro_list) {
+ /* flush too old packets
+ * If HZ < 1000, flush all packets.
+ */
+ local_irq_enable();
+ napi_gro_flush(n, HZ >= 1000);
+ local_irq_disable();
+ }
list_move_tail(&n->poll_list, &sd->poll_list);
+ }
}
netpoll_poll_unlock(have);
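
In net_rx_action(), a NAPI poller that exhausts its budget now flushes its GRO backlog before being rotated to the tail of the poll list. The HZ >= 1000 argument encodes a latency bound: a packet retained by flush_old ages at most one jiffy, i.e. 1000000/HZ microseconds, so partial flushing is only acceptable when a jiffy is 1 ms or less. A hypothetical restatement of that policy (gro_flush_old_only() is not a real helper):

    /* sketch of the policy above */
    static bool gro_flush_old_only(void)
    {
            return HZ >= 1000; /* one jiffy <= 1000 us: young skbs may wait */
    }

At HZ == 100 or HZ == 250 a retained packet could sit for 10 ms or 4 ms, so in those configurations everything is flushed instead.
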
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index baca771caae2..22571488730a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1301,8 +1301,6 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
if (!dst)
goto discard;
- __skb_pull(skb, skb_network_offset(skb));
-
if (!neigh_event_send(neigh, skb)) {
int err;
struct net_device *dev = neigh->dev;
@@ -1312,6 +1310,7 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
neigh_hh_init(neigh, dst);
do {
+ __skb_pull(skb, skb_network_offset(skb));
seq = read_seqbegin(&neigh->ha_lock);
err = dev_hard_header(skb, dev, ntohs(skb->protocol),
neigh->ha, NULL, skb->len);
@@ -1342,9 +1341,8 @@ int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
unsigned int seq;
int err;
- __skb_pull(skb, skb_network_offset(skb));
-
do {
+ __skb_pull(skb, skb_network_offset(skb));
seq = read_seqbegin(&neigh->ha_lock);
err = dev_hard_header(skb, dev, ntohs(skb->protocol),
neigh->ha, NULL, skb->len);
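
Both neighbour.c hunks move __skb_pull() inside the seqlock retry loop. dev_hard_header() pushes the link-layer header, moving skb->data backwards; with the old placement, a read_seqretry() failure would push a second header onto an skb that had only been pulled once, eventually exhausting the headroom. Re-pulling to the network header at the top of every iteration makes the loop body safe to repeat, which is the general requirement for a seqlock read side:

    /* generic seqlock read-side pattern: the body may run repeatedly,
     * so any state it derives (or, as above, rebuilds) must be reset
     * on each pass */
    unsigned int seq;
    do {
            seq = read_seqbegin(&neigh->ha_lock);
            /* rebuild state guarded by ha_lock */
    } while (read_seqretry(&neigh->ha_lock, seq));
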
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index cdc28598f4ef..6e04b1fa11f2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -655,53 +655,6 @@ void consume_skb(struct sk_buff *skb)
}
EXPORT_SYMBOL(consume_skb);
-/**
- * skb_recycle - clean up an skb for reuse
- * @skb: buffer
- *
- * Recycles the skb to be reused as a receive buffer. This
- * function does any necessary reference count dropping, and
- * cleans up the skbuff as if it just came from __alloc_skb().
- */
-void skb_recycle(struct sk_buff *skb)
-{
- struct skb_shared_info *shinfo;
-
- skb_release_head_state(skb);
-
- shinfo = skb_shinfo(skb);
- memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
- atomic_set(&shinfo->dataref, 1);
-
- memset(skb, 0, offsetof(struct sk_buff, tail));
- skb->data = skb->head + NET_SKB_PAD;
- skb_reset_tail_pointer(skb);
-}
-EXPORT_SYMBOL(skb_recycle);
-
-/**
- * skb_recycle_check - check if skb can be reused for receive
- * @skb: buffer
- * @skb_size: minimum receive buffer size
- *
- * Checks that the skb passed in is not shared or cloned, and
- * that it is linear and its head portion at least as large as
- * skb_size so that it can be recycled as a receive buffer.
- * If these conditions are met, this function does any necessary
- * reference count dropping and cleans up the skbuff as if it
- * just came from __alloc_skb().
- */
-bool skb_recycle_check(struct sk_buff *skb, int skb_size)
-{
- if (!skb_is_recycleable(skb, skb_size))
- return false;
-
- skb_recycle(skb);
-
- return true;
-}
-EXPORT_SYMBOL(skb_recycle_check);
-
static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
new->tstamp = old->tstamp;