Diffstat (limited to 'net/core/gro.c')
-rw-r--r--  net/core/gro.c | 14
1 file changed, 5 insertions, 9 deletions
diff --git a/net/core/gro.c b/net/core/gro.c
index b3b43de1a650..d1f44084e978 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -98,7 +98,6 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
 	unsigned int headlen = skb_headlen(skb);
 	unsigned int len = skb_gro_len(skb);
 	unsigned int delta_truesize;
-	unsigned int gro_max_size;
 	unsigned int new_truesize;
 	struct sk_buff *lp;
 	int segs;
@@ -112,12 +111,8 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
 	if (p->pp_recycle != skb->pp_recycle)
 		return -ETOOMANYREFS;
 
-	/* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
-	gro_max_size = p->protocol == htons(ETH_P_IPV6) ?
-			READ_ONCE(p->dev->gro_max_size) :
-			READ_ONCE(p->dev->gro_ipv4_max_size);
-
-	if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
+	if (unlikely(p->len + len >= netif_get_gro_max_size(p->dev, p) ||
+		     NAPI_GRO_CB(skb)->flush))
 		return -E2BIG;
 
 	if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
@@ -374,7 +369,7 @@ static void gro_list_prepare(const struct list_head *head,
 				       skb_mac_header(skb),
 				       maclen);
 
-		/* in most common scenarions 'slow_gro' is 0
+		/* in most common scenarios 'slow_gro' is 0
 		 * otherwise we are already on some slower paths
 		 * either skip all the infrequent tests altogether or
 		 * avoid trying too hard to skip each of them individually
@@ -408,7 +403,8 @@ static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
 	pinfo = skb_shinfo(skb);
 	frag0 = &pinfo->frags[0];
 
-	if (pinfo->nr_frags && !PageHighMem(skb_frag_page(frag0)) &&
+	if (pinfo->nr_frags && skb_frag_page(frag0) &&
+	    !PageHighMem(skb_frag_page(frag0)) &&
 	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
 		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
 		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
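
The first hunk replaces the open-coded per-protocol limit lookup in skb_gro_receive() with a call to netif_get_gro_max_size(); judging only from the lines removed here, the helper presumably looks roughly like the sketch below (its exact location and signature in the headers are an assumption, not shown by this diff):

/* Sketch of the assumed helper, reconstructed from the removed open-coded
 * logic above; not copied from this diff.
 */
static inline unsigned int netif_get_gro_max_size(const struct net_device *dev,
						  const struct sk_buff *skb)
{
	/* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
	return skb->protocol == htons(ETH_P_IPV6) ?
	       READ_ONCE(dev->gro_max_size) :
	       READ_ONCE(dev->gro_ipv4_max_size);
}

Folding the IPv6-vs-IPv4 limit selection into one helper keeps that policy in a single place, so skb_gro_receive() only has to compare p->len + len against the returned value. The last hunk is independent: it makes skb_gro_reset_offset() check that frag0 actually has a page before passing it to PageHighMem().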