path: root/drivers/net/virtio_net.c
author    James Morris <james.l.morris@oracle.com>  2017-11-29 12:47:41 +1100
committer James Morris <james.l.morris@oracle.com>  2017-11-29 12:47:41 +1100
commit    cf40a76e7d5874bb25f4404eecc58a2e033af885 (patch)
tree      8fd81cbea03c87b3d41d7ae5b1d11eadd35d6ef5  /drivers/net/virtio_net.c
parent    ab5348c9c23cd253f5902980d2d8fe067dc24c82 (diff)
parent    4fbd8d194f06c8a3fd2af1ce560ddb31f7ec8323 (diff)
Merge tag 'v4.15-rc1' into next-seccomp
Linux 4.15-rc1
Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r--  drivers/net/virtio_net.c  | 444
1 file changed, 277 insertions(+), 167 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 99a26a9efec1..19a985ef9104 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -29,6 +29,7 @@
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
+#include <linux/filter.h>
#include <net/route.h>
static int napi_weight = NAPI_POLL_WEIGHT;
@@ -57,6 +58,13 @@ DECLARE_EWMA(pkt_len, 0, 64)
#define VIRTNET_DRIVER_VERSION "1.0.0"
+static const unsigned long guest_offloads[] = {
+ VIRTIO_NET_F_GUEST_TSO4,
+ VIRTIO_NET_F_GUEST_TSO6,
+ VIRTIO_NET_F_GUEST_ECN,
+ VIRTIO_NET_F_GUEST_UFO
+};
+
struct virtnet_stats {
struct u64_stats_sync tx_syncp;
struct u64_stats_sync rx_syncp;
@@ -164,10 +172,13 @@ struct virtnet_info {
u8 ctrl_promisc;
u8 ctrl_allmulti;
u16 ctrl_vid;
+ u64 ctrl_offloads;
/* Ethtool settings */
u8 duplex;
u32 speed;
+
+ unsigned long guest_offloads;
};
struct padded_vnet_hdr {
@@ -270,6 +281,23 @@ static void skb_xmit_done(struct virtqueue *vq)
netif_wake_subqueue(vi->dev, vq2txq(vq));
}
+#define MRG_CTX_HEADER_SHIFT 22
+static void *mergeable_len_to_ctx(unsigned int truesize,
+ unsigned int headroom)
+{
+ return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
+}
+
+static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
+{
+ return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
+}
+
+static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
+{
+ return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
+}
+
/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
struct receive_queue *rq,
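Note: the three helpers added above pack a receive buffer's truesize and headroom into the single pointer-sized context value later handed to virtqueue_add_inbuf_ctx(): truesize lives in the low 22 bits, headroom in the bits above. A minimal stand-alone sketch of the same packing (the sample values are illustrative, not taken from the driver):

/* ctx_pack_demo.c: user-space model of the mergeable-buffer context encoding. */
#include <assert.h>
#include <stdio.h>

#define MRG_CTX_HEADER_SHIFT 22

static void *len_to_ctx(unsigned int truesize, unsigned int headroom)
{
	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int ctx_to_headroom(void *ctx)
{
	return (unsigned long)ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int ctx_to_truesize(void *ctx)
{
	return (unsigned long)ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}

int main(void)
{
	void *ctx = len_to_ctx(1920, 256);	/* hypothetical truesize/headroom */

	assert(ctx_to_truesize(ctx) == 1920);
	assert(ctx_to_headroom(ctx) == 256);
	printf("truesize=%u headroom=%u\n",
	       ctx_to_truesize(ctx), ctx_to_headroom(ctx));
	return 0;
}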
@@ -292,7 +320,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
hdr_len = vi->hdr_len;
if (vi->mergeable_rx_bufs)
- hdr_padded_len = sizeof *hdr;
+ hdr_padded_len = sizeof(*hdr);
else
hdr_padded_len = sizeof(struct padded_vnet_hdr);
@@ -345,9 +373,20 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
return skb;
}
-static bool virtnet_xdp_xmit(struct virtnet_info *vi,
- struct receive_queue *rq,
- struct xdp_buff *xdp)
+static void virtnet_xdp_flush(struct net_device *dev)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct send_queue *sq;
+ unsigned int qp;
+
+ qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
+ sq = &vi->sq[qp];
+
+ virtqueue_kick(sq->vq);
+}
+
+static bool __virtnet_xdp_xmit(struct virtnet_info *vi,
+ struct xdp_buff *xdp)
{
struct virtio_net_hdr_mrg_rxbuf *hdr;
unsigned int len;
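virtnet_xdp_flush() above selects the per-CPU XDP transmit queue as curr_queue_pairs - xdp_queue_pairs + smp_processor_id(): the last xdp_queue_pairs TX queues are reserved for XDP, roughly one per possible CPU, so the XDP transmit path needs no queue lock. A toy model of that index arithmetic (the queue counts are made up):

/* xdp_txq_demo.c: models the XDP TX queue selection used above. */
#include <stdio.h>

static unsigned int xdp_txq(unsigned int curr_queue_pairs,
			    unsigned int xdp_queue_pairs,
			    unsigned int cpu)
{
	return curr_queue_pairs - xdp_queue_pairs + cpu;
}

int main(void)
{
	unsigned int curr = 8, xdp = 4;	/* hypothetical: 4 stack pairs + 4 XDP pairs */
	unsigned int cpu;

	for (cpu = 0; cpu < xdp; cpu++)
		printf("cpu%u -> txq %u\n", cpu, xdp_txq(curr, xdp, cpu));
	return 0;
}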
@@ -381,28 +420,104 @@ static bool virtnet_xdp_xmit(struct virtnet_info *vi,
return false;
}
- virtqueue_kick(sq->vq);
return true;
}
+static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ bool sent = __virtnet_xdp_xmit(vi, xdp);
+
+ if (!sent)
+ return -ENOSPC;
+ return 0;
+}
+
static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
}
+/* We copy the packet for XDP in the following cases:
+ *
+ * 1) Packet is scattered across multiple rx buffers.
+ * 2) Headroom space is insufficient.
+ *
+ * This is inefficient but it's a temporary condition that
+ * we hit right after XDP is enabled and until queue is refilled
+ * with large buffers with sufficient headroom - so it should affect
+ * at most queue size packets.
+ * Afterwards, the conditions to enable
+ * XDP should preclude the underlying device from sending packets
+ * across multiple buffers (num_buf > 1), and we make sure buffers
+ * have enough headroom.
+ */
+static struct page *xdp_linearize_page(struct receive_queue *rq,
+ u16 *num_buf,
+ struct page *p,
+ int offset,
+ int page_off,
+ unsigned int *len)
+{
+ struct page *page = alloc_page(GFP_ATOMIC);
+
+ if (!page)
+ return NULL;
+
+ memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
+ page_off += *len;
+
+ while (--*num_buf) {
+ unsigned int buflen;
+ void *buf;
+ int off;
+
+ buf = virtqueue_get_buf(rq->vq, &buflen);
+ if (unlikely(!buf))
+ goto err_buf;
+
+ p = virt_to_head_page(buf);
+ off = buf - page_address(p);
+
+ /* guard against a misconfigured or uncooperative backend that
+ * is sending packet larger than the MTU.
+ */
+ if ((page_off + buflen) > PAGE_SIZE) {
+ put_page(p);
+ goto err_buf;
+ }
+
+ memcpy(page_address(page) + page_off,
+ page_address(p) + off, buflen);
+ page_off += buflen;
+ put_page(p);
+ }
+
+ /* Headroom does not contribute to packet length */
+ *len = page_off - VIRTIO_XDP_HEADROOM;
+ return page;
+err_buf:
+ __free_pages(page, 0);
+ return NULL;
+}
+
static struct sk_buff *receive_small(struct net_device *dev,
struct virtnet_info *vi,
struct receive_queue *rq,
- void *buf, unsigned int len)
+ void *buf, void *ctx,
+ unsigned int len,
+ bool *xdp_xmit)
{
struct sk_buff *skb;
struct bpf_prog *xdp_prog;
- unsigned int xdp_headroom = virtnet_get_headroom(vi);
+ unsigned int xdp_headroom = (unsigned long)ctx;
unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
unsigned int headroom = vi->hdr_len + header_offset;
unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- unsigned int delta = 0;
+ struct page *page = virt_to_head_page(buf);
+ unsigned int delta = 0, err;
+ struct page *xdp_page;
len -= vi->hdr_len;
rcu_read_lock();
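The comment above ("We copy the packet for XDP in the following cases") describes what xdp_linearize_page() does: scattered or headroom-poor buffers are copied into one freshly allocated page with headroom in front, and the copy is abandoned if the total would overflow the page. A stand-alone model of that copy loop (the fragment contents and the 256-byte headroom are illustrative):

/* linearize_demo.c: user-space model of the xdp_linearize_page() copy loop. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE    4096
#define XDP_HEADROOM 256	/* stands in for VIRTIO_XDP_HEADROOM */

/* Copy num_buf fragments into one page, leaving headroom at the front.
 * Returns the packet length (headroom excluded) or -1 on overflow. */
static int linearize(char *page, const char **frag, const int *frag_len, int num_buf)
{
	int off = XDP_HEADROOM;
	int i;

	for (i = 0; i < num_buf; i++) {
		if (off + frag_len[i] > PAGE_SIZE)
			return -1;	/* larger than fits in one page: drop */
		memcpy(page + off, frag[i], frag_len[i]);
		off += frag_len[i];
	}
	return off - XDP_HEADROOM;	/* headroom does not count toward len */
}

int main(void)
{
	const char *frags[] = { "hello ", "xdp ", "world" };
	const int lens[] = { 6, 4, 5 };
	char *page = calloc(1, PAGE_SIZE);
	int len = linearize(page, frags, lens, 3);

	printf("linearized %d bytes: %.*s\n", len, len, page + XDP_HEADROOM);
	free(page);
	return 0;
}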
@@ -416,8 +531,30 @@ static struct sk_buff *receive_small(struct net_device *dev,
if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
goto err_xdp;
+ if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
+ int offset = buf - page_address(page) + header_offset;
+ unsigned int tlen = len + vi->hdr_len;
+ u16 num_buf = 1;
+
+ xdp_headroom = virtnet_get_headroom(vi);
+ header_offset = VIRTNET_RX_PAD + xdp_headroom;
+ headroom = vi->hdr_len + header_offset;
+ buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ xdp_page = xdp_linearize_page(rq, &num_buf, page,
+ offset, header_offset,
+ &tlen);
+ if (!xdp_page)
+ goto err_xdp;
+
+ buf = page_address(xdp_page);
+ put_page(page);
+ page = xdp_page;
+ }
+
xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
xdp.data = xdp.data_hard_start + xdp_headroom;
+ xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -428,8 +565,16 @@ static struct sk_buff *receive_small(struct net_device *dev,
delta = orig_data - xdp.data;
break;
case XDP_TX:
- if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp)))
+ if (unlikely(!__virtnet_xdp_xmit(vi, &xdp)))
trace_xdp_exception(vi->dev, xdp_prog, act);
+ else
+ *xdp_xmit = true;
+ rcu_read_unlock();
+ goto xdp_xmit;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(dev, &xdp, xdp_prog);
+ if (!err)
+ *xdp_xmit = true;
rcu_read_unlock();
goto xdp_xmit;
default:
@@ -444,7 +589,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
skb = build_skb(buf, buflen);
if (!skb) {
- put_page(virt_to_head_page(buf));
+ put_page(page);
goto err;
}
skb_reserve(skb, headroom - delta);
@@ -460,7 +605,7 @@ err:
err_xdp:
rcu_read_unlock();
dev->stats.rx_dropped++;
- put_page(virt_to_head_page(buf));
+ put_page(page);
xdp_xmit:
return NULL;
}
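Both receive paths now report XDP_TX and XDP_REDIRECT verdicts back to the NAPI poll loop through *xdp_xmit. For reference, a minimal, hypothetical XDP program that would exercise the XDP_TX branch above, bouncing every complete Ethernet frame back out the virtio-net device (object and device names are illustrative):

/* xdp_bounce.c: attach with e.g. "ip link set dev eth0 xdp obj xdp_bounce.o sec xdp". */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_bounce(struct xdp_md *ctx)
{
	void *data     = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	if (data + 14 > data_end)	/* not even an Ethernet header */
		return XDP_DROP;
	return XDP_TX;			/* queued by __virtnet_xdp_xmit() above */
}

char _license[] SEC("license") = "GPL";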
@@ -485,72 +630,13 @@ err:
return NULL;
}
-/* The conditions to enable XDP should preclude the underlying device from
- * sending packets across multiple buffers (num_buf > 1). However per spec
- * it does not appear to be illegal to do so but rather just against convention.
- * So in order to avoid making a system unresponsive the packets are pushed
- * into a page and the XDP program is run. This will be extremely slow and we
- * push a warning to the user to fix this as soon as possible. Fixing this may
- * require resolving the underlying hardware to determine why multiple buffers
- * are being received or simply loading the XDP program in the ingress stack
- * after the skb is built because there is no advantage to running it here
- * anymore.
- */
-static struct page *xdp_linearize_page(struct receive_queue *rq,
- u16 *num_buf,
- struct page *p,
- int offset,
- unsigned int *len)
-{
- struct page *page = alloc_page(GFP_ATOMIC);
- unsigned int page_off = VIRTIO_XDP_HEADROOM;
-
- if (!page)
- return NULL;
-
- memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
- page_off += *len;
-
- while (--*num_buf) {
- unsigned int buflen;
- void *buf;
- int off;
-
- buf = virtqueue_get_buf(rq->vq, &buflen);
- if (unlikely(!buf))
- goto err_buf;
-
- p = virt_to_head_page(buf);
- off = buf - page_address(p);
-
- /* guard against a misconfigured or uncooperative backend that
- * is sending packet larger than the MTU.
- */
- if ((page_off + buflen) > PAGE_SIZE) {
- put_page(p);
- goto err_buf;
- }
-
- memcpy(page_address(page) + page_off,
- page_address(p) + off, buflen);
- page_off += buflen;
- put_page(p);
- }
-
- /* Headroom does not contribute to packet length */
- *len = page_off - VIRTIO_XDP_HEADROOM;
- return page;
-err_buf:
- __free_pages(page, 0);
- return NULL;
-}
-
static struct sk_buff *receive_mergeable(struct net_device *dev,
struct virtnet_info *vi,
struct receive_queue *rq,
void *buf,
void *ctx,
- unsigned int len)
+ unsigned int len,
+ bool *xdp_xmit)
{
struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
@@ -559,6 +645,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
struct sk_buff *head_skb, *curr_skb;
struct bpf_prog *xdp_prog;
unsigned int truesize;
+ unsigned int headroom = mergeable_ctx_to_headroom(ctx);
+ int err;
head_skb = NULL;
@@ -571,10 +659,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
u32 act;
/* This happens when rx buffer size is underestimated */
- if (unlikely(num_buf > 1)) {
+ if (unlikely(num_buf > 1 ||
+ headroom < virtnet_get_headroom(vi))) {
/* linearize data for XDP */
xdp_page = xdp_linearize_page(rq, &num_buf,
- page, offset, &len);
+ page, offset,
+ VIRTIO_XDP_HEADROOM,
+ &len);
if (!xdp_page)
goto err_xdp;
offset = VIRTIO_XDP_HEADROOM;
@@ -596,9 +687,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
data = page_address(xdp_page) + offset;
xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
xdp.data = data + vi->hdr_len;
+ xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + (len - vi->hdr_len);
act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ if (act != XDP_PASS)
+ ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
+
switch (act) {
case XDP_PASS:
/* recalculate offset to account for any header
@@ -614,18 +709,24 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
put_page(page);
head_skb = page_to_skb(vi, rq, xdp_page,
offset, len, PAGE_SIZE);
- ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
return head_skb;
}
break;
case XDP_TX:
- if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp)))
+ if (unlikely(!__virtnet_xdp_xmit(vi, &xdp)))
trace_xdp_exception(vi->dev, xdp_prog, act);
- ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
+ else
+ *xdp_xmit = true;
if (unlikely(xdp_page != page))
goto err_xdp;
rcu_read_unlock();
goto xdp_xmit;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(dev, &xdp, xdp_prog);
+ if (!err)
+ *xdp_xmit = true;
+ rcu_read_unlock();
+ goto xdp_xmit;
default:
bpf_warn_invalid_xdp_action(act);
case XDP_ABORTED:
@@ -633,19 +734,19 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
case XDP_DROP:
if (unlikely(xdp_page != page))
__free_pages(xdp_page, 0);
- ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
goto err_xdp;
}
}
rcu_read_unlock();
- if (unlikely(len > (unsigned long)ctx)) {
+ truesize = mergeable_ctx_to_truesize(ctx);
+ if (unlikely(len > truesize)) {
pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
dev->name, len, (unsigned long)ctx);
dev->stats.rx_length_errors++;
goto err_skb;
}
- truesize = (unsigned long)ctx;
+
head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
curr_skb = head_skb;
@@ -665,13 +766,14 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
}
page = virt_to_head_page(buf);
- if (unlikely(len > (unsigned long)ctx)) {
+
+ truesize = mergeable_ctx_to_truesize(ctx);
+ if (unlikely(len > truesize)) {
pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
dev->name, len, (unsigned long)ctx);
dev->stats.rx_length_errors++;
goto err_skb;
}
- truesize = (unsigned long)ctx;
num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
@@ -729,7 +831,7 @@ xdp_xmit:
}
static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
- void *buf, unsigned int len, void **ctx)
+ void *buf, unsigned int len, void **ctx, bool *xdp_xmit)
{
struct net_device *dev = vi->dev;
struct sk_buff *skb;
@@ -750,11 +852,11 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
}
if (vi->mergeable_rx_bufs)
- skb = receive_mergeable(dev, vi, rq, buf, ctx, len);
+ skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit);
else if (vi->big_packets)
skb = receive_big(dev, vi, rq, buf, len);
else
- skb = receive_small(dev, vi, rq, buf, len);
+ skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit);
if (unlikely(!skb))
return 0;
@@ -787,12 +889,18 @@ frame_err:
return 0;
}
+/* Unlike mergeable buffers, all buffers are allocated to the
+ * same size, except for the headroom. For this reason we do
+ * not need to use mergeable_len_to_ctx here - it is enough
+ * to store the headroom as the context ignoring the truesize.
+ */
static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
gfp_t gfp)
{
struct page_frag *alloc_frag = &rq->alloc_frag;
char *buf;
unsigned int xdp_headroom = virtnet_get_headroom(vi);
+ void *ctx = (void *)(unsigned long)xdp_headroom;
int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
int err;
@@ -806,10 +914,9 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
alloc_frag->offset += len;
sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
vi->hdr_len + GOOD_PACKET_LEN);
- err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp);
+ err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0)
put_page(virt_to_head_page(buf));
-
return err;
}
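As the comment above add_recvbuf_small() notes, small buffers all share one size, so the receive context only needs to carry the headroom. A worked sketch of the resulting buffer layout; every constant here is an assumption for illustration, the real values come from the driver's defines:

/* small_buf_layout.c: worked layout of a small receive buffer. */
#include <stdio.h>

int main(void)
{
	unsigned int hdr_len       = 12;   /* sizeof(struct virtio_net_hdr_mrg_rxbuf), assumed */
	unsigned int rx_pad        = 32;   /* VIRTNET_RX_PAD, assumed */
	unsigned int xdp_headroom  = 256;  /* VIRTIO_XDP_HEADROOM while XDP is attached */
	unsigned int header_offset = rx_pad + xdp_headroom;
	unsigned int headroom      = hdr_len + header_offset;

	/* ctx stores only the headroom; truesize is implicit for small buffers */
	void *ctx = (void *)(unsigned long)xdp_headroom;

	printf("vnet hdr at offset %u, packet data at offset %u, ctx=%lu\n",
	       header_offset, headroom, (unsigned long)ctx);
	return 0;
}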
@@ -889,21 +996,20 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
buf += headroom; /* advance address leaving hole at front of pkt */
- ctx = (void *)(unsigned long)len;
get_page(alloc_frag->page);
alloc_frag->offset += len + headroom;
hole = alloc_frag->size - alloc_frag->offset;
if (hole < len + headroom) {
/* To avoid internal fragmentation, if there is very likely not
* enough space for another buffer, add the remaining space to
- * the current buffer. This extra space is not included in
- * the truesize stored in ctx.
+ * the current buffer.
*/
len += hole;
alloc_frag->offset += hole;
}
sg_init_one(rq->sg, buf, len);
+ ctx = mergeable_len_to_ctx(len, headroom);
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0)
put_page(virt_to_head_page(buf));
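The hole-merging logic above avoids leaving an unusable sliver at the end of a page fragment: if what remains after this buffer cannot hold another buffer plus its headroom, the leftover is folded into the current buffer's length before the context is encoded. A worked model of that accounting (all sizes are made up for illustration):

/* hole_demo.c: models the end-of-fragment accounting in add_recvbuf_mergeable(). */
#include <stdio.h>

int main(void)
{
	unsigned int frag_size = 4096;   /* page fragment size, assumed */
	unsigned int headroom  = 256;    /* XDP headroom, assumed */
	unsigned int offset    = 2048;   /* current offset into the fragment */
	unsigned int len       = 1536;   /* EWMA-predicted buffer length, assumed */
	unsigned int hole;

	offset += len + headroom;
	hole = frag_size - offset;
	if (hole < len + headroom) {
		/* too small for another buffer: give the hole to this one */
		len += hole;
		offset += hole;
	}
	printf("buffer len (becomes truesize in ctx) = %u, fragment used up to %u\n",
	       len, offset);
	return 0;
}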
@@ -924,7 +1030,6 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
int err;
bool oom;
- gfp |= __GFP_COLD;
do {
if (vi->mergeable_rx_bufs)
err = add_recvbuf_mergeable(vi, rq, gfp);
@@ -1008,25 +1113,25 @@ static void refill_work(struct work_struct *work)
}
}
-static int virtnet_receive(struct receive_queue *rq, int budget)
+static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
{
struct virtnet_info *vi = rq->vq->vdev->priv;
unsigned int len, received = 0, bytes = 0;
void *buf;
struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
- if (vi->mergeable_rx_bufs) {
+ if (!vi->big_packets || vi->mergeable_rx_bufs) {
void *ctx;
while (received < budget &&
(buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
- bytes += receive_buf(vi, rq, buf, len, ctx);
+ bytes += receive_buf(vi, rq, buf, len, ctx, xdp_xmit);
received++;
}
} else {
while (received < budget &&
(buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
- bytes += receive_buf(vi, rq, buf, len, NULL);
+ bytes += receive_buf(vi, rq, buf, len, NULL, xdp_xmit);
received++;
}
}
@@ -1059,7 +1164,7 @@ static void free_old_xmit_skbs(struct send_queue *sq)
bytes += skb->len;
packets++;
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
}
/* Avoid overhead when no packets have been processed
@@ -1098,15 +1203,19 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
struct receive_queue *rq =
container_of(napi, struct receive_queue, napi);
unsigned int received;
+ bool xdp_xmit = false;
virtnet_poll_cleantx(rq);
- received = virtnet_receive(rq, budget);
+ received = virtnet_receive(rq, budget, &xdp_xmit);
/* Out of packets? */
if (received < budget)
virtqueue_napi_complete(napi, rq->vq, received);
+ if (xdp_xmit)
+ xdp_do_flush_map();
+
return received;
}
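virtnet_receive() now also reports whether any XDP transmissions were queued during the poll, so redirect state is flushed once per NAPI poll via xdp_do_flush_map() instead of once per packet. A toy model of that batch-then-flush pattern (purely illustrative):

/* batch_flush_demo.c: work is noted per packet, the flush happens once per poll. */
#include <stdbool.h>
#include <stdio.h>

static int flushes;

static void flush(void)	/* stands in for the once-per-poll xdp_do_flush_map() */
{
	flushes++;
}

static int poll_once(int budget)
{
	bool xdp_xmit = false;
	int received;

	for (received = 0; received < budget; received++)
		if (received % 2 == 0)	/* pretend every other frame hit XDP_TX/REDIRECT */
			xdp_xmit = true;

	if (xdp_xmit)
		flush();
	return received;
}

int main(void)
{
	int pkts = 0, i;

	for (i = 0; i < 10; i++)	/* ten NAPI polls */
		pkts += poll_once(64);
	printf("%d packets, %d flushes\n", pkts, flushes);
	return 0;
}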
@@ -1814,7 +1923,6 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
}
static int init_vqs(struct virtnet_info *vi);
-static void _remove_vq_common(struct virtnet_info *vi);
static int virtnet_restore_up(struct virtio_device *vdev)
{
@@ -1843,37 +1951,45 @@ static int virtnet_restore_up(struct virtio_device *vdev)
return err;
}
-static int virtnet_reset(struct virtnet_info *vi, int curr_qp, int xdp_qp)
+static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
{
- struct virtio_device *dev = vi->vdev;
- int ret;
+ struct scatterlist sg;
+ vi->ctrl_offloads = cpu_to_virtio64(vi->vdev, offloads);
- virtio_config_disable(dev);
- dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;
- virtnet_freeze_down(dev);
- _remove_vq_common(vi);
+ sg_init_one(&sg, &vi->ctrl_offloads, sizeof(vi->ctrl_offloads));
- virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
- virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER);
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
+ VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
+ dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
+ return -EINVAL;
+ }
- ret = virtio_finalize_features(dev);
- if (ret)
- goto err;
+ return 0;
+}
- vi->xdp_queue_pairs = xdp_qp;
- ret = virtnet_restore_up(dev);
- if (ret)
- goto err;
- ret = _virtnet_set_queues(vi, curr_qp);
- if (ret)
- goto err;
+static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
+{
+ u64 offloads = 0;
- virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
- virtio_config_enable(dev);
- return 0;
-err:
- virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
- return ret;
+ if (!vi->guest_offloads)
+ return 0;
+
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))
+ offloads = 1ULL << VIRTIO_NET_F_GUEST_CSUM;
+
+ return virtnet_set_guest_offloads(vi, offloads);
+}
+
+static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
+{
+ u64 offloads = vi->guest_offloads;
+
+ if (!vi->guest_offloads)
+ return 0;
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))
+ offloads |= 1ULL << VIRTIO_NET_F_GUEST_CSUM;
+
+ return virtnet_set_guest_offloads(vi, offloads);
}
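virtnet_clear_guest_offloads()/virtnet_restore_guest_offloads() above send a VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET command whose payload is a bitmask of virtio feature bits. A stand-alone sketch of how that mask is assembled from the guest_offloads[] table introduced at the top of the patch; the feature-bit numbers follow the virtio-net spec but are quoted here from memory, so treat them as illustrative:

/* offload_mask_demo.c: builds the guest-offload bitmask the way restore does. */
#include <stdint.h>
#include <stdio.h>

#define VIRTIO_NET_F_GUEST_CSUM	1
#define VIRTIO_NET_F_GUEST_TSO4	7
#define VIRTIO_NET_F_GUEST_TSO6	8
#define VIRTIO_NET_F_GUEST_ECN	9
#define VIRTIO_NET_F_GUEST_UFO	10

static const unsigned long guest_offloads[] = {
	VIRTIO_NET_F_GUEST_TSO4,
	VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN,
	VIRTIO_NET_F_GUEST_UFO
};

int main(void)
{
	uint64_t offloads = 0;
	size_t i;

	/* what virtnet_probe() records per negotiated guest offload feature ... */
	for (i = 0; i < sizeof(guest_offloads) / sizeof(guest_offloads[0]); i++)
		offloads |= 1ULL << guest_offloads[i];

	/* ... plus checksum offload, re-added when restoring after XDP is removed */
	offloads |= 1ULL << VIRTIO_NET_F_GUEST_CSUM;

	printf("GUEST_OFFLOADS_SET payload: 0x%llx\n", (unsigned long long)offloads);
	return 0;
}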
static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
@@ -1885,10 +2001,11 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
u16 xdp_qp = 0, curr_qp;
int i, err;
- if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
- virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
- virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
- virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO)) {
+ if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
+ && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO))) {
NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO, disable LRO first");
return -EOPNOTSUPP;
}
@@ -1922,35 +2039,35 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
return PTR_ERR(prog);
}
- /* Changing the headroom in buffers is a disruptive operation because
- * existing buffers must be flushed and reallocated. This will happen
- * when a xdp program is initially added or xdp is disabled by removing
- * the xdp program resulting in number of XDP queues changing.
- */
- if (vi->xdp_queue_pairs != xdp_qp) {
- err = virtnet_reset(vi, curr_qp + xdp_qp, xdp_qp);
- if (err) {
- dev_warn(&dev->dev, "XDP reset failure.\n");
- goto virtio_reset_err;
- }
- }
+ /* Make sure NAPI is not using any XDP TX queues for RX. */
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ napi_disable(&vi->rq[i].napi);
netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
+ err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
+ if (err)
+ goto err;
+ vi->xdp_queue_pairs = xdp_qp;
for (i = 0; i < vi->max_queue_pairs; i++) {
old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
+ if (i == 0) {
+ if (!old_prog)
+ virtnet_clear_guest_offloads(vi);
+ if (!prog)
+ virtnet_restore_guest_offloads(vi);
+ }
if (old_prog)
bpf_prog_put(old_prog);
+ virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
}
return 0;
-virtio_reset_err:
- /* On reset error do our best to unwind XDP changes inflight and return
- * error up to user space for resolution. The underlying reset hung on
- * us so not much we can do here.
- */
+err:
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
if (prog)
bpf_prog_sub(prog, vi->max_queue_pairs - 1);
return err;
@@ -1970,7 +2087,7 @@ static u32 virtnet_xdp_query(struct net_device *dev)
return 0;
}
-static int virtnet_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
@@ -1997,7 +2114,9 @@ static const struct net_device_ops virtnet_netdev = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = virtnet_netpoll,
#endif
- .ndo_xdp = virtnet_xdp,
+ .ndo_bpf = virtnet_xdp,
+ .ndo_xdp_xmit = virtnet_xdp_xmit,
+ .ndo_xdp_flush = virtnet_xdp_flush,
.ndo_features_check = passthru_features_check,
};
@@ -2183,7 +2302,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
if (!names)
goto err_names;
- if (vi->mergeable_rx_bufs) {
+ if (!vi->big_packets || vi->mergeable_rx_bufs) {
ctx = kzalloc(total_vqs * sizeof(*ctx), GFP_KERNEL);
if (!ctx)
goto err_ctx;
@@ -2304,7 +2423,7 @@ err:
#ifdef CONFIG_SYSFS
static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
- struct rx_queue_attribute *attribute, char *buf)
+ char *buf)
{
struct virtnet_info *vi = netdev_priv(queue->dev);
unsigned int queue_index = get_netdev_rx_queue_index(queue);
@@ -2429,7 +2548,7 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
- dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
+ dev->hw_features |= NETIF_F_TSO
| NETIF_F_TSO_ECN | NETIF_F_TSO6;
}
/* Individual feature bits: what can host handle? */
@@ -2439,13 +2558,11 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->hw_features |= NETIF_F_TSO6;
if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
dev->hw_features |= NETIF_F_TSO_ECN;
- if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
- dev->hw_features |= NETIF_F_UFO;
dev->features |= NETIF_F_GSO_ROBUST;
if (gso)
- dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
+ dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
/* (!csum && gso) case will be fixed by register_netdev() */
}
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
@@ -2578,6 +2695,10 @@ static int virtnet_probe(struct virtio_device *vdev)
netif_carrier_on(dev);
}
+ for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
+ if (virtio_has_feature(vi->vdev, guest_offloads[i]))
+ set_bit(guest_offloads[i], &vi->guest_offloads);
+
pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
dev->name, max_queue_pairs);
@@ -2598,15 +2719,6 @@ free:
return err;
}
-static void _remove_vq_common(struct virtnet_info *vi)
-{
- vi->vdev->config->reset(vi->vdev);
- free_unused_bufs(vi);
- _free_receive_bufs(vi);
- free_receive_page_frags(vi);
- virtnet_del_vqs(vi);
-}
-
static void remove_vq_common(struct virtnet_info *vi)
{
vi->vdev->config->reset(vi->vdev);
@@ -2638,8 +2750,7 @@ static void virtnet_remove(struct virtio_device *vdev)
free_netdev(vi->dev);
}
-#ifdef CONFIG_PM_SLEEP
-static int virtnet_freeze(struct virtio_device *vdev)
+static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
@@ -2650,7 +2761,7 @@ static int virtnet_freeze(struct virtio_device *vdev)
return 0;
}
-static int virtnet_restore(struct virtio_device *vdev)
+static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
int err;
@@ -2666,7 +2777,6 @@ static int virtnet_restore(struct virtio_device *vdev)
return 0;
}
-#endif
static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
@@ -2683,7 +2793,7 @@ static struct virtio_device_id id_table[] = {
VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
VIRTIO_NET_F_CTRL_MAC_ADDR, \
- VIRTIO_NET_F_MTU
+ VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
static unsigned int features[] = {
VIRTNET_FEATURES,
@@ -2743,9 +2853,9 @@ module_init(virtio_net_driver_init);
static __exit void virtio_net_driver_exit(void)
{
+ unregister_virtio_driver(&virtio_net_driver);
cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
cpuhp_remove_multi_state(virtionet_online);
- unregister_virtio_driver(&virtio_net_driver);
}
module_exit(virtio_net_driver_exit);