Diffstat (limited to 'net/xdp')
-rw-r--r--  net/xdp/xsk.c            | 23
-rw-r--r--  net/xdp/xsk_buff_pool.c  | 40
-rw-r--r--  net/xdp/xsk_queue.h      |  5
3 files changed, 44 insertions, 24 deletions
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 7e16336044b2..1140b2a120ca 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -1320,14 +1320,6 @@ struct xdp_umem_reg_v1 {
 	__u32 headroom;
 };
 
-struct xdp_umem_reg_v2 {
-	__u64 addr; /* Start of packet data area */
-	__u64 len; /* Length of packet data area */
-	__u32 chunk_size;
-	__u32 headroom;
-	__u32 flags;
-};
-
 static int xsk_setsockopt(struct socket *sock, int level, int optname,
 			  sockptr_t optval, unsigned int optlen)
 {
@@ -1371,10 +1363,19 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
 
 		if (optlen < sizeof(struct xdp_umem_reg_v1))
 			return -EINVAL;
-		else if (optlen < sizeof(struct xdp_umem_reg_v2))
-			mr_size = sizeof(struct xdp_umem_reg_v1);
 		else if (optlen < sizeof(mr))
-			mr_size = sizeof(struct xdp_umem_reg_v2);
+			mr_size = sizeof(struct xdp_umem_reg_v1);
+
+		BUILD_BUG_ON(sizeof(struct xdp_umem_reg_v1) >= sizeof(struct xdp_umem_reg));
+
+		/* Make sure the last field of the struct doesn't have
+		 * uninitialized padding. All padding has to be explicit
+		 * and has to be set to zero by the userspace to make
+		 * struct xdp_umem_reg extensible in the future.
+		 */
+		BUILD_BUG_ON(offsetof(struct xdp_umem_reg, tx_metadata_len) +
+			     sizeof_field(struct xdp_umem_reg, tx_metadata_len) !=
+			     sizeof(struct xdp_umem_reg));
 
 		if (copy_from_sockptr(&mr, optval, mr_size))
 			return -EFAULT;
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index c0e0204b9630..521a2938e50a 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -211,6 +211,11 @@ int xp_assign_dev(struct xsk_buff_pool *pool,
 		goto err_unreg_pool;
 	}
 
+	if (dev_get_min_mp_channel_count(netdev)) {
+		err = -EBUSY;
+		goto err_unreg_pool;
+	}
+
 	bpf.command = XDP_SETUP_XSK_POOL;
 	bpf.xsk.pool = pool;
 	bpf.xsk.queue_id = queue_id;
@@ -623,20 +628,31 @@ static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u3
 	return nb_entries;
 }
 
-u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+static u32 xp_alloc_slow(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
+			 u32 max)
 {
-	u32 nb_entries1 = 0, nb_entries2;
+	int i;
 
-	if (unlikely(pool->dev && dma_dev_need_sync(pool->dev))) {
+	for (i = 0; i < max; i++) {
 		struct xdp_buff *buff;
 
-		/* Slow path */
 		buff = xp_alloc(pool);
-		if (buff)
-			*xdp = buff;
-		return !!buff;
+		if (unlikely(!buff))
+			return i;
+		*xdp = buff;
+		xdp++;
 	}
 
+	return max;
+}
+
+u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+{
+	u32 nb_entries1 = 0, nb_entries2;
+
+	if (unlikely(pool->dev && dma_dev_need_sync(pool->dev)))
+		return xp_alloc_slow(pool, xdp, max);
+
 	if (unlikely(pool->free_list_cnt)) {
 		nb_entries1 = xp_alloc_reused(pool, xdp, max);
 		if (nb_entries1 == max)
@@ -656,9 +672,17 @@ EXPORT_SYMBOL(xp_alloc_batch);
 
 bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
 {
+	u32 req_count, avail_count;
+
 	if (pool->free_list_cnt >= count)
 		return true;
-	return xskq_cons_has_entries(pool->fq, count - pool->free_list_cnt);
+
+	req_count = count - pool->free_list_cnt;
+	avail_count = xskq_cons_nb_entries(pool->fq, req_count);
+	if (!avail_count)
+		pool->fq->queue_empty_descs++;
+
+	return avail_count >= req_count;
 }
 EXPORT_SYMBOL(xp_can_alloc);
 
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 6f2d1621c992..406b20dfee8d 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -306,11 +306,6 @@ static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
 	return entries >= max ? max : entries;
 }
 
-static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
-{
-	return xskq_cons_nb_entries(q, cnt) >= cnt;
-}
-
 static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
 {
 	if (q->cached_prod == q->cached_cons)
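
The xsk.c hunk above makes struct xdp_umem_reg extensible by requiring userspace to zero any bytes it does not set, so that newer fields such as tx_metadata_len default to "feature off" on older applications. The following is a hedged userspace sketch, not part of this patch, showing a UMEM registration written to respect that rule; register_umem() and the chunk-size/headroom values are illustrative assumptions, not an API defined here.

/* Sketch: register a UMEM with every unused byte of the request zeroed,
 * so fields added in later kernels (flags, tx_metadata_len, ...) read as 0.
 */
#include <string.h>
#include <stdint.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

static int register_umem(int xsk_fd, void *umem_area, __u64 umem_len)
{
	struct xdp_umem_reg mr;

	memset(&mr, 0, sizeof(mr));		/* zeros flags, tx_metadata_len and any padding */
	mr.addr = (uintptr_t)umem_area;		/* start of the packet buffer area */
	mr.len = umem_len;			/* total size of that area */
	mr.chunk_size = 4096;			/* illustrative: one frame per page */
	mr.headroom = 0;

	/* Passing sizeof(mr) as optlen lets xsk_setsockopt() pick mr_size
	 * exactly as the hunk above dispatches on the supplied length.
	 */
	return setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
}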