Diffstat (limited to 'net/xdp/xsk_buff_pool.c')
 net/xdp/xsk_buff_pool.c | 26 +++++++++++++++-----------
 1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 64c9e55d4d4e..556d82d03687 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -144,14 +144,13 @@ static int __xp_assign_dev(struct xsk_buff_pool *pool,
 	if (err)
 		return err;
 
-	if (flags & XDP_USE_NEED_WAKEUP) {
+	if (flags & XDP_USE_NEED_WAKEUP)
 		pool->uses_need_wakeup = true;
-		/* Tx needs to be explicitly woken up the first time.
-		 * Also for supporting drivers that do not implement this
-		 * feature. They will always have to call sendto().
-		 */
-		pool->cached_need_wakeup = XDP_WAKEUP_TX;
-	}
+	/* Tx needs to be explicitly woken up the first time. Also
+	 * for supporting drivers that do not implement this
+	 * feature. They will always have to call sendto() or poll().
+	 */
+	pool->cached_need_wakeup = XDP_WAKEUP_TX;
 
 	dev_hold(netdev);
 
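
The rewritten comment describes the user-space side of this contract: an application that bound with XDP_USE_NEED_WAKEUP checks the Tx ring's need_wakeup flag before kicking the kernel, and since cached_need_wakeup starts as XDP_WAKEUP_TX the first check always asks for a kick. A minimal sketch using the libxdp/libbpf xsk helpers; the kick_tx name and its arguments are illustrative:

    #include <sys/socket.h>
    #include <xdp/xsk.h>    /* <bpf/xsk.h> with older libbpf */

    /* Kick the kernel Tx path only when the driver asks for a wakeup. */
    static void kick_tx(struct xsk_socket *xsk, struct xsk_ring_prod *tx)
    {
            if (xsk_ring_prod__needs_wakeup(tx))
                    sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT,
                           NULL, 0);
    }

Applications that do not set XDP_USE_NEED_WAKEUP skip the check and simply issue sendto() or poll() every time, which is what the kernel comment alludes to.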
@@ -185,8 +184,10 @@ err_unreg_xsk:
 err_unreg_pool:
 	if (!force_zc)
 		err = 0; /* fallback to copy mode */
-	if (err)
+	if (err) {
 		xsk_clear_pool_at_qid(netdev, queue_id);
+		dev_put(netdev);
+	}
 	return err;
 }
 
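
The added dev_put() pairs with the dev_hold() taken earlier in __xp_assign_dev(): when the bind genuinely fails (err is still set after the copy-mode fallback), the reference must be dropped or the netdev refcount leaks and the device can never be unregistered. A reduced illustration of the pairing, not the full function:

    dev_hold(netdev);           /* reference taken once binding proceeds */

    /* ... setup steps that may fail and jump to the error labels ... */

    if (err) {
            xsk_clear_pool_at_qid(netdev, queue_id);
            dev_put(netdev);    /* undo the hold on the failure path */
    }
    return err;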
@@ -242,7 +243,7 @@ static void xp_release_deferred(struct work_struct *work)
 		pool->cq = NULL;
 	}
 
-	xdp_put_umem(pool->umem);
+	xdp_put_umem(pool->umem, false);
 	xp_destroy(pool);
 }
 
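
This call site implies that xdp_put_umem() has grown a second boolean argument. The parameter name is not visible in this file's hunk; reading it as a defer-cleanup flag, the prototype would look something like the line below (an editorial guess, not taken from this diff):

    /* Assumed prototype; "defer_cleanup" is a guessed name. Passing false
     * here is safe because xp_release_deferred() already runs from a
     * workqueue, so the umem can be torn down immediately. */
    void xdp_put_umem(struct xdp_umem *umem, bool defer_cleanup);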
@@ -251,15 +252,18 @@ void xp_get_pool(struct xsk_buff_pool *pool)
 	refcount_inc(&pool->users);
 }
 
-void xp_put_pool(struct xsk_buff_pool *pool)
+bool xp_put_pool(struct xsk_buff_pool *pool)
 {
 	if (!pool)
-		return;
+		return false;
 
 	if (refcount_dec_and_test(&pool->users)) {
 		INIT_WORK(&pool->work, xp_release_deferred);
 		schedule_work(&pool->work);
+		return true;
 	}
+
+	return false;
 }
 
 static struct xsk_dma_map *xp_find_dma_map(struct xsk_buff_pool *pool)
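
With xp_put_pool() now reporting whether it scheduled the deferred release, a caller can tell who ends up responsible for the umem: if the work item was queued, xp_release_deferred() will drop the umem reference itself. A sketch of how a socket teardown path might consume the return value; the xs variable is illustrative and not part of this hunk:

    /* Only drop the umem directly when the pool did not take over the
     * cleanup. "xs" stands in for a struct xdp_sock pointer. */
    if (!xp_put_pool(xs->pool))
            xdp_put_umem(xs->umem, false);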