path: root/io_uring/notif.c
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/io_uring.h>

#include "io_uring.h"
#include "notif.h"
#include "rsrc.h"

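/*
 * Task work completion path for a zerocopy notification: return any pages
 * accounted against the user and post the notification's CQE.
 */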
static void __io_notif_complete_tw(struct io_kiocb *notif, bool *locked)
{
	struct io_notif_data *nd = io_notif_to_data(notif);
	struct io_ring_ctx *ctx = notif->ctx;

	if (nd->account_pages && ctx->user) {
		__io_unaccount_mem(ctx->user, nd->account_pages);
		nd->account_pages = 0;
	}
	io_req_task_complete(notif, locked);
}

static inline void io_notif_complete(struct io_kiocb *notif)
	__must_hold(&notif->ctx->uring_lock)
{
	bool locked = true;

	__io_notif_complete_tw(notif, &locked);
}

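/*
 * ubuf_info completion callback, invoked by the networking stack once it is
 * done with the zerocopy data. The final reference put defers CQE posting
 * to task work.
 */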
static void io_uring_tx_zerocopy_callback(struct sk_buff *skb,
					  struct ubuf_info *uarg,
					  bool success)
{
	struct io_notif_data *nd = container_of(uarg, struct io_notif_data, uarg);
	struct io_kiocb *notif = cmd_to_io_kiocb(nd);

	if (refcount_dec_and_test(&uarg->refcnt)) {
		notif->io_task_work.func = __io_notif_complete_tw;
		io_req_task_work_add(notif);
	}
}

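/*
 * Allocate and initialise a notification request. The single uarg reference
 * taken here is the slot's master ref, dropped by io_notif_slot_flush().
 */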
struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx,
				struct io_notif_slot *slot)
	__must_hold(&ctx->uring_lock)
{
	struct io_kiocb *notif;
	struct io_notif_data *nd;

	if (unlikely(!io_alloc_req_refill(ctx)))
		return NULL;
	notif = io_alloc_req(ctx);
	notif->opcode = IORING_OP_NOP;
	notif->flags = 0;
	notif->file = NULL;
	notif->task = current;
	io_get_task_refs(1);
	notif->rsrc_node = NULL;
	io_req_set_rsrc_node(notif, ctx, 0);
	notif->cqe.user_data = slot->tag;
	notif->cqe.flags = slot->seq++;
	notif->cqe.res = 0;

	nd = io_notif_to_data(notif);
	nd->account_pages = 0;
	nd->uarg.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
	nd->uarg.callback = io_uring_tx_zerocopy_callback;
	/* master ref owned by io_notif_slot, will be dropped on flush */
	refcount_set(&nd->uarg.refcnt, 1);
	return notif;
}

void io_notif_slot_flush(struct io_notif_slot *slot)
	__must_hold(&slot->notif->ctx->uring_lock)
{
	struct io_kiocb *notif = slot->notif;
	struct io_notif_data *nd = io_notif_to_data(notif);

	slot->notif = NULL;

	/* drop slot's master ref */
	if (refcount_dec_and_test(&nd->uarg.refcnt)) {
		notif->io_task_work.func = __io_notif_complete_tw;
		io_req_task_work_add(notif);
	}
}