author     Kent Overstreet <koverstreet@google.com>    2013-01-24 13:14:52 +1100
committer  Stephen Rothwell <sfr@canb.auug.org.au>     2013-02-04 18:01:10 +1100
commit     f5a5d743a539197599e3a5f1d7ee2e707a3bb0c2 (patch)
tree       a6c4d1c05b6c661fc7a9614c41a8ab77e65dfbe1 /fs
parent     d5612bdfa2226650c3abe9efab0b1af4fe36a5c3 (diff)
aio: document, clarify aio_read_events() and shadow_tail
Signed-off-by: Kent Overstreet <koverstreet@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'fs')
-rw-r--r--  fs/aio.c  |  34
1 file changed, 28 insertions(+), 6 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index ddb587a9259f..62573d34310c 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -103,6 +103,19 @@ struct kioctx {
struct {
struct mutex ring_lock;
wait_queue_head_t wait;
+
+ /*
+ * Copy of the real tail, maintained by aio_complete() to reduce
+ * cacheline bouncing. The real tail will tend to be much more
+ * contended - since typically events are delivered one at a
+ * time, and then aio_read_events() slurps them up a bunch at a
+ * time - so it's helpful if aio_read_events() isn't also
+ * contending for the tail. So, aio_complete() updates
+ * shadow_tail whenever it updates tail.
+ *
+ * Also needed because tail is used as a hacky lock and isn't
+ * always the real tail.
+ */
unsigned shadow_tail;
} ____cacheline_aligned_in_smp;
@@ -848,10 +861,7 @@ static long aio_read_events_ring(struct kioctx *ctx,
long ret = 0;
int copy_ret;
- if (!mutex_trylock(&ctx->ring_lock)) {
- __set_current_state(TASK_RUNNING);
- mutex_lock(&ctx->ring_lock);
- }
+ mutex_lock(&ctx->ring_lock);
ring = kmap_atomic(ctx->ring_pages[0]);
head = ring->head;
@@ -862,8 +872,6 @@ static long aio_read_events_ring(struct kioctx *ctx,
if (head == ctx->shadow_tail)
goto out;
- __set_current_state(TASK_RUNNING);
-
while (ret < nr) {
long avail = (head < ctx->shadow_tail
? ctx->shadow_tail : ctx->nr) - head;
@@ -942,6 +950,20 @@ static long read_events(struct kioctx *ctx, long min_nr, long nr,
until = timespec_to_ktime(ts);
}
+ /*
+ * Note that aio_read_events() is being called as the conditional - i.e.
+ * we're calling it after prepare_to_wait() has set task state to
+ * TASK_INTERRUPTIBLE.
+ *
+ * But aio_read_events() can block, and if it blocks it's going to flip
+ * the task state back to TASK_RUNNING.
+ *
+ * This should be ok, provided it doesn't flip the state back to
+ * TASK_RUNNING and return 0 too much - that causes us to spin. That
+ * will only happen if the mutex_lock() call blocks, and we then find
+ * the ringbuffer empty. So in practice we should be ok, but it's
+ * something to be aware of when touching this code.
+ */
wait_event_interruptible_hrtimeout(ctx->wait,
aio_read_events(ctx, min_nr, nr, event, &ret), until);
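A note on the shadow_tail comment added in the first hunk: it describes a general single-writer trick in which the completion path mirrors every tail update into a second copy kept next to the reader-side state, so readers polling for new events never pull the heavily contended tail cacheline away from completers. What follows is a minimal user-space sketch of that idea, assuming a single producer and C11 atomics; every name in it (demo_ring, demo_complete, demo_read_events, RING_SIZE) is invented for illustration and it is not the kernel implementation.

#include <stdatomic.h>
#include <stdint.h>

#define RING_SIZE 128u			/* hypothetical size, power of two */

struct demo_ring {
	uint64_t slots[RING_SIZE];

	/* Completion-side state: written on every delivered event. */
	struct {
		unsigned tail;			/* the "real" tail */
	} __attribute__((aligned(64))) prod;

	/* Reader-side state: the only counters the reader ever reads. */
	struct {
		_Atomic unsigned shadow_tail;	/* mirror of prod.tail */
		unsigned head;
	} __attribute__((aligned(64))) cons;
};

/* Producer, analogous to aio_complete(): deliver one event.
 * (No full-ring check here; a real ring would bound tail - head.) */
static void demo_complete(struct demo_ring *r, uint64_t ev)
{
	r->slots[r->prod.tail % RING_SIZE] = ev;
	r->prod.tail++;
	/* Mirror the update so readers never touch prod.tail itself. */
	atomic_store_explicit(&r->cons.shadow_tail, r->prod.tail,
			      memory_order_release);
}

/* Consumer, analogous to aio_read_events(): slurp up whatever is ready. */
static unsigned demo_read_events(struct demo_ring *r, uint64_t *out, unsigned nr)
{
	unsigned tail = atomic_load_explicit(&r->cons.shadow_tail,
					     memory_order_acquire);
	unsigned n = 0;

	while (r->cons.head != tail && n < nr) {
		out[n++] = r->slots[r->cons.head % RING_SIZE];
		r->cons.head++;
	}
	return n;
}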
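The comment added to read_events(), and the removal of the mutex_trylock()/__set_current_state() dance in aio_read_events_ring(), both lean on where the wait macro evaluates its condition. The fragment below is a rough hand-written paraphrase of that loop, assuming the locals of read_events() (ctx, min_nr, nr, event, ret, until) are in scope; it is not the real expansion of wait_event_interruptible_hrtimeout() and is only meant to show the ordering of prepare_to_wait(), the condition, and the sleep.

	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&ctx->wait, &wait, TASK_INTERRUPTIBLE);

		/*
		 * The condition runs here, after the task has already been
		 * marked TASK_INTERRUPTIBLE.  If aio_read_events() blocks,
		 * e.g. in mutex_lock(&ctx->ring_lock), that sleep puts the
		 * task back into TASK_RUNNING, so the sleep below returns
		 * immediately and we go around the loop again.  That is the
		 * spin the comment warns about; it stays harmless as long as
		 * "blocked, then found nothing" is the rare case.
		 */
		if (aio_read_events(ctx, min_nr, nr, event, &ret))
			break;

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		/* Only sleeps if the condition left us in TASK_INTERRUPTIBLE. */
		if (!schedule_hrtimeout(&until, HRTIMER_MODE_ABS))
			break;				/* timed out */
	}
	finish_wait(&ctx->wait, &wait);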