summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKent Overstreet <koverstreet@google.com>2013-02-20 13:16:58 +1100
committerStephen Rothwell <sfr@canb.auug.org.au>2013-02-28 11:48:17 +1100
commitb998ea1fd1b843987299ebd6b65c15c15ab2ebd2 (patch)
tree47a04f72ba80b664d03036a32dbeb3b027f792b3
parentaf0cef56ccecde31c7b558d843a649726a3db87b (diff)
aio: document, clarify aio_read_events() and shadow_tail
Signed-off-by: Kent Overstreet <koverstreet@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r-- fs/aio.c | 34
1 file changed, 28 insertions(+), 6 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index 44856006c8f8..84765f8ff423 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -103,6 +103,19 @@ struct kioctx {
struct {
struct mutex ring_lock;
wait_queue_head_t wait;
+
+ /*
+ * Copy of the real tail, that aio_complete uses - to reduce
+ * cacheline bouncing. The real tail will tend to be much more
+ * contended - since typically events are delivered one at a
+ * time, and then aio_read_events() slurps them up a bunch at a
+ * time - so it's helpful if aio_read_events() isn't also
+ * contending for the tail. So, aio_complete() updates
+ * shadow_tail whenever it updates tail.
+ *
+ * Also needed because tail is used as a hacky lock and isn't
+ * always the real tail.
+ */
unsigned shadow_tail;
} ____cacheline_aligned_in_smp;
@@ -860,10 +873,7 @@ static long aio_read_events_ring(struct kioctx *ctx,
long ret = 0;
int copy_ret;
- if (!mutex_trylock(&ctx->ring_lock)) {
- __set_current_state(TASK_RUNNING);
- mutex_lock(&ctx->ring_lock);
- }
+ mutex_lock(&ctx->ring_lock);
ring = kmap_atomic(ctx->ring_pages[0]);
head = ring->head;
@@ -874,8 +884,6 @@ static long aio_read_events_ring(struct kioctx *ctx,
if (head == ctx->shadow_tail)
goto out;
- __set_current_state(TASK_RUNNING);
-
while (ret < nr) {
long avail = (head < ctx->shadow_tail
? ctx->shadow_tail : ctx->nr) - head;
@@ -954,6 +962,20 @@ static long read_events(struct kioctx *ctx, long min_nr, long nr,
until = timespec_to_ktime(ts);
}
+ /*
+ * Note that aio_read_events() is being called as the conditional - i.e.
+ * we're calling it after prepare_to_wait() has set task state to
+ * TASK_INTERRUPTIBLE.
+ *
+ * But aio_read_events() can block, and if it blocks it's going to flip
+ * the task state back to TASK_RUNNING.
+ *
+ * This should be ok, provided it doesn't flip the state back to
+ * TASK_RUNNING and return 0 too much - that causes us to spin. That
+ * will only happen if the mutex_lock() call blocks, and we then find
+ * the ringbuffer empty. So in practice we should be ok, but it's
+ * something to be aware of when touching this code.
+ */
wait_event_interruptible_hrtimeout(ctx->wait,
aio_read_events(ctx, min_nr, nr, event, &ret), until);