return 1;
}
- /*
- * Check if the user asked us to deliver the result through an
- * eventfd. The eventfd_signal() function is safe to be called
- * from IRQ context.
- */
- if (!IS_ERR(iocb->ki_eventfd))
- eventfd_signal(iocb->ki_eventfd, 1);
-
info = &ctx->ring_info;
/* add a completion event to the ring buffer.
 * must be done holding ctx->ctx_lock to prevent
 * other code from messing with the tail
 * pointer since we might be called from irq
 * context.
 */
kunmap_atomic(ring, KM_IRQ1);
pr_debug("added to ring %p at [%lu]\n", iocb, tail);
+
+ /*
+ * Check if the user asked us to deliver the result through an
+ * eventfd. The eventfd_signal() function is safe to be called
+ * from IRQ context.
+ */
+ if (!IS_ERR(iocb->ki_eventfd))
+ eventfd_signal(iocb->ki_eventfd, 1);
+
put_rq:
/* everything turned out well, dispose of the aiocb. */
ret = __aio_put_req(ctx, iocb);
+ /*
+ * We have to order our ring_info tail store above and test
+ * of the wait list below outside the wait lock. This is
+ * like in wake_up_bit() where clearing a bit has to be
+ * ordered with the unlocked test.
+ */
+ smp_mb();
+
if (waitqueue_active(&ctx->wait))
wake_up(&ctx->wait);
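
The first hunk re-orders aio_complete() so that eventfd_signal() runs only after the completion event has been written to the ring and the tail updated. If the eventfd were signalled first, a thread woken through it could call io_getevents() and find an empty ring. Below is a minimal userspace sketch of the notification pattern this ordering guarantees, assuming libaio and its io_set_eventfd() helper; the file path is made up and error handling is trimmed (compile with -laio):

#include <libaio.h>
#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	io_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	char buf[512];
	uint64_t count;

	int efd = eventfd(0, 0);
	int fd = open("/tmp/aio-eventfd-demo", O_RDWR | O_CREAT, 0600);
	if (efd < 0 || fd < 0 || io_setup(1, &ctx) < 0)
		return 1;
	pwrite(fd, buf, sizeof(buf), 0);	/* give the read something to read back */

	io_prep_pread(&cb, fd, buf, sizeof(buf), 0);
	io_set_eventfd(&cb, efd);		/* completion also signals efd */
	if (io_submit(ctx, 1, cbs) != 1)
		return 1;

	read(efd, &count, sizeof(count));	/* block until eventfd_signal() */

	/* The kernel signals the eventfd only after the event is in the
	 * ring, so a non-blocking check of the ring must now succeed. */
	if (io_getevents(ctx, 0, 1, &ev, NULL) == 1)
		printf("got completion, res=%ld\n", (long)ev.res);

	io_destroy(ctx);
	return 0;
}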
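
The second hunk fixes a lost-wakeup hang: the tail store in aio_complete() must be ordered against the unlocked waitqueue_active() test, pairing with the barrier a sleeper performs when it puts itself on the wait queue, just as in wake_up_bit(). Here is a self-contained C11 sketch of the same store/fence/test pattern, with illustrative names (tail, waiting) rather than the kernel's (compile with cc -std=c11 -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int tail;		/* stands in for the ring tail store */
static atomic_int waiting;	/* stands in for waitqueue_active() */

/* Completion side, shaped like the end of aio_complete(). */
static void *producer(void *unused)
{
	atomic_store_explicit(&tail, 1, memory_order_relaxed);	/* publish event */
	atomic_thread_fence(memory_order_seq_cst);		/* the smp_mb() */
	if (atomic_load_explicit(&waiting, memory_order_relaxed))
		puts("producer: waiter registered, wake it");
	return NULL;
}

/* Sleeping side, shaped like read_events() preparing to wait. */
static void *consumer(void *unused)
{
	atomic_store_explicit(&waiting, 1, memory_order_relaxed); /* join waitqueue */
	atomic_thread_fence(memory_order_seq_cst);	/* barrier in prepare-to-wait */
	if (atomic_load_explicit(&tail, memory_order_relaxed))
		puts("consumer: event already queued, skip sleeping");
	else
		puts("consumer: no event yet, safe to sleep");
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}

With both fences in place at least one side is guaranteed to see the other's store: either the producer sees the waiter and wakes it, or the consumer sees the event before sleeping. Drop either fence and both can miss, which is the hang the hunk fixes.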
ret = aio_read_evt(ctx, &ent);
if (ret)
break;
if (min_nr <= i)
break;
- ret = 0;
+ if (unlikely(ctx->dead)) {
+ ret = -EINVAL;
+ break;
+ }
if (to.timed_out) /* Only check after read evt */
break;
/* Try to only show up in io wait if there are ops
 *  in flight */
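
This hunk is the waiter half of a shutdown handshake: a task sleeping in read_events() must re-check ctx->dead whenever it wakes, and fail with -EINVAL, instead of sleeping on until its timeout. The next hunk adds the matching wakeup in io_destroy(). Both halves are sketched together below as a minimal pthread program with hypothetical names (kctx, get_events, destroy); the kernel relies on its own locking and barriers where this sketch leans on the mutex:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct kctx {
	pthread_mutex_t lock;
	pthread_cond_t wait;	/* stands in for ctx->wait */
	bool dead;		/* stands in for ctx->dead */
	int nr_events;		/* stands in for queued ring events */
};

/* Waiter half, shaped like read_events(). */
static int get_events(struct kctx *ctx)
{
	int ret = 0;

	pthread_mutex_lock(&ctx->lock);
	while (ctx->nr_events == 0) {
		if (ctx->dead) {	/* the unlikely(ctx->dead) test */
			ret = -EINVAL;
			break;
		}
		pthread_cond_wait(&ctx->wait, &ctx->lock);
	}
	if (ret == 0)
		ctx->nr_events--;
	pthread_mutex_unlock(&ctx->lock);
	return ret;
}

/* Destroyer half, shaped like io_destroy(). */
static void destroy(struct kctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	ctx->dead = true;	/* must be visible before the wakeup */
	pthread_cond_broadcast(&ctx->wait);	/* the added wake_up() */
	pthread_mutex_unlock(&ctx->lock);
}

static void *waiter(void *arg)
{
	printf("get_events() -> %d\n", get_events(arg));
	return NULL;
}

int main(void)
{
	struct kctx ctx = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wait = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	pthread_create(&t, NULL, waiter, &ctx);
	destroy(&ctx);		/* waiter returns -EINVAL instead of hanging */
	pthread_join(t, NULL);
	return 0;
}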
aio_cancel_all(ioctx);
wait_for_all_aios(ioctx);
+
+ /*
+ * Wake up any waiters. The setting of ctx->dead must be seen
+ * by other CPUs at this point. Right now, we rely on the
+ * locking done by the above calls to ensure this consistency.
+ */
+ wake_up(&ioctx->wait);
put_ioctx(ioctx); /* once for the lookup */
}
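
The user-visible effect, as a hedged sketch: with the two hunks above applied, a thread blocked in io_getevents() is woken and gets -EINVAL once another thread destroys the context, where previously it could hang forever. This assumes libaio and pthreads (compile with -laio -pthread); the sleep() is a crude stand-in for proper synchronization:

#include <libaio.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static io_context_t ctx;

static void *getter(void *unused)
{
	struct io_event ev;
	/* Nothing is in flight, so this blocks in the kernel... */
	int ret = io_getevents(ctx, 1, 1, &ev, NULL);
	/* ...until io_destroy() marks the context dead and wakes us. */
	printf("io_getevents() returned %d\n", ret);	/* -EINVAL after the fix */
	return NULL;
}

int main(void)
{
	pthread_t t;

	if (io_setup(8, &ctx) < 0)
		return 1;
	pthread_create(&t, NULL, getter, NULL);
	sleep(1);		/* let the getter block first */
	io_destroy(ctx);	/* ctx->dead = 1, then the wakeup */
	pthread_join(t, NULL);
	return 0;
}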
put_ioctx(ioctx);
}
+ asmlinkage_protect(5, ret, ctx_id, min_nr, nr, events, timeout);
return ret;
}
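
The final hunk protects the five syscall arguments of sys_io_getevents(): on x86-32, asmlinkage arguments live in the caller-owned stack frame, and without the macro the compiler may emit a tail call or reuse those argument slots as scratch space, corrupting the caller's stack. The sketch below shows the underlying trick with a hypothetical two-argument macro; protect2() and sys_demo() are illustrative, not the kernel's verbatim definitions:

#include <stdio.h>

/* An empty volatile asm that "reads" a and b from memory and feeds
 * ret through a register, so the compiler must keep the argument
 * slots intact and cannot tail-call past this point. */
#define protect2(ret, a, b) \
	__asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), "m" (a), "m" (b))

long sys_demo(long a, long b)
{
	long ret = a + b;

	protect2(ret, a, b);	/* a and b stay live until here */
	return ret;
}

int main(void)
{
	printf("%ld\n", sys_demo(2, 3));	/* prints 5 */
	return 0;
}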