        atomic_t pending;
        int error;
        sector_t sector;
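+       /* io of the first fragment for split requests; NULL otherwise */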
+       struct dm_crypt_io *base_io;
 };
 
 struct dm_crypt_request {
        io->base_bio = bio;
        io->sector = sector;
        io->error = 0;
+       io->base_io = NULL;
        atomic_set(&io->pending, 0);
 
        return io;
 }

 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
+ * If base_io is set, completion is deferred to the base io,
+ * which ends the base bio once the last fragment finishes.
  */
 static void crypt_dec_pending(struct dm_crypt_io *io)
 {
        struct crypt_config *cc = io->target->private;

        if (!atomic_dec_and_test(&io->pending))
                return;
 
-       bio_endio(io->base_bio, io->error);
+       if (likely(!io->base_io))
+               bio_endio(io->base_bio, io->error);
+       else {
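+               /*
+                * A later fragment finished: record the first error seen
+                * and release its reference on the base io.
+                */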
+               if (io->error && !io->base_io->error)
+                       io->base_io->error = io->error;
+               crypt_dec_pending(io->base_io);
+       }
+
        mempool_free(io, cc->io_pool);
 }
 
 {
        struct crypt_config *cc = io->target->private;
        struct bio *clone;
+       struct dm_crypt_io *new_io;
        int crypt_finished;
        unsigned out_of_pages = 0;
        unsigned remaining = io->base_bio->bi_size;
        sector_t sector = io->sector;
                if (unlikely(out_of_pages))
                        congestion_wait(WRITE, HZ/100);
 
+               /*
+                * With async crypto it is unsafe to share the crypto context
+                * between fragments, so switch to a new dm_crypt_io structure.
+                */
+               if (unlikely(!crypt_finished && remaining)) {
+                       new_io = crypt_io_alloc(io->target, io->base_bio,
+                                               sector);
+                       crypt_inc_pending(new_io);
+                       crypt_convert_init(cc, &new_io->ctx, NULL,
+                                          io->base_bio, sector);
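+                       /*
+                        * Start the new context where the previous
+                        * fragment stopped reading the input bio.
+                        */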
+                       new_io->ctx.idx_in = io->ctx.idx_in;
+                       new_io->ctx.offset_in = io->ctx.offset_in;
+
+                       /*
+                        * Fragments after the first use the base_io
+                        * pending count.
+                        */
+                       if (!io->base_io)
+                               new_io->base_io = io;
+                       else {
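+                               /*
+                                * io is itself a later fragment: chain the
+                                * new fragment to the same base io, take a
+                                * reference on it and drop ours on io.
+                                */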
+                               new_io->base_io = io->base_io;
+                               crypt_inc_pending(io->base_io);
+                               crypt_dec_pending(io);
+                       }
+
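+                       /* Continue the write loop using the new fragment. */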
+                       io = new_io;
+               }
+
                if (unlikely(remaining))
                        wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
        }