        struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
                                                             GFP_NOIO);
 
+       atomic_inc(&s->pending_exceptions_count);
        pe->snap = s;
 
        return pe;
 }
 
 static void free_pending_exception(struct dm_snap_pending_exception *pe)
 {
-       mempool_free(pe, pe->snap->pending_pool);
+       struct dm_snapshot *s = pe->snap;
+
+       mempool_free(pe, s->pending_pool);
+       smp_mb__before_atomic_dec();
+       atomic_dec(&s->pending_exceptions_count);
 }
 
 static void insert_completed_exception(struct dm_snapshot *s,
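
Aside (not part of the patch): the two hunks above form a simple lifetime counter. The allocator bumps pending_exceptions_count before the object is handed out; the free path returns the object to the pool, issues a barrier, and only then decrements, so a destructor that observes the count at zero knows the pool is quiescent. Below is a minimal user-space sketch of the same pattern, assuming C11 atomics; struct snap, alloc_pending and free_pending are illustrative names, and malloc/free stand in for mempool_alloc/mempool_free:

	#include <stdatomic.h>
	#include <stdlib.h>

	struct snap {
		atomic_int pending_count;	/* stands in for pending_exceptions_count */
	};

	struct pending {
		struct snap *snap;
	};

	static struct pending *alloc_pending(struct snap *s)
	{
		struct pending *pe = malloc(sizeof(*pe));	/* mempool_alloc stand-in */

		if (!pe)
			return NULL;	/* mempool_alloc(GFP_NOIO) cannot fail; malloc can */

		/* Count the object before it is handed out, as the patch does. */
		atomic_fetch_add(&s->pending_count, 1);
		pe->snap = s;
		return pe;
	}

	static void free_pending(struct pending *pe)
	{
		/* Save the backpointer first: pe must not be touched after free(). */
		struct snap *s = pe->snap;

		free(pe);	/* mempool_free stand-in */
		/*
		 * Release ordering plays the role of smp_mb__before_atomic_dec():
		 * the decrement must not become visible before the free has
		 * finished, or the destructor could tear the pool down while
		 * it is still in use.
		 */
		atomic_fetch_sub_explicit(&s->pending_count, 1, memory_order_release);
	}

The kernel patch uses a full barrier, which is stronger than the release ordering strictly needed on the free side.
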
@@ ... @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
        s->valid = 1;
        s->active = 0;
+       atomic_set(&s->pending_exceptions_count, 0);
        init_rwsem(&s->lock);
        spin_lock_init(&s->pe_lock);
        s->ti = ti;
@@ ... @@ static void snapshot_dtr(struct dm_target *ti)
        /* After this returns there can be no new kcopyd jobs. */
        unregister_snapshot(s);
 
+       while (atomic_read(&s->pending_exceptions_count))
+               yield();
+       /*
+        * Ensure instructions in mempool_destroy aren't reordered
+        * before atomic_read.
+        */
+       smp_mb();
+
 #ifdef CONFIG_DM_DEBUG
        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
                BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
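
Aside (not part of the patch): the destructor side of the pattern is the wait loop and smp_mb() above; the full barrier pairs with smp_mb__before_atomic_dec() in free_pending_exception so that mempool_destroy cannot run against a pool whose last free has not yet completed. A user-space sketch of the same teardown, again assuming C11 atomics; snap_destroy is an illustrative name and sched_yield() stands in for the kernel's yield():

	#include <stdatomic.h>
	#include <sched.h>

	struct snap {
		atomic_int pending_count;	/* stands in for pending_exceptions_count */
	};

	static void snap_destroy(struct snap *s)
	{
		/* Spin until every counted object has been freed. */
		while (atomic_load_explicit(&s->pending_count, memory_order_relaxed))
			sched_yield();

		/*
		 * Acquire fence mirrors the smp_mb() in the patch: nothing in
		 * the teardown below may be reordered before the final read of
		 * the counter.  It pairs with the release decrement in the
		 * free path.
		 */
		atomic_thread_fence(memory_order_acquire);

		/* ... safe to tear the pool down here (mempool_destroy in the patch) ... */
	}
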