wait_event(bitmap->mddev->recovery_wait,
                   atomic_read(&bitmap->mddev->recovery_active) == 0);
 
+       bitmap->mddev->curr_resync_completed = bitmap->mddev->curr_resync;
+       set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
        sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1);
        s = 0;
        while (s < sector && s < bitmap->mddev->resync_max_sectors) {
                bitmap_end_sync(bitmap, s, &blocks, 0);
                s += blocks;
        }
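
The hunk above is in bitmap_cond_end_sync(): because resync requests can complete out of order, curr_resync by itself is not a safe restart point, so the code first waits for recovery_active to drop to zero (no resync I/O in flight) and only then records curr_resync as a checkpoint and marks the superblock dirty. Below is a minimal userspace sketch of the same quiesce-then-publish pattern; the names (resync_active, publish_checkpoint, the condition variable) are mine, not kernel API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int    resync_active;            /* in-flight requests */
static atomic_ullong curr_resync;              /* last block scheduled */
static atomic_ullong curr_resync_completed;    /* safe checkpoint */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  drained = PTHREAD_COND_INITIALIZER;

static void *worker(void *arg)                 /* one resync request */
{
        (void)arg;
        /* ... copy the data ... */
        pthread_mutex_lock(&lock);
        if (atomic_fetch_sub(&resync_active, 1) == 1)
                pthread_cond_broadcast(&drained);   /* last one out */
        pthread_mutex_unlock(&lock);
        return NULL;
}

static void publish_checkpoint(void)           /* cf. the hunk above */
{
        pthread_mutex_lock(&lock);
        while (atomic_load(&resync_active) != 0)    /* quiesce first */
                pthread_cond_wait(&drained, &lock);
        /* nothing is in flight, so every block below curr_resync is done */
        atomic_store(&curr_resync_completed, atomic_load(&curr_resync));
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        pthread_t t[4];
        atomic_store(&curr_resync, 4096);
        atomic_store(&resync_active, 4);
        for (int i = 0; i < 4; i++)
                pthread_create(&t[i], NULL, worker, NULL);
        publish_checkpoint();
        for (int i = 0; i < 4; i++)
                pthread_join(t[i], NULL);
        printf("checkpoint at %llu\n",
               (unsigned long long)atomic_load(&curr_resync_completed));
        return 0;
}

The decrement and the broadcast happen under the same mutex so the checkpointing thread cannot miss the final wakeup. (The next hunk moves to super_1_sync() in md.c.)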
 
        if (rdev->raid_disk >= 0 &&
-           !test_bit(In_sync, &rdev->flags) &&
-           rdev->recovery_offset > 0) {
-               sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
-               sb->recovery_offset = cpu_to_le64(rdev->recovery_offset);
+           !test_bit(In_sync, &rdev->flags)) {
+               if (mddev->curr_resync_completed > rdev->recovery_offset)
+                       rdev->recovery_offset = mddev->curr_resync_completed;
+               if (rdev->recovery_offset > 0) {
+                       sb->feature_map |=
+                               cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
+                       sb->recovery_offset =
+                               cpu_to_le64(rdev->recovery_offset);
+               }
        }
 
        if (mddev->reshape_position != MaxSector) {
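
That change is in super_1_sync(), which folds the checkpoint into the version-1 superblock: recovery_offset only ever moves forward, and it is stored little-endian behind the MD_FEATURE_RECOVERY_OFFSET feature bit so older code that does not know the field will ignore it. Here is a sketch of the same advance-monotonically-then-persist idiom, assuming a made-up sb_lite struct and feature bit in place of the real superblock layout (htole32/htole64/le64toh are glibc, from <endian.h>).

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define FEAT_RECOVERY_OFFSET 0x1          /* stand-in feature bit */

struct sb_lite {                          /* illustrative, not the real layout */
        uint32_t feature_map;             /* little-endian on disk */
        uint64_t recovery_offset;         /* sectors, little-endian on disk */
};

static void sync_checkpoint(struct sb_lite *sb, uint64_t *recovery_offset,
                            uint64_t curr_resync_completed)
{
        /* the checkpoint may only move forward; a stale in-memory value
         * must never shrink what an earlier superblock write recorded */
        if (curr_resync_completed > *recovery_offset)
                *recovery_offset = curr_resync_completed;
        if (*recovery_offset > 0) {
                sb->feature_map |= htole32(FEAT_RECOVERY_OFFSET);
                sb->recovery_offset = htole64(*recovery_offset);
        }
}

int main(void)
{
        struct sb_lite sb = {0};
        uint64_t recovery_offset = 0;
        sync_checkpoint(&sb, &recovery_offset, 2048);
        printf("on-disk offset: %llu\n",
               (unsigned long long)le64toh(sb.recovery_offset));
        return 0;
}

(The following hunk is in md_do_sync().)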
                }
                if (kthread_should_stop())
                        goto interrupted;
+
+               if (mddev->curr_resync > mddev->curr_resync_completed &&
+                   (mddev->curr_resync - mddev->curr_resync_completed)
+                   > (max_sectors >> 4)) {
+                       /* time to update curr_resync_completed */
+                       blk_unplug(mddev->queue);
+                       wait_event(mddev->recovery_wait,
+                                  atomic_read(&mddev->recovery_active) == 0);
+                       mddev->curr_resync_completed =
+                               mddev->curr_resync;
+                       set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+               }
                sectors = mddev->pers->sync_request(mddev, j, &skipped,
                                                  currspeed < speed_min(mddev));
                if (sectors == 0) {
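
In md_do_sync() above, the drain is throttled: quiescing the array stalls the resync, so a fresh checkpoint is taken only after progressing more than max_sectors >> 4, one sixteenth of the pass, beyond the previous one, and blk_unplug() is called first so queued resync I/O actually gets submitted and can complete. The predicate, pulled out for illustration (the function name is mine):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Checkpoint at most ~16 times per pass; the first test also guards the
 * unsigned subtraction against curr_resync lagging a stale checkpoint. */
static bool time_to_checkpoint(uint64_t curr_resync,
                               uint64_t curr_resync_completed,
                               uint64_t max_sectors)
{
        return curr_resync > curr_resync_completed &&
               curr_resync - curr_resync_completed > (max_sectors >> 4);
}

int main(void)
{
        assert(!time_to_checkpoint(100, 100, 16000));   /* no progress */
        assert(!time_to_checkpoint(900, 0, 16000));     /* under 1/16th */
        assert(time_to_checkpoint(1001, 0, 16000));     /* past 1/16th */
        return 0;
}

(The next hunk is in remove_and_add_spares().)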
        mdk_rdev_t *rdev;
        int spares = 0;
 
+       mddev->curr_resync_completed = 0;
+
        list_for_each_entry(rdev, &mddev->disks, same_set)
                if (rdev->raid_disk >= 0 &&
                    !test_bit(Blocked, &rdev->flags) &&
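
remove_and_add_spares() runs as a new recovery pass is being set up, so the hunk above zeroes curr_resync_completed there; presumably this keeps a checkpoint left over from an earlier pass from leaking through super_1_sync() into the recovery_offset of a freshly added spare. (The final hunk adds the field itself to the mddev structure.)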
 
        struct mdk_thread_s             *thread;        /* management thread */
        struct mdk_thread_s             *sync_thread;   /* doing resync or reconstruct */
        sector_t                        curr_resync;    /* last block scheduled */
+       /* As resync requests can complete out of order, we cannot easily track
+        * how much resync has been completed.  So we occasionally pause until
+        * everything completes, then set curr_resync_completed to curr_resync.
+        * As such it may be well behind the real resync mark, but it is a value
+        * we are certain of.
+        */
+       sector_t                        curr_resync_completed;
        unsigned long                   resync_mark;    /* a recent timestamp */
        sector_t                        resync_mark_cnt;/* blocks written at resync_mark */
        sector_t                        curr_mark_cnt; /* blocks scheduled now */
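
To make the new comment's point concrete: if windows 0-1023 and 1024-2047 are both in flight, curr_resync is already 2048; should only the second window have completed when the machine dies, sectors 0-1023 were never written, so restarting from curr_resync would silently skip them. curr_resync_completed never moves past a drain point, so restarting from it is always correct, merely conservative, which is exactly the "value we are certain of" the comment describes.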