char *ptr, *buf = NULL;
        int err = -ENOMEM;
 
-       md_allow_write(mddev);
+       if (md_allow_write(mddev))
+               file = kmalloc(sizeof(*file), GFP_NOIO);
+       else
+               file = kmalloc(sizeof(*file), GFP_KERNEL);
 
-       file = kmalloc(sizeof(*file), GFP_KERNEL);
        if (!file)
                goto out;
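
The fallback above is the caller-side half of the new contract: when md_allow_write() cannot complete the clean-to-active transition (the ->external case documented below), writes to the array may still block, so a GFP_KERNEL allocation, which can wait on writeback to that same array, is unsafe while mddev_lock is held; GFP_NOIO avoids that recursion. (The rationale is inferred from the change and the comment that follows.)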
 
  * may proceed without blocking.  It is important to call this before
  * attempting a GFP_KERNEL allocation while holding the mddev lock.
  * Must be called with mddev_lock held.
+ *
+ * In the ->external case MD_CHANGE_CLEAN cannot be cleared until mddev->lock
+ * is dropped, so return -EAGAIN after notifying userspace.
  */
-void md_allow_write(mddev_t *mddev)
+int md_allow_write(mddev_t *mddev)
 {
        if (!mddev->pers)
-               return;
+               return 0;
        if (mddev->ro)
-               return;
+               return 0;
        if (!mddev->pers->sync_request)
-               return;
+               return 0;
 
        spin_lock_irq(&mddev->write_lock);
        if (mddev->in_sync) {
                        mddev->safemode = 1;
                spin_unlock_irq(&mddev->write_lock);
                md_update_sb(mddev, 0);
-
                sysfs_notify(&mddev->kobj, NULL, "array_state");
-               /* wait for the dirty state to be recorded in the metadata */
-               wait_event(mddev->sb_wait,
-                          !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
-                          !test_bit(MD_CHANGE_PENDING, &mddev->flags));
        } else
                spin_unlock_irq(&mddev->write_lock);
+
+       if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
+               return -EAGAIN;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(md_allow_write);
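
For illustration, a minimal sketch of the two caller patterns the int return enables. The function names are hypothetical and only the md_allow_write()/kmalloc() calls come from this patch; the sketch assumes the md driver's internal types (mddev_t) and <linux/slab.h>, and that the caller holds mddev_lock as required:

	/* Pattern 1: fail the operation and let the caller retry after
	 * dropping mddev_lock (the raid1/raid5 hunks below).
	 */
	static int example_reshape(mddev_t *mddev)
	{
		int err = md_allow_write(mddev);

		if (err)
			return err;	/* typically -EAGAIN */
		/* ... GFP_KERNEL allocations are now safe ... */
		return 0;
	}

	/* Pattern 2: proceed anyway, but avoid allocations that can
	 * recurse into I/O (the get_bitmap_file() hunk above).
	 */
	static int example_copy_out(mddev_t *mddev)
	{
		char *buf;

		if (md_allow_write(mddev))
			buf = kmalloc(PAGE_SIZE, GFP_NOIO);
		else
			buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		/* ... fill buf and copy it out ... */
		kfree(buf);
		return 0;
	}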
 
 
        conf_t *conf = mddev_to_conf(mddev);
        int cnt, raid_disks;
        unsigned long flags;
-       int d, d2;
+       int d, d2, err;
 
        /* Cannot change chunk_size, layout, or level */
        if (mddev->chunk_size != mddev->new_chunk ||
                return -EINVAL;
        }
 
-       md_allow_write(mddev);
+       err = md_allow_write(mddev);
+       if (err)
+               return err;
 
        raid_disks = mddev->raid_disks + mddev->delta_disks;
 
 
        struct stripe_head *osh, *nsh;
        LIST_HEAD(newstripes);
        struct disk_info *ndisks;
-       int err = 0;
+       int err;
        struct kmem_cache *sc;
        int i;
 
        if (newsize <= conf->pool_size)
                return 0; /* never bother to shrink */
 
-       md_allow_write(conf->mddev);
+       err = md_allow_write(conf->mddev);
+       if (err)
+               return err;
 
        /* Step 1 */
        sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
 {
        raid5_conf_t *conf = mddev_to_conf(mddev);
        unsigned long new;
+       int err;
+
        if (len >= PAGE_SIZE)
                return -EINVAL;
        if (!conf)
                else
                        break;
        }
-       md_allow_write(mddev);
+       err = md_allow_write(mddev);
+       if (err)
+               return err;
        while (new > conf->max_nr_stripes) {
                if (grow_one_stripe(conf))
                        conf->max_nr_stripes++;
 
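One visible consequence: a write to a sysfs attribute such as stripe_cache_size can now fail with EAGAIN while the external-metadata monitor has not yet acted on the array_state notification. A hypothetical userspace retry loop (not part of the patch; the fd and the written value are illustrative):

	#include <errno.h>
	#include <unistd.h>

	static int set_stripe_cache_size(int fd)
	{
		/* EAGAIN from the store method only means "the monitor has
		 * not acted yet"; back off briefly and retry.
		 */
		while (write(fd, "4096", 4) < 0) {
			if (errno != EAGAIN)
				return -1;
			usleep(1000);
		}
		return 0;
	}
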
                        struct page *page, int rw);
 extern void md_do_sync(mddev_t *mddev);
 extern void md_new_event(mddev_t *mddev);
-extern void md_allow_write(mddev_t *mddev);
+extern int md_allow_write(mddev_t *mddev);
 extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
 
 #endif /* CONFIG_MD */