        if (chp_get_status(chpid) <= 0)
                return;
+       /* Wait until previous actions have settled. */
+       css_wait_for_slow_path();
        for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
 }
 
                sprintf(dbf_txt, "fla%x", res_data->fla);
                CIO_TRACE_EVENT( 2, dbf_txt);
        }
-
+       /* Wait until previous actions have settled. */
+       css_wait_for_slow_path();
        /*
         * I/O resources may have become accessible.
         * Scan through all subchannels that may be concerned and
        sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);
 
-       if (chp_get_status(chpid) != 0)
+       if (chp_get_status(chpid) != 0) {
+               /* Wait until previous actions have settled. */
+               css_wait_for_slow_path();
                for_each_subchannel_staged(__chp_add, __chp_add_new_sch,
                                           &chpid);
+       }
 }
 
 static void __s390_subchannel_vary_chpid(struct subchannel *sch,
  */
 int chsc_chp_vary(struct chp_id chpid, int on)
 {
+       /* Wait until previous actions have settled. */
+       css_wait_for_slow_path();
        /*
         * Redo PathVerification on the devices the chpid connects to
         */
 
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
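+/*
+ * css_wait_for_slow_path - wait for pending channel events to settle
+ *
+ * Flush the device notification and slow path workqueues so that any
+ * work queued in response to earlier channel events has been processed
+ * before the caller continues.
+ */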
+void css_wait_for_slow_path(void)
+{
+       flush_workqueue(ccw_device_notify_work);
+       flush_workqueue(slow_path_wq);
+}
+
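The helper is defined in css.c but called from the chsc code above, so a
matching prototype has to be visible to those callers. A minimal sketch of
that declaration, assuming it is exported through drivers/s390/cio/css.h
(the header is not part of this excerpt, so its exact location is an
assumption):

        /* drivers/s390/cio/css.h: assumed home of the prototype */
        extern void css_wait_for_slow_path(void);
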
 /* Reprobe subchannel if unregistered. */
 static int reprobe_subchannel(struct subchannel_id schid, void *data)
 {