* Need to know about CPUs going idle?
  */
 static ATOMIC_NOTIFIER_HEAD(idle_chain);
+DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
 
 int register_idle_notifier(struct notifier_block *nb)
 {
 	return atomic_notifier_chain_register(&idle_chain, nb);
 }
 EXPORT_SYMBOL(register_idle_notifier);
 
 int unregister_idle_notifier(struct notifier_block *nb)
 {
 	return atomic_notifier_chain_unregister(&idle_chain, nb);
 }
 EXPORT_SYMBOL(unregister_idle_notifier);
 
-void do_monitor_call(struct pt_regs *regs, long interruption_code)
+static int s390_idle_enter(void)
+{
+       struct s390_idle_data *idle;
+       int nr_calls = 0;
+       void *hcpu;
+       int rc;
+
+       hcpu = (void *)(long)smp_processor_id();
+       rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1,
+                                         &nr_calls);
+       if (rc == NOTIFY_BAD) {
+               nr_calls--;
+               __atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
+                                            hcpu, nr_calls, NULL);
+               return rc;
+       }
+       idle = &__get_cpu_var(s390_idle);
+       spin_lock(&idle->lock);
+       idle->idle_count++;
+       idle->in_idle = 1;
+       idle->idle_enter = get_clock();
+       spin_unlock(&idle->lock);
+       return NOTIFY_OK;
+}
+
+void s390_idle_leave(void)
 {
-#ifdef CONFIG_SMP
        struct s390_idle_data *idle;
 
        idle = &__get_cpu_var(s390_idle);
 	spin_lock(&idle->lock);
 	idle->idle_time += get_clock() - idle->idle_enter;
        idle->in_idle = 0;
        spin_unlock(&idle->lock);
-#endif
-       /* disable monitor call class 0 */
-       __ctl_clear_bit(8, 15);
-
        atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
                                   (void *)(long) smp_processor_id());
 }
  */
 static void default_idle(void)
 {
-       int cpu, rc;
-       int nr_calls = 0;
-       void *hcpu;
-#ifdef CONFIG_SMP
-       struct s390_idle_data *idle;
-#endif
-
        /* CPU is going idle. */
-       cpu = smp_processor_id();
-       hcpu = (void *)(long)cpu;
        local_irq_disable();
        if (need_resched()) {
                local_irq_enable();
                return;
        }
-
-       rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1,
-                                         &nr_calls);
-       if (rc == NOTIFY_BAD) {
-               nr_calls--;
-               __atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
-                                            hcpu, nr_calls, NULL);
+       if (s390_idle_enter() == NOTIFY_BAD) {
                local_irq_enable();
                return;
        }
-
-       /* enable monitor call class 0 */
-       __ctl_set_bit(8, 15);
-
 #ifdef CONFIG_HOTPLUG_CPU
-       if (cpu_is_offline(cpu)) {
+       if (cpu_is_offline(smp_processor_id())) {
                preempt_enable_no_resched();
                cpu_die();
        }
 #endif
-
        local_mcck_disable();
        if (test_thread_flag(TIF_MCCK_PENDING)) {
                local_mcck_enable();
-               /* disable monitor call class 0 */
-               __ctl_clear_bit(8, 15);
-               atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
-                                          hcpu);
+               s390_idle_leave();
                local_irq_enable();
                s390_handle_mcck();
                return;
        }
-#ifdef CONFIG_SMP
-       idle = &__get_cpu_var(s390_idle);
-       spin_lock(&idle->lock);
-       idle->idle_count++;
-       idle->in_idle = 1;
-       idle->idle_enter = get_clock();
-       spin_unlock(&idle->lock);
-#endif
        trace_hardirqs_on();
        /* Wait for external, I/O or machine check interrupt. */
        __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |