for (irq = 0; irq < NR_IRQS; irq++) {
                cpumask_t mask;
+               int break_affinity = 0;
+               int set_affinity = 1;
+
                if (irq == 2)
                        continue;
 
+               /* interrupts are disabled at this point */
+               spin_lock(&irq_desc[irq].lock);
+
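+               /* Skip IRQs that have no handler or whose affinity already matches the surviving CPU map */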
+               if (!irq_has_action(irq) ||
+                   cpus_equal(irq_desc[irq].affinity, map)) {
+                       spin_unlock(&irq_desc[irq].lock);
+                       continue;
+               }
+
                cpus_and(mask, irq_desc[irq].affinity, map);
-               if (any_online_cpu(mask) == NR_CPUS) {
-                       printk("Breaking affinity for irq %i\n", irq);
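+               /* No online CPU left in this irq's affinity mask: break affinity and retarget it to the surviving CPUs */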
+               if (cpus_empty(mask)) {
+                       break_affinity = 1;
                        mask = map;
                }
+
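+               /* Keep the irq masked while its affinity is being changed */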
+               if (irq_desc[irq].chip->mask)
+                       irq_desc[irq].chip->mask(irq);
+
                if (irq_desc[irq].chip->set_affinity)
                        irq_desc[irq].chip->set_affinity(irq, mask);
-               else if (irq_desc[irq].action && !(warned++))
+               else if (!(warned++))
+                       set_affinity = 0;
+
+               if (irq_desc[irq].chip->unmask)
+                       irq_desc[irq].chip->unmask(irq);
+
+               spin_unlock(&irq_desc[irq].lock);
+
+               if (break_affinity && set_affinity)
+                       printk("Broke affinity for irq %i\n", irq);
+               else if (!set_affinity)
                        printk("Cannot set affinity for irq %i\n", irq);
        }