{
        int index;
        struct irq_2_iommu *irq_iommu;
+       unsigned long flags;
 
        if (!entry)
                return -1;
 
-       spin_lock(&irq_2_ir_lock);
+       spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
-               spin_unlock(&irq_2_ir_lock);
+               spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }
 
        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        *entry = *(irq_iommu->iommu->ir_table->base + index);
 
-       spin_unlock(&irq_2_ir_lock);
+       spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return 0;
 }
 
        struct irq_2_iommu *irq_iommu;
        u16 index, start_index;
        unsigned int mask = 0;
+       unsigned long flags;
        int i;
 
        if (!count)
                return -1;
        }
 
-       spin_lock(&irq_2_ir_lock);
+       spin_lock_irqsave(&irq_2_ir_lock, flags);
        do {
                for (i = index; i < index + count; i++)
                        if  (table->base[i].present)
                index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
 
                if (index == start_index) {
-                       spin_unlock(&irq_2_ir_lock);
+                       spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                        printk(KERN_ERR "can't allocate an IRTE\n");
                        return -1;
                }
 
        irq_iommu = irq_2_iommu_alloc(irq);
        if (!irq_iommu) {
-               spin_unlock(&irq_2_ir_lock);
+               spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                printk(KERN_ERR "can't allocate irq_2_iommu\n");
                return -1;
        }
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = mask;
 
-       spin_unlock(&irq_2_ir_lock);
+       spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
        return index;
 }
 {
        int index;
        struct irq_2_iommu *irq_iommu;
+       unsigned long flags;
 
-       spin_lock(&irq_2_ir_lock);
+       spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
-               spin_unlock(&irq_2_ir_lock);
+               spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }
 
        *sub_handle = irq_iommu->sub_handle;
        index = irq_iommu->irte_index;
-       spin_unlock(&irq_2_ir_lock);
+       spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return index;
 }
 
 int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 {
        struct irq_2_iommu *irq_iommu;
+       unsigned long flags;
 
-       spin_lock(&irq_2_ir_lock);
+       spin_lock_irqsave(&irq_2_ir_lock, flags);
 
        irq_iommu = irq_2_iommu_alloc(irq);
 
        if (!irq_iommu) {
-               spin_unlock(&irq_2_ir_lock);
+               spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                printk(KERN_ERR "can't allocate irq_2_iommu\n");
                return -1;
        }
        irq_iommu->sub_handle = subhandle;
        irq_iommu->irte_mask = 0;
 
-       spin_unlock(&irq_2_ir_lock);
+       spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
        return 0;
 }
 int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
 {
        struct irq_2_iommu *irq_iommu;
+       unsigned long flags;
 
-       spin_lock(&irq_2_ir_lock);
+       spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
-               spin_unlock(&irq_2_ir_lock);
+               spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }
 
        irq_iommu->sub_handle = 0;
        irq_2_iommu(irq)->irte_mask = 0;
 
-       spin_unlock(&irq_2_ir_lock);
+       spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
        return 0;
 }
        struct irte *irte;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;
+       unsigned long flags;
 
-       spin_lock(&irq_2_ir_lock);
+       spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
-               spin_unlock(&irq_2_ir_lock);
+               spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }
 
        __iommu_flush_cache(iommu, irte, sizeof(*irte));
 
        rc = qi_flush_iec(iommu, index, 0);
-       spin_unlock(&irq_2_ir_lock);
+       spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
        return rc;
 }
        int index;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;
+       unsigned long flags;
 
-       spin_lock(&irq_2_ir_lock);
+       spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
-               spin_unlock(&irq_2_ir_lock);
+               spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }
 
        index = irq_iommu->irte_index + irq_iommu->sub_handle;
 
        rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
-       spin_unlock(&irq_2_ir_lock);
+       spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
        return rc;
 }
        struct irte *irte;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;
+       unsigned long flags;
 
-       spin_lock(&irq_2_ir_lock);
+       spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
-               spin_unlock(&irq_2_ir_lock);
+               spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }
 
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;
 
-       spin_unlock(&irq_2_ir_lock);
+       spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
        return rc;
 }
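
For reference only, and not part of the patch itself: a minimal sketch of the spin_lock_irqsave()/spin_unlock_irqrestore() pattern the hunks above apply. The conversion follows the usual rule that when a spinlock can also be taken from interrupt context, process-context holders must disable local interrupts while holding it, otherwise an interrupt arriving on the same CPU could try to re-acquire the lock and self-deadlock. The lock and function names below are hypothetical, not taken from intr_remapping.c.

	/*
	 * Illustrative sketch of the irqsave locking pattern.
	 * Hypothetical names; assumes the lock may also be taken in irq context.
	 */
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);

	static int example_lookup(void)
	{
		unsigned long flags;
		int ret;

		/* save the current irq state, disable local irqs, take the lock */
		spin_lock_irqsave(&example_lock, flags);

		/* ... critical section that may race with irq-context users ... */
		ret = 0;

		/* drop the lock and restore the previously saved irq state */
		spin_unlock_irqrestore(&example_lock, flags);

		return ret;
	}

Using the irqsave/irqrestore pair (rather than plain spin_lock_irq()) also keeps the code correct when the caller may already have interrupts disabled, since the previous interrupt state is restored exactly as it was.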