3 * Purpose: PCI Message Signaled Interrupt (MSI)
5 * Copyright (C) 2003-2004 Intel
6 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
11 #include <linux/irq.h>
12 #include <linux/interrupt.h>
13 #include <linux/init.h>
14 #include <linux/ioport.h>
15 #include <linux/smp_lock.h>
16 #include <linux/pci.h>
17 #include <linux/proc_fs.h>
18 #include <linux/msi.h>
20 #include <asm/errno.h>
/* Protects the global irq -> msi_desc table below. */
27 static DEFINE_SPINLOCK(msi_lock);
/* Per-irq MSI descriptor table, indexed by irq number; NULL = no MSI entry. */
28 static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
/* Slab cache for struct msi_desc allocations (created in msi_cache_init()). */
29 static kmem_cache_t* msi_cachep;
/* Global MSI enable switch; tested in pci_msi_supported(). */
31 static int pci_msi_enable = 1;
/*
 * Create the slab cache used by alloc_msi_entry().  Returns 0 on success,
 * presumably -ENOMEM on failure -- the return path is elided in this listing.
 */
33 static int msi_cache_init(void)
35 	msi_cachep = kmem_cache_create("msi_cache", sizeof(struct msi_desc),
36 				0, SLAB_HWCACHE_ALIGN, NULL, NULL);
/*
 * Mask (@flag != 0) or unmask (@flag == 0) the interrupt behind @irq.
 * Dispatches on the capability type: for MSI with a per-vector mask bit the
 * mask lives in config space (read-modify-write; the modify line is elided
 * here); for MSI-X the mask is the Vector Control word of the entry in the
 * memory-mapped table.
 */
43 static void msi_set_mask_bit(unsigned int irq, int flag)
45 	struct msi_desc *entry;
47 	entry = msi_desc[irq];
/* A masked irq must already have a descriptor bound to a device. */
48 	BUG_ON(!entry || !entry->dev);
49 	switch (entry->msi_attrib.type) {
/* MSI case: only devices advertising Per-Vector Masking have a mask reg. */
51 		if (entry->msi_attrib.maskbit) {
/* For MSI, mask_base holds a config-space offset, not an ioremapped ptr. */
55 			pos = (long)entry->mask_base;
56 			pci_read_config_dword(entry->dev, pos, &mask_bits);
59 			pci_write_config_dword(entry->dev, pos, mask_bits);
/* MSI-X case: write Vector Control in the entry's table slot. */
64 		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
65 			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
66 		writel(flag, entry->mask_base + offset);
/*
 * Read the currently-programmed MSI message (address + data) for @irq back
 * from the hardware into @msg.  MSI messages live in config space (with an
 * optional upper-address dword for 64-bit capable functions); MSI-X messages
 * live in the memory-mapped vector table.  Several assembly lines for the
 * MSI branch are elided in this listing.
 */
75 void read_msi_msg(unsigned int irq, struct msi_msg *msg)
77 	struct msi_desc *entry = get_irq_data(irq);
78 	switch(entry->msi_attrib.type) {
81 		struct pci_dev *dev = entry->dev;
82 		int pos = entry->msi_attrib.pos;
85 		pci_read_config_dword(dev, msi_lower_address_reg(pos),
/* 64-bit address capable: data register sits after the upper address. */
87 		if (entry->msi_attrib.is_64) {
88 			pci_read_config_dword(dev, msi_upper_address_reg(pos),
90 			pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
93 			pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
/* MSI-X: message is in the ioremapped table entry for this vector. */
101 		base = entry->mask_base +
102 			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
104 		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
105 		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
106 		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
/*
 * Program the MSI message (address + data) in @msg into the hardware for
 * @irq -- the inverse of read_msi_msg().  MSI goes to config space (data
 * register position depends on 64-bit address capability); MSI-X goes to
 * the memory-mapped vector table entry.
 */
114 void write_msi_msg(unsigned int irq, struct msi_msg *msg)
116 	struct msi_desc *entry = get_irq_data(irq);
117 	switch (entry->msi_attrib.type) {
120 		struct pci_dev *dev = entry->dev;
121 		int pos = entry->msi_attrib.pos;
123 		pci_write_config_dword(dev, msi_lower_address_reg(pos),
/* 64-bit capable: also write upper address; data reg offset shifts. */
125 		if (entry->msi_attrib.is_64) {
126 			pci_write_config_dword(dev, msi_upper_address_reg(pos),
128 			pci_write_config_word(dev, msi_data_reg(pos, 1),
131 			pci_write_config_word(dev, msi_data_reg(pos, 0),
136 	case PCI_CAP_ID_MSIX:
139 		base = entry->mask_base +
140 			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
142 		writel(msg->address_lo,
143 			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
144 		writel(msg->address_hi,
145 			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
146 		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
/* irq_chip helper: mask the MSI/MSI-X interrupt behind @irq. */
154 void mask_msi_irq(unsigned int irq)
156 	msi_set_mask_bit(irq, 1);
/* irq_chip helper: unmask the MSI/MSI-X interrupt behind @irq. */
159 void unmask_msi_irq(unsigned int irq)
161 	msi_set_mask_bit(irq, 0);
/* Forward declaration: msi_free_irq() is defined later in this file. */
164 static int msi_free_irq(struct pci_dev* dev, int irq);
/*
 * One-time MSI subsystem init.  'status' is static so repeat callers get the
 * cached result.  Elided lines appear to check a platform MSI quirk and the
 * cache-init result before the warnings below -- confirm against full source.
 */
165 static int msi_init(void)
167 	static int status = -ENOMEM;
174 		printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n");
179 	status = msi_cache_init();
182 		printk(KERN_WARNING "PCI: MSI cache init failed\n");
/*
 * Allocate a zeroed msi_desc from the slab cache.  The NULL-check and
 * return are elided in this listing.
 */
189 static struct msi_desc* alloc_msi_entry(void)
191 	struct msi_desc *entry;
193 	entry = kmem_cache_zalloc(msi_cachep, GFP_KERNEL);
/* head == tail == 0 marks a single-message (unlinked) entry. */
197 	entry->link.tail = entry->link.head = 0;	/* single message */
/* Publish @entry in the global irq table under msi_lock. */
203 static void attach_msi_entry(struct msi_desc *entry, int irq)
207 	spin_lock_irqsave(&msi_lock, flags);
208 	msi_desc[irq] = entry;
209 	spin_unlock_irqrestore(&msi_lock, flags);
/*
 * Allocate a fresh irq plus its msi_desc and bind them via irq_data.
 * The irq-number allocation itself is elided here; the kmem_cache_free
 * below is the unwind path when that allocation fails.
 */
212 static int create_msi_irq(void)
214 	struct msi_desc *entry;
217 	entry = alloc_msi_entry();
/* Unwind: release the descriptor if no irq could be obtained. */
223 		kmem_cache_free(msi_cachep, entry);
227 	set_irq_data(irq, entry);
/*
 * Tear down what create_msi_irq() built: unhook chip/data from the irq and
 * free the descriptor back to the slab cache.
 */
232 static void destroy_msi_irq(unsigned int irq)
234 	struct msi_desc *entry;
236 	entry = get_irq_data(irq);
237 	set_irq_chip(irq, NULL);
238 	set_irq_data(irq, NULL);
240 	kmem_cache_free(msi_cachep, entry);
/*
 * Set the enable bit in the MSI or MSI-X control register for @dev and
 * record the mode in the pci_dev flags.  For PCI Express endpoints, legacy
 * INTx assertion is also disabled since MSI takes over delivery.
 */
243 static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
247 	pci_read_config_word(dev, msi_control_reg(pos), &control);
248 	if (type == PCI_CAP_ID_MSI) {
249 		/* Set enabled bits to single MSI & enable MSI_enable bit */
250 		msi_enable(control, 1);
251 		pci_write_config_word(dev, msi_control_reg(pos), control);
252 		dev->msi_enabled = 1;
/* else-branch (elided header): same dance for MSI-X. */
254 		msix_enable(control);
255 		pci_write_config_word(dev, msi_control_reg(pos), control);
256 		dev->msix_enabled = 1;
258 	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
259 		/* PCI Express Endpoint device detected */
260 		pci_intx(dev, 0);	/* disable intx */
/*
 * Mirror of enable_msi_mode(): clear the MSI or MSI-X enable bit, clear the
 * pci_dev mode flag, and re-enable legacy INTx on PCI Express endpoints.
 */
264 void disable_msi_mode(struct pci_dev *dev, int pos, int type)
268 	pci_read_config_word(dev, msi_control_reg(pos), &control);
269 	if (type == PCI_CAP_ID_MSI) {
270 		/* Set enabled bits to single MSI & enable MSI_enable bit */
271 		msi_disable(control);
272 		pci_write_config_word(dev, msi_control_reg(pos), control);
273 		dev->msi_enabled = 0;
/* else-branch (elided header): same dance for MSI-X. */
275 		msix_disable(control);
276 		pci_write_config_word(dev, msi_control_reg(pos), control);
277 		dev->msix_enabled = 0;
279 	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
280 		/* PCI Express Endpoint device detected */
281 		pci_intx(dev, 1);	/* enable intx */
/*
 * Scan the global table for an MSI/MSI-X irq of @type already bound to
 * @dev (matched on the device and its saved default irq).  On a hit,
 * dev->irq is overridden with the found irq (assignment elided here).
 * Return-value convention is not visible in this listing -- callers treat
 * zero as "found"; confirm against full source.
 */
285 static int msi_lookup_irq(struct pci_dev *dev, int type)
290 	spin_lock_irqsave(&msi_lock, flags);
291 	for (irq = 0; irq < NR_IRQS; irq++) {
/* Skip slots that don't match this device/type/default-irq triple. */
292 		if (!msi_desc[irq] || msi_desc[irq]->dev != dev ||
293 		    msi_desc[irq]->msi_attrib.type != type ||
294 		    msi_desc[irq]->msi_attrib.default_irq != dev->irq)
296 		spin_unlock_irqrestore(&msi_lock, flags);
297 		/* This pre-assigned MSI irq for this device
298 		   already exits. Override dev->irq with this irq */
302 	spin_unlock_irqrestore(&msi_lock, flags);
/* Probe hook for MSI-capable devices; body entirely elided in this listing. */
307 void pci_scan_msi_device(struct pci_dev *dev)
/*
 * Save the device's MSI capability registers (for suspend/resume) into a
 * pci_cap_saved_state hung off the device.  Only runs when MSI is currently
 * enabled.  Up to 5 dwords are captured: cap header, address lo, optional
 * address hi, data, and optional per-vector mask bits.
 */
314 int pci_save_msi_state(struct pci_dev *dev)
318 	struct pci_cap_saved_state *save_state;
321 	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
322 	if (pos <= 0 || dev->no_msi)
325 	pci_read_config_word(dev, msi_control_reg(pos), &control);
/* Nothing to save unless MSI is actually enabled. */
326 	if (!(control & PCI_MSI_FLAGS_ENABLE))
329 	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
332 		printk(KERN_ERR "Out of memory in pci_save_msi_state\n");
335 	cap = &save_state->data[0];
337 	pci_read_config_dword(dev, pos, &cap[i++]);
/* Message Control word lives in the upper half of the cap header dword. */
338 	control = cap[0] >> 16;
339 	pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]);
340 	if (control & PCI_MSI_FLAGS_64BIT) {
341 		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]);
342 		pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]);
344 		pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
345 	if (control & PCI_MSI_FLAGS_MASKBIT)
346 		pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
347 	save_state->cap_nr = PCI_CAP_ID_MSI;
348 	pci_add_saved_cap(dev, save_state);
/*
 * Restore the MSI capability registers saved by pci_save_msi_state(),
 * re-enable MSI mode, and drop the saved-state record.  Register order
 * mirrors the save path exactly.
 */
352 void pci_restore_msi_state(struct pci_dev *dev)
356 	struct pci_cap_saved_state *save_state;
359 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI);
360 	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
361 	if (!save_state || pos <= 0)
363 	cap = &save_state->data[0];
/* Recover the saved Message Control word from the cap-header dword. */
365 	control = cap[i++] >> 16;
366 	pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]);
367 	if (control & PCI_MSI_FLAGS_64BIT) {
368 		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]);
369 		pci_write_config_dword(dev, pos + PCI_MSI_DATA_64, cap[i++]);
371 		pci_write_config_dword(dev, pos + PCI_MSI_DATA_32, cap[i++]);
372 	if (control & PCI_MSI_FLAGS_MASKBIT)
373 		pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]);
374 	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
375 	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
376 	pci_remove_saved_cap(save_state);
/*
 * Save MSI-X state for suspend: the 16-bit control register goes into a
 * pci_cap_saved_state, and each vector's message is read back into its
 * descriptor's msg_save by walking the device's irq link ring.
 */
380 int pci_save_msix_state(struct pci_dev *dev)
384 	int irq, head, tail = 0;
386 	struct pci_cap_saved_state *save_state;
388 	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
389 	if (pos <= 0 || dev->no_msi)
392 	/* save the capability */
393 	pci_read_config_word(dev, msi_control_reg(pos), &control);
394 	if (!(control & PCI_MSIX_FLAGS_ENABLE))
396 	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16),
399 		printk(KERN_ERR "Out of memory in pci_save_msix_state\n");
402 	*((u16 *)&save_state->data[0]) = control;
406 	if (msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) {
/* Walk the circular link list of this device's MSI-X irqs. */
411 	irq = head = dev->irq;
412 	while (head != tail) {
413 		struct msi_desc *entry;
415 		entry = msi_desc[irq];
/* Stash the live message so restore can reprogram the table. */
416 		read_msi_msg(irq, &entry->msg_save);
418 		tail = msi_desc[irq]->link.tail;
423 	save_state->cap_nr = PCI_CAP_ID_MSIX;
424 	pci_add_saved_cap(dev, save_state);
/*
 * Restore MSI-X state after resume: rewrite each vector's saved message
 * into the table (via the irq link ring), restore the control register,
 * and re-enable MSI-X mode.
 */
428 void pci_restore_msix_state(struct pci_dev *dev)
432 	int irq, head, tail = 0;
433 	struct msi_desc *entry;
435 	struct pci_cap_saved_state *save_state;
437 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX);
440 	save = *((u16 *)&save_state->data[0]);
441 	pci_remove_saved_cap(save_state);
444 	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
448 	/* route the table */
450 	if (msi_lookup_irq(dev, PCI_CAP_ID_MSIX))
451 	irq = head = dev->irq;
453 	while (head != tail) {
454 		entry = msi_desc[irq];
/* Reprogram the table entry from the message saved at suspend. */
455 		write_msi_msg(irq, &entry->msg_save);
457 		tail = msi_desc[irq]->link.tail;
462 	pci_write_config_word(dev, msi_control_reg(pos), save);
463 	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
468 * msi_capability_init - configure device's MSI capability structure
469 * @dev: pointer to the pci_dev data structure of MSI device function
471 * Setup the MSI capability structure of device function with a single
472 * MSI irq, regardless of device function is capable of handling
473 * multiple messages. A return of zero indicates the successful setup
474 * of an entry zero with the new MSI irq or non-zero for otherwise.
/*
 * Configure the device's MSI capability with a single message: allocate an
 * irq + descriptor, fill in msi_attrib from the capability's control word,
 * mask all vectors if per-vector masking is supported, hand the irq to the
 * arch for routing, then flip the MSI enable bit.
 */
476 static int msi_capability_init(struct pci_dev *dev)
479 	struct msi_desc *entry;
483 	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
484 	pci_read_config_word(dev, msi_control_reg(pos), &control);
485 	/* MSI Entry Initialization */
486 	irq = create_msi_irq();
490 	entry = get_irq_data(irq);
/* Single message: the entry links only to itself. */
491 	entry->link.head = irq;
492 	entry->link.tail = irq;
493 	entry->msi_attrib.type = PCI_CAP_ID_MSI;
494 	entry->msi_attrib.is_64 = is_64bit_address(control);
495 	entry->msi_attrib.entry_nr = 0;
496 	entry->msi_attrib.maskbit = is_mask_bit_support(control);
497 	entry->msi_attrib.default_irq = dev->irq;	/* Save IOAPIC IRQ */
498 	entry->msi_attrib.pos = pos;
499 	if (is_mask_bit_support(control)) {
/* For MSI, mask_base carries the config-space offset of the mask reg. */
500 		entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
501 				is_64bit_address(control));
504 	if (entry->msi_attrib.maskbit) {
505 		unsigned int maskbits, temp;
506 		/* All MSIs are unmasked by default, Mask them all */
507 		pci_read_config_dword(dev,
508 			msi_mask_bits_reg(pos, is_64bit_address(control)),
/* Build a mask covering every vector this function can generate. */
510 		temp = (1 << multi_msi_capable(control));
511 		temp = ((temp - 1) & ~temp);
513 		pci_write_config_dword(dev,
514 			msi_mask_bits_reg(pos, is_64bit_address(control)),
517 	/* Configure MSI capability structure */
518 	status = arch_setup_msi_irq(irq, dev);
/* Arch routing failed: unwind the irq allocation. */
520 		destroy_msi_irq(irq);
524 	attach_msi_entry(entry, irq);
525 	/* Set MSI enabled bits	 */
526 	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
533 * msix_capability_init - configure device's MSI-X capability
534 * @dev: pointer to the pci_dev data structure of MSI-X device function
535 * @entries: pointer to an array of struct msix_entry entries
536 * @nvec: number of @entries
538 * Setup the MSI-X capability structure of device function with a
539 * single MSI-X irq. A return of zero indicates the successful setup of
540 * requested MSI-X entries with allocated irqs or non-zero for otherwise.
/*
 * Configure MSI-X for @nvec entries: map the vector table from the BAR
 * indicated by the Table Offset/BIR register, then for each requested entry
 * allocate an irq + descriptor, thread it onto the device's circular link
 * ring, and hand it to the arch for routing.  On partial failure the loop
 * below frees everything set up so far; elided lines appear to report the
 * partial count -- confirm against full source.
 */
542 static int msix_capability_init(struct pci_dev *dev,
543 				struct msix_entry *entries, int nvec)
545 	struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
547 	int irq, pos, i, j, nr_entries, temp = 0;
548 	unsigned long phys_addr;
554 	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
555 	/* Request & Map MSI-X table region */
556 	pci_read_config_word(dev, msi_control_reg(pos), &control);
557 	nr_entries = multi_msix_capable(control);
/* Table location = BAR[bir] + offset, per the MSI-X capability layout. */
559 	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
560 	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
561 	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
562 	phys_addr = pci_resource_start (dev, bir) + table_offset;
563 	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
567 	/* MSI-X Table Initialization */
568 	for (i = 0; i < nvec; i++) {
569 		irq = create_msi_irq();
573 		entry = get_irq_data(irq);
574 		j = entries[i].entry;
/* Report the allocated irq back to the driver via its entry array. */
575 		entries[i].vector = irq;
576 		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
577 		entry->msi_attrib.is_64 = 1;
578 		entry->msi_attrib.entry_nr = j;
579 		entry->msi_attrib.maskbit = 1;
580 		entry->msi_attrib.default_irq = dev->irq;
581 		entry->msi_attrib.pos = pos;
/* For MSI-X, mask_base is the ioremapped table, shared by all entries. */
583 		entry->mask_base = base;
/* First entry starts the ring pointing at itself... */
585 			entry->link.head = irq;
586 			entry->link.tail = irq;
/* ...subsequent entries are spliced in between tail and head. */
589 			entry->link.head = temp;
590 			entry->link.tail = tail->link.tail;
591 			tail->link.tail = irq;
592 			head->link.head = irq;
596 		/* Configure MSI-X capability structure */
597 		status = arch_setup_msi_irq(irq, dev);
599 			destroy_msi_irq(irq);
603 		attach_msi_entry(entry, irq);
/* Failure path: release every irq allocated so far, clear vectors. */
608 		for (; i >= 0; i--) {
609 			irq = (entries + i)->vector;
610 			msi_free_irq(dev, irq);
611 			(entries + i)->vector = 0;
613 		/* If we had some success report the number of irqs
614 		 * we succeeded in setting up.
620 	/* Set MSI-X enabled bits */
621 	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
627 * pci_msi_supported - check whether MSI may be enabled on device
628 * @dev: pointer to the pci_dev data structure of MSI device function
630 * MSI must be globally enabled and supported by the device and its root
631 * bus. But, the root bus is not easy to find since some architectures
632 * have virtual busses on top of the PCI hierarchy (for instance the
633 * hypertransport bus), while the actual bus where MSI must be supported
634 * is below. So we test the MSI flag on all parent busses and assume
635 * that no quirk will ever set the NO_MSI flag on a non-root bus.
/*
 * Check whether MSI may be enabled on @dev: MSI must be globally enabled,
 * not quirked off on the device, and no parent bus may carry the NO_MSI
 * flag (see the kernel-doc above for the virtual-bus rationale).
 */
638 int pci_msi_supported(struct pci_dev * dev)
642 	if (!pci_msi_enable || !dev || dev->no_msi)
645 	/* check MSI flags of all parent busses */
646 	for (bus = dev->bus; bus; bus = bus->parent)
647 		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
654 * pci_enable_msi - configure device's MSI capability structure
655 * @dev: pointer to the pci_dev data structure of MSI device function
657 * Setup the MSI capability structure of device function with
658 * a single MSI irq upon its software driver call to request for
659 * MSI mode enabled on its hardware device function. A return of zero
660 * indicates the successful setup of an entry zero with the new MSI
661 * irq or non-zero for otherwise.
/*
 * Driver entry point: enable single-message MSI on @dev.  Refuses if MSI is
 * unsupported, or if the driver already holds MSI-X irqs (the two modes are
 * mutually exclusive).  Otherwise delegates to msi_capability_init().
 */
663 int pci_enable_msi(struct pci_dev* dev)
665 	int pos, temp, status;
667 	if (pci_msi_supported(dev) < 0)
676 	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
/* A pre-existing MSI irq for this device would be unexpected here. */
680 	WARN_ON(!msi_lookup_irq(dev, PCI_CAP_ID_MSI));
682 	/* Check whether driver already requested for MSI-X irqs */
683 	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
684 	if (pos > 0 && !msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) {
685 		printk(KERN_INFO "PCI: %s: Can't enable MSI.  "
686 		       "Device already has MSI-X irq assigned\n",
691 	status = msi_capability_init(dev);
/*
 * Driver entry point: disable MSI on @dev, free the MSI irq, and restore
 * dev->irq to its original pin-assertion irq.  Warns (and BUGs) if the
 * driver still has a handler registered on the MSI irq.
 */
695 void pci_disable_msi(struct pci_dev* dev)
697 	struct msi_desc *entry;
698 	int pos, default_irq;
707 	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
711 	pci_read_config_word(dev, msi_control_reg(pos), &control);
/* Nothing to do if MSI is not currently enabled. */
712 	if (!(control & PCI_MSI_FLAGS_ENABLE))
715 	disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
717 	spin_lock_irqsave(&msi_lock, flags);
718 	entry = msi_desc[dev->irq];
/* Bail if this irq isn't actually a bound MSI descriptor. */
719 	if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
720 		spin_unlock_irqrestore(&msi_lock, flags);
723 	if (irq_has_action(dev->irq)) {
724 		spin_unlock_irqrestore(&msi_lock, flags);
725 		printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without "
726 		       "free_irq() on MSI irq %d\n",
727 		       pci_name(dev), dev->irq);
728 		BUG_ON(irq_has_action(dev->irq));
730 	default_irq = entry->msi_attrib.default_irq;
731 	spin_unlock_irqrestore(&msi_lock, flags);
732 	msi_free_irq(dev, dev->irq);
734 	/* Restore dev->irq to its default pin-assertion irq */
735 	dev->irq = default_irq;
/*
 * Release one MSI/MSI-X irq belonging to @dev: arch teardown, unlink the
 * descriptor from the device's circular link ring, clear the table slot,
 * destroy the irq, and for MSI-X mask the vector in the hardware table.
 * Elided lines likely iounmap the table when the last entry goes away --
 * confirm against full source.
 */
739 static int msi_free_irq(struct pci_dev* dev, int irq)
741 	struct msi_desc *entry;
742 	int head, entry_nr, type;
746 	arch_teardown_msi_irq(irq);
748 	spin_lock_irqsave(&msi_lock, flags);
749 	entry = msi_desc[irq];
/* Reject irqs not owned by this device. */
750 	if (!entry || entry->dev != dev) {
751 		spin_unlock_irqrestore(&msi_lock, flags);
754 	type = entry->msi_attrib.type;
755 	entry_nr = entry->msi_attrib.entry_nr;
756 	head = entry->link.head;
757 	base = entry->mask_base;
/* Splice this entry out of the circular link ring. */
758 	msi_desc[entry->link.head]->link.tail = entry->link.tail;
759 	msi_desc[entry->link.tail]->link.head = entry->link.head;
761 	msi_desc[irq] = NULL;
762 	spin_unlock_irqrestore(&msi_lock, flags);
764 	destroy_msi_irq(irq);
766 	if (type == PCI_CAP_ID_MSIX) {
/* Mask the now-orphaned vector via its Vector Control word. */
767 		writel(1, base + entry_nr * PCI_MSIX_ENTRY_SIZE +
768 		       PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
778 * pci_enable_msix - configure device's MSI-X capability structure
779 * @dev: pointer to the pci_dev data structure of MSI-X device function
780 * @entries: pointer to an array of MSI-X entries
781 * @nvec: number of MSI-X irqs requested for allocation by device driver
783 * Setup the MSI-X capability structure of device function with the number
784 * of requested irqs upon its software driver call to request for
785 * MSI-X mode enabled on its hardware device function. A return of zero
786 * indicates the successful configuration of MSI-X capability structure
787 * with new allocated MSI-X irqs. A return of < 0 indicates a failure.
788 * Or a return of > 0 indicates that driver request is exceeding the number
789 * of irqs available. Driver should use the returned value to re-send
/*
 * Driver entry point: enable MSI-X with the irqs described in @entries.
 * Validates support, bounds, and uniqueness of the requested entries, and
 * refuses if plain MSI is already assigned (the modes are exclusive).
 * Delegates to msix_capability_init() for the actual setup.
 */
792 int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
794 	int status, pos, nr_entries;
798 	if (!entries || pci_msi_supported(dev) < 0)
805 	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
809 	pci_read_config_word(dev, msi_control_reg(pos), &control);
810 	nr_entries = multi_msix_capable(control);
/* Caller asked for more vectors than the hardware advertises. */
811 	if (nvec > nr_entries)
814 	/* Check for any invalid entries */
815 	for (i = 0; i < nvec; i++) {
816 		if (entries[i].entry >= nr_entries)
817 			return -EINVAL;		/* invalid entry */
/* O(n^2) duplicate scan -- nvec is small, so this is fine. */
818 		for (j = i + 1; j < nvec; j++) {
819 			if (entries[i].entry == entries[j].entry)
820 				return -EINVAL;	/* duplicate entry */
824 	WARN_ON(!msi_lookup_irq(dev, PCI_CAP_ID_MSIX));
826 	/* Check whether driver already requested for MSI irq */
827 	if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
828 	    !msi_lookup_irq(dev, PCI_CAP_ID_MSI)) {
829 		printk(KERN_INFO "PCI: %s: Can't enable MSI-X.  "
830 		       "Device already has an MSI irq assigned\n",
835 	status = msix_capability_init(dev, entries, nvec);
/*
 * Driver entry point: disable MSI-X on @dev and free all of its MSI-X irqs
 * by walking the circular link ring.  Irqs that still have handlers
 * registered trigger the warning instead of being freed.
 */
839 void pci_disable_msix(struct pci_dev* dev)
849 	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
853 	pci_read_config_word(dev, msi_control_reg(pos), &control);
854 	if (!(control & PCI_MSIX_FLAGS_ENABLE))
857 	disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
860 	if (!msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) {
861 		int irq, head, tail = 0, warning = 0;
863 		irq = head = dev->irq;
865 		dev->irq = temp;			/* Restore pin IRQ */
866 		while (head != tail) {
/* Only hold msi_lock long enough to read the ring link. */
867 			spin_lock_irqsave(&msi_lock, flags);
868 			tail = msi_desc[irq]->link.tail;
869 			spin_unlock_irqrestore(&msi_lock, flags);
870 			if (irq_has_action(irq))
/* elided: warning flagged for busy irqs */
872 			else if (irq != head)	/* Release MSI-X irq */
873 				msi_free_irq(dev, irq);
/* After the loop: the head irq is freed last (condition elided). */
876 			msi_free_irq(dev, irq);
878 			printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
879 			       "free_irq() on all MSI-X irqs\n",
887 * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
888 * @dev: pointer to the pci_dev data structure of MSI(X) device function
890 * Being called during hotplug remove, from which the device function
891 * is hot-removed. All previous assigned MSI/MSI-X irqs, if
892 * allocated for this device function, are reclaimed to unused state,
893 * which may be used later on.
/*
 * Hotplug-removal path: reclaim every MSI and MSI-X irq assigned to @dev
 * (see the kernel-doc above).  Mirrors pci_disable_msi()/pci_disable_msix()
 * but handles both capabilities and restores the saved IOAPIC irq.
 */
895 void msi_remove_pci_irq_vectors(struct pci_dev* dev)
900 	if (!pci_msi_enable || !dev)
903 	temp = dev->irq;		/* Save IOAPIC IRQ */
904 	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
905 	if (pos > 0 && !msi_lookup_irq(dev, PCI_CAP_ID_MSI)) {
/* A still-registered handler is a driver bug at hot-remove time. */
906 		if (irq_has_action(dev->irq)) {
907 			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
908 			       "called without free_irq() on MSI irq %d\n",
909 			       pci_name(dev), dev->irq);
910 			BUG_ON(irq_has_action(dev->irq));
911 		} else /* Release MSI irq assigned to this device */
912 			msi_free_irq(dev, dev->irq);
913 		dev->irq = temp;		/* Restore IOAPIC IRQ */
915 	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
916 	if (pos > 0 && !msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) {
917 		int irq, head, tail = 0, warning = 0;
918 		void __iomem *base = NULL;
920 		irq = head = dev->irq;
/* Walk the MSI-X link ring, remembering the table base for teardown. */
921 		while (head != tail) {
922 			spin_lock_irqsave(&msi_lock, flags);
923 			tail = msi_desc[irq]->link.tail;
924 			base = msi_desc[irq]->mask_base;
925 			spin_unlock_irqrestore(&msi_lock, flags);
926 			if (irq_has_action(irq))
/* elided: warning flagged for busy irqs */
928 			else if (irq != head)	/* Release MSI-X irq */
929 				msi_free_irq(dev, irq);
932 			msi_free_irq(dev, irq);
935 			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
936 			       "called without free_irq() on all MSI-X irqs\n",
940 		dev->irq = temp;		/* Restore IOAPIC IRQ */
/* Boot-time switch to disable MSI globally; body elided in this listing
 * (presumably clears pci_msi_enable -- confirm against full source). */
944 void pci_no_msi(void)
/* Public driver-facing API, exported for modules. */
949 EXPORT_SYMBOL(pci_enable_msi);
950 EXPORT_SYMBOL(pci_disable_msi);
951 EXPORT_SYMBOL(pci_enable_msix);
952 EXPORT_SYMBOL(pci_disable_msix);