/*
 * File:	msi.c
 * Purpose:	PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/smp_lock.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>

#include <asm/errno.h>
#include <asm/io.h>

#include "pci.h"
#include "msi.h"

static DEFINE_SPINLOCK(msi_lock);
static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
static kmem_cache_t* msi_cachep;

static int pci_msi_enable = 1;
static int last_alloc_vector;
static int nr_released_vectors;
static int nr_reserved_vectors = NR_HP_RESERVED_VECTORS;
static int nr_msix_devices;

#ifndef CONFIG_X86_IO_APIC
int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
#endif

static struct msi_ops *msi_ops;

int
msi_register(struct msi_ops *ops)
{
	msi_ops = ops;
	return 0;
}

static int msi_cache_init(void)
{
	msi_cachep = kmem_cache_create("msi_cache", sizeof(struct msi_desc),
				       0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!msi_cachep)
		return -ENOMEM;

	return 0;
}

static void msi_set_mask_bit(unsigned int vector, int flag)
{
	struct msi_desc *entry;

	entry = (struct msi_desc *)msi_desc[vector];
	if (!entry || !entry->dev || !entry->mask_base)
		return;
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		int pos;
		u32 mask_bits;

		pos = (long)entry->mask_base;
		pci_read_config_dword(entry->dev, pos, &mask_bits);
		mask_bits &= ~(1);
		mask_bits |= flag;
		pci_write_config_dword(entry->dev, pos, mask_bits);
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		break;
	}
	default:
		break;
	}
}

#ifdef CONFIG_SMP
static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
{
	struct msi_desc *entry;
	u32 address_hi, address_lo;
	unsigned int irq = vector;
	unsigned int dest_cpu = first_cpu(cpu_mask);

	entry = (struct msi_desc *)msi_desc[vector];
	if (!entry || !entry->dev)
		return;

	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		int pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI);

		if (!pos)
			return;

		pci_read_config_dword(entry->dev, msi_upper_address_reg(pos),
			&address_hi);
		pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
			&address_lo);

		msi_ops->target(vector, dest_cpu, &address_hi, &address_lo);

		pci_write_config_dword(entry->dev, msi_upper_address_reg(pos),
			address_hi);
		pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
			address_lo);
		set_native_irq_info(irq, cpu_mask);
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		int offset_hi =
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET;
		int offset_lo =
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET;

		address_hi = readl(entry->mask_base + offset_hi);
		address_lo = readl(entry->mask_base + offset_lo);

		msi_ops->target(vector, dest_cpu, &address_hi, &address_lo);

		writel(address_hi, entry->mask_base + offset_hi);
		writel(address_lo, entry->mask_base + offset_lo);
		set_native_irq_info(irq, cpu_mask);
		break;
	}
	default:
		break;
	}
}
#else
#define set_msi_affinity NULL
#endif /* CONFIG_SMP */

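/*
 * Illustrative sketch (not part of the original file): drivers do not call
 * set_msi_affinity() directly.  The generic IRQ layer reaches it through the
 * per-vector interrupt type installed below, typically when user space
 * writes a CPU mask to /proc/irq/<vector>/smp_affinity.  The helper below is
 * hypothetical and only spells out that path.
 */
static inline void example_retarget_vector(unsigned int vector,
					   cpumask_t new_mask)
{
	struct hw_interrupt_type *chip = irq_desc[vector].chip;

	if (chip && chip->set_affinity)
		chip->set_affinity(vector, new_mask);	/* -> set_msi_affinity() */
}
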
static void mask_MSI_irq(unsigned int vector)
{
	msi_set_mask_bit(vector, 1);
}

static void unmask_MSI_irq(unsigned int vector)
{
	msi_set_mask_bit(vector, 0);
}

static unsigned int startup_msi_irq_wo_maskbit(unsigned int vector)
{
	struct msi_desc *entry;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[vector];
	if (!entry || !entry->dev) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return 0;
	}
	entry->msi_attrib.state = 1;	/* Mark it active */
	spin_unlock_irqrestore(&msi_lock, flags);

	return 0;	/* never anything pending */
}

static unsigned int startup_msi_irq_w_maskbit(unsigned int vector)
{
	startup_msi_irq_wo_maskbit(vector);
	unmask_MSI_irq(vector);
	return 0;	/* never anything pending */
}

static void shutdown_msi_irq(unsigned int vector)
{
	struct msi_desc *entry;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[vector];
	if (entry && entry->dev)
		entry->msi_attrib.state = 0;	/* Mark it not active */
	spin_unlock_irqrestore(&msi_lock, flags);
}

static void end_msi_irq_wo_maskbit(unsigned int vector)
{
	move_native_irq(vector);
	ack_APIC_irq();
}

static void end_msi_irq_w_maskbit(unsigned int vector)
{
	move_native_irq(vector);
	unmask_MSI_irq(vector);
	ack_APIC_irq();
}

static void do_nothing(unsigned int vector)
{
}

/*
 * Interrupt Type for MSI-X PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI-X Capability Structure.
 */
static struct hw_interrupt_type msix_irq_type = {
	.typename	= "PCI-MSI-X",
	.startup	= startup_msi_irq_w_maskbit,
	.shutdown	= shutdown_msi_irq,
	.enable		= unmask_MSI_irq,
	.disable	= mask_MSI_irq,
	.ack		= mask_MSI_irq,
	.end		= end_msi_irq_w_maskbit,
	.set_affinity	= set_msi_affinity
};

/*
 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI Capability Structure with
 * Mask-and-Pending Bits.
 */
static struct hw_interrupt_type msi_irq_w_maskbit_type = {
	.typename	= "PCI-MSI",
	.startup	= startup_msi_irq_w_maskbit,
	.shutdown	= shutdown_msi_irq,
	.enable		= unmask_MSI_irq,
	.disable	= mask_MSI_irq,
	.ack		= mask_MSI_irq,
	.end		= end_msi_irq_w_maskbit,
	.set_affinity	= set_msi_affinity
};

/*
 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI Capability Structure without
 * Mask-and-Pending Bits.
 */
static struct hw_interrupt_type msi_irq_wo_maskbit_type = {
	.typename	= "PCI-MSI",
	.startup	= startup_msi_irq_wo_maskbit,
	.shutdown	= shutdown_msi_irq,
	.enable		= do_nothing,
	.disable	= do_nothing,
	.ack		= do_nothing,
	.end		= end_msi_irq_wo_maskbit,
	.set_affinity	= set_msi_affinity
};

static int msi_free_vector(struct pci_dev* dev, int vector, int reassign);

static int assign_msi_vector(void)
{
	static int new_vector_avail = 1;
	int vector;
	unsigned long flags;

	/*
	 * msi_lock is taken to ensure that each successfully allocated MSI
	 * vector is unique among drivers.
	 */
	spin_lock_irqsave(&msi_lock, flags);

	if (!new_vector_avail) {
		int free_vector = 0;

		/*
		 * vector_irq[] = -1 indicates that this specific vector is:
		 * - assigned for MSI (since MSIs have no associated IRQ) or
		 * - assigned for a legacy interrupt if less than 16, or
		 * - having no corresponding 1:1 vector-to-IOxAPIC IRQ mapping
		 * vector_irq[] = 0 indicates that this vector, previously
		 * assigned for MSI, was freed by a hotplug remove operation.
		 * Such a vector is reused by a subsequent hotplug add
		 * operation.
		 * vector_irq[] > 0 indicates that this vector is assigned for
		 * IOxAPIC IRQs. The vector and its value provide a 1-to-1
		 * vector-to-IOxAPIC IRQ mapping.
		 */
		for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
			if (vector_irq[vector] != 0)
				continue;
			free_vector = vector;
			if (!msi_desc[vector])
				break;
			else
				continue;
		}
		if (!free_vector) {
			spin_unlock_irqrestore(&msi_lock, flags);
			return -EBUSY;
		}
		vector_irq[free_vector] = -1;
		nr_released_vectors--;
		spin_unlock_irqrestore(&msi_lock, flags);
		if (msi_desc[free_vector] != NULL) {
			struct pci_dev *dev;
			int tail = 0;	/* first phase of vector cleanup */

			/* free all linked vectors before re-assigning */
			do {
				spin_lock_irqsave(&msi_lock, flags);
				dev = msi_desc[free_vector]->dev;
				tail = msi_desc[free_vector]->link.tail;
				spin_unlock_irqrestore(&msi_lock, flags);
				msi_free_vector(dev, tail, 1);
			} while (free_vector != tail);
		}

		return free_vector;
	}
	vector = assign_irq_vector(AUTO_ASSIGN);
	last_alloc_vector = vector;
	if (vector == LAST_DEVICE_VECTOR)
		new_vector_avail = 0;

	spin_unlock_irqrestore(&msi_lock, flags);
	return vector;
}

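/*
 * Illustrative sketch (not part of the original file): the vector_irq[]
 * convention documented in assign_msi_vector() can be restated as a tiny
 * classifier.  vector_state() is hypothetical and exists only to spell out
 * the three cases.
 */
static inline const char *vector_state(int vector)
{
	if (vector_irq[vector] > 0)
		return "1:1 vector-to-IOxAPIC IRQ mapping";
	if (vector_irq[vector] == 0)
		return "freed by hotplug remove, reusable for MSI";
	return "in use: MSI, legacy (< 16), or no IOxAPIC mapping";	/* -1 */
}
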
static int get_new_vector(void)
{
	int vector = assign_msi_vector();

	if (vector > 0)
		set_intr_gate(vector, interrupt[vector]);

	return vector;
}

static int msi_init(void)
{
	static int status = -ENOMEM;

	if (!status)
		return status;

	if (pci_msi_quirk) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n");
		status = -EINVAL;
		return status;
	}

	status = msi_arch_init();
	if (status < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING
		       "PCI: MSI arch init failed. MSI disabled.\n");
		return status;
	}

	if (!msi_ops) {
		printk(KERN_WARNING
		       "PCI: MSI ops not registered. MSI disabled.\n");
		status = -EINVAL;
		return status;
	}

	last_alloc_vector = assign_irq_vector(AUTO_ASSIGN);
	status = msi_cache_init();
	if (status < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI cache init failed\n");
		return status;
	}

	if (last_alloc_vector < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: No interrupt vectors available for MSI\n");
		status = -EBUSY;
		return status;
	}
	vector_irq[last_alloc_vector] = 0;
	nr_released_vectors++;

	return status;
}

static int get_msi_vector(struct pci_dev *dev)
{
	return get_new_vector();
}

static struct msi_desc* alloc_msi_entry(void)
{
	struct msi_desc *entry;

	entry = kmem_cache_zalloc(msi_cachep, GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->link.tail = entry->link.head = 0;	/* single message */
	entry->dev = NULL;

	return entry;
}

static void attach_msi_entry(struct msi_desc *entry, int vector)
{
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	msi_desc[vector] = entry;
	spin_unlock_irqrestore(&msi_lock, flags);
}

static void irq_handler_init(int cap_id, int pos, int mask)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_desc[pos].lock, flags);
	if (cap_id == PCI_CAP_ID_MSIX)
		irq_desc[pos].chip = &msix_irq_type;
	else {
		if (!mask)
			irq_desc[pos].chip = &msi_irq_wo_maskbit_type;
		else
			irq_desc[pos].chip = &msi_irq_w_maskbit_type;
	}
	spin_unlock_irqrestore(&irq_desc[pos].lock, flags);
}

static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (type == PCI_CAP_ID_MSI) {
		/* Set enabled bits to single MSI & set the MSI_enable bit */
		msi_enable(control, 1);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msi_enabled = 1;
	} else {
		msix_enable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msix_enabled = 1;
	}
	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
		/* PCI Express Endpoint device detected */
		pci_intx(dev, 0);	/* disable INTx */
	}
}

void disable_msi_mode(struct pci_dev *dev, int pos, int type)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (type == PCI_CAP_ID_MSI) {
		/* Clear the MSI_enable bit to disable MSI */
		msi_disable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msi_enabled = 0;
	} else {
		msix_disable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msix_enabled = 0;
	}
	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
		/* PCI Express Endpoint device detected */
		pci_intx(dev, 1);	/* enable INTx */
	}
}

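/*
 * Illustrative sketch (assumption about the msi_enable()/msi_disable()
 * helpers, which live in msi.h): for plain MSI, enabling or disabling the
 * function amounts to flipping PCI_MSI_FLAGS_ENABLE in the message control
 * word, much like this hypothetical helper.
 */
static void example_set_msi_enable(struct pci_dev *dev, int pos, int enable)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (enable)
		control |= PCI_MSI_FLAGS_ENABLE;
	else
		control &= ~PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, msi_control_reg(pos), control);
}
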
static int msi_lookup_vector(struct pci_dev *dev, int type)
{
	int vector;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
		if (!msi_desc[vector] || msi_desc[vector]->dev != dev ||
		    msi_desc[vector]->msi_attrib.type != type ||
		    msi_desc[vector]->msi_attrib.default_vector != dev->irq)
			continue;
		spin_unlock_irqrestore(&msi_lock, flags);
		/* A pre-assigned MSI vector for this device already
		   exists. Override dev->irq with this vector. */
		dev->irq = vector;
		return 0;
	}
	spin_unlock_irqrestore(&msi_lock, flags);

	return -EACCES;
}

void pci_scan_msi_device(struct pci_dev *dev)
{
	if (!dev)
		return;

	if (pci_find_capability(dev, PCI_CAP_ID_MSIX) > 0)
		nr_msix_devices++;
	else if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0)
		nr_reserved_vectors++;
}

int pci_save_msi_state(struct pci_dev *dev)
{
	int pos, i = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos <= 0 || dev->no_msi)
		return 0;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return 0;

	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
			     GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msi_state\n");
		return -ENOMEM;
	}
	cap = &save_state->data[0];

	pci_read_config_dword(dev, pos, &cap[i++]);
	control = cap[0] >> 16;
	pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]);
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]);
	} else
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
	save_state->cap_nr = PCI_CAP_ID_MSI;
	pci_add_saved_cap(dev, save_state);
	return 0;
}

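/*
 * Illustrative sketch (hypothetical struct, inferred from the reads in
 * pci_save_msi_state() above): for a 64-bit capable function with
 * per-vector masking, the five saved dwords line up as below.  A 32-bit
 * function skips address_hi, and the later fields slide down accordingly.
 */
struct example_saved_msi_cap {
	u32 id_and_control;	/* cap ID/next ptr in low half, message control in high half */
	u32 address_lo;		/* PCI_MSI_ADDRESS_LO */
	u32 address_hi;		/* PCI_MSI_ADDRESS_HI (64-bit capable only) */
	u32 data;		/* PCI_MSI_DATA_64 or PCI_MSI_DATA_32 */
	u32 mask;		/* PCI_MSI_MASK_BIT (maskbit-capable only) */
};
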
void pci_restore_msi_state(struct pci_dev *dev)
{
	int i = 0, pos;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI);
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!save_state || pos <= 0)
		return;
	cap = &save_state->data[0];

	control = cap[i++] >> 16;
	pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]);
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_64, cap[i++]);
	} else
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_32, cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]);
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
	pci_remove_saved_cap(save_state);
	kfree(save_state);
}

int pci_save_msix_state(struct pci_dev *dev)
{
	int pos;
	int temp;
	int vector, head, tail = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0 || dev->no_msi)
		return 0;

	/* save the capability */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSIX_FLAGS_ENABLE))
		return 0;
	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16),
			     GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msix_state\n");
		return -ENOMEM;
	}
	*((u16 *)&save_state->data[0]) = control;

	/* save the table */
	temp = dev->irq;
	if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		kfree(save_state);
		return -EINVAL;
	}

	vector = head = dev->irq;
	while (head != tail) {
		int j;
		void __iomem *base;
		struct msi_desc *entry;

		entry = msi_desc[vector];
		base = entry->mask_base;
		j = entry->msi_attrib.entry_nr;

		entry->address_lo_save =
			readl(base + j * PCI_MSIX_ENTRY_SIZE +
			      PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		entry->address_hi_save =
			readl(base + j * PCI_MSIX_ENTRY_SIZE +
			      PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		entry->data_save =
			readl(base + j * PCI_MSIX_ENTRY_SIZE +
			      PCI_MSIX_ENTRY_DATA_OFFSET);

		tail = msi_desc[vector]->link.tail;
		vector = tail;
	}
	dev->irq = temp;

	save_state->cap_nr = PCI_CAP_ID_MSIX;
	pci_add_saved_cap(dev, save_state);
	return 0;
}

void pci_restore_msix_state(struct pci_dev *dev)
{
	u16 save;
	int pos;
	int vector, head, tail = 0;
	void __iomem *base;
	int j;
	struct msi_desc *entry;
	int temp;
	struct pci_cap_saved_state *save_state;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX);
	if (!save_state)
		return;
	save = *((u16 *)&save_state->data[0]);
	pci_remove_saved_cap(save_state);
	kfree(save_state);

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0)
		return;

	/* route the table */
	temp = dev->irq;
	if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX))
		return;
	vector = head = dev->irq;
	while (head != tail) {
		entry = msi_desc[vector];
		base = entry->mask_base;
		j = entry->msi_attrib.entry_nr;

		writel(entry->address_lo_save,
		       base + j * PCI_MSIX_ENTRY_SIZE +
		       PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(entry->address_hi_save,
		       base + j * PCI_MSIX_ENTRY_SIZE +
		       PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(entry->data_save,
		       base + j * PCI_MSIX_ENTRY_SIZE +
		       PCI_MSIX_ENTRY_DATA_OFFSET);

		tail = msi_desc[vector]->link.tail;
		vector = tail;
	}
	dev->irq = temp;

	pci_write_config_word(dev, msi_control_reg(pos), save);
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
}

static int msi_register_init(struct pci_dev *dev, struct msi_desc *entry)
{
	int status;
	u32 address_hi;
	u32 address_lo;
	u32 data;
	int pos, vector = dev->irq;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);

	/* Configure MSI capability structure */
	status = msi_ops->setup(dev, vector, &address_hi, &address_lo, &data);
	if (status < 0)
		return status;

	pci_write_config_dword(dev, msi_lower_address_reg(pos), address_lo);
	if (is_64bit_address(control)) {
		pci_write_config_dword(dev,
			msi_upper_address_reg(pos), address_hi);
		pci_write_config_word(dev,
			msi_data_reg(pos, 1), data);
	} else
		pci_write_config_word(dev,
			msi_data_reg(pos, 0), data);
	if (entry->msi_attrib.maskbit) {
		unsigned int maskbits, temp;
		/* All MSIs are unmasked by default; mask them all */
		pci_read_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			&maskbits);
		temp = (1 << multi_msi_capable(control));
		temp = ((temp - 1) & ~temp);
		maskbits |= temp;
		pci_write_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			maskbits);
	}

	return 0;
}

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Set up the MSI capability structure of the device function with a
 * single MSI vector, regardless of whether the device function is
 * capable of handling multiple messages. A return of zero indicates
 * successful setup of entry zero with the new MSI vector; a non-zero
 * return indicates failure.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	int status;
	struct msi_desc *entry;
	int pos, vector;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	entry = alloc_msi_entry();
	if (!entry)
		return -ENOMEM;

	vector = get_msi_vector(dev);
	if (vector < 0) {
		kmem_cache_free(msi_cachep, entry);
		return -EBUSY;
	}
	entry->link.head = vector;
	entry->link.tail = vector;
	entry->msi_attrib.type = PCI_CAP_ID_MSI;
	entry->msi_attrib.state = 0;	/* Mark it not active */
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.default_vector = dev->irq;	/* Save IOAPIC IRQ */
	dev->irq = vector;
	entry->dev = dev;
	if (is_mask_bit_support(control)) {
		entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
				is_64bit_address(control));
	}
	/* Replace with MSI handler */
	irq_handler_init(PCI_CAP_ID_MSI, vector, entry->msi_attrib.maskbit);
	/* Configure MSI capability structure */
	status = msi_register_init(dev, entry);
	if (status != 0) {
		dev->irq = entry->msi_attrib.default_vector;
		kmem_cache_free(msi_cachep, entry);
		return status;
	}

	attach_msi_entry(entry, vector);
	/* Set MSI enabled bits */
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);

	return 0;
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Set up the MSI-X capability structure of the device function with the
 * requested number of MSI-X vectors. A return of zero indicates
 * successful setup of the requested MSI-X entries with allocated
 * vectors; a non-zero return indicates failure.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
	u32 address_hi;
	u32 address_lo;
	u32 data;
	int status;
	int vector, pos, i, j, nr_entries, temp = 0;
	unsigned long phys_addr;
	u32 table_offset;
	u16 control;
	u8 bir;
	void __iomem *base;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);

	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
	phys_addr = pci_resource_start(dev, bir) + table_offset;
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry();
		if (!entry)
			break;
		vector = get_msi_vector(dev);
		if (vector < 0) {
			kmem_cache_free(msi_cachep, entry);
			break;
		}

		j = entries[i].entry;
		entries[i].vector = vector;
		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
		entry->msi_attrib.state = 0;	/* Mark it not active */
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.maskbit = 1;
		entry->msi_attrib.default_vector = dev->irq;
		entry->dev = dev;
		entry->mask_base = base;
		if (!head) {
			entry->link.head = vector;
			entry->link.tail = vector;
			head = entry;
		} else {
			entry->link.head = temp;
			entry->link.tail = tail->link.tail;
			tail->link.tail = vector;
			head->link.head = vector;
		}
		temp = vector;
		tail = entry;
		/* Replace with MSI-X handler */
		irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
		/* Configure MSI-X capability structure */
		status = msi_ops->setup(dev, vector,
					&address_hi,
					&address_lo,
					&data);
		if (status < 0)
			break;

		writel(address_lo,
		       base + j * PCI_MSIX_ENTRY_SIZE +
		       PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(address_hi,
		       base + j * PCI_MSIX_ENTRY_SIZE +
		       PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(data,
		       base + j * PCI_MSIX_ENTRY_SIZE +
		       PCI_MSIX_ENTRY_DATA_OFFSET);
		attach_msi_entry(entry, vector);
	}
	if (i != nvec) {
		i--;
		for (; i >= 0; i--) {
			vector = (entries + i)->vector;
			msi_free_vector(dev, vector, 0);
			(entries + i)->vector = 0;
		}
		return -EBUSY;
	}
	/* Set MSI-X enabled bits */
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);

	return 0;
}

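/*
 * Illustrative sketch (layout implied by the PCI_MSIX_ENTRY_* offsets used
 * above, per the PCI MSI-X specification): each slot of the memory-mapped
 * MSI-X table is PCI_MSIX_ENTRY_SIZE bytes and holds four dwords.
 */
struct example_msix_table_entry {
	u32 address_lo;		/* PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET */
	u32 address_hi;		/* PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET */
	u32 data;		/* PCI_MSIX_ENTRY_DATA_OFFSET */
	u32 vector_control;	/* PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET, bit 0 masks the entry */
};
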
/**
 * pci_msi_supported - check whether MSI may be enabled on device
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * MSI must be globally enabled and supported by the device and its root
 * bus. But the root bus is not easy to find since some architectures
 * have virtual busses on top of the PCI hierarchy (for instance the
 * hypertransport bus), while the actual bus where MSI must be supported
 * is below. So we test the MSI flag on all parent busses and assume
 * that no quirk will ever set the NO_MSI flag on a non-root bus.
 **/
static
int pci_msi_supported(struct pci_dev * dev)
{
	struct pci_bus *bus;

	if (!pci_msi_enable || !dev || dev->no_msi)
		return -EINVAL;

	/* check MSI flags of all parent busses */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return -EINVAL;

	return 0;
}

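/*
 * Illustrative sketch (hypothetical quirk, not part of this file): a
 * chipset quirk can veto MSI for every device below a bridge by setting
 * PCI_BUS_FLAGS_NO_MSI on the bridge's secondary bus; the parent-bus walk
 * in pci_msi_supported() will then reject MSI for those devices.
 */
static void example_quirk_no_msi_below_bridge(struct pci_dev *bridge)
{
	if (bridge->subordinate)
		bridge->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
}
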
/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Set up the MSI capability structure of the device function with a
 * single MSI vector when its software driver calls in to request MSI
 * mode to be enabled on its hardware device function. A return of zero
 * indicates successful setup of entry zero with the new MSI vector;
 * a non-zero return indicates failure.
 **/
int pci_enable_msi(struct pci_dev* dev)
{
	int pos, temp, status;

	if (pci_msi_supported(dev) < 0)
		return -EINVAL;

	temp = dev->irq;

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return -EINVAL;

	WARN_ON(!msi_lookup_vector(dev, PCI_CAP_ID_MSI));

	/* Check whether the driver already requested MSI-X vectors */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI. "
		       "Device already has MSI-X vectors assigned\n",
		       pci_name(dev));
		dev->irq = temp;
		return -EINVAL;
	}
	status = msi_capability_init(dev);
	if (!status) {
		if (!pos)
			nr_reserved_vectors--;	/* Only MSI capable */
		else if (nr_msix_devices > 0)
			nr_msix_devices--;	/* Both MSI and MSI-X capable,
						   but choose enabling MSI */
	}

	return status;
}

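/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * typical use of pci_enable_msi()/pci_disable_msi() from a driver's probe
 * path.  my_isr, my_dev and "mydrv" are placeholders; the handler prototype
 * follows the request_irq() signature of this kernel generation.
 */
static irqreturn_t my_isr(int irq, void *dev_id, struct pt_regs *regs);

static int example_probe_irq_setup(struct pci_dev *pdev, void *my_dev)
{
	int err;

	if (pci_enable_msi(pdev) == 0) {
		/* pdev->irq now carries the newly assigned MSI vector */
		err = request_irq(pdev->irq, my_isr, 0, "mydrv", my_dev);
		if (err)
			pci_disable_msi(pdev);	/* fall back to INTx below */
		else
			return 0;
	}
	/* pdev->irq is (back to) the pin-assertion IRQ at this point */
	return request_irq(pdev->irq, my_isr, IRQF_SHARED, "mydrv", my_dev);
}
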
void pci_disable_msi(struct pci_dev* dev)
{
	struct msi_desc *entry;
	int pos, default_vector;
	u16 control;
	unsigned long flags;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return;

	disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[dev->irq];
	if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return;
	}
	if (entry->msi_attrib.state) {
		spin_unlock_irqrestore(&msi_lock, flags);
		printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without "
		       "free_irq() on MSI vector %d\n",
		       pci_name(dev), dev->irq);
		BUG_ON(entry->msi_attrib.state > 0);
	} else {
		default_vector = entry->msi_attrib.default_vector;
		spin_unlock_irqrestore(&msi_lock, flags);
		msi_free_vector(dev, dev->irq, 0);

		/* Restore dev->irq to its default pin-assertion vector */
		dev->irq = default_vector;
	}
}

static int msi_free_vector(struct pci_dev* dev, int vector, int reassign)
{
	struct msi_desc *entry;
	int head, entry_nr, type;
	void __iomem *base;
	unsigned long flags;

	msi_ops->teardown(vector);

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[vector];
	if (!entry || entry->dev != dev) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return -EINVAL;
	}
	type = entry->msi_attrib.type;
	entry_nr = entry->msi_attrib.entry_nr;
	head = entry->link.head;
	base = entry->mask_base;
	msi_desc[entry->link.head]->link.tail = entry->link.tail;
	msi_desc[entry->link.tail]->link.head = entry->link.head;
	entry->dev = NULL;
	if (!reassign) {
		vector_irq[vector] = 0;
		nr_released_vectors++;
	}
	msi_desc[vector] = NULL;
	spin_unlock_irqrestore(&msi_lock, flags);

	kmem_cache_free(msi_cachep, entry);

	if (type == PCI_CAP_ID_MSIX) {
		if (!reassign)
			writel(1, base +
			       entry_nr * PCI_MSIX_ENTRY_SIZE +
			       PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

		if (head == vector)
			iounmap(base);
	}

	return 0;
}

/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X vectors requested for allocation by device driver
 *
 * Set up the MSI-X capability structure of the device function with the
 * number of requested vectors when its software driver calls in to
 * request MSI-X mode to be enabled on its hardware device function.
 * A return of zero indicates successful configuration of the MSI-X
 * capability structure with newly allocated MSI-X vectors. A return of
 * < 0 indicates a failure. A return of > 0 indicates that the driver
 * request exceeds the number of vectors available; the driver should
 * use the returned value to re-send its request.
 **/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
	int status, pos, nr_entries, free_vectors;
	int i, j, temp;
	u16 control;
	unsigned long flags;

	if (!entries || pci_msi_supported(dev) < 0)
		return -EINVAL;

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return -EINVAL;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);
	if (nvec > nr_entries)
		return -EINVAL;

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	temp = dev->irq;
	WARN_ON(!msi_lookup_vector(dev, PCI_CAP_ID_MSIX));

	/* Check whether the driver already requested an MSI vector */
	if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
	    !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
		       "Device already has an MSI vector assigned\n",
		       pci_name(dev));
		dev->irq = temp;
		return -EINVAL;
	}

	spin_lock_irqsave(&msi_lock, flags);
	/*
	 * msi_lock is taken to ensure that enough vector resources are
	 * available before granting the request.
	 */
	free_vectors = pci_vector_resources(last_alloc_vector,
					    nr_released_vectors);
	/*
	 * Ensure that each MSI/MSI-X device has one vector reserved by
	 * default, to prevent any single MSI-X driver from taking all of
	 * the available resources.
	 */
	free_vectors -= nr_reserved_vectors;
	/* Find the average of free vectors among MSI-X devices */
	if (nr_msix_devices > 0)
		free_vectors /= nr_msix_devices;
	spin_unlock_irqrestore(&msi_lock, flags);

	if (nvec > free_vectors) {
		if (free_vectors > 0)
			return free_vectors;
		else
			return -EBUSY;
	}

	status = msix_capability_init(dev, entries, nvec);
	if (!status && nr_msix_devices > 0)
		nr_msix_devices--;

	return status;
}

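/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * requesting three MSI-X vectors.  EXAMPLE_NVEC, example_isr and my_dev are
 * placeholders.
 */
#define EXAMPLE_NVEC 3

static irqreturn_t example_isr(int irq, void *dev_id, struct pt_regs *regs);

static int example_enable_msix(struct pci_dev *pdev, void *my_dev)
{
	struct msix_entry entries[EXAMPLE_NVEC];
	int i, err;

	for (i = 0; i < EXAMPLE_NVEC; i++)
		entries[i].entry = i;		/* MSI-X table slots 0..2 */

	err = pci_enable_msix(pdev, entries, EXAMPLE_NVEC);
	if (err > 0)
		return -ENOSPC;		/* fewer than EXAMPLE_NVEC vectors free */
	if (err)
		return err;

	for (i = 0; i < EXAMPLE_NVEC; i++) {
		err = request_irq(entries[i].vector, example_isr, 0,
				  "mydrv", my_dev);
		if (err) {
			while (--i >= 0)
				free_irq(entries[i].vector, my_dev);
			pci_disable_msix(pdev);
			return err;
		}
	}
	return 0;
}
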
void pci_disable_msix(struct pci_dev* dev)
{
	int pos, temp;
	u16 control;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSIX_FLAGS_ENABLE))
		return;

	disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);

	temp = dev->irq;
	if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		int state, vector, head, tail = 0, warning = 0;
		unsigned long flags;

		vector = head = dev->irq;
		dev->irq = temp;	/* Restore pin IRQ */
		while (head != tail) {
			spin_lock_irqsave(&msi_lock, flags);
			state = msi_desc[vector]->msi_attrib.state;
			tail = msi_desc[vector]->link.tail;
			spin_unlock_irqrestore(&msi_lock, flags);
			if (state)
				warning = 1;
			else if (vector != head)	/* Release MSI-X vector */
				msi_free_vector(dev, vector, 0);
			vector = tail;
		}
		msi_free_vector(dev, vector, 0);
		if (warning) {
			printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
			       "free_irq() on all MSI-X vectors\n",
			       pci_name(dev));
			BUG_ON(warning > 0);
		}
	}
}

/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) vectors to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Called during hotplug remove, when the device function is being
 * hot-removed. All previously assigned MSI/MSI-X vectors, if any were
 * allocated for this device function, are reclaimed to the unused state
 * so they may be reused later on.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
	int state, pos, temp;
	unsigned long flags;

	if (!pci_msi_enable || !dev)
		return;

	temp = dev->irq;	/* Save IOAPIC IRQ */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
		spin_lock_irqsave(&msi_lock, flags);
		state = msi_desc[dev->irq]->msi_attrib.state;
		spin_unlock_irqrestore(&msi_lock, flags);
		if (state) {
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on MSI vector %d\n",
			       pci_name(dev), dev->irq);
			BUG_ON(state > 0);
		} else /* Release MSI vector assigned to this device */
			msi_free_vector(dev, dev->irq, 0);
		dev->irq = temp;	/* Restore IOAPIC IRQ */
	}
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		int vector, head, tail = 0, warning = 0;
		void __iomem *base = NULL;

		vector = head = dev->irq;
		while (head != tail) {
			spin_lock_irqsave(&msi_lock, flags);
			state = msi_desc[vector]->msi_attrib.state;
			tail = msi_desc[vector]->link.tail;
			base = msi_desc[vector]->mask_base;
			spin_unlock_irqrestore(&msi_lock, flags);
			if (state)
				warning = 1;
			else if (vector != head)	/* Release MSI-X vector */
				msi_free_vector(dev, vector, 0);
			vector = tail;
		}
		msi_free_vector(dev, vector, 0);
		if (warning) {
			iounmap(base);
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on all MSI-X vectors\n",
			       pci_name(dev));
			BUG_ON(warning > 0);
		}
		dev->irq = temp;	/* Restore IOAPIC IRQ */
	}
}

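/*
 * Illustrative sketch (assumption about the caller, not taken from this
 * file): the hot-remove path is expected to reclaim the vectors after the
 * driver has been unbound and has already called free_irq().
 */
static void example_hot_remove_cleanup(struct pci_dev *pdev)
{
	msi_remove_pci_irq_vectors(pdev);	/* vectors return to the unused pool */
	/* ... continue with the rest of the device teardown ... */
}
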
void pci_no_msi(void)
{
	pci_msi_enable = 0;
}

EXPORT_SYMBOL(pci_enable_msi);
EXPORT_SYMBOL(pci_disable_msi);
EXPORT_SYMBOL(pci_enable_msix);
EXPORT_SYMBOL(pci_disable_msix);