/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>
#include <linux/scatterlist.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

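/* Each per-cpu batch buffer holds one page worth of u64 physical page
 * addresses, so a single hypervisor call can map up to PGLIST_NENTS
 * IOTSB entries at once.
 */
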
struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);

/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		/* The hypervisor may map fewer pages than requested,
		 * so loop with the remainder of the list.
		 */
		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p);

	return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p);
}

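/* Illustrative sketch, not part of the original file: the batch API is
 * always used in this start/add/end pattern, with interrupts disabled
 * by the caller.  The helper name, the READ|WRITE protection, and the
 * IO_PAGE_SIZE stride are example choices, not required by the API.
 */
static inline long example_iommu_map_region(struct device *dev, long entry,
					    unsigned long base_pa,
					    unsigned long npages)
{
	unsigned long n;
	long err;

	iommu_batch_start(dev,
			  HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE,
			  entry);
	for (n = 0; n < npages; n++) {
		err = iommu_batch_add(base_pa + (n * IO_PAGE_SIZE));
		if (unlikely(err < 0L))
			return err;
	}
	return iommu_batch_end();
}
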
static long arena_alloc(struct iommu_arena *arena, unsigned long npages)
{
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}

static void arena_free(struct iommu_arena *arena, unsigned long base,
		       unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}

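/* The arena is a first-fit bitmap allocator over IOTSB entries:
 * arena_alloc() searches from a rotating hint and retries once from
 * the start of the map before giving up, while arena_free() simply
 * clears the corresponding bits.
 */
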
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct iommu *iommu;
	unsigned long flags, order, first_page, npages, n;
	void *ret;
	long entry;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	first_page = __get_free_pages(gfp, order);
	if (unlikely(first_page == 0UL))
		return NULL;

	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto arena_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	/* first_page was converted to a physical address above, so free
	 * via the virtual address saved in 'ret'.
	 */
	free_pages((unsigned long) ret, order);
	return NULL;

arena_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		/* The hypervisor may demap fewer entries than asked for,
		 * so loop until the whole range is unmapped.
		 */
		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
				    enum dma_data_direction direction)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return DMA_ERROR_CODE;
}

static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
				size_t sz, enum dma_data_direction direction)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

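/* Physical address of the data a scatterlist entry describes: the
 * physical base of its page plus the intra-page offset.
 */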
#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

static inline long fill_sg(long entry, struct device *dev,
			   struct scatterlist *sg,
			   int nused, int nelems, unsigned long prot)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg_last(sg, nelems);
	unsigned long flags;
	int i;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg = sg_next(sg);
			}

			pteval = (pteval & IOPTE_PAGE);
			while (len > 0) {
				long err;

				err = iommu_batch_add(pteval);
				if (unlikely(err < 0L))
					goto iommu_map_failed;

				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg = sg_next(sg);

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while ((pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg = sg_next(sg);
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg = sg_next(dma_sg);
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);
	return 0;

iommu_map_failed:
	local_irq_restore(flags);
	return -1;
}

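/* prepare_sg() coalesces physically adjacent scatterlist entries, so
 * fill_sg() walks 'nused' DMA segments each of which may cover several
 * of the 'nelems' scatterlist entries; the tail-skipping loop above
 * advances past entries already covered by the current segment.
 */
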
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
	struct iommu *iommu;
	unsigned long flags, npages, prot;
	u32 dma_base;
	struct scatterlist *sgtmp;
	long entry, err;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			dma_4v_map_single(dev,
					  (page_address(sglist->page) +
					   sglist->offset),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	/* Step 1: Prepare scatter list. */
	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster and context, if necessary. */
	spin_lock_irqsave(&iommu->lock, flags);
	entry = arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	dma_base = iommu->page_table_map_base +
		(entry << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp = sg_next(sgtmp);
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings. */
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	err = fill_sg(entry, dev, sglist, used, nelems, prot);
	if (unlikely(err < 0L))
		goto iommu_map_failed;

	return used;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;

iommu_map_failed:
	spin_lock_irqsave(&iommu->lock, flags);
	arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, i, npages;
	struct scatterlist *sg, *sgprv;
	long entry;
	u32 devhandle, bus_addr;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;

		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4v_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	/* Nothing to do... */
}

static void dma_4v_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	/* Nothing to do... */
}

const struct dma_ops sun4v_dma_ops = {
	.alloc_coherent			= dma_4v_alloc_coherent,
	.free_coherent			= dma_4v_free_coherent,
	.map_single			= dma_4v_map_single,
	.unmap_single			= dma_4v_unmap_single,
	.map_sg				= dma_4v_map_sg,
	.unmap_sg			= dma_4v_unmap_sg,
	.sync_single_for_cpu		= dma_4v_sync_single_for_cpu,
	.sync_sg_for_cpu		= dma_4v_sync_sg_for_cpu,
};

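/* sun4v_dma_ops is installed as the global dma_ops in sun4v_pci_init()
 * below, so drivers reach these methods through the generic DMA API.
 * A minimal sketch of typical driver usage (illustrative only; 'dev',
 * 'buf' and 'len' are hypothetical):
 *
 *	dma_addr_t dba = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dba))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(dev, dba, len, DMA_TO_DEVICE);
 */
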
static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->prom_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm);

	/* XXX register error interrupt handlers XXX */
}

static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct iommu *iommu)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			/* Mappings whose target lies in the kernel's
			 * available physical memory are stale, so demap
			 * them; anything else is imported from OBP.
			 */
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}

static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	struct iommu *iommu = pbm->iommu;
	struct property *prop;
	unsigned long num_tsb_entries, sz, tsbsize;
	u32 vdma[2], dma_mask, dma_offset;

	prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
	if (prop) {
		u32 *val = prop->value;

		vdma[0] = val[0];
		vdma[1] = val[1];
	} else {
		/* No property, use default values. */
		vdma[0] = 0x80000000;
		vdma[1] = 0x80000000;
	}

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		prom_printf("PCI-SUN4V: strange virtual-dma[%08x:%08x].\n",
			    vdma[0], vdma[1]);
		prom_halt();
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
	tsbsize = num_tsb_entries * sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
		prom_halt();
	}
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;	/* INTx sysino */

	u64		stick;

	u64		req_id;		/* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved;
};

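/* Illustrative helpers, not in the original file: decoding the PCI
 * requester ID fields using the masks and shifts defined above.
 */
static inline unsigned long msiq_req_id_bus(u64 req_id)
{
	return (req_id & MSIQ_REQID_BUS_MASK) >> MSIQ_REQID_BUS_SHIFT;
}

static inline unsigned long msiq_req_id_device(u64 req_id)
{
	return (req_id & MSIQ_REQID_DEVICE_MASK) >> MSIQ_REQID_DEVICE_SHIFT;
}

static inline unsigned long msiq_req_id_func(u64 req_id)
{
	return (req_id & MSIQ_REQID_FUNC_MASK) >> MSIQ_REQID_FUNC_SHIFT;
}
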
static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset  */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry. */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;

	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		/* Read the configuration back and verify that the
		 * hypervisor accepted what we programmed.
		 */
		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!virt_irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;
	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;

	return virt_irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	= pci_sun4v_get_head,
	.dequeue_msi	= pci_sun4v_dequeue_msi,
	.set_head	= pci_sun4v_set_head,
	.msi_setup	= pci_sun4v_msi_setup,
	.msi_teardown	= pci_sun4v_msi_teardown,
	.msiq_alloc	= pci_sun4v_msiq_alloc,
	.msiq_free	= pci_sun4v_msiq_free,
	.msiq_build_irq	= pci_sun4v_msiq_build_irq,
};

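/* The common sparc64 MSI layer drives the hypervisor exclusively through
 * the hooks above; pci_sun4v_msi_init() just hands them over.
 */
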
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static void __init pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle)
{
	struct pci_pbm_info *pbm;

	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	pbm->scan_bus = pci_sun4v_scan_bus;
	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->parent = p;
	pbm->prom_node = dp;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);
	pci_sun4v_iommu_init(pbm);
	pci_sun4v_msi_init(pbm);
}

void __init sun4v_pci_init(struct device_node *dp, char *model_name)
{
	static int hvapi_negotiated = 0;
	struct pci_controller_info *p;
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct property *prop;
	struct linux_prom64_registers *regs;
	u32 devhandle;
	int i;

	if (!hvapi_negotiated++) {
		int err = sun4v_hvapi_register(HV_GRP_PCI,
					       vpci_major,
					       &vpci_minor);

		if (err) {
			prom_printf("SUN4V_PCI: Could not register hvapi, "
				    "err=%d\n", err);
			prom_halt();
		}
		printk("SUN4V_PCI: Registered hvapi major[%lu] minor[%lu]\n",
		       vpci_major, vpci_minor);

		dma_ops = &sun4v_dma_ops;
	}

	prop = of_find_property(dp, "reg", NULL);
	regs = prop->value;

	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
		if (pbm->devhandle == (devhandle ^ 0x40)) {
			pci_sun4v_pbm_init(pbm->parent, dp, devhandle);
			return;
		}
	}

	for_each_possible_cpu(i) {
		unsigned long page = get_zeroed_page(GFP_ATOMIC);

		if (!page)
			goto fatal_memory_error;

		per_cpu(iommu_batch, i).pglist = (u64 *) page;
	}

	p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p)
		goto fatal_memory_error;

	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_A.iommu = iommu;

	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_B.iommu = iommu;

	pci_sun4v_pbm_init(p, dp, devhandle);
	return;

fatal_memory_error:
	prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
	prom_halt();
}