/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

#include <asm/pbm.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"
#define PGLIST_NENTS	2048

struct sun4v_pglist {
	u64	pglist[PGLIST_NENTS];
};

static DEFINE_PER_CPU(struct sun4v_pglist, iommu_pglists);
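
/* The hypervisor map call takes the physical address of a list of IO
 * page addresses.  That list is staged in this per-cpu buffer; users
 * pin themselves to a cpu with get_cpu()/put_cpu() while it is in use.
 */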
static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
{
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		}

		/* Scanned the whole thing, give up. */
		return -1;
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			/* Range is in use, restart the scan past it. */
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}
static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}
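
/* The arena is a bitmap allocator over IOMMU TSB entries: a set bit
 * means that IO page translation slot is in use.  pci_arena_alloc()
 * makes a two-pass circular scan starting at arena->hint, and
 * pci_arena_free() simply clears the range.  Callers serialize both
 * with iommu->lock.
 */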
static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long devhandle, flags, order, first_page, npages, n;
	void *ret;
	long entry;
	u64 *pglist;
	int cpu;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= MAX_ORDER)
		return NULL;

	npages = size >> IO_PAGE_SHIFT;
	if (npages > PGLIST_NENTS)
		return NULL;

	first_page = __get_free_pages(GFP_ATOMIC, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	pcp = pdev->sysdata;
	devhandle = pcp->pbm->devhandle;
	iommu = pcp->pbm->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	cpu = get_cpu();

	pglist = &__get_cpu_var(iommu_pglists).pglist[0];
	for (n = 0; n < npages; n++)
		pglist[n] = first_page + (n * PAGE_SIZE);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages,
					  (HV_PCI_MAP_ATTR_READ |
					   HV_PCI_MAP_ATTR_WRITE),
					  __pa(pglist));
		entry += num;
		npages -= num;
		pglist += num;
	} while (npages != 0);

	put_cpu();

	return ret;
}
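
/* The map/demap loops above and below follow the sun4v convention
 * that a single hypervisor call may process fewer TSB entries than
 * requested; the return value is how many were completed, so we
 * advance and retry until the whole range is done.
 */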
static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, npages, entry, devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, devhandle;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;
	u64 *pglist;
	int cpu;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	if (unlikely(npages > PGLIST_NENTS))
		goto bad;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	cpu = get_cpu();

	pglist = &__get_cpu_var(iommu_pglists).pglist[0];
	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE)
		pglist[i] = base_paddr;

	do {
		unsigned long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot,
					  __pa(pglist));
		entry += num;
		npages -= num;
		pglist += num;
	} while (npages != 0);

	put_cpu();

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return PCI_DMA_ERROR_CODE;
}
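
/* The DVMA address handed back to the driver is the page-aligned bus
 * address of the allocated arena slot OR'd with the buffer's offset
 * within its IO page.  For example, entry 5 with a buffer starting at
 * byte 0x123 of a page yields
 * (page_table_map_base + (5 << IO_PAGE_SHIFT)) | 0x123.
 */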
static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, devhandle;
	long entry;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)
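
/* fill_sg() emits one pglist[] entry per IO page covered by the
 * coalesced DMA segments: the outer loop visits the nused segments
 * prepare_sg() produced, while the inner scans walk the original
 * nelems entries looking for page crossings.  Physically contiguous
 * entries that share an IO page collapse into one mapping; e.g. two
 * 4K buffers at physical 0x10000 and 0x11000 fit in a single 8K IO
 * page and produce a single page list entry.
 */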
static inline void fill_sg(long entry, unsigned long devhandle,
			   struct scatterlist *sg,
			   int nused, int nelems, unsigned long prot)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	int i, cpu, pglist_ent;
	u64 *pglist;

	cpu = get_cpu();
	pglist = &__get_cpu_var(iommu_pglists).pglist[0];
	pglist_ent = 0;
	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = (pteval & IOPTE_PAGE);
			while (len > 0) {
				pglist[pglist_ent++] = pteval;
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}

	BUG_ON(pglist_ent == 0);

	do {
		unsigned long num;

		/* Program the page list we just built; this is a map
		 * call (prot and pglist are its inputs), not a demap.
		 */
		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  pglist_ent, prot, __pa(pglist));
		entry += num;
		pglist += num;
		pglist_ent -= num;
	} while (pglist_ent != 0);

	put_cpu();
}
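
/* prepare_sg(), shared via iommu_common.h, coalesces the scatterlist
 * in place and returns the number of IO pages the mapping will need;
 * pci_4v_map_sg() below then computes 'used', the count of coalesced
 * DMA segments that fill_sg() must program.
 */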
static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, prot, devhandle;
	u32 dma_base;
	struct scatterlist *sgtmp;
	long entry;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			pci_4v_map_single(pdev,
					  (page_address(sglist->page) + sglist->offset),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	/* Step 1: Prepare scatter list. */
	npages = prepare_sg(sglist, nelems);
	if (unlikely(npages > PGLIST_NENTS))
		goto bad;

	/* Step 2: Allocate a cluster and context, if necessary. */
	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	dma_base = iommu->page_table_map_base +
		(entry << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings. */
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	fill_sg(entry, devhandle, sglist, used, nelems, prot);

	return used;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;
}
static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, i, npages, devhandle;
	long entry;
	u32 bus_addr;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	/* Nothing to do... */
}

static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	/* Nothing to do... */
}
struct pci_iommu_ops pci_sun4v_iommu_ops = {
	.alloc_consistent		= pci_4v_alloc_consistent,
	.free_consistent		= pci_4v_free_consistent,
	.map_single			= pci_4v_map_single,
	.unmap_single			= pci_4v_unmap_single,
	.map_sg				= pci_4v_map_sg,
	.unmap_sg			= pci_4v_unmap_sg,
	.dma_sync_single_for_cpu	= pci_4v_dma_sync_single_for_cpu,
	.dma_sync_sg_for_cpu		= pci_4v_dma_sync_sg_for_cpu,
};
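
/* The two sync operations are nops: sun4v PCI is IO cache coherent,
 * so there is no streaming cache to flush the way PSYCHO and SCHIZO
 * require.  The common sparc64 PCI code dispatches all DMA mapping
 * operations through this table on sun4v systems.
 */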
/* SUN4V PCI configuration space accessors. */

static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				  int where, int size, u32 *value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	unsigned long devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	ret = pci_sun4v_config_get(devhandle,
				   HV_PCI_DEVICE_BUILD(bus, device, func),
				   where, size);
	switch (size) {
	case 1:
		*value = ret & 0xff;
		break;
	case 2:
		*value = ret & 0xffff;
		break;
	case 4:
		*value = ret & 0xffffffff;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
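
/* HV_PCI_DEVICE_BUILD packs the bus/device/function triplet into the
 * hypervisor's pcidev argument using the conventional config-address
 * layout (bus in bits 23:16, device in 15:11, function in 10:8).
 */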
static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				   int where, int size, u32 value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	unsigned long devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	ret = pci_sun4v_config_put(devhandle,
				   HV_PCI_DEVICE_BUILD(bus, device, func),
				   where, size, value);

	return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops pci_sun4v_ops = {
	.read		= pci_sun4v_read_pci_cfg,
	.write		= pci_sun4v_write_pci_cfg,
};
static void pbm_scan_bus(struct pci_controller_info *p,
			 struct pci_pbm_info *pbm)
{
	struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);

	if (!cookie) {
		prom_printf("%s: Critical allocation failure.\n", pbm->name);
		prom_halt();
	}

	/* All we care about is the PBM. */
	memset(cookie, 0, sizeof(*cookie));
	cookie->pbm = pbm;

	pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno,
				    p->pci_ops,
				    pbm);
	pci_fixup_host_bridge_self(pbm->pci_bus);
	pbm->pci_bus->self->sysdata = cookie;

	pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
	pci_record_assignments(pbm, pbm->pci_bus);
	pci_assign_unassigned(pbm, pbm->pci_bus);
	pci_fixup_irq(pbm, pbm->pci_bus);
	pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
	pci_setup_busmastering(pbm, pbm->pci_bus);
}
static void pci_sun4v_scan_bus(struct pci_controller_info *p)
{
	if (p->pbm_A.prom_node) {
		p->pbm_A.is_66mhz_capable =
			prom_getbool(p->pbm_A.prom_node, "66mhz-capable");

		pbm_scan_bus(p, &p->pbm_A);
	}
	if (p->pbm_B.prom_node) {
		p->pbm_B.is_66mhz_capable =
			prom_getbool(p->pbm_B.prom_node, "66mhz-capable");

		pbm_scan_bus(p, &p->pbm_B);
	}

	/* XXX register error interrupt handlers XXX */
}
static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
					struct pci_dev *pdev,
					unsigned int ino)
{
	/* XXX Implement me! XXX */
	return 0;
}
static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	struct resource *res, *root;
	u32 reg;
	int where, size, is_64bit;

	res = &pdev->resource[resource];
	if (resource < 6) {
		where = PCI_BASE_ADDRESS_0 + (resource * 4);
	} else if (resource == PCI_ROM_RESOURCE) {
		where = pdev->rom_base_reg;
	} else {
		/* Somebody might have asked allocation of a non-standard resource */
		return;
	}

	/* XXX 64-bit MEM handling is not %100 correct... XXX */
	is_64bit = 0;
	if (res->flags & IORESOURCE_IO)
		root = &pbm->io_space;
	else {
		root = &pbm->mem_space;
		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
			is_64bit = 1;
	}

	size = res->end - res->start;
	pci_read_config_dword(pdev, where, &reg);
	reg = ((reg & size) |
	       (((u32)(res->start - root->start)) & ~size));
	if (resource == PCI_ROM_RESOURCE) {
		reg |= PCI_ROM_ADDRESS_ENABLE;
		res->flags |= IORESOURCE_ROM_ENABLE;
	}
	pci_write_config_dword(pdev, where, reg);

	/* This knows that the upper 32-bits of the address
	 * must be zero.  Our PCI common layer enforces this.
	 */
	if (is_64bit)
		pci_write_config_dword(pdev, where + 4, 0);
}
static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
				      struct resource *res,
				      struct resource *root)
{
	res->start += root->start;
	res->end += root->start;
}
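
/* Example: an IO BAR assigned the PBM-relative range 0x1000-0x10ff,
 * with pbm->io_space.start at 0x7f61000000, is adjusted to the
 * absolute range 0x7f61001000-0x7f610010ff before it enters the
 * kernel resource tree.  (Addresses here are illustrative only.)
 */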
/* Use ranges property to determine where PCI MEM, I/O, and Config
 * space are for this PCI bus module.
 */
static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
{
	int i, saw_mem, saw_io;

	saw_mem = saw_io = 0;
	for (i = 0; i < pbm->num_pbm_ranges; i++) {
		struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
		unsigned long a;
		int type;

		type = (pr->child_phys_hi >> 24) & 0x3;
		a = (((unsigned long)pr->parent_phys_hi << 32UL) |
		     ((unsigned long)pr->parent_phys_lo  <<  0UL));

		switch (type) {
		case 1:
			/* 16-bit IO space, 16MB */
			pbm->io_space.start = a;
			pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
			pbm->io_space.flags = IORESOURCE_IO;
			saw_io = 1;
			break;

		case 2:
			/* 32-bit MEM space, 2GB */
			pbm->mem_space.start = a;
			pbm->mem_space.end = a + (0x80000000UL - 1UL);
			pbm->mem_space.flags = IORESOURCE_MEM;
			saw_mem = 1;
			break;

		case 3:
			/* XXX 64-bit MEM handling XXX */

		default:
			break;
		}
	}

	if (!saw_io || !saw_mem) {
		prom_printf("%s: Fatal error, missing %s PBM range.\n",
			    pbm->name,
			    (!saw_io ? "IO" : "MEM"));
		prom_halt();
	}

	printk("%s: PCI IO[%lx] MEM[%lx]\n",
	       pbm->name,
	       pbm->io_space.start,
	       pbm->mem_space.start);
}
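
/* Per the OpenFirmware PCI binding, bits 25:24 of child_phys_hi
 * select the address space of a ranges entry (1 = IO, 2 = 32-bit MEM,
 * 3 = 64-bit MEM), and parent_phys_hi/lo concatenate into the 64-bit
 * system address of that window, which is what lands in
 * io_space/mem_space above.
 */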
static void pbm_register_toplevel_resources(struct pci_controller_info *p,
					    struct pci_pbm_info *pbm)
{
	pbm->io_space.name = pbm->mem_space.name = pbm->name;

	request_resource(&ioport_resource, &pbm->io_space);
	request_resource(&iomem_resource, &pbm->mem_space);
	pci_register_legacy_regions(&pbm->io_space,
				    &pbm->mem_space);
}
static void probe_existing_entries(struct pci_pbm_info *pbm,
				   struct pci_iommu *iommu)
{
	struct pci_iommu_arena *arena = &iommu->arena;
	unsigned long i, devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK)
			__set_bit(i, arena->map);
	}
}
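
/* Some translations may already be live when Linux takes over the
 * IOMMU (e.g. mappings set up by OBP for boot devices), so every TSB
 * entry the hypervisor reports as valid is marked busy in the arena
 * before we begin allocating from it.
 */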
static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	struct pci_iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 vdma[2], dma_mask, dma_offset;
	int err, tsbsize;

	err = prom_getproperty(pbm->prom_node, "virtual-dma",
			       (char *)&vdma[0], sizeof(vdma));
	if (err == 0 || err == -1) {
		/* No property, use default values. */
		vdma[0] = 0x80000000;
		vdma[1] = 0x80000000;
	}

	dma_mask = vdma[0];
	switch (vdma[1]) {
	case 0x20000000:
		dma_mask |= 0x1fffffff;
		tsbsize = 64;
		break;

	case 0x40000000:
		dma_mask |= 0x3fffffff;
		tsbsize = 128;
		break;

	case 0x80000000:
		dma_mask |= 0x7fffffff;
		tsbsize = 128;
		break;

	default:
		prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
		prom_halt();
	}

	tsbsize *= (8 * 1024);

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kmalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
		prom_halt();
	}
	memset(iommu->arena.map, 0, sz);
	iommu->arena.limit = num_tsb_entries;

	probe_existing_entries(pbm, iommu);
}
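
/* Arena map sizing is one bit per TSB entry, rounded up to an 8-byte
 * multiple: e.g. a 1MB TSB of 8-byte ioptes holds 131072 entries and
 * needs a 16KB bitmap (131072 / 8 = 16384 bytes).
 */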
static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, unsigned int devhandle)
{
	struct pci_pbm_info *pbm;
	unsigned int busrange[2];
	int err, i;

	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->parent = p;
	pbm->prom_node = prom_node;
	pbm->pci_first_slot = 1;

	pbm->devhandle = devhandle;

	sprintf(pbm->name, "SUN4V-PCI%d PBM%c",
		p->index, (pbm == &p->pbm_A ? 'A' : 'B'));

	printk("%s: devhandle[%x]\n", pbm->name, pbm->devhandle);

	prom_getstring(prom_node, "name",
		       pbm->prom_name, sizeof(pbm->prom_name));

	err = prom_getproperty(prom_node, "ranges",
			       (char *) pbm->pbm_ranges,
			       sizeof(pbm->pbm_ranges));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no ranges property.\n",
			    pbm->name);
		prom_halt();
	}

	pbm->num_pbm_ranges =
		(err / sizeof(struct linux_prom_pci_ranges));

	/* Mask out the top 8 bits of the ranges, leaving the real
	 * physical address.
	 */
	for (i = 0; i < pbm->num_pbm_ranges; i++)
		pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff;

	pci_sun4v_determine_mem_io_space(pbm);
	pbm_register_toplevel_resources(p, pbm);

	err = prom_getproperty(prom_node, "interrupt-map",
			       (char *)pbm->pbm_intmap,
			       sizeof(pbm->pbm_intmap));
	if (err != -1) {
		pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
		err = prom_getproperty(prom_node, "interrupt-map-mask",
				       (char *)&pbm->pbm_intmask,
				       sizeof(pbm->pbm_intmask));
		if (err == -1) {
			prom_printf("%s: Fatal error, no "
				    "interrupt-map-mask.\n", pbm->name);
			prom_halt();
		}
	} else {
		pbm->num_pbm_intmap = 0;
		memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask));
	}

	err = prom_getproperty(prom_node, "bus-range",
			       (char *)&busrange[0],
			       sizeof(busrange));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no bus-range.\n", pbm->name);
		prom_halt();
	}
	pbm->pci_first_busno = busrange[0];
	pbm->pci_last_busno = busrange[1];

	pci_sun4v_iommu_init(pbm);
}
void sun4v_pci_init(int node, char *model_name)
{
	struct pci_controller_info *p;
	struct pci_iommu *iommu;
	struct linux_prom64_registers regs;
	unsigned int devhandle;

	prom_getproperty(node, "reg", (char *)&regs, sizeof(regs));
	devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff;

	for (p = pci_controller_root; p; p = p->next) {
		struct pci_pbm_info *pbm;

		if (p->pbm_A.prom_node && p->pbm_B.prom_node)
			continue;

		pbm = (p->pbm_A.prom_node ?
		       &p->pbm_A :
		       &p->pbm_B);

		/* PBM pairs differ only in bit 0x40 of the devhandle. */
		if (pbm->devhandle == (devhandle ^ 0x40)) {
			pci_sun4v_pbm_init(p, node, devhandle);
			return;
		}
	}

	p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p) {
		prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
		prom_halt();
	}
	memset(p, 0, sizeof(*p));

	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu) {
		prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
		prom_halt();
	}
	memset(iommu, 0, sizeof(*iommu));
	p->pbm_A.iommu = iommu;

	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu) {
		prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
		prom_halt();
	}
	memset(iommu, 0, sizeof(*iommu));
	p->pbm_B.iommu = iommu;

	p->next = pci_controller_root;
	pci_controller_root = p;

	p->index = pci_num_controllers++;
	p->pbms_same_domain = 0;

	p->scan_bus = pci_sun4v_scan_bus;
	p->irq_build = pci_sun4v_irq_build;
	p->base_address_update = pci_sun4v_base_address_update;
	p->resource_adjust = pci_sun4v_resource_adjust;
	p->pci_ops = &pci_sun4v_ops;

	/* Like PSYCHO and SCHIZO we have a 2GB aligned area
	 * for memory space.
	 */
	pci_memspace_mask = 0x7fffffffUL;

	pci_sun4v_pbm_init(p, node, devhandle);

	prom_printf("sun4v_pci_init: Implement me.\n");
	prom_halt();
}