/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"
#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)		printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)		printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0

#define ISA_DMA_MASK		0x00ffffff
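
/* A scatter-gather PTE as built by mk_iommu_pte() below holds the page
   frame number shifted left by one, with bit 0 used as the valid bit:
   for a page-aligned paddr, (paddr >> PAGE_SHIFT) << 1 | 1
   == (paddr >> (PAGE_SHIFT-1)) | 1.  */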
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}

static inline long
calc_npages(long bytes)
{
	return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}
struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;

#ifdef CONFIG_DISCONTIGMEM

	if (!NODE_DATA(nid) ||
	    (NULL == (arena = alloc_bootmem_node(NODE_DATA(nid),
						 sizeof(*arena))))) {
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena = alloc_bootmem(sizeof(*arena));
	}

	if (!NODE_DATA(nid) ||
	    (NULL == (arena->ptes = __alloc_bootmem_node(NODE_DATA(nid),
							 mem_size,
							 align,
							 0)))) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena->ptes = __alloc_bootmem(mem_size, align, 0);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = alloc_bootmem(sizeof(*arena));
	arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}
struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}
static inline int is_span_boundary(unsigned int index, unsigned int nr,
				   unsigned long shift,
				   unsigned long boundary_size)
{
	shift = (shift + index) & (boundary_size - 1);
	return shift + nr > boundary_size;
}
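
/* is_span_boundary() above reports whether a run of NR pages starting at
   arena index INDEX would cross a segment boundary, where SHIFT is the
   arena's base address in pages and BOUNDARY_SIZE is the boundary length
   in pages (a power of two).  */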
/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
		       long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;
	int pass = 0;
	unsigned long base;
	unsigned long boundary_size;

	base = arena->dma_base >> PAGE_SHIFT;
	if (dev) {
		boundary_size = dma_get_seg_boundary(dev) + 1;
		BUG_ON(!is_power_of_2(boundary_size));
		boundary_size >>= PAGE_SHIFT;
	} else {
		boundary_size = 1UL << (32 - PAGE_SHIFT);
	}

	/* Search forward for the first mask-aligned sequence of N free ptes */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = ALIGN(arena->next_entry, mask + 1);
	i = 0;

again:
	while (i < n && p+i < nent) {
		if (!i && is_span_boundary(p, n, base, boundary_size)) {
			p = ALIGN(p + 1, mask + 1);
			goto again;
		}

		if (ptes[p+i])
			p = ALIGN(p + i + 1, mask + 1), i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		if (pass < 1) {
			/*
			 * Reached the end.  Flush the TLB and restart
			 * the search from the beginning.
			 */
			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);
			pass++;
			p = 0, i = 0;
			goto again;
		}
		return -1;
	}

	/* Success. It's the responsibility of the caller to mark them
	   in use before releasing the lock */
	return p;
}
static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
		  unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(dev, arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, ie not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}
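
/* Arena pte states used throughout this file: 0 means the slot is free,
   IOMMU_INVALID_PTE marks a slot that has been allocated but not yet
   filled with a valid translation, IOMMU_RESERVED_PTE marks a slot held
   for the AGP GART code further below, and a valid translation has
   bit 0 set (see mk_iommu_pte).  */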
static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}
/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */
static int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask);
/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */
static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;
	struct device *dev = pdev ? &pdev->dev : NULL;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window.  */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error. */
	if (! alpha_mv.mv_pci_tbi) {
		static int been_here = 0; /* Only print the message once. */
		if (!been_here) {
			printk(KERN_WARNING "pci_map_single: no HW sg\n");
			been_here = 1;
		}
		return 0;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = calc_npages((paddr & ~PAGE_MASK) + size);

	/* Force allocation to 64KB boundary for ISA bridges. */
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return 0;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}
dma_addr_t
pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_single);
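
/* Typical streaming-DMA usage, as a sketch only (pdev, buf and len below
   are hypothetical driver objects, not part of this file):

	dma_addr_t bus = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	if (!bus)
		return -ENOMEM;
	... point the device at 'bus' and run the transfer ...
	pci_unmap_single(pdev, bus, len, PCI_DMA_TODEVICE);

   Note that this implementation reports a failed mapping as a zero
   dma address.  */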
dma_addr_t
pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
	     size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_page);
/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

void
pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
		 int direction)
{
	unsigned long flags;
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx "
		       " base %lx size %x\n", dma_addr, arena->dma_base,
		       arena->size);
		return;
	}

	npages = calc_npages((dma_addr & ~PAGE_MASK) + size);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_unmap_single);
void
pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
	       size_t size, int direction)
{
	pci_unmap_single(pdev, dma_addr, size, direction);
}
EXPORT_SYMBOL(pci_unmap_page);
/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */

void *
pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	void *cpu_addr;
	long order = get_order(size);
	gfp_t gfp = GFP_ATOMIC;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %p\n",
		       __builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't find contiguous memory.  */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == 0) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit required mask and we
		   do not have iommu. Try again with GFP_DMA. */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}
EXPORT_SYMBOL(pci_alloc_consistent);
/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

void
pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
		    dma_addr_t dma_addr)
{
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%x,%lx] from %p\n",
	      dma_addr, size, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_free_consistent);
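
/* Typical consistent-DMA usage, as a sketch only (pdev, RING_BYTES and
   the variable names are hypothetical):

	dma_addr_t dma;
	void *ring = pci_alloc_consistent(pdev, RING_BYTES, &dma);
	if (!ring)
		return -ENOMEM;
	... hand 'dma' to the device, access 'ring' from the cpu ...
	pci_free_consistent(pdev, RING_BYTES, ring, dma);
*/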
/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergable followers.  */
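
/* For example, a three-entry list whose buffers happen to be physically
   contiguous is classified as leader dma_address = 0 with both followers
   set to -1, and the leader's dma_length becomes the sum of the three
   lengths; if each boundary between entries instead falls on a page
   boundary (so the iommu can make them virtually contiguous), the
   followers are marked -2 and the leader 1.  */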
#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
	    int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;
	unsigned int max_seg_size;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	/* we will not merge sg without device. */
	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (leader_length + len > max_seg_size)
			goto new_segment;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
new_segment:
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}
/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */

	paddr &= ~PAGE_MASK;
	npages = calc_npages(paddr + size);
	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(dev, leader, end, 0);
		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %lx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
		struct scatterlist *last_sg = sg;

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg++;
		}

		npages = calc_npages((paddr & ~PAGE_MASK) + size);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}
int
pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	   int direction)
{
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;
	struct device *dev;

	if (direction == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	dev = pdev ? &pdev->dev : NULL;

	/* Fast path single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		return sg->dma_address != 0;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, direction);
	return 0;
}
EXPORT_SYMBOL(pci_map_sg);
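
/* The return value of pci_map_sg() is the number of merged DMA segments
   actually produced (possibly fewer than nents), or 0 on failure; the
   entry after the last one used has its dma_length set to 0 so that
   pci_unmap_sg() knows where to stop.  */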
/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

void
pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	     int direction)
{
	unsigned long flags;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma64_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%lx,%lx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%lx,%lx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%lx,%lx]\n",
		     sg - end + nents, addr, size);

		npages = calc_npages((addr & ~PAGE_MASK) + size);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}
EXPORT_SYMBOL(pci_unmap_sg);
/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

int
pci_dma_supported(struct pci_dev *pdev, u64 mask)
{
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}
EXPORT_SYMBOL(pci_dma_supported);
/*
 * AGP GART extensions to the IOMMU
 */
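
/* Lifecycle of the GART extensions below: iommu_reserve() claims a run of
   ptes and marks them IOMMU_RESERVED_PTE, iommu_bind() fills reserved
   slots with valid ptes for the caller's physical pages, iommu_unbind()
   puts them back into the reserved state, and iommu_release() verifies
   that every slot is still merely reserved before freeing the run.  */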
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind() */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}
int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first... */
	for (i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}
int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   unsigned long *physaddrs)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	/* Refuse to bind any slot that was not reserved first.  */
	for (j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for (i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(physaddrs[i]);

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}
int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for (i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}
/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */

static int
pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine. */
	DBGA("pci_dac_dma_supported %s from %p\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}
/* Helper for generic DMA-mapping functions. */

struct pci_dev *
alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev->bus == &pci_bus_type)
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise. */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then). */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations). */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff. */
	return NULL;
}
EXPORT_SYMBOL(alpha_gendev_to_pci);
int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask ||
	    !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
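
/* Example use from a driver probe routine, as a sketch only (the 32-bit
   mask value is illustrative):

	if (dma_set_mask(&pdev->dev, 0xfffffffful))
		return -EIO;
*/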