/* linux/arch/alpha/kernel/pci_iommu.c */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/log2.h>
#include <asm/io.h>
#include <asm/hwrpb.h>
#include "proto.h"
#include "pci_impl.h"
#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)		printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)		printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif
#define DEBUG_NODIRECT 0
#define DEBUG_FORCEDAC 0
#define ISA_DMA_MASK		0x00ffffff
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}
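/* The pte keeps the page frame number in bits 1 and up, with bit 0 as
   the valid bit: (paddr >> (PAGE_SHIFT-1)) | 1 is just
   ((paddr >> PAGE_SHIFT) << 1) | 1 computed with a single shift.  */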
static inline long
calc_npages(long bytes)
{
	return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}
struct pci_iommu_arena *
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;
#ifdef CONFIG_DISCONTIGMEM

	if (!NODE_DATA(nid) ||
	    (NULL == (arena = alloc_bootmem_node(NODE_DATA(nid),
						 sizeof(*arena))))) {
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __FUNCTION__, nid);
		arena = alloc_bootmem(sizeof(*arena));
	}

	if (!NODE_DATA(nid) ||
	    (NULL == (arena->ptes = __alloc_bootmem_node(NODE_DATA(nid),
						mem_size, align, 0)))) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __FUNCTION__, nid);
		arena->ptes = __alloc_bootmem(mem_size, align, 0);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = alloc_bootmem(sizeof(*arena));
	arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */
	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}
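/* For example, a 1GB window on 8KB pages needs a 1MB pte table
   ((1 << 30) / 8192 entries of 8 bytes each), so by the concatenation
   rule above the table itself must sit on a 1MB boundary.  */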
struct pci_iommu_arena *
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}
/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
{
	unsigned long *ptes = arena->ptes;
	long i, p, nent;

	/* Search forward for the first mask-aligned sequence of N free ptes */
	nent = arena->size >> PAGE_SHIFT;
	p = (arena->next_entry + mask) & ~mask;
	i = 0;
	while (i < n && p+i < nent) {
		if (ptes[p+i])
			p = (p + i + 1 + mask) & ~mask, i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		/* Reached the end.  Flush the TLB and restart the
		   search from the beginning.  */
		alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

		p = 0, i = 0;
		while (i < n && p+i < nent) {
			if (ptes[p+i])
				p = (p + i + 1 + mask) & ~mask, i = 0;
			else
				i = i + 1;
		}

		if (i < n)
			return -1;
	}

	/* Success. It's the responsibility of the caller to mark them
	   in use before releasing the lock */
	return p;
}
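/* Example of the mask arithmetic above: for a 64KB-aligned request on
   8KB pages, mask is 7; with next_entry == 13 the search starts at
   p = (13 + 7) & ~7 == 16, and each occupied pte bumps p up to the
   next multiple of 8 beyond it.  */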
static long
iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, ie not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}
/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */

static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;
	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window.  */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error. */
	if (! alpha_mv.mv_pci_tbi) {
		static int been_here = 0; /* Only print the message once. */
		if (!been_here) {
			printk(KERN_WARNING "pci_map_single: no HW sg\n");
			been_here = 1;
		}
		return 0;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;
	npages = calc_npages((paddr & ~PAGE_MASK) + size);

	/* Force allocation to 64KB boundary for ISA bridges. */
	if (pdev && pdev == isa_bridge)
		align = 8;	/* 8 ptes of 8KB pages == 64KB */
	dma_ofs = iommu_arena_alloc(arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return 0;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}
dma_addr_t
pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_single);
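/* A typical streaming-mode caller, sketched; buf, len and
   dev_start_dma are illustrative names, not defined in this file:

	dma_addr_t bus = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	if (!bus)
		return -ENOMEM;
	dev_start_dma(pdev, bus, len);	(device owns buf until unmap)
	pci_unmap_single(pdev, bus, len, PCI_DMA_TODEVICE);
 */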
dma_addr_t
pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
	     size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_page);
/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

void
pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
		 int direction)
{
	unsigned long flags;
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;
	if (direction == PCI_DMA_NONE)
		BUG();

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */
		DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}
	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx "
		       " base %lx size %x\n", dma_addr, arena->dma_base,
		       arena->size);
		return;
	}

	npages = calc_npages((dma_addr & ~PAGE_MASK) + size);
	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_unmap_single);
void
pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
	       size_t size, int direction)
{
	pci_unmap_single(pdev, dma_addr, size, direction);
}
EXPORT_SYMBOL(pci_unmap_page);
/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */

void *
pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	void *cpu_addr;
	long order = get_order(size);
	gfp_t gfp = GFP_ATOMIC;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %p\n",
		       __builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't find contiguous memory.  */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == 0) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit required mask and we
		   do not have iommu. Try again with GFP_DMA. */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}
EXPORT_SYMBOL(pci_alloc_consistent);
/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

void
pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
		    dma_addr_t dma_addr)
{
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%x,%lx] from %p\n",
	      dma_addr, size, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_free_consistent);
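/* Consistent-mode usage, sketched; ring and RING_BYTES are
   illustrative names:

	dma_addr_t ring_dma;
	void *ring = pci_alloc_consistent(pdev, RING_BYTES, &ring_dma);
	if (!ring)
		return -ENOMEM;
	(cpu accesses go through ring, the device uses ring_dma)
	pci_free_consistent(pdev, RING_BYTES, ring, ring_dma);
 */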
/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergable followers.  */

#define SG_ENT_VIRT_ADDRESS(SG) (page_address((SG)->page) + (SG)->offset)
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
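/* Example on 8KB pages: entries at physical 0x10000, 0x12000 and
   0x20000, each 0x2000 long.  The second is physically adjacent to
   the first (-1); the third starts where the run left off only
   page-aligned, so with virt_ok it is merged virtually (-2).  The
   leader ends up with dma_address 1 and dma_length 0x6000.  */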
static void
sg_classify(struct scatterlist *sg, struct scatterlist *end, int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}
/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;
#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */

	paddr &= ~PAGE_MASK;
	npages = calc_npages(paddr + size);
	dma_ofs = iommu_arena_alloc(arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(leader, end, 0);
		return sg_fill(leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %lx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);
	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg++;
		}

		npages = calc_npages((paddr & ~PAGE_MASK) + size);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}
int
pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	   int direction)
{
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;

	if (direction == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	/* Fast path single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		return sg->dma_address != 0;
	}

	start = sg;
	end = sg + nents;
	/* First, prepare information about the entries.  */
	sg_classify(sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}
	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, direction);
	return 0;
}
EXPORT_SYMBOL(pci_map_sg);
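/* Note that pci_map_sg may return fewer entries than it was given:
   mergeable entries collapse into their leader, and the first unused
   entry (if any) has dma_length set to 0 so pci_unmap_sg can find the
   end.  Callers must walk only the returned count.  */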
/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

void
pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	     int direction)
{
	unsigned long flags;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;
	if (direction == PCI_DMA_NONE)
		BUG();

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);
	for (end = sg + nents; sg < end; ++sg) {
		dma64_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%lx,%lx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%lx,%lx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%lx,%lx]\n",
		     sg - end + nents, addr, size);

		npages = calc_npages((addr & ~PAGE_MASK) + size);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}
	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}
EXPORT_SYMBOL(pci_unmap_sg);
/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

int
pci_dma_supported(struct pci_dev *pdev, u64 mask)
{
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base.  */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}
EXPORT_SYMBOL(pci_dma_supported);
/*
 * AGP GART extensions to the IOMMU
 */
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind() */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}
int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first... */
	for(i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}
int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   unsigned long *physaddrs)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	for(j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for(i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(physaddrs[i]);

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}
int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for(i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}
/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */

int
pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine. */
	DBGA("pci_dac_dma_supported %s from %p\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}
EXPORT_SYMBOL(pci_dac_dma_supported);
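/* DAC (dual address cycle) lets a 64-bit-capable device address any
   physical page directly: the routines below just add or subtract
   alpha_mv.pci_dac_offset, with no iommu state to maintain.  */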
dma64_addr_t
pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page,
		    unsigned long offset, int direction)
{
	return (alpha_mv.pci_dac_offset
		+ __pa(page_address(page))
		+ (dma64_addr_t) offset);
}
EXPORT_SYMBOL(pci_dac_page_to_dma);
struct page *
pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
{
	unsigned long paddr = (dma_addr & PAGE_MASK) - alpha_mv.pci_dac_offset;
	return virt_to_page(__va(paddr));
}
EXPORT_SYMBOL(pci_dac_dma_to_page);
unsigned long
pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
{
	return (dma_addr & ~PAGE_MASK);
}
EXPORT_SYMBOL(pci_dac_dma_to_offset);
/* Helper for generic DMA-mapping functions. */
struct pci_dev *
alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev->bus == &pci_bus_type)
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise. */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then). */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations). */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff. */
	return NULL;
}
EXPORT_SYMBOL(alpha_gendev_to_pci);
int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask ||
	    !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);