arch/powerpc/kernel/iommu.c
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */


#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/iommu-helper.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>

#define DBG(...)

#ifdef CONFIG_IOMMU_VMERGE
static int novmerge = 0;
#else
static int novmerge = 1;
#endif

static int protect4gb = 1;

static inline unsigned long iommu_num_pages(unsigned long vaddr,
                                            unsigned long slen)
{
        unsigned long npages;

        npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
        npages >>= IOMMU_PAGE_SHIFT;

        return npages;
}
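
/*
 * Worked example (assuming the usual 4K IOMMU page size, i.e.
 * IOMMU_PAGE_SHIFT == 12): a 0x200-byte buffer starting 0xf00 bytes into
 * an IOMMU page crosses a page boundary, so iommu_num_pages() returns 2
 * even though the length is far below one page.
 */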

static int __init setup_protect4gb(char *str)
{
        if (strcmp(str, "on") == 0)
                protect4gb = 1;
        else if (strcmp(str, "off") == 0)
                protect4gb = 0;

        return 1;
}

static int __init setup_iommu(char *str)
{
        if (!strcmp(str, "novmerge"))
                novmerge = 1;
        else if (!strcmp(str, "vmerge"))
                novmerge = 0;
        return 1;
}

__setup("protect4gb=", setup_protect4gb);
__setup("iommu=", setup_iommu);
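
/*
 * Both of the above are standard __setup() hooks, so the corresponding
 * flags can be driven from the kernel command line: "iommu=novmerge" or
 * "iommu=vmerge" controls virtual merging of scatterlist entries, and
 * "protect4gb=on"/"protect4gb=off" sets the protect4gb flag.
 */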

static unsigned long iommu_range_alloc(struct device *dev,
                                       struct iommu_table *tbl,
                                       unsigned long npages,
                                       unsigned long *handle,
                                       unsigned long mask,
                                       unsigned int align_order)
{
        unsigned long n, end, start;
        unsigned long limit;
        int largealloc = npages > 15;
        int pass = 0;
        unsigned long align_mask;
        unsigned long boundary_size;

        align_mask = 0xffffffffffffffffl >> (64 - align_order);

        /* This allocator was derived from x86_64's bit string search */

        /* Sanity check */
        if (unlikely(npages == 0)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return DMA_ERROR_CODE;
        }

        if (handle && *handle)
                start = *handle;
        else
                start = largealloc ? tbl->it_largehint : tbl->it_hint;

        /* Use only half of the table for small allocs (15 pages or less) */
        limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

        if (largealloc && start < tbl->it_halfpoint)
                start = tbl->it_halfpoint;

        /* The case below can happen if we have a small segment appended
         * to a large, or when the previous alloc was at the very end of
         * the available space. If so, go back to the initial start.
         */
        if (start >= limit)
                start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

        if (limit + tbl->it_offset > mask) {
                limit = mask - tbl->it_offset + 1;
                /* If we're constrained on address range, first try
                 * at the masked hint to avoid O(n) search complexity,
                 * but on second pass, start at 0.
                 */
                if ((start & mask) >= limit || pass > 0)
                        start = 0;
                else
                        start &= mask;
        }

        if (dev)
                boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                      1 << IOMMU_PAGE_SHIFT);
        else
                boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
        /* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

        n = iommu_area_alloc(tbl->it_map, limit, start, npages,
                             tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
                             align_mask);
        if (n == -1) {
                if (likely(pass < 2)) {
                        /* First failure, just rescan the half of the table.
                         * Second failure, rescan the other half of the table.
                         */
                        start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
                        limit = pass ? tbl->it_size : limit;
                        pass++;
                        goto again;
                } else {
                        /* Third failure, give up */
                        return DMA_ERROR_CODE;
                }
        }

        end = n + npages;

        /* Bump the hint to a new block for small allocs. */
        if (largealloc) {
                /* Don't bump to new block to avoid fragmentation */
                tbl->it_largehint = end;
        } else {
                /* Overflow will be taken care of at the next allocation */
                tbl->it_hint = (end + tbl->it_blocksize - 1) &
                                ~(tbl->it_blocksize - 1);
        }

        /* Update handle for SG allocations */
        if (handle)
                *handle = end;

        return n;
}

static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
                              void *page, unsigned int npages,
                              enum dma_data_direction direction,
                              unsigned long mask, unsigned int align_order)
{
        unsigned long entry, flags;
        dma_addr_t ret = DMA_ERROR_CODE;

        spin_lock_irqsave(&(tbl->it_lock), flags);

        entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

        if (unlikely(entry == DMA_ERROR_CODE)) {
                spin_unlock_irqrestore(&(tbl->it_lock), flags);
                return DMA_ERROR_CODE;
        }

        entry += tbl->it_offset;        /* Offset into real TCE table */
        ret = entry << IOMMU_PAGE_SHIFT;        /* Set the return dma address */

        /* Put the TCEs in the HW table */
        ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
                         direction);


        /* Flush/invalidate TLB caches if necessary */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);

        /* Make sure updates are seen by hardware */
        mb();

        return ret;
}
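
/*
 * Rough sketch of the address math above, assuming the usual 4K IOMMU
 * pages (IOMMU_PAGE_SHIFT == 12) and hypothetical numbers: if
 * iommu_range_alloc() returns bitmap index 0x40 and the table starts at
 * it_offset 0x1000, the TCE entry is 0x1040 and the DMA address handed
 * back is 0x1040 << 12 == 0x1040000. Callers then OR in the byte offset
 * within the first page.
 */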

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                         unsigned int npages)
{
        unsigned long entry, free_entry;

        entry = dma_addr >> IOMMU_PAGE_SHIFT;
        free_entry = entry - tbl->it_offset;

        if (((free_entry + npages) > tbl->it_size) ||
            (entry < tbl->it_offset)) {
                if (printk_ratelimit()) {
                        printk(KERN_INFO "iommu_free: invalid entry\n");
                        printk(KERN_INFO "\tentry     = 0x%lx\n", entry);
                        printk(KERN_INFO "\tdma_addr  = 0x%lx\n", (u64)dma_addr);
                        printk(KERN_INFO "\tTable     = 0x%lx\n", (u64)tbl);
                        printk(KERN_INFO "\tbus#      = 0x%lx\n", (u64)tbl->it_busno);
                        printk(KERN_INFO "\tsize      = 0x%lx\n", (u64)tbl->it_size);
                        printk(KERN_INFO "\tstartOff  = 0x%lx\n", (u64)tbl->it_offset);
                        printk(KERN_INFO "\tindex     = 0x%lx\n", (u64)tbl->it_index);
                        WARN_ON(1);
                }
                return;
        }

        ppc_md.tce_free(tbl, entry, npages);
        iommu_area_free(tbl->it_map, free_entry, npages);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                unsigned int npages)
{
        unsigned long flags;

        spin_lock_irqsave(&(tbl->it_lock), flags);

        __iommu_free(tbl, dma_addr, npages);

        /* Make sure TLB cache is flushed if the HW needs it. We do
         * not do an mb() here on purpose, it is not needed on any of
         * the current platforms.
         */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

int iommu_map_sg(struct device *dev, struct scatterlist *sglist,
                 int nelems, unsigned long mask,
                 enum dma_data_direction direction)
{
        struct iommu_table *tbl = dev->archdata.dma_data;
        dma_addr_t dma_next = 0, dma_addr;
        unsigned long flags;
        struct scatterlist *s, *outs, *segstart;
        int outcount, incount, i;
        unsigned int align;
        unsigned long handle;
        unsigned int max_seg_size;

        BUG_ON(direction == DMA_NONE);

        if ((nelems == 0) || !tbl)
                return 0;

        outs = s = segstart = &sglist[0];
        outcount = 1;
        incount = nelems;
        handle = 0;

        /* Init first segment length for backout at failure */
        outs->dma_length = 0;

        DBG("sg mapping %d elements:\n", nelems);

        spin_lock_irqsave(&(tbl->it_lock), flags);

        max_seg_size = dma_get_max_seg_size(dev);
        for_each_sg(sglist, s, nelems, i) {
                unsigned long vaddr, npages, entry, slen;

                slen = s->length;
                /* Sanity check */
                if (slen == 0) {
                        dma_next = 0;
                        continue;
                }
                /* Allocate iommu entries for that segment */
                vaddr = (unsigned long) sg_virt(s);
                npages = iommu_num_pages(vaddr, slen);
                align = 0;
                if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
                    (vaddr & ~PAGE_MASK) == 0)
                        align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
                entry = iommu_range_alloc(dev, tbl, npages, &handle,
                                          mask >> IOMMU_PAGE_SHIFT, align);

                DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

                /* Handle failure */
                if (unlikely(entry == DMA_ERROR_CODE)) {
                        if (printk_ratelimit())
                                printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
                                       " npages %lx\n", tbl, vaddr, npages);
                        goto failure;
                }

                /* Convert entry to a dma_addr_t */
                entry += tbl->it_offset;
                dma_addr = entry << IOMMU_PAGE_SHIFT;
                dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

                DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
                            npages, entry, dma_addr);

                /* Insert into HW table */
                ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);

                /* If we are in an open segment, try merging */
                if (segstart != s) {
                        DBG("  - trying merge...\n");
                        /* We cannot merge if:
                         * - allocated dma_addr isn't contiguous to previous allocation
                         */
                        if (novmerge || (dma_addr != dma_next) ||
                            (outs->dma_length + s->length > max_seg_size)) {
                                /* Can't merge: create a new segment */
                                segstart = s;
                                outcount++;
                                outs = sg_next(outs);
                                DBG("    can't merge, new segment.\n");
                        } else {
                                outs->dma_length += s->length;
                                DBG("    merged, new len: %ux\n", outs->dma_length);
                        }
                }

                if (segstart == s) {
                        /* This is a new segment, fill entries */
                        DBG("  - filling new segment.\n");
                        outs->dma_address = dma_addr;
                        outs->dma_length = slen;
                }

                /* Calculate next page pointer for contiguous check */
                dma_next = dma_addr + slen;

                DBG("  - dma next is: %lx\n", dma_next);
        }

        /* Flush/invalidate TLB caches if necessary */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);

        DBG("mapped %d elements:\n", outcount);

        /* For the sake of iommu_unmap_sg, we clear out the length in the
         * next entry of the sglist if we didn't fill the list completely
         */
        if (outcount < incount) {
                outs = sg_next(outs);
                outs->dma_address = DMA_ERROR_CODE;
                outs->dma_length = 0;
        }

        /* Make sure updates are seen by hardware */
        mb();

        return outcount;

 failure:
        for_each_sg(sglist, s, nelems, i) {
                if (s->dma_length != 0) {
                        unsigned long vaddr, npages;

                        vaddr = s->dma_address & IOMMU_PAGE_MASK;
                        npages = iommu_num_pages(s->dma_address, s->dma_length);
                        __iommu_free(tbl, vaddr, npages);
                        s->dma_address = DMA_ERROR_CODE;
                        s->dma_length = 0;
                }
                if (s == outs)
                        break;
        }
        spin_unlock_irqrestore(&(tbl->it_lock), flags);
        return 0;
}
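
/*
 * Illustration of the merge logic above (a hypothetical scenario, not
 * taken from a real trace): with virtual merging enabled, two 4K
 * scatterlist entries that receive back-to-back TCE entries, say DMA
 * addresses 0x1040000 and 0x1041000, are reported to the caller as a
 * single 8K DMA segment, provided the combined length stays within
 * dma_get_max_seg_size(dev). With "iommu=novmerge" each entry keeps its
 * own output segment.
 */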

void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                int nelems, enum dma_data_direction direction)
{
        struct scatterlist *sg;
        unsigned long flags;

        BUG_ON(direction == DMA_NONE);

        if (!tbl)
                return;

        spin_lock_irqsave(&(tbl->it_lock), flags);

        sg = sglist;
        while (nelems--) {
                unsigned int npages;
                dma_addr_t dma_handle = sg->dma_address;

                if (sg->dma_length == 0)
                        break;
                npages = iommu_num_pages(dma_handle, sg->dma_length);
                __iommu_free(tbl, dma_handle, npages);
                sg = sg_next(sg);
        }

        /* Flush/invalidate TLBs if necessary. As for iommu_free(), we
         * do not do an mb() here, the affected platforms do not need it
         * when freeing.
         */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

/*
 * Build an iommu_table structure.  This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
        unsigned long sz;
        static int welcomed = 0;
        struct page *page;

        /* Set aside 1/4 of the table for large allocations. */
        tbl->it_halfpoint = tbl->it_size * 3 / 4;

        /* number of bytes needed for the bitmap */
        sz = (tbl->it_size + 7) >> 3;

        page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
        if (!page)
                panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
        tbl->it_map = page_address(page);
        memset(tbl->it_map, 0, sz);

        tbl->it_hint = 0;
        tbl->it_largehint = tbl->it_halfpoint;
        spin_lock_init(&tbl->it_lock);

#ifdef CONFIG_CRASH_DUMP
        if (ppc_md.tce_get) {
                unsigned long index;
                unsigned long tceval;
                unsigned long tcecount = 0;

                /*
                 * Reserve the existing mappings left by the first kernel.
                 */
                for (index = 0; index < tbl->it_size; index++) {
                        tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
                        /*
                         * Freed TCE entry contains 0x7fffffffffffffff on JS20
                         */
                        if (tceval && (tceval != 0x7fffffffffffffffUL)) {
                                __set_bit(index, tbl->it_map);
                                tcecount++;
                        }
                }
                if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
                        printk(KERN_WARNING "TCE table is full; ");
                        printk(KERN_WARNING "freeing %d entries for the kdump boot\n",
                                KDUMP_MIN_TCE_ENTRIES);
                        for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
                                index < tbl->it_size; index++)
                                __clear_bit(index, tbl->it_map);
                }
        }
#else
        /* Clear the hardware table in case firmware left allocations in it */
        ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
#endif

        if (!welcomed) {
                printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
                       novmerge ? "disabled" : "enabled");
                welcomed = 1;
        }

        return tbl;
}
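
/*
 * Sizing example (hypothetical numbers): a 1GB DMA window with 4K IOMMU
 * pages gives it_size = 0x40000 entries, so the allocation bitmap needs
 * sz = 0x40000 / 8 = 32KB, i.e. an order-3 page allocation on a 4K-page
 * kernel. The bitmap (plus the hint fields) is the allocator's only
 * state; the TCEs themselves live in the hardware/hypervisor table
 * updated through ppc_md.tce_build()/tce_free().
 */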

void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
        unsigned long bitmap_sz, i;
        unsigned int order;

        if (!tbl || !tbl->it_map) {
                printk(KERN_ERR "%s: expected TCE map for %s\n", __func__,
                                node_name);
                return;
        }

        /* verify that table contains no entries */
        /* it_size is in entries, and we're examining 64 at a time */
        for (i = 0; i < (tbl->it_size/64); i++) {
                if (tbl->it_map[i] != 0) {
                        printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
                                __func__, node_name);
                        break;
                }
        }

        /* calculate bitmap size in bytes */
        bitmap_sz = (tbl->it_size + 7) / 8;

        /* free bitmap */
        order = get_order(bitmap_sz);
        free_pages((unsigned long) tbl->it_map, order);

        /* free table */
        kfree(tbl);
}

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address of the buffer
 * passed here is the kernel (virtual) address of the buffer.  The buffer
 * need not be page aligned, the dma_addr_t returned will point to the same
 * byte within the page as vaddr.
 */
dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
                            void *vaddr, size_t size, unsigned long mask,
                            enum dma_data_direction direction)
{
        dma_addr_t dma_handle = DMA_ERROR_CODE;
        unsigned long uaddr;
        unsigned int npages, align;

        BUG_ON(direction == DMA_NONE);

        uaddr = (unsigned long)vaddr;
        npages = iommu_num_pages(uaddr, size);

        if (tbl) {
                align = 0;
                if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
                    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
                        align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

                dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
                                         mask >> IOMMU_PAGE_SHIFT, align);
                if (dma_handle == DMA_ERROR_CODE) {
                        if (printk_ratelimit())  {
                                printk(KERN_INFO "iommu_alloc failed, "
                                                "tbl %p vaddr %p npages %d\n",
                                                tbl, vaddr, npages);
                        }
                } else
                        dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
        }

        return dma_handle;
}
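
/*
 * Example of the offset handling above, assuming 4K IOMMU pages: mapping
 * 100 bytes at a kernel address whose low 12 bits are 0xa30 allocates one
 * TCE for the containing page, and 0xa30 is OR-ed back into the returned
 * handle so the DMA address points at the same byte as vaddr.
 */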

void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
                size_t size, enum dma_data_direction direction)
{
        unsigned int npages;

        BUG_ON(direction == DMA_NONE);

        if (tbl) {
                npages = iommu_num_pages(dma_handle, size);
                iommu_free(tbl, dma_handle, npages);
        }
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
                           size_t size, dma_addr_t *dma_handle,
                           unsigned long mask, gfp_t flag, int node)
{
        void *ret = NULL;
        dma_addr_t mapping;
        unsigned int order;
        unsigned int nio_pages, io_order;
        struct page *page;

        size = PAGE_ALIGN(size);
        order = get_order(size);

        /*
         * Client asked for way too much space.  This is checked later
         * anyway.  It is easier to debug here for the drivers than in
         * the tce tables.
         */
        if (order >= IOMAP_MAX_ORDER) {
                printk("iommu_alloc_consistent size too large: 0x%lx\n", size);
                return NULL;
        }

        if (!tbl)
                return NULL;

        /* Alloc enough pages (and possibly more) */
        page = alloc_pages_node(node, flag, order);
        if (!page)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);

        /* Set up tces to cover the allocated range */
        nio_pages = size >> IOMMU_PAGE_SHIFT;
        io_order = get_iommu_order(size);
        mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
                              mask >> IOMMU_PAGE_SHIFT, io_order);
        if (mapping == DMA_ERROR_CODE) {
                free_pages((unsigned long)ret, order);
                return NULL;
        }
        *dma_handle = mapping;
        return ret;
}
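
/*
 * Worked example (hypothetical request, 4K pages assumed): asking for
 * 6000 bytes rounds up to 8192 via PAGE_ALIGN(), so an order-1 real page
 * allocation is made, nio_pages = 2 IOMMU pages are mapped, and the
 * mapping is requested aligned to its own size via get_iommu_order().
 */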

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
                         void *vaddr, dma_addr_t dma_handle)
{
        if (tbl) {
                unsigned int nio_pages;

                size = PAGE_ALIGN(size);
                nio_pages = size >> IOMMU_PAGE_SHIFT;
                iommu_free(tbl, dma_handle, nio_pages);
                size = PAGE_ALIGN(size);
                free_pages((unsigned long)vaddr, get_order(size));
        }
}