X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=arch%2Fpowerpc%2Fkernel%2Fiommu.c;h=3857d7e2af0cab9513c72262f2e6372c66bf31c9;hb=feeedc6c820e1026453ad865076cee435f24d30a;hp=550a19399bfaac81bceabfbeb60f4a29e0e0752f;hpb=881b374705f352725ba1f558968ef34c17ba900e;p=linux-2.6-omap-h63xx.git

diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 550a19399bf..3857d7e2af0 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -51,17 +51,6 @@ static int protect4gb = 1;
 
 static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
 
-static inline unsigned long iommu_num_pages(unsigned long vaddr,
-                                            unsigned long slen)
-{
-        unsigned long npages;
-
-        npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
-        npages >>= IOMMU_PAGE_SHIFT;
-
-        return npages;
-}
-
 static int __init setup_protect4gb(char *str)
 {
         if (strcmp(str, "on") == 0)
@@ -325,7 +314,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                 }
                 /* Allocate iommu entries for that segment */
                 vaddr = (unsigned long) sg_virt(s);
-                npages = iommu_num_pages(vaddr, slen);
+                npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
                 align = 0;
                 if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
                     (vaddr & ~PAGE_MASK) == 0)
@@ -418,7 +407,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                         unsigned long vaddr, npages;
 
                         vaddr = s->dma_address & IOMMU_PAGE_MASK;
-                        npages = iommu_num_pages(s->dma_address, s->dma_length);
+                        npages = iommu_num_pages(s->dma_address, s->dma_length,
+                                                 IOMMU_PAGE_SIZE);
                         __iommu_free(tbl, vaddr, npages);
                         s->dma_address = DMA_ERROR_CODE;
                         s->dma_length = 0;
@@ -452,7 +442,8 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 
                 if (sg->dma_length == 0)
                         break;
-                npages = iommu_num_pages(dma_handle, sg->dma_length);
+                npages = iommu_num_pages(dma_handle, sg->dma_length,
+                                         IOMMU_PAGE_SIZE);
                 __iommu_free(tbl, dma_handle, npages);
                 sg = sg_next(sg);
         }
@@ -467,6 +458,42 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
         spin_unlock_irqrestore(&(tbl->it_lock), flags);
 }
 
+static void iommu_table_clear(struct iommu_table *tbl)
+{
+        if (!__kdump_flag) {
+                /* Clear the table in case firmware left allocations in it */
+                ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
+                return;
+        }
+
+#ifdef CONFIG_CRASH_DUMP
+        if (ppc_md.tce_get) {
+                unsigned long index, tceval, tcecount = 0;
+
+                /* Reserve the existing mappings left by the first kernel. */
+                for (index = 0; index < tbl->it_size; index++) {
+                        tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
+                        /*
+                         * Freed TCE entry contains 0x7fffffffffffffff on JS20
+                         */
+                        if (tceval && (tceval != 0x7fffffffffffffffUL)) {
+                                __set_bit(index, tbl->it_map);
+                                tcecount++;
+                        }
+                }
+
+                if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
+                        printk(KERN_WARNING "TCE table is full; freeing ");
+                        printk(KERN_WARNING "%d entries for the kdump boot\n",
+                               KDUMP_MIN_TCE_ENTRIES);
+                        for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
+                             index < tbl->it_size; index++)
+                                __clear_bit(index, tbl->it_map);
+                }
+        }
+#endif
+}
+
 /*
  * Build a iommu_table structure. This contains a bit map which
  * is used to manage allocation of the tce space.
@@ -493,38 +520,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
         tbl->it_largehint = tbl->it_halfpoint;
         spin_lock_init(&tbl->it_lock);
 
-#ifdef CONFIG_CRASH_DUMP
-        if (ppc_md.tce_get) {
-                unsigned long index;
-                unsigned long tceval;
-                unsigned long tcecount = 0;
-
-                /*
-                 * Reserve the existing mappings left by the first kernel.
-                 */
-                for (index = 0; index < tbl->it_size; index++) {
-                        tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
-                        /*
-                         * Freed TCE entry contains 0x7fffffffffffffff on JS20
-                         */
-                        if (tceval && (tceval != 0x7fffffffffffffffUL)) {
-                                __set_bit(index, tbl->it_map);
-                                tcecount++;
-                        }
-                }
-                if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
-                        printk(KERN_WARNING "TCE table is full; ");
-                        printk(KERN_WARNING "freeing %d entries for the kdump boot\n",
-                                KDUMP_MIN_TCE_ENTRIES);
-                        for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
-                                index < tbl->it_size; index++)
-                                __clear_bit(index, tbl->it_map);
-                }
-        }
-#else
-        /* Clear the hardware table in case firmware left allocations in it */
-        ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
-#endif
+        iommu_table_clear(tbl);
 
         if (!welcomed) {
                 printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
@@ -584,7 +580,7 @@ dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
         BUG_ON(direction == DMA_NONE);
 
         uaddr = (unsigned long)vaddr;
-        npages = iommu_num_pages(uaddr, size);
+        npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);
 
         if (tbl) {
                 align = 0;
@@ -617,7 +613,7 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
         BUG_ON(direction == DMA_NONE);
 
         if (tbl) {
-                npages = iommu_num_pages(dma_handle, size);
+                npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
                 iommu_free(tbl, dma_handle, npages);
         }
 }
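
Note on the API change above: the file-local two-argument iommu_num_pages() helper removed at the top of this diff is replaced by calls to a three-argument variant that takes the I/O page size explicitly. A minimal sketch of the equivalent computation is given below; example_iommu_num_pages and its io_page_size parameter are illustrative names only, not the generic helper's actual source.

/*
 * Sketch only: mirrors the removed inline helper, generalized to an
 * explicit I/O page size (assumed to be a power of two).
 */
static unsigned long example_iommu_num_pages(unsigned long vaddr,
                                             unsigned long slen,
                                             unsigned long io_page_size)
{
        /* Offset into the first I/O page plus the mapping length. */
        unsigned long bytes = (vaddr & (io_page_size - 1)) + slen;

        /* Round up to whole I/O pages. */
        return (bytes + io_page_size - 1) / io_page_size;
}

With io_page_size set to IOMMU_PAGE_SIZE, this reproduces the IOMMU_PAGE_ALIGN/IOMMU_PAGE_SHIFT arithmetic of the removed helper, which is why every caller in the hunks above can simply pass IOMMU_PAGE_SIZE as the new third argument.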