X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=mm%2Fvmalloc.c;h=af77e171e339a357933d7eb6ba4b001efae5e390;hb=0ca49ca946409f87a8cd0b14d5acb6dea58de6f3;hp=46606c133e824ea4a12f8ae7ca818c333a938628;hpb=9ca0e5474d11ca044e0aacfa6e78bf17957118d2;p=linux-2.6-omap-h63xx.git

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 46606c133e8..af77e171e33 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -68,12 +68,12 @@ static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
 	} while (pud++, addr = next, addr != end);
 }
 
-void unmap_vm_area(struct vm_struct *area)
+void unmap_kernel_range(unsigned long addr, unsigned long size)
 {
 	pgd_t *pgd;
 	unsigned long next;
-	unsigned long addr = (unsigned long) area->addr;
-	unsigned long end = addr + area->size;
+	unsigned long start = addr;
+	unsigned long end = addr + size;
 
 	BUG_ON(addr >= end);
 	pgd = pgd_offset_k(addr);
@@ -84,7 +84,12 @@ void unmap_vm_area(struct vm_struct *area)
 			continue;
 		vunmap_pud_range(pgd, addr, next);
 	} while (pgd++, addr = next, addr != end);
-	flush_tlb_kernel_range((unsigned long) area->addr, end);
+	flush_tlb_kernel_range(start, end);
+}
+
+static void unmap_vm_area(struct vm_struct *area)
+{
+	unmap_kernel_range((unsigned long)area->addr, area->size);
 }
 
 static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
@@ -159,6 +164,7 @@ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
 	flush_cache_vmap((unsigned long) area->addr, end);
 	return err;
 }
+EXPORT_SYMBOL_GPL(map_vm_area);
 
 static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
 					    unsigned long start, unsigned long end,
@@ -181,15 +187,13 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long fl
 	}
 	addr = ALIGN(start, align);
 	size = PAGE_ALIGN(size);
-
-	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_LEVEL_MASK, node);
-	if (unlikely(!area))
+	if (unlikely(!size))
 		return NULL;
 
-	if (unlikely(!size)) {
-		kfree (area);
+	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
+
+	if (unlikely(!area))
 		return NULL;
-	}
 
 	/*
 	 * We always allocate a guard page.
@@ -240,9 +244,10 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 {
 	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
 }
+EXPORT_SYMBOL_GPL(__get_vm_area);
 
 /**
- * get_vm_area - reserve a contingous kernel virtual area
+ * get_vm_area - reserve a contiguous kernel virtual area
  * @size: size of the area
  * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
  *
@@ -298,7 +303,7 @@ found:
 }
 
 /**
- * remove_vm_area - find and remove a contingous kernel virtual area
+ * remove_vm_area - find and remove a continuous kernel virtual area
  * @addr: base address
  *
  * Search for the kernel VM area starting at @addr, and remove it.
@@ -314,7 +319,7 @@ struct vm_struct *remove_vm_area(void *addr)
 	return v;
 }
 
-void __vunmap(void *addr, int deallocate_pages)
+static void __vunmap(void *addr, int deallocate_pages)
 {
 	struct vm_struct *area;
 
@@ -359,7 +364,7 @@ void __vunmap(void *addr, int deallocate_pages)
  * vfree - release memory allocated by vmalloc()
  * @addr: memory base address
  *
- * Free the virtually contiguous memory area starting at @addr, as
+ * Free the virtually continuous memory area starting at @addr, as
  * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
  * NULL, no operation is performed.
  *
@@ -430,11 +435,12 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
-		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
+		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
+					PAGE_KERNEL, node);
 		area->flags |= VM_VPAGES;
 	} else {
 		pages = kmalloc_node(array_size,
-				(gfp_mask & ~(__GFP_HIGHMEM | __GFP_ZERO)),
+				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
 				node);
 	}
 	area->pages = pages;
@@ -443,7 +449,6 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 		kfree(area);
 		return NULL;
 	}
-	memset(area->pages, 0, array_size);
 
 	for (i = 0; i < area->nr_pages; i++) {
 		if (node < 0)
@@ -532,11 +537,12 @@ void *vmalloc_user(unsigned long size)
 	void *ret;
 
 	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
-	write_lock(&vmlist_lock);
-	area = __find_vm_area(ret);
-	area->flags |= VM_USERMAP;
-	write_unlock(&vmlist_lock);
-
+	if (ret) {
+		write_lock(&vmlist_lock);
+		area = __find_vm_area(ret);
+		area->flags |= VM_USERMAP;
+		write_unlock(&vmlist_lock);
+	}
 	return ret;
 }
 EXPORT_SYMBOL(vmalloc_user);
@@ -579,6 +585,14 @@ void *vmalloc_exec(unsigned long size)
 	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
 }
 
+#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
+#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
+#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
+#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
+#else
+#define GFP_VMALLOC32 GFP_KERNEL
+#endif
+
 /**
  * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
  * @size: allocation size
@@ -588,7 +602,7 @@
  */
 void *vmalloc_32(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
+	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
 }
 EXPORT_SYMBOL(vmalloc_32);
 
@@ -604,12 +618,13 @@ void *vmalloc_32_user(unsigned long size)
 	struct vm_struct *area;
 	void *ret;
 
-	ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
-	write_lock(&vmlist_lock);
-	area = __find_vm_area(ret);
-	area->flags |= VM_USERMAP;
-	write_unlock(&vmlist_lock);
-
+	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
+	if (ret) {
+		write_lock(&vmlist_lock);
+		area = __find_vm_area(ret);
+		area->flags |= VM_USERMAP;
+		write_unlock(&vmlist_lock);
+	}
 	return ret;
 }
 EXPORT_SYMBOL(vmalloc_32_user);
@@ -700,7 +715,7 @@ finished:
  * that it is big enough to cover the vma. Will return failure if
  * that criteria isn't met.
  *
- * Similar to remap_pfn_range (see mm/memory.c)
+ * Similar to remap_pfn_range() (see mm/memory.c)
  */
 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 						unsigned long pgoff)
@@ -748,3 +763,63 @@ out_einval_locked:
 }
 EXPORT_SYMBOL(remap_vmalloc_range);
 
+/*
+ * Implement a stub for vmalloc_sync_all() if the architecture chose not to
+ * have one.
+ */
+void __attribute__((weak)) vmalloc_sync_all(void)
+{
+}
+
+
+static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
+{
+	/* apply_to_page_range() does all the hard work. */
+	return 0;
+}
+
+/**
+ * alloc_vm_area - allocate a range of kernel address space
+ * @size: size of the area
+ * @returns: NULL on failure, vm_struct on success
+ *
+ * This function reserves a range of kernel address space, and
+ * allocates pagetables to map that range. No actual mappings
+ * are created. If the kernel address space is not shared
+ * between processes, it syncs the pagetable across all
+ * processes.
+ */
+struct vm_struct *alloc_vm_area(size_t size)
+{
+	struct vm_struct *area;
+
+	area = get_vm_area(size, VM_IOREMAP);
+	if (area == NULL)
+		return NULL;
+
+	/*
+	 * This ensures that page tables are constructed for this region
+	 * of kernel virtual address space and mapped into init_mm.
+	 */
+	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
+				area->size, f, NULL)) {
+		free_vm_area(area);
+		return NULL;
+	}
+
+	/* Make sure the pagetables are constructed in process kernel
+	   mappings */
+	vmalloc_sync_all();
+
+	return area;
+}
+EXPORT_SYMBOL_GPL(alloc_vm_area);
+
+void free_vm_area(struct vm_struct *area)
+{
+	struct vm_struct *ret;
+	ret = remove_vm_area(area->addr);
+	BUG_ON(ret != area);
+	kfree(area);
+}
+EXPORT_SYMBOL_GPL(free_vm_area);
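
Below is a minimal usage sketch, not part of the patch above, showing how a caller might use the alloc_vm_area()/free_vm_area() pair this diff adds: reserve a range of kernel virtual address space with populated page tables but no pages mapped, hand area->addr to whatever installs the real mappings (a hypervisor backend, for instance), and release the range when done. The example_* names and the surrounding module state are hypothetical, and the sketch assumes the matching declarations are made visible through <linux/vmalloc.h>.

#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>

/* Hypothetical module-level state for this sketch. */
static struct vm_struct *example_area;

static int example_reserve(size_t nr_pages)
{
	/*
	 * Reserve nr_pages worth of kernel virtual address space.
	 * Page tables for the range are constructed, but nothing is
	 * mapped yet; the caller installs the real mappings itself.
	 */
	example_area = alloc_vm_area(nr_pages << PAGE_SHIFT);
	if (!example_area)
		return -ENOMEM;

	/* example_area->addr is the start of the reserved range. */
	return 0;
}

static void example_release(void)
{
	if (example_area) {
		/* Removes the area (unmapping it) and frees the vm_struct. */
		free_vm_area(example_area);
		example_area = NULL;
	}
}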