}
EXPORT_SYMBOL_GPL(map_vm_area);
+/*
+ * Map a vmalloc()-space virtual address to the physical page.
+ */
+struct page *vmalloc_to_page(const void *vmalloc_addr)
+{
+	unsigned long addr = (unsigned long) vmalloc_addr;
+	struct page *page = NULL;
+	pgd_t *pgd = pgd_offset_k(addr);
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *ptep, pte;
+
+	if (!pgd_none(*pgd)) {
+		pud = pud_offset(pgd, addr);
+		if (!pud_none(*pud)) {
+			pmd = pmd_offset(pud, addr);
+			if (!pmd_none(*pmd)) {
+				ptep = pte_offset_map(pmd, addr);
+				pte = *ptep;
+				if (pte_present(pte))
+					page = pte_page(pte);
+				pte_unmap(ptep);
+			}
+		}
+	}
+	return page;
+}
+EXPORT_SYMBOL(vmalloc_to_page);
+
+/*
+ * Map a vmalloc()-space virtual address to the physical page frame number.
+ */
+unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
+{
+	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
+}
+EXPORT_SYMBOL(vmalloc_to_pfn);
+
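
For context, a minimal sketch (not part of the patch) of how a caller might walk a vmalloc()'d buffer with the two new helpers; the buffer and its size here are illustrative:

	#include <linux/vmalloc.h>
	#include <linux/mm.h>
	#include <linux/kernel.h>

	static int example_walk(void)
	{
		/* hypothetical 4-page buffer, illustration only */
		void *buf = vmalloc(4 * PAGE_SIZE);
		unsigned long off;

		if (!buf)
			return -ENOMEM;

		for (off = 0; off < 4 * PAGE_SIZE; off += PAGE_SIZE) {
			/* translate each virtual page to its struct page / pfn */
			struct page *page = vmalloc_to_page(buf + off);
			unsigned long pfn = vmalloc_to_pfn(buf + off);

			printk(KERN_DEBUG "off %lu -> page %p pfn %lu\n",
			       off, page, pfn);
		}
		vfree(buf);
		return 0;
	}
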
static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
					unsigned long start, unsigned long end,
					int node, gfp_t gfp_mask)
	if (unlikely(!size))
		return NULL;
-	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_LEVEL_MASK, node);
+	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
+
	if (unlikely(!area))
		return NULL;
		if (addr > end - size)
			goto out;
	}
+	if ((size + addr) < addr)
+		goto out;
+	if (addr > end - size)
+		goto out;
found:
	area->next = *p;
EXPORT_SYMBOL_GPL(__get_vm_area);
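
The first added check guards against the request wrapping past the top of the address space: if size + addr overflows, the unsigned sum compares below addr itself. A standalone illustration of the arithmetic (plain userspace C, values made up for the example):

	#include <stdio.h>

	int main(void)
	{
		unsigned long addr = (unsigned long)-0x1000;	/* near the top of the address space */
		unsigned long size = 0x2000;			/* 8 KiB request */

		/* the sum wraps past zero, so it is smaller than addr */
		if ((size + addr) < addr)
			printf("overflow: sum wrapped to %#lx\n", size + addr);
		return 0;
	}
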
/**
- * get_vm_area - reserve a contingous kernel virtual area
+ * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
}
/* Caller must hold vmlist_lock */
-static struct vm_struct *__find_vm_area(void *addr)
+static struct vm_struct *__find_vm_area(const void *addr)
{
	struct vm_struct *tmp;
}
/* Caller must hold vmlist_lock */
-static struct vm_struct *__remove_vm_area(void *addr)
+static struct vm_struct *__remove_vm_area(const void *addr)
{
	struct vm_struct **p, *tmp;
}
/**
- * remove_vm_area - find and remove a contingous kernel virtual area
+ * remove_vm_area - find and remove a contiguous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
-struct vm_struct *remove_vm_area(void *addr)
+struct vm_struct *remove_vm_area(const void *addr)
{
	struct vm_struct *v;
	write_lock(&vmlist_lock);
	return v;
}
-static void __vunmap(void *addr, int deallocate_pages)
+static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;
	int i;
	for (i = 0; i < area->nr_pages; i++) {
-		BUG_ON(!area->pages[i]);
-		__free_page(area->pages[i]);
+		struct page *page = area->pages[i];
+
+		BUG_ON(!page);
+		__free_page(page);
	}
	if (area->flags & VM_VPAGES)
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
- * Free the virtually contingous memory area starting at @addr, as
+ * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
-void vfree(void *addr)
+void vfree(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
 *
 * Must not be called in interrupt context.
 */
-void vunmap(void *addr)
+void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
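
With the const-qualified prototypes above, code that holds only a read-only view of a vmalloc()'d or vmap()'d area can release it without casting the qualifier away. A sketch (the structure and function names are hypothetical, not from the patch):

	#include <linux/vmalloc.h>

	struct frob {
		const void *buf;	/* read-only view of a vmalloc()'d buffer */
	};

	static void frob_release(struct frob *f)
	{
		vfree(f->buf);		/* no cast needed once vfree() takes const void * */
		f->buf = NULL;
	}
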
EXPORT_SYMBOL(vmap);
-void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
-				pgprot_t prot, int node)
+static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
+				 pgprot_t prot, int node)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
-				(gfp_mask & GFP_LEVEL_MASK) | __GFP_ZERO,
+				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
				node);
	}
	area->pages = pages;
}
	for (i = 0; i < area->nr_pages; i++) {
+		struct page *page;
+
		if (node < 0)
-			area->pages[i] = alloc_page(gfp_mask);
+			page = alloc_page(gfp_mask);
		else
-			area->pages[i] = alloc_pages_node(node, gfp_mask, 0);
-		if (unlikely(!area->pages[i])) {
+			page = alloc_pages_node(node, gfp_mask, 0);
+
+		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
+		area->pages[i] = page;
	}
	if (map_vm_area(area, prot, &pages))
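
The reworked loop keeps the established error-handling pattern: on a mid-loop allocation failure it records how many pages actually arrived (area->nr_pages = i) so that __vunmap() frees exactly those and no more. A userspace analogue of the pattern (illustration only, names invented):

	#include <stdlib.h>

	#define NSLOTS 16
	static void *slots[NSLOTS];

	static int alloc_all(int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			void *p = malloc(4096);

			if (!p)
				goto fail;	/* only slots[0..i-1] are valid */
			slots[i] = p;
		}
		return 0;
	fail:
		while (i-- > 0)
			free(slots[i]);
		return -1;
	}
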