X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=mm%2Fsparse.c;h=f6a43c09c322cdb39077392ec1c77e9904c52d82;hb=882bebaaca4bb1484078d44ef011f918c0e1e14e;hp=e03b39f3540f79adf384c476f185e7a9da91e4ac;hpb=eedab661a51966c454e38c17266a531aa58b4a98;p=linux-2.6-omap-h63xx.git

diff --git a/mm/sparse.c b/mm/sparse.c
index e03b39f3540..f6a43c09c32 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -9,6 +9,8 @@
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
 #include <asm/dma.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
 
 /*
  * Permanent SPARSEMEM data:
@@ -41,6 +43,15 @@ int page_to_nid(struct page *page)
 	return section_to_node_table[page_to_section(page)];
 }
 EXPORT_SYMBOL(page_to_nid);
+
+static void set_section_nid(unsigned long section_nr, int nid)
+{
+	section_to_node_table[section_nr] = nid;
+}
+#else /* !NODE_NOT_IN_PAGE_FLAGS */
+static inline void set_section_nid(unsigned long section_nr, int nid)
+{
+}
 #endif
 
 #ifdef CONFIG_SPARSEMEM_EXTREME
@@ -68,14 +79,12 @@ static int __meminit sparse_index_init(unsigned long section_nr, int nid)
 	struct mem_section *section;
 	int ret = 0;
 
-#ifdef NODE_NOT_IN_PAGE_FLAGS
-	section_to_node_table[section_nr] = nid;
-#endif
-
 	if (mem_section[root])
 		return -EEXIST;
 
 	section = sparse_index_alloc(nid);
+	if (!section)
+		return -ENOMEM;
 	/*
 	 * This lock keeps two different sections from
 	 * reallocating for the same index
@@ -101,7 +110,7 @@ static inline int sparse_index_init(unsigned long section_nr, int nid)
 
 /*
  * Although written for the SPARSEMEM_EXTREME case, this happens
- * to also work for the flat array case becase
+ * to also work for the flat array case because
  * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
  */
 int __section_nr(struct mem_section* ms)
@@ -148,6 +157,7 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
 		struct mem_section *ms;
 
 		sparse_index_init(section, nid);
+		set_section_nid(section, nid);
 
 		ms = __nr_to_section(section);
 		if (!ms->section_mem_map)
@@ -170,7 +180,7 @@ unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
 		if (nid != early_pfn_to_nid(pfn))
 			continue;
 
-		if (pfn_valid(pfn))
+		if (pfn_present(pfn))
 			nr_pages += PAGES_PER_SECTION;
 	}
 
@@ -198,44 +208,79 @@ struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pn
 }
 
 static int __meminit sparse_init_one_section(struct mem_section *ms,
-		unsigned long pnum, struct page *mem_map)
+		unsigned long pnum, struct page *mem_map,
+		unsigned long *pageblock_bitmap)
 {
-	if (!valid_section(ms))
+	if (!present_section(ms))
 		return -EINVAL;
 
 	ms->section_mem_map &= ~SECTION_MAP_MASK;
-	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum);
+	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
+							SECTION_HAS_MEM_MAP;
+	ms->pageblock_flags = pageblock_bitmap;
 
 	return 1;
 }
 
-__attribute__((weak))
-void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
+static unsigned long usemap_size(void)
 {
-	return NULL;
+	unsigned long size_bytes;
+	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
+	size_bytes = roundup(size_bytes, sizeof(unsigned long));
+	return size_bytes;
 }
 
-static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
+#ifdef CONFIG_MEMORY_HOTPLUG
+static unsigned long *__kmalloc_section_usemap(void)
 {
-	struct page *map;
+	return kmalloc(usemap_size(), GFP_KERNEL);
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
+static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
+{
+	unsigned long *usemap;
 	struct mem_section *ms = __nr_to_section(pnum);
 	int nid = sparse_early_nid(ms);
 
-	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
- if (map) - return map; + usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size()); + if (usemap) + return usemap; + + /* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */ + nid = 0; + + printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__); + return NULL; +} + +#ifndef CONFIG_SPARSEMEM_VMEMMAP +struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid) +{ + struct page *map; - map = alloc_bootmem_high_node(NODE_DATA(nid), - sizeof(struct page) * PAGES_PER_SECTION); + map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION); if (map) return map; map = alloc_bootmem_node(NODE_DATA(nid), sizeof(struct page) * PAGES_PER_SECTION); + return map; +} +#endif /* !CONFIG_SPARSEMEM_VMEMMAP */ + +struct page __init *sparse_early_mem_map_alloc(unsigned long pnum) +{ + struct page *map; + struct mem_section *ms = __nr_to_section(pnum); + int nid = sparse_early_nid(ms); + + map = sparse_mem_map_populate(pnum, nid); if (map) return map; - printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__); + printk(KERN_ERR "%s: sparsemem memory map backing failed " + "some memory will not be available.\n", __FUNCTION__); ms->section_mem_map = 0; return NULL; } @@ -248,19 +293,38 @@ void __init sparse_init(void) { unsigned long pnum; struct page *map; + unsigned long *usemap; for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) { - if (!valid_section_nr(pnum)) + if (!present_section_nr(pnum)) continue; map = sparse_early_mem_map_alloc(pnum); if (!map) continue; - sparse_init_one_section(__nr_to_section(pnum), pnum, map); + + usemap = sparse_early_usemap_alloc(pnum); + if (!usemap) + continue; + + sparse_init_one_section(__nr_to_section(pnum), pnum, map, + usemap); } } #ifdef CONFIG_MEMORY_HOTPLUG +#ifdef CONFIG_SPARSEMEM_VMEMMAP +static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid, + unsigned long nr_pages) +{ + /* This will make the necessary allocations eventually. 
*/ + return sparse_mem_map_populate(pnum, nid); +} +static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages) +{ + return; /* XXX: Not implemented yet */ +} +#else static struct page *__kmalloc_section_memmap(unsigned long nr_pages) { struct page *page, *ret; @@ -283,22 +347,21 @@ got_map_ptr: return ret; } -static int vaddr_in_vmalloc_area(void *addr) +static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid, + unsigned long nr_pages) { - if (addr >= (void *)VMALLOC_START && - addr < (void *)VMALLOC_END) - return 1; - return 0; + return __kmalloc_section_memmap(nr_pages); } static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages) { - if (vaddr_in_vmalloc_area(memmap)) + if (is_vmalloc_addr(memmap)) vfree(memmap); else free_pages((unsigned long)memmap, get_order(sizeof(struct page) * nr_pages)); } +#endif /* CONFIG_SPARSEMEM_VMEMMAP */ /* * returns the number of sections whose mem_maps were properly @@ -312,6 +375,7 @@ int sparse_add_one_section(struct zone *zone, unsigned long start_pfn, struct pglist_data *pgdat = zone->zone_pgdat; struct mem_section *ms; struct page *memmap; + unsigned long *usemap; unsigned long flags; int ret; @@ -319,8 +383,17 @@ int sparse_add_one_section(struct zone *zone, unsigned long start_pfn, * no locking for this, because it does its own * plus, it does a kmalloc */ - sparse_index_init(section_nr, pgdat->node_id); - memmap = __kmalloc_section_memmap(nr_pages); + ret = sparse_index_init(section_nr, pgdat->node_id); + if (ret < 0 && ret != -EEXIST) + return ret; + memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages); + if (!memmap) + return -ENOMEM; + usemap = __kmalloc_section_usemap(); + if (!usemap) { + __kfree_section_memmap(memmap, nr_pages); + return -ENOMEM; + } pgdat_resize_lock(pgdat, &flags); @@ -329,14 +402,17 @@ int sparse_add_one_section(struct zone *zone, unsigned long start_pfn, ret = -EEXIST; goto out; } + ms->section_mem_map |= SECTION_MARKED_PRESENT; - ret = sparse_init_one_section(ms, section_nr, memmap); + ret = sparse_init_one_section(ms, section_nr, memmap, usemap); out: pgdat_resize_unlock(pgdat, &flags); - if (ret <= 0) + if (ret <= 0) { + kfree(usemap); __kfree_section_memmap(memmap, nr_pages); + } return ret; } #endif
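
A note on the usemap sizing introduced above: usemap_size() converts SECTION_BLOCKFLAGS_BITS (the pageblock flag bits kept per section) into a byte count, rounding up first to whole bytes and then to whole unsigned longs so the bitmap can be handled word at a time. Below is a minimal standalone sketch of that arithmetic; the per-pageblock bit counts are made-up example values, not the kernel's real configuration:

#include <stdio.h>

/* Same rounding the kernel's roundup() macro performs. */
#define ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

/* Hypothetical example values: 4 flag bits per pageblock,
 * 32 pageblocks per section. */
#define EXAMPLE_BLOCKFLAGS_BITS (4 * 32)

static unsigned long usemap_size(void)
{
	unsigned long size_bytes;

	/* bits -> whole bytes */
	size_bytes = ROUNDUP(EXAMPLE_BLOCKFLAGS_BITS, 8) / 8;
	/* bytes -> whole unsigned longs */
	size_bytes = ROUNDUP(size_bytes, sizeof(unsigned long));
	return size_bytes;
}

int main(void)
{
	printf("usemap allocation per section: %lu bytes\n", usemap_size());
	return 0;
}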
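
Similarly, the hotplug path in sparse_add_one_section() above now allocates the memmap first and the usemap second, and frees the memmap if the usemap allocation fails, so neither allocation can leak. A small userspace analogue of that unwind ordering, with malloc()/free() standing in for the kernel allocators (all names here are illustrative, not kernel API):

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for kmalloc_section_memmap()/__kmalloc_section_usemap(). */
static void *alloc_memmap(void) { return malloc(4096); }
static void *alloc_usemap(void) { return malloc(64); }

/* Mirrors the ordering in the patch: allocate memmap, then usemap;
 * on usemap failure, release the memmap before returning an error. */
static int add_section(void **memmap, void **usemap)
{
	*memmap = alloc_memmap();
	if (!*memmap)
		return -1;		/* -ENOMEM in the kernel */

	*usemap = alloc_usemap();
	if (!*usemap) {
		free(*memmap);		/* __kfree_section_memmap() */
		*memmap = NULL;
		return -1;
	}
	return 0;
}

int main(void)
{
	void *memmap, *usemap;

	if (add_section(&memmap, &usemap) == 0) {
		puts("section added");
		free(usemap);
		free(memmap);
	}
	return 0;
}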