return register_new_memory(__pfn_to_section(phys_start_pfn));
 }
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static int __remove_section(struct zone *zone, struct mem_section *ms)
+{
+       /*
+        * XXX: Freeing memmap with vmemmap is not implemented yet.
+        *      This should be removed later.
+        */
+       return -EBUSY;
+}
+#else
 static int __remove_section(struct zone *zone, struct mem_section *ms)
 {
        unsigned long flags;
        pgdat_resize_unlock(pgdat, &flags);
        return 0;
 }
+#endif
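
For context, a minimal sketch of the expected caller, showing how the -EBUSY
from the vmemmap stub above aborts a multi-section removal early. This mirrors
the generic __remove_pages() loop shape from this series; it is illustrative,
not part of this hunk, and the exact checks are assumptions:

    /* Sketch, not part of this patch: assumed per-section removal loop. */
    int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
                       unsigned long nr_pages)
    {
            unsigned long i;
            int ret = 0;

            /* Only whole sections can be removed. */
            BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
            BUG_ON(nr_pages % PAGES_PER_SECTION);

            for (i = 0; i < nr_pages / PAGES_PER_SECTION; i++) {
                    unsigned long pfn = phys_start_pfn + i * PAGES_PER_SECTION;

                    ret = __remove_section(zone, __pfn_to_section(pfn));
                    if (ret)
                            break;  /* e.g. -EBUSY from the vmemmap stub */
            }
            return ret;
    }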
 
 /*
  * Reasonably generic function for adding memory.  It is
 
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
+#include "internal.h"
 #include <asm/dma.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 {
        return; /* XXX: Not implemented yet */
 }
+static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+{
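+       /*
+        * Nothing to do here: in the vmemmap case, __remove_section()
+        * fails with -EBUSY before any memmap could reach this point.
+        */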
+}
 #else
 static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
 {
                free_pages((unsigned long)memmap,
                           get_order(sizeof(struct page) * nr_pages));
 }
+
+static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+{
+       unsigned long maps_section_nr, removing_section_nr, i;
+       int magic;
+
+       for (i = 0; i < nr_pages; i++, page++) {
+               magic = atomic_read(&page->_mapcount);
+
+               BUG_ON(magic == NODE_INFO);
+
+               maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
+               removing_section_nr = page->private;
+
+               /*
+                * When this function is called, the section being removed
+                * has already been logically offlined, so all of its pages
+                * are isolated from the page allocator. If the memmap of the
+                * section being removed lives on that same section, it must
+                * not be freed here: the page allocator could hand it out
+                * again, and the memory will soon be removed physically.
+                */
+               if (maps_section_nr != removing_section_nr)
+                       put_page_bootmem(page);
+       }
+}
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
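
For reference, free_map_bootmem() above depends on annotations assumed to be
stamped on each bootmem-allocated memmap page at registration time: a type
magic in page->_mapcount (SECTION_INFO or MIX_SECTION_INFO; NODE_INFO marks
pgdat pages, hence the BUG_ON) and the owning section number in page->private.
A minimal sketch of that marking step, mirroring exactly the fields the loop
reads; mark_bootmem_page() is a hypothetical name for what the in-tree
get_page_bootmem() registration path is expected to do:

    /* Sketch, not part of this patch: assumed annotation of a memmap page. */
    static void mark_bootmem_page(struct page *page, unsigned long section_nr,
                                  int magic)
    {
            atomic_set(&page->_mapcount, magic);  /* read back as "magic" */
            SetPagePrivate(page);
            set_page_private(page, section_nr);   /* read as page->private */
            atomic_inc(&page->_count);            /* put_page_bootmem() drops it */
    }

put_page_bootmem() is then expected to drop that reference and free the page
once it holds the last one; skipping the call for a self-hosted memmap is what
keeps those pages out of the allocator until the section is physically gone.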
 
 static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 {
+       struct page *usemap_page;
+       unsigned long nr_pages;
+
        if (!usemap)
                return;
 
+       usemap_page = virt_to_page(usemap);
        /*
         * Check to see if allocation came from hot-plug-add
         */
-       if (PageSlab(virt_to_page(usemap))) {
+       if (PageSlab(usemap_page)) {
                kfree(usemap);
                if (memmap)
                        __kfree_section_memmap(memmap, PAGES_PER_SECTION);
+               return;
        }
 
        /*
-        * TODO: Allocations came from bootmem - how do I free up ?
+        * The usemap came from bootmem. At boot, usemaps are packed together
+        * on the section that holds the pgdat, so this one cannot be freed
+        * individually. Just keep it as is for now.
         */
-       printk(KERN_WARNING "Not freeing up allocations from bootmem "
-                       "- leaking memory\n");
+
+       if (memmap) {
+               struct page *memmap_page;
+               memmap_page = virt_to_page(memmap);
+
+               nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
+                       >> PAGE_SHIFT;
+
+               free_map_bootmem(memmap_page, nr_pages);
+       }
 }
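
For completeness, a sketch of the assumed call site that detaches the section's
memmap/usemap pointers and hands them to free_section_usemap() above; the
helpers are the existing mm/sparse.c ones, but the exact shape is illustrative:

    /* Sketch, not part of this patch: assumed section-removal entry point. */
    void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
    {
            struct page *memmap = NULL;
            unsigned long *usemap = NULL;

            if (ms->section_mem_map) {
                    usemap = ms->pageblock_flags;
                    memmap = sparse_decode_mem_map(ms->section_mem_map,
                                                   __section_nr(ms));
                    ms->section_mem_map = 0;
                    ms->pageblock_flags = NULL;
            }

            free_section_usemap(memmap, usemap);
    }

For scale, with 4 KiB pages and a 64-byte struct page (illustrative sizes), a
32768-page section's memmap spans PAGE_ALIGN(32768 * 64) >> PAGE_SHIFT = 512
pages, which is the nr_pages value handed to free_map_bootmem() above.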
 
 /*