        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
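+       /* Count the 2MB and 4KB pages used to build the kernel direct mapping */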
+       unsigned pages_2m = 0, pages_4k = 0;
 
        pgd_idx = pgd_index(PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
                                    is_kernel_text(addr2))
                                        prot = PAGE_KERNEL_LARGE_EXEC;
 
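+                               /* One more 2MB page in the direct mapping */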
+                               pages_2m++;
                                set_pmd(pmd, pfn_pmd(pfn, prot));
 
                                pfn += PTRS_PER_PTE;
                                if (is_kernel_text(addr))
                                        prot = PAGE_KERNEL_EXEC;
 
+                               pages_4k++;
                                set_pte(pte, pfn_pte(pfn, prot));
                        }
                        max_pfn_mapped = pfn;
                }
        }
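+       /* Publish the totals gathered above */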
+       update_page_count(PG_LEVEL_2M, pages_2m);
+       update_page_count(PG_LEVEL_4K, pages_4k);
 }
 
 static inline int page_kills_ppro(unsigned long pagenr)
 
 static unsigned long __meminit
 phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
 {
+       unsigned long pages = 0;
        int i = pmd_index(address);
 
        for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
                if (pmd_val(*pmd))
                        continue;
 
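+               /* This 2MB-aligned range gets a single large-page mapping */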
+               pages++;
                set_pte((pte_t *)pmd,
                        pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
        }
+       update_page_count(PG_LEVEL_2M, pages);
        return address;
 }
 
 static unsigned long __meminit
 phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
 {
+       unsigned long pages = 0;
        unsigned long last_map_addr = end;
        int i = pud_index(addr);
 
                }
 
                if (direct_gbpages) {
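+                       /* One more 1GB page in the direct mapping */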
+                       pages++;
                        set_pte((pte_t *)pud,
                                pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
                        last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
                unmap_low_page(pmd);
        }
        __flush_tlb_all();
+       update_page_count(PG_LEVEL_1G, pages);
 
        return last_map_addr >> PAGE_SHIFT;
 }
 
        unsigned        force_split : 1;
 };
 
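+/*
+ * Pages in the kernel direct mapping at each page-table level,
+ * maintained for the DirectMap lines in /proc/meminfo.
+ */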
+static unsigned long direct_pages_count[PG_LEVEL_NUM];
+
+void __meminit update_page_count(int level, unsigned long pages)
+{
+#ifdef CONFIG_PROC_FS
+       unsigned long flags;
+       /* Protect against CPA */
+       spin_lock_irqsave(&pgd_lock, flags);
+       direct_pages_count[level] += pages;
+       spin_unlock_irqrestore(&pgd_lock, flags);
+#endif
+}
+
 #ifdef CONFIG_X86_64
 
 static inline unsigned long highmap_start_pfn(void)
        for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
                set_pte(&pbase[i], pfn_pte(pfn, ref_prot));
 
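+       /*
+        * Splitting a large page turns one entry at 'level' into
+        * PTRS_PER_PTE entries one level down.  Only adjust the
+        * DirectMap statistics when the split page lies inside the
+        * kernel direct mapping.
+        */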
+       if (address >= (unsigned long)__va(0) &&
+               address < (unsigned long)__va(max_pfn_mapped << PAGE_SHIFT)) {
+               direct_pages_count[level]--;
+               direct_pages_count[level - 1] += PTRS_PER_PTE;
+       }
+
        /*
         * Install the new, split up pagetable. Important details here:
         *
 
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
+#ifdef CONFIG_PROC_FS
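+/*
+ * Emit the DirectMap breakdown for /proc/meminfo; illustrative output
+ * (the counts are pages at each size, not kB):
+ *
+ *     DirectMap4k:      1536
+ *     DirectMap2M:      2044
+ *     DirectMap1G:         0
+ */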
+int arch_report_meminfo(char *page)
+{
+       int n;
+       n = sprintf(page, "DirectMap4k:  %8lu\n"
+                         "DirectMap2M:  %8lu\n",
+                       direct_pages_count[PG_LEVEL_4K],
+                       direct_pages_count[PG_LEVEL_2M]);
+#ifdef CONFIG_X86_64
+       n += sprintf(page + n, "DirectMap1G:  %8lu\n",
+                       direct_pages_count[PG_LEVEL_1G]);
+#endif
+       return n;
+}
+#endif
+
 /*
  * The testcases use internal knowledge of the implementation that shouldn't
  * be exposed to the rest of the kernel. Include these directly here.
 
        return proc_calc_metrics(page, start, off, count, eof, len);
 }
 
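+/* Weak default; architectures that track the direct mapping override it */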
+int __attribute__((weak)) arch_report_meminfo(char *page)
+{
+       return 0;
+}
+
 static int meminfo_read_proc(char *page, char **start, off_t off,
                                 int count, int *eof, void *data)
 {
 
                len += hugetlb_report_meminfo(page + len);
 
+       len += arch_report_meminfo(page + len);
+
        return proc_calc_metrics(page, start, off, count, eof, len);
 #undef K
 }
 
        PG_LEVEL_4K,
        PG_LEVEL_2M,
        PG_LEVEL_1G,
+       PG_LEVEL_NUM    /* number of page-table levels */
 };
 
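+/* Add 'pages' to the direct-mapping count for 'level' */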
+void update_page_count(int level, unsigned long pages);
+
 /*
  * Helper function that returns the kernel pagetable entry controlling
  * the virtual address 'address'. NULL means no pagetable entry present.