_text, _etext);
                }
                free_init_pages("SMP alternatives",
-                               (unsigned long)__smp_alt_begin,
-                               (unsigned long)__smp_alt_end);
+                               __pa_symbol(&__smp_alt_begin),
+                               __pa_symbol(&__smp_alt_end));
        } else {
                alternatives_smp_save(__smp_alt_instructions,
                                      __smp_alt_instructions_end);
 
        unsigned long addr;
 
        for (addr = begin; addr < end; addr += PAGE_SIZE) {
-               ClearPageReserved(virt_to_page(addr));
-               init_page_count(virt_to_page(addr));
-               memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
-               free_page(addr);
+               struct page *page = pfn_to_page(addr >> PAGE_SHIFT);
+               ClearPageReserved(page);
+               init_page_count(page);
+               memset(page_address(page), POISON_FREE_INITMEM, PAGE_SIZE);
+               __free_page(page);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
 void free_initmem(void)
 {
        free_init_pages("unused kernel memory",
-                       (unsigned long)(&__init_begin),
-                       (unsigned long)(&__init_end));
+                       __pa_symbol(&__init_begin),
+                       __pa_symbol(&__init_end));
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-       free_init_pages("initrd memory", start, end);
+       free_init_pages("initrd memory", __pa(start), __pa(end));
 }
 #endif
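
A note on the two hunks above: free_init_pages() callers and body change
together. begin/end are now physical addresses, so the loop derives each
struct page by pfn instead of calling virt_to_page() on a virtual address
(which, for symbols like __init_begin, would need the soon-to-be-removed
__START_KERNEL_map handling in __pa()). A minimal userspace model of the
new convention; PAGE_SHIFT and the sample physical range are illustrative,
not taken from the patch:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* begin/end are physical addresses; pages are found by pfn */
static void free_init_pages_model(const char *what,
				  unsigned long begin, unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE)
		printf("  would free pfn %#lx\n", addr >> PAGE_SHIFT);
	printf("Freeing %s: %luk freed\n", what, (end - begin) >> 10);
}

int main(void)
{
	/* hypothetical physical extent of an __init section */
	free_init_pages_model("unused kernel memory", 0x800000UL, 0x804000UL);
	return 0;
}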
 
 
 
        page_list[PA_CONTROL_PAGE] = __pa(control_page);
        page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel;
-       page_list[PA_PGD] = __pa(kexec_pgd);
+       page_list[PA_PGD] = __pa_symbol(&kexec_pgd);
        page_list[VA_PGD] = (unsigned long)kexec_pgd;
-       page_list[PA_PUD_0] = __pa(kexec_pud0);
+       page_list[PA_PUD_0] = __pa_symbol(&kexec_pud0);
        page_list[VA_PUD_0] = (unsigned long)kexec_pud0;
-       page_list[PA_PMD_0] = __pa(kexec_pmd0);
+       page_list[PA_PMD_0] = __pa_symbol(&kexec_pmd0);
        page_list[VA_PMD_0] = (unsigned long)kexec_pmd0;
-       page_list[PA_PTE_0] = __pa(kexec_pte0);
+       page_list[PA_PTE_0] = __pa_symbol(&kexec_pte0);
        page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
-       page_list[PA_PUD_1] = __pa(kexec_pud1);
+       page_list[PA_PUD_1] = __pa_symbol(&kexec_pud1);
        page_list[VA_PUD_1] = (unsigned long)kexec_pud1;
-       page_list[PA_PMD_1] = __pa(kexec_pmd1);
+       page_list[PA_PMD_1] = __pa_symbol(&kexec_pmd1);
        page_list[VA_PMD_1] = (unsigned long)kexec_pmd1;
-       page_list[PA_PTE_1] = __pa(kexec_pte1);
+       page_list[PA_PTE_1] = __pa_symbol(&kexec_pte1);
        page_list[VA_PTE_1] = (unsigned long)kexec_pte1;
 
        page_list[PA_TABLE_PAGE] =
 
        init_mm.end_code = (unsigned long) &_etext;
        init_mm.end_data = (unsigned long) &_edata;
        init_mm.brk = (unsigned long) &_end;
+       init_mm.pgd = __va(__pa_symbol(&init_level4_pgt));
 
-       code_resource.start = virt_to_phys(&_text);
-       code_resource.end = virt_to_phys(&_etext)-1;
-       data_resource.start = virt_to_phys(&_etext);
-       data_resource.end = virt_to_phys(&_edata)-1;
+       code_resource.start = __pa_symbol(&_text);
+       code_resource.end = __pa_symbol(&_etext)-1;
+       data_resource.start = __pa_symbol(&_etext);
+       data_resource.end = __pa_symbol(&_edata)-1;
 
        early_identify_cpu(&boot_cpu_data);
 
 
        if (read_pda(mmu_state) == TLBSTATE_OK)
                BUG();
        cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
-       load_cr3(swapper_pg_dir);
+       load_cr3(init_mm.pgd);
 }
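
Why load_cr3(swapper_pg_dir) has to go: load_cr3() converts its argument
with __pa() (on x86_64 of this vintage it is roughly write_cr3(__pa(pgd))),
and with this patch __pa() is only valid for PAGE_OFFSET addresses. That is
also why setup.c above points init_mm.pgd at the lowmem alias
__va(__pa_symbol(&init_level4_pgt)) rather than at the kernel-image symbol
itself. A userspace sketch of the aliasing arithmetic; the mapping bases
match this era, the symbol address is hypothetical:

#include <stdio.h>

#define PAGE_OFFSET        0xffff810000000000UL
#define __START_KERNEL_map 0xffffffff80000000UL

static unsigned long pa(unsigned long x)        { return x - PAGE_OFFSET; }
static unsigned long pa_symbol(unsigned long x) { return x - __START_KERNEL_map; }
static unsigned long va(unsigned long phys)     { return phys + PAGE_OFFSET; }

int main(void)
{
	unsigned long init_level4_pgt = __START_KERNEL_map + 0x201000UL; /* hypothetical */
	unsigned long pgd = va(pa_symbol(init_level4_pgt)); /* lowmem alias */

	printf("cr3 <- %#lx\n", pa(pgd)); /* 0x201000, the real physical address */
	return 0;
}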
 
 /*
 
 
 #define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
 #define __syscall_clobber "r11","rcx","memory"
+#define __pa_vsymbol(x)                        \
+       ({unsigned long v;              \
+         extern char __vsyscall_0;     \

+         asm("" : "=r" (v) : "0" (x)); \
+         ((v - VSYSCALL_FIRST_PAGE) + __pa_symbol(&__vsyscall_0)); })
 
 struct vsyscall_gtod_data_t {
        seqlock_t lock;
                return ret;
        /* gcc has some trouble with __va(__pa()), so just do it this
           way. */
-       map1 = ioremap(__pa_symbol(&vsysc1), 2);
+       map1 = ioremap(__pa_vsymbol(&vsysc1), 2);
        if (!map1)
                return -ENOMEM;
-       map2 = ioremap(__pa_symbol(&vsysc2), 2);
+       map2 = ioremap(__pa_vsymbol(&vsysc2), 2);
        if (!map2) {
                ret = -ENOMEM;
                goto out;
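
The __pa_vsymbol() macro added above exists because vsyscall code is special
twice over: it executes at the fixed address VSYSCALL_FIRST_PAGE, yet its
bytes live inside the kernel image starting at __vsyscall_0, so neither
__pa() nor __pa_symbol() applies to a vsyscall address directly. The macro
rebases the offset within the vsyscall area onto __vsyscall_0 (the asm("")
barrier, as in __pa_symbol(), keeps gcc from drawing conclusions from the
symbol arithmetic). A userspace model of that arithmetic; the fixmap and
mapping bases match this era, the __vsyscall_0 address is hypothetical:

#include <stdio.h>

#define VSYSCALL_FIRST_PAGE 0xffffffffff600000UL
#define __START_KERNEL_map  0xffffffff80000000UL
#define VSYSCALL_0          0xffffffff80602000UL /* hypothetical __vsyscall_0 */

static unsigned long pa_symbol(unsigned long x) { return x - __START_KERNEL_map; }

static unsigned long pa_vsymbol(unsigned long v)
{
	/* offset inside the vsyscall area, rebased onto the physical
	 * location of that area within the kernel image */
	return (v - VSYSCALL_FIRST_PAGE) + pa_symbol(VSYSCALL_0);
}

int main(void)
{
	unsigned long vsysc1 = VSYSCALL_FIRST_PAGE + 0x123UL; /* hypothetical label */

	printf("ioremap at %#lx\n", pa_vsymbol(vsysc1)); /* 0x602123 */
	return 0;
}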
 
 
        printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
        for (addr = begin; addr < end; addr += PAGE_SIZE) {
-               ClearPageReserved(virt_to_page(addr));
-               init_page_count(virt_to_page(addr));
-               memset((void *)(addr & ~(PAGE_SIZE-1)),
-                       POISON_FREE_INITMEM, PAGE_SIZE);
-               free_page(addr);
+               struct page *page = pfn_to_page(addr >> PAGE_SHIFT);
+               ClearPageReserved(page);
+               init_page_count(page);
+               memset(page_address(page), POISON_FREE_INITMEM, PAGE_SIZE);
+               __free_page(page);
                totalram_pages++;
        }
 }
        memset(__initdata_begin, POISON_FREE_INITDATA,
                __initdata_end - __initdata_begin);
        free_init_pages("unused kernel memory",
-                       (unsigned long)(&__init_begin),
-                       (unsigned long)(&__init_end));
+                       __pa_symbol(&__init_begin),
+                       __pa_symbol(&__init_end));
 }
 
 #ifdef CONFIG_DEBUG_RODATA
 
 void mark_rodata_ro(void)
 {
-       unsigned long addr = (unsigned long)__start_rodata;
+       unsigned long addr = (unsigned long)__va(__pa_symbol(&__start_rodata));
+       unsigned long end  = (unsigned long)__va(__pa_symbol(&__end_rodata));
 
-       for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
+       for (; addr < end; addr += PAGE_SIZE)
                change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);
 
        printk ("Write protecting the kernel read-only data: %luk\n",
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-       free_init_pages("initrd memory", start, end);
+       free_init_pages("initrd memory", __pa(start), __pa(end));
 }
 #endif
 
 
        SetPagePrivate(base);
        page_private(base) = 0;
 
-       address = __pa(address);
        addr = address & LARGE_PAGE_MASK; 
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
  * No more special protections in this 2/4MB area - revert to a
  * large page again. 
  */
-static void revert_page(unsigned long address, pgprot_t ref_prot)
+static void revert_page(unsigned long address, unsigned long pfn, pgprot_t ref_prot)
 {
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t large_pte;
-       unsigned long pfn;
 
        pgd = pgd_offset_k(address);
        BUG_ON(pgd_none(*pgd));
        BUG_ON(pud_none(*pud));
        pmd = pmd_offset(pud, address);
        BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
-       pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
        large_pte = pfn_pte(pfn, ref_prot);
        large_pte = pte_mkhuge(large_pte);
        set_pte((pte_t *)pmd, large_pte);
                         */
                        struct page *split;
                        ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
-                       split = split_large_page(address, prot, ref_prot2);
+                       split = split_large_page(pfn << PAGE_SHIFT, prot,
+                                                       ref_prot2);
                        if (!split)
                                return -ENOMEM;
                        set_pte(kpte, mk_pte(split, ref_prot2));
 
        if (page_private(kpte_page) == 0) {
                save_page(kpte_page);
-               revert_page(address, ref_prot);
+               revert_page(address, pfn, ref_prot);
        }
        return 0;
 } 
  */
 int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
 {
+       unsigned long phys_base_pfn = __pa_symbol(__START_KERNEL_map) >> PAGE_SHIFT;
        int err = 0; 
        int i; 
 
                        break; 
                /* Handle kernel mapping too which aliases part of the
                 * lowmem */
-               if (__pa(address) < KERNEL_TEXT_SIZE) {
+               if ((pfn >= phys_base_pfn) &&
+                       ((pfn - phys_base_pfn) < (KERNEL_TEXT_SIZE >> PAGE_SHIFT))) {
                        unsigned long addr2;
                        pgprot_t prot2;
-                       addr2 = __START_KERNEL_map + __pa(address);
+                       addr2 = __START_KERNEL_map + ((pfn - phys_base_pfn) << PAGE_SHIFT);
                        /* Make sure the kernel mappings stay executable */
                        prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
                        err = __change_page_attr(addr2, pfn, prot2,
 
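The pageattr.c theme: __pa(address) is no longer meaningful for a
__START_KERNEL_map address, so the caller computes the pfn once and threads
it through __change_page_attr(), split_large_page() and revert_page(), and
the "does this page alias kernel text?" test moves from physical addresses
to a pfn window starting at phys_base_pfn. A userspace model of the new
alias check; KERNEL_TEXT_SIZE and the sample pfn are illustrative, and
phys_base_pfn is zero exactly as __pa_symbol(__START_KERNEL_map) is with
this patch:

#include <stdio.h>

#define PAGE_SHIFT 12
#define KERNEL_TEXT_SIZE   (40UL * 1024 * 1024) /* illustrative */
#define __START_KERNEL_map 0xffffffff80000000UL

int main(void)
{
	unsigned long phys_base_pfn = 0; /* __pa_symbol(__START_KERNEL_map) >> PAGE_SHIFT */
	unsigned long pfn = 0x1234UL;    /* hypothetical page being changed */

	if (pfn >= phys_base_pfn &&
	    (pfn - phys_base_pfn) < (KERNEL_TEXT_SIZE >> PAGE_SHIFT)) {
		unsigned long addr2 = __START_KERNEL_map +
				      ((pfn - phys_base_pfn) << PAGE_SHIFT);
		printf("also change the alias at %#lx\n", addr2);
	}
	return 0;
}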
 
 /* Note: __pa(&symbol_visible_to_c) should always be replaced with __pa_symbol.
    Otherwise you risk miscompilation. */
-#define __pa(x)                        (((unsigned long)(x)>=__START_KERNEL_map)?(unsigned long)(x) - (unsigned long)__START_KERNEL_map:(unsigned long)(x) - PAGE_OFFSET)
+#define __pa(x)                        ((unsigned long)(x) - PAGE_OFFSET)
 /* __pa_symbol should be used for C-visible symbols.
    This seems to be the official gcc-blessed way to do such arithmetic. */
 #define __pa_symbol(x)         \
        ({unsigned long v;  \
          asm("" : "=r" (v) : "0" (x)); \
-         __pa(v); })
+         (v - __START_KERNEL_map); })
 
 #define __va(x)                        ((void *)((unsigned long)(x)+PAGE_OFFSET))
-#define __boot_va(x)           __va(x)
-#define __boot_pa(x)           __pa(x)
 #ifdef CONFIG_FLATMEM
 #define pfn_valid(pfn)         ((pfn) < end_pfn)
 #endif
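
The page.h hunk above is the core of the patch: the old __pa() silently
accepted virtual addresses from both mappings and picked a base with a
ternary; now each mapping has exactly one converter, and handing a
C-visible symbol to __pa() becomes a bug rather than a slow path. A
self-contained model of the split; the mapping bases match x86_64 of this
era, the sample addresses are hypothetical:

#include <stdio.h>

#define PAGE_OFFSET        0xffff810000000000UL /* direct (lowmem) mapping */
#define __START_KERNEL_map 0xffffffff80000000UL /* kernel text/data mapping */

static unsigned long pa(unsigned long x)        { return x - PAGE_OFFSET; }
static unsigned long pa_symbol(unsigned long x) { return x - __START_KERNEL_map; }

int main(void)
{
	unsigned long sym = __START_KERNEL_map + 0x103000UL; /* hypothetical symbol */
	unsigned long buf = PAGE_OFFSET + 0x2000000UL;       /* hypothetical lowmem */

	printf("__pa_symbol(sym) = %#lx\n", pa_symbol(sym)); /* 0x103000 */
	printf("__pa(buf)        = %#lx\n", pa(buf));        /* 0x2000000 */
	/* pa(sym) now yields garbage by design: the two address spaces
	 * are separated, and misuse is meant to fail loudly. */
	return 0;
}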
 
 extern pgd_t init_level4_pgt[];
 extern unsigned long __supported_pte_mask;
 
-#define swapper_pg_dir init_level4_pgt
+#define swapper_pg_dir ((pgd_t *)NULL)
 
 extern void paging_init(void);
 extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
  * for zero-mapped memory areas etc..
  */
 extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+#define ZERO_PAGE(vaddr) (pfn_to_page(__pa_symbol(&empty_zero_page) >> PAGE_SHIFT))
 
 #endif /* !__ASSEMBLY__ */
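
Two closing notes on the pgtable.h hunk. Defining swapper_pg_dir as a NULL
pointer rather than deleting it presumably turns any user this patch missed
into an immediate fault instead of a silent mistranslation. And ZERO_PAGE()
changes because virt_to_page() is (roughly) pfn_to_page(__pa(kaddr) >>
PAGE_SHIFT): once __pa() stops handling __START_KERNEL_map addresses, a
kernel-image symbol like empty_zero_page must be converted explicitly. A
userspace model with an illustrative symbol address:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_OFFSET        0xffff810000000000UL
#define __START_KERNEL_map 0xffffffff80000000UL

static unsigned long pa(unsigned long x)        { return x - PAGE_OFFSET; }
static unsigned long pa_symbol(unsigned long x) { return x - __START_KERNEL_map; }

int main(void)
{
	unsigned long empty_zero_page = __START_KERNEL_map + 0x3f2000UL; /* hypothetical */

	printf("old virt_to_page pfn: %#lx (garbage)\n",
	       pa(empty_zero_page) >> PAGE_SHIFT);
	printf("new __pa_symbol  pfn: %#lx\n",
	       pa_symbol(empty_zero_page) >> PAGE_SHIFT);
	return 0;
}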