(void *)sect->sh_addr + sect->sh_size);
 #endif
 
+       sect = find_section(hdr, sechdrs, "__lwsync_fixup");
+       if (sect != NULL)
+               do_lwsync_fixups(cur_cpu_spec->cpu_features,
+                                (void *)sect->sh_addr,
+                                (void *)sect->sh_addr + sect->sh_size);
+
        return 0;
 }
 
 
                          PTRRELOC(&__start___ftr_fixup),
                          PTRRELOC(&__stop___ftr_fixup));
 
+       do_lwsync_fixups(spec->cpu_features,
+                        PTRRELOC(&__start___lwsync_fixup),
+                        PTRRELOC(&__stop___lwsync_fixup));
+
        return KERNELBASE + offset;
 }
 
 
                          &__start___ftr_fixup, &__stop___ftr_fixup);
        do_feature_fixups(powerpc_firmware_features,
                          &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
+       do_lwsync_fixups(cur_cpu_spec->cpu_features,
+                        &__start___lwsync_fixup, &__stop___lwsync_fixup);
 
        /*
         * Unflatten the device-tree passed by prom_init or kexec
 
        if (start64)
                do_feature_fixups(powerpc_firmware_features,
                                  start64, start64 + size64);
+
+       start64 = find_section64(v64->hdr, "__lwsync_fixup", &size64);
+       if (start64)
+               do_lwsync_fixups(cur_cpu_spec->cpu_features,
+                                start64, start64 + size64);
 #endif /* CONFIG_PPC64 */
 
        start32 = find_section32(v32->hdr, "__ftr_fixup", &size32);
                                  start32, start32 + size32);
 #endif /* CONFIG_PPC64 */
 
+       start32 = find_section32(v32->hdr, "__lwsync_fixup", &size32);
+       if (start32)
+               do_lwsync_fixups(cur_cpu_spec->cpu_features,
+                                start32, start32 + size32);
+
        return 0;
 }
 
 
        . = ALIGN(8);
        __ftr_fixup     : { *(__ftr_fixup) }
 
+       . = ALIGN(8);
+       __lwsync_fixup  : { *(__lwsync_fixup) }
+
 #ifdef CONFIG_PPC64
        . = ALIGN(8);
        __fw_ftr_fixup  : { *(__fw_ftr_fixup) }
 
        . = ALIGN(8);
        __ftr_fixup     : { *(__ftr_fixup) }
 
+       . = ALIGN(8);
+       __lwsync_fixup  : { *(__lwsync_fixup) }
+
        . = ALIGN(8);
        __fw_ftr_fixup  : { *(__fw_ftr_fixup) }
 
 
                *(__ftr_fixup)
                __stop___ftr_fixup = .;
        }
+       . = ALIGN(8);
+       __lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
+               __start___lwsync_fixup = .;
+               *(__lwsync_fixup)
+               __stop___lwsync_fixup = .;
+       }
 #ifdef CONFIG_PPC64
        . = ALIGN(8);
        __fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) {
 
 
 #include <asm/feature-fixups.h>
 #include <asm/ppc_asm.h>
+#include <asm/synch.h>
 
        .text
 
 MAKE_MACRO_TEST(FW_FTR);
 MAKE_MACRO_TEST_EXPECTED(FW_FTR);
 #endif
+
+globl(lwsync_fixup_test)
+1:     or      1,1,1
+       LWSYNC
+globl(end_lwsync_fixup_test)
+
+globl(lwsync_fixup_test_expected_LWSYNC)
+1:     or      1,1,1
+       lwsync
+
+globl(lwsync_fixup_test_expected_SYNC)
+1:     or      1,1,1
+       sync
+
 
        }
 }
 
+/*
+ * Patch every __lwsync_fixup site in [fixup_start, fixup_end) with a real
+ * lwsync instruction when the CPU advertises CPU_FTR_LWSYNC; otherwise the
+ * sites are left with the conservative full "sync" emitted at build time.
+ *
+ * Each table entry is a 32-bit self-relative offset (see
+ * MAKE_LWSYNC_SECTION_ENTRY) pointing back at the instruction to patch.
+ */
+void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+{
+       unsigned int *start, *end, *dest;
+
+       if (!(value & CPU_FTR_LWSYNC))
+               return;
+
+       start = fixup_start;
+       end = fixup_end;
+
+       for (; start < end; start++) {
+               /* entry holds the offset from the entry to its target */
+               dest = (void *)start + *start;
+               patch_instruction(dest, PPC_LWSYNC_INSTR);
+       }
+}
+
 #ifdef CONFIG_FTR_FIXUP_SELFTEST
 
 #define check(x)       \
 #endif
 }
 
+/*
+ * Self-test: verify the boot-time lwsync fixup ran correctly by comparing
+ * the assembled lwsync_fixup_test region against the expected-LWSYNC or
+ * expected-SYNC reference sequences, depending on the CPU's features.
+ */
+static void test_lwsync_macros(void)
+{
+       extern void lwsync_fixup_test;
+       extern void end_lwsync_fixup_test;
+       extern void lwsync_fixup_test_expected_LWSYNC;
+       extern void lwsync_fixup_test_expected_SYNC;
+       unsigned long size = &end_lwsync_fixup_test -
+                            &lwsync_fixup_test;
+
+       /* The fixups have already been done for us during boot */
+       if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
+               /* CPU has lwsync: the site must have been patched */
+               check(memcmp(&lwsync_fixup_test,
+                            &lwsync_fixup_test_expected_LWSYNC, size) == 0);
+       } else {
+               /* No lwsync: the build-time full sync must remain */
+               check(memcmp(&lwsync_fixup_test,
+                            &lwsync_fixup_test_expected_SYNC, size) == 0);
+       }
+}
+
 static int __init test_feature_fixups(void)
 {
        printk(KERN_DEBUG "Running feature fixup self-tests ...\n");
        test_alternative_case_with_external_branch();
        test_cpu_macros();
        test_fw_macros();
+       /* check boot-time lwsync patching as well */
+       test_lwsync_macros();
 
        return 0;
 }
 
 
 #include <asm/types.h>
 
-#define PPC_NOP_INSTR  0x60000000
+#define PPC_NOP_INSTR          0x60000000
+#define PPC_LWSYNC_INSTR       0x7c2004ac
 
 /* Flags for create_branch:
  * "b"   == create_branch(addr, target, 0);
 
 #define CPU_FTR_UNIFIED_ID_CACHE       ASM_CONST(0x0000000001000000)
 #define CPU_FTR_SPE                    ASM_CONST(0x0000000002000000)
 #define CPU_FTR_NEED_PAIRED_STWCX      ASM_CONST(0x0000000004000000)
+#define CPU_FTR_LWSYNC                 ASM_CONST(0x0000000008000000)
 
 /*
  * Add the 64-bit processor unique features in the top half of the word;
            CPU_FTR_NODSISRALIGN)
 #define CPU_FTRS_E500MC        (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
            CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_BIG_PHYS | CPU_FTR_NODSISRALIGN | \
-           CPU_FTR_L2CSR)
+           CPU_FTR_L2CSR | CPU_FTR_LWSYNC)
 #define CPU_FTRS_GENERIC_32    (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
 
 /* 64-bit CPUs */
-#define CPU_FTRS_POWER3        (CPU_FTR_USE_TB | \
+#define CPU_FTRS_POWER3        (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
            CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | CPU_FTR_PPC_LE)
-#define CPU_FTRS_RS64  (CPU_FTR_USE_TB | \
+#define CPU_FTRS_RS64  (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
            CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | \
            CPU_FTR_MMCRA | CPU_FTR_CTRL)
-#define CPU_FTRS_POWER4        (CPU_FTR_USE_TB | \
+#define CPU_FTRS_POWER4        (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
            CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
            CPU_FTR_MMCRA)
-#define CPU_FTRS_PPC970        (CPU_FTR_USE_TB | \
+#define CPU_FTRS_PPC970        (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
            CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
            CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA)
-#define CPU_FTRS_POWER5        (CPU_FTR_USE_TB | \
+#define CPU_FTRS_POWER5        (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
            CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
            CPU_FTR_MMCRA | CPU_FTR_SMT | \
            CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
            CPU_FTR_PURR)
-#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | \
+#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
            CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
            CPU_FTR_MMCRA | CPU_FTR_SMT | \
            CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
            CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
            CPU_FTR_DSCR)
-#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | \
+#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
            CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
            CPU_FTR_MMCRA | CPU_FTR_SMT | \
            CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
            CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
            CPU_FTR_DSCR)
-#define CPU_FTRS_CELL  (CPU_FTR_USE_TB | \
+#define CPU_FTRS_CELL  (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
            CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
            CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
            CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | CPU_FTR_CELL_TB_BUG)
-#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | \
+#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
            CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
            CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \
            CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_NO_SLBIE_B)
 
 
 #endif /* __ASSEMBLY__ */
 
+/* LWSYNC feature sections */
+/*
+ * Bracket a patchable instruction:
+ *     START_LWSYNC_SECTION(label); <insn>; MAKE_LWSYNC_SECTION_ENTRY(label, sect)
+ * emits into "sect" a 32-bit self-relative offset back to the bracketed
+ * instruction (label##1b - label##3b), which do_lwsync_fixups() walks.
+ */
+#define START_LWSYNC_SECTION(label)    label##1:
+#define MAKE_LWSYNC_SECTION_ENTRY(label, sect)         \
+label##2:                                              \
+       .pushsection sect,"a";                          \
+       .align 2;                                       \
+label##3:                                              \
+       .long label##1b-label##3b;                      \
+       .popsection;
+
 #endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */
 
 #ifdef __KERNEL__
 
 #include <linux/stringify.h>
+#include <asm/feature-fixups.h>
 
-#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
-#define __SUBARCH_HAS_LWSYNC
-#endif
+#ifndef __ASSEMBLY__
+/* Bounds of the __lwsync_fixup table laid out by the linker script */
+extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
+extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
+                            void *fixup_end);
+
+/* eieio: "enforce in-order execution of I/O" barrier */
+static inline void eieio(void)
+{
+       __asm__ __volatile__ ("eieio" : : : "memory");
+}
+
+/* isync: context-synchronizing instruction barrier */
+static inline void isync(void)
+{
+       __asm__ __volatile__ ("isync" : : : "memory");
+}
+#endif /* __ASSEMBLY__ */
 
+/*
+ * LWSYNC: lightweight sync on 64-bit, where it always exists; on E500 emit
+ * a full sync recorded in an __lwsync_fixup entry so it can be patched to
+ * lwsync at runtime if the CPU supports it; plain sync everywhere else.
+ */
-#ifdef __SUBARCH_HAS_LWSYNC
+#if defined(__powerpc64__)
 #    define LWSYNC     lwsync
+#elif defined(CONFIG_E500)
+#    define LWSYNC                                     \
+       START_LWSYNC_SECTION(96);                       \
+       sync;                                           \
+       MAKE_LWSYNC_SECTION_ENTRY(96, __lwsync_fixup);
 #else
 #    define LWSYNC     sync
 #endif
 
 #ifdef CONFIG_SMP
 #define ISYNC_ON_SMP   "\n\tisync\n"
-#define LWSYNC_ON_SMP  __stringify(LWSYNC) "\n"
+/*
+ * NOTE(review): switched from __stringify because LWSYNC can now expand
+ * to a multi-statement sequence on E500 — presumably stringify_in_c
+ * fully expands the macro before stringifying; confirm its definition.
+ */
+#define LWSYNC_ON_SMP  stringify_in_c(LWSYNC) "\n"
 #else
 #define ISYNC_ON_SMP
 #define LWSYNC_ON_SMP
 #endif
 
-static inline void eieio(void)
-{
-       __asm__ __volatile__ ("eieio" : : : "memory");
-}
-
-static inline void isync(void)
-{
-       __asm__ __volatile__ ("isync" : : : "memory");
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_SYNCH_H */