 #ifndef __ASM_ARM_CACHETYPE_H
 #define __ASM_ARM_CACHETYPE_H
 
-#include <asm/cputype.h>
+#define CACHEID_VIVT                   (1 << 0)
+#define CACHEID_VIPT_NONALIASING       (1 << 1)
+#define CACHEID_VIPT_ALIASING          (1 << 2)
+#define CACHEID_VIPT                   (CACHEID_VIPT_ALIASING|CACHEID_VIPT_NONALIASING)
+#define CACHEID_ASID_TAGGED            (1 << 3)
 
-#define __cacheid_present(val)                 (val != read_cpuid_id())
-#define __cacheid_type_v7(val)                 ((val & (7 << 29)) == (4 << 29))
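+/* Bitmask of the CACHEID_* values above, set once by cacheid_init(). */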
+extern unsigned int cacheid;
 
-#define __cacheid_vivt_prev7(val)              ((val & (15 << 25)) != (14 << 25))
-#define __cacheid_vipt_prev7(val)              ((val & (15 << 25)) == (14 << 25))
-#define __cacheid_vipt_nonaliasing_prev7(val)  ((val & (15 << 25 | 1 << 23)) == (14 << 25))
-#define __cacheid_vipt_aliasing_prev7(val)     ((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23))
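+/*
+ * These helpers report the runtime-detected cache type recorded in
+ * "cacheid", but are written so the compiler can fold each test to a
+ * constant whenever the configuration or the minimum architecture
+ * already decides the answer (see cacheid_is() below).  A typical
+ * (hypothetical) caller: context switch code can test
+ * "if (cache_is_vivt())" and have the branch dropped entirely on
+ * VIPT-only builds.
+ */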
+#define cache_is_vivt()                        cacheid_is(CACHEID_VIVT)
+#define cache_is_vipt()                        cacheid_is(CACHEID_VIPT)
+#define cache_is_vipt_nonaliasing()            cacheid_is(CACHEID_VIPT_NONALIASING)
+#define cache_is_vipt_aliasing()               cacheid_is(CACHEID_VIPT_ALIASING)
+#define icache_is_vivt_asid_tagged()           cacheid_is(CACHEID_ASID_TAGGED)
 
-#define __cacheid_vivt(val)                    (__cacheid_type_v7(val) ? 0 : __cacheid_vivt_prev7(val))
-#define __cacheid_vipt(val)                    (__cacheid_type_v7(val) ? 1 : __cacheid_vipt_prev7(val))
-#define __cacheid_vipt_nonaliasing(val)                (__cacheid_type_v7(val) ? 1 : __cacheid_vipt_nonaliasing_prev7(val))
-#define __cacheid_vipt_aliasing(val)           (__cacheid_type_v7(val) ? 0 : __cacheid_vipt_aliasing_prev7(val))
-#define __cacheid_vivt_asid_tagged_instr(val)  (__cacheid_type_v7(val) ? ((val & (3 << 14)) == (1 << 14)) : 0)
-
-#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)
 /*
- * VIVT caches only
+ * __LINUX_ARM_ARCH__ is the minimum supported CPU architecture.
+ * Mask out support which can never be present on such CPUs:
+ * - v6+ is never VIVT
+ * - v7+ VIPT never aliases
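+ *
+ * For example, building for v7 or later (__LINUX_ARM_ARCH__ >= 7)
+ * restricts the runtime-detected bits to CACHEID_VIPT_NONALIASING
+ * and CACHEID_ASID_TAGGED.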
  */
-#define cache_is_vivt()                        1
-#define cache_is_vipt()                        0
-#define cache_is_vipt_nonaliasing()    0
-#define cache_is_vipt_aliasing()       0
-#define icache_is_vivt_asid_tagged()   0
+#if __LINUX_ARM_ARCH__ >= 7
+#define __CACHEID_ARCH_MIN     (CACHEID_VIPT_NONALIASING | CACHEID_ASID_TAGGED)
+#elif __LINUX_ARM_ARCH__ >= 6
+#define __CACHEID_ARCH_MIN     (~CACHEID_VIVT)
+#else
+#define __CACHEID_ARCH_MIN     (~0)
+#endif
 
-#elif !defined(CONFIG_CPU_CACHE_VIVT) && defined(CONFIG_CPU_CACHE_VIPT)
 /*
- * VIPT caches only
+ * Mask out support which isn't configured
  */
-#define cache_is_vivt()                        0
-#define cache_is_vipt()                        1
-#define cache_is_vipt_nonaliasing()                                    \
-       ({                                                              \
-               unsigned int __val = read_cpuid_cachetype();            \
-               __cacheid_vipt_nonaliasing(__val);                      \
-       })
-
-#define cache_is_vipt_aliasing()                                       \
-       ({                                                              \
-               unsigned int __val = read_cpuid_cachetype();            \
-               __cacheid_vipt_aliasing(__val);                         \
-       })
-
-#define icache_is_vivt_asid_tagged()                                   \
-       ({                                                              \
-               unsigned int __val = read_cpuid_cachetype();            \
-               __cacheid_vivt_asid_tagged_instr(__val);                \
-       })
-
+#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)
+#define __CACHEID_ALWAYS       (CACHEID_VIVT)
+#define __CACHEID_NEVER                (~CACHEID_VIVT)
+#elif !defined(CONFIG_CPU_CACHE_VIVT) && defined(CONFIG_CPU_CACHE_VIPT)
+#define __CACHEID_ALWAYS       (0)
+#define __CACHEID_NEVER                (CACHEID_VIVT)
 #else
-/*
- * VIVT or VIPT caches.  Note that this is unreliable since ARM926
- * and V6 CPUs satisfy the "(val & (15 << 25)) == (14 << 25)" test.
- * There's no way to tell from the CacheType register what type (!)
- * the cache is.
- */
-#define cache_is_vivt()                                                        \
-       ({                                                              \
-               unsigned int __val = read_cpuid_cachetype();            \
-               (!__cacheid_present(__val)) || __cacheid_vivt(__val);   \
-       })
-
-#define cache_is_vipt()                                                        \
-       ({                                                              \
-               unsigned int __val = read_cpuid_cachetype();            \
-               __cacheid_present(__val) && __cacheid_vipt(__val);      \
-       })
-
-#define cache_is_vipt_nonaliasing()                                    \
-       ({                                                              \
-               unsigned int __val = read_cpuid_cachetype();            \
-               __cacheid_present(__val) &&                             \
-                __cacheid_vipt_nonaliasing(__val);                     \
-       })
-
-#define cache_is_vipt_aliasing()                                       \
-       ({                                                              \
-               unsigned int __val = read_cpuid_cachetype();            \
-               __cacheid_present(__val) &&                             \
-                __cacheid_vipt_aliasing(__val);                        \
-       })
-
-#define icache_is_vivt_asid_tagged()                                   \
-       ({                                                              \
-               unsigned int __val = read_cpuid_cachetype();            \
-               __cacheid_present(__val) &&                             \
-                __cacheid_vivt_asid_tagged_instr(__val);               \
-       })
-
+#define __CACHEID_ALWAYS       (0)
+#define __CACHEID_NEVER                (0)
 #endif
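+/*
+ * cacheid_is() combines the compile-time knowledge with the value
+ * probed at boot: bits in __CACHEID_ALWAYS are always reported, bits
+ * in __CACHEID_NEVER or outside __CACHEID_ARCH_MIN are always dropped,
+ * and the remainder comes from the runtime "cacheid".  For example,
+ * with __LINUX_ARM_ARCH__ >= 6, CACHEID_VIVT lies outside
+ * __CACHEID_ARCH_MIN, so cache_is_vivt() reduces to the compile-time
+ * constant (__CACHEID_ALWAYS & CACHEID_VIVT).
+ */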
 
+static inline unsigned int __attribute__((pure)) cacheid_is(unsigned int mask)
+{
+       return (__CACHEID_ALWAYS & mask) |
+              (~__CACHEID_NEVER & __CACHEID_ARCH_MIN & mask & cacheid);
+}
+
 #endif
 
 EXPORT_SYMBOL(processor_id);
 unsigned int __machine_arch_type;
 EXPORT_SYMBOL(__machine_arch_type);
+unsigned int cacheid;
+EXPORT_SYMBOL(cacheid);
 
 unsigned int __atags_pointer __initdata;
 
        return cpu_arch;
 }
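+/*
+ * Probe the cache type register once at boot and record the result in
+ * "cacheid".  The bit tests mirror the old __cacheid_*() macros: on v7
+ * the (3 << 14) field flags an ASID-tagged instruction cache, and
+ * before v7 bit 23 indicates an aliasing VIPT cache.
+ */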
 
+static void __init cacheid_init(void)
+{
+       unsigned int cachetype = read_cpuid_cachetype();
+       unsigned int arch = cpu_architecture();
+
+       if (arch >= CPU_ARCH_ARMv7) {
+               cacheid = CACHEID_VIPT_NONALIASING;
+               if ((cachetype & (3 << 14)) == 1 << 14)
+                       cacheid |= CACHEID_ASID_TAGGED;
+       } else if (arch >= CPU_ARCH_ARMv6) {
+               if (cachetype & (1 << 23))
+                       cacheid = CACHEID_VIPT_ALIASING;
+               else
+                       cacheid = CACHEID_VIPT_NONALIASING;
+       } else {
+               cacheid = CACHEID_VIVT;
+       }
+}
+
 /*
  * These functions re-use the assembly code in head.S, which
  * already provide the required functionality.
        elf_hwcap &= ~HWCAP_THUMB;
 #endif
 
+       cacheid_init();
        cpu_proc_init();
 }