/*
 *  linux/include/asm-arm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/glue.h>
#include <asm/shmparam.h>
#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
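/*
 * For example, assuming 4KiB pages (PAGE_SHIFT == 12) and SHMLBA set to
 * 4 * PAGE_SIZE, as it is for aliasing VIPT caches, CACHE_COLOUR()
 * extracts bits [13:12] of the virtual address:
 *
 *	CACHE_COLOUR(0x5000) == 1
 *	CACHE_COLOUR(0x6000) == 2
 *	CACHE_COLOUR(0x8000) == 0
 *
 * Two virtual mappings of the same page index to the same cache lines
 * only when their colours match.
 */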
#if defined(CONFIG_CPU_CACHE_V3)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_CACHE_V4)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm926
# endif
#endif

#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm940
# endif
#endif

#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm946
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4WB)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xscale
# endif
#endif

#if defined(CONFIG_CPU_XSC3)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_V6)
# define MULTI_CACHE 1
#endif

#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif
/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1
/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementation of these, and the required
 *	effects are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_cache_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_cache_user_mm(mm)
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_cache_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_inv_range(start, end)
 *
 *		Invalidate (discard) the specified virtual address range.
 *		May not write back any entries.  If 'start' or 'end'
 *		are not cache line aligned, those lines must be written
 *		back.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	dma_clean_range(start, end)
 *
 *		Clean (write back) the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 */
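/*
 * As an illustration of the rounding rule above, this is roughly how a
 * range would be aligned before a line-by-line cache operation
 * (CACHE_LINE_SIZE is a stand-in here; the real implementations in
 * arch/arm/mm hard-code the line size per CPU):
 *
 *	unsigned long mask = CACHE_LINE_SIZE - 1;
 *
 *	start &= ~mask;				/- round start down -/
 *	end = (end + mask) & ~mask;		/- round end up     -/
 *
 * so that every byte in [start, end) is covered by whole cache lines.
 */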
struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_page)(void *);

	void (*dma_inv_range)(const void *, const void *);
	void (*dma_clean_range)(const void *, const void *);
	void (*dma_flush_range)(const void *, const void *);
};
struct outer_cache_fns {
	void (*inv_range)(unsigned long, unsigned long);
	void (*clean_range)(unsigned long, unsigned long);
	void (*flush_range)(unsigned long, unsigned long);
};
/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;
#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page
/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			cpu_cache.dma_inv_range
#define dmac_clean_range		cpu_cache.dma_clean_range
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)

extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_page(void *);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)

extern void dmac_inv_range(const void *, const void *);
extern void dmac_clean_range(const void *, const void *);
extern void dmac_flush_range(const void *, const void *);

#endif
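
/*
 * A rough sketch of how the dma-mapping layer is expected to use the
 * dmac_* operations (simplified; 'dir' is the usual enum
 * dma_data_direction, and the real streaming-mapping code lives in the
 * ARM dma-mapping implementation):
 *
 *	void *vaddr = page_address(page) + offset;
 *
 *	if (dir == DMA_FROM_DEVICE)
 *		dmac_inv_range(vaddr, vaddr + size);	(device will write)
 *	else if (dir == DMA_TO_DEVICE)
 *		dmac_clean_range(vaddr, vaddr + size);	(device will read)
 *	else
 *		dmac_flush_range(vaddr, vaddr + size);	(bidirectional)
 */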

#ifdef CONFIG_OUTER_CACHE

extern struct outer_cache_fns outer_cache;

static inline void outer_inv_range(unsigned long start, unsigned long end)
{
	if (outer_cache.inv_range)
		outer_cache.inv_range(start, end);
}
static inline void outer_clean_range(unsigned long start, unsigned long end)
{
	if (outer_cache.clean_range)
		outer_cache.clean_range(start, end);
}
static inline void outer_flush_range(unsigned long start, unsigned long end)
{
	if (outer_cache.flush_range)
		outer_cache.flush_range(start, end);
}

#else

static inline void outer_inv_range(unsigned long start, unsigned long end)
{ }
static inline void outer_clean_range(unsigned long start, unsigned long end)
{ }
static inline void outer_flush_range(unsigned long start, unsigned long end)
{ }

#endif
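
/*
 * outer_cache is expected to be filled in by the platform's L2 cache
 * driver at boot, before any of the helpers above do useful work.  A
 * hypothetical driver (names here are illustrative, not a real API)
 * might do:
 *
 *	static void my_l2_inv_range(unsigned long start, unsigned long end);
 *	static void my_l2_clean_range(unsigned long start, unsigned long end);
 *	static void my_l2_flush_range(unsigned long start, unsigned long end);
 *
 *	void __init my_l2_init(void)
 *	{
 *		outer_cache.inv_range   = my_l2_inv_range;
 *		outer_cache.clean_range = my_l2_clean_range;
 *		outer_cache.flush_range = my_l2_flush_range;
 *	}
 */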

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  Since the
 * direct-mappings of these pages may contain cached data, we need
 * to do a full cache flush to ensure that writebacks don't corrupt
 * data placed into these pages via the new mappings.
 */
#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()
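
/*
 * For instance, a driver that builds a new mapping with vmap() and then
 * reads through it relies on this hook (sketch only):
 *
 *	void *virt = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 *
 * By the time vmap() returns, flush_cache_vmap() has pushed out any
 * dirty lines belonging to the pages' direct mappings, so reads via
 * 'virt' cannot be overtaken by stale writebacks.
 */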

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
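
/*
 * These are used by the generic access_process_vm()/ptrace path, which
 * roughly kmaps the target page and then, for a write, does:
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 *	kunmap(page);
 *
 * copy_to_user_page() takes care of pushing the new instructions or data
 * out of the kernel-side mapping so the traced task sees them.
 */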

/*
 * Convert calls to our calling convention.
 */
#define flush_cache_all()		__cpuc_flush_kern_all()

#ifndef CONFIG_CPU_CACHE_VIPT

static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
		__cpuc_flush_user_all();
}

static inline void
flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

static inline void
flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr,
			 unsigned long len, int write)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		unsigned long addr = (unsigned long)kaddr;
		__cpuc_coherent_kern_range(addr, addr + len);
	}
}

#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len, int write);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma,start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
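
/*
 * Roughly, the sys_cacheflush path looks up the VMA covering the
 * requested range before calling this (sketch only; the real code also
 * clamps start/end to the VMA bounds):
 *
 *	vma = find_vma(current->active_mm, start);
 *	if (vma && vma->vm_start < end)
 *		flush_cache_user_range(vma, start, end);
 *
 * so user space JITs can make freshly written code visible to the Icache.
 */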

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)
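
/*
 * The canonical in-kernel user is code that writes instructions and then
 * branches to them, e.g. a module loader or code patching (sketch, with
 * hypothetical names):
 *
 *	memcpy(code_buf, insns, insn_len);
 *	flush_icache_range((unsigned long)code_buf,
 *			   (unsigned long)code_buf + insn_len);
 *
 * Only after the flush is it safe to jump to code_buf.
 */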

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)
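
/*
 * "The TLB will see" refers to the hardware page table walker, which
 * fetches page tables from memory and may not snoop the Dcache.  A
 * sketch of the typical pattern when building page tables by hand
 * (get_new_pgd() is a hypothetical helper):
 *
 *	pgd_t *pgd = get_new_pgd();
 *	... fill in the entries ...
 *	clean_dcache_area(pgd, PTRS_PER_PGD * sizeof(pgd_t));
 */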

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
extern void flush_dcache_page(struct page *);

extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
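
/*
 * A typical caller is a filesystem or driver that writes to a page cache
 * page through its kernel mapping and then exposes it to user space
 * (sketch only):
 *
 *	void *kaddr = kmap(page);
 *	memcpy(kaddr, data, len);
 *	kunmap(page);
 *	flush_dcache_page(page);
 *
 * The flush makes the new data visible to any current or future user
 * space mappings of the page.
 */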

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}

#define flush_dcache_mmap_lock(mapping) \
	write_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	write_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

#define __cacheid_present(val)			(val != read_cpuid(CPUID_ID))
#define __cacheid_vivt(val)			((val & (15 << 25)) != (14 << 25))
#define __cacheid_vipt(val)			((val & (15 << 25)) == (14 << 25))
#define __cacheid_vipt_nonaliasing(val)		((val & (15 << 25 | 1 << 23)) == (14 << 25))
#define __cacheid_vipt_aliasing(val)		((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23))
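
/*
 * These macros treat bits [28:25] of the cache type register as the
 * cache type field (a value of 14/0xe meaning VIPT) and bit 23 as the
 * aliasing flag.  Two worked examples with purely illustrative values:
 *
 *	val = 0x1c000000: bits [28:25] == 0xe, bit 23 clear
 *		-> __cacheid_vipt_nonaliasing(val) is true
 *	val = 0x1c800000: bits [28:25] == 0xe, bit 23 set
 *		-> __cacheid_vipt_aliasing(val) is true
 */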

#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)

#define cache_is_vivt()			1
#define cache_is_vipt()			0
#define cache_is_vipt_nonaliasing()	0
#define cache_is_vipt_aliasing()	0

#elif defined(CONFIG_CPU_CACHE_VIPT)

#define cache_is_vivt()			0
#define cache_is_vipt()			1
#define cache_is_vipt_nonaliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_vipt_nonaliasing(__val);		\
	})

#define cache_is_vipt_aliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_vipt_aliasing(__val);			\
	})

#else

#define cache_is_vivt()						\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		(!__cacheid_present(__val)) || __cacheid_vivt(__val); \
	})

#define cache_is_vipt()						\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) && __cacheid_vipt(__val); \
	})

#define cache_is_vipt_nonaliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) &&			\
		 __cacheid_vipt_nonaliasing(__val);		\
	})

#define cache_is_vipt_aliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) &&			\
		 __cacheid_vipt_aliasing(__val);		\
	})

#endif

#endif	/* _ASMARM_CACHEFLUSH_H */