/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007  Maciej W. Rozycki
 * Copyright (C) 2008  Thiemo Seufer
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/proc_fs.h>

#include <asm/bugs.h>
#include <asm/cacheops.h>
#include <asm/inst.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prefetch.h>
#include <asm/system.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/cpu.h>
#include <asm/war.h>

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_dma.h>
#endif

#include "uasm.h"

/* Registers used in the assembled routines. */
#define ZERO 0
#define AT 2
#define A0 4
#define A1 5
#define A2 6
#define T0 8
#define T1 9
#define T2 10
#define T3 11
#define T9 25
#define RA 31

/* Handle labels (which must be positive integers). */
enum label_id {
	label_clear_nopref = 1,
	label_clear_pref,
	label_copy_nopref,
	label_copy_pref_both,
	label_copy_pref_store,
};

UASM_L_LA(_clear_nopref)
UASM_L_LA(_clear_pref)
UASM_L_LA(_copy_nopref)
UASM_L_LA(_copy_pref_both)
UASM_L_LA(_copy_pref_store)

/* We need one branch and therefore one relocation per target label. */
static struct uasm_label __cpuinitdata labels[5];
static struct uasm_reloc __cpuinitdata relocs[5];

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)
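
/*
 * For example, a read_c0_prid() value of 0x00002021 (an R4600, rev 2.1)
 * satisfies cpu_is_r4600_v2_x(), since 0x00002021 & 0xfffffff0 == 0x00002020.
 */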

/*
 * Maximum sizes:
 *
 * R4000 128 bytes S-cache:		0x058 bytes
 * R4600 v1.7:				0x05c bytes
 * R4600 v2.0:				0x060 bytes
 * With prefetching, 16 word strides	0x120 bytes
 */

static u32 clear_page_array[0x120 / 4];

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
void clear_page_cpu(void *page) __attribute__((alias("clear_page_array")));
#else
void clear_page(void *page) __attribute__((alias("clear_page_array")));
#endif

EXPORT_SYMBOL(clear_page);

/*
 * Maximum sizes:
 *
 * R4000 128 bytes S-cache:		0x11c bytes
 * R4600 v1.7:				0x080 bytes
 * R4600 v2.0:				0x07c bytes
 * With prefetching, 16 word strides	0x540 bytes
 */
static u32 copy_page_array[0x540 / 4];

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
void
copy_page_cpu(void *to, void *from) __attribute__((alias("copy_page_array")));
#else
void copy_page(void *to, void *from) __attribute__((alias("copy_page_array")));
#endif

EXPORT_SYMBOL(copy_page);
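
/*
 * Note that clear_page()/copy_page() (or their _cpu variants) are aliases
 * for the instruction arrays themselves: build_clear_page() and
 * build_copy_page() below fill the arrays with synthesized code at boot,
 * so the builders must run before anyone calls these entry points.
 */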

static int pref_bias_clear_store __cpuinitdata;
static int pref_bias_copy_load __cpuinitdata;
static int pref_bias_copy_store __cpuinitdata;

static u32 pref_src_mode __cpuinitdata;
static u32 pref_dst_mode __cpuinitdata;

static int clear_word_size __cpuinitdata;
static int copy_word_size __cpuinitdata;

static int half_clear_loop_size __cpuinitdata;
static int half_copy_loop_size __cpuinitdata;

static int cache_line_size __cpuinitdata;
#define cache_line_mask() (cache_line_size - 1)

static inline void __cpuinit
pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
{
	if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
		if (off > 0x7fff) {
			uasm_i_lui(buf, T9, uasm_rel_hi(off));
			uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
		} else
			uasm_i_addiu(buf, T9, ZERO, off);
		uasm_i_daddu(buf, reg1, reg2, T9);
	} else {
		if (off > 0x7fff) {
			uasm_i_lui(buf, T9, uasm_rel_hi(off));
			uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
			UASM_i_ADDU(buf, reg1, reg2, T9);
		} else
			UASM_i_ADDIU(buf, reg1, reg2, off);
	}
}
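
/*
 * Illustrative sketch (not part of the generated code): with 64 kB pages,
 * build_clear_page() below calls pg_addiu(&buf, A2, A0, 0x10000) when no
 * prefetch bias is set.  Since 0x10000 > 0x7fff, a 64-bit kernel emits
 *
 *	lui	t9, 0x0001		# uasm_rel_hi(0x10000)
 *	addiu	t9, t9, 0x0000		# uasm_rel_lo(0x10000)
 *	daddu	a2, a0, t9
 *
 * while a small offset folds into a single addiu/daddiu.
 */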

static void __cpuinit set_prefetch_parameters(void)
{
	if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
		clear_word_size = 8;
	else
		clear_word_size = 4;

	if (cpu_has_64bit_gp_regs)
		copy_word_size = 8;
	else
		copy_word_size = 4;

	/*
	 * The pref instructions used here use "streaming" hints, which cause
	 * the copied data to be kicked out of the cache sooner.  A page copy
	 * often ends up copying a lot more data than is commonly used, so this
	 * seems to make sense in terms of reducing cache pollution, but I've
	 * no real performance data to back this up.
	 */
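	/*
	 * For illustration (hint encodings per <asm/prefetch.h>):
	 * Pref_LoadStreamed is hint 4 and Pref_StoreStreamed is hint 5.
	 * With the R10000-class tuning below (biases of 256 bytes), the
	 * copy loop therefore issues "pref 4, 256(a1)" ahead of its loads
	 * and "pref 5, 256(a0)" ahead of its stores.
	 */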
	if (cpu_has_prefetch) {
		/*
		 * XXX: Most prefetch bias values in here are based on
		 * guesswork.
		 */
		cache_line_size = cpu_dcache_line_size();
		switch (current_cpu_type()) {
		case CPU_TX49XX:
			/* TX49 supports only Pref_Load */
			pref_bias_copy_load = 256;
			break;

		case CPU_RM9000:
			/*
			 * As a workaround for erratum G105 which makes the
			 * PrepareForStore hint unusable we fall back to
			 * StoreRetained on the RM9000.  Once it is known which
			 * versions of the RM9000 are affected we'll be able to
			 * conditionalize this.
			 */

		case CPU_R10000:
		case CPU_R12000:
		case CPU_R14000:
			/*
			 * Those values have been experimentally tuned for an
			 * Origin 200.
			 */
			pref_bias_clear_store = 512;
			pref_bias_copy_load = 256;
			pref_bias_copy_store = 256;
			pref_src_mode = Pref_LoadStreamed;
			pref_dst_mode = Pref_StoreStreamed;
			break;

		case CPU_SB1:
		case CPU_SB1A:
			pref_bias_clear_store = 128;
			pref_bias_copy_load = 128;
			pref_bias_copy_store = 128;
			/*
			 * SB1 pass1 Pref_LoadStreamed/Pref_StoreStreamed
			 * hints are broken.
			 */
			if (current_cpu_type() == CPU_SB1 &&
			    (current_cpu_data.processor_id & 0xff) < 0x02) {
				pref_src_mode = Pref_Load;
				pref_dst_mode = Pref_Store;
			} else {
				pref_src_mode = Pref_LoadStreamed;
				pref_dst_mode = Pref_StoreStreamed;
			}
			break;

		default:
			pref_bias_clear_store = 128;
			pref_bias_copy_load = 256;
			pref_bias_copy_store = 128;
			pref_src_mode = Pref_LoadStreamed;
			pref_dst_mode = Pref_PrepareForStore;
			break;
		}
	} else {
		if (cpu_has_cache_cdex_s)
			cache_line_size = cpu_scache_line_size();
		else if (cpu_has_cache_cdex_p)
			cache_line_size = cpu_dcache_line_size();
	}

	/*
	 * Too much unrolling will overflow the available space in
	 * clear_page_array / copy_page_array.
	 */
	half_clear_loop_size = min(16 * clear_word_size,
				   max(cache_line_size >> 1,
				       4 * clear_word_size));
	half_copy_loop_size = min(16 * copy_word_size,
				  max(cache_line_size >> 1,
				      4 * copy_word_size));
}
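
/*
 * Worked example of the sizing above: with 32-byte D-cache lines and
 * 64-bit stores (clear_word_size == 8), half_clear_loop_size ==
 * min(16 * 8, max(32 >> 1, 4 * 8)) == min(128, 32) == 32, i.e. each
 * half of the clear loop covers exactly one cache line.
 */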

static void __cpuinit build_clear_store(u32 **buf, int off)
{
	if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
		uasm_i_sd(buf, ZERO, off, A0);
	} else {
		uasm_i_sw(buf, ZERO, off, A0);
	}
}

static inline void __cpuinit build_clear_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_clear_store) {
		uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
			    A0);
	} else if (cache_line_size == (half_clear_loop_size << 1)) {
		if (cpu_has_cache_cdex_s) {
			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
		} else if (cpu_has_cache_cdex_p) {
			if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
			}

			if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
				uasm_i_lw(buf, ZERO, ZERO, AT);

			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
		}
	}
}

void __cpuinit build_clear_page(void)
{
	int off;
	u32 *buf = (u32 *)&clear_page_array;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	int i;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	set_prefetch_parameters();

	/*
	 * This algorithm makes the following assumptions:
	 *   - The prefetch bias is a multiple of 2 words.
	 *   - The prefetch bias is less than one page.
	 */
	BUG_ON(pref_bias_clear_store % (2 * clear_word_size));
	BUG_ON(PAGE_SIZE < pref_bias_clear_store);

	off = PAGE_SIZE - pref_bias_clear_store;
	if (off > 0xffff || !pref_bias_clear_store)
		pg_addiu(&buf, A2, A0, off);
	else
		uasm_i_ori(&buf, A2, A0, off);

	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
		uasm_i_lui(&buf, AT, 0xa000);

	off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
				* cache_line_size : 0;
	while (off) {
		build_clear_pref(&buf, -off);
		off -= cache_line_size;
	}
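	/*
	 * For instance, with pref_bias_clear_store == 128 and 32-byte cache
	 * lines, off starts at min(8, 128 / 32) * 32 == 128, so the loop
	 * above issues four prefetches covering the first 128 bytes of the
	 * page before any stores are generated.
	 */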
	uasm_l_clear_pref(&l, buf);
	do {
		build_clear_pref(&buf, off);
		build_clear_store(&buf, off);
		off += clear_word_size;
	} while (off < half_clear_loop_size);
	pg_addiu(&buf, A0, A0, 2 * off);
	off = -off;
	do {
		build_clear_pref(&buf, off);
		if (off == -clear_word_size)
			uasm_il_bne(&buf, &r, A0, A2, label_clear_pref);
		build_clear_store(&buf, off);
		off += clear_word_size;
	} while (off < 0);

	if (pref_bias_clear_store) {
		pg_addiu(&buf, A2, A0, pref_bias_clear_store);
		uasm_l_clear_nopref(&l, buf);
		off = 0;
		do {
			build_clear_store(&buf, off);
			off += clear_word_size;
		} while (off < half_clear_loop_size);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			if (off == -clear_word_size)
				uasm_il_bne(&buf, &r, A0, A2,
					    label_clear_nopref);
			build_clear_store(&buf, off);
			off += clear_word_size;
		} while (off < 0);
	}

	uasm_i_jr(&buf, RA);
	uasm_i_nop(&buf);

	BUG_ON(buf > clear_page_array + ARRAY_SIZE(clear_page_array));

	uasm_resolve_relocs(relocs, labels);

	pr_debug("Synthesized clear page handler (%u instructions).\n",
		 (u32)(buf - clear_page_array));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (buf - clear_page_array); i++)
		pr_debug("\t.word 0x%08x\n", clear_page_array[i]);
	pr_debug("\t.set pop\n");
}
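
/*
 * Purely illustrative sketch of a synthesized handler, assuming a 64-bit
 * CPU with neither prefetch nor cache-create ops (so half_clear_loop_size
 * is 32) and 4 kB pages; the generated code is then equivalent to:
 *
 *	daddiu	a2, a0, 4096
 * 1:	sd	zero, 0(a0)
 *	sd	zero, 8(a0)
 *	sd	zero, 16(a0)
 *	sd	zero, 24(a0)
 *	daddiu	a0, a0, 64
 *	sd	zero, -32(a0)
 *	sd	zero, -24(a0)
 *	sd	zero, -16(a0)
 *	bne	a0, a2, 1b
 *	 sd	zero, -8(a0)		# branch delay slot
 *	jr	ra
 *	 nop
 */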

static void __cpuinit build_copy_load(u32 **buf, int reg, int off)
{
	if (cpu_has_64bit_gp_regs) {
		uasm_i_ld(buf, reg, off, A1);
	} else {
		uasm_i_lw(buf, reg, off, A1);
	}
}

static void __cpuinit build_copy_store(u32 **buf, int reg, int off)
{
	if (cpu_has_64bit_gp_regs) {
		uasm_i_sd(buf, reg, off, A0);
	} else {
		uasm_i_sw(buf, reg, off, A0);
	}
}

static inline void build_copy_load_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_copy_load)
		uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
}

static inline void build_copy_store_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_copy_store) {
		uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
			    A0);
	} else if (cache_line_size == (half_copy_loop_size << 1)) {
		if (cpu_has_cache_cdex_s) {
			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
		} else if (cpu_has_cache_cdex_p) {
			if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
			}

			if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
				uasm_i_lw(buf, ZERO, ZERO, AT);

			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
		}
	}
}

void __cpuinit build_copy_page(void)
{
	int off;
	u32 *buf = (u32 *)&copy_page_array;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	int i;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	set_prefetch_parameters();

	/*
	 * This algorithm makes the following assumptions:
	 *   - All prefetch biases are multiples of 8 words.
	 *   - The prefetch biases are less than one page.
	 *   - The store prefetch bias isn't greater than the load
	 *     prefetch bias.
	 */
	BUG_ON(pref_bias_copy_load % (8 * copy_word_size));
	BUG_ON(pref_bias_copy_store % (8 * copy_word_size));
	BUG_ON(PAGE_SIZE < pref_bias_copy_load);
	BUG_ON(pref_bias_copy_store > pref_bias_copy_load);
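
	/*
	 * E.g. the defaults from set_prefetch_parameters() (copy_word_size
	 * == 8, pref_bias_copy_load == 256, pref_bias_copy_store == 128)
	 * pass all four checks: both biases are multiples of 8 * 8 == 64
	 * bytes, fit within a page, and the store bias does not exceed the
	 * load bias.
	 */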

	off = PAGE_SIZE - pref_bias_copy_load;
	if (off > 0xffff || !pref_bias_copy_load)
		pg_addiu(&buf, A2, A0, off);
	else
		uasm_i_ori(&buf, A2, A0, off);

	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
		uasm_i_lui(&buf, AT, 0xa000);

	off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
				cache_line_size : 0;
	while (off) {
		build_copy_load_pref(&buf, -off);
		off -= cache_line_size;
	}
	off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) *
				cache_line_size : 0;
	while (off) {
		build_copy_store_pref(&buf, -off);
		off -= cache_line_size;
	}
	uasm_l_copy_pref_both(&l, buf);
	do {
		build_copy_load_pref(&buf, off);
		build_copy_load(&buf, T0, off);
		build_copy_load_pref(&buf, off + copy_word_size);
		build_copy_load(&buf, T1, off + copy_word_size);
		build_copy_load_pref(&buf, off + 2 * copy_word_size);
		build_copy_load(&buf, T2, off + 2 * copy_word_size);
		build_copy_load_pref(&buf, off + 3 * copy_word_size);
		build_copy_load(&buf, T3, off + 3 * copy_word_size);
		build_copy_store_pref(&buf, off);
		build_copy_store(&buf, T0, off);
		build_copy_store_pref(&buf, off + copy_word_size);
		build_copy_store(&buf, T1, off + copy_word_size);
		build_copy_store_pref(&buf, off + 2 * copy_word_size);
		build_copy_store(&buf, T2, off + 2 * copy_word_size);
		build_copy_store_pref(&buf, off + 3 * copy_word_size);
		build_copy_store(&buf, T3, off + 3 * copy_word_size);
		off += 4 * copy_word_size;
	} while (off < half_copy_loop_size);
	pg_addiu(&buf, A1, A1, 2 * off);
	pg_addiu(&buf, A0, A0, 2 * off);
	off = -off;
	do {
		build_copy_load_pref(&buf, off);
		build_copy_load(&buf, T0, off);
		build_copy_load_pref(&buf, off + copy_word_size);
		build_copy_load(&buf, T1, off + copy_word_size);
		build_copy_load_pref(&buf, off + 2 * copy_word_size);
		build_copy_load(&buf, T2, off + 2 * copy_word_size);
		build_copy_load_pref(&buf, off + 3 * copy_word_size);
		build_copy_load(&buf, T3, off + 3 * copy_word_size);
		build_copy_store_pref(&buf, off);
		build_copy_store(&buf, T0, off);
		build_copy_store_pref(&buf, off + copy_word_size);
		build_copy_store(&buf, T1, off + copy_word_size);
		build_copy_store_pref(&buf, off + 2 * copy_word_size);
		build_copy_store(&buf, T2, off + 2 * copy_word_size);
		build_copy_store_pref(&buf, off + 3 * copy_word_size);
		if (off == -(4 * copy_word_size))
			uasm_il_bne(&buf, &r, A2, A0, label_copy_pref_both);
		build_copy_store(&buf, T3, off + 3 * copy_word_size);
		off += 4 * copy_word_size;
	} while (off < 0);

	if (pref_bias_copy_load - pref_bias_copy_store) {
		pg_addiu(&buf, A2, A0,
			 pref_bias_copy_load - pref_bias_copy_store);
		uasm_l_copy_pref_store(&l, buf);
		off = 0;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store_pref(&buf, off);
			build_copy_store(&buf, T0, off);
			build_copy_store_pref(&buf, off + copy_word_size);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store_pref(&buf, off + 2 * copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store_pref(&buf, off + 3 * copy_word_size);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < half_copy_loop_size);
		pg_addiu(&buf, A1, A1, 2 * off);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store_pref(&buf, off);
			build_copy_store(&buf, T0, off);
			build_copy_store_pref(&buf, off + copy_word_size);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store_pref(&buf, off + 2 * copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store_pref(&buf, off + 3 * copy_word_size);
			if (off == -(4 * copy_word_size))
				uasm_il_bne(&buf, &r, A2, A0,
					    label_copy_pref_store);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < 0);
	}

	if (pref_bias_copy_store) {
		pg_addiu(&buf, A2, A0, pref_bias_copy_store);
		uasm_l_copy_nopref(&l, buf);
		off = 0;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store(&buf, T0, off);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < half_copy_loop_size);
		pg_addiu(&buf, A1, A1, 2 * off);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store(&buf, T0, off);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			if (off == -(4 * copy_word_size))
				uasm_il_bne(&buf, &r, A2, A0,
					    label_copy_nopref);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < 0);
	}

	uasm_i_jr(&buf, RA);
	uasm_i_nop(&buf);

	BUG_ON(buf > copy_page_array + ARRAY_SIZE(copy_page_array));

	uasm_resolve_relocs(relocs, labels);

	pr_debug("Synthesized copy page handler (%u instructions).\n",
		 (u32)(buf - copy_page_array));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (buf - copy_page_array); i++)
		pr_debug("\t.word 0x%08x\n", copy_page_array[i]);
	pr_debug("\t.set pop\n");
}

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS

/*
 * Pad descriptors to cacheline, since each is exclusively owned by a
 * particular CPU.
 */
struct dmadscr {
	u64 dscr_a;
	u64 dscr_b;
	u64 pad_a;
	u64 pad_b;
} ____cacheline_aligned_in_smp page_descr[DM_NUM_CHANNELS];

void sb1_dma_init(void)
{
	int i;

	for (i = 0; i < DM_NUM_CHANNELS; i++) {
		const u64 base_val = CPHYSADDR((unsigned long)&page_descr[i]) |
				     V_DM_DSCR_BASE_RINGSZ(1);
		void *base_reg = IOADDR(A_DM_REGISTER(i, R_DM_DSCR_BASE));

		__raw_writeq(base_val, base_reg);
		__raw_writeq(base_val | M_DM_DSCR_BASE_RESET, base_reg);
		__raw_writeq(base_val | M_DM_DSCR_BASE_ENABL, base_reg);
	}
}

void clear_page(void *page)
{
	u64 to_phys = CPHYSADDR((unsigned long)page);
	unsigned int cpu = smp_processor_id();

	/* if the page is not in KSEG0, use old way */
	if ((long)KSEGX((unsigned long)page) != (long)CKSEG0)
		return clear_page_cpu(page);

	page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM |
				 M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
	page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
	__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

	/*
	 * Don't really want to do it this way, but there's no
	 * reliable way to delay completion detection.
	 */
	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
		 & M_DM_DSCR_BASE_INTERRUPT))
		;
	__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}

void copy_page(void *to, void *from)
{
	u64 from_phys = CPHYSADDR((unsigned long)from);
	u64 to_phys = CPHYSADDR((unsigned long)to);
	unsigned int cpu = smp_processor_id();

	/* if any page is not in KSEG0, use old way */
	if ((long)KSEGX((unsigned long)to) != (long)CKSEG0
	    || (long)KSEGX((unsigned long)from) != (long)CKSEG0)
		return copy_page_cpu(to, from);

	page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST |
				 M_DM_DSCRA_INTERRUPT;
	page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
	__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

	/*
	 * Don't really want to do it this way, but there's no
	 * reliable way to delay completion detection.
	 */
	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
		 & M_DM_DSCR_BASE_INTERRUPT))
		;
	__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}

#endif /* CONFIG_SIBYTE_DMA_PAGEOPS */