/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 Maciej W. Rozycki
 * Copyright (C) 2008 Thiemo Seufer
 * Copyright (C) 2012 MIPS Technologies, Inc.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/proc_fs.h>

#include <asm/bugs.h>
#include <asm/cacheops.h>
#include <asm/cpu-type.h>
#include <asm/inst.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prefetch.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/cpu.h>
#include <asm/war.h>

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_dma.h>
#endif

#include <asm/uasm.h>

/* Registers used in the assembled routines. */
#define ZERO 0
#define AT 2
#define A0 4
#define A1 5
#define A2 6
#define T0 8
#define T1 9
#define T2 10
#define T3 11
#define T9 25
#define RA 31

/* Handle labels (which must be positive integers). */
enum label_id {
	label_clear_nopref = 1,
	label_clear_pref,
	label_copy_nopref,
	label_copy_pref_both,
	label_copy_pref_store,
};

UASM_L_LA(_clear_nopref)
UASM_L_LA(_clear_pref)
UASM_L_LA(_copy_nopref)
UASM_L_LA(_copy_pref_both)
UASM_L_LA(_copy_pref_store)

/* We need one branch and therefore one relocation per target label. */
static struct uasm_label labels[5];
static struct uasm_reloc relocs[5];

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

/*
 * R6 has a limited offset of the pref instruction.
 * Skip it if the offset is more than 9 bits.
 */
#define _uasm_i_pref(a, b, c, d)		\
do {						\
	if (cpu_has_mips_r6) {			\
		if (c <= 0xff && c >= -0x100)	\
			uasm_i_pref(a, b, c, d);\
	} else {				\
		uasm_i_pref(a, b, c, d);	\
	}					\
} while(0)

static int pref_bias_clear_store;
static int pref_bias_copy_load;
static int pref_bias_copy_store;

static u32 pref_src_mode;
static u32 pref_dst_mode;

static int clear_word_size;
static int copy_word_size;

static int half_clear_loop_size;
static int half_copy_loop_size;

static int cache_line_size;
#define cache_line_mask() (cache_line_size - 1)

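/*
 * Emit code that adds an immediate offset to a register.  Offsets
 * that do not fit into 16 bits are synthesized via T9; on CPUs with
 * the R4000/R4400 daddiu erratum (DADDI_WAR) the immediate is always
 * loaded into T9 first and added with a separate daddu.
 */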
static inline void
pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
{
	if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
		if (off > 0x7fff) {
			uasm_i_lui(buf, T9, uasm_rel_hi(off));
			uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
		} else
			uasm_i_addiu(buf, T9, ZERO, off);
		uasm_i_daddu(buf, reg1, reg2, T9);
	} else {
		if (off > 0x7fff) {
			uasm_i_lui(buf, T9, uasm_rel_hi(off));
			uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
			UASM_i_ADDU(buf, reg1, reg2, T9);
		} else
			UASM_i_ADDIU(buf, reg1, reg2, off);
	}
}

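/*
 * Choose word size, prefetch hints and loop unrolling factors for the
 * current CPU.  Runs once, before either page routine is assembled.
 */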
static void set_prefetch_parameters(void)
{
	if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
		clear_word_size = 8;
	else
		clear_word_size = 4;

	if (cpu_has_64bit_gp_regs)
		copy_word_size = 8;
	else
		copy_word_size = 4;

	/*
	 * The pref's used here are using "streaming" hints, which cause the
	 * copied data to be kicked out of the cache sooner.  A page copy often
	 * ends up copying a lot more data than is commonly used, so this seems
	 * to make sense in terms of reducing cache pollution, but I've no real
	 * performance data to back this up.
	 */
	if (cpu_has_prefetch) {
		/*
		 * XXX: Most prefetch bias values in here are based on
		 * guesswork.
		 */
		cache_line_size = cpu_dcache_line_size();
		switch (current_cpu_type()) {
		case CPU_R5500:
		case CPU_TX49XX:
			/* These processors only support the Pref_Load. */
			pref_bias_copy_load = 256;
			break;

		case CPU_R10000:
		case CPU_R12000:
		case CPU_R14000:
			/*
			 * Those values have been experimentally tuned for an
			 * Origin 200.
			 */
			pref_bias_clear_store = 512;
			pref_bias_copy_load = 256;
			pref_bias_copy_store = 256;
			pref_src_mode = Pref_LoadStreamed;
			pref_dst_mode = Pref_StoreStreamed;
			break;

		case CPU_SB1:
		case CPU_SB1A:
			pref_bias_clear_store = 128;
			pref_bias_copy_load = 128;
			pref_bias_copy_store = 128;
			/*
			 * SB1 pass1 Pref_LoadStreamed/Pref_StoreStreamed
			 * hints are broken.
			 */
			if (current_cpu_type() == CPU_SB1 &&
			    (current_cpu_data.processor_id & 0xff) < 0x02) {
				pref_src_mode = Pref_Load;
				pref_dst_mode = Pref_Store;
			} else {
				pref_src_mode = Pref_LoadStreamed;
				pref_dst_mode = Pref_StoreStreamed;
			}
			break;

		default:
			pref_bias_clear_store = 128;
			pref_bias_copy_load = 256;
			pref_bias_copy_store = 128;
			pref_src_mode = Pref_LoadStreamed;
			if (cpu_has_mips_r6)
				/*
				 * Bit 30 (Pref_PrepareForStore) has been
				 * removed from MIPS R6. Use bit 5
				 * (Pref_StoreStreamed).
				 */
				pref_dst_mode = Pref_StoreStreamed;
			else
				pref_dst_mode = Pref_PrepareForStore;
			break;
		}
	} else {
		if (cpu_has_cache_cdex_s)
			cache_line_size = cpu_scache_line_size();
		else if (cpu_has_cache_cdex_p)
			cache_line_size = cpu_dcache_line_size();
	}
	/*
	 * Too much unrolling will overflow the available space in
	 * clear_space_array / copy_page_array.
	 */
	half_clear_loop_size = min(16 * clear_word_size,
				   max(cache_line_size >> 1,
				       4 * clear_word_size));
	half_copy_loop_size = min(16 * copy_word_size,
				  max(cache_line_size >> 1,
				      4 * copy_word_size));
}

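/* Emit one zeroing store to A0, 64-bit wide when the CPU allows it. */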
static void build_clear_store(u32 **buf, int off)
{
	if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
		uasm_i_sd(buf, ZERO, off, A0);
	} else {
		uasm_i_sw(buf, ZERO, off, A0);
	}
}

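/*
 * Emit a store prefetch for the clear loop, at most once per cache
 * line.  Without a usable prefetch bias, fall back to "create dirty
 * exclusive" cacheops, including the R4600 v1/v2 workarounds.
 */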
static inline void build_clear_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_clear_store) {
		_uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
			    A0);
	} else if (cache_line_size == (half_clear_loop_size << 1)) {
		if (cpu_has_cache_cdex_s) {
			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
		} else if (cpu_has_cache_cdex_p) {
			if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
			}

			if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
				uasm_i_lw(buf, ZERO, ZERO, AT);

			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
		}
	}
}

extern u32 __clear_page_start;
extern u32 __clear_page_end;
extern u32 __copy_page_start;
extern u32 __copy_page_end;

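/*
 * The generated code is written into the fixed-size buffers reserved
 * in page-funcs.S; the BUG_ON()s below catch overflows of that space.
 */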
void build_clear_page(void)
{
	int off;
	u32 *buf = &__clear_page_start;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	int i;
	static atomic_t run_once = ATOMIC_INIT(0);

	if (atomic_xchg(&run_once, 1)) {
		return;
	}

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	set_prefetch_parameters();

	/*
	 * This algorithm makes the following assumptions:
	 *   - The prefetch bias is a multiple of 2 words.
	 *   - The prefetch bias is less than one page.
	 */
	BUG_ON(pref_bias_clear_store % (2 * clear_word_size));
	BUG_ON(PAGE_SIZE < pref_bias_clear_store);

	off = PAGE_SIZE - pref_bias_clear_store;
	if (off > 0xffff || !pref_bias_clear_store)
		pg_addiu(&buf, A2, A0, off);
	else
		uasm_i_ori(&buf, A2, A0, off);

	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
		uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));

	off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
				* cache_line_size : 0;
	while (off) {
		build_clear_pref(&buf, -off);
		off -= cache_line_size;
	}
	uasm_l_clear_pref(&l, buf);
	do {
		build_clear_pref(&buf, off);
		build_clear_store(&buf, off);
		off += clear_word_size;
	} while (off < half_clear_loop_size);
	pg_addiu(&buf, A0, A0, 2 * off);
	off = -off;
	do {
		build_clear_pref(&buf, off);
		if (off == -clear_word_size)
			uasm_il_bne(&buf, &r, A0, A2, label_clear_pref);
		build_clear_store(&buf, off);
		off += clear_word_size;
	} while (off < 0);

	if (pref_bias_clear_store) {
		pg_addiu(&buf, A2, A0, pref_bias_clear_store);
		uasm_l_clear_nopref(&l, buf);
		off = 0;
		do {
			build_clear_store(&buf, off);
			off += clear_word_size;
		} while (off < half_clear_loop_size);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			if (off == -clear_word_size)
				uasm_il_bne(&buf, &r, A0, A2,
					    label_clear_nopref);
			build_clear_store(&buf, off);
			off += clear_word_size;
		} while (off < 0);
	}

	uasm_i_jr(&buf, RA);
	uasm_i_nop(&buf);

	BUG_ON(buf > &__clear_page_end);

	uasm_resolve_relocs(relocs, labels);

	pr_debug("Synthesized clear page handler (%u instructions).\n",
		 (u32)(buf - &__clear_page_start));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (buf - &__clear_page_start); i++)
		pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]);
	pr_debug("\t.set pop\n");
}

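/* Emit one word of the copy: loads come from A1, stores go to A0. */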
static void build_copy_load(u32 **buf, int reg, int off)
{
	if (cpu_has_64bit_gp_regs) {
		uasm_i_ld(buf, reg, off, A1);
	} else {
		uasm_i_lw(buf, reg, off, A1);
	}
}

static void build_copy_store(u32 **buf, int reg, int off)
{
	if (cpu_has_64bit_gp_regs) {
		uasm_i_sd(buf, reg, off, A0);
	} else {
		uasm_i_sw(buf, reg, off, A0);
	}
}

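/*
 * Prefetch the source (load) and destination (store) streams, once
 * per cache line; the store side has the same cacheop fallback and
 * R4600 workarounds as build_clear_pref().
 */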
static inline void build_copy_load_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_copy_load)
		_uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
}

static inline void build_copy_store_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_copy_store) {
		_uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
			    A0);
	} else if (cache_line_size == (half_copy_loop_size << 1)) {
		if (cpu_has_cache_cdex_s) {
			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
		} else if (cpu_has_cache_cdex_p) {
			if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
			}

			if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
				uasm_i_lw(buf, ZERO, ZERO, AT);

			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
		}
	}
}

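/*
 * Assemble copy_page().  The structure mirrors build_clear_page(): a
 * main loop that prefetches both streams, an optional stretch that
 * only prefetches stores once the load bias has run past the end of
 * the page, and a final stretch with no prefetching at all.
 */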
void build_copy_page(void)
{
	int off;
	u32 *buf = &__copy_page_start;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	int i;
	static atomic_t run_once = ATOMIC_INIT(0);

	if (atomic_xchg(&run_once, 1)) {
		return;
	}

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	set_prefetch_parameters();

	/*
	 * This algorithm makes the following assumptions:
	 *   - All prefetch biases are multiples of 8 words.
	 *   - The prefetch biases are less than one page.
	 *   - The store prefetch bias isn't greater than the load
	 *     prefetch bias.
	 */
	BUG_ON(pref_bias_copy_load % (8 * copy_word_size));
	BUG_ON(pref_bias_copy_store % (8 * copy_word_size));
	BUG_ON(PAGE_SIZE < pref_bias_copy_load);
	BUG_ON(pref_bias_copy_store > pref_bias_copy_load);

	off = PAGE_SIZE - pref_bias_copy_load;
	if (off > 0xffff || !pref_bias_copy_load)
		pg_addiu(&buf, A2, A0, off);
	else
		uasm_i_ori(&buf, A2, A0, off);

	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
		uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));

	off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
				cache_line_size : 0;
	while (off) {
		build_copy_load_pref(&buf, -off);
		off -= cache_line_size;
	}
	off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) *
				cache_line_size : 0;
	while (off) {
		build_copy_store_pref(&buf, -off);
		off -= cache_line_size;
	}
	uasm_l_copy_pref_both(&l, buf);
	do {
		build_copy_load_pref(&buf, off);
		build_copy_load(&buf, T0, off);
		build_copy_load_pref(&buf, off + copy_word_size);
		build_copy_load(&buf, T1, off + copy_word_size);
		build_copy_load_pref(&buf, off + 2 * copy_word_size);
		build_copy_load(&buf, T2, off + 2 * copy_word_size);
		build_copy_load_pref(&buf, off + 3 * copy_word_size);
		build_copy_load(&buf, T3, off + 3 * copy_word_size);
		build_copy_store_pref(&buf, off);
		build_copy_store(&buf, T0, off);
		build_copy_store_pref(&buf, off + copy_word_size);
		build_copy_store(&buf, T1, off + copy_word_size);
		build_copy_store_pref(&buf, off + 2 * copy_word_size);
		build_copy_store(&buf, T2, off + 2 * copy_word_size);
		build_copy_store_pref(&buf, off + 3 * copy_word_size);
		build_copy_store(&buf, T3, off + 3 * copy_word_size);
		off += 4 * copy_word_size;
	} while (off < half_copy_loop_size);
	pg_addiu(&buf, A1, A1, 2 * off);
	pg_addiu(&buf, A0, A0, 2 * off);
	off = -off;
	do {
		build_copy_load_pref(&buf, off);
		build_copy_load(&buf, T0, off);
		build_copy_load_pref(&buf, off + copy_word_size);
		build_copy_load(&buf, T1, off + copy_word_size);
		build_copy_load_pref(&buf, off + 2 * copy_word_size);
		build_copy_load(&buf, T2, off + 2 * copy_word_size);
		build_copy_load_pref(&buf, off + 3 * copy_word_size);
		build_copy_load(&buf, T3, off + 3 * copy_word_size);
		build_copy_store_pref(&buf, off);
		build_copy_store(&buf, T0, off);
		build_copy_store_pref(&buf, off + copy_word_size);
		build_copy_store(&buf, T1, off + copy_word_size);
		build_copy_store_pref(&buf, off + 2 * copy_word_size);
		build_copy_store(&buf, T2, off + 2 * copy_word_size);
		build_copy_store_pref(&buf, off + 3 * copy_word_size);
		if (off == -(4 * copy_word_size))
			uasm_il_bne(&buf, &r, A2, A0, label_copy_pref_both);
		build_copy_store(&buf, T3, off + 3 * copy_word_size);
		off += 4 * copy_word_size;
	} while (off < 0);

	if (pref_bias_copy_load - pref_bias_copy_store) {
		pg_addiu(&buf, A2, A0,
			 pref_bias_copy_load - pref_bias_copy_store);
		uasm_l_copy_pref_store(&l, buf);
		off = 0;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store_pref(&buf, off);
			build_copy_store(&buf, T0, off);
			build_copy_store_pref(&buf, off + copy_word_size);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store_pref(&buf, off + 2 * copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store_pref(&buf, off + 3 * copy_word_size);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < half_copy_loop_size);
		pg_addiu(&buf, A1, A1, 2 * off);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store_pref(&buf, off);
			build_copy_store(&buf, T0, off);
			build_copy_store_pref(&buf, off + copy_word_size);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store_pref(&buf, off + 2 * copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store_pref(&buf, off + 3 * copy_word_size);
			if (off == -(4 * copy_word_size))
				uasm_il_bne(&buf, &r, A2, A0,
					    label_copy_pref_store);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < 0);
	}

	if (pref_bias_copy_store) {
		pg_addiu(&buf, A2, A0, pref_bias_copy_store);
		uasm_l_copy_nopref(&l, buf);
		off = 0;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store(&buf, T0, off);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < half_copy_loop_size);
		pg_addiu(&buf, A1, A1, 2 * off);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store(&buf, T0, off);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			if (off == -(4 * copy_word_size))
				uasm_il_bne(&buf, &r, A2, A0,
					    label_copy_nopref);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < 0);
	}

	uasm_i_jr(&buf, RA);
	uasm_i_nop(&buf);

	BUG_ON(buf > &__copy_page_end);

	uasm_resolve_relocs(relocs, labels);

	pr_debug("Synthesized copy page handler (%u instructions).\n",
		 (u32)(buf - &__copy_page_start));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (buf - &__copy_page_start); i++)
		pr_debug("\t.word 0x%08x\n", (&__copy_page_start)[i]);
	pr_debug("\t.set pop\n");
}

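/*
 * On SiByte SOCs the on-chip data mover can clear or copy a whole
 * page asynchronously.  One descriptor per CPU is used, and pages
 * outside KSEG0 fall back to the CPU routines assembled above.
 */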
#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
extern void clear_page_cpu(void *page);
extern void copy_page_cpu(void *to, void *from);

/*
 * Pad descriptors to cacheline, since each is exclusively owned by a
 * particular CPU.
 */
struct dmadscr {
	u64 dscr_a;
	u64 dscr_b;
	u64 pad_a;
	u64 pad_b;
} ____cacheline_aligned_in_smp page_descr[DM_NUM_CHANNELS];

void sb1_dma_init(void)
{
	int i;

	for (i = 0; i < DM_NUM_CHANNELS; i++) {
		const u64 base_val = CPHYSADDR((unsigned long)&page_descr[i]) |
				     V_DM_DSCR_BASE_RINGSZ(1);
		void *base_reg = IOADDR(A_DM_REGISTER(i, R_DM_DSCR_BASE));

		__raw_writeq(base_val, base_reg);
		__raw_writeq(base_val | M_DM_DSCR_BASE_RESET, base_reg);
		__raw_writeq(base_val | M_DM_DSCR_BASE_ENABL, base_reg);
	}
}

void clear_page(void *page)
{
	u64 to_phys = CPHYSADDR((unsigned long)page);
	unsigned int cpu = smp_processor_id();

	/* if the page is not in KSEG0, use old way */
	if ((long)KSEGX((unsigned long)page) != (long)CKSEG0)
		return clear_page_cpu(page);

	page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM |
				 M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
	page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
	__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

	/*
	 * Don't really want to do it this way, but there's no
	 * reliable way to delay completion detection.
	 */
	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
		 & M_DM_DSCR_BASE_INTERRUPT))
		;
	__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}

void copy_page(void *to, void *from)
{
	u64 from_phys = CPHYSADDR((unsigned long)from);
	u64 to_phys = CPHYSADDR((unsigned long)to);
	unsigned int cpu = smp_processor_id();

	/* if any page is not in KSEG0, use old way */
	if ((long)KSEGX((unsigned long)to) != (long)CKSEG0
	    || (long)KSEGX((unsigned long)from) != (long)CKSEG0)
		return copy_page_cpu(to, from);

	page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST |
				 M_DM_DSCRA_INTERRUPT;
	page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
	__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

	/*
	 * Don't really want to do it this way, but there's no
	 * reliable way to delay completion detection.
	 */
	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
		 & M_DM_DSCR_BASE_INTERRUPT))
		;
	__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}

#endif /* CONFIG_SIBYTE_DMA_PAGEOPS */