This source file includes the following definitions:
- arc_cache_mumbojumbo
- read_decode_cache_bcr_arcv2
- read_decode_cache_bcr
- __cache_line_loop_v2
- __cache_line_loop_v3
- __cache_line_loop_v4
- __cache_line_loop_v4
- __before_dc_op
- __before_dc_op
- __after_dc_op
- __dc_entire_op
- __dc_disable
- __dc_enable
- __dc_line_op
- __ic_entire_inv
- __ic_line_inv_vaddr_local
- __ic_line_inv_vaddr_helper
- __ic_line_inv_vaddr
- slc_op_rgn
- slc_op_line
- slc_entire_op
- arc_slc_disable
- arc_slc_enable
- flush_dcache_page
- __dma_cache_wback_inv_l1
- __dma_cache_inv_l1
- __dma_cache_wback_l1
- __dma_cache_wback_inv_slc
- __dma_cache_inv_slc
- __dma_cache_wback_slc
- dma_cache_wback_inv
- dma_cache_inv
- dma_cache_wback
- flush_icache_range
- __sync_icache_dcache
- __inv_icache_page
- __flush_dcache_page
- flush_cache_all
- flush_cache_mm
- flush_cache_page
- flush_cache_range
- flush_anon_page
- copy_user_highpage
- clear_user_page
- SYSCALL_DEFINE3
- arc_ioc_setup
- arc_cache_init_master
- arc_cache_init
// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARC Cache Management
 *
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>
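
/* ARCv2 cache hardware can operate on an address region in one shot (see the
 * region-based __cache_line_loop_v4() flavour selected by this define) */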
#ifdef CONFIG_ISA_ARCV2
#define USE_RGN_FLSH	1
#endif

static int l2_line_sz;
static int ioc_exists;
int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE;
unsigned long perip_end = 0xFFFFFFFF;

void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
			       unsigned long sz, const int op, const int full_page);

void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);
char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_cache *p;

#define PR_CACHE(p, cfg, str)						\
	if (!(p)->line_len)						\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\
			IS_USED_CFG(cfg));

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	p = &cpuinfo_arc700[c].slc;
	if (p->line_len)
		n += scnprintf(buf + n, len - n,
			       "SLC\t\t: %uK, %uB Line%s\n",
			       p->sz_k, p->line_len, IS_USED_RUN(slc_enable));

	n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
		       perip_base,
		       IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));

	return buf;
}
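
/*
 * Read the Cache Build Configuration Registers, decode them and save into
 * the cpuinfo structure for later use.
 * No validation is done here; the BCRs are simply read and converted.
 */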
static void read_decode_cache_bcr_arcv2(int cpu)
{
	struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
	struct bcr_generic sbcr;

	struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:24, way:2, lsz:2, sz:4;
#else
		unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
	} slc_cfg;

	struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
		unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
	} cbcr;

	struct bcr_volatile {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int start:4, limit:4, pad:22, order:1, disable:1;
#else
		unsigned int disable:1, order:1, pad:22, limit:4, start:4;
#endif
	} vol;

	READ_BCR(ARC_REG_SLC_BCR, sbcr);
	if (sbcr.ver) {
		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		p_slc->sz_k = 128 << slc_cfg.sz;
		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
	}

	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
	if (cbcr.c) {
		ioc_exists = 1;
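
		/*
		 * The IOC aperture snoops only the normal (low) memory, so
		 * with highmem or PAE40 some DMA buffers could land outside
		 * the aperture; keep IOC disabled by default in those configs.
		 */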
		if (IS_ENABLED(CONFIG_HIGHMEM) || is_pae40_enabled())
			ioc_enable = 0;
	} else {
		ioc_enable = 0;
	}

	/* HS 2.0 didn't have AUX_VOL */
	if (cpuinfo_arc700[cpu].core.family > 0x51) {
		READ_BCR(AUX_VOL, vol);
		perip_base = vol.start << 28;
		/* HS 3.0 has limit and strict-ordering fields */
		if (cpuinfo_arc700[cpu].core.family > 0x52)
			perip_end = (vol.limit << 28) - 1;
	}
}
void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	if (ibcr.ver <= 3) {
		BUG_ON(ibcr.config != 3);
		p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	} else if (ibcr.ver >= 4) {
		p_ic->assoc = 1 << ibcr.config;	/* 1,2,4,8 */
	}

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->vipt = 1;
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		goto slc_chk;

	if (dbcr.ver <= 3) {
		BUG_ON(dbcr.config != 2);
		p_dc->assoc = 4;		/* Fixed to 4w set assoc */
		p_dc->vipt = 1;
		p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
	} else if (dbcr.ver >= 4) {
		p_dc->assoc = 1 << dbcr.config;	/* 1,2,4,8 */
		p_dc->vipt = 0;
		p_dc->alias = 0;		/* PIPT so can't VIPT alias */
	}

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);

slc_chk:
	if (is_isa_arcv2())
		read_decode_cache_bcr_arcv2(cpu);
}

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4
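
/*
 * Per-line cache maintenance loops, one flavour per MMU generation:
 *
 * v2 (MMU v1/v2): a line op takes paddr only; for the VIPT aliasing I-cache
 *     the relevant vaddr bits are folded ("stuffed") into paddr.
 * v3 (MMU v3): a line op takes a physical tag (PTAG) plus a vaddr based
 *     index, written via two separate AUX registers.
 * v4 (MMU v4, ARCv2): the D-cache is PIPT so paddr alone suffices; with
 *     USE_RGN_FLSH the whole [START, END] region is programmed in one shot.
 */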
static inline
void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int aux_cmd;
	int num_lines;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/*
	 * Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/* MMUv2 and before: paddr contains stuffed vaddr bits */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}
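
/*
 * For ARC700 MMUv3 I-cache and D-cache flushes
 *  - The ARC700 programming model requires paddr and vaddr be passed in
 *    separate AUX registers (*_IV*L and *_PTAG respectively) irrespective of
 *    whether the caches actually alias or not.
 *  - For HS38, only the aliasing I-cache configuration uses the PTAG reg
 *    (the non-aliasing I-cache doesn't, and the D-cache can't possibly alias).
 */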
static inline
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/*
	 * Ensure we properly floor/ceil the non-line aligned/sized requests:
	 * for a page sized region @paddr is already line aligned and @sz is
	 * an integral multiple of the line size.
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * MMUv3: the PTAG reg holds paddr. For a full page, paddr is constant
	 * across all lines of the page, so set it up once outside the loop.
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	/*
	 * This is technically for MMU v4, using the MMU v3 programming model.
	 * Special work for HS38 aliasing I-cache configuration with PAE40:
	 *  - upper 8 bits of paddr need to be written into PTAG_HI
	 *  - (and need to be written before the lower 32 bits)
	 */
	if (is_pae40_enabled() && op == OP_INV_IC)
		write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}

#ifndef USE_RGN_FLSH
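
/*
 * In HS38x (MMU v4), the I-cache is VIPT (can alias), the D-cache is PIPT.
 * Here's how cache ops are implemented:
 *
 *  - D-cache: only paddr needed (in DC_IVDL/DC_FLDL)
 *  - I-cache Non Aliasing: despite VIPT, only paddr needed (in IC_IVIL)
 *  - I-cache Aliasing: both vaddr and paddr needed (in IC_IVIL, IC_PTAG
 *    respectively), i.e. the MMU v3 programming model is reused (see
 *    arc_cache_init_master(), which binds __cache_line_loop_v3 in that case)
 *  - If PAE40 is enabled, independent of aliasing considerations, the higher
 *    bits need to be written into PTAG_HI
 */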
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int aux_cmd;
	int num_lines;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/*
	 * Ensure we properly floor/ceil the non-line aligned/sized requests:
	 * for a page sized region @paddr is already line aligned and @sz is
	 * an integral multiple of the line size.
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * For HS38 PAE40 configuration
	 *  - upper 8 bits of paddr need to be written into PTAG_HI
	 *  - (and need to be written before the lower 32 bits)
	 */
	if (is_pae40_enabled()) {
		if (op == OP_INV_IC)
			/*
			 * Non aliasing I-cache in HS38; the aliasing I-cache
			 * case is handled in __cache_line_loop_v3()
			 */
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

#else
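
/*
 * Optimized flush operation which takes a region as opposed to iterating
 * per line.
 */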
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int s, e;

	/* Only for Non aliasing I-cache in HS38 */
	if (op == OP_INV_IC) {
		s = ARC_REG_IC_IVIR;
		e = ARC_REG_IC_ENDR;
	} else {
		s = ARC_REG_DC_STARTR;
		e = ARC_REG_DC_ENDR;
	}

	if (!full_page) {
		/* for any leading gap between @paddr and start of cache line */
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;

		/*
		 * account for any trailing gap to end of cache line;
		 * this is equivalent to DIV_ROUND_UP() in the line ops above
		 */
		sz += L1_CACHE_BYTES - 1;
	}

	if (is_pae40_enabled()) {
		/* TBD: check if crossing 4TB boundary */
		if (op == OP_INV_IC)
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}

	/* ENDR needs to be set ahead of STARTR (latter triggers the operation) */
	write_aux_reg(e, paddr + sz);	/* ENDR is exclusive */
	write_aux_reg(s, paddr);

	/* caller waits on DC_CTRL.FS */
}

#endif

#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop	__cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
#define __cache_line_loop	__cache_line_loop_v3
#elif (CONFIG_ARC_MMU_VER > 3)
#define __cache_line_loop	__cache_line_loop_v4
#endif

#ifdef CONFIG_ARC_HAS_DCACHE

/*
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

#ifndef USE_RGN_FLSH
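/*
 * this version avoids extra read/write of DC_CTRL for flush or invalidate ops
 * in the non region flush regime (such as for ARCompact)
 */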
static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		/*
		 * Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}

#else

static inline void __before_dc_op(const int op)
{
	const unsigned int ctl = ARC_REG_DC_CTRL;
	unsigned int val = read_aux_reg(ctl);

	if (op == OP_FLUSH_N_INV)
		val |= DC_CTRL_INV_MODE_FLUSH;

	if (op != OP_INV_IC) {
		/*
		 * Flush / Invalidate is provided by DC_CTRL.RGN_OP 0 or 1
		 * combined Flush-n-Invalidate uses DC_CTRL.IM = 1 set above
		 */
		val &= ~DC_CTRL_RGN_OP_MSK;
		if (op & OP_INV)
			val |= DC_CTRL_RGN_OP_INV;
	}
	write_aux_reg(ctl, val);
}

#endif

static inline void __after_dc_op(const int op)
{
	if (op & OP_FLUSH) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		unsigned int reg;

		/* flush / flush-n-inv both wait */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* Switch back to default Invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}
}
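
/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */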
static inline void __dc_entire_op(const int op)
{
	int aux;

	__before_dc_op(op);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(op);
}

static inline void __dc_disable(void)
{
	const int r = ARC_REG_DC_CTRL;

	__dc_entire_op(OP_FLUSH_N_INV);
	write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);
}

static void __dc_enable(void)
{
	const int r = ARC_REG_DC_CTRL;

	write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz, const int op)
{
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
	unsigned long flags;

	local_irq_save(flags);

	__before_dc_op(op);

	__cache_line_loop(paddr, vaddr, sz, op, full_page);

	__after_dc_op(op);

	local_irq_restore(flags);
}

#else

#define __dc_entire_op(op)
#define __dc_disable()
#define __dc_enable()
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif

#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
}

static inline void
__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz)
{
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
	unsigned long flags;

	local_irq_save(flags);
	(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC, full_page);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	phys_addr_t paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz    = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}

#endif	/* CONFIG_SMP */

#else	/* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif	/* CONFIG_ARC_HAS_ICACHE */

noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock.
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY
	 * loop below)
	 */
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;
	unsigned int ctrl;
	phys_addr_t end;

	spin_lock_irqsave(&lock, flags);

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip
	 * END needs to be setup before START (latter triggers the operation)
	 * END can't be same as START, so add (l2_line_sz - 1) to sz
	 */
	end = paddr + sz + l2_line_sz - 1;
	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));

	write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));

	write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_REG_SLC_CTRL);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
#endif
}

noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock.
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY
	 * loop below)
	 */
	static DEFINE_SPINLOCK(lock);

	const unsigned long SLC_LINE_MASK = ~(l2_line_sz - 1);
	unsigned int ctrl, cmd;
	unsigned long flags;
	int num_lines;

	spin_lock_irqsave(&lock, flags);

	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	cmd = op & OP_INV ? ARC_AUX_SLC_IVDL : ARC_AUX_SLC_FLDL;

	sz += paddr & ~SLC_LINE_MASK;
	paddr &= SLC_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, l2_line_sz);

	while (num_lines-- > 0) {
		write_aux_reg(cmd, paddr);
		paddr += l2_line_sz;
	}

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_REG_SLC_CTRL);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
#endif
}

#define slc_op(paddr, sz, op)	slc_op_rgn(paddr, sz, op)

noinline static void slc_entire_op(const int op)
{
	unsigned int ctrl, r = ARC_REG_SLC_CTRL;

	ctrl = read_aux_reg(r);

	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(r, ctrl);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		write_aux_reg(ARC_REG_SLC_INVALIDATE, 0x1);
	else
		write_aux_reg(ARC_REG_SLC_FLUSH, 0x1);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(r);

	/* Important to wait for flush to complete */
	while (read_aux_reg(r) & SLC_CTRL_BUSY);
}

static inline void arc_slc_disable(void)
{
	const int r = ARC_REG_SLC_CTRL;

	slc_entire_op(OP_FLUSH_N_INV);
	write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
}

static inline void arc_slc_enable(void)
{
	const int r = ARC_REG_SLC_CTRL;

	write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
}
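
/*
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to K-mapping, former needs flushing.
 */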
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping_file(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapcount(page)) {
		/* kernel reading from page with U-mapping */
		phys_addr_t paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
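
/*
 * DMA ops for systems with L1 cache only
 * Make memory coherent with L1 cache by flushing/invalidating L1 lines
 */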
static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}

static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}
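
/*
 * DMA ops for systems with both L1 and L2 caches, but without IOC
 * Both L1 and L2 lines need to be explicitly flushed/invalidated
 */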
static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
	slc_op(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
	slc_op(start, sz, OP_INV);
}

static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
	slc_op(start, sz, OP_FLUSH);
}
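
/*
 * Exported DMA API, dispatching to the hooks bound in arc_cache_init_master()
 */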
void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback);
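
/*
 * This is API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address thus we
 * need to convert vmalloc addr to PHY addr
 */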
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops take PHY address, hence special care
	 *     needed to convert vmalloc addr to PHY addr
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages, hence the loop
	 */
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);
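
/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However in one instance, when called by kprobe (for a breakpt in
 *    builtin kernel code), @vaddr will be paddr only, meaning the CDU
 *    operation will use a paddr to index the cache (despite VIPT). This is
 *    fine since a builtin kernel page will not have any virtual mappings.
 *    kprobe on a loadable module will be kernel vaddr.
 */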
void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clearout kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	phys_addr_t paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
	__flush_dcache_page((phys_addr_t)page_address(page),
			    (phys_addr_t)page_address(page));
}

#endif

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
{
	void *kfrom = kmap_atomic(from);
	void *kto = kmap_atomic(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with K-mapping, sync former to physical page so that
	 * K-mapping in memcpy below, sees the right data
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 *
	 * For !VIPT cache, all of this gets compiled out as
	 * addr_not_cache_congruent() is 0
	 */
	if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page((unsigned long)kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page(kto, kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available), but
	 * update_mmu_cache() already has code to do that for other
	 * non copied user pages (e.g. read faults which wire in pagecache
	 * page directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}

	kunmap_atomic(kto);
	kunmap_atomic(kfrom);
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}

/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}
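
/*
 * IO-Coherency (IOC) setup rules:
 *
 * 1. Needs to be at system level, so done only once by the master core.
 *    Non-masters need not be accessing caches at that time.
 *
 * 2. Caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC,
 *    otherwise any straggler data might behave strangely post IOC enabling.
 *
 * 3. All caches need to be disabled when setting up IOC to elide any in-flight
 *    coherency transactions.
 */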
noinline void __init arc_ioc_setup(void)
{
	unsigned int ioc_base, mem_sz;

	/*
	 * If IOC was already enabled (due to bootloader) it technically needs
	 * to be reconfigured with aperture base,size corresponding to the
	 * Linux memory map, which will certainly be different than uboot's.
	 * But disabling and reenabling IOC when DMA might be potentially
	 * active is tricky business. To avoid random memory issues later,
	 * just panic here and ask the user to upgrade the bootloader to one
	 * which doesn't enable IOC.
	 */
	if (read_aux_reg(ARC_REG_IO_COH_ENABLE) & ARC_IO_COH_ENABLE_BIT)
		panic("IOC already enabled, please upgrade bootloader!\n");

	if (!ioc_enable)
		return;

	/* Flush + invalidate + disable L1 dcache */
	__dc_disable();

	/* Flush + invalidate SLC */
	if (read_aux_reg(ARC_REG_SLC_BCR))
		slc_entire_op(OP_FLUSH_N_INV);

	/*
	 * currently IOC Aperture covers entire DDR
	 * TBD: fix for hardware aperture registers to kernel aperture mismatch
	 */
	mem_sz = arc_get_mem_sz();

	if (!is_power_of_2(mem_sz) || mem_sz < 4096)
		panic("IOC Aperture size must be power of 2 larger than 4KB");

	/*
	 * IOC Aperture size decoded as 2 ^ (SIZE + 2) KB,
	 * so setting 0x11 implies 512MB, 0x12 implies 1GB...
	 */
	write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, order_base_2(mem_sz >> 10) - 2);

	/* for now assume kernel base is start of IOC aperture */
	ioc_base = CONFIG_LINUX_RAM_BASE;

	if (ioc_base % mem_sz != 0)
		panic("IOC Aperture start must be aligned to the size of the aperture");

	write_aux_reg(ARC_REG_IO_COH_AP0_BASE, ioc_base >> 12);
	write_aux_reg(ARC_REG_IO_COH_PARTIAL, ARC_IO_COH_PARTIAL_BIT);
	write_aux_reg(ARC_REG_IO_COH_ENABLE, ARC_IO_COH_ENABLE_BIT);

	/* Re-enable L1 dcache */
	__dc_enable();
}
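
/*
 * Cache related boot time checks/setups only.
 * All cores are assumed to have the same cache configuration, so the master
 * core validates it once against the kernel build config and binds the
 * cache-op function pointers.
 */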
void __init arc_cache_init_master(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->line_len)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		/*
		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
		 * pair to provide vaddr/paddr respectively, just as in MMU v3
		 */
		if (is_isa_arcv2() && ic->alias)
			_cache_line_loop_ic_fn = __cache_line_loop_v3;
		else
			_cache_line_loop_ic_fn = __cache_line_loop;
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

		if (!dc->line_len)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing on ARCompact: ARCv2 is PIPT */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
			int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);

			if (dc->alias) {
				if (!handled)
					panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
				if (CACHE_COLORS_NUM != num_colors)
					panic("CACHE_COLORS_NUM not optimized for config\n");
			} else if (!dc->alias && handled) {
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			}
		}
	}

	/*
	 * Check that SMP_CACHE_BYTES (and hence ARCH_DMA_MINALIGN) is larger
	 * or equal to any cache line length.
	 */
	BUILD_BUG_ON_MSG(L1_CACHE_BYTES > SMP_CACHE_BYTES,
			 "SMP_CACHE_BYTES must be >= any cache line length");
	if (is_isa_arcv2() && (l2_line_sz > SMP_CACHE_BYTES))
		panic("L2 Cache line [%d] > kernel Config [%d]\n",
		      l2_line_sz, SMP_CACHE_BYTES);

	/* Note that SLC disable not formally supported till HS 3.0 */
	if (is_isa_arcv2() && l2_line_sz && !slc_enable)
		arc_slc_disable();

	if (is_isa_arcv2() && ioc_exists)
		arc_ioc_setup();

	if (is_isa_arcv2() && l2_line_sz && slc_enable) {
		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
		__dma_cache_inv = __dma_cache_inv_slc;
		__dma_cache_wback = __dma_cache_wback_slc;
	} else {
		__dma_cache_wback_inv = __dma_cache_wback_inv_l1;
		__dma_cache_inv = __dma_cache_inv_l1;
		__dma_cache_wback = __dma_cache_wback_l1;
	}

	/*
	 * In case of IOC (say IOC+SLC case), pointers above could still be set
	 * but end up not being relevant as the first function in chain is not
	 * called at all for devices using coherent DMA.
	 *     arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()
	 */
}

void __ref arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	pr_info("%s", arc_cache_mumbojumbo(0, str, sizeof(str)));

	if (!cpu)
		arc_cache_init_master();

	/*
	 * In PAE regime, TLB and cache maintenance ops take wider addresses,
	 * and even if PAE is not enabled in the kernel, the upper 32 bits of
	 * the address registers may hold stale values (e.g. after a soft
	 * restart such as kexec), so zero them explicitly here.
	 */
	if (is_isa_arcv2() && pae40_exist_but_not_enab()) {

		if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE))
			write_aux_reg(ARC_REG_IC_PTAG_HI, 0);

		if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE))
			write_aux_reg(ARC_REG_DC_PTAG_HI, 0);

		if (l2_line_sz) {
			write_aux_reg(ARC_REG_SLC_RGN_END1, 0);
			write_aux_reg(ARC_REG_SLC_RGN_START1, 0);
		}
	}
}