/*
 * arch/sh/mm/cache-sh5.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2002  Benedict Gaster
 * Copyright (C) 2003  Richard Curnow
 * Copyright (C) 2003 - 2008  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>

extern void __weak sh4__flush_region_init(void);

/* Wired TLB entry for the D-cache */
static unsigned long long dtlb_cache_slot;

/*
 * The following group of functions deals with mapping and unmapping a
 * temporary page into a DTLB slot that has been set aside for exclusive
 * use.
 */
static inline void
sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,
			   unsigned long paddr)
{
	local_irq_disable();
	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
}

static inline void sh64_teardown_dtlb_cache_slot(void)
{
	sh64_teardown_tlb_slot(dtlb_cache_slot);
	local_irq_enable();
}
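
/*
 * Typical use of the slot, as in sh64_dcache_purge_coloured_phy_page()
 * and sh64_dcache_purge_phy_page() below (sketch only):
 *
 *	sh64_setup_dtlb_cache_slot(eaddr, get_asid(), paddr);
 *	... ocbp/alloco through the temporary mapping at 'eaddr' ...
 *	sh64_teardown_dtlb_cache_slot();
 *
 * The setup opens an IRQ-disabled window which the teardown closes, so
 * the two calls must always be strictly paired.
 */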

static inline void sh64_icache_inv_all(void)
{
	unsigned long long addr, flag, data;
	unsigned long flags;

	addr = ICCR0;
	flag = ICCR0_ICI;
	data = 0;

	/* Make this a critical section for safety (probably not strictly necessary.) */
	local_irq_save(flags);

	/* Without %1 it gets inexplicably wrong */
	__asm__ __volatile__ (
		"getcfg	%3, 0, %0\n\t"
		"or	%0, %2, %0\n\t"
		"putcfg	%3, 0, %0\n\t"
		"synci"
		: "=&r" (data)
		: "0" (data), "r" (flag), "r" (addr));

	local_irq_restore(flags);
}

static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
{
	/* Invalidate range of addresses [start,end] from the I-cache, where
	 * the addresses lie in the kernel superpage. */

	unsigned long long ullend, addr, aligned_start;
	aligned_start = (unsigned long long)(signed long long)(signed long) start;
	/* Round down so the line containing 'start' is covered too */
	addr = aligned_start & ~(L1_CACHE_BYTES - 1);
	ullend = (unsigned long long) (signed long long) (signed long) end;

	while (addr <= ullend) {
		__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}
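
/*
 * Worked example for the loop above, assuming 32-byte cache lines (as
 * the unrolled loop further down implies): for start = ...0x1234 and
 * end = ...0x12f0, 'addr' starts at the rounded-down ...0x1220 and icbi
 * is issued at ...0x1220, 0x1240, ..., 0x12e0, covering the lines that
 * contain both 'start' and 'end'.
 */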

static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
{
	/* If we get called, we know that vma->vm_flags contains VM_EXEC.
	   Also, eaddr is page-aligned. */
	unsigned int cpu = smp_processor_id();
	unsigned long long addr, end_addr;
	unsigned long flags = 0;
	unsigned long running_asid, vma_asid;
	addr = eaddr;
	end_addr = addr + PAGE_SIZE;

	/* Check whether we can use the current ASID for the I-cache
	   invalidation.  For example, if we're called via
	   access_process_vm->flush_cache_page->here, (e.g. when reading from
	   /proc), 'running_asid' will be that of the reader, not of the
	   victim.

	   Also, note the risk that we might get pre-empted between the ASID
	   compare and blocking IRQs, and before we regain control, the
	   pid->ASID mapping changes.  However, the whole cache will get
	   invalidated when the mapping is renewed, so the worst that can
	   happen is that the loop below ends up invalidating somebody else's
	   cache entries.
	*/

	running_asid = get_asid();
	vma_asid = cpu_asid(cpu, vma->vm_mm);
	if (running_asid != vma_asid) {
		local_irq_save(flags);
		switch_and_save_asid(vma_asid);
	}
	while (addr < end_addr) {
		/* Worth unrolling a little */
		__asm__ __volatile__("icbi %0,  0" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 32" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 64" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 96" : : "r" (addr));
		addr += 128;
	}
	if (running_asid != vma_asid) {
		switch_and_save_asid(running_asid);
		local_irq_restore(flags);
	}
}
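
/*
 * The ASID dance above (compare, switch under disabled IRQs, run the
 * invalidate loop, switch back) is the same pattern that
 * sh64_icache_inv_user_page_range() open-codes below.  In outline:
 *
 *	prev = get_asid();
 *	if (prev != target_asid) {
 *		local_irq_save(flags);
 *		switch_and_save_asid(target_asid);
 *	}
 *	... icbi loop ...
 *	if (prev != target_asid) {
 *		switch_and_save_asid(prev);
 *		local_irq_restore(flags);
 *	}
 */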

static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
			  unsigned long start, unsigned long end)
{
	/* Used for invalidating big chunks of I-cache, i.e. assume the range
	   is whole pages.  If 'start' or 'end' is not page aligned, the code
	   is conservative and invalidates to the ends of the enclosing pages.
	   This is functionally OK, just a performance loss. */

	/* See the comments below in sh64_dcache_purge_user_range() regarding
	   the choice of algorithm.  However, for the I-cache option (2) isn't
	   available because there are no physical tags so aliases can't be
	   resolved.  The icbi instruction has to be used through the user
	   mapping.  Because icbi is cheaper than ocbp on a cache hit, the
	   selective code pays off for larger ranges than it does with the
	   D-cache.  Just assume 64 pages for now as a working figure.
	   */
	int n_pages;

	if (!mm)
		return;

	n_pages = ((end - start) >> PAGE_SHIFT);
	if (n_pages >= 64) {
		sh64_icache_inv_all();
	} else {
		unsigned long aligned_start;
		unsigned long eaddr;
		unsigned long after_last_page_start;
		unsigned long mm_asid, current_asid;
		unsigned long flags = 0;

		mm_asid = cpu_asid(smp_processor_id(), mm);
		current_asid = get_asid();

		if (mm_asid != current_asid) {
			/* Switch ASID and run the invalidate loop under cli */
			local_irq_save(flags);
			switch_and_save_asid(mm_asid);
		}

		aligned_start = start & PAGE_MASK;
		after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);

		while (aligned_start < after_last_page_start) {
			struct vm_area_struct *vma;
			unsigned long vma_end;
			vma = find_vma(mm, aligned_start);
			if (!vma || (aligned_start < vma->vm_start)) {
				/* No mapping here: skip ahead rather than
				   getting stuck in an error condition */
				aligned_start += PAGE_SIZE;
				continue;
			}
			vma_end = vma->vm_end;
			if (vma->vm_flags & VM_EXEC) {
				/* Executable */
				eaddr = aligned_start;
				while (eaddr < vma_end) {
					sh64_icache_inv_user_page(vma, eaddr);
					eaddr += PAGE_SIZE;
				}
			}
			aligned_start = vma->vm_end; /* Skip to start of next region */
		}

		if (mm_asid != current_asid) {
			switch_and_save_asid(current_asid);
			local_irq_restore(flags);
		}
	}
}
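
/*
 * Rough cost model behind the 64-page threshold above, assuming 4 KiB
 * pages: 64 pages is 256 KiB, many times the size of the I-cache
 * (typically 32 KiB on SH-5), so issuing icbi line by line across the
 * range is bound to cost more than the single global invalidate done by
 * sh64_icache_inv_all().
 */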

static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
{
	/* The icbi instruction never raises ITLBMISS.  i.e. if there's not a
	   cache hit on the virtual tag the instruction ends there, without a
	   TLB lookup. */

	unsigned long long aligned_start;
	unsigned long long ull_end;
	unsigned long long addr;

	ull_end = end;

	/* Just invalidate over the range using the natural addresses.  TLB
	   miss handling will be OK (TBC).  Since it's for the current process,
	   either we're already in the right ASID context, or the ASIDs have
	   been recycled since we were last active in which case we might just
	   invalidate another process's I-cache entries: no worries, just a
	   performance drop for that process. */
	/* Round down so the line containing 'start' is covered too */
	aligned_start = start & ~(L1_CACHE_BYTES - 1);
	addr = aligned_start;
	while (addr < ull_end) {
		__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
		__asm__ __volatile__ ("nop");
		__asm__ __volatile__ ("nop");
		addr += L1_CACHE_BYTES;
	}
}

/* Buffer used as the target of alloco instructions to purge data from cache
   sets by natural eviction. -- RPC */
#define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4))
static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, };
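
/*
 * Size sanity check, assuming 32-byte cache lines: the buffer is
 * (32 << 10) + 4096 = 36 KiB, i.e. a cache-sized 32 KiB region plus a
 * page of slack, presumably so that a suitably aligned cache-sized
 * window always fits within the buffer regardless of the colour the
 * linker happens to give it.
 */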

static inline void sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
{
	/* Purge all ways in a particular block of sets, specified by the base
	   set number and number of sets.  Can handle wrap-around, if that's
	   needed.  */

	int dummy_buffer_base_set;
	unsigned long long eaddr, eaddr0, eaddr1;
	int j;
	int set_offset;

	dummy_buffer_base_set = ((int)&dummy_alloco_area &
				 cpu_data->dcache.entry_mask) >>
				 cpu_data->dcache.entry_shift;
	set_offset = sets_to_purge_base - dummy_buffer_base_set;

	for (j = 0; j < n_sets; j++, set_offset++) {
		set_offset &= (cpu_data->dcache.sets - 1);
		eaddr0 = (unsigned long long)dummy_alloco_area +
			(set_offset << cpu_data->dcache.entry_shift);

		/*
		 * Do one alloco which hits the required set per cache
		 * way.  For write-back mode, this will purge the #ways
		 * resident lines.  There's little point unrolling this
		 * loop because the allocos stall more if they're too
		 * close together.
		 */
		eaddr1 = eaddr0 + cpu_data->dcache.way_size *
				  cpu_data->dcache.ways;

		for (eaddr = eaddr0; eaddr < eaddr1;
		     eaddr += cpu_data->dcache.way_size) {
			__asm__ __volatile__ ("alloco %0, 0" : : "r" (eaddr));
			__asm__ __volatile__ ("synco"); /* TAKum03020 */
		}

		for (eaddr = eaddr0; eaddr < eaddr1;
		     eaddr += cpu_data->dcache.way_size) {
			/*
			 * Load from each address.  Required because
			 * alloco is a NOP if the cache is write-through.
			 */
			if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags)))
				__raw_readb((unsigned long)eaddr);
		}
	}

	/*
	 * Don't use OCBI to invalidate the lines.  That costs cycles
	 * directly.  If the dummy block is just left resident, it will
	 * naturally get evicted as required.
	 */
}
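
/*
 * Illustrative helper matching the indexing arithmetic above (not used
 * by the code; assuming the usual SH-5 geometry of 32-byte lines and
 * 256 sets, entry_mask selects address bits [12:5], so e.g. an address
 * ending in 0x1234 lands in set 0x91):
 */
static inline int sh64_dcache_set_of(unsigned long addr)
{
	return (addr & cpu_data->dcache.entry_mask) >>
		cpu_data->dcache.entry_shift;
}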

/*
 * Purge the entire contents of the dcache.  The most efficient way to
 * achieve this is to use alloco instructions on a region of unused
 * memory equal in size to the cache, thereby causing the current
 * contents to be discarded by natural eviction.  The alternative, namely
 * reading every tag, setting up a mapping for the corresponding page and
 * doing an OCBP for the line, would be much more expensive.
 */
static void sh64_dcache_purge_all(void)
{
	sh64_dcache_purge_sets(0, cpu_data->dcache.sets);
}

/* Assumes this address and the (2**n_synbits) pages above it aren't used
   for anything else in the kernel */
#define MAGIC_PAGE0_START 0xffffffffec000000ULL

/* Purge the physical page 'paddr' from the cache.  It's known that any
 * cache lines requiring attention have the same page colour as the
 * address 'eaddr'.
 *
 * This relies on the fact that the D-cache matches on physical tags when
 * no virtual tag matches.  So we create an alias for the original page
 * and purge through that.  (Alternatively, we could have done this by
 * switching ASID to match the original mapping and purged through that,
 * but that involves ASID switching cost + probably a TLBMISS + refill
 * anyway.)
 */
static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr,
					        unsigned long eaddr)
{
	unsigned long long magic_page_start;
	unsigned long long magic_eaddr, magic_eaddr_end;

	magic_page_start = MAGIC_PAGE0_START + (eaddr & CACHE_OC_SYN_MASK);

	/* As long as the kernel is not pre-emptible, this doesn't need to be
	   under cli/sti. */
	sh64_setup_dtlb_cache_slot(magic_page_start, get_asid(), paddr);

	magic_eaddr = magic_page_start;
	magic_eaddr_end = magic_eaddr + PAGE_SIZE;

	while (magic_eaddr < magic_eaddr_end) {
		/* Little point in unrolling this loop - the OCBPs are blocking
		   and won't go any quicker (i.e. the loop overhead is parallel
		   to part of the OCBP execution.) */
		__asm__ __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr));
		magic_eaddr += L1_CACHE_BYTES;
	}

	sh64_teardown_dtlb_cache_slot();
}
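
/*
 * Worked example for the alias computation above, assuming 4 KiB pages
 * and a single synonym bit (so CACHE_OC_SYN_MASK selects address bit
 * 12): an odd-coloured 'eaddr' purges through the alias at
 * MAGIC_PAGE0_START + 0x1000, an even-coloured one through
 * MAGIC_PAGE0_START itself.
 */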

/*
 * Purge a page given its physical start address, by creating a temporary
 * 1 page mapping and purging across that.  Even if we know the virtual
 * address (& vma or mm) of the page, the method here is more elegant
 * because it avoids issues of coping with page faults on the purge
 * instructions (i.e. no special-case code required in the critical path
 * in the TLB miss handling).
 */
static void sh64_dcache_purge_phy_page(unsigned long paddr)
{
	unsigned long long eaddr_start, eaddr, eaddr_end;
	int i;

	/* As long as the kernel is not pre-emptible, this doesn't need to be
	   under cli/sti. */
	eaddr_start = MAGIC_PAGE0_START;
	for (i = 0; i < (1 << CACHE_OC_N_SYNBITS); i++) {
		sh64_setup_dtlb_cache_slot(eaddr_start, get_asid(), paddr);

		eaddr = eaddr_start;
		eaddr_end = eaddr + PAGE_SIZE;
		while (eaddr < eaddr_end) {
			__asm__ __volatile__ ("ocbp %0, 0" : : "r" (eaddr));
			eaddr += L1_CACHE_BYTES;
		}

		sh64_teardown_dtlb_cache_slot();
		eaddr_start += PAGE_SIZE;
	}
}
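
/*
 * Walk-through of the loop above, assuming CACHE_OC_N_SYNBITS == 1: it
 * runs twice, purging the page through the aliases at MAGIC_PAGE0_START
 * and MAGIC_PAGE0_START + PAGE_SIZE, i.e. once for each colour the
 * physical page could be cached under.
 */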

static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
				unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	spinlock_t *ptl;
	unsigned long paddr;

	if (!mm)
		return; /* No way to find physical address of page */

	pgd = pgd_offset(mm, addr);
	if (pgd_bad(*pgd))
		return;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		return;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		entry = *pte;
		if (pte_none(entry) || !pte_present(entry))
			continue;
		paddr = pte_val(entry) & PAGE_MASK;
		sh64_dcache_purge_coloured_phy_page(paddr, addr);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
}

/*
 * There are at least 5 choices for the implementation of this, with
 * pros (+), cons (-), comments (*):
 *
 * 1. ocbp each line in the range through the original user's ASID
 *    + no lines spuriously evicted
 *    - tlbmiss handling (must either handle faults on demand (=> extra
 *	special-case code in the tlbmiss critical path), or map the page
 *	in advance (=> flush_tlb_range in advance to avoid multiple hits))
 *    - ASID switching
 *    - expensive for large ranges
 *
 * 2. temporarily map each page in the range to a special effective
 *    address and ocbp through the temporary mapping; relies on the
 *    fact that SH-5 OCB* always do TLB lookup and match on ptags (they
 *    never look at the etags)
 *    + no spurious evictions
 *    - expensive for large ranges
 *    * surely cheaper than (1)
 *
 * 3. walk all the lines in the cache, check the tags, if a match
 *    occurs create a page mapping to ocbp the line through
 *    + no spurious evictions
 *    - tag inspection overhead
 *    - (especially for small ranges)
 *    - potential cost of setting up/tearing down page mapping for
 *	every line that matches the range
 *    * cost partly independent of range size
 *
 * 4. walk all the lines in the cache, check the tags, if a match
 *    occurs use 4 * alloco to purge the line (+3 other probably
 *    innocent victims) by natural eviction
 *    + no tlb mapping overheads
 *    - spurious evictions
 *    - tag inspection overhead
 *
 * 5. implement like flush_cache_all
 *    + no tag inspection overhead
 *    - spurious evictions
 *    - bad for small ranges
 *
 * (1) can be ruled out as more expensive than (2).  (2) appears best
 * for small ranges.  The choice between (3), (4) and (5) for large
 * ranges and the range size for the large/small boundary need
 * benchmarking to determine.
 *
 * For now use approach (2) for small ranges and (5) for large ones.
 */
static void sh64_dcache_purge_user_range(struct mm_struct *mm,
			  unsigned long start, unsigned long end)
{
	int n_pages = ((end - start) >> PAGE_SHIFT);

	if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
		sh64_dcache_purge_all();
	} else {
		/* Small range, covered by a single page table page */
		start &= PAGE_MASK;	/* should already be so */
		end = PAGE_ALIGN(end);	/* should already be so */
		sh64_dcache_purge_user_pages(mm, start, end);
	}
}
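
/*
 * The PMD_MASK test above is a cheap "same page-table page?" check:
 * (start ^ (end - 1)) & PMD_MASK is non-zero exactly when 'start' and
 * the last byte of the range fall under different PMD entries, in which
 * case sh64_dcache_purge_user_pages() (which walks a single PTE page)
 * can't be used and we fall back to purging the whole cache.
 */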

/*
 * Invalidate the entire contents of both caches, after writing back to
 * memory any dirty data from the D-cache.
 */
static void sh5_flush_cache_all(void *unused)
{
	sh64_dcache_purge_all();
	sh64_icache_inv_all();
}

/*
 * Invalidate an entire user-address space from both caches, after
 * writing back dirty data (e.g. for shared mmap etc).
 *
 * This could be coded selectively by inspecting all the tags then
 * doing 4*alloco on any set containing a match (as for
 * flush_cache_range), but fork/exit/execve (where this is called from)
 * are expensive anyway.
 *
 * Have to do a purge here, despite the comments re I-cache below.
 * There could be odd-coloured dirty data associated with the mm still
 * in the cache - if this gets written out through natural eviction
 * after the kernel has reused the page there will be chaos.
 *
 * The mm being torn down won't ever be active again, so any Icache
 * lines tagged with its ASID won't be visible for the rest of the
 * lifetime of this ASID cycle.  Before the ASID gets reused, there
 * will be a flush_cache_all.  Hence we don't need to touch the
 * I-cache.  This is similar to the lack of action needed in
 * flush_tlb_mm - see fault.c.
 */
static void sh5_flush_cache_mm(void *unused)
{
	sh64_dcache_purge_all();
}

/*
 * Invalidate (from both caches) the range [start,end) of virtual
 * addresses from the user address space specified by mm, after writing
 * back any dirty data.
 *
 * Note, 'end' is 1 byte beyond the end of the range to flush.
 */
static void sh5_flush_cache_range(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	unsigned long start, end;

	vma = data->vma;
	start = data->addr1;
	end = data->addr2;

	sh64_dcache_purge_user_range(vma->vm_mm, start, end);
	sh64_icache_inv_user_page_range(vma->vm_mm, start, end);
}

/*
 * Invalidate any entries in either cache for the vma within the user
 * address space vma->vm_mm for the page starting at virtual address
 * 'eaddr'.   This seems to be used primarily in breaking COW.  Note,
 * the I-cache must be searched too in case the page in question is
 * both writable and being executed from (e.g. stack trampolines.)
 *
 * Note, this is called with pte lock held.
 */
static void sh5_flush_cache_page(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	unsigned long eaddr, pfn;

	vma = data->vma;
	eaddr = data->addr1;
	pfn = data->addr2;

	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);

	if (vma->vm_flags & VM_EXEC)
		sh64_icache_inv_user_page(vma, eaddr);
}

static void sh5_flush_dcache_page(void *page)
{
	sh64_dcache_purge_phy_page(page_to_phys((struct page *)page));
	wmb();
}

/*
 * Flush the range [start,end] of kernel virtual address space from
 * the I-cache.  The corresponding range must be purged from the
 * D-cache also because the SH-5 doesn't have cache snooping between
 * the caches.  The addresses will be visible through the superpage
 * mapping, therefore it's guaranteed that there are no cache entries
 * for the range in cache sets of the wrong colour.
 */
static void sh5_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	unsigned long start, end;

	start = data->addr1;
	end = data->addr2;

	/* The region flushers take a size, not an end address */
	__flush_purge_region((void *)start, end - start);
	wmb();
	sh64_icache_inv_kernel_range(start, end);
}

/*
 * For the address range [start,end), write back the data from the
 * D-cache and invalidate the corresponding region of the I-cache for the
 * current process.  Used to flush signal trampolines on the stack to
 * make them executable.
 */
static void sh5_flush_cache_sigtramp(void *vaddr)
{
	unsigned long end = (unsigned long)vaddr + L1_CACHE_BYTES;

	__flush_wback_region(vaddr, L1_CACHE_BYTES);
	wmb();
	sh64_icache_inv_current_user_range((unsigned long)vaddr, end);
}
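
/*
 * Illustrative caller (a sketch, not from this file): the signal
 * delivery code writes a return trampoline onto the user stack and then
 * does something like
 *
 *	flush_cache_sigtramp((unsigned long)&frame->retcode);
 *
 * so that the freshly written instructions are visible to instruction
 * fetch before the task returns to user space.
 */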

void __init sh5_cache_init(void)
{
	local_flush_cache_all		= sh5_flush_cache_all;
	local_flush_cache_mm		= sh5_flush_cache_mm;
	local_flush_cache_dup_mm	= sh5_flush_cache_mm;
	local_flush_cache_page		= sh5_flush_cache_page;
	local_flush_cache_range		= sh5_flush_cache_range;
	local_flush_dcache_page		= sh5_flush_dcache_page;
	local_flush_icache_range	= sh5_flush_icache_range;
	local_flush_cache_sigtramp	= sh5_flush_cache_sigtramp;

	/* Reserve a slot for dcache colouring in the DTLB */
	dtlb_cache_slot	= sh64_get_wired_dtlb_entry();

	sh4__flush_region_init();
}