#ifndef _ASM_M32R_TLBFLUSH_H
#define _ASM_M32R_TLBFLUSH_H

#include <asm/m32r.h>

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm's TLB entries
 *  - flush_tlb_all() flushes all processes' TLB entries
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 */
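
/*
 * Illustrative sketch only (not part of the original header), assuming the
 * usual Linux calling convention for these hooks: after a page-table entry
 * is changed or cleared, the corresponding user mapping must be flushed so
 * the CPU cannot keep using a stale translation.  The function below and
 * its name are hypothetical.
 */
#if 0
static inline void example_after_pte_change(struct vm_area_struct *vma,
					    unsigned long addr)
{
	/* ... the PTE mapping 'addr' has just been modified ... */
	flush_tlb_page(vma, addr);	/* drop the stale TLB entry for 'addr' */
}
#endif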

extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *);
extern void local_flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void local_flush_tlb_range(struct vm_area_struct *, unsigned long,
	unsigned long);

#ifndef CONFIG_SMP
#ifdef CONFIG_MMU
#define flush_tlb_all()			local_flush_tlb_all()
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, page)	local_flush_tlb_page(vma, page)
#define flush_tlb_range(vma, start, end)	\
	local_flush_tlb_range(vma, start, end)
#define flush_tlb_kernel_range(start, end)	local_flush_tlb_all()
#else	/* CONFIG_MMU */
#define flush_tlb_all()			do { } while (0)
#define flush_tlb_mm(mm)		do { } while (0)
#define flush_tlb_page(vma, vmaddr)	do { } while (0)
#define flush_tlb_range(vma, start, end)	do { } while (0)
#endif	/* CONFIG_MMU */
#else	/* CONFIG_SMP */
extern void smp_flush_tlb_all(void);
extern void smp_flush_tlb_mm(struct mm_struct *);
extern void smp_flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void smp_flush_tlb_range(struct vm_area_struct *, unsigned long,
	unsigned long);

#define flush_tlb_all()			smp_flush_tlb_all()
#define flush_tlb_mm(mm)		smp_flush_tlb_mm(mm)
#define flush_tlb_page(vma, page)	smp_flush_tlb_page(vma, page)
#define flush_tlb_range(vma, start, end)	\
	smp_flush_tlb_range(vma, start, end)
#define flush_tlb_kernel_range(start, end)	smp_flush_tlb_all()
#endif	/* CONFIG_SMP */

/*
 * Flush the single TLB entry mapping 'page': the virtual address is written
 * to the MMU search-address register (MSVA) and a TLB lookup is started
 * through the TLB-operation register (MTOP).  Once the hardware signals
 * completion, the matched instruction- and data-TLB entries, reached
 * through the MIDXI/MIDXD index registers, are cleared.
 */
static __inline__ void __flush_tlb_page(unsigned long page)
{
	unsigned int tmpreg0, tmpreg1, tmpreg2;

	__asm__ __volatile__ (
		"seth	%0, #high(%4)	\n\t"	/* %0 = upper half of MMU_REG_BASE */
		"st	%3, @(%5, %0)	\n\t"	/* MSVA = page to look up */
		"ldi	%1, #1		\n\t"
		"st	%1, @(%6, %0)	\n\t"	/* MTOP = 1: start the TLB search */
		"add3	%1, %0, %7	\n\t"	/* %1 = address of MIDXI */
		".fillinsn		\n"
		"1:			\n\t"
		"ld	%2, @(%6, %0)	\n\t"	/* poll MTOP until it reads 0 */
		"bnez	%2, 1b		\n\t"
		"ld	%0, @%1+	\n\t"	/* %0 = MIDXI (matched iTLB entry) */
		"ld	%1, @%1		\n\t"	/* %1 = MIDXD (matched dTLB entry) */
		"st	%2, @+%0	\n\t"	/* store 0 to invalidate the iTLB entry */
		"st	%2, @+%1	\n\t"	/* store 0 to invalidate the dTLB entry */
		: "=&r" (tmpreg0), "=&r" (tmpreg1), "=&r" (tmpreg2)
		: "r" (page), "i" (MMU_REG_BASE), "i" (MSVA_offset),
		"i" (MTOP_offset), "i" (MIDXI_offset)
		: "memory"
	);
}
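
/*
 * Illustrative sketch only (not part of the original header): a caller such
 * as local_flush_tlb_page() is expected to hand __flush_tlb_page() a
 * page-aligned virtual address, typically with the ASID of the target mm
 * folded into the low-order bits.  The helper below and its arguments are
 * hypothetical.
 */
#if 0
static inline void example_flush_one_user_page(unsigned long addr,
					       unsigned long asid)
{
	addr &= PAGE_MASK;	/* keep only the virtual page number */
	addr |= asid;		/* tag the lookup with the address-space id */
	__flush_tlb_page(addr);
}
#endif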

/*
 * Flush the entire TLB: write the all-entry invalidate command (0xc) to the
 * TLB-operation register (MTOP) and busy-wait until the hardware clears it
 * to signal completion.
 */
static __inline__ void __flush_tlb_all(void)
{
	unsigned int tmpreg0, tmpreg1;

	__asm__ __volatile__ (
		"seth	%0, #high(%2)		\n\t"
		"or3	%0, %0, #low(%2)	\n\t"	/* %0 = address of MTOP */
		"ldi	%1, #0xc		\n\t"
		"st	%1, @%0			\n\t"	/* invalidate all i/dTLB entries */
		".fillinsn			\n"
		"1:				\n\t"
		"ld	%1, @%0			\n\t"	/* poll until the operation finishes */
		"bnez	%1, 1b			\n\t"
		: "=&r" (tmpreg0), "=&r" (tmpreg1)
		: "i" (MTOP) : "memory"
	);
}

extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);

#endif	/* _ASM_M32R_TLBFLUSH_H */