This source file includes the following definitions:
- copy_to_user_page
- copy_from_user_page
#ifndef _ASM_MICROBLAZE_CACHEFLUSH_H
#define _ASM_MICROBLAZE_CACHEFLUSH_H

#include <linux/mm.h>
#include <linux/io.h>
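
/*
 * Per-CPU table of cache operations. Each member is a raw instruction-
 * or data-cache primitive (enable, disable, flush, invalidate, plus the
 * address-range forms). The set matching the running core is selected at
 * cache init time (see microblaze_cache_init() below) and is reached
 * through the "mbc" pointer declared after the struct.
 */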
struct scache {
	/* instruction cache operations */
	void (*ie)(void);	/* enable */
	void (*id)(void);	/* disable */
	void (*ifl)(void);	/* flush all */
	void (*iflr)(unsigned long a, unsigned long b);	/* flush range */
	void (*iin)(void);	/* invalidate all */
	void (*iinr)(unsigned long a, unsigned long b);	/* invalidate range */
	/* data cache operations */
	void (*de)(void);	/* enable */
	void (*dd)(void);	/* disable */
	void (*dfl)(void);	/* flush all */
	void (*dflr)(unsigned long a, unsigned long b);	/* flush range */
	void (*din)(void);	/* invalidate all */
	void (*dinr)(unsigned long a, unsigned long b);	/* invalidate range */
};

extern struct scache *mbc;

void microblaze_cache_init(void);

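/*
 * Thin wrappers that dispatch to the active cache implementation through
 * "mbc". Note that these macro bodies already carry a trailing semicolon,
 * so they are intended to be used as plain statements.
 */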
#define enable_icache() mbc->ie();
#define disable_icache() mbc->id();
#define flush_icache() mbc->ifl();
#define flush_icache_range(start, end) mbc->iflr(start, end);
#define invalidate_icache() mbc->iin();
#define invalidate_icache_range(start, end) mbc->iinr(start, end);

#define flush_icache_user_range(vma, pg, adr, len) flush_icache();
#define flush_icache_page(vma, pg) do { } while (0)

#define enable_dcache() mbc->de();
#define disable_dcache() mbc->dd();

#define invalidate_dcache() mbc->din();
#define invalidate_dcache_range(start, end) mbc->dinr(start, end);
#define flush_dcache() mbc->dfl();
#define flush_dcache_range(start, end) mbc->dflr(start, end);

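/*
 * The port implements flush_dcache_page() itself: the page's kernel
 * virtual address is translated to a physical address and the data cache
 * is flushed over exactly one page.
 */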
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1

#define flush_dcache_page(page) \
do { \
	unsigned long addr = (unsigned long) page_address(page); \
	addr = (u32)virt_to_phys((void *)addr); \
	flush_dcache_range((unsigned) (addr), (unsigned) (addr) + PAGE_SIZE); \
} while (0);

#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)

/* These hooks are no-ops on this port. */
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
#define flush_cache_mm(mm) do { } while (0)

/* Flush the data cache over the single physical page behind @pfn. */
#define flush_cache_page(vma, vmaddr, pfn) \
	flush_dcache_range(pfn << PAGE_SHIFT, (pfn << PAGE_SHIFT) + PAGE_SIZE);

/*
 * A range-based variant that flushed both caches is kept here disabled;
 * flush_cache_range() is defined as a no-op instead.
 */
#if 0
#define flush_cache_range(vma, start, len) { \
	flush_icache_range((unsigned) (start), (unsigned) (start) + (len)); \
	flush_dcache_range((unsigned) (start), (unsigned) (start) + (len)); \
}
#endif

#define flush_cache_range(vma, start, len) do { } while (0)

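/*
 * copy_to_user_page() is used when the kernel writes into a page that is
 * also mapped in user space, e.g. on the ptrace/access_process_vm() path.
 * After the copy, an executable mapping gets its instruction cache
 * invalidated and its data cache flushed so the CPU fetches the new text.
 *
 * A minimal usage sketch (kmaddr is assumed to be the kernel-side mapping
 * of @page at @vaddr, buf/len the data being written):
 *
 *	copy_to_user_page(vma, page, vaddr, kmaddr, buf, len);
 */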
static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, void *src, int len)
{
	u32 addr = virt_to_phys(dst);
	memcpy(dst, src, len);
	if (vma->vm_flags & VM_EXEC) {
		invalidate_icache_range(addr, addr + PAGE_SIZE);
		flush_dcache_range(addr, addr + PAGE_SIZE);
	}
}

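/*
 * copy_from_user_page() is the read-side counterpart; the data is simply
 * copied and no cache maintenance is done here.
 */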
static inline void copy_from_user_page(struct vm_area_struct *vma,
				       struct page *page, unsigned long vaddr,
				       void *dst, void *src, int len)
{
	memcpy(dst, src, len);
}

#endif /* _ASM_MICROBLAZE_CACHEFLUSH_H */