This source file includes the following definitions:
- flush_dcache_page
- update_mmu_cache
- flush_kernel_dcache_page
- flush_cache_range
4 #include <linux/kernel.h>
5 #include <linux/mm.h>
6 #include <linux/fs.h>
7 #include <linux/syscalls.h>
8 #include <linux/spinlock.h>
9 #include <asm/page.h>
10 #include <asm/cache.h>
11 #include <asm/cacheflush.h>
12 #include <asm/cachectl.h>
13
14 #define PG_dcache_clean PG_arch_1
15
16 void flush_dcache_page(struct page *page)
17 {
18 struct address_space *mapping;
19
20 if (page == ZERO_PAGE(0))
21 return;
22
23 mapping = page_mapping_file(page);
24
25 if (mapping && !page_mapcount(page))
26 clear_bit(PG_dcache_clean, &page->flags);
27 else {
28 dcache_wbinv_all();
29 if (mapping)
30 icache_inv_all();
31 set_bit(PG_dcache_clean, &page->flags);
32 }
33 }
34 EXPORT_SYMBOL(flush_dcache_page);
35
36 void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
37 pte_t *ptep)
38 {
39 unsigned long pfn = pte_pfn(*ptep);
40 struct page *page;
41
42 if (!pfn_valid(pfn))
43 return;
44
45 page = pfn_to_page(pfn);
46 if (page == ZERO_PAGE(0))
47 return;
48
49 if (!test_and_set_bit(PG_dcache_clean, &page->flags))
50 dcache_wbinv_all();
51
52 if (page_mapping_file(page)) {
53 if (vma->vm_flags & VM_EXEC)
54 icache_inv_all();
55 }
56 }
57
/*
 * Called after the kernel has written to a page via its kernel-space
 * mapping.  Flush unless this is an unmapped file page (for which the
 * flush can be left to the usual flush_dcache_page() paths).
 */
void flush_kernel_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);

	if (!mapping || mapping_mapped(mapping))
		dcache_wbinv_all();
}
EXPORT_SYMBOL(flush_kernel_dcache_page);
68
69 void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
70 unsigned long end)
71 {
72 dcache_wbinv_all();
73
74 if (vma->vm_flags & VM_EXEC)
75 icache_inv_all();
76 }