/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * OpenRISC implementation:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 * et al.
 */

#ifndef __ASM_OPENRISC_PAGE_H
#define __ASM_OPENRISC_PAGE_H


/* PAGE_SHIFT determines the page size */

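/*
 * 8 KiB pages: PAGE_SIZE is 1 << 13 == 8192 bytes.  The assembler does
 * not accept the C "UL" suffix, hence the separate __ASSEMBLY__
 * definition of PAGE_SIZE below.
 */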
#define PAGE_SHIFT 13
#ifdef __ASSEMBLY__
#define PAGE_SIZE (1 << PAGE_SHIFT)
#else
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#endif
#define PAGE_MASK (~(PAGE_SIZE-1))

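/*
 * The kernel's linear mapping of physical memory starts at virtual
 * address 0xc0000000, i.e. a traditional 3G/1G user/kernel split.
 */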
#define PAGE_OFFSET 0xc0000000
#define KERNELBASE PAGE_OFFSET

/* This is not necessarily the right place for this, but it's needed by
 * drivers/of/fdt.c
 */
#include <asm/setup.h>

#ifndef __ASSEMBLY__

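/*
 * Page clearing/copying uses plain memset()/memcpy() over PAGE_SIZE
 * bytes; the *_user_page variants ignore the user virtual address and
 * struct page arguments and fall back to the same operations.
 */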
#define clear_page(page) memset((page), 0, PAGE_SIZE)
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)

#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)

/*
 * These are used to make use of C type-checking, so that pte/pgd/pgprot
 * values cannot be mixed up by accident.
 */
typedef struct {
	unsigned long pte;
} pte_t;
typedef struct {
	unsigned long pgd;
} pgd_t;
typedef struct {
	unsigned long pgprot;
} pgprot_t;
typedef struct page *pgtable_t;

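/* Unwrap (xxx_val) and wrap (__xxx) the typed page table values. */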
#define pte_val(x) ((x).pte)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)

#define __pte(x) ((pte_t) { (x) })
#define __pgd(x) ((pgd_t) { (x) })
#define __pgprot(x) ((pgprot_t) { (x) })

#endif /* !__ASSEMBLY__ */


#ifndef __ASSEMBLY__

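/*
 * The kernel linear mapping is a fixed offset, so physical <-> virtual
 * conversion is plain arithmetic: e.g. __pa(0xc0004000) == 0x4000 and
 * virt_to_pfn(0xc0004000) == 0x4000 >> PAGE_SHIFT == 2.
 */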
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)

#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)

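/*
 * Flat memory model: mem_map is indexed directly by pfn, and a pfn is
 * valid iff it lies below max_mapnr.
 */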
#define virt_to_page(addr) \
	(mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT))

#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

#define pfn_valid(pfn) ((pfn) < max_mapnr)

#define virt_addr_valid(kaddr) (pfn_valid(virt_to_pfn(kaddr)))

#endif /* !__ASSEMBLY__ */


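/*
 * Default protection flags for data/heap mappings: readable, writable
 * and executable, plus the matching VM_MAY* bits so mprotect() may
 * toggle them later.
 */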
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
			       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)


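/*
 * asm-generic provides page_to_pfn()/pfn_to_page() (memory_model.h)
 * and get_order() (getorder.h).
 */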
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* __ASM_OPENRISC_PAGE_H */