/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>

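/*
 * COLOUR_ALIGN() rounds "addr" up to an SHMLBA boundary and then adds
 * the cache colour of "pgoff", so that the given page of an object is
 * always mapped at the same offset within an SHMLBA-sized window.
 */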
#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

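/*
 * Select the legacy (bottom-up) mmap layout if the personality
 * requests it, if the stack may grow without limit, or if the
 * legacy sysctl is set.
 */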
static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

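/*
 * Base of the top-down mmap area: TASK_SIZE minus the stack gap
 * (clamped between MIN_GAP and MAX_GAP) minus the random offset.
 */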
static unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}

/*
 * Shared mappings need to be correctly aligned to avoid aliasing
 * issues with VIPT caches: a specific page of an object must always
 * be mapped at a multiple of SHMLBA bytes.
 *
 * We provide this function unconditionally for all cases; in the
 * VIVT case the alignment rules are optimised away.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/*
	 * Honour MAP_FIXED, but reject mis-coloured shared mappings
	 * on aliasing caches.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

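	/*
	 * No usable hint: search bottom-up from mmap_base to TASK_SIZE,
	 * applying the colour mask when do_align is set.
	 */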
	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			const unsigned long len, const unsigned long pgoff,
			const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

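	/*
	 * As in the bottom-up case: honour MAP_FIXED, but reject
	 * mis-coloured shared mappings on aliasing caches.
	 */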
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

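	/*
	 * Search top-down between FIRST_USER_ADDRESS and mmap_base.
	 */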
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = FIRST_USER_ADDRESS;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

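/*
 * Compute the random offset applied to the mmap base: 8 bits of
 * entropy at page granularity, i.e. up to 255 pages (just under
 * 1MiB with 4KiB pages).
 */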
unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

	/* 8 bits of randomness in 20 address space bits */
	rnd = (unsigned long)get_random_int() % (1 << 8);

	return rnd << PAGE_SHIFT;
}

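/*
 * Called at exec time to pick the mmap layout for the new mm:
 * bottom-up from TASK_UNMAPPED_BASE for the legacy layout,
 * top-down from mmap_base() otherwise.
 */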
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
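/*
 * Only accept ranges inside the directly mapped RAM: from PHYS_OFFSET
 * up to the physical address corresponding to high_memory.
 */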
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks whether /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}

#endif