/*
 * flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/pgalloc.h>

unsigned long mmap_rnd_mask;
static unsigned long mmap_align_mask;

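/*
 * Maximum number of bytes the stack start may be shifted down by
 * randomization, or 0 if randomization is disabled for this task.
 */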
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (current->personality & ADDR_NO_RANDOMIZE)
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~32 MB hole.
 */
#define MIN_GAP (32*1024*1024)
#define MAX_GAP (STACK_TOP/6*5)

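/*
 * Use the legacy bottom-up layout if the personality requests it,
 * if the stack may grow without limit, or if the sysctl forces it.
 */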
static inline int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

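/*
 * Page-aligned random offset added to the mmap base.  Compat (31-bit)
 * tasks get up to 2047 pages of randomization; 64-bit tasks use the
 * machine-dependent mask set up by setup_mmap_rnd() below.
 */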
unsigned long arch_mmap_rnd(void)
{
	if (is_32bit_task())
		return (get_random_int() & 0x7ff) << PAGE_SHIFT;
	else
		return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
}

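/* Bottom-up layout: mappings start just above TASK_UNMAPPED_BASE. */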
static unsigned long mmap_base_legacy(unsigned long rnd)
{
	return TASK_UNMAPPED_BASE + rnd;
}

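/*
 * Top-down layout: place the mmap base below the stack, leaving a gap
 * of the stack rlimit clamped to [MIN_GAP, MAX_GAP] plus room for the
 * stack randomization.  Worked example (illustrative numbers only):
 * with an 8 MB stack rlimit the gap is raised to the 32 MB MIN_GAP,
 * so the base ends up 32 MB plus the two random offsets below
 * STACK_TOP.
 */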
static inline unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;
	gap &= PAGE_MASK;
	return STACK_TOP - stack_maxrandom_size() - rnd - gap;
}

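/*
 * Bottom-up search for a free area between mm->mmap_base and TASK_SIZE.
 * An explicit address hint is honoured if the range is free; file and
 * shared mappings of 64-bit tasks are additionally aligned according
 * to mmap_align_mask.
 */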
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	int do_color_align;

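	/* requested length too big for entire address space */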
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

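	/* requesting a specific address */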
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = !is_32bit_task();

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

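/*
 * Top-down counterpart of arch_get_unmapped_area(): search below
 * mm->mmap_base first, falling back to a bottom-up search on failure.
 */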
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;
	int do_color_align;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = !is_32bit_task();

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

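/*
 * Allow a MAP_FIXED request above the current limit to succeed by
 * upgrading the page table to 4 levels, which extends the address
 * space to 2^53 bytes.  Compat tasks and already-upgraded address
 * spaces need no action.
 */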
int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
{
	if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
		return 0;
	if (!(flags & MAP_FIXED))
		addr = 0;
	if ((addr + len) >= TASK_SIZE)
		return crst_table_upgrade(current->mm, 1UL << 53);
	return 0;
}

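/*
 * Wrapper around arch_get_unmapped_area(): if the bottom-up search
 * runs out of space below the current limit, upgrade the page table
 * to 4 levels and retry.
 */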
static unsigned long
s390_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	}
	return area;
}

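/* Same as s390_get_unmapped_area(), but for the top-down allocator. */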
static unsigned long
s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area_topdown(filp, addr, len,
						      pgoff, flags);
	}
	return area;
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = mmap_base_legacy(random_factor);
		mm->get_unmapped_area = s390_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
	}
}

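/*
 * Select the randomization and alignment masks by machine type.  On
 * z13 (0x2964) and unrecognized (newer) machines, mappings are aligned
 * to 512 KB boundaries (an align mask of 0x7f pages with 4 KB pages)
 * and the randomization range is widened to match.
 */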
static int __init setup_mmap_rnd(void)
{
	struct cpuid cpu_id;

	get_cpu_id(&cpu_id);
	switch (cpu_id.machine) {
	case 0x9672:
	case 0x2064:
	case 0x2066:
	case 0x2084:
	case 0x2086:
	case 0x2094:
	case 0x2096:
	case 0x2097:
	case 0x2098:
	case 0x2817:
	case 0x2818:
	case 0x2827:
	case 0x2828:
		mmap_rnd_mask = 0x7ffUL;
		mmap_align_mask = 0UL;
		break;
	case 0x2964:	/* z13 */
	default:
		mmap_rnd_mask = 0x3ff80UL;
		mmap_align_mask = 0x7fUL;
		break;
	}
	return 0;
}
early_initcall(setup_mmap_rnd);