This source file includes the following definitions:
- stack_maxrandom_size
- mmap_is_legacy
- arch_mmap_rnd
- mmap_base_legacy
- mmap_base
- arch_get_unmapped_area
- arch_get_unmapped_area_topdown
- arch_pick_mmap_layout
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>

static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}
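
/*
 * Illustration (hypothetical values; the real mask is per-architecture):
 * with PAGE_SHIFT = 12 and STACK_RND_MASK = 0x7ff, the function above
 * returns 0x7ff << 12 = 8 MB - 4 KB, i.e. the stack base may be
 * randomized within an ~8 MB window when PF_RANDOMIZE is set.
 */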

static inline int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

unsigned long arch_mmap_rnd(void)
{
	return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
}
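
/*
 * Illustration (hypothetical mask): if MMAP_RND_MASK were 0x3ff and
 * PAGE_SHIFT were 12, arch_mmap_rnd() would return a page-aligned
 * offset in the range 0..4 MB - 4 KB, so the mmap base moves within
 * a ~4 MB window from run to run.
 */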

static unsigned long mmap_base_legacy(unsigned long rnd)
{
	return TASK_UNMAPPED_BASE + rnd;
}

static inline unsigned long mmap_base(unsigned long rnd,
				      struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
	unsigned long gap_min, gap_max;

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	/*
	 * Top of mmap area (just below the process stack).
	 * Leave at least a ~32 MB hole.
	 */
	gap_min = 32 * 1024 * 1024UL;
	gap_max = (STACK_TOP / 6) * 5;

	if (gap < gap_min)
		gap = gap_min;
	else if (gap > gap_max)
		gap = gap_max;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}
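
/*
 * Worked example (hypothetical numbers): with an 8 MB stack rlimit,
 * gap starts at 8 MB plus the randomization/guard pad; assuming the
 * padded value still falls below the 32 MB minimum, gap is raised to
 * gap_min and the base becomes PAGE_ALIGN(STACK_TOP - 32 MB - rnd),
 * so the mmap area always begins well below the stack.
 */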

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	int rc;

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
	if (addr & ~PAGE_MASK)
		return addr;

check_asce_limit:
	if (addr + len > current->mm->context.asce_limit &&
	    addr + len <= TASK_SIZE) {
		rc = crst_table_upgrade(mm, addr + len);
		if (rc)
			return (unsigned long) rc;
	}

	return addr;
}
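
/*
 * Note on the alignment logic above: for file-backed or MAP_SHARED
 * mappings, align_mask/align_offset ask vm_unmapped_area() for an
 * address whose low bits match the file offset (pgoff), giving the
 * mapping the extra alignment the architecture requests through
 * MMAP_ALIGN_MASK; anonymous private mappings need no such alignment.
 */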

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;
	int rc;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
		if (addr & ~PAGE_MASK)
			return addr;
	}

check_asce_limit:
	if (addr + len > current->mm->context.asce_limit &&
	    addr + len <= TASK_SIZE) {
		rc = crst_table_upgrade(mm, addr + len);
		if (rc)
			return (unsigned long) rc;
	}

	return addr;
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	/*
	 * Fall back to the legacy (bottom-up) layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = mmap_base_legacy(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
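
The layout picked above is observable from user space: under the top-down
layout, consecutive anonymous mmap() calls typically return descending
addresses just below mm->mmap_base, while the legacy layout (for example
a process started with "setarch -L", which sets the ADDR_COMPAT_LAYOUT
personality checked in mmap_is_legacy()) hands out ascending addresses
from TASK_UNMAPPED_BASE. A minimal user-space sketch, not part of this
file, to see the difference:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* Two 1 MB anonymous mappings with no address hint. */
	void *a = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	void *b = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* Top-down layout: the second address is typically below the
	 * first; legacy layout: typically above it. */
	printf("first  %p\nsecond %p\n", a, b);
	return 0;
}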