This source file includes the following definitions:
- process_vm_rw_pages
- process_vm_rw_single_vec
- process_vm_rw_core
- process_vm_rw
- SYSCALL_DEFINE6 (process_vm_readv)
- SYSCALL_DEFINE6 (process_vm_writev)
- compat_process_vm_rw
- COMPAT_SYSCALL_DEFINE6 (process_vm_readv)
- COMPAT_SYSCALL_DEFINE6 (process_vm_writev)
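Together these implement the process_vm_readv(2) and process_vm_writev(2) system calls, which transfer data directly between the calling process's address space and that of a target process in a single copy, without staging it through an intermediate kernel buffer.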
/*
 * mm/process_vm_access.c
 *
 * Copy data between the address spaces of two processes
 * (process_vm_readv/process_vm_writev).
 */
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif

/**
 * process_vm_rw_pages - copy between pinned pages and a local iov_iter
 * @pages: array of pointers to the pinned pages we are copying into/out of
 * @offset: offset in the first page at which to start copying
 * @len: number of bytes to copy
 * @iter: where to copy to/from locally
 * @vm_write: 0 means copy from the pages, 1 means copy to them
 * Returns 0 on success, error code otherwise
 */
static int process_vm_rw_pages(struct page **pages,
			       unsigned offset,
			       size_t len,
			       struct iov_iter *iter,
			       int vm_write)
{
	/* Do the copy for each page */
	while (len && iov_iter_count(iter)) {
		struct page *page = *pages++;
		size_t copy = PAGE_SIZE - offset;
		size_t copied;

		if (copy > len)
			copy = len;

		if (vm_write) {
			copied = copy_page_from_iter(page, offset, copy, iter);
			set_page_dirty_lock(page);
		} else {
			copied = copy_page_to_iter(page, offset, copy, iter);
		}
		len -= copied;
		if (copied < copy && iov_iter_count(iter))
			return -EFAULT;
		offset = 0;
	}
	return 0;
}

/* Maximum number of pages kmalloc'ed to hold struct page pointers per copy */
#define PVM_MAX_KMALLOC_PAGES (PAGE_SIZE * 2)

/**
 * process_vm_rw_single_vec - read/write a single remote iovec
 * @addr: start memory address in the target process
 * @len: size of the area to copy to/from
 * @iter: where to copy to/from locally
 * @process_pages: array able to hold at least max_pages_per_loop
 *  struct page pointers
 * @mm: mm of the target task
 * @task: task to read from/write to
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success or an error code on failure
 */
static int process_vm_rw_single_vec(unsigned long addr,
				    unsigned long len,
				    struct iov_iter *iter,
				    struct page **process_pages,
				    struct mm_struct *mm,
				    struct task_struct *task,
				    int vm_write)
{
	unsigned long pa = addr & PAGE_MASK;
	unsigned long start_offset = addr - pa;
	unsigned long nr_pages;
	ssize_t rc = 0;
	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
		/ sizeof(struct page *);
	unsigned int flags = 0;

	/* Work out the address and page range required */
	if (len == 0)
		return 0;
	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
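
	/*
	 * Worked example of the nr_pages computation above (illustrative
	 * values, assuming PAGE_SIZE == 4096): addr = 0x1ffc, len = 8
	 * straddles two pages, and indeed
	 * (0x1ffc + 8 - 1)/4096 - 0x1ffc/4096 + 1 = 2 - 1 + 1 = 2.
	 */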

	if (vm_write)
		flags |= FOLL_WRITE;

	while (!rc && nr_pages && iov_iter_count(iter)) {
		int pages = min(nr_pages, max_pages_per_loop);
		int locked = 1;
		size_t bytes;

		/*
		 * Pin the pages we're interested in.  We must
		 * access them remotely because task/mm need not be
		 * current/current->mm.
		 */
		down_read(&mm->mmap_sem);
		pages = get_user_pages_remote(task, mm, pa, pages, flags,
					      process_pages, NULL, &locked);
		if (locked)
			up_read(&mm->mmap_sem);
		if (pages <= 0)
			return -EFAULT;

		bytes = pages * PAGE_SIZE - start_offset;
		if (bytes > len)
			bytes = len;

		rc = process_vm_rw_pages(process_pages,
					 start_offset, bytes, iter,
					 vm_write);
		len -= bytes;
		start_offset = 0;
		nr_pages -= pages;
		pa += pages * PAGE_SIZE;
		while (pages)
			put_page(process_pages[--pages]);
	}

	return rc;
}

/* Maximum number of entries for the process pages array
   that lives on the stack */
#define PVM_MAX_PP_ARRAY_COUNT 16

/**
 * process_vm_rw_core - core of reading/writing pages from the task specified
 * @pid: PID of the process to read from/write to
 * @iter: where to copy to/from locally
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of the rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from the other process, 1 if writing to it
 *
 * Returns the number of bytes read/written or an error code. May
 * return fewer bytes than expected if an error occurs during the
 * copying process.
 */
static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
				  const struct iovec *rvec,
				  unsigned long riovcnt,
				  unsigned long flags, int vm_write)
{
	struct task_struct *task;
	struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT];
	struct page **process_pages = pp_stack;
	struct mm_struct *mm;
	unsigned long i;
	ssize_t rc = 0;
	unsigned long nr_pages = 0;
	unsigned long nr_pages_iov;
	ssize_t iov_len;
	size_t total_len = iov_iter_count(iter);

	/*
	 * Work out how many struct page pointers we're going to need
	 * when eventually calling get_user_pages_remote()
	 */
	for (i = 0; i < riovcnt; i++) {
		iov_len = rvec[i].iov_len;
		if (iov_len > 0) {
			nr_pages_iov = ((unsigned long)rvec[i].iov_base
					+ iov_len)
				/ PAGE_SIZE - (unsigned long)rvec[i].iov_base
				/ PAGE_SIZE + 1;
			nr_pages = max(nr_pages, nr_pages_iov);
		}
	}

	if (nr_pages == 0)
		return 0;

	if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
		/* For reliability don't try to kmalloc more than
		   two pages worth */
		process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
					      sizeof(struct page *)*nr_pages),
					GFP_KERNEL);

		if (!process_pages)
			return -ENOMEM;
	}

	/* Get process information */
	task = find_get_task_by_vpid(pid);
	if (!task) {
		rc = -ESRCH;
		goto free_proc_pages;
	}

	mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	if (!mm || IS_ERR(mm)) {
		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		/*
		 * Explicitly map EACCES to EPERM as EPERM is a more
		 * appropriate error code for process_vm_readv/writev
		 */
		if (rc == -EACCES)
			rc = -EPERM;
		goto put_task_struct;
	}

	/* Now copy the iovecs to/from the other process */
	for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
		rc = process_vm_rw_single_vec(
			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
			iter, process_pages, mm, task, vm_write);

	/* bytes copied = space before - space after */
	total_len -= iov_iter_count(iter);

	/*
	 * If we have managed to copy any data at all then
	 * we return the number of bytes copied. Otherwise
	 * we return the error code.
	 */
	if (total_len)
		rc = total_len;

	mmput(mm);

put_task_struct:
	put_task_struct(task);

free_proc_pages:
	if (process_pages != pp_stack)
		kfree(process_pages);
	return rc;
}

/**
 * process_vm_rw - check iovecs before calling the core routine
 * @pid: PID of the process to read from/write to
 * @lvec: iovec array specifying where to copy to/from locally
 * @liovcnt: size of the lvec array
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of the rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from the other process, 1 if writing to it
 *
 * Returns the number of bytes read/written or an error code. May
 * return fewer bytes than expected if an error occurs during the
 * copying process.
 */
static ssize_t process_vm_rw(pid_t pid,
			     const struct iovec __user *lvec,
			     unsigned long liovcnt,
			     const struct iovec __user *rvec,
			     unsigned long riovcnt,
			     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r = iovstack_r;
	struct iov_iter iter;
	ssize_t rc;
	int dir = vm_write ? WRITE : READ;

	if (flags != 0)
		return -EINVAL;

	/* Check iovecs */
	rc = import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
	if (rc < 0)
		return rc;
	if (!iov_iter_count(&iter))
		goto free_iovecs;

	rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
				   iovstack_r, &iov_r);
	if (rc <= 0)
		goto free_iovecs;

	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);

free_iovecs:
	if (iov_r != iovstack_r)
		kfree(iov_r);
	/*
	 * import_iovec() sets iov_l to NULL when the on-stack array was
	 * used, and kfree(NULL) is a no-op, so this is safe either way.
	 */
	kfree(iov_l);

	return rc;
}

SYSCALL_DEFINE6(process_vm_readv, pid_t, pid, const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 0);
}

SYSCALL_DEFINE6(process_vm_writev, pid_t, pid,
		const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 1);
}
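
/*
 * Userspace usage sketch (illustrative, not part of this file): reading
 * a buffer from another process.  `target_pid` and `remote_addr` are
 * hypothetical values; the caller needs the same ptrace-attach
 * permission that mm_access() checks above.
 *
 *	char buf[64];
 *	struct iovec local  = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct iovec remote = { .iov_base = (void *)remote_addr,
 *				.iov_len = sizeof(buf) };
 *	ssize_t nread = process_vm_readv(target_pid, &local, 1,
 *					 &remote, 1, 0);
 *
 * nread less than sizeof(buf) indicates a partial transfer; -1 means
 * an error, reported via errno.
 */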

#ifdef CONFIG_COMPAT

static ssize_t
compat_process_vm_rw(compat_pid_t pid,
		     const struct compat_iovec __user *lvec,
		     unsigned long liovcnt,
		     const struct compat_iovec __user *rvec,
		     unsigned long riovcnt,
		     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r = iovstack_r;
	struct iov_iter iter;
	ssize_t rc = -EFAULT;
	int dir = vm_write ? WRITE : READ;

	if (flags != 0)
		return -EINVAL;

	rc = compat_import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
	if (rc < 0)
		return rc;
	if (!iov_iter_count(&iter))
		goto free_iovecs;
	rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
					  UIO_FASTIOV, iovstack_r,
					  &iov_r);
	if (rc <= 0)
		goto free_iovecs;

	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);

free_iovecs:
	if (iov_r != iovstack_r)
		kfree(iov_r);
	kfree(iov_l);
	return rc;
}

COMPAT_SYSCALL_DEFINE6(process_vm_readv, compat_pid_t, pid,
		       const struct compat_iovec __user *, lvec,
		       compat_ulong_t, liovcnt,
		       const struct compat_iovec __user *, rvec,
		       compat_ulong_t, riovcnt,
		       compat_ulong_t, flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 0);
}

COMPAT_SYSCALL_DEFINE6(process_vm_writev, compat_pid_t, pid,
		       const struct compat_iovec __user *, lvec,
		       compat_ulong_t, liovcnt,
		       const struct compat_iovec __user *, rvec,
		       compat_ulong_t, riovcnt,
		       compat_ulong_t, flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 1);
}

#endif