/*
 * Stress userfaultfd syscall.
 *
 * Copyright (C) 2015 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * This test allocates two virtual areas and bounces the physical
 * memory across the two virtual areas (from area_src to area_dst)
 * using userfaultfd.
 *
 * There are three threads running per CPU:
 *
 * 1) one per-CPU thread takes a per-page pthread_mutex in a random
 *    page of the area_dst (while the physical page may still be in
 *    area_src), increments a per-page counter in the same page,
 *    and checks its value against a verification region.
 *
 * 2) another per-CPU thread handles the userfaults generated by
 *    thread 1 above. The userfaultfd blocking-read and poll() modes
 *    are exercised in alternation.
 *
 * 3) one last per-CPU thread transfers the memory in the background
 *    at maximum bandwidth (if not already transferred by thread
 *    2). Each CPU thread takes care of transferring a portion of the
 *    area.
 *
 * When all threads of type 3 have completed the transfer, one bounce
 * is complete. area_src and area_dst are then swapped. All threads
 * are respawned and so the bounce is immediately restarted in the
 * opposite direction.
 *
 * The per-CPU threads of type 1, by triggering userfaults inside
 * pthread_mutex_lock, also verify the atomicity of the memory
 * transfer (UFFDIO_COPY).
 *
 * The program takes two parameters: the amount of physical memory in
 * megabytes (MiB) of the area and the number of bounces to execute.
 *
 * # 100MiB 99999 bounces
 * ./userfaultfd 100 99999
 *
 * # 1GiB 99 bounces
 * ./userfaultfd 1000 99
 *
 * # 10MiB-~6GiB 999 bounces, continue forever unless an error triggers
 * while ./userfaultfd $[RANDOM % 6000 + 10] 999; do true; done
 */

#define _GNU_SOURCE
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <time.h>
#include <signal.h>
#include <poll.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <pthread.h>
#include <linux/userfaultfd.h>

#ifdef __NR_userfaultfd

static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;

#define BOUNCE_RANDOM		(1<<0)
#define BOUNCE_RACINGFAULTS	(1<<1)
#define BOUNCE_VERIFY		(1<<2)
#define BOUNCE_POLL		(1<<3)
static int bounces;

static unsigned long long *count_verify;
static int uffd, finished, *pipefd;
static char *area_src, *area_dst;
static char *zeropage;
pthread_attr_t attr;

/* pthread_mutex_t starts at page offset 0 */
#define area_mutex(___area, ___nr)					\
	((pthread_mutex_t *) ((___area) + (___nr)*page_size))
/*
 * count is placed in the page after pthread_mutex_t naturally aligned
 * to avoid alignment faults on non-x86 archs.
 */
#define area_count(___area, ___nr)					\
	((volatile unsigned long long *) ((unsigned long)		\
				 ((___area) + (___nr)*page_size +	\
				  sizeof(pthread_mutex_t) +		\
				  sizeof(unsigned long long) - 1) &	\
				 ~(unsigned long)(sizeof(unsigned long long) \
						  - 1)))

/* Return 0 if the two buffers are equal over "n" bytes, non-zero otherwise. */
static int my_bcmp(char *str1, char *str2, size_t n)
{
	unsigned long i;
	for (i = 0; i < n; i++)
		if (str1[i] != str2[i])
			return 1;
	return 0;
}

/*
 * Thread of type 1: take the per-page mutex of a random (or
 * sequential) page of area_dst, increment its counter and verify it
 * against count_verify[].
 */
static void *locking_thread(void *arg)
{
	unsigned long cpu = (unsigned long) arg;
	struct random_data rand;
	unsigned long page_nr = *(&(page_nr)); /* silence uninitialized warning */
	int32_t rand_nr;
	unsigned long long count;
	char randstate[64];
	unsigned int seed;
	time_t start;

	if (bounces & BOUNCE_RANDOM) {
		seed = (unsigned int) time(NULL) - bounces;
		if (!(bounces & BOUNCE_RACINGFAULTS))
			seed += cpu;
		bzero(&rand, sizeof(rand));
		bzero(&randstate, sizeof(randstate));
		if (initstate_r(seed, randstate, sizeof(randstate), &rand))
			fprintf(stderr, "initstate_r error\n"), exit(1);
	} else {
		page_nr = -bounces;
		if (!(bounces & BOUNCE_RACINGFAULTS))
			page_nr += cpu * nr_pages_per_cpu;
	}

	while (!finished) {
		if (bounces & BOUNCE_RANDOM) {
			if (random_r(&rand, &rand_nr))
				fprintf(stderr, "random_r 1 error\n"), exit(1);
			page_nr = rand_nr;
			if (sizeof(page_nr) > sizeof(rand_nr)) {
				if (random_r(&rand, &rand_nr))
					fprintf(stderr, "random_r 2 error\n"), exit(1);
				page_nr |= (((unsigned long) rand_nr) << 16) << 16;
			}
		} else
			page_nr += 1;
		page_nr %= nr_pages;

		start = time(NULL);
		if (bounces & BOUNCE_VERIFY) {
			count = *area_count(area_dst, page_nr);
			if (!count)
				fprintf(stderr,
					"page_nr %lu wrong count %Lu %Lu\n",
					page_nr, count,
					count_verify[page_nr]), exit(1);

			/*
			 * We can't use bcmp (or memcmp) because that
			 * returns 0 erroneously if the memory is
			 * changing under it (even if the end of the
			 * page is never changing and always
			 * different).
			 */
#if 1
			if (!my_bcmp(area_dst + page_nr * page_size, zeropage,
				     page_size))
				fprintf(stderr,
					"my_bcmp page_nr %lu wrong count %Lu %Lu\n",
					page_nr, count,
					count_verify[page_nr]), exit(1);
#else
			unsigned long loops;

			loops = 0;
			/* uncomment the below line to test with mutex */
			/* pthread_mutex_lock(area_mutex(area_dst, page_nr)); */
			while (!bcmp(area_dst + page_nr * page_size, zeropage,
				     page_size)) {
				loops += 1;
				if (loops > 10)
					break;
			}
			/* uncomment the below line to test with mutex */
			/* pthread_mutex_unlock(area_mutex(area_dst, page_nr)); */
			if (loops) {
				fprintf(stderr,
					"page_nr %lu all zero thread %lu %p %lu\n",
					page_nr, cpu,
					area_dst + page_nr * page_size,
					loops);
				if (loops > 10)
					exit(1);
			}
#endif
		}

		pthread_mutex_lock(area_mutex(area_dst, page_nr));
		count = *area_count(area_dst, page_nr);
		if (count != count_verify[page_nr]) {
			fprintf(stderr,
				"page_nr %lu memory corruption %Lu %Lu\n",
				page_nr, count,
				count_verify[page_nr]), exit(1);
		}
		count++;
		*area_count(area_dst, page_nr) = count_verify[page_nr] = count;
		pthread_mutex_unlock(area_mutex(area_dst, page_nr));

		if (time(NULL) - start > 1)
			fprintf(stderr,
				"userfault too slow %ld "
				"possible false positive with overcommit\n",
				time(NULL) - start);
	}

	return NULL;
}

/*
 * Resolve the page at "offset" with UFFDIO_COPY: return 1 if this
 * caller performed the copy, 0 if the page had already been resolved
 * (-EEXIST).
 */
static int copy_page(unsigned long offset)
{
	struct uffdio_copy uffdio_copy;

	if (offset >= nr_pages * page_size)
		fprintf(stderr, "unexpected offset %lu\n",
			offset), exit(1);
	uffdio_copy.dst = (unsigned long) area_dst + offset;
	uffdio_copy.src = (unsigned long) area_src + offset;
	uffdio_copy.len = page_size;
	uffdio_copy.mode = 0;
	uffdio_copy.copy = 0;
	if (ioctl(uffd, UFFDIO_COPY, &uffdio_copy)) {
		/* real retval in uffdio_copy.copy */
		if (uffdio_copy.copy != -EEXIST)
			fprintf(stderr, "UFFDIO_COPY error %Ld\n",
				uffdio_copy.copy), exit(1);
	} else if (uffdio_copy.copy != page_size) {
		fprintf(stderr, "UFFDIO_COPY unexpected copy %Ld\n",
			uffdio_copy.copy), exit(1);
	} else
		return 1;
	return 0;
}

/*
 * Thread of type 2, poll() mode: resolve userfaults read from the
 * non-blocking uffd; exits when the per-CPU pipe becomes readable.
 */
static void *uffd_poll_thread(void *arg)
{
	unsigned long cpu = (unsigned long) arg;
	struct pollfd pollfd[2];
	struct uffd_msg msg;
	int ret;
	unsigned long offset;
	char tmp_chr;
	unsigned long userfaults = 0;

	pollfd[0].fd = uffd;
	pollfd[0].events = POLLIN;
	pollfd[1].fd = pipefd[cpu*2];
	pollfd[1].events = POLLIN;

	for (;;) {
		ret = poll(pollfd, 2, -1);
		if (!ret)
			fprintf(stderr, "poll error %d\n", ret), exit(1);
		if (ret < 0)
			perror("poll"), exit(1);
		if (pollfd[1].revents & POLLIN) {
			if (read(pollfd[1].fd, &tmp_chr, 1) != 1)
				fprintf(stderr, "read pipefd error\n"),
					exit(1);
			break;
		}
		if (!(pollfd[0].revents & POLLIN))
			fprintf(stderr, "pollfd[0].revents %d\n",
				pollfd[0].revents), exit(1);
		ret = read(uffd, &msg, sizeof(msg));
		if (ret < 0) {
			if (errno == EAGAIN)
				continue;
			perror("nonblocking read error"), exit(1);
		}
		if (msg.event != UFFD_EVENT_PAGEFAULT)
			fprintf(stderr, "unexpected msg event %u\n",
				msg.event), exit(1);
		if (msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE)
			fprintf(stderr, "unexpected write fault\n"), exit(1);
		offset = (char *)(unsigned long)msg.arg.pagefault.address -
			 area_dst;
		offset &= ~(page_size-1);
		if (copy_page(offset))
			userfaults++;
	}
	return (void *)userfaults;
}

pthread_mutex_t uffd_read_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Thread of type 2, blocking mode: resolve userfaults with blocking
 * reads on the uffd; terminated via pthread_cancel().
 */
static void *uffd_read_thread(void *arg)
{
	unsigned long *this_cpu_userfaults;
	struct uffd_msg msg;
	unsigned long offset;
	int ret;

	this_cpu_userfaults = (unsigned long *) arg;
	*this_cpu_userfaults = 0;

	pthread_mutex_unlock(&uffd_read_mutex);
	/* from here cancellation is ok */

	for (;;) {
		ret = read(uffd, &msg, sizeof(msg));
		if (ret != sizeof(msg)) {
			if (ret < 0)
				perror("blocking read error"), exit(1);
			else
				fprintf(stderr, "short read\n"), exit(1);
		}
		if (msg.event != UFFD_EVENT_PAGEFAULT)
			fprintf(stderr, "unexpected msg event %u\n",
				msg.event), exit(1);
		if (bounces & BOUNCE_VERIFY &&
		    msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE)
			fprintf(stderr, "unexpected write fault\n"), exit(1);
		offset = (char *)(unsigned long)msg.arg.pagefault.address -
			 area_dst;
		offset &= ~(page_size-1);
		if (copy_page(offset))
			(*this_cpu_userfaults)++;
	}
	return (void *)NULL;
}

/* Thread of type 3: copy this CPU's share of the area at full speed. */
static void *background_thread(void *arg)
{
	unsigned long cpu = (unsigned long) arg;
	unsigned long page_nr;

	for (page_nr = cpu * nr_pages_per_cpu;
	     page_nr < (cpu+1) * nr_pages_per_cpu;
	     page_nr++)
		copy_page(page_nr * page_size);

	return NULL;
}

/*
 * Run a single bounce: spawn all the per-CPU threads, wait for the
 * background copy to finish, then tear everything down.
 */
static int stress(unsigned long *userfaults)
{
	unsigned long cpu;
	pthread_t locking_threads[nr_cpus];
	pthread_t uffd_threads[nr_cpus];
	pthread_t background_threads[nr_cpus];
	void **_userfaults = (void **) userfaults;

	finished = 0;
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		if (pthread_create(&locking_threads[cpu], &attr,
				   locking_thread, (void *)cpu))
			return 1;
		if (bounces & BOUNCE_POLL) {
			if (pthread_create(&uffd_threads[cpu], &attr,
					   uffd_poll_thread, (void *)cpu))
				return 1;
		} else {
			if (pthread_create(&uffd_threads[cpu], &attr,
					   uffd_read_thread,
					   &_userfaults[cpu]))
				return 1;
			pthread_mutex_lock(&uffd_read_mutex);
		}
		if (pthread_create(&background_threads[cpu], &attr,
				   background_thread, (void *)cpu))
			return 1;
	}
	for (cpu = 0; cpu < nr_cpus; cpu++)
		if (pthread_join(background_threads[cpu], NULL))
			return 1;

	/*
	 * Be strict and immediately zap area_src: the whole area has
	 * already been transferred by the background threads. The
	 * area_src could then be faulted in, in a racy way, by still
	 * running uffd threads reading zeropages after we zapped
	 * area_src (but they're guaranteed to get -EEXIST from
	 * UFFDIO_COPY without writing zero pages into area_dst
	 * because the background threads already completed).
	 */
	if (madvise(area_src, nr_pages * page_size, MADV_DONTNEED)) {
		perror("madvise");
		return 1;
	}

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		char c;
		if (bounces & BOUNCE_POLL) {
			if (write(pipefd[cpu*2+1], &c, 1) != 1) {
				fprintf(stderr, "pipefd write error\n");
				return 1;
			}
			if (pthread_join(uffd_threads[cpu], &_userfaults[cpu]))
				return 1;
		} else {
			if (pthread_cancel(uffd_threads[cpu]))
				return 1;
			if (pthread_join(uffd_threads[cpu], NULL))
				return 1;
		}
	}

	finished = 1;
	for (cpu = 0; cpu < nr_cpus; cpu++)
		if (pthread_join(locking_threads[cpu], NULL))
			return 1;

	return 0;
}

/* Allocate the two areas, open the uffd and run "bounces" bounce passes. */
static int userfaultfd_stress(void)
{
	void *area;
	char *tmp_area;
	unsigned long nr;
	struct uffdio_register uffdio_register;
	struct uffdio_api uffdio_api;
	unsigned long cpu;
	int uffd_flags, err;
	unsigned long userfaults[nr_cpus];

	if (posix_memalign(&area, page_size, nr_pages * page_size)) {
		fprintf(stderr, "out of memory\n");
		return 1;
	}
	area_src = area;
	if (posix_memalign(&area, page_size, nr_pages * page_size)) {
		fprintf(stderr, "out of memory\n");
		return 1;
	}
	area_dst = area;

	uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	if (uffd < 0) {
		fprintf(stderr,
			"userfaultfd syscall not available in this kernel\n");
		return 1;
	}
	uffd_flags = fcntl(uffd, F_GETFD, NULL);

	uffdio_api.api = UFFD_API;
	uffdio_api.features = 0;
	if (ioctl(uffd, UFFDIO_API, &uffdio_api)) {
		fprintf(stderr, "UFFDIO_API\n");
		return 1;
	}
	if (uffdio_api.api != UFFD_API) {
		fprintf(stderr, "UFFDIO_API error %Lu\n", uffdio_api.api);
		return 1;
	}

	count_verify = malloc(nr_pages * sizeof(unsigned long long));
	if (!count_verify) {
		perror("count_verify");
		return 1;
	}

	for (nr = 0; nr < nr_pages; nr++) {
		*area_mutex(area_src, nr) = (pthread_mutex_t)
			PTHREAD_MUTEX_INITIALIZER;
		count_verify[nr] = *area_count(area_src, nr) = 1;
		/*
		 * In the transition from 255 to 256, powerpc will
		 * read out of order in my_bcmp and see both bytes as
		 * zero, so leave a placeholder below that is always
		 * non-zero after the count, to avoid my_bcmp
		 * triggering false positives.
		 */
		*(area_count(area_src, nr) + 1) = 1;
	}

	pipefd = malloc(sizeof(int) * nr_cpus * 2);
	if (!pipefd) {
		perror("pipefd");
		return 1;
	}
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		if (pipe2(&pipefd[cpu*2], O_CLOEXEC | O_NONBLOCK)) {
			perror("pipe");
			return 1;
		}
	}

	if (posix_memalign(&area, page_size, page_size)) {
		fprintf(stderr, "out of memory\n");
		return 1;
	}
	zeropage = area;
	bzero(zeropage, page_size);

	pthread_mutex_lock(&uffd_read_mutex);

	pthread_attr_init(&attr);
	pthread_attr_setstacksize(&attr, 16*1024*1024);

	err = 0;
	while (bounces--) {
		unsigned long expected_ioctls;

		printf("bounces: %d, mode:", bounces);
		if (bounces & BOUNCE_RANDOM)
			printf(" rnd");
		if (bounces & BOUNCE_RACINGFAULTS)
			printf(" racing");
		if (bounces & BOUNCE_VERIFY)
			printf(" ver");
		if (bounces & BOUNCE_POLL)
			printf(" poll");
		printf(", ");
		fflush(stdout);

		if (bounces & BOUNCE_POLL)
			fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
		else
			fcntl(uffd, F_SETFL, uffd_flags & ~O_NONBLOCK);

		/* register */
		uffdio_register.range.start = (unsigned long) area_dst;
		uffdio_register.range.len = nr_pages * page_size;
		uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
		if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register)) {
			fprintf(stderr, "register failure\n");
			return 1;
		}
		expected_ioctls = (1 << _UFFDIO_WAKE) |
				  (1 << _UFFDIO_COPY) |
				  (1 << _UFFDIO_ZEROPAGE);
		if ((uffdio_register.ioctls & expected_ioctls) !=
		    expected_ioctls) {
			fprintf(stderr,
				"unexpected missing ioctl for anon memory\n");
			return 1;
		}

		/*
		 * The madvise done previously isn't enough: some
		 * uffd_thread could have read userfaults (one of
		 * those already resolved by the background thread)
		 * and it may be in the process of calling
		 * UFFDIO_COPY. UFFDIO_COPY will read the zapped
		 * area_src and it would map a zero page in it (of
		 * course such a UFFDIO_COPY is perfectly safe as it'd
		 * return -EEXIST). The problem comes at the next
		 * bounce though: that racing UFFDIO_COPY would
		 * generate zeropages in the area_src, invalidating
		 * the previous MADV_DONTNEED. Without this additional
		 * MADV_DONTNEED those zeropage leftovers in the
		 * area_src would lead to an -EEXIST failure during the
		 * next bounce, effectively leaving a zeropage in the
		 * area_dst.
		 *
		 * Try commenting out this madvise to see the memory
		 * corruption being caught pretty quickly.
		 *
		 * khugepaged is also only inhibited from collapsing THP
		 * after MADV_DONTNEED once the UFFDIO_REGISTER is done,
		 * so the MADV_DONTNEED has to be issued here as well.
		 */
		if (madvise(area_dst, nr_pages * page_size, MADV_DONTNEED)) {
			perror("madvise 2");
			return 1;
		}

		/* bounce pass */
		if (stress(userfaults))
			return 1;

		/* unregister */
		if (ioctl(uffd, UFFDIO_UNREGISTER, &uffdio_register.range)) {
			fprintf(stderr, "unregister failure\n");
			return 1;
		}

		/* verification */
		if (bounces & BOUNCE_VERIFY) {
			for (nr = 0; nr < nr_pages; nr++) {
				if (*area_count(area_dst, nr) != count_verify[nr]) {
					fprintf(stderr,
						"error area_count %Lu %Lu %lu\n",
						*area_count(area_dst, nr),
						count_verify[nr],
						nr);
					err = 1;
					bounces = 0;
				}
			}
		}

		/* prepare next bounce */
		tmp_area = area_src;
		area_src = area_dst;
		area_dst = tmp_area;

		printf("userfaults:");
		for (cpu = 0; cpu < nr_cpus; cpu++)
			printf(" %lu", userfaults[cpu]);
		printf("\n");
	}

	return err;
}

int main(int argc, char **argv)
{
	if (argc < 3)
		fprintf(stderr, "Usage: <MiB> <bounces>\n"), exit(1);
	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	page_size = sysconf(_SC_PAGE_SIZE);
	if ((unsigned long) area_count(NULL, 0) + sizeof(unsigned long long) * 2
	    > page_size)
		fprintf(stderr, "Impossible to run this test\n"), exit(2);
	nr_pages_per_cpu = atol(argv[1]) * 1024*1024 / page_size /
		nr_cpus;
	if (!nr_pages_per_cpu) {
		fprintf(stderr, "invalid MiB\n");
		fprintf(stderr, "Usage: <MiB> <bounces>\n"), exit(1);
	}
	bounces = atoi(argv[2]);
	if (bounces <= 0) {
		fprintf(stderr, "invalid bounces\n");
		fprintf(stderr, "Usage: <MiB> <bounces>\n"), exit(1);
	}
	nr_pages = nr_pages_per_cpu * nr_cpus;
	printf("nr_pages: %lu, nr_pages_per_cpu: %lu\n",
	       nr_pages, nr_pages_per_cpu);
	return userfaultfd_stress();
}

#else /* __NR_userfaultfd */

#warning "missing __NR_userfaultfd definition"

int main(void)
{
	printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n");
	return 0;
}

#endif /* __NR_userfaultfd */