page2 716 drivers/md/raid5-ppl.c static void ppl_xor(int size, struct page *page1, struct page *page2)
page2 720 drivers/md/raid5-ppl.c struct page *xor_srcs[] = { page1, page2 };
page2 803 drivers/md/raid5-ppl.c struct page *page2;
page2 815 drivers/md/raid5-ppl.c page2 = alloc_page(GFP_KERNEL);
page2 817 drivers/md/raid5-ppl.c if (!page1 || !page2) {
page2 907 drivers/md/raid5-ppl.c if (!sync_page_io(rdev, sector, block_size, page2,
page2 916 drivers/md/raid5-ppl.c ppl_xor(block_size, page1, page2);
page2 930 drivers/md/raid5-ppl.c block_size, page2, REQ_OP_READ, 0,
page2 939 drivers/md/raid5-ppl.c ppl_xor(block_size, page1, page2);
page2 965 drivers/md/raid5-ppl.c if (page2)
page2 966 drivers/md/raid5-ppl.c __free_page(page2);
page2 1084 drivers/md/raid5-ppl.c struct page *page, *page2, *tmp;
page2 1097 drivers/md/raid5-ppl.c page2 = alloc_page(GFP_KERNEL);
page2 1098 drivers/md/raid5-ppl.c if (!page2) {
page2 1160 drivers/md/raid5-ppl.c page = page2;
page2 1161 drivers/md/raid5-ppl.c page2 = tmp;
page2 1187 drivers/md/raid5-ppl.c __free_page(page2);
page2 26 drivers/scsi/ses.c unsigned char *page2;
page2 42 drivers/scsi/ses.c return (ses_dev->page2 != NULL);
page2 141 drivers/scsi/ses.c unsigned char *desc_ptr = ses_dev->page2 + 8;
page2 161 drivers/scsi/ses.c return ses_send_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len);
page2 171 drivers/scsi/ses.c unsigned char *desc_ptr = ses_dev->page2 + 8;
page2 173 drivers/scsi/ses.c if (ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len) < 0)
page2 711 drivers/scsi/ses.c ses_dev->page2 = buf;
page2 771 drivers/scsi/ses.c kfree(ses_dev->page2);
page2 813 drivers/scsi/ses.c kfree(ses_dev->page2);
page2 103 fs/nfs/nfs4namespace.c char *page, char *page2)
page2 111 fs/nfs/nfs4namespace.c fs_path = nfs4_pathname_string(&locations->fs_path, page2, PAGE_SIZE);
page2 240 fs/nfs/nfs4namespace.c char *page, char *page2,
page2 250 fs/nfs/nfs4namespace.c mnt_path = nfs4_pathname_string(&location->rootpath, page2, PAGE_SIZE);
page2 254 fs/nfs/nfs4namespace.c maxbuflen = mnt_path - 1 - page2;
page2 274 fs/nfs/nfs4namespace.c memcpy(page2, buf->data, buf->len);
page2 275 fs/nfs/nfs4namespace.c page2[buf->len] = '\0';
page2 276 fs/nfs/nfs4namespace.c mountdata->hostname = page2;
page2 305 fs/nfs/nfs4namespace.c char *page = NULL, *page2 = NULL;
page2 317 fs/nfs/nfs4namespace.c page2 = (char *) __get_free_page(GFP_USER);
page2 318 fs/nfs/nfs4namespace.c if (!page2)
page2 322 fs/nfs/nfs4namespace.c error = nfs4_validate_fspath(dentry, locations, page, page2);
page2 335 fs/nfs/nfs4namespace.c mnt = try_location(&mountdata, page, page2, location);
page2 342 fs/nfs/nfs4namespace.c free_page((unsigned long) page2);
page2 424 fs/nfs/nfs4namespace.c char *page, char *page2,
page2 485 fs/nfs/nfs4namespace.c char *page = NULL, *page2 = NULL;
page2 496 fs/nfs/nfs4namespace.c page2 = (char *) __get_free_page(GFP_USER);
page2 497 fs/nfs/nfs4namespace.c if (!page2)
page2 509 fs/nfs/nfs4namespace.c page2, location);
page2 516 fs/nfs/nfs4namespace.c free_page((unsigned long)page2);
page2 1831 fs/read_write.c static void vfs_lock_two_pages(struct page *page1, struct page *page2)
page2 1834 fs/read_write.c if (page1->index > page2->index)
page2 1835 fs/read_write.c swap(page1, page2);
page2 1838 fs/read_write.c if (page1 != page2)
page2 1839 fs/read_write.c lock_page(page2);
page2 1843 fs/read_write.c static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
page2 1846 fs/read_write.c if (page1 != page2)
page2 1847 fs/read_write.c unlock_page(page2);
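The fs/read_write.c hits above (vfs_lock_two_pages/vfs_unlock_two_pages) show the standard lock-ordering idiom for taking two page locks without risking an ABBA deadlock. A minimal sketch of that pattern, reconstructed from the listed lines; the lock_page(page1) and unlock_page(page1) calls that fall between the listed hits are assumed:

static void vfs_lock_two_pages(struct page *page1, struct page *page2)
{
	/* Always lock the lower-index page first, so two racing callers
	 * acquire the locks in the same order (no ABBA deadlock). */
	if (page1->index > page2->index)
		swap(page1, page2);

	lock_page(page1);		/* assumed; not among the hits */
	if (page1 != page2)		/* don't double-lock the same page */
		lock_page(page2);
}

static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
{
	unlock_page(page1);		/* assumed; not among the hits */
	if (page1 != page2)
		unlock_page(page2);
}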
page2 2897 include/linux/mm.h extern int memcmp_pages(struct page *page1, struct page *page2);
page2 2899 include/linux/mm.h static inline int pages_identical(struct page *page1, struct page *page2)
page2 2901 include/linux/mm.h return !memcmp_pages(page1, page2);
page2 171 mm/migrate.c struct page *page2;
page2 173 mm/migrate.c list_for_each_entry_safe(page, page2, l, lru) {
page2 1408 mm/migrate.c struct page *page2;
page2 1418 mm/migrate.c list_for_each_entry_safe(page, page2, from, lru) {
page2 1449 mm/migrate.c list_safe_reset_next(page, page2, lru);
page2 1838 mm/slub.c struct page *page, *page2;
page2 1853 mm/slub.c list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
page2 910 mm/util.c int memcmp_pages(struct page *page1, struct page *page2)
page2 916 mm/util.c addr2 = kmap_atomic(page2);
page2 416 tools/testing/selftests/vm/mlock2-tests.c struct vm_boundaries page2;
page2 436 tools/testing/selftests/vm/mlock2-tests.c get_vm_area((unsigned long)map + page_size, &page2) ||
page2 447 tools/testing/selftests/vm/mlock2-tests.c if (page1.start != page2.start || page2.start != page3.start) {
page2 459 tools/testing/selftests/vm/mlock2-tests.c get_vm_area((unsigned long)map + page_size, &page2) ||
page2 466 tools/testing/selftests/vm/mlock2-tests.c if (page1.start == page2.start || page2.start == page3.start) {
page2 478 tools/testing/selftests/vm/mlock2-tests.c get_vm_area((unsigned long)map + page_size, &page2) ||
page2 485 tools/testing/selftests/vm/mlock2-tests.c if (page1.start != page2.start || page2.start != page3.start) {
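The include/linux/mm.h and mm/util.c hits above define the page-comparison helpers: pages_identical() (lines 2899-2901) is just !memcmp_pages(page1, page2). A sketch of what memcmp_pages() plausibly does, assuming it maps both pages with kmap_atomic() and compares PAGE_SIZE bytes; only the addr2 = kmap_atomic(page2) line at mm/util.c:916 appears in the hits, the rest is inferred:

int memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);	/* map both pages into kernel VA */
	addr2 = kmap_atomic(page2);	/* mm/util.c:916 above */
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);		/* unmap in reverse order of mapping */
	kunmap_atomic(addr1);
	return ret;
}

The mm/migrate.c and mm/slub.c hits, by contrast, use page2 purely as the scratch "next" cursor required by list_for_each_entry_safe(), which lets the loop body remove the current entry from the list while iterating.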