This source file includes the following definitions:
- mapping_set_error
- mapping_set_unevictable
- mapping_clear_unevictable
- mapping_unevictable
- mapping_set_exiting
- mapping_exiting
- mapping_set_no_writeback_tags
- mapping_use_writeback_tags
- mapping_gfp_mask
- mapping_gfp_constraint
- mapping_set_gfp_mask
- __page_cache_add_speculative
- page_cache_get_speculative
- page_cache_add_speculative
- __page_cache_alloc
- page_cache_alloc
- readahead_gfp_mask
- find_get_page
- find_get_page_flags
- find_lock_page
- find_or_create_page
- grab_cache_page_nowait
- find_subpage
- find_get_pages
- find_get_pages_tag
- grab_cache_page
- read_mapping_page
- page_to_index
- page_to_pgoff
- page_offset
- page_file_offset
- linear_page_index
- trylock_page
- lock_page
- lock_page_killable
- lock_page_or_retry
- wait_on_page_locked
- wait_on_page_locked_killable
- fault_in_pages_writeable
- fault_in_pages_readable
- add_to_page_cache
- dir_pages
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h>              /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct pagevec;

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
        AS_EIO          = 0,    /* IO error on async write */
        AS_ENOSPC       = 1,    /* ENOSPC on async write */
        AS_MM_ALL_LOCKS = 2,    /* under mm_take_all_locks() */
        AS_UNEVICTABLE  = 3,    /* e.g., ramdisk, SHM_LOCK */
        AS_EXITING      = 4,    /* final truncate in progress */
        /* writeback related tags are not used */
        AS_NO_WRITEBACK_TAGS = 5,
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (likely(!error))
                return;

        /* Record in wb_err for checkers using errseq_t based tracking */
        filemap_set_wb_err(mapping, error);

        /* Record it in flags for now, for legacy callers */
        if (error == -ENOSPC)
                set_bit(AS_ENOSPC, &mapping->flags);
        else
                set_bit(AS_EIO, &mapping->flags);
}
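
/*
 * Usage sketch (editor's example, not part of the original header; the
 * helper name my_fs_end_writepage() is made up): a filesystem would
 * typically record a failed writeback like this so that a later fsync(2)
 * sees the error.
 *
 *      static void my_fs_end_writepage(struct page *page, int err)
 *      {
 *              if (err)
 *                      mapping_set_error(page->mapping, err);
 *              end_page_writeback(page);
 *      }
 */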

static inline void mapping_set_unevictable(struct address_space *mapping)
{
        set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
        clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
        if (mapping)
                return test_bit(AS_UNEVICTABLE, &mapping->flags);
        return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
        set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
        return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
        set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
        return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
        return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
                gfp_t gfp_mask)
{
        return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->gfp_mask = mask;
}
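
/*
 * Usage sketch (editor's example, not part of the original header): a
 * caller already holding filesystem locks can mask out __GFP_FS so that
 * a page cache allocation cannot recurse into the filesystem:
 *
 *      struct page *page = __page_cache_alloc(
 *                      mapping_gfp_constraint(mapping, ~__GFP_FS));
 */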

void release_pages(struct page **pages, int nr);

/*
 * speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in pagecache
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with the i_pages lock held):
 * A. atomically check refcount is correct on the page
 * B. remove page from pagecache
 * C. free the page
 *
 * It is possible that between 1 and 2, the page is removed then the exact
 * same page is re-inserted into the same position in pagecache. That's OK:
 * the old find_get_page using a lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 */
static inline int __page_cache_add_speculative(struct page *page, int count)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
        /*
         * Preempt must be disabled here - we rely on rcu_read_lock doing
         * this for us.
         *
         * Pagecache won't be truncated from interrupt context, so if we have
         * found a page in the radix tree here, we have pinned its refcount by
         * disabling preempt, and hence no need for the "speculative get" that
         * SMP requires.
         */
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        page_ref_add(page, count);

#else
        if (unlikely(!page_ref_add_unless(page, count, 0))) {
                /*
                 * Either the page has been freed, or will be freed.
                 * In either case, retry here and the caller should
                 * do the right thing (see comments above).
                 */
                return 0;
        }
#endif
        VM_BUG_ON_PAGE(PageTail(page), page);

        return 1;
}

static inline int page_cache_get_speculative(struct page *page)
{
        return __page_cache_add_speculative(page, 1);
}

static inline int page_cache_add_speculative(struct page *page, int count)
{
        return __page_cache_add_speculative(page, count);
}
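
/*
 * Usage sketch (editor's example, not part of the original header): the
 * lookup side of the lockless protocol described above, in the shape used
 * internally by find_get_page() and friends (simplified XArray walk):
 *
 *      rcu_read_lock();
 * repeat:
 *      page = xas_load(&xas);                  // 1. find page in pagecache
 *      if (page && !page_cache_get_speculative(page))
 *              goto repeat;                    // 2. conditionally take a ref
 *      if (page && unlikely(page != xas_reload(&xas))) {
 *              put_page(page);                 // 3. recheck; drop and retry
 *              goto repeat;
 *      }
 *      rcu_read_unlock();
 */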

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x));
}

/* Readahead failure is harmless, so allocate quietly and give up easily. */
static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
        return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED            0x00000001      /* mark the page accessed */
#define FGP_LOCK                0x00000002      /* return the page locked */
#define FGP_CREAT               0x00000004      /* create the page if absent */
#define FGP_WRITE               0x00000008      /* the page will be written to */
#define FGP_NOFS                0x00000010      /* don't recurse into the fs */
#define FGP_NOWAIT              0x00000020      /* don't sleep for the page lock */
#define FGP_FOR_MMAP            0x00000040      /* caller is a page fault handler */

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
                int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
                                        pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
                                        pgoff_t offset, int fgp_flags)
{
        return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
                                        pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
                                        pgoff_t offset, gfp_t gfp_mask)
{
        return pagecache_get_page(mapping, offset,
                                        FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
                                        gfp_mask);
}
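
/*
 * Usage sketch (editor's example, not part of the original header): fill
 * one page of a file with data; the page comes back locked with a
 * reference held, so it must be unlocked and released when done.
 *
 *      struct page *page = find_or_create_page(mapping, index,
 *                                              mapping_gfp_mask(mapping));
 *      if (!page)
 *              return -ENOMEM;
 *      ... copy data into the page, SetPageUptodate(page), etc ...
 *      unlock_page(page);
 *      put_page(page);
 */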

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
                                pgoff_t index)
{
        return pagecache_get_page(mapping, index,
                        FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
                        mapping_gfp_mask(mapping));
}

static inline struct page *find_subpage(struct page *page, pgoff_t offset)
{
        if (PageHuge(page))
                return page;

        VM_BUG_ON_PAGE(PageTail(page), page);

        return page + (offset & (compound_nr(page) - 1));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
                          unsigned int nr_entries, struct page **entries,
                          pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
                        pgoff_t end, unsigned int nr_pages,
                        struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
                        pgoff_t *start, unsigned int nr_pages,
                        struct page **pages)
{
        return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
                                    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
                               unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
                        pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
                        struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
                        pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
                        struct page **pages)
{
        return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
                                        nr_pages, pages);
}
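
/*
 * Usage sketch (editor's example, not part of the original header): walk
 * all dirty pages of a mapping in batches.  Each returned page carries a
 * reference that the caller must drop.
 *
 *      struct page *pages[16];
 *      pgoff_t index = 0;
 *      unsigned int i, nr;
 *
 *      while ((nr = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY,
 *                                      16, pages))) {
 *              for (i = 0; i < nr; i++) {
 *                      ... process pages[i] ...
 *                      put_page(pages[i]);
 *              }
 *      }
 */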

struct page *grab_cache_page_write_begin(struct address_space *mapping,
                        pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
                                                                pgoff_t index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * read_cache_page(struct address_space *mapping,
                                pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
                struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
                                pgoff_t index, void *data)
{
        return read_cache_page(mapping, index, NULL, data);
}

/*
 * Get index of the page within the radix-tree.
 * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
        pgoff_t pgoff;

        if (likely(!PageTransTail(page)))
                return page->index;

        /*
         * We don't initialize ->index for tail pages: calculate based on
         * head page.
         */
        pgoff = compound_head(page)->index;
        pgoff += page - compound_head(page);
        return pgoff;
}

/*
 * Get the offset in PAGE_SIZE units.
 * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
        if (unlikely(PageHeadHuge(page)))
                return page->index << compound_order(page);

        return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
        return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                        unsigned long address)
{
        pgoff_t pgoff;
        if (unlikely(is_vm_hugetlb_page(vma)))
                return linear_hugepage_index(vma, address);
        pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff;
}
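
/*
 * Worked example (editor's note, not part of the original header): for a
 * vma mapping file offset 0x200000 (vm_pgoff = 0x200) at vm_start =
 * 0x7f0000000000, a fault at address 0x7f0000003000 yields
 * (0x3000 >> PAGE_SHIFT) + 0x200 = 3 + 512 = 515, i.e. the page cache
 * index the fault handler should look up (assuming 4KiB pages).
 */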

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                unsigned int flags);
extern void unlock_page(struct page *page);

/*
 * Return true if the page was successfully locked.
 */
static inline int trylock_page(struct page *page)
{
        page = compound_head(page);
        return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                __lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                return __lock_page_killable(page);
        return 0;
}
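
/*
 * Usage sketch (editor's example, not part of the original header): lock
 * a page while remaining killable, bailing out cleanly if a fatal signal
 * arrives while waiting.
 *
 *      int err = lock_page_killable(page);
 *      if (err) {
 *              put_page(page);
 *              return err;     // -EINTR: task is being killed
 *      }
 *      ... page is locked here ...
 *      unlock_page(page);
 */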

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                     unsigned int flags)
{
        might_sleep();
        return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
        if (PageLocked(page))
                wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
        if (!PageLocked(page))
                return 0;
        return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

extern void put_and_wait_on_page_locked(struct page *page);

void wait_on_page_writeback(struct page *page);
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue.
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault everything in the given userspace address range in.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
        char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;
        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        do {
                if (unlikely(__put_user(0, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK))
                return __put_user(0, end);

        return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
        volatile char c;
        const char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;

        do {
                if (unlikely(__get_user(c, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK)) {
                return __get_user(c, end);
        }

        (void)c;
        return 0;
}
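
/*
 * Usage sketch (editor's example, not part of the original header):
 * pre-fault a user buffer before taking locks under which a page fault
 * must not be taken, in the style of a write(2) fast path:
 *
 *      if (fault_in_pages_readable(buf, count))
 *              return -EFAULT;
 *      ... take the page lock, then copy with page faults disabled,
 *      retrying the fault-in if the atomic copy comes up short ...
 */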

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
void delete_from_page_cache_batch(struct address_space *mapping,
                                  struct pagevec *pvec);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
                struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
        int error;

        __SetPageLocked(page);
        error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
        if (unlikely(error))
                __ClearPageLocked(page);
        return error;
}
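
/*
 * Usage sketch (editor's example, not part of the original header):
 * insert a freshly allocated page at @index, dropping it again if the
 * insertion fails (e.g. another task raced us into the same slot):
 *
 *      struct page *page = __page_cache_alloc(mapping_gfp_mask(mapping));
 *      if (!page)
 *              return -ENOMEM;
 *      if (add_to_page_cache(page, mapping, index, GFP_KERNEL)) {
 *              put_page(page);
 *              goto repeat;    // retry the page cache lookup
 *      }
 *      ... page is locked and in the cache; unlock_page() when done ...
 */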

/* Number of pages needed to hold i_size bytes, rounded up. */
static inline unsigned long dir_pages(struct inode *inode)
{
        return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
                               PAGE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */