drivers/staging/android/ashmem.c

DEFINITIONS

This source file includes the following definitions:
  1. range_size
  2. range_on_lru
  3. page_range_subsumes_range
  4. page_range_subsumed_by_range
  5. page_in_range
  6. page_range_in_range
  7. range_before_page
  8. lru_add
  9. lru_del
  10. range_alloc
  11. range_del
  12. range_shrink
  13. ashmem_open
  14. ashmem_release
  15. ashmem_read_iter
  16. ashmem_llseek
  17. calc_vm_may_flags
  18. ashmem_vmfile_mmap
  19. ashmem_vmfile_get_unmapped_area
  20. ashmem_mmap
  21. ashmem_shrink_scan
  22. ashmem_shrink_count
  23. set_prot_mask
  24. set_name
  25. get_name
  26. ashmem_pin
  27. ashmem_unpin
  28. ashmem_get_pin_status
  29. ashmem_pin_unpin
  30. ashmem_ioctl
  31. compat_ashmem_ioctl
  32. ashmem_show_fdinfo
  33. ashmem_init

// SPDX-License-Identifier: GPL-2.0
/* mm/ashmem.c
 *
 * Anonymous Shared Memory Subsystem, ashmem
 *
 * Copyright (C) 2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 */

#define pr_fmt(fmt) "ashmem: " fmt

#include <linux/init.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
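
/*
 * For illustration: the prefix "dev/ashmem/" is 11 bytes, so with the uapi
 * value ASHMEM_NAME_LEN == 256 the full name buffer below is 256 + 11 = 267
 * bytes. A region named "my-cache" should therefore appear in
 * /proc/<pid>/maps as "/dev/ashmem/my-cache" (typically marked deleted,
 * since the backing shmem file has no on-disk path).
 */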

/**
 * struct ashmem_area - The anonymous shared memory area
 * @name:               The optional name in /proc/pid/maps
 * @unpinned_list:      The list of this area's unpinned page ranges
 * @file:               The shmem-based backing file
 * @size:               The size of the mapping, in bytes
 * @prot_mask:          The allowed protection bits, as vm_flags
 *
 * The lifecycle of this structure is from our parent file's open() until
 * its release(). It is also protected by 'ashmem_mutex'.
 *
 * Warning: Mappings do NOT pin this structure; it dies on close()
 */
struct ashmem_area {
        char name[ASHMEM_FULL_NAME_LEN];
        struct list_head unpinned_list;
        struct file *file;
        size_t size;
        unsigned long prot_mask;
};
  53 
  54 /**
  55  * struct ashmem_range - A range of unpinned/evictable pages
  56  * @lru:                 The entry in the LRU list
  57  * @unpinned:            The entry in its area's unpinned list
  58  * @asma:                The associated anonymous shared memory area.
  59  * @pgstart:             The starting page (inclusive)
  60  * @pgend:               The ending page (inclusive)
  61  * @purged:              The purge status (ASHMEM_NOT or ASHMEM_WAS_PURGED)
  62  *
  63  * The lifecycle of this structure is from unpin to pin.
  64  * It is protected by 'ashmem_mutex'
  65  */
  66 struct ashmem_range {
  67         struct list_head lru;
  68         struct list_head unpinned;
  69         struct ashmem_area *asma;
  70         size_t pgstart;
  71         size_t pgend;
  72         unsigned int purged;
  73 };
  74 
  75 /* LRU list of unpinned pages, protected by ashmem_mutex */
  76 static LIST_HEAD(ashmem_lru_list);
  77 
  78 static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
  79 static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);
  80 
  81 /*
  82  * long lru_count - The count of pages on our LRU list.
  83  *
  84  * This is protected by ashmem_mutex.
  85  */
  86 static unsigned long lru_count;
  87 
  88 /*
  89  * ashmem_mutex - protects the list of and each individual ashmem_area
  90  *
  91  * Lock Ordering: ashmex_mutex -> i_mutex -> i_alloc_sem
  92  */
  93 static DEFINE_MUTEX(ashmem_mutex);
  94 
  95 static struct kmem_cache *ashmem_area_cachep __read_mostly;
  96 static struct kmem_cache *ashmem_range_cachep __read_mostly;

static inline unsigned long range_size(struct ashmem_range *range)
{
        return range->pgend - range->pgstart + 1;
}

static inline bool range_on_lru(struct ashmem_range *range)
{
        return range->purged == ASHMEM_NOT_PURGED;
}

static inline bool page_range_subsumes_range(struct ashmem_range *range,
                                             size_t start, size_t end)
{
        return (range->pgstart >= start) && (range->pgend <= end);
}

static inline bool page_range_subsumed_by_range(struct ashmem_range *range,
                                                size_t start, size_t end)
{
        return (range->pgstart <= start) && (range->pgend >= end);
}

static inline bool page_in_range(struct ashmem_range *range, size_t page)
{
        return (range->pgstart <= page) && (range->pgend >= page);
}

static inline bool page_range_in_range(struct ashmem_range *range,
                                       size_t start, size_t end)
{
        return page_in_range(range, start) || page_in_range(range, end) ||
                page_range_subsumes_range(range, start, end);
}

static inline bool range_before_page(struct ashmem_range *range,
                                     size_t page)
{
        return range->pgend < page;
}
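
/*
 * Example of the inclusive-interval helpers above: for a range covering
 * pages [2, 5], range_size() is 5 - 2 + 1 = 4 pages; page_in_range() is
 * true for pages 2 through 5; page_range_in_range() is true for the
 * partially overlapping request [5, 9]; and range_before_page() is true
 * only for pages >= 6, which is what lets the pin/unpin walks below
 * short-circuit early.
 */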

#define PROT_MASK               (PROT_EXEC | PROT_READ | PROT_WRITE)

/**
 * lru_add() - Adds a range of memory to the LRU list
 * @range:     The memory range being added.
 *
 * The range is first added to the end (tail) of the LRU list.
 * After this, the size of the range is added to @lru_count
 */
static inline void lru_add(struct ashmem_range *range)
{
        list_add_tail(&range->lru, &ashmem_lru_list);
        lru_count += range_size(range);
}

/**
 * lru_del() - Removes a range of memory from the LRU list
 * @range:     The memory range being removed
 *
 * The range is first deleted from the LRU list.
 * After this, the size of the range is removed from @lru_count
 */
static inline void lru_del(struct ashmem_range *range)
{
        list_del(&range->lru);
        lru_count -= range_size(range);
}

/**
 * range_alloc() - Allocates and initializes a new ashmem_range structure
 * @asma:          The associated ashmem_area
 * @prev_range:    The previous ashmem_range in the sorted asma->unpinned list
 * @purged:        Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * @start:         The starting page (inclusive)
 * @end:           The ending page (inclusive)
 * @new_range:     The pre-allocated range that is consumed here and then
 *                 cleared to NULL
 *
 * The caller must hold ashmem_mutex.
 */
static void range_alloc(struct ashmem_area *asma,
                        struct ashmem_range *prev_range, unsigned int purged,
                        size_t start, size_t end,
                        struct ashmem_range **new_range)
{
        struct ashmem_range *range = *new_range;

        *new_range = NULL;
        range->asma = asma;
        range->pgstart = start;
        range->pgend = end;
        range->purged = purged;

        list_add_tail(&range->unpinned, &prev_range->unpinned);

        if (range_on_lru(range))
                lru_add(range);
}

/**
 * range_del() - Deletes and deallocates an ashmem_range structure
 * @range:       The associated ashmem_range that has previously been allocated
 */
static void range_del(struct ashmem_range *range)
{
        list_del(&range->unpinned);
        if (range_on_lru(range))
                lru_del(range);
        kmem_cache_free(ashmem_range_cachep, range);
}

/**
 * range_shrink() - Shrinks an ashmem_range
 * @range:          The associated ashmem_range being shrunk
 * @start:          The starting page (inclusive) of the new range
 * @end:            The ending page (inclusive) of the new range
 *
 * This does not modify the data inside the existing range in any way - it
 * simply shrinks the boundaries of the range.
 *
 * Theoretically, with a little tweaking, this could eventually be changed
 * to range_resize, and expand the lru_count if the new range is larger.
 */
static inline void range_shrink(struct ashmem_range *range,
                                size_t start, size_t end)
{
        size_t pre = range_size(range);

        range->pgstart = start;
        range->pgend = end;

        if (range_on_lru(range))
                lru_count -= pre - range_size(range);
}
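
/*
 * Example: shrinking an unpinned range that covered pages [2, 9] down to
 * [2, 4] (say, because pages [5, 9] were just pinned) changes range_size()
 * from 8 to 3, so lru_count drops by 8 - 3 = 5 pages.
 */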

/**
 * ashmem_open() - Opens an Anonymous Shared Memory structure
 * @inode:         The inode of the backing file
 * @file:          The backing file
 *
 * Please note that the ashmem_area is not returned by this function - it is
 * instead written to "file->private_data".
 *
 * Return: 0 if successful, or a negative errno otherwise.
 */
static int ashmem_open(struct inode *inode, struct file *file)
{
        struct ashmem_area *asma;
        int ret;

        ret = generic_file_open(inode, file);
        if (ret)
                return ret;

        asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
        if (!asma)
                return -ENOMEM;

        INIT_LIST_HEAD(&asma->unpinned_list);
        memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
        asma->prot_mask = PROT_MASK;
        file->private_data = asma;

        return 0;
}

/**
 * ashmem_release() - Releases an Anonymous Shared Memory structure
 * @ignored:          The inode of the backing file - it is ignored here.
 * @file:             The backing file
 *
 * Return: 0 if successful. If it is anything else, go have a coffee and
 * try again.
 */
static int ashmem_release(struct inode *ignored, struct file *file)
{
        struct ashmem_area *asma = file->private_data;
        struct ashmem_range *range, *next;

        mutex_lock(&ashmem_mutex);
        list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
                range_del(range);
        mutex_unlock(&ashmem_mutex);

        if (asma->file)
                fput(asma->file);
        kmem_cache_free(ashmem_area_cachep, asma);

        return 0;
}

static ssize_t ashmem_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
        struct ashmem_area *asma = iocb->ki_filp->private_data;
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* If size is not set, or set to 0, always return EOF. */
        if (asma->size == 0)
                goto out_unlock;

        if (!asma->file) {
                ret = -EBADF;
                goto out_unlock;
        }

        /*
         * asma and asma->file are used outside the lock here.  We assume
         * once asma->file is set it will never be changed, and will not
         * be destroyed until all references to the file are dropped and
         * ashmem_release is called.
         */
        mutex_unlock(&ashmem_mutex);
        ret = vfs_iter_read(asma->file, iter, &iocb->ki_pos, 0);
        mutex_lock(&ashmem_mutex);
        if (ret > 0)
                asma->file->f_pos = iocb->ki_pos;
out_unlock:
        mutex_unlock(&ashmem_mutex);
        return ret;
}

static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
        struct ashmem_area *asma = file->private_data;
        loff_t ret;

        mutex_lock(&ashmem_mutex);

        if (asma->size == 0) {
                mutex_unlock(&ashmem_mutex);
                return -EINVAL;
        }

        if (!asma->file) {
                mutex_unlock(&ashmem_mutex);
                return -EBADF;
        }

        mutex_unlock(&ashmem_mutex);

        ret = vfs_llseek(asma->file, offset, origin);
        if (ret < 0)
                return ret;

        /* Copy f_pos from backing file, since f_ops->llseek() sets it */
        file->f_pos = asma->file->f_pos;
        return ret;
}

static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
        return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
               _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
               _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}

static int ashmem_vmfile_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* do not allow the ashmem backing shmem file to be mmapped directly */
        return -EPERM;
}

static unsigned long
ashmem_vmfile_get_unmapped_area(struct file *file, unsigned long addr,
                                unsigned long len, unsigned long pgoff,
                                unsigned long flags)
{
        return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
}

static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
        static struct file_operations vmfile_fops;
        struct ashmem_area *asma = file->private_data;
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* user needs to SET_SIZE before mapping */
        if (!asma->size) {
                ret = -EINVAL;
                goto out;
        }

        /* requested mapping size larger than object size */
        if (vma->vm_end - vma->vm_start > PAGE_ALIGN(asma->size)) {
                ret = -EINVAL;
                goto out;
        }

        /* requested protection bits must match our allowed protection mask */
        if ((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
            calc_vm_prot_bits(PROT_MASK, 0)) {
                ret = -EPERM;
                goto out;
        }
        vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

        if (!asma->file) {
                char *name = ASHMEM_NAME_DEF;
                struct file *vmfile;

                if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
                        name = asma->name;

                /* ... and allocate the backing shmem file */
                vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
                if (IS_ERR(vmfile)) {
                        ret = PTR_ERR(vmfile);
                        goto out;
                }
                vmfile->f_mode |= FMODE_LSEEK;
                asma->file = vmfile;
                /*
                 * override mmap operation of the vmfile so that it can't be
                 * remapped which would lead to creation of a new vma with no
                 * asma permission checks. Have to override get_unmapped_area
                 * as well to prevent VM_BUG_ON check for f_ops modification.
                 */
                if (!vmfile_fops.mmap) {
                        vmfile_fops = *vmfile->f_op;
                        vmfile_fops.mmap = ashmem_vmfile_mmap;
                        vmfile_fops.get_unmapped_area =
                                        ashmem_vmfile_get_unmapped_area;
                }
                vmfile->f_op = &vmfile_fops;
        }
        get_file(asma->file);

        /*
         * XXX - Reworked to use shmem_zero_setup() instead of
         * shmem_set_file while we're in staging. -jstultz
         */
        if (vma->vm_flags & VM_SHARED) {
                ret = shmem_zero_setup(vma);
                if (ret) {
                        fput(asma->file);
                        goto out;
                }
        } else {
                vma_set_anonymous(vma);
        }

        if (vma->vm_file)
                fput(vma->vm_file);
        vma->vm_file = asma->file;

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}
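
/*
 * A minimal userspace sketch of the lifecycle enforced above, assuming the
 * ashmem uapi header is available as <linux/ashmem.h> and the device node
 * is /dev/ashmem (error handling omitted for brevity):
 *
 *      #include <fcntl.h>
 *      #include <sys/ioctl.h>
 *      #include <sys/mman.h>
 *      #include <linux/ashmem.h>
 *
 *      int fd = open("/dev/ashmem", O_RDWR);
 *      ioctl(fd, ASHMEM_SET_NAME, "example-region"); // optional, before mmap
 *      ioctl(fd, ASHMEM_SET_SIZE, 4 * 4096);         // mandatory, before mmap
 *      void *p = mmap(NULL, 4 * 4096, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, fd, 0);
 *
 * Mapping before ASHMEM_SET_SIZE fails with -EINVAL, and asking for
 * protection bits outside the area's prot_mask fails with -EPERM, per the
 * checks at the top of ashmem_mmap().
 */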

/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c
 *
 * 'nr_to_scan' is the number of objects to scan for freeing.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of pages freed, -1 if we cannot proceed
 * without risk of deadlock (because ashmem_mutex cannot be taken), or
 * SHRINK_STOP if we might recurse into filesystem code (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned long freed = 0;

        /* We might recurse into filesystem code, so bail out if necessary */
        if (!(sc->gfp_mask & __GFP_FS))
                return SHRINK_STOP;

        if (!mutex_trylock(&ashmem_mutex))
                return -1;

        while (!list_empty(&ashmem_lru_list)) {
                struct ashmem_range *range =
                        list_first_entry(&ashmem_lru_list, typeof(*range), lru);
                loff_t start = range->pgstart * PAGE_SIZE;
                loff_t end = (range->pgend + 1) * PAGE_SIZE;
                struct file *f = range->asma->file;

                get_file(f);
                atomic_inc(&ashmem_shrink_inflight);
                range->purged = ASHMEM_WAS_PURGED;
                lru_del(range);

                freed += range_size(range);
                mutex_unlock(&ashmem_mutex);
                f->f_op->fallocate(f,
                                   FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                                   start, end - start);
                fput(f);
                if (atomic_dec_and_test(&ashmem_shrink_inflight))
                        wake_up_all(&ashmem_shrink_wait);
                if (!mutex_trylock(&ashmem_mutex))
                        goto out;
                if (--sc->nr_to_scan <= 0)
                        break;
        }
        mutex_unlock(&ashmem_mutex);
out:
        return freed;
}
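
/*
 * Example of the byte-range computation above: purging an unpinned range
 * covering pages [3, 5] with 4 KiB pages punches a hole from byte
 * 3 * 4096 = 12288 up to (5 + 1) * 4096 = 24576, i.e. 12288 bytes, while
 * FALLOC_FL_KEEP_SIZE leaves the file size itself untouched.
 */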

static unsigned long
ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
        /*
         * note that lru_count is count of pages on the lru, not a count of
         * objects on the list. This means the scan function needs to return the
         * number of pages freed, not the number of objects scanned.
         */
        return lru_count;
}

static struct shrinker ashmem_shrinker = {
        .count_objects = ashmem_shrink_count,
        .scan_objects = ashmem_shrink_scan,
        /*
         * XXX (dchinner): I wish people would comment on why they need
         * significant changes to the default value here
         */
        .seeks = DEFAULT_SEEKS * 4,
};

static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* the user can only remove, not add, protection bits */
        if ((asma->prot_mask & prot) != prot) {
                ret = -EINVAL;
                goto out;
        }

        /* does the application expect PROT_READ to imply PROT_EXEC? */
        if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
                prot |= PROT_EXEC;

        asma->prot_mask = prot;

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}
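
/*
 * A small userspace sketch of the one-way ratchet implemented above
 * (illustrative; assumes <linux/ashmem.h>): a producer can fill a region
 * and then drop PROT_WRITE before handing the fd to a consumer, and any
 * later attempt to widen the mask again is rejected with EINVAL:
 *
 *      ioctl(fd, ASHMEM_SET_PROT_MASK, PROT_READ);    // now read-only
 *      ioctl(fd, ASHMEM_SET_PROT_MASK,
 *            PROT_READ | PROT_WRITE);                 // fails: EINVAL
 */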

static int set_name(struct ashmem_area *asma, void __user *name)
{
        int len;
        int ret = 0;
        char local_name[ASHMEM_NAME_LEN];

        /*
         * Holding the ashmem_mutex while doing a copy_from_user might cause
         * a data abort, which would try to access mmap_sem. If another
         * thread has invoked ashmem_mmap then it will be holding the
         * semaphore and will be waiting for ashmem_mutex, thereby leading to
         * deadlock. We'll release the mutex and copy the name into a local
         * variable that does not need protection, then copy the local
         * variable to the structure member with the lock held.
         */
        len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
        if (len < 0)
                return len;
        if (len == ASHMEM_NAME_LEN)
                local_name[ASHMEM_NAME_LEN - 1] = '\0';
        mutex_lock(&ashmem_mutex);
        /* cannot change an existing mapping's name */
        if (asma->file)
                ret = -EINVAL;
        else
                strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);

        mutex_unlock(&ashmem_mutex);
        return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
        int ret = 0;
        size_t len;
        /*
         * Have a local variable to which we'll copy the content
         * from asma with the lock held. Later we can copy this to the user
         * space safely without holding any locks. So even if we proceed to
         * wait for mmap_sem, it won't lead to deadlock.
         */
        char local_name[ASHMEM_NAME_LEN];

        mutex_lock(&ashmem_mutex);
        if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
                /*
                 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
                 * prevents us from revealing one user's stack to another.
                 */
                len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
                memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
        } else {
                len = sizeof(ASHMEM_NAME_DEF);
                memcpy(local_name, ASHMEM_NAME_DEF, len);
        }
        mutex_unlock(&ashmem_mutex);

        /*
         * Now we are just copying from the stack variable to userland
         * No lock held
         */
        if (copy_to_user(name, local_name, len))
                ret = -EFAULT;
        return ret;
}
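
/*
 * Example round trip for the two helpers above (illustrative; assumes
 * <linux/ashmem.h>). ASHMEM_SET_NAME must happen before the region is
 * mapped, since set_name() rejects areas that already have a backing file:
 *
 *      char buf[ASHMEM_NAME_LEN];
 *
 *      ioctl(fd, ASHMEM_SET_NAME, "my-cache");
 *      ioctl(fd, ASHMEM_GET_NAME, buf);        // buf now holds "my-cache"
 */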

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
                      struct ashmem_range **new_range)
{
        struct ashmem_range *range, *next;
        int ret = ASHMEM_NOT_PURGED;

        list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
                /* moved past last applicable page; we can short circuit */
                if (range_before_page(range, pgstart))
                        break;

                /*
                 * The user can ask us to pin pages that span multiple ranges,
                 * or to pin pages that aren't even unpinned, so this is messy.
                 *
                 * Four cases:
                 * 1. The requested range subsumes an existing range, so we
                 *    just remove the entire matching range.
                 * 2. The requested range overlaps the start of an existing
                 *    range, so we just update that range.
                 * 3. The requested range overlaps the end of an existing
                 *    range, so we just update that range.
                 * 4. The requested range punches a hole in an existing range,
                 *    so we have to update one side of the range and then
                 *    create a new range for the other side.
                 */
                if (page_range_in_range(range, pgstart, pgend)) {
                        ret |= range->purged;

                        /* Case #1: Easy. Just nuke the whole thing. */
                        if (page_range_subsumes_range(range, pgstart, pgend)) {
                                range_del(range);
                                continue;
                        }

                        /* Case #2: We overlap from the start, so adjust it */
                        if (range->pgstart >= pgstart) {
                                range_shrink(range, pgend + 1, range->pgend);
                                continue;
                        }

                        /* Case #3: We overlap from the rear, so adjust it */
                        if (range->pgend <= pgend) {
                                range_shrink(range, range->pgstart,
                                             pgstart - 1);
                                continue;
                        }

                        /*
                         * Case #4: We eat a chunk out of the middle. A bit
                         * more complicated, we allocate a new range for the
                         * second half and adjust the first chunk's endpoint.
                         */
                        range_alloc(asma, range, range->purged,
                                    pgend + 1, range->pgend, new_range);
                        range_shrink(range, range->pgstart, pgstart - 1);
                        break;
                }
        }

        return ret;
}
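
/*
 * A worked example of the four cases above: with a single unpinned range
 * covering pages [2, 9],
 *
 *      pinning [2, 9] (or wider) removes the range entirely   (case #1)
 *      pinning [2, 4] shrinks it to [5, 9]                    (case #2)
 *      pinning [7, 9] shrinks it to [2, 6]                    (case #3)
 *      pinning [4, 5] splits it into [2, 3] and [6, 9],       (case #4)
 *              consuming the caller's pre-allocated *new_range
 */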

/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
                        struct ashmem_range **new_range)
{
        struct ashmem_range *range, *next;
        unsigned int purged = ASHMEM_NOT_PURGED;

restart:
        list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
                /* short circuit: this is our insertion point */
                if (range_before_page(range, pgstart))
                        break;

                /*
                 * The user can ask us to unpin pages that are already entirely
                 * or partially pinned. We handle those two cases here.
                 */
                if (page_range_subsumed_by_range(range, pgstart, pgend))
                        return 0;
                if (page_range_in_range(range, pgstart, pgend)) {
                        pgstart = min(range->pgstart, pgstart);
                        pgend = max(range->pgend, pgend);
                        purged |= range->purged;
                        range_del(range);
                        goto restart;
                }
        }

        range_alloc(asma, range, purged, pgstart, pgend, new_range);
        return 0;
}

/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
                                 size_t pgend)
{
        struct ashmem_range *range;
        int ret = ASHMEM_IS_PINNED;

        list_for_each_entry(range, &asma->unpinned_list, unpinned) {
                if (range_before_page(range, pgstart))
                        break;
                if (page_range_in_range(range, pgstart, pgend)) {
                        ret = ASHMEM_IS_UNPINNED;
                        break;
                }
        }

        return ret;
}

static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
                            void __user *p)
{
        struct ashmem_pin pin;
        size_t pgstart, pgend;
        int ret = -EINVAL;
        struct ashmem_range *range = NULL;

        if (copy_from_user(&pin, p, sizeof(pin)))
                return -EFAULT;

        if (cmd == ASHMEM_PIN || cmd == ASHMEM_UNPIN) {
                range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
                if (!range)
                        return -ENOMEM;
        }

        mutex_lock(&ashmem_mutex);
        wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));

        if (!asma->file)
                goto out_unlock;

        /* per custom, you can pass zero for len to mean "everything onward" */
        if (!pin.len)
                pin.len = PAGE_ALIGN(asma->size) - pin.offset;

        if ((pin.offset | pin.len) & ~PAGE_MASK)
                goto out_unlock;

        if (((__u32)-1) - pin.offset < pin.len)
                goto out_unlock;

        if (PAGE_ALIGN(asma->size) < pin.offset + pin.len)
                goto out_unlock;

        pgstart = pin.offset / PAGE_SIZE;
        pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

        switch (cmd) {
        case ASHMEM_PIN:
                ret = ashmem_pin(asma, pgstart, pgend, &range);
                break;
        case ASHMEM_UNPIN:
                ret = ashmem_unpin(asma, pgstart, pgend, &range);
                break;
        case ASHMEM_GET_PIN_STATUS:
                ret = ashmem_get_pin_status(asma, pgstart, pgend);
                break;
        }

out_unlock:
        mutex_unlock(&ashmem_mutex);
        if (range)
                kmem_cache_free(ashmem_range_cachep, range);

        return ret;
}
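
/*
 * A sketch of driving the pin interface from userspace (illustrative;
 * assumes <linux/ashmem.h>). offset and len are in bytes and must be
 * page-aligned, and len == 0 means "from offset to the end":
 *
 *      struct ashmem_pin pin = {
 *              .offset = 0,            // byte offset, page-aligned
 *              .len    = 2 * 4096,     // byte length, page-aligned
 *      };
 *
 *      ioctl(fd, ASHMEM_UNPIN, &pin);  // pages [0, 1] become evictable
 */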

static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct ashmem_area *asma = file->private_data;
        long ret = -ENOTTY;

        switch (cmd) {
        case ASHMEM_SET_NAME:
                ret = set_name(asma, (void __user *)arg);
                break;
        case ASHMEM_GET_NAME:
                ret = get_name(asma, (void __user *)arg);
                break;
        case ASHMEM_SET_SIZE:
                ret = -EINVAL;
                mutex_lock(&ashmem_mutex);
                if (!asma->file) {
                        ret = 0;
                        asma->size = (size_t)arg;
                }
                mutex_unlock(&ashmem_mutex);
                break;
        case ASHMEM_GET_SIZE:
                ret = asma->size;
                break;
        case ASHMEM_SET_PROT_MASK:
                ret = set_prot_mask(asma, arg);
                break;
        case ASHMEM_GET_PROT_MASK:
                ret = asma->prot_mask;
                break;
        case ASHMEM_PIN:
        case ASHMEM_UNPIN:
        case ASHMEM_GET_PIN_STATUS:
                ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg);
                break;
        case ASHMEM_PURGE_ALL_CACHES:
                ret = -EPERM;
                if (capable(CAP_SYS_ADMIN)) {
                        struct shrink_control sc = {
                                .gfp_mask = GFP_KERNEL,
                                .nr_to_scan = LONG_MAX,
                        };
                        ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
                        ashmem_shrink_scan(&ashmem_shrinker, &sc);
                }
                break;
        }

        return ret;
}
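
/*
 * The canonical usage pattern for the pin ioctls above, as an illustrative
 * userspace sketch (assumes <linux/ashmem.h>): unpin a region while its
 * contents are merely cached, then pin it again before reuse and check
 * whether the shrinker purged it in the meantime. regenerate_contents()
 * is a hypothetical application helper:
 *
 *      ioctl(fd, ASHMEM_UNPIN, &pin);  // contents may now be reclaimed
 *      ...
 *      if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
 *              regenerate_contents(p); // hypothetical helper
 */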

/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
                                unsigned long arg)
{
        switch (cmd) {
        case COMPAT_ASHMEM_SET_SIZE:
                cmd = ASHMEM_SET_SIZE;
                break;
        case COMPAT_ASHMEM_SET_PROT_MASK:
                cmd = ASHMEM_SET_PROT_MASK;
                break;
        }
        return ashmem_ioctl(file, cmd, arg);
}
#endif

#ifdef CONFIG_PROC_FS
static void ashmem_show_fdinfo(struct seq_file *m, struct file *file)
{
        struct ashmem_area *asma = file->private_data;

        mutex_lock(&ashmem_mutex);

        if (asma->file)
                seq_printf(m, "inode:\t%ld\n", file_inode(asma->file)->i_ino);

        if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
                seq_printf(m, "name:\t%s\n",
                           asma->name + ASHMEM_NAME_PREFIX_LEN);

        mutex_unlock(&ashmem_mutex);
}
#endif

static const struct file_operations ashmem_fops = {
        .owner = THIS_MODULE,
        .open = ashmem_open,
        .release = ashmem_release,
        .read_iter = ashmem_read_iter,
        .llseek = ashmem_llseek,
        .mmap = ashmem_mmap,
        .unlocked_ioctl = ashmem_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = compat_ashmem_ioctl,
#endif
#ifdef CONFIG_PROC_FS
        .show_fdinfo = ashmem_show_fdinfo,
#endif
};

static struct miscdevice ashmem_misc = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "ashmem",
        .fops = &ashmem_fops,
};

static int __init ashmem_init(void)
{
        int ret = -ENOMEM;

        ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
                                               sizeof(struct ashmem_area),
                                               0, 0, NULL);
        if (!ashmem_area_cachep) {
                pr_err("failed to create slab cache\n");
                goto out;
        }

        ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
                                                sizeof(struct ashmem_range),
                                                0, 0, NULL);
        if (!ashmem_range_cachep) {
                pr_err("failed to create slab cache\n");
                goto out_free1;
        }

        ret = misc_register(&ashmem_misc);
        if (ret) {
                pr_err("failed to register misc device!\n");
                goto out_free2;
        }

        ret = register_shrinker(&ashmem_shrinker);
        if (ret) {
                pr_err("failed to register shrinker!\n");
                goto out_demisc;
        }

        pr_info("initialized\n");

        return 0;

out_demisc:
        misc_deregister(&ashmem_misc);
out_free2:
        kmem_cache_destroy(ashmem_range_cachep);
out_free1:
        kmem_cache_destroy(ashmem_area_cachep);
out:
        return ret;
}
device_initcall(ashmem_init);
