root/arch/riscv/kernel/vdso.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. vdso_init
  2. arch_setup_additional_pages
  3. arch_vma_name

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
   4  *                    <benh@kernel.crashing.org>
   5  * Copyright (C) 2012 ARM Limited
   6  * Copyright (C) 2015 Regents of the University of California
   7  */
   8 
   9 #include <linux/elf.h>
  10 #include <linux/mm.h>
  11 #include <linux/slab.h>
  12 #include <linux/binfmts.h>
  13 #include <linux/err.h>
  14 
  15 #include <asm/vdso.h>
  16 
/* Start/end of the vDSO code image, provided by the vDSO linker script. */
extern char vdso_start[], vdso_end[];

/* Number of pages in the vDSO code image (excludes the data page). */
static unsigned int vdso_pages;
/* vdso_pages code pages followed by one data page; built in vdso_init(). */
static struct page **vdso_pagelist;

/*
 * The vDSO data page.
 */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
static struct vdso_data *vdso_data = &vdso_data_store.data;
  30 
  31 static int __init vdso_init(void)
  32 {
  33         unsigned int i;
  34 
  35         vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
  36         vdso_pagelist =
  37                 kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
  38         if (unlikely(vdso_pagelist == NULL)) {
  39                 pr_err("vdso: pagelist allocation failed\n");
  40                 return -ENOMEM;
  41         }
  42 
  43         for (i = 0; i < vdso_pages; i++) {
  44                 struct page *pg;
  45 
  46                 pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));
  47                 vdso_pagelist[i] = pg;
  48         }
  49         vdso_pagelist[i] = virt_to_page(vdso_data);
  50 
  51         return 0;
  52 }
  53 arch_initcall(vdso_init);
  54 
  55 int arch_setup_additional_pages(struct linux_binprm *bprm,
  56         int uses_interp)
  57 {
  58         struct mm_struct *mm = current->mm;
  59         unsigned long vdso_base, vdso_len;
  60         int ret;
  61 
  62         vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
  63 
  64         down_write(&mm->mmap_sem);
  65         vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
  66         if (IS_ERR_VALUE(vdso_base)) {
  67                 ret = vdso_base;
  68                 goto end;
  69         }
  70 
  71         /*
  72          * Put vDSO base into mm struct. We need to do this before calling
  73          * install_special_mapping or the perf counter mmap tracking code
  74          * will fail to recognise it as a vDSO (since arch_vma_name fails).
  75          */
  76         mm->context.vdso = (void *)vdso_base;
  77 
  78         ret = install_special_mapping(mm, vdso_base, vdso_len,
  79                 (VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
  80                 vdso_pagelist);
  81 
  82         if (unlikely(ret))
  83                 mm->context.vdso = NULL;
  84 
  85 end:
  86         up_write(&mm->mmap_sem);
  87         return ret;
  88 }
  89 
  90 const char *arch_vma_name(struct vm_area_struct *vma)
  91 {
  92         if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
  93                 return "[vdso]";
  94         return NULL;
  95 }

/* [<][>][^][v][top][bottom][index][help] */