root/drivers/xen/mem-reservation.c


DEFINITIONS

This source file includes the following definitions:
  1. __xenmem_reservation_va_mapping_update
  2. __xenmem_reservation_va_mapping_reset
  3. xenmem_reservation_increase
  4. xenmem_reservation_decrease

// SPDX-License-Identifier: GPL-2.0

/******************************************************************************
 * Xen memory reservation utilities.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <asm/xen/hypercall.h>

#include <xen/interface/memory.h>
#include <xen/mem-reservation.h>
#include <linux/moduleparam.h>

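/*
 * Scrub pages before returning them to Xen so that their previous
 * contents are not leaked to the hypervisor or other domains. The
 * default comes from CONFIG_XEN_SCRUB_PAGES_DEFAULT; as a core_param
 * it can be overridden with "xen_scrub_pages=<bool>" on the kernel
 * command line.
 */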
bool __read_mostly xen_scrub_pages = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT);
core_param(xen_scrub_pages, xen_scrub_pages, bool, 0);

/*
 * Use one extent per PAGE_SIZE to avoid breaking the page down into
 * multiple frames.
 */
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)

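/*
 * For example, on x86 XEN_PAGE_SIZE == PAGE_SIZE, so XEN_PFN_PER_PAGE == 1
 * and EXTENT_ORDER evaluates to 0 (one 4KiB extent per page); on an Arm
 * guest with 64KiB kernel pages over 4KiB Xen pages, XEN_PFN_PER_PAGE is 16
 * and EXTENT_ORDER is 4.
 */
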
#ifdef CONFIG_XEN_HAVE_PVMMU
void __xenmem_reservation_va_mapping_update(unsigned long count,
                                            struct page **pages,
                                            xen_pfn_t *frames)
{
        int i;

        for (i = 0; i < count; i++) {
                struct page *page = pages[i];
                unsigned long pfn;

                BUG_ON(!page);
                pfn = page_to_pfn(page);

                /*
                 * We don't support PV MMU when Linux and Xen are using
                 * different page granularity.
                 */
                BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

                set_phys_to_machine(pfn, frames[i]);

                /* Link back into the page tables if not highmem. */
                if (!PageHighMem(page)) {
                        int ret;

                        ret = HYPERVISOR_update_va_mapping(
                                        (unsigned long)__va(pfn << PAGE_SHIFT),
                                        mfn_pte(frames[i], PAGE_KERNEL),
                                        0);
                        BUG_ON(ret);
                }
        }
}
EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_update);

void __xenmem_reservation_va_mapping_reset(unsigned long count,
                                           struct page **pages)
{
        int i;

        for (i = 0; i < count; i++) {
                struct page *page = pages[i];
                unsigned long pfn = page_to_pfn(page);

                /*
                 * We don't support PV MMU when Linux and Xen are using
                 * different page granularity.
                 */
                BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

                if (!PageHighMem(page)) {
                        int ret;

                        ret = HYPERVISOR_update_va_mapping(
                                        (unsigned long)__va(pfn << PAGE_SHIFT),
                                        __pte_ma(0), 0);
                        BUG_ON(ret);
                }
                __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
        }
}
EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_reset);
#endif /* CONFIG_XEN_HAVE_PVMMU */

/* @frames is an array of PFNs */
int xenmem_reservation_increase(int count, xen_pfn_t *frames)
{
        struct xen_memory_reservation reservation = {
                .address_bits = 0,
                .extent_order = EXTENT_ORDER,
                .domid        = DOMID_SELF
        };

        /* XENMEM_populate_physmap requires a PFN based on Xen granularity. */
        set_xen_guest_handle(reservation.extent_start, frames);
        reservation.nr_extents = count;
        return HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
}
EXPORT_SYMBOL_GPL(xenmem_reservation_increase);

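/*
 * A minimal sketch of the increase path as a balloon-style caller might
 * drive it, under the assumption that @pages holds previously
 * ballooned-out pages tracked by the caller ("example_populate_pages"
 * is a hypothetical name, not part of this API): fill @frames with the
 * pages' Xen PFNs, ask Xen to populate them, then restore P2M/VA
 * mappings for the extents actually granted.
 */
static int __maybe_unused example_populate_pages(struct page **pages,
                                                 xen_pfn_t *frames,
                                                 int nr_pages)
{
        int i, rc;

        for (i = 0; i < nr_pages; i++)
                frames[i] = page_to_xen_pfn(pages[i]);

        /* Returns the number of extents populated, or a negative error. */
        rc = xenmem_reservation_increase(nr_pages, frames);
        if (rc <= 0)
                return rc ? rc : -ENOMEM;

        /* No-op on auto-translated guests; updates P2M/VA on PV. */
        xenmem_reservation_va_mapping_update(rc, pages, frames);

        return rc;
}
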
/* @frames is an array of GFNs */
int xenmem_reservation_decrease(int count, xen_pfn_t *frames)
{
        struct xen_memory_reservation reservation = {
                .address_bits = 0,
                .extent_order = EXTENT_ORDER,
                .domid        = DOMID_SELF
        };

        /* XENMEM_decrease_reservation requires a GFN. */
        set_xen_guest_handle(reservation.extent_start, frames);
        reservation.nr_extents = count;
        return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}
EXPORT_SYMBOL_GPL(xenmem_reservation_decrease);
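
/*
 * A matching sketch for the decrease path, with the same hypothetical
 * caller-managed @pages pool ("example_return_pages" is an illustrative
 * name): scrub each page (honouring xen_scrub_pages), tear down its
 * VA/P2M mapping, then hand the underlying GFNs back to Xen.
 */
static int __maybe_unused example_return_pages(struct page **pages,
                                               xen_pfn_t *frames,
                                               int nr_pages)
{
        int i;

        for (i = 0; i < nr_pages; i++) {
                xenmem_reservation_scrub_page(pages[i]);
                frames[i] = xen_page_to_gfn(pages[i]);
        }

        /* No-op on auto-translated guests; clears P2M/VA on PV. */
        xenmem_reservation_va_mapping_reset(nr_pages, pages);

        /* Returns the number of extents released, or a negative error. */
        return xenmem_reservation_decrease(nr_pages, frames);
}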
