root/arch/arm/xen/p2m.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions:
  1. xen_add_phys_to_mach_entry
  2. __pfn_to_mfn
  3. set_foreign_p2m_mapping
  4. clear_foreign_p2m_mapping
  5. __set_phys_to_machine_multi
  6. __set_phys_to_machine
  7. p2m_init

   1 // SPDX-License-Identifier: GPL-2.0-only
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/page.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>
  20 
/*
 * One node of the pfn -> mfn translation tree: a contiguous run of
 * nr_pages guest frames starting at pfn, mapped to machine frames
 * starting at mfn.
 */
struct xen_p2m_entry {
	unsigned long pfn;		/* first guest pseudo-physical frame */
	unsigned long mfn;		/* first machine frame */
	unsigned long nr_pages;		/* length of the run, in pages */
	struct rb_node rbnode_phys;	/* linkage in phys_to_mach, keyed by pfn */
};

/*
 * Protects phys_to_mach.  Always taken with irqsave/irqrestore, so the
 * tree can be consulted from contexts with interrupts disabled.
 */
static rwlock_t p2m_lock;
struct rb_root phys_to_mach = RB_ROOT;	/* pfn-keyed tree of xen_p2m_entry */
EXPORT_SYMBOL_GPL(phys_to_mach);
  31 
  32 static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
  33 {
  34         struct rb_node **link = &phys_to_mach.rb_node;
  35         struct rb_node *parent = NULL;
  36         struct xen_p2m_entry *entry;
  37         int rc = 0;
  38 
  39         while (*link) {
  40                 parent = *link;
  41                 entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys);
  42 
  43                 if (new->pfn == entry->pfn)
  44                         goto err_out;
  45 
  46                 if (new->pfn < entry->pfn)
  47                         link = &(*link)->rb_left;
  48                 else
  49                         link = &(*link)->rb_right;
  50         }
  51         rb_link_node(&new->rbnode_phys, parent, link);
  52         rb_insert_color(&new->rbnode_phys, &phys_to_mach);
  53         goto out;
  54 
  55 err_out:
  56         rc = -EINVAL;
  57         pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
  58                         __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
  59 out:
  60         return rc;
  61 }
  62 
  63 unsigned long __pfn_to_mfn(unsigned long pfn)
  64 {
  65         struct rb_node *n = phys_to_mach.rb_node;
  66         struct xen_p2m_entry *entry;
  67         unsigned long irqflags;
  68 
  69         read_lock_irqsave(&p2m_lock, irqflags);
  70         while (n) {
  71                 entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
  72                 if (entry->pfn <= pfn &&
  73                                 entry->pfn + entry->nr_pages > pfn) {
  74                         unsigned long mfn = entry->mfn + (pfn - entry->pfn);
  75                         read_unlock_irqrestore(&p2m_lock, irqflags);
  76                         return mfn;
  77                 }
  78                 if (pfn < entry->pfn)
  79                         n = n->rb_left;
  80                 else
  81                         n = n->rb_right;
  82         }
  83         read_unlock_irqrestore(&p2m_lock, irqflags);
  84 
  85         return INVALID_P2M_ENTRY;
  86 }
  87 EXPORT_SYMBOL_GPL(__pfn_to_mfn);
  88 
  89 int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
  90                             struct gnttab_map_grant_ref *kmap_ops,
  91                             struct page **pages, unsigned int count)
  92 {
  93         int i;
  94 
  95         for (i = 0; i < count; i++) {
  96                 if (map_ops[i].status)
  97                         continue;
  98                 set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
  99                                     map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT);
 100         }
 101 
 102         return 0;
 103 }
 104 EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
 105 
 106 int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
 107                               struct gnttab_unmap_grant_ref *kunmap_ops,
 108                               struct page **pages, unsigned int count)
 109 {
 110         int i;
 111 
 112         for (i = 0; i < count; i++) {
 113                 set_phys_to_machine(unmap_ops[i].host_addr >> XEN_PAGE_SHIFT,
 114                                     INVALID_P2M_ENTRY);
 115         }
 116 
 117         return 0;
 118 }
 119 EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
 120 
 121 bool __set_phys_to_machine_multi(unsigned long pfn,
 122                 unsigned long mfn, unsigned long nr_pages)
 123 {
 124         int rc;
 125         unsigned long irqflags;
 126         struct xen_p2m_entry *p2m_entry;
 127         struct rb_node *n = phys_to_mach.rb_node;
 128 
 129         if (mfn == INVALID_P2M_ENTRY) {
 130                 write_lock_irqsave(&p2m_lock, irqflags);
 131                 while (n) {
 132                         p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
 133                         if (p2m_entry->pfn <= pfn &&
 134                                         p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
 135                                 rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
 136                                 write_unlock_irqrestore(&p2m_lock, irqflags);
 137                                 kfree(p2m_entry);
 138                                 return true;
 139                         }
 140                         if (pfn < p2m_entry->pfn)
 141                                 n = n->rb_left;
 142                         else
 143                                 n = n->rb_right;
 144                 }
 145                 write_unlock_irqrestore(&p2m_lock, irqflags);
 146                 return true;
 147         }
 148 
 149         p2m_entry = kzalloc(sizeof(*p2m_entry), GFP_NOWAIT);
 150         if (!p2m_entry)
 151                 return false;
 152 
 153         p2m_entry->pfn = pfn;
 154         p2m_entry->nr_pages = nr_pages;
 155         p2m_entry->mfn = mfn;
 156 
 157         write_lock_irqsave(&p2m_lock, irqflags);
 158         rc = xen_add_phys_to_mach_entry(p2m_entry);
 159         if (rc < 0) {
 160                 write_unlock_irqrestore(&p2m_lock, irqflags);
 161                 kfree(p2m_entry);
 162                 return false;
 163         }
 164         write_unlock_irqrestore(&p2m_lock, irqflags);
 165         return true;
 166 }
 167 EXPORT_SYMBOL_GPL(__set_phys_to_machine_multi);
 168 
/*
 * Single-page convenience wrapper: maps exactly one pfn to mfn via
 * __set_phys_to_machine_multi() with nr_pages == 1.
 */
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	return __set_phys_to_machine_multi(pfn, mfn, 1);
}
EXPORT_SYMBOL_GPL(__set_phys_to_machine);
 174 
/*
 * One-time initialisation of p2m_lock, run early in boot via
 * arch_initcall().  Always succeeds.
 */
static int p2m_init(void)
{
	rwlock_init(&p2m_lock);
	return 0;
}
arch_initcall(p2m_init);

/* [<][>][^][v][top][bottom][index][help] */