root/arch/x86/mm/pat_rbtree.c


DEFINITIONS

This source file includes the following definitions.
  1. is_node_overlap
  2. get_subtree_max_end
  3. RB_DECLARE_CALLBACKS_MAX
  4. memtype_rb_lowest_match
  5. memtype_rb_match
  6. memtype_rb_check_conflict
  7. memtype_rb_insert
  8. rbt_memtype_check_insert
  9. rbt_memtype_erase
  10. rbt_memtype_lookup
  11. rbt_memtype_copy_nth_element

// SPDX-License-Identifier: GPL-2.0
/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Interval tree (augmented rbtree) used to store the PAT memory type
 * reservations.
 */

#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/rbtree_augmented.h>
#include <linux/sched.h>
#include <linux/gfp.h>

#include <asm/pgtable.h>
#include <asm/pat.h>

#include "pat_internal.h"

/*
 * The memtype tree keeps track of the memory type used for specific
 * physical memory areas. Without proper tracking, conflicting memory
 * types in different mappings can cause CPU cache corruption.
 *
 * The tree is an interval tree (augmented rbtree) ordered on starting
 * address. It can contain multiple entries for different regions which
 * overlap; all such aliases must have the same cache attributes.
 *
 * memtype_lock protects the rbtree.
 */

static struct rb_root memtype_rbroot = RB_ROOT;

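/* Return 1 if [start, end) intersects the range covered by @node, 0 otherwise. */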
static int is_node_overlap(struct memtype *node, u64 start, u64 end)
{
        if (node->start >= end || node->end <= start)
                return 0;

        return 1;
}

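/*
 * Return the cached maximum ->end of the subtree rooted at @node,
 * or 0 for an empty subtree.
 */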
static u64 get_subtree_max_end(struct rb_node *node)
{
        u64 ret = 0;
        if (node) {
                struct memtype *data = rb_entry(node, struct memtype, rb);
                ret = data->subtree_max_end;
        }
        return ret;
}

#define NODE_END(node) ((node)->end)

RB_DECLARE_CALLBACKS_MAX(static, memtype_rb_augment_cb,
                         struct memtype, rb, u64, subtree_max_end, NODE_END)

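/*
 * The callbacks generated above keep ->subtree_max_end equal to the largest
 * ->end in each node's subtree, so the lookups below can skip any subtree
 * whose ranges all end at or before the start address of interest.
 */
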
/* Find the first (lowest start addr) overlapping range from rb tree */
static struct memtype *memtype_rb_lowest_match(struct rb_root *root,
                                u64 start, u64 end)
{
        struct rb_node *node = root->rb_node;
        struct memtype *last_lower = NULL;

        while (node) {
                struct memtype *data = rb_entry(node, struct memtype, rb);

                if (get_subtree_max_end(node->rb_left) > start) {
                        /* Lowest overlap if any must be on left side */
                        node = node->rb_left;
                } else if (is_node_overlap(data, start, end)) {
                        last_lower = data;
                        break;
                } else if (start >= data->start) {
                        /* Lowest overlap if any must be on right side */
                        node = node->rb_right;
                } else {
                        break;
                }
        }
        return last_lower; /* Returns NULL if there is no overlap */
}

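/*
 * Match types used by memtype_rb_match(): EXACT_MATCH finds an entry covering
 * exactly [start, end) (whole-node removal, the munmap case), END_MATCH finds
 * an entry that starts before @start but ends exactly at @end (shrinking an
 * entry from the end, the mremap case).
 */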
enum {
        MEMTYPE_EXACT_MATCH     = 0,
        MEMTYPE_END_MATCH       = 1
};

static struct memtype *memtype_rb_match(struct rb_root *root,
                                u64 start, u64 end, int match_type)
{
        struct memtype *match;

        match = memtype_rb_lowest_match(root, start, end);
        while (match != NULL && match->start < end) {
                struct rb_node *node;

                if ((match_type == MEMTYPE_EXACT_MATCH) &&
                    (match->start == start) && (match->end == end))
                        return match;

                if ((match_type == MEMTYPE_END_MATCH) &&
                    (match->start < start) && (match->end == end))
                        return match;

                node = rb_next(&match->rb);
                if (node)
                        match = rb_entry(node, struct memtype, rb);
                else
                        match = NULL;
        }

        return NULL; /* Returns NULL if there is no match */
}

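/*
 * Check whether a new reservation of @reqtype over [start, end) is compatible
 * with the existing entries.  With @newtype == NULL, every overlapping entry
 * must already be of @reqtype; otherwise all overlapping entries must agree
 * on a single type, which is reported back through @newtype (@reqtype if
 * nothing overlaps).  Returns 0 on success, -EBUSY on a conflict.
 */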
static int memtype_rb_check_conflict(struct rb_root *root,
                                u64 start, u64 end,
                                enum page_cache_mode reqtype,
                                enum page_cache_mode *newtype)
{
        struct rb_node *node;
        struct memtype *match;
        enum page_cache_mode found_type = reqtype;

        match = memtype_rb_lowest_match(&memtype_rbroot, start, end);
        if (match == NULL)
                goto success;

        if (match->type != found_type && newtype == NULL)
                goto failure;

        dprintk("Overlap at 0x%Lx-0x%Lx\n", match->start, match->end);
        found_type = match->type;

        node = rb_next(&match->rb);
        while (node) {
                match = rb_entry(node, struct memtype, rb);

                if (match->start >= end) /* Checked all possible matches */
                        goto success;

                if (is_node_overlap(match, start, end) &&
                    match->type != found_type) {
                        goto failure;
                }

                node = rb_next(&match->rb);
        }
success:
        if (newtype)
                *newtype = found_type;

        return 0;

failure:
        pr_info("x86/PAT: %s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
                current->comm, current->pid, start, end,
                cattr_name(found_type), cattr_name(match->type));
        return -EBUSY;
}

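/*
 * Standard rbtree insertion keyed on ->start.  ->subtree_max_end is grown
 * along the descent path, and the augmented callbacks keep it consistent
 * across the rebalancing done by rb_insert_augmented().
 */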
static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata)
{
        struct rb_node **node = &(root->rb_node);
        struct rb_node *parent = NULL;

        while (*node) {
                struct memtype *data = rb_entry(*node, struct memtype, rb);

                parent = *node;
                if (data->subtree_max_end < newdata->end)
                        data->subtree_max_end = newdata->end;
                if (newdata->start <= data->start)
                        node = &((*node)->rb_left);
                else if (newdata->start > data->start)
                        node = &((*node)->rb_right);
        }

        newdata->subtree_max_end = newdata->end;
        rb_link_node(&newdata->rb, parent, node);
        rb_insert_augmented(&newdata->rb, root, &memtype_rb_augment_cb);
}

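/*
 * Insert @new unless it conflicts with existing reservations.  On success,
 * when @ret_type is not NULL, the type shared with any overlapping entries
 * is returned through it and written back to @new->type.  Callers serialize
 * on memtype_lock, as noted in the header comment above.
 */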
int rbt_memtype_check_insert(struct memtype *new,
                             enum page_cache_mode *ret_type)
{
        int err = 0;

        err = memtype_rb_check_conflict(&memtype_rbroot, new->start, new->end,
                                                new->type, ret_type);

        if (!err) {
                if (ret_type)
                        new->type = *ret_type;

                new->subtree_max_end = new->end;
                memtype_rb_insert(&memtype_rbroot, new);
        }
        return err;
}

struct memtype *rbt_memtype_erase(u64 start, u64 end)
{
        struct memtype *data;

        /*
         * Since the memtype_rbroot tree allows overlapping ranges,
         * rbt_memtype_erase() checks with EXACT_MATCH first, i.e. free
         * a whole node for the munmap case.  If no such entry is found,
         * it then checks with END_MATCH, i.e. shrink the size of a node
         * from the end for the mremap case.
         */
        data = memtype_rb_match(&memtype_rbroot, start, end,
                                MEMTYPE_EXACT_MATCH);
        if (!data) {
                data = memtype_rb_match(&memtype_rbroot, start, end,
                                        MEMTYPE_END_MATCH);
                if (!data)
                        return ERR_PTR(-EINVAL);
        }

        if (data->start == start) {
                /* munmap: erase this node */
                rb_erase_augmented(&data->rb, &memtype_rbroot,
                                        &memtype_rb_augment_cb);
        } else {
                /* mremap: update the end value of this node */
                rb_erase_augmented(&data->rb, &memtype_rbroot,
                                        &memtype_rb_augment_cb);
                data->end = start;
                data->subtree_max_end = data->end;
                memtype_rb_insert(&memtype_rbroot, data);
                return NULL;
        }

        return data;
}

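/* Return the lowest-starting entry overlapping the single page at @addr, or NULL. */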
struct memtype *rbt_memtype_lookup(u64 addr)
{
        return memtype_rb_lowest_match(&memtype_rbroot, addr, addr + PAGE_SIZE);
}

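/*
 * Copy the @pos-th entry (1-based, in ascending start order) into @out for
 * the debugfs interface; returns 0 on success and 1 once @pos is past the
 * last entry.
 */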
#if defined(CONFIG_DEBUG_FS)
int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos)
{
        struct rb_node *node;
        int i = 1;

        node = rb_first(&memtype_rbroot);
        while (node && pos != i) {
                node = rb_next(node);
                i++;
        }

        if (node) { /* pos == i */
                struct memtype *this = rb_entry(node, struct memtype, rb);
                *out = *this;
                return 0;
        } else {
                return 1;
        }
}
#endif
