arch/powerpc/mm/drmem.c


DEFINITIONS

This source file includes the following definitions:
  1. drmem_lmb_memory_max
  2. drmem_lmb_flags
  3. clone_property
  4. drmem_update_dt_v1
  5. init_drconf_v2_cell
  6. drmem_update_dt_v2
  7. drmem_update_dt
  8. read_drconf_v1_cell
  9. __walk_drmem_v1_lmbs
  10. read_drconf_v2_cell
  11. __walk_drmem_v2_lmbs
  12. walk_drmem_lmbs_early
  13. init_drmem_lmb_size
  14. of_get_usable_memory
  15. walk_drmem_lmbs
  16. init_drmem_v1_lmbs
  17. init_drmem_v2_lmbs
  18. drmem_init

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Dynamic reconfiguration memory support
 *
 * Copyright 2017 IBM Corporation
 */

#define pr_fmt(fmt) "drmem: " fmt

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <asm/prom.h>
#include <asm/drmem.h>

static struct drmem_lmb_info __drmem_info;
struct drmem_lmb_info *drmem_info = &__drmem_info;

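Everything below operates on the flat LMB array behind drmem_info. As orientation only, here is a sketch of the shapes these helpers assume, inferred from how the fields are used in this file; the authoritative definitions (including the exact field types, the flag constants, and the NUMA node id that lmb_set_nid() maintains on some configurations) live in asm/drmem.h.

/* Sketch only -- mirrors struct drmem_lmb / struct drmem_lmb_info. */
struct drmem_lmb_sketch {
        u64     base_addr;      /* physical start address of the LMB */
        u32     drc_index;      /* DRC index used for hotplug requests */
        u32     aa_index;       /* index into ibm,associativity-lookup-arrays */
        u32     flags;          /* DRCONF flags plus the internal reserved bit */
};

struct drmem_lmb_info_sketch {
        struct drmem_lmb        *lmbs;          /* array of n_lmbs entries */
        int                     n_lmbs;
        u64                     lmb_size;       /* uniform LMB size in bytes */
};

/* for_each_drmem_lmb(lmb) visits lmbs[0] .. lmbs[n_lmbs - 1] in order. */
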
u64 drmem_lmb_memory_max(void)
{
        struct drmem_lmb *last_lmb;

        last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1];
        return last_lmb->base_addr + drmem_lmb_size();
}

static u32 drmem_lmb_flags(struct drmem_lmb *lmb)
{
        /*
         * Return the value of the lmb flags field minus the reserved
         * bit used internally for hotplug processing.
         */
        return lmb->flags & ~DRMEM_LMB_RESERVED;
}

static struct property *clone_property(struct property *prop, u32 prop_sz)
{
        struct property *new_prop;

        new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
        if (!new_prop)
                return NULL;

        new_prop->name = kstrdup(prop->name, GFP_KERNEL);
        new_prop->value = kzalloc(prop_sz, GFP_KERNEL);
        if (!new_prop->name || !new_prop->value) {
                kfree(new_prop->name);
                kfree(new_prop->value);
                kfree(new_prop);
                return NULL;
        }

        new_prop->length = prop_sz;
#if defined(CONFIG_OF_DYNAMIC)
        of_property_set_flag(new_prop, OF_DYNAMIC);
#endif
        return new_prop;
}

static int drmem_update_dt_v1(struct device_node *memory,
                              struct property *prop)
{
        struct property *new_prop;
        struct of_drconf_cell_v1 *dr_cell;
        struct drmem_lmb *lmb;
        u32 *p;

        new_prop = clone_property(prop, prop->length);
        if (!new_prop)
                return -1;

        p = new_prop->value;
        *p++ = cpu_to_be32(drmem_info->n_lmbs);

        dr_cell = (struct of_drconf_cell_v1 *)p;

        for_each_drmem_lmb(lmb) {
                dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
                dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
                dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
                dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));

                dr_cell++;
        }

        of_update_property(memory, new_prop);
        return 0;
}

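drmem_update_dt_v1() regenerates "ibm,dynamic-memory" as a single __be32 LMB count followed by one fixed-size of_drconf_cell_v1 record per LMB (base address, DRC index, a reserved cell left zeroed by clone_property()'s kzalloc, associativity index, flags). A minimal sketch of the resulting property size; the helper name is hypothetical:

/* Hypothetical helper, not part of drmem.c. */
static u32 drconf_v1_prop_size(u32 n_lmbs)
{
        /* one count cell plus one fixed-size record per LMB */
        return sizeof(__be32) + n_lmbs * sizeof(struct of_drconf_cell_v1);
}
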
static void init_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
                                struct drmem_lmb *lmb)
{
        dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
        dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
        dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
        dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));
}

static int drmem_update_dt_v2(struct device_node *memory,
                              struct property *prop)
{
        struct property *new_prop;
        struct of_drconf_cell_v2 *dr_cell;
        struct drmem_lmb *lmb, *prev_lmb;
        u32 lmb_sets, prop_sz, seq_lmbs;
        u32 *p;

        /* First pass, determine how many LMB sets are needed. */
        lmb_sets = 0;
        prev_lmb = NULL;
        for_each_drmem_lmb(lmb) {
                if (!prev_lmb) {
                        prev_lmb = lmb;
                        lmb_sets++;
                        continue;
                }

                if (prev_lmb->aa_index != lmb->aa_index ||
                    drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb))
                        lmb_sets++;

                prev_lmb = lmb;
        }

        prop_sz = lmb_sets * sizeof(*dr_cell) + sizeof(__be32);
        new_prop = clone_property(prop, prop_sz);
        if (!new_prop)
                return -1;

        p = new_prop->value;
        *p++ = cpu_to_be32(lmb_sets);

        dr_cell = (struct of_drconf_cell_v2 *)p;

        /* Second pass, populate the LMB set data */
        prev_lmb = NULL;
        seq_lmbs = 0;
        for_each_drmem_lmb(lmb) {
                if (prev_lmb == NULL) {
                        /* Start of first LMB set */
                        prev_lmb = lmb;
                        init_drconf_v2_cell(dr_cell, lmb);
                        seq_lmbs++;
                        continue;
                }

                if (prev_lmb->aa_index != lmb->aa_index ||
                    drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb)) {
                        /* end of one set, start of another */
                        dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
                        dr_cell++;

                        init_drconf_v2_cell(dr_cell, lmb);
                        seq_lmbs = 1;
                } else {
                        seq_lmbs++;
                }

                prev_lmb = lmb;
        }

        /* close out last LMB set */
        dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
        of_update_property(memory, new_prop);
        return 0;
}

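To make the grouping rule concrete, a small worked example with made-up values:

/*
 * Worked example (made-up values). With four LMBs in the array:
 *
 *   lmbs[0]: aa_index = 2, flags = assigned
 *   lmbs[1]: aa_index = 2, flags = assigned
 *   lmbs[2]: aa_index = 5, flags = assigned
 *   lmbs[3]: aa_index = 5, flags = assigned
 *
 * the first pass counts lmb_sets = 2, and the second pass emits two
 * of_drconf_cell_v2 records with seq_lmbs = 2 each, so the v2 property
 * holds two records where v1 would have needed four.
 */
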
int drmem_update_dt(void)
{
        struct device_node *memory;
        struct property *prop;
        int rc = -1;

        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (!memory)
                return -1;

        prop = of_find_property(memory, "ibm,dynamic-memory", NULL);
        if (prop) {
                rc = drmem_update_dt_v1(memory, prop);
        } else {
                prop = of_find_property(memory, "ibm,dynamic-memory-v2", NULL);
                if (prop)
                        rc = drmem_update_dt_v2(memory, prop);
        }

        of_node_put(memory);
        return rc;
}

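drmem_update_dt() is the entry point used to push in-memory LMB state back into the live device tree after a hotplug operation. A minimal, hypothetical caller sketch; the function name is made up and the DRMEM_LMB_ASSIGNED spelling is assumed from asm/drmem.h:

/* Hypothetical caller sketch -- not part of drmem.c. */
static int example_commit_lmb_add(struct drmem_lmb *lmb)
{
        lmb->flags |= DRMEM_LMB_ASSIGNED;       /* flag assumed from asm/drmem.h */

        /* regenerate ibm,dynamic-memory(-v2) from the updated array */
        return drmem_update_dt();
}
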
static void __init read_drconf_v1_cell(struct drmem_lmb *lmb,
                                       const __be32 **prop)
{
        const __be32 *p = *prop;

        lmb->base_addr = dt_mem_next_cell(dt_root_addr_cells, &p);
        lmb->drc_index = of_read_number(p++, 1);

        p++; /* skip reserved field */

        lmb->aa_index = of_read_number(p++, 1);
        lmb->flags = of_read_number(p++, 1);

        *prop = p;
}

static void __init __walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm,
                        void (*func)(struct drmem_lmb *, const __be32 **))
{
        struct drmem_lmb lmb;
        u32 i, n_lmbs;

        n_lmbs = of_read_number(prop++, 1);
        if (n_lmbs == 0)
                return;

        for (i = 0; i < n_lmbs; i++) {
                read_drconf_v1_cell(&lmb, &prop);
                func(&lmb, &usm);
        }
}

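Both __walk_drmem_v1_lmbs() and __walk_drmem_v2_lmbs() hand each decoded LMB to a caller-supplied callback, together with a cursor into the optional linux,drconf-usable-memory data. A minimal, hypothetical callback sketch that only registers assigned LMBs with memblock; the real early-boot and NUMA callbacks also consume *usm:

/* Hypothetical walker callback -- not part of drmem.c. */
static void __init example_add_lmb(struct drmem_lmb *lmb, const __be32 **usm)
{
        /* DRMEM_LMB_ASSIGNED is assumed from asm/drmem.h */
        if (!(lmb->flags & DRMEM_LMB_ASSIGNED))
                return;

        memblock_add(lmb->base_addr, drmem_lmb_size());
}
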
static void __init read_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
                                       const __be32 **prop)
{
        const __be32 *p = *prop;

        dr_cell->seq_lmbs = of_read_number(p++, 1);
        dr_cell->base_addr = dt_mem_next_cell(dt_root_addr_cells, &p);
        dr_cell->drc_index = of_read_number(p++, 1);
        dr_cell->aa_index = of_read_number(p++, 1);
        dr_cell->flags = of_read_number(p++, 1);

        *prop = p;
}

static void __init __walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm,
                        void (*func)(struct drmem_lmb *, const __be32 **))
{
        struct of_drconf_cell_v2 dr_cell;
        struct drmem_lmb lmb;
        u32 i, j, lmb_sets;

        lmb_sets = of_read_number(prop++, 1);
        if (lmb_sets == 0)
                return;

        for (i = 0; i < lmb_sets; i++) {
                read_drconf_v2_cell(&dr_cell, &prop);

                for (j = 0; j < dr_cell.seq_lmbs; j++) {
                        lmb.base_addr = dr_cell.base_addr;
                        dr_cell.base_addr += drmem_lmb_size();

                        lmb.drc_index = dr_cell.drc_index;
                        dr_cell.drc_index++;

                        lmb.aa_index = dr_cell.aa_index;
                        lmb.flags = dr_cell.flags;

                        func(&lmb, &usm);
                }
        }
}

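The inner loop above expands one v2 set into seq_lmbs synthetic LMBs, stepping the base address by one LMB size and the DRC index by one on each iteration. A worked example with made-up numbers, assuming a 256 MB LMB size:

/*
 * Worked example (made-up values, 256 MB LMB size). A v2 cell with
 *
 *   seq_lmbs  = 3
 *   base_addr = 0x10000000
 *   drc_index = 0x80000002
 *
 * is handed to func() as three LMBs:
 *
 *   base 0x10000000, drc 0x80000002
 *   base 0x20000000, drc 0x80000003
 *   base 0x30000000, drc 0x80000004
 *
 * all sharing the cell's aa_index and flags.
 */
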
#ifdef CONFIG_PPC_PSERIES
void __init walk_drmem_lmbs_early(unsigned long node,
                        void (*func)(struct drmem_lmb *, const __be32 **))
{
        const __be32 *prop, *usm;
        int len;

        prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);
        if (!prop || len < dt_root_size_cells * sizeof(__be32))
                return;

        drmem_info->lmb_size = dt_mem_next_cell(dt_root_size_cells, &prop);

        usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory", &len);

        prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &len);
        if (prop) {
                __walk_drmem_v1_lmbs(prop, usm, func);
        } else {
                prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory-v2",
                                           &len);
                if (prop)
                        __walk_drmem_v2_lmbs(prop, usm, func);
        }

        memblock_dump_all();
}

#endif

static int __init init_drmem_lmb_size(struct device_node *dn)
{
        const __be32 *prop;
        int len;

        if (drmem_info->lmb_size)
                return 0;

        prop = of_get_property(dn, "ibm,lmb-size", &len);
        if (!prop || len < dt_root_size_cells * sizeof(__be32)) {
                pr_info("Could not determine LMB size\n");
                return -1;
        }

        drmem_info->lmb_size = dt_mem_next_cell(dt_root_size_cells, &prop);
        return 0;
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const __be32 *of_get_usable_memory(struct device_node *dn)
{
        const __be32 *prop;
        u32 len;

        prop = of_get_property(dn, "linux,drconf-usable-memory", &len);
        if (!prop || len < sizeof(unsigned int))
                return NULL;

        return prop;
}

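When this property is present, each walker callback is expected to advance *usm past the usable-memory data for the LMB it was invoked for. As a rough, assumed description of the layout written by kexec-tools (cell widths deliberately omitted):

/*
 * Assumed layout, per LMB in walk order:
 *
 *   [ nr_usable_ranges ]  then  nr_usable_ranges x [ base | size ]
 *
 * A count of zero means the LMB contributes no usable memory to the
 * kdump kernel.
 */
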
void __init walk_drmem_lmbs(struct device_node *dn,
                            void (*func)(struct drmem_lmb *, const __be32 **))
{
        const __be32 *prop, *usm;

        if (init_drmem_lmb_size(dn))
                return;

        usm = of_get_usable_memory(dn);

        prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
        if (prop) {
                __walk_drmem_v1_lmbs(prop, usm, func);
        } else {
                prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
                if (prop)
                        __walk_drmem_v2_lmbs(prop, usm, func);
        }
}

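walk_drmem_lmbs() is the unflattened-tree counterpart of walk_drmem_lmbs_early(); the powerpc NUMA setup is its main in-tree user. A minimal, hypothetical usage sketch, reusing the callback sketched after __walk_drmem_v1_lmbs():

/* Hypothetical caller sketch -- not part of drmem.c. */
static void __init example_scan_drconf_memory(void)
{
        struct device_node *dn;

        dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (!dn)
                return;

        walk_drmem_lmbs(dn, example_add_lmb);
        of_node_put(dn);
}
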
static void __init init_drmem_v1_lmbs(const __be32 *prop)
{
        struct drmem_lmb *lmb;

        drmem_info->n_lmbs = of_read_number(prop++, 1);
        if (drmem_info->n_lmbs == 0)
                return;

        drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
                                   GFP_KERNEL);
        if (!drmem_info->lmbs)
                return;

        for_each_drmem_lmb(lmb) {
                read_drconf_v1_cell(lmb, &prop);
                lmb_set_nid(lmb);
        }
}

static void __init init_drmem_v2_lmbs(const __be32 *prop)
{
        struct drmem_lmb *lmb;
        struct of_drconf_cell_v2 dr_cell;
        const __be32 *p;
        u32 i, j, lmb_sets;
        int lmb_index;

        lmb_sets = of_read_number(prop++, 1);
        if (lmb_sets == 0)
                return;

        /* first pass, calculate the number of LMBs */
        p = prop;
        for (i = 0; i < lmb_sets; i++) {
                read_drconf_v2_cell(&dr_cell, &p);
                drmem_info->n_lmbs += dr_cell.seq_lmbs;
        }

        drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
                                   GFP_KERNEL);
        if (!drmem_info->lmbs)
                return;

        /* second pass, read in the LMB information */
        lmb_index = 0;
        p = prop;

        for (i = 0; i < lmb_sets; i++) {
                read_drconf_v2_cell(&dr_cell, &p);

                for (j = 0; j < dr_cell.seq_lmbs; j++) {
                        lmb = &drmem_info->lmbs[lmb_index++];

                        lmb->base_addr = dr_cell.base_addr;
                        dr_cell.base_addr += drmem_info->lmb_size;

                        lmb->drc_index = dr_cell.drc_index;
                        dr_cell.drc_index++;

                        lmb->aa_index = dr_cell.aa_index;
                        lmb->flags = dr_cell.flags;

                        lmb_set_nid(lmb);
                }
        }
}

static int __init drmem_init(void)
{
        struct device_node *dn;
        const __be32 *prop;

        dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (!dn) {
                pr_info("No dynamic reconfiguration memory found\n");
                return 0;
        }

        if (init_drmem_lmb_size(dn)) {
                of_node_put(dn);
                return 0;
        }

        prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
        if (prop) {
                init_drmem_v1_lmbs(prop);
        } else {
                prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
                if (prop)
                        init_drmem_v2_lmbs(prop);
        }

        of_node_put(dn);
        return 0;
}
late_initcall(drmem_init);
