drivers/nvdimm/core.c

DEFINITIONS

This source file includes the following definitions:
  1. nvdimm_bus_lock
  2. nvdimm_bus_unlock
  3. is_nvdimm_bus_locked
  4. find_nvdimm_map
  5. alloc_nvdimm_map
  6. nvdimm_map_release
  7. nvdimm_map_put
  8. devm_nvdimm_memremap
  9. nd_fletcher64
  10. to_nd_desc
  11. to_nvdimm_bus_dev
  12. is_uuid_sep
  13. nd_uuid_parse
  14. nd_uuid_store
  15. nd_size_select_show
  16. nd_size_select_store
  17. commands_show
  18. nvdimm_bus_provider
  19. provider_show
  20. flush_namespaces
  21. flush_regions_dimms
  22. wait_probe_show
  23. nvdimm_bus_add_badrange
  24. nd_integrity_init
  25. nd_integrity_init
  26. libnvdimm_init
  27. libnvdimm_exit

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"

LIST_HEAD(nvdimm_bus_list);
DEFINE_MUTEX(nvdimm_bus_list_mutex);

void nvdimm_bus_lock(struct device *dev)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

        if (!nvdimm_bus)
                return;
        mutex_lock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_lock);

void nvdimm_bus_unlock(struct device *dev)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

        if (!nvdimm_bus)
                return;
        mutex_unlock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_unlock);

bool is_nvdimm_bus_locked(struct device *dev)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

        if (!nvdimm_bus)
                return false;
        return mutex_is_locked(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(is_nvdimm_bus_locked);
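
/*
 * Example (illustrative sketch, not part of the original file): a
 * libnvdimm component that mutates bus-scoped state brackets the
 * update with the reconfiguration lock. The function below and the
 * state it would update are hypothetical.
 */
static void example_reconfigure(struct device *dev)
{
        nvdimm_bus_lock(dev);           /* serialize against bus reconfig */
        dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), "bus unlocked!\n");
        /* ... update state that must not race region/namespace changes ... */
        nvdimm_bus_unlock(dev);
}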

struct nvdimm_map {
        struct nvdimm_bus *nvdimm_bus;
        struct list_head list;
        resource_size_t offset;
        unsigned long flags;
        size_t size;
        union {
                void *mem;
                void __iomem *iomem;
        };
        struct kref kref;
};

static struct nvdimm_map *find_nvdimm_map(struct device *dev,
                resource_size_t offset)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        struct nvdimm_map *nvdimm_map;

        list_for_each_entry(nvdimm_map, &nvdimm_bus->mapping_list, list)
                if (nvdimm_map->offset == offset)
                        return nvdimm_map;
        return NULL;
}

static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
                resource_size_t offset, size_t size, unsigned long flags)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        struct nvdimm_map *nvdimm_map;

        nvdimm_map = kzalloc(sizeof(*nvdimm_map), GFP_KERNEL);
        if (!nvdimm_map)
                return NULL;

        INIT_LIST_HEAD(&nvdimm_map->list);
        nvdimm_map->nvdimm_bus = nvdimm_bus;
        nvdimm_map->offset = offset;
        nvdimm_map->flags = flags;
        nvdimm_map->size = size;
        kref_init(&nvdimm_map->kref);

        if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {
                dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",
                                &offset, size, dev_name(dev));
                goto err_request_region;
        }

        if (flags)
                nvdimm_map->mem = memremap(offset, size, flags);
        else
                nvdimm_map->iomem = ioremap(offset, size);

        if (!nvdimm_map->mem)
                goto err_map;

        dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), "%s: bus unlocked!",
                        __func__);
        list_add(&nvdimm_map->list, &nvdimm_bus->mapping_list);

        return nvdimm_map;

 err_map:
        release_mem_region(offset, size);
 err_request_region:
        kfree(nvdimm_map);
        return NULL;
}

static void nvdimm_map_release(struct kref *kref)
{
        struct nvdimm_bus *nvdimm_bus;
        struct nvdimm_map *nvdimm_map;

        nvdimm_map = container_of(kref, struct nvdimm_map, kref);
        nvdimm_bus = nvdimm_map->nvdimm_bus;

        dev_dbg(&nvdimm_bus->dev, "%pa\n", &nvdimm_map->offset);
        list_del(&nvdimm_map->list);
        if (nvdimm_map->flags)
                memunmap(nvdimm_map->mem);
        else
                iounmap(nvdimm_map->iomem);
        release_mem_region(nvdimm_map->offset, nvdimm_map->size);
        kfree(nvdimm_map);
}

static void nvdimm_map_put(void *data)
{
        struct nvdimm_map *nvdimm_map = data;
        struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus;

        nvdimm_bus_lock(&nvdimm_bus->dev);
        kref_put(&nvdimm_map->kref, nvdimm_map_release);
        nvdimm_bus_unlock(&nvdimm_bus->dev);
}

/**
 * devm_nvdimm_memremap - map a resource that is shared across regions
 * @dev: device that will own a reference to the shared mapping
 * @offset: physical base address of the mapping
 * @size: mapping size
 * @flags: memremap flags, or, if zero, perform an ioremap instead
 */
void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
                size_t size, unsigned long flags)
{
        struct nvdimm_map *nvdimm_map;

        nvdimm_bus_lock(dev);
        nvdimm_map = find_nvdimm_map(dev, offset);
        if (!nvdimm_map)
                nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
        else
                kref_get(&nvdimm_map->kref);
        nvdimm_bus_unlock(dev);

        if (!nvdimm_map)
                return NULL;

        if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
                return NULL;

        return nvdimm_map->mem;
}
EXPORT_SYMBOL_GPL(devm_nvdimm_memremap);
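
/*
 * Example (illustrative sketch, not part of the original file): a
 * hypothetical region driver mapping a shared control area in its
 * probe path. Note that devm_nvdimm_memremap() takes the bus lock
 * internally, so the caller must not already hold it. The offset and
 * size below are made up.
 */
static int example_map_ctrl(struct device *dev)
{
        void *base;

        base = devm_nvdimm_memremap(dev, 0x100000000ULL, 0x10000,
                        MEMREMAP_WB);
        if (!base)
                return -ENXIO;

        /* the reference is dropped automatically when @dev is unbound */
        return 0;
}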

/*
 * nd_fletcher64() - Fletcher64-style checksum over an array of 4-byte words
 * @addr: data to checksum; tail bytes beyond a multiple of 4 are ignored
 * @len: length of the data in bytes
 * @le: if true, treat each word as little-endian on-media data
 */
u64 nd_fletcher64(void *addr, size_t len, bool le)
{
        u32 *buf = addr;
        u32 lo32 = 0;
        u64 hi32 = 0;
        int i;

        for (i = 0; i < len / sizeof(u32); i++) {
                lo32 += le ? le32_to_cpu((__le32) buf[i]) : buf[i];
                hi32 += lo32;
        }

        return hi32 << 32 | lo32;
}
EXPORT_SYMBOL_GPL(nd_fletcher64);
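
/*
 * Example (illustrative sketch, not part of the original file):
 * validating a checksummed on-media structure in the style of the
 * namespace label code. 'struct example_blob' is hypothetical; the
 * checksum field is zeroed while the sum is computed, then restored.
 */
struct example_blob {
        u8 data[120];
        __le64 checksum;
};

static bool example_checksum_valid(struct example_blob *blob)
{
        u64 sum, sum_save;

        sum_save = le64_to_cpu(blob->checksum);
        blob->checksum = 0;
        sum = nd_fletcher64(blob, sizeof(*blob), true);
        blob->checksum = cpu_to_le64(sum_save);

        return sum == sum_save;
}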

struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus)
{
        /* struct nvdimm_bus definition is private to libnvdimm */
        return nvdimm_bus->nd_desc;
}
EXPORT_SYMBOL_GPL(to_nd_desc);

struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus)
{
        /* struct nvdimm_bus definition is private to libnvdimm */
        return &nvdimm_bus->dev;
}
EXPORT_SYMBOL_GPL(to_nvdimm_bus_dev);

static bool is_uuid_sep(char sep)
{
        if (sep == '\n' || sep == '-' || sep == ':' || sep == '\0')
                return true;
        return false;
}

static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
                size_t len)
{
        const char *str = buf;
        u8 uuid[16];
        int i;

        for (i = 0; i < 16; i++) {
                if (!isxdigit(str[0]) || !isxdigit(str[1])) {
                        dev_dbg(dev, "pos: %d buf[%zd]: %c buf[%zd]: %c\n",
                                        i, str - buf, str[0],
                                        str + 1 - buf, str[1]);
                        return -EINVAL;
                }

                uuid[i] = (hex_to_bin(str[0]) << 4) | hex_to_bin(str[1]);
                str += 2;
                if (is_uuid_sep(*str))
                        str++;
        }

        memcpy(uuid_out, uuid, sizeof(uuid));
        return 0;
}

/**
 * nd_uuid_store: common implementation for writing 'uuid' sysfs attributes
 * @dev: container device for the uuid property
 * @uuid_out: uuid buffer to replace
 * @buf: raw sysfs buffer to parse
 * @len: length of @buf
 *
 * Enforce that uuids can only be changed while the device is disabled
 * (driver detached)
 * LOCKING: expects nd_device_lock() is held on entry
 */
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
                size_t len)
{
        u8 uuid[16];
        int rc;

        if (dev->driver)
                return -EBUSY;

        rc = nd_uuid_parse(dev, uuid, buf, len);
        if (rc)
                return rc;

        kfree(*uuid_out);
        *uuid_out = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
        if (!(*uuid_out))
                return -ENOMEM;

        return 0;
}
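
/*
 * Example (illustrative sketch, not part of the original file): a
 * 'uuid' sysfs store method built on nd_uuid_store(), loosely modeled
 * on the namespace attribute code. 'struct example_dev' and
 * to_example_dev() are hypothetical.
 */
static ssize_t example_uuid_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct example_dev *edev = to_example_dev(dev);
        ssize_t rc;

        nd_device_lock(dev);            /* nd_uuid_store() expects this held */
        nvdimm_bus_lock(dev);
        rc = nd_uuid_store(dev, &edev->uuid, buf, len);
        nvdimm_bus_unlock(dev);
        nd_device_unlock(dev);

        return rc ? rc : len;
}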

ssize_t nd_size_select_show(unsigned long current_size,
                const unsigned long *supported, char *buf)
{
        ssize_t len = 0;
        int i;

        for (i = 0; supported[i]; i++)
                if (current_size == supported[i])
                        len += sprintf(buf + len, "[%ld] ", supported[i]);
                else
                        len += sprintf(buf + len, "%ld ", supported[i]);
        len += sprintf(buf + len, "\n");
        return len;
}

ssize_t nd_size_select_store(struct device *dev, const char *buf,
                unsigned long *current_size, const unsigned long *supported)
{
        unsigned long lbasize;
        int rc, i;

        if (dev->driver)
                return -EBUSY;

        rc = kstrtoul(buf, 0, &lbasize);
        if (rc)
                return rc;

        for (i = 0; supported[i]; i++)
                if (lbasize == supported[i])
                        break;

        if (supported[i]) {
                *current_size = lbasize;
                return 0;
        } else {
                return -EINVAL;
        }
}
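
/*
 * Example (illustrative sketch, not part of the original file): a
 * 'sector_size' attribute pair built on the helpers above. The
 * zero-terminated table of supported sizes and the container type are
 * hypothetical.
 */
static const unsigned long example_lbasize_supported[] = { 512, 4096, 0 };

static ssize_t example_sector_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct example_dev *edev = to_example_dev(dev);

        return nd_size_select_show(edev->lbasize,
                        example_lbasize_supported, buf);
}

static ssize_t example_sector_size_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct example_dev *edev = to_example_dev(dev);
        ssize_t rc;

        nd_device_lock(dev);
        nvdimm_bus_lock(dev);
        rc = nd_size_select_store(dev, buf, &edev->lbasize,
                        example_lbasize_supported);
        nvdimm_bus_unlock(dev);
        nd_device_unlock(dev);

        return rc ? rc : len;
}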

static ssize_t commands_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        int cmd, len = 0;
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

        for_each_set_bit(cmd, &nd_desc->cmd_mask, BITS_PER_LONG)
                len += sprintf(buf + len, "%s ", nvdimm_bus_cmd_name(cmd));
        len += sprintf(buf + len, "\n");
        return len;
}
static DEVICE_ATTR_RO(commands);

static const char *nvdimm_bus_provider(struct nvdimm_bus *nvdimm_bus)
{
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
        struct device *parent = nvdimm_bus->dev.parent;

        if (nd_desc->provider_name)
                return nd_desc->provider_name;
        else if (parent)
                return dev_name(parent);
        else
                return "unknown";
}

static ssize_t provider_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

        return sprintf(buf, "%s\n", nvdimm_bus_provider(nvdimm_bus));
}
static DEVICE_ATTR_RO(provider);

static int flush_namespaces(struct device *dev, void *data)
{
        nd_device_lock(dev);
        nd_device_unlock(dev);
        return 0;
}

static int flush_regions_dimms(struct device *dev, void *data)
{
        nd_device_lock(dev);
        nd_device_unlock(dev);
        device_for_each_child(dev, NULL, flush_namespaces);
        return 0;
}

static ssize_t wait_probe_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
        int rc;

        if (nd_desc->flush_probe) {
                rc = nd_desc->flush_probe(nd_desc);
                if (rc)
                        return rc;
        }
        nd_synchronize();
        device_for_each_child(dev, NULL, flush_regions_dimms);
        return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(wait_probe);
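
/*
 * Illustrative note (not part of the original file): these bus
 * attributes appear in sysfs under the nvdimm bus device. For
 * instance, userspace can synchronize with asynchronous probing by
 * reading wait_probe; the exact path is an assumption based on the
 * "ndbus%d" device naming:
 *
 *   cat /sys/bus/nd/devices/ndbus0/wait_probe
 */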

static struct attribute *nvdimm_bus_attributes[] = {
        &dev_attr_commands.attr,
        &dev_attr_wait_probe.attr,
        &dev_attr_provider.attr,
        NULL,
};

struct attribute_group nvdimm_bus_attribute_group = {
        .attrs = nvdimm_bus_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_bus_attribute_group);

int nvdimm_bus_add_badrange(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
        return badrange_add(&nvdimm_bus->badrange, addr, length);
}
EXPORT_SYMBOL_GPL(nvdimm_bus_add_badrange);
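
/*
 * Example (illustrative sketch, not part of the original file): a bus
 * provider recording a poisoned physical address range so that
 * namespaces consuming it can surface bad blocks. The address and
 * length are made up.
 */
static int example_report_poison(struct nvdimm_bus *nvdimm_bus)
{
        return nvdimm_bus_add_badrange(nvdimm_bus, 0x2000000000ULL, 512);
}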

#ifdef CONFIG_BLK_DEV_INTEGRITY
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
        struct blk_integrity bi;

        if (meta_size == 0)
                return 0;

        memset(&bi, 0, sizeof(bi));

        bi.tuple_size = meta_size;
        bi.tag_size = meta_size;

        blk_integrity_register(disk, &bi);
        blk_queue_max_integrity_segments(disk->queue, 1);

        return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#else /* CONFIG_BLK_DEV_INTEGRITY */
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
        return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#endif
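
/*
 * Example (illustrative sketch, not part of the original file): a
 * block frontend with per-sector metadata exposing it as integrity
 * tag space. The 16-byte metadata size is made up; with
 * CONFIG_BLK_DEV_INTEGRITY disabled this quietly becomes a nop.
 */
static int example_attach_integrity(struct gendisk *disk)
{
        return nd_integrity_init(disk, 16);
}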

static __init int libnvdimm_init(void)
{
        int rc;

        rc = nvdimm_bus_init();
        if (rc)
                return rc;
        rc = nvdimm_init();
        if (rc)
                goto err_dimm;
        rc = nd_region_init();
        if (rc)
                goto err_region;

        nd_label_init();

        return 0;
 err_region:
        nvdimm_exit();
 err_dimm:
        nvdimm_bus_exit();
        return rc;
}

static __exit void libnvdimm_exit(void)
{
        WARN_ON(!list_empty(&nvdimm_bus_list));
        nd_region_exit();
        nvdimm_exit();
        nvdimm_bus_exit();
        nd_region_devs_exit();
        nvdimm_devs_exit();
}

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
subsys_initcall(libnvdimm_init);
module_exit(libnvdimm_exit);
