nd_region          40 arch/powerpc/platforms/pseries/papr_scm.c 	struct nd_region *region;
nd_region        2183 drivers/acpi/nfit/core.c 	struct nd_region *nd_region = to_nd_region(dev);
nd_region        2184 drivers/acpi/nfit/core.c 	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);
nd_region        2430 drivers/acpi/nfit/core.c 	nvdimm_flush(nfit_blk->nd_region, NULL);
nd_region        2479 drivers/acpi/nfit/core.c 		nvdimm_flush(nfit_blk->nd_region, NULL);
nd_region        2490 drivers/acpi/nfit/core.c 	struct nd_region *nd_region = nfit_blk->nd_region;
nd_region        2494 drivers/acpi/nfit/core.c 	lane = nd_region_acquire_lane(nd_region);
nd_region        2506 drivers/acpi/nfit/core.c 	nd_region_release_lane(nd_region, lane);
nd_region        2573 drivers/acpi/nfit/core.c 	nfit_blk->nd_region = to_nd_region(dev);
nd_region        2627 drivers/acpi/nfit/core.c 	if (nvdimm_has_flush(nfit_blk->nd_region) < 0)
nd_region        2731 drivers/acpi/nfit/core.c 	struct nd_region *nd_region = nfit_spa->nd_region;
nd_region        2763 drivers/acpi/nfit/core.c 	if (nd_region) {
nd_region        2764 drivers/acpi/nfit/core.c 		dev = nd_region_dev(nd_region);
nd_region        2765 drivers/acpi/nfit/core.c 		nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON);
nd_region        2898 drivers/acpi/nfit/core.c 		nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
nd_region        2900 drivers/acpi/nfit/core.c 		if (!nfit_spa->nd_region)
nd_region        2935 drivers/acpi/nfit/core.c 	if (nfit_spa->nd_region)
nd_region        3006 drivers/acpi/nfit/core.c 		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
nd_region        3008 drivers/acpi/nfit/core.c 		if (!nfit_spa->nd_region)
nd_region        3011 drivers/acpi/nfit/core.c 		nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
nd_region        3013 drivers/acpi/nfit/core.c 		if (!nfit_spa->nd_region)
nd_region        3016 drivers/acpi/nfit/core.c 		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
nd_region        3018 drivers/acpi/nfit/core.c 		if (!nfit_spa->nd_region)
nd_region        3175 drivers/acpi/nfit/core.c 		dev = nd_region_dev(nfit_spa->nd_region);
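
The nfit/core.c hits above are the provider side of the API: the ACPI driver fills a struct nd_region_desc and hands it to libnvdimm, which instantiates the region device (pmem, volatile, or blk flavor) and later returns the provider's private data via nd_region_provider_data(). A minimal sketch of that pattern, assuming an already-registered nvdimm_bus; the my_* names are illustrative, not from the source:

#include <linux/libnvdimm.h>

/* Sketch: publish one persistent-memory region on an existing bus. */
static struct nd_region *my_register_pmem(struct nvdimm_bus *bus,
		struct resource *res, struct nd_mapping_desc *mappings,
		u16 num_mappings, void *provider_data)
{
	struct nd_region_desc ndr_desc = {
		.res = res,			/* system-physical address range */
		.mapping = mappings,		/* per-DIMM contributions */
		.num_mappings = num_mappings,
		.provider_data = provider_data,	/* nd_region_provider_data() */
	};

	/* Returns NULL on failure; the device is torn down with the bus. */
	return nvdimm_pmem_region_create(bus, &ndr_desc);
}
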
nd_region          68 drivers/acpi/nfit/mce.c 		nvdimm_region_notify(nfit_spa->nd_region,
nd_region         144 drivers/acpi/nfit/nfit.h 	struct nd_region *nd_region;
nd_region         276 drivers/acpi/nfit/nfit.h 	struct nd_region *nd_region;
nd_region          23 drivers/dax/pmem/core.c 	struct nd_region *nd_region = to_nd_region(dev->parent);
nd_region          56 drivers/dax/pmem/core.c 			nd_region->target_node, le32_to_cpu(pfn_sb->align),
nd_region         269 drivers/nvdimm/badrange.c void nvdimm_badblocks_populate(struct nd_region *nd_region,
nd_region         274 drivers/nvdimm/badrange.c 	if (!is_memory(&nd_region->dev)) {
nd_region         275 drivers/nvdimm/badrange.c 		dev_WARN_ONCE(&nd_region->dev, 1,
nd_region         279 drivers/nvdimm/badrange.c 	nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
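
nvdimm_badblocks_populate() translates the bus-wide badrange list into a region-relative struct badblocks, and it deliberately refuses non-memory regions (the dev_WARN_ONCE above). A sketch of the consumer side, condensed from the drivers/nvdimm/region.c hits later in this listing:

/* Sketch: seed a region's badblocks from bus badranges at probe time. */
static int my_region_init_bb(struct device *dev, struct nd_region *nd_region)
{
	struct resource res;

	if (devm_init_badblocks(dev, &nd_region->bb))
		return -ENODEV;

	res.start = nd_region->ndr_start;
	res.end = nd_region->ndr_start + nd_region->ndr_size - 1;
	nvdimm_badblocks_populate(nd_region, &nd_region->bb, &res);
	return 0;
}
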
nd_region          54 drivers/nvdimm/blk.c 	struct nd_region *nd_region;
nd_region          58 drivers/nvdimm/blk.c 	nd_region = container_of(parent, struct nd_region, dev);
nd_region          59 drivers/nvdimm/blk.c 	return container_of(nd_region, struct nd_blk_region, nd_region);
nd_region        1209 drivers/nvdimm/btt.c 		lane = nd_region_acquire_lane(btt->nd_region);
nd_region        1279 drivers/nvdimm/btt.c 		nd_region_release_lane(btt->nd_region, lane);
nd_region        1291 drivers/nvdimm/btt.c 	nd_region_release_lane(btt->nd_region, lane);
nd_region        1325 drivers/nvdimm/btt.c 		lane = nd_region_acquire_lane(btt->nd_region);
nd_region        1342 drivers/nvdimm/btt.c 			nd_region_release_lane(btt->nd_region, lane);
nd_region        1403 drivers/nvdimm/btt.c 		nd_region_release_lane(btt->nd_region, lane);
nd_region        1421 drivers/nvdimm/btt.c 	nd_region_release_lane(btt->nd_region, lane);
nd_region        1592 drivers/nvdimm/btt.c 		u32 lbasize, u8 *uuid, struct nd_region *nd_region)
nd_region        1609 drivers/nvdimm/btt.c 	btt->nd_region = nd_region;
nd_region        1619 drivers/nvdimm/btt.c 	if (btt->init_state != INIT_READY && nd_region->ro) {
nd_region        1621 drivers/nvdimm/btt.c 				dev_name(&nd_region->dev));
nd_region        1674 drivers/nvdimm/btt.c 	struct nd_region *nd_region;
nd_region        1703 drivers/nvdimm/btt.c 	nd_region = to_nd_region(nd_btt->dev.parent);
nd_region        1705 drivers/nvdimm/btt.c 			nd_region);
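
Every BTT I/O path above brackets its work between nd_region_acquire_lane() and nd_region_release_lane(), so regions with fewer lanes than CPUs serialize access to per-lane resources. The shape of that bracket, reduced to a stub (the real transfer and error handling are elided):

/* Sketch: per-lane state is only valid between acquire and release. */
static int my_btt_io(struct btt *btt)
{
	unsigned int lane;

	lane = nd_region_acquire_lane(btt->nd_region);

	/*
	 * Use 'lane' to index free-list entries, map updates, and RTT
	 * slots here, as the btt.c read/write paths above do.
	 */

	nd_region_release_lane(btt->nd_region, lane);
	return 0;
}
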
nd_region         229 drivers/nvdimm/btt.h 	struct nd_region *nd_region;
nd_region          18 drivers/nvdimm/btt_devs.c 	struct nd_region *nd_region = to_nd_region(dev->parent);
nd_region          23 drivers/nvdimm/btt_devs.c 	ida_simple_remove(&nd_region->btt_ida, nd_btt->id);
nd_region         181 drivers/nvdimm/btt_devs.c static struct device *__nd_btt_create(struct nd_region *nd_region,
nd_region         192 drivers/nvdimm/btt_devs.c 	nd_btt->id = ida_simple_get(&nd_region->btt_ida, 0, 0, GFP_KERNEL);
nd_region         204 drivers/nvdimm/btt_devs.c 	dev_set_name(dev, "btt%d.%d", nd_region->id, nd_btt->id);
nd_region         205 drivers/nvdimm/btt_devs.c 	dev->parent = &nd_region->dev;
nd_region         218 drivers/nvdimm/btt_devs.c 	ida_simple_remove(&nd_region->btt_ida, nd_btt->id);
nd_region         225 drivers/nvdimm/btt_devs.c struct device *nd_btt_create(struct nd_region *nd_region)
nd_region         227 drivers/nvdimm/btt_devs.c 	struct device *dev = __nd_btt_create(nd_region, 0, NULL, NULL);
nd_region         336 drivers/nvdimm/btt_devs.c 	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
nd_region         351 drivers/nvdimm/btt_devs.c 	btt_dev = __nd_btt_create(nd_region, 0, NULL, ndns);
nd_region         159 drivers/nvdimm/bus.c void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event)
nd_region         161 drivers/nvdimm/bus.c 	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
nd_region         167 drivers/nvdimm/bus.c 	nd_device_notify(&nd_region->dev, event);
nd_region         178 drivers/nvdimm/bus.c 	struct nd_region *nd_region;
nd_region         186 drivers/nvdimm/bus.c 	nd_region = to_nd_region(dev);
nd_region         187 drivers/nvdimm/bus.c 	ndr_end = nd_region->ndr_start + nd_region->ndr_size - 1;
nd_region         190 drivers/nvdimm/bus.c 	if (ctx->phys < nd_region->ndr_start
nd_region         194 drivers/nvdimm/bus.c 	sector = (ctx->phys - nd_region->ndr_start) / 512;
nd_region         195 drivers/nvdimm/bus.c 	badblocks_clear(&nd_region->bb, sector, ctx->cleared / 512);
nd_region         197 drivers/nvdimm/bus.c 	if (nd_region->bb_state)
nd_region         198 drivers/nvdimm/bus.c 		sysfs_notify_dirent(nd_region->bb_state);
nd_region         629 drivers/nvdimm/bus.c 	struct nd_region *nd_region = to_nd_region(dev->parent);
nd_region         636 drivers/nvdimm/bus.c 	if (disk_ro || nd_region->ro == disk_ro)
nd_region         640 drivers/nvdimm/bus.c 			dev_name(&nd_region->dev), disk->disk_name);
nd_region          78 drivers/nvdimm/claim.c 	struct nd_region *nd_region = to_nd_region(dev->parent);
nd_region          82 drivers/nvdimm/claim.c 		seed = nd_region->btt_seed;
nd_region          84 drivers/nvdimm/claim.c 		seed = nd_region->pfn_seed;
nd_region          86 drivers/nvdimm/claim.c 		seed = nd_region->dax_seed;
nd_region          15 drivers/nvdimm/dax_devs.c 	struct nd_region *nd_region = to_nd_region(dev->parent);
nd_region          21 drivers/nvdimm/dax_devs.c 	ida_simple_remove(&nd_region->dax_ida, nd_pfn->id);
nd_region          53 drivers/nvdimm/dax_devs.c static struct nd_dax *nd_dax_alloc(struct nd_region *nd_region)
nd_region          64 drivers/nvdimm/dax_devs.c 	nd_pfn->id = ida_simple_get(&nd_region->dax_ida, 0, 0, GFP_KERNEL);
nd_region          71 drivers/nvdimm/dax_devs.c 	dev_set_name(dev, "dax%d.%d", nd_region->id, nd_pfn->id);
nd_region          74 drivers/nvdimm/dax_devs.c 	dev->parent = &nd_region->dev;
nd_region          79 drivers/nvdimm/dax_devs.c struct device *nd_dax_create(struct nd_region *nd_region)
nd_region          84 drivers/nvdimm/dax_devs.c 	if (!is_memory(&nd_region->dev))
nd_region          87 drivers/nvdimm/dax_devs.c 	nd_dax = nd_dax_alloc(nd_region);
nd_region         101 drivers/nvdimm/dax_devs.c 	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
nd_region         115 drivers/nvdimm/dax_devs.c 	nd_dax = nd_dax_alloc(nd_region);
nd_region         226 drivers/nvdimm/dimm_devs.c 	struct nd_region *nd_region = &ndbr->nd_region;
nd_region         227 drivers/nvdimm/dimm_devs.c 	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
nd_region         564 drivers/nvdimm/dimm_devs.c 	struct nd_region *nd_region;
nd_region         572 drivers/nvdimm/dimm_devs.c 	nd_region = to_nd_region(dev);
nd_region         573 drivers/nvdimm/dimm_devs.c 	for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region         574 drivers/nvdimm/dimm_devs.c 		nd_mapping  = &nd_region->mapping[i];
nd_region         579 drivers/nvdimm/dimm_devs.c 	if (i >= nd_region->ndr_mappings)
nd_region         639 drivers/nvdimm/dimm_devs.c resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
nd_region         641 drivers/nvdimm/dimm_devs.c 	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
nd_region         642 drivers/nvdimm/dimm_devs.c 	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
nd_region         672 drivers/nvdimm/dimm_devs.c resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
nd_region         685 drivers/nvdimm/dimm_devs.c 	if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
nd_region         712 drivers/nvdimm/dimm_devs.c resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
nd_region         760 drivers/nvdimm/dimm_devs.c 	nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
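
The dimm_devs.c helpers above (nd_blk_available_dpa, nd_pmem_available_dpa, nd_pmem_max_contiguous_dpa) account DIMM physical address space and are only safe under the subsystem-internal nvdimm bus lock; nd_region_available_dpa() WARNs when called unlocked (see the region_devs.c hits below). A sketch of a correctly locked query, assuming code inside drivers/nvdimm/ with access to nd-core.h:

/* Sketch: DPA accounting must run under the nvdimm bus lock. */
static resource_size_t my_query_free_dpa(struct nd_region *nd_region)
{
	resource_size_t avail;

	nvdimm_bus_lock(&nd_region->dev);
	avail = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(&nd_region->dev);

	return avail;
}
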
nd_region         386 drivers/nvdimm/label.c 		struct nd_region *nd_region = NULL;
nd_region         405 drivers/nvdimm/label.c 		nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
nd_region         759 drivers/nvdimm/label.c static int __pmem_label_update(struct nd_region *nd_region,
nd_region         764 drivers/nvdimm/label.c 	struct nd_interleave_set *nd_set = nd_region->nd_set;
nd_region         780 drivers/nvdimm/label.c 	cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
nd_region         803 drivers/nvdimm/label.c 	nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
nd_region         823 drivers/nvdimm/label.c 	nd_dbg_dpa(nd_region, ndd, res, "\n");
nd_region         897 drivers/nvdimm/label.c static int __blk_label_update(struct nd_region *nd_region,
nd_region         902 drivers/nvdimm/label.c 	struct nd_interleave_set *nd_set = nd_region->nd_set;
nd_region         977 drivers/nvdimm/label.c 		if (!nsblk_add_resource(nd_region, ndd, nsblk, res->start)) {
nd_region        1244 drivers/nvdimm/label.c int nd_pmem_namespace_label_update(struct nd_region *nd_region,
nd_region        1249 drivers/nvdimm/label.c 	for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region        1250 drivers/nvdimm/label.c 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
nd_region        1271 drivers/nvdimm/label.c 		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
nd_region        1281 drivers/nvdimm/label.c 	for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region        1282 drivers/nvdimm/label.c 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
nd_region        1284 drivers/nvdimm/label.c 		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
nd_region        1292 drivers/nvdimm/label.c int nd_blk_namespace_label_update(struct nd_region *nd_region,
nd_region        1295 drivers/nvdimm/label.c 	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
nd_region        1309 drivers/nvdimm/label.c 	return __blk_label_update(nd_region, nd_mapping, nsblk, count);
nd_region         139 drivers/nvdimm/label.h struct nd_region;
nd_region         142 drivers/nvdimm/label.h int nd_pmem_namespace_label_update(struct nd_region *nd_region,
nd_region         144 drivers/nvdimm/label.h int nd_blk_namespace_label_update(struct nd_region *nd_region,
nd_region          25 drivers/nvdimm/namespace_devs.c 	struct nd_region *nd_region = to_nd_region(dev->parent);
nd_region          28 drivers/nvdimm/namespace_devs.c 		ida_simple_remove(&nd_region->ns_ida, nspm->id);
nd_region          37 drivers/nvdimm/namespace_devs.c 	struct nd_region *nd_region = to_nd_region(dev->parent);
nd_region          40 drivers/nvdimm/namespace_devs.c 		ida_simple_remove(&nd_region->ns_ida, nsblk->id);
nd_region         132 drivers/nvdimm/namespace_devs.c 	struct nd_region *nd_region = to_nd_region(dev->parent);
nd_region         139 drivers/nvdimm/namespace_devs.c 	if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
nd_region         184 drivers/nvdimm/namespace_devs.c 	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
nd_region         201 drivers/nvdimm/namespace_devs.c 			sprintf(name, "pmem%d.%d%s", nd_region->id, nsidx,
nd_region         204 drivers/nvdimm/namespace_devs.c 			sprintf(name, "pmem%d%s", nd_region->id,
nd_region         210 drivers/nvdimm/namespace_devs.c 		sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
nd_region         243 drivers/nvdimm/namespace_devs.c 	struct nd_region *nd_region = to_nd_region(dev->parent);
nd_region         245 drivers/nvdimm/namespace_devs.c 	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
nd_region         296 drivers/nvdimm/namespace_devs.c 	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
nd_region         297 drivers/nvdimm/namespace_devs.c 	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
nd_region         314 drivers/nvdimm/namespace_devs.c 	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
nd_region         315 drivers/nvdimm/namespace_devs.c 	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
nd_region         371 drivers/nvdimm/namespace_devs.c static int nd_namespace_label_update(struct nd_region *nd_region,
nd_region         392 drivers/nvdimm/namespace_devs.c 		return nd_pmem_namespace_label_update(nd_region, nspm, size);
nd_region         402 drivers/nvdimm/namespace_devs.c 		return nd_blk_namespace_label_update(nd_region, nsblk, size);
nd_region         410 drivers/nvdimm/namespace_devs.c 	struct nd_region *nd_region = to_nd_region(dev->parent);
nd_region         418 drivers/nvdimm/namespace_devs.c 		rc = nd_namespace_label_update(nd_region, dev);
nd_region         446 drivers/nvdimm/namespace_devs.c static int scan_free(struct nd_region *nd_region,
nd_region         468 drivers/nvdimm/namespace_devs.c 			nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
nd_region         486 drivers/nvdimm/namespace_devs.c 		nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
nd_region         504 drivers/nvdimm/namespace_devs.c static int shrink_dpa_allocation(struct nd_region *nd_region,
nd_region         509 drivers/nvdimm/namespace_devs.c 	for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region         510 drivers/nvdimm/namespace_devs.c 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
nd_region         513 drivers/nvdimm/namespace_devs.c 		rc = scan_free(nd_region, nd_mapping, label_id, n);
nd_region         522 drivers/nvdimm/namespace_devs.c 		struct nd_region *nd_region, struct nd_mapping *nd_mapping,
nd_region         542 drivers/nvdimm/namespace_devs.c 	nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
nd_region         563 drivers/nvdimm/namespace_devs.c static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
nd_region         578 drivers/nvdimm/namespace_devs.c 		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
nd_region         586 drivers/nvdimm/namespace_devs.c 		WARN_ON(!is_nd_blk(&nd_region->dev));
nd_region         587 drivers/nvdimm/namespace_devs.c 		nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
nd_region         614 drivers/nvdimm/namespace_devs.c static resource_size_t scan_allocate(struct nd_region *nd_region,
nd_region         651 drivers/nvdimm/namespace_devs.c 			space_valid(nd_region, ndd, label_id, NULL, next, exist,
nd_region         662 drivers/nvdimm/namespace_devs.c 			space_valid(nd_region, ndd, label_id, res, next, exist,
nd_region         673 drivers/nvdimm/namespace_devs.c 			space_valid(nd_region, ndd, label_id, res, next, exist,
nd_region         736 drivers/nvdimm/namespace_devs.c 		nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
nd_region         762 drivers/nvdimm/namespace_devs.c 		return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
nd_region         766 drivers/nvdimm/namespace_devs.c static int merge_dpa(struct nd_region *nd_region,
nd_region         787 drivers/nvdimm/namespace_devs.c 		nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
nd_region         800 drivers/nvdimm/namespace_devs.c 	struct nd_region *nd_region;
nd_region         807 drivers/nvdimm/namespace_devs.c 	nd_region = to_nd_region(dev);
nd_region         808 drivers/nvdimm/namespace_devs.c 	if (nd_region->ndr_mappings == 0)
nd_region         813 drivers/nvdimm/namespace_devs.c 	for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region         814 drivers/nvdimm/namespace_devs.c 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
nd_region         820 drivers/nvdimm/namespace_devs.c 		n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);
nd_region         823 drivers/nvdimm/namespace_devs.c 		rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
nd_region         824 drivers/nvdimm/namespace_devs.c 		dev_WARN_ONCE(&nd_region->dev, rem,
nd_region         871 drivers/nvdimm/namespace_devs.c static int grow_dpa_allocation(struct nd_region *nd_region,
nd_region         874 drivers/nvdimm/namespace_devs.c 	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
nd_region         878 drivers/nvdimm/namespace_devs.c 	for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region         879 drivers/nvdimm/namespace_devs.c 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
nd_region         895 drivers/nvdimm/namespace_devs.c 			rem = scan_allocate(nd_region, nd_mapping,
nd_region         905 drivers/nvdimm/namespace_devs.c 		dev_WARN_ONCE(&nd_region->dev, rem,
nd_region         912 drivers/nvdimm/namespace_devs.c 		rc = merge_dpa(nd_region, nd_mapping, label_id);
nd_region         920 drivers/nvdimm/namespace_devs.c static void nd_namespace_pmem_set_resource(struct nd_region *nd_region,
nd_region         932 drivers/nvdimm/namespace_devs.c 		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
nd_region         948 drivers/nvdimm/namespace_devs.c 					* nd_region->ndr_mappings;
nd_region         957 drivers/nvdimm/namespace_devs.c 	res->start = nd_region->ndr_start + offset;
nd_region         973 drivers/nvdimm/namespace_devs.c 	struct nd_region *nd_region = to_nd_region(dev->parent);
nd_region        1004 drivers/nvdimm/namespace_devs.c 	if (nd_region->ndr_mappings == 0) {
nd_region        1009 drivers/nvdimm/namespace_devs.c 	div_u64_rem(val, PAGE_SIZE * nd_region->ndr_mappings, &remainder);
nd_region        1012 drivers/nvdimm/namespace_devs.c 				(PAGE_SIZE * nd_region->ndr_mappings) / SZ_1K);
nd_region        1017 drivers/nvdimm/namespace_devs.c 	for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region        1018 drivers/nvdimm/namespace_devs.c 		nd_mapping = &nd_region->mapping[i];
nd_region        1030 drivers/nvdimm/namespace_devs.c 	available = nd_region_allocatable_dpa(nd_region);
nd_region        1038 drivers/nvdimm/namespace_devs.c 	val = div_u64(val, nd_region->ndr_mappings);
nd_region        1039 drivers/nvdimm/namespace_devs.c 	allocated = div_u64(allocated, nd_region->ndr_mappings);
nd_region        1041 drivers/nvdimm/namespace_devs.c 		rc = shrink_dpa_allocation(nd_region, &label_id,
nd_region        1044 drivers/nvdimm/namespace_devs.c 		rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);
nd_region        1052 drivers/nvdimm/namespace_devs.c 		nd_namespace_pmem_set_resource(nd_region, nspm,
nd_region        1053 drivers/nvdimm/namespace_devs.c 				val * nd_region->ndr_mappings);
nd_region        1062 drivers/nvdimm/namespace_devs.c 	if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
nd_region        1071 drivers/nvdimm/namespace_devs.c 	struct nd_region *nd_region = to_nd_region(dev->parent);
nd_region        1085 drivers/nvdimm/namespace_devs.c 		rc = nd_namespace_label_update(nd_region, dev);
nd_region        1147 drivers/nvdimm/namespace_devs.c 	struct nd_region *nd_region = to_nd_region(dev->parent);
nd_region        1149 drivers/nvdimm/namespace_devs.c 	for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region        1150 drivers/nvdimm/namespace_devs.c 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
nd_region        1203 drivers/nvdimm/namespace_devs.c static int namespace_update_uuid(struct nd_region *nd_region,
nd_region        1223 drivers/nvdimm/namespace_devs.c 	for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region        1224 drivers/nvdimm/namespace_devs.c 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
nd_region        1239 drivers/nvdimm/namespace_devs.c 	for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region        1240 drivers/nvdimm/namespace_devs.c 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
nd_region        1273 drivers/nvdimm/namespace_devs.c 	struct nd_region *nd_region = to_nd_region(dev->parent);
nd_region        1297 drivers/nvdimm/namespace_devs.c 		rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
nd_region        1299 drivers/nvdimm/namespace_devs.c 		rc = nd_namespace_label_update(nd_region, dev);
nd_region        1361 drivers/nvdimm/namespace_devs.c 	struct nd_region *nd_region = to_nd_region(dev->parent);
nd_region        1386 drivers/nvdimm/namespace_devs.c 		rc = nd_namespace_label_update(nd_region, dev);
nd_region        1399 drivers/nvdimm/namespace_devs.c 	struct nd_region *nd_region = to_nd_region(dev->parent);
nd_region        1422 drivers/nvdimm/namespace_devs.c 	for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region        1423 drivers/nvdimm/namespace_devs.c 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
nd_region        1440 drivers/nvdimm/namespace_devs.c 	struct nd_region *nd_region = to_nd_region(dev->parent);
nd_region        1443 drivers/nvdimm/namespace_devs.c 	for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region        1444 drivers/nvdimm/namespace_devs.c 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
nd_region        1541 drivers/nvdimm/namespace_devs.c 	struct nd_region *nd_region = to_nd_region(dev->parent);
nd_region        1549 drivers/nvdimm/namespace_devs.c 		rc = nd_namespace_label_update(nd_region, dev);
nd_region        1762 drivers/nvdimm/namespace_devs.c static struct device **create_namespace_io(struct nd_region *nd_region)
nd_region        1780 drivers/nvdimm/namespace_devs.c 	dev->parent = &nd_region->dev;
nd_region        1782 drivers/nvdimm/namespace_devs.c 	res->name = dev_name(&nd_region->dev);
nd_region        1784 drivers/nvdimm/namespace_devs.c 	res->start = nd_region->ndr_start;
nd_region        1785 drivers/nvdimm/namespace_devs.c 	res->end = res->start + nd_region->ndr_size - 1;
nd_region        1791 drivers/nvdimm/namespace_devs.c static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
nd_region        1797 drivers/nvdimm/namespace_devs.c 	for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region        1798 drivers/nvdimm/namespace_devs.c 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
nd_region        1799 drivers/nvdimm/namespace_devs.c 		struct nd_interleave_set *nd_set = nd_region->nd_set;
nd_region        1835 drivers/nvdimm/namespace_devs.c 			if (nlabel != nd_region->ndr_mappings)
nd_region        1848 drivers/nvdimm/namespace_devs.c static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
nd_region        1855 drivers/nvdimm/namespace_devs.c 	for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region        1856 drivers/nvdimm/namespace_devs.c 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
nd_region        1889 drivers/nvdimm/namespace_devs.c 			dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n",
nd_region        1906 drivers/nvdimm/namespace_devs.c static struct device *create_namespace_pmem(struct nd_region *nd_region,
nd_region        1910 drivers/nvdimm/namespace_devs.c 	u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
nd_region        1911 drivers/nvdimm/namespace_devs.c 	u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
nd_region        1922 drivers/nvdimm/namespace_devs.c 		dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n");
nd_region        1927 drivers/nvdimm/namespace_devs.c 		dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
nd_region        1932 drivers/nvdimm/namespace_devs.c 		dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
nd_region        1943 drivers/nvdimm/namespace_devs.c 	dev->parent = &nd_region->dev;
nd_region        1945 drivers/nvdimm/namespace_devs.c 	res->name = dev_name(&nd_region->dev);
nd_region        1948 drivers/nvdimm/namespace_devs.c 	for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region        1949 drivers/nvdimm/namespace_devs.c 		if (has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i))
nd_region        1951 drivers/nvdimm/namespace_devs.c 		if (has_uuid_at_pos(nd_region, nd_label->uuid, altcookie, i))
nd_region        1956 drivers/nvdimm/namespace_devs.c 	if (i < nd_region->ndr_mappings) {
nd_region        1957 drivers/nvdimm/namespace_devs.c 		struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;
nd_region        1964 drivers/nvdimm/namespace_devs.c 		dev_err(&nd_region->dev, "%s missing label for %pUb\n",
nd_region        1978 drivers/nvdimm/namespace_devs.c 	rc = select_pmem_id(nd_region, nd_label->uuid);
nd_region        1983 drivers/nvdimm/namespace_devs.c 	for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region        1987 drivers/nvdimm/namespace_devs.c 		nd_mapping = &nd_region->mapping[i];
nd_region        2018 drivers/nvdimm/namespace_devs.c 	nd_namespace_pmem_set_resource(nd_region, nspm, size);
nd_region        2025 drivers/nvdimm/namespace_devs.c 		dev_dbg(&nd_region->dev, "invalid label(s)\n");
nd_region        2028 drivers/nvdimm/namespace_devs.c 		dev_dbg(&nd_region->dev, "label not found\n");
nd_region        2031 drivers/nvdimm/namespace_devs.c 		dev_dbg(&nd_region->dev, "unexpected err: %d\n", rc);
nd_region        2037 drivers/nvdimm/namespace_devs.c struct resource *nsblk_add_resource(struct nd_region *nd_region,
nd_region        2060 drivers/nvdimm/namespace_devs.c static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
nd_region        2065 drivers/nvdimm/namespace_devs.c 	if (!is_nd_blk(&nd_region->dev))
nd_region        2074 drivers/nvdimm/namespace_devs.c 	nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
nd_region        2079 drivers/nvdimm/namespace_devs.c 	dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
nd_region        2080 drivers/nvdimm/namespace_devs.c 	dev->parent = &nd_region->dev;
nd_region        2086 drivers/nvdimm/namespace_devs.c static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
nd_region        2092 drivers/nvdimm/namespace_devs.c 	if (!is_memory(&nd_region->dev))
nd_region        2101 drivers/nvdimm/namespace_devs.c 	dev->parent = &nd_region->dev;
nd_region        2103 drivers/nvdimm/namespace_devs.c 	res->name = dev_name(&nd_region->dev);
nd_region        2106 drivers/nvdimm/namespace_devs.c 	nspm->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
nd_region        2111 drivers/nvdimm/namespace_devs.c 	dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id);
nd_region        2113 drivers/nvdimm/namespace_devs.c 	nd_namespace_pmem_set_resource(nd_region, nspm, 0);
nd_region        2118 drivers/nvdimm/namespace_devs.c void nd_region_create_ns_seed(struct nd_region *nd_region)
nd_region        2120 drivers/nvdimm/namespace_devs.c 	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
nd_region        2122 drivers/nvdimm/namespace_devs.c 	if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_IO)
nd_region        2125 drivers/nvdimm/namespace_devs.c 	if (is_nd_blk(&nd_region->dev))
nd_region        2126 drivers/nvdimm/namespace_devs.c 		nd_region->ns_seed = nd_namespace_blk_create(nd_region);
nd_region        2128 drivers/nvdimm/namespace_devs.c 		nd_region->ns_seed = nd_namespace_pmem_create(nd_region);
nd_region        2134 drivers/nvdimm/namespace_devs.c 	if (!nd_region->ns_seed)
nd_region        2135 drivers/nvdimm/namespace_devs.c 		dev_err(&nd_region->dev, "failed to create %s namespace\n",
nd_region        2136 drivers/nvdimm/namespace_devs.c 				is_nd_blk(&nd_region->dev) ? "blk" : "pmem");
nd_region        2138 drivers/nvdimm/namespace_devs.c 		nd_device_register(nd_region->ns_seed);
nd_region        2141 drivers/nvdimm/namespace_devs.c void nd_region_create_dax_seed(struct nd_region *nd_region)
nd_region        2143 drivers/nvdimm/namespace_devs.c 	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
nd_region        2144 drivers/nvdimm/namespace_devs.c 	nd_region->dax_seed = nd_dax_create(nd_region);
nd_region        2149 drivers/nvdimm/namespace_devs.c 	if (!nd_region->dax_seed)
nd_region        2150 drivers/nvdimm/namespace_devs.c 		dev_err(&nd_region->dev, "failed to create dax namespace\n");
nd_region        2153 drivers/nvdimm/namespace_devs.c void nd_region_create_pfn_seed(struct nd_region *nd_region)
nd_region        2155 drivers/nvdimm/namespace_devs.c 	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
nd_region        2156 drivers/nvdimm/namespace_devs.c 	nd_region->pfn_seed = nd_pfn_create(nd_region);
nd_region        2161 drivers/nvdimm/namespace_devs.c 	if (!nd_region->pfn_seed)
nd_region        2162 drivers/nvdimm/namespace_devs.c 		dev_err(&nd_region->dev, "failed to create pfn namespace\n");
nd_region        2165 drivers/nvdimm/namespace_devs.c void nd_region_create_btt_seed(struct nd_region *nd_region)
nd_region        2167 drivers/nvdimm/namespace_devs.c 	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
nd_region        2168 drivers/nvdimm/namespace_devs.c 	nd_region->btt_seed = nd_btt_create(nd_region);
nd_region        2173 drivers/nvdimm/namespace_devs.c 	if (!nd_region->btt_seed)
nd_region        2174 drivers/nvdimm/namespace_devs.c 		dev_err(&nd_region->dev, "failed to create btt namespace\n");
nd_region        2177 drivers/nvdimm/namespace_devs.c static int add_namespace_resource(struct nd_region *nd_region,
nd_region        2181 drivers/nvdimm/namespace_devs.c 	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
nd_region        2197 drivers/nvdimm/namespace_devs.c 			res = nsblk_add_resource(nd_region, ndd,
nd_region        2202 drivers/nvdimm/namespace_devs.c 			nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count);
nd_region        2204 drivers/nvdimm/namespace_devs.c 			dev_err(&nd_region->dev,
nd_region        2215 drivers/nvdimm/namespace_devs.c static struct device *create_namespace_blk(struct nd_region *nd_region,
nd_region        2219 drivers/nvdimm/namespace_devs.c 	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
nd_region        2220 drivers/nvdimm/namespace_devs.c 	struct nd_interleave_set *nd_set = nd_region->nd_set;
nd_region        2248 drivers/nvdimm/namespace_devs.c 	dev->parent = &nd_region->dev;
nd_region        2265 drivers/nvdimm/namespace_devs.c 	res = nsblk_add_resource(nd_region, ndd, nsblk,
nd_region        2269 drivers/nvdimm/namespace_devs.c 	nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count);
nd_region        2301 drivers/nvdimm/namespace_devs.c static struct device **scan_labels(struct nd_region *nd_region)
nd_region        2306 drivers/nvdimm/namespace_devs.c 	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
nd_region        2318 drivers/nvdimm/namespace_devs.c 		if (is_nd_blk(&nd_region->dev)
nd_region        2329 drivers/nvdimm/namespace_devs.c 		i = add_namespace_resource(nd_region, nd_label, devs, count);
nd_region        2341 drivers/nvdimm/namespace_devs.c 		if (is_nd_blk(&nd_region->dev))
nd_region        2342 drivers/nvdimm/namespace_devs.c 			dev = create_namespace_blk(nd_region, nd_label, count);
nd_region        2348 drivers/nvdimm/namespace_devs.c 			dev = create_namespace_pmem(nd_region, nsindex, nd_label);
nd_region        2367 drivers/nvdimm/namespace_devs.c 	dev_dbg(&nd_region->dev, "discovered %d %s namespace%s\n",
nd_region        2368 drivers/nvdimm/namespace_devs.c 			count, is_nd_blk(&nd_region->dev)
nd_region        2378 drivers/nvdimm/namespace_devs.c 		if (is_nd_blk(&nd_region->dev)) {
nd_region        2394 drivers/nvdimm/namespace_devs.c 			nd_namespace_pmem_set_resource(nd_region, nspm, 0);
nd_region        2396 drivers/nvdimm/namespace_devs.c 		dev->parent = &nd_region->dev;
nd_region        2398 drivers/nvdimm/namespace_devs.c 	} else if (is_memory(&nd_region->dev)) {
nd_region        2400 drivers/nvdimm/namespace_devs.c 		for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region        2405 drivers/nvdimm/namespace_devs.c 			nd_mapping = &nd_region->mapping[i];
nd_region        2430 drivers/nvdimm/namespace_devs.c 			if (is_nd_blk(&nd_region->dev))
nd_region        2439 drivers/nvdimm/namespace_devs.c static struct device **create_namespaces(struct nd_region *nd_region)
nd_region        2445 drivers/nvdimm/namespace_devs.c 	if (nd_region->ndr_mappings == 0)
nd_region        2449 drivers/nvdimm/namespace_devs.c 	for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region        2450 drivers/nvdimm/namespace_devs.c 		nd_mapping = &nd_region->mapping[i];
nd_region        2454 drivers/nvdimm/namespace_devs.c 	devs = scan_labels(nd_region);
nd_region        2456 drivers/nvdimm/namespace_devs.c 	for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region        2457 drivers/nvdimm/namespace_devs.c 		int reverse = nd_region->ndr_mappings - 1 - i;
nd_region        2459 drivers/nvdimm/namespace_devs.c 		nd_mapping = &nd_region->mapping[reverse];
nd_region        2468 drivers/nvdimm/namespace_devs.c 	struct nd_region *nd_region = region;
nd_region        2471 drivers/nvdimm/namespace_devs.c 	for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region        2472 drivers/nvdimm/namespace_devs.c 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
nd_region        2487 drivers/nvdimm/namespace_devs.c static int init_active_labels(struct nd_region *nd_region)
nd_region        2491 drivers/nvdimm/namespace_devs.c 	for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region        2492 drivers/nvdimm/namespace_devs.c 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
nd_region        2510 drivers/nvdimm/namespace_devs.c 			dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
nd_region        2548 drivers/nvdimm/namespace_devs.c 	if (i < nd_region->ndr_mappings) {
nd_region        2549 drivers/nvdimm/namespace_devs.c 		deactivate_labels(nd_region);
nd_region        2553 drivers/nvdimm/namespace_devs.c 	return devm_add_action_or_reset(&nd_region->dev, deactivate_labels,
nd_region        2554 drivers/nvdimm/namespace_devs.c 			nd_region);
nd_region        2557 drivers/nvdimm/namespace_devs.c int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
nd_region        2563 drivers/nvdimm/namespace_devs.c 	nvdimm_bus_lock(&nd_region->dev);
nd_region        2564 drivers/nvdimm/namespace_devs.c 	rc = init_active_labels(nd_region);
nd_region        2566 drivers/nvdimm/namespace_devs.c 		nvdimm_bus_unlock(&nd_region->dev);
nd_region        2570 drivers/nvdimm/namespace_devs.c 	type = nd_region_to_nstype(nd_region);
nd_region        2573 drivers/nvdimm/namespace_devs.c 		devs = create_namespace_io(nd_region);
nd_region        2577 drivers/nvdimm/namespace_devs.c 		devs = create_namespaces(nd_region);
nd_region        2582 drivers/nvdimm/namespace_devs.c 	nvdimm_bus_unlock(&nd_region->dev);
nd_region        2595 drivers/nvdimm/namespace_devs.c 			id = ida_simple_get(&nd_region->ns_ida, 0, 0,
nd_region        2602 drivers/nvdimm/namespace_devs.c 			id = ida_simple_get(&nd_region->ns_ida, 0, 0,
nd_region        2610 drivers/nvdimm/namespace_devs.c 		dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
nd_region        2615 drivers/nvdimm/namespace_devs.c 		nd_region->ns_seed = devs[0];
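
The namespace_devs.c block culminates in nd_region_register_namespaces(), the entry point the region driver calls at probe: it scans labels under the bus lock, builds namespace devices, and reports how many registered and how many failed. A condensed sketch of the caller, modeled on the drivers/nvdimm/region.c probe hits below; note that partial success intentionally keeps the region alive:

static int my_region_probe(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	int err, rc;

	rc = nd_region_register_namespaces(nd_region, &err);
	if (rc < 0)
		return rc;	/* could not even scan the labels */
	if (err == 0)
		return 0;	/* every namespace registered */
	if (rc == err)
		return -ENODEV;	/* all children failed */

	/* Some namespaces failed: log and continue with the survivors. */
	dev_err(dev, "failed to register %d namespace%s, continuing...\n",
			err, err == 1 ? "" : "s");
	return 0;
}
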
nd_region         118 drivers/nvdimm/nd-core.h struct nd_region;
nd_region         119 drivers/nvdimm/nd-core.h void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev);
nd_region         120 drivers/nvdimm/nd-core.h void nd_region_create_ns_seed(struct nd_region *nd_region);
nd_region         121 drivers/nvdimm/nd-core.h void nd_region_create_btt_seed(struct nd_region *nd_region);
nd_region         122 drivers/nvdimm/nd-core.h void nd_region_create_pfn_seed(struct nd_region *nd_region);
nd_region         123 drivers/nvdimm/nd-core.h void nd_region_create_dax_seed(struct nd_region *nd_region);
nd_region         135 drivers/nvdimm/nd-core.h struct nd_region;
nd_region         144 drivers/nvdimm/nd-core.h resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
nd_region         146 drivers/nvdimm/nd-core.h resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region);
nd_region         147 drivers/nvdimm/nd-core.h resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
nd_region         149 drivers/nvdimm/nd-core.h resource_size_t nd_blk_available_dpa(struct nd_region *nd_region);
nd_region         150 drivers/nvdimm/nd-core.h resource_size_t nd_region_available_dpa(struct nd_region *nd_region);
nd_region         151 drivers/nvdimm/nd-core.h int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
nd_region         156 drivers/nvdimm/nd-core.h struct resource *nsblk_add_resource(struct nd_region *nd_region,
nd_region         158 drivers/nvdimm/nd.h 	int (*flush)(struct nd_region *nd_region, struct bio *bio);
nd_region         167 drivers/nvdimm/nd.h 	struct nd_region nd_region;
nd_region         270 drivers/nvdimm/nd.h struct device *nd_btt_create(struct nd_region *nd_region);
nd_region         283 drivers/nvdimm/nd.h static inline struct device *nd_btt_create(struct nd_region *nd_region)
nd_region         296 drivers/nvdimm/nd.h struct device *nd_pfn_create(struct nd_region *nd_region);
nd_region         313 drivers/nvdimm/nd.h static inline struct device *nd_pfn_create(struct nd_region *nd_region)
nd_region         328 drivers/nvdimm/nd.h struct device *nd_dax_create(struct nd_region *nd_region);
nd_region         341 drivers/nvdimm/nd.h static inline struct device *nd_dax_create(struct nd_region *nd_region)
nd_region         347 drivers/nvdimm/nd.h int nd_region_to_nstype(struct nd_region *nd_region);
nd_region         348 drivers/nvdimm/nd.h int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
nd_region         349 drivers/nvdimm/nd.h u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
nd_region         351 drivers/nvdimm/nd.h u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region);
nd_region         371 drivers/nvdimm/nd.h void nvdimm_badblocks_populate(struct nd_region *nd_region,
nd_region         397 drivers/nvdimm/nd.h int nd_blk_region_init(struct nd_region *nd_region);
nd_region         398 drivers/nvdimm/nd.h int nd_region_activate(struct nd_region *nd_region);
nd_region          38 drivers/nvdimm/nd_virtio.c static int virtio_pmem_flush(struct nd_region *nd_region)
nd_region          40 drivers/nvdimm/nd_virtio.c 	struct virtio_device *vdev = nd_region->provider_data;
nd_region         101 drivers/nvdimm/nd_virtio.c int async_pmem_flush(struct nd_region *nd_region, struct bio *bio)
nd_region         119 drivers/nvdimm/nd_virtio.c 	if (virtio_pmem_flush(nd_region))
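
nd_virtio.c supplies async_pmem_flush() as a region-level flush callback: rather than issuing CPU cache flushes, a flush request is round-tripped to the host. The provider wires it up through nd_region_desc before creating the region, as drivers/nvdimm/virtio_pmem.c does; a sketch with the bus, vdev, and resource assumed to exist:

static struct nd_region *my_virtio_region(struct virtio_device *vdev,
		struct nvdimm_bus *bus, struct resource *res)
{
	struct nd_region_desc ndr_desc = { };

	ndr_desc.res = res;
	ndr_desc.provider_data = vdev;	   /* consumed by virtio_pmem_flush() */
	ndr_desc.flush = async_pmem_flush; /* the nd.h flush() hook above */
	set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
	set_bit(ND_REGION_ASYNC, &ndr_desc.flags);

	return nvdimm_pmem_region_create(bus, &ndr_desc);
}
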
nd_region          62 drivers/nvdimm/of_pmem.c 		struct nd_region *region;
nd_region          19 drivers/nvdimm/pfn_devs.c 	struct nd_region *nd_region = to_nd_region(dev->parent);
nd_region          24 drivers/nvdimm/pfn_devs.c 	ida_simple_remove(&nd_region->pfn_ida, nd_pfn->id);
nd_region         323 drivers/nvdimm/pfn_devs.c static struct nd_pfn *nd_pfn_alloc(struct nd_region *nd_region)
nd_region         332 drivers/nvdimm/pfn_devs.c 	nd_pfn->id = ida_simple_get(&nd_region->pfn_ida, 0, 0, GFP_KERNEL);
nd_region         339 drivers/nvdimm/pfn_devs.c 	dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
nd_region         342 drivers/nvdimm/pfn_devs.c 	dev->parent = &nd_region->dev;
nd_region         347 drivers/nvdimm/pfn_devs.c struct device *nd_pfn_create(struct nd_region *nd_region)
nd_region         352 drivers/nvdimm/pfn_devs.c 	if (!is_memory(&nd_region->dev))
nd_region         355 drivers/nvdimm/pfn_devs.c 	nd_pfn = nd_pfn_alloc(nd_region);
nd_region         370 drivers/nvdimm/pfn_devs.c 	struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
nd_region         389 drivers/nvdimm/pfn_devs.c 		bb_present = badblocks_check(&nd_region->bb, meta_start,
nd_region         394 drivers/nvdimm/pfn_devs.c 			nsoff = ALIGN_DOWN((nd_region->ndr_start
nd_region         604 drivers/nvdimm/pfn_devs.c 	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
nd_region         618 drivers/nvdimm/pfn_devs.c 	nd_pfn = nd_pfn_alloc(nd_region);
nd_region         712 drivers/nvdimm/pfn_devs.c 	struct nd_region *nd_region;
nd_region         738 drivers/nvdimm/pfn_devs.c 	nd_region = to_nd_region(nd_pfn->dev.parent);
nd_region         739 drivers/nvdimm/pfn_devs.c 	if (nd_region->ro) {
nd_region         742 drivers/nvdimm/pfn_devs.c 				dev_name(&nd_region->dev));
nd_region          42 drivers/nvdimm/pmem.c static struct nd_region *to_region(struct pmem_device *pmem)
nd_region         194 drivers/nvdimm/pmem.c 	struct nd_region *nd_region = to_region(pmem);
nd_region         197 drivers/nvdimm/pmem.c 		ret = nvdimm_flush(nd_region, bio);
nd_region         212 drivers/nvdimm/pmem.c 		ret = nvdimm_flush(nd_region, bio);
nd_region         356 drivers/nvdimm/pmem.c 	struct nd_region *nd_region = to_nd_region(dev->parent);
nd_region         389 drivers/nvdimm/pmem.c 	fua = nvdimm_has_flush(nd_region);
nd_region         462 drivers/nvdimm/pmem.c 	nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_res);
nd_region         465 drivers/nvdimm/pmem.c 	if (is_nvdimm_sync(nd_region))
nd_region         472 drivers/nvdimm/pmem.c 	dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
nd_region         564 drivers/nvdimm/pmem.c 	struct nd_region *nd_region;
nd_region         579 drivers/nvdimm/pmem.c 		nd_region = to_nd_region(ndns->dev.parent);
nd_region         586 drivers/nvdimm/pmem.c 		nd_region = to_region(pmem);
nd_region         607 drivers/nvdimm/pmem.c 	nvdimm_badblocks_populate(nd_region, bb, &res);
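
In pmem.c the region is the flush authority: nvdimm_has_flush() decides whether the gendisk advertises FUA, and nvdimm_flush() runs before and after the data transfer depending on REQ_PREFLUSH/REQ_FUA. A condensed sketch of that bio path, using pmem.c's own to_region() helper; the per-bio_vec transfer loop is elided:

static void my_pmem_submit_bio(struct pmem_device *pmem, struct bio *bio)
{
	struct nd_region *nd_region = to_region(pmem);
	int ret = 0;

	if (bio->bi_opf & REQ_PREFLUSH)
		ret = nvdimm_flush(nd_region, bio);	/* flush before data */

	/* ... copy each bio_vec to/from persistent memory ... */

	if (bio->bi_opf & REQ_FUA)
		ret = nvdimm_flush(nd_region, bio);	/* flush after data */

	if (ret)
		bio->bi_status = errno_to_blk_status(ret);
	bio_endio(bio);
}
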
nd_region          17 drivers/nvdimm/region.c 	struct nd_region *nd_region = to_nd_region(dev);
nd_region          19 drivers/nvdimm/region.c 	if (nd_region->num_lanes > num_online_cpus()
nd_region          20 drivers/nvdimm/region.c 			&& nd_region->num_lanes < num_possible_cpus()
nd_region          23 drivers/nvdimm/region.c 				num_online_cpus(), nd_region->num_lanes,
nd_region          26 drivers/nvdimm/region.c 				nd_region->num_lanes);
nd_region          29 drivers/nvdimm/region.c 	rc = nd_region_activate(nd_region);
nd_region          33 drivers/nvdimm/region.c 	rc = nd_blk_region_init(nd_region);
nd_region          37 drivers/nvdimm/region.c 	if (is_memory(&nd_region->dev)) {
nd_region          40 drivers/nvdimm/region.c 		if (devm_init_badblocks(dev, &nd_region->bb))
nd_region          42 drivers/nvdimm/region.c 		nd_region->bb_state = sysfs_get_dirent(nd_region->dev.kobj.sd,
nd_region          44 drivers/nvdimm/region.c 		if (!nd_region->bb_state)
nd_region          45 drivers/nvdimm/region.c 			dev_warn(&nd_region->dev,
nd_region          47 drivers/nvdimm/region.c 		ndr_res.start = nd_region->ndr_start;
nd_region          48 drivers/nvdimm/region.c 		ndr_res.end = nd_region->ndr_start + nd_region->ndr_size - 1;
nd_region          49 drivers/nvdimm/region.c 		nvdimm_badblocks_populate(nd_region, &nd_region->bb, &ndr_res);
nd_region          52 drivers/nvdimm/region.c 	rc = nd_region_register_namespaces(nd_region, &err);
nd_region          63 drivers/nvdimm/region.c 	nd_region->btt_seed = nd_btt_create(nd_region);
nd_region          64 drivers/nvdimm/region.c 	nd_region->pfn_seed = nd_pfn_create(nd_region);
nd_region          65 drivers/nvdimm/region.c 	nd_region->dax_seed = nd_dax_create(nd_region);
nd_region          91 drivers/nvdimm/region.c 	struct nd_region *nd_region = to_nd_region(dev);
nd_region          97 drivers/nvdimm/region.c 	nd_region->ns_seed = NULL;
nd_region          98 drivers/nvdimm/region.c 	nd_region->btt_seed = NULL;
nd_region          99 drivers/nvdimm/region.c 	nd_region->pfn_seed = NULL;
nd_region         100 drivers/nvdimm/region.c 	nd_region->dax_seed = NULL;
nd_region         108 drivers/nvdimm/region.c 	sysfs_put(nd_region->bb_state);
nd_region         109 drivers/nvdimm/region.c 	nd_region->bb_state = NULL;
nd_region         123 drivers/nvdimm/region.c 		struct nd_region *nd_region = to_nd_region(dev);
nd_region         126 drivers/nvdimm/region.c 		if (is_memory(&nd_region->dev)) {
nd_region         127 drivers/nvdimm/region.c 			res.start = nd_region->ndr_start;
nd_region         128 drivers/nvdimm/region.c 			res.end = nd_region->ndr_start +
nd_region         129 drivers/nvdimm/region.c 				nd_region->ndr_size - 1;
nd_region         130 drivers/nvdimm/region.c 			nvdimm_badblocks_populate(nd_region,
nd_region         131 drivers/nvdimm/region.c 					&nd_region->bb, &res);
nd_region         132 drivers/nvdimm/region.c 			if (nd_region->bb_state)
nd_region         133 drivers/nvdimm/region.c 				sysfs_notify_dirent(nd_region->bb_state);
nd_region          62 drivers/nvdimm/region_devs.c int nd_region_activate(struct nd_region *nd_region)
nd_region          66 drivers/nvdimm/region_devs.c 	struct device *dev = &nd_region->dev;
nd_region          69 drivers/nvdimm/region_devs.c 	nvdimm_bus_lock(&nd_region->dev);
nd_region          70 drivers/nvdimm/region_devs.c 	for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region          71 drivers/nvdimm/region_devs.c 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
nd_region          75 drivers/nvdimm/region_devs.c 			nvdimm_bus_unlock(&nd_region->dev);
nd_region          86 drivers/nvdimm/region_devs.c 	nvdimm_bus_unlock(&nd_region->dev);
nd_region          97 drivers/nvdimm/region_devs.c 	for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region          98 drivers/nvdimm/region_devs.c 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
nd_region         100 drivers/nvdimm/region_devs.c 		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);
nd_region         110 drivers/nvdimm/region_devs.c 	for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
nd_region         115 drivers/nvdimm/region_devs.c 		for (j = i + 1; j < nd_region->ndr_mappings; j++)
nd_region         126 drivers/nvdimm/region_devs.c 	struct nd_region *nd_region = to_nd_region(dev);
nd_region         129 drivers/nvdimm/region_devs.c 	for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region         130 drivers/nvdimm/region_devs.c 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
nd_region         135 drivers/nvdimm/region_devs.c 	free_percpu(nd_region->lane);
nd_region         136 drivers/nvdimm/region_devs.c 	ida_simple_remove(&region_ida, nd_region->id);
nd_region         140 drivers/nvdimm/region_devs.c 		kfree(nd_region);
nd_region         173 drivers/nvdimm/region_devs.c struct nd_region *to_nd_region(struct device *dev)
nd_region         175 drivers/nvdimm/region_devs.c 	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);
nd_region         178 drivers/nvdimm/region_devs.c 	return nd_region;
nd_region         182 drivers/nvdimm/region_devs.c struct device *nd_region_dev(struct nd_region *nd_region)
nd_region         184 drivers/nvdimm/region_devs.c 	if (!nd_region)
nd_region         186 drivers/nvdimm/region_devs.c 	return &nd_region->dev;
nd_region         192 drivers/nvdimm/region_devs.c 	struct nd_region *nd_region = to_nd_region(dev);
nd_region         195 drivers/nvdimm/region_devs.c 	return container_of(nd_region, struct nd_blk_region, nd_region);
nd_region         199 drivers/nvdimm/region_devs.c void *nd_region_provider_data(struct nd_region *nd_region)
nd_region         201 drivers/nvdimm/region_devs.c 	return nd_region->provider_data;
nd_region         225 drivers/nvdimm/region_devs.c int nd_region_to_nstype(struct nd_region *nd_region)
nd_region         227 drivers/nvdimm/region_devs.c 	if (is_memory(&nd_region->dev)) {
nd_region         230 drivers/nvdimm/region_devs.c 		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
nd_region         231 drivers/nvdimm/region_devs.c 			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
nd_region         241 drivers/nvdimm/region_devs.c 	} else if (is_nd_blk(&nd_region->dev)) {
nd_region         252 drivers/nvdimm/region_devs.c 	struct nd_region *nd_region = to_nd_region(dev);
nd_region         256 drivers/nvdimm/region_devs.c 		size = nd_region->ndr_size;
nd_region         257 drivers/nvdimm/region_devs.c 	} else if (nd_region->ndr_mappings == 1) {
nd_region         258 drivers/nvdimm/region_devs.c 		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
nd_region         270 drivers/nvdimm/region_devs.c 	struct nd_region *nd_region = to_nd_region(dev);
nd_region         276 drivers/nvdimm/region_devs.c 	return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
nd_region         284 drivers/nvdimm/region_devs.c 	struct nd_region *nd_region = to_nd_region(dev);
nd_region         290 drivers/nvdimm/region_devs.c 	rc = nvdimm_flush(nd_region, NULL);
nd_region         301 drivers/nvdimm/region_devs.c 	struct nd_region *nd_region = to_nd_region(dev);
nd_region         303 drivers/nvdimm/region_devs.c 	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
nd_region         310 drivers/nvdimm/region_devs.c 	struct nd_region *nd_region = to_nd_region(dev);
nd_region         312 drivers/nvdimm/region_devs.c 	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
nd_region         319 drivers/nvdimm/region_devs.c 	struct nd_region *nd_region = to_nd_region(dev);
nd_region         320 drivers/nvdimm/region_devs.c 	struct nd_interleave_set *nd_set = nd_region->nd_set;
nd_region         337 drivers/nvdimm/region_devs.c 	if (nd_region->ndr_mappings) {
nd_region         338 drivers/nvdimm/region_devs.c 		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
nd_region         346 drivers/nvdimm/region_devs.c 					nd_region_interleave_set_cookie(nd_region,
nd_region         359 drivers/nvdimm/region_devs.c resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
nd_region         364 drivers/nvdimm/region_devs.c 	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
nd_region         369 drivers/nvdimm/region_devs.c 	for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region         370 drivers/nvdimm/region_devs.c 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
nd_region         377 drivers/nvdimm/region_devs.c 		if (is_memory(&nd_region->dev)) {
nd_region         378 drivers/nvdimm/region_devs.c 			available += nd_pmem_available_dpa(nd_region,
nd_region         384 drivers/nvdimm/region_devs.c 		} else if (is_nd_blk(&nd_region->dev))
nd_region         385 drivers/nvdimm/region_devs.c 			available += nd_blk_available_dpa(nd_region);
nd_region         391 drivers/nvdimm/region_devs.c resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
nd_region         396 drivers/nvdimm/region_devs.c 	if (is_memory(&nd_region->dev))
nd_region         399 drivers/nvdimm/region_devs.c 	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
nd_region         400 drivers/nvdimm/region_devs.c 	for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region         401 drivers/nvdimm/region_devs.c 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
nd_region         403 drivers/nvdimm/region_devs.c 		if (is_memory(&nd_region->dev))
nd_region         405 drivers/nvdimm/region_devs.c 					nd_pmem_max_contiguous_dpa(nd_region,
nd_region         407 drivers/nvdimm/region_devs.c 		else if (is_nd_blk(&nd_region->dev))
nd_region         408 drivers/nvdimm/region_devs.c 			available += nd_blk_available_dpa(nd_region);
nd_region         410 drivers/nvdimm/region_devs.c 	if (is_memory(&nd_region->dev))
nd_region         411 drivers/nvdimm/region_devs.c 		return available * nd_region->ndr_mappings;
nd_region         418 drivers/nvdimm/region_devs.c 	struct nd_region *nd_region = to_nd_region(dev);
nd_region         430 drivers/nvdimm/region_devs.c 	available = nd_region_available_dpa(nd_region);
nd_region         441 drivers/nvdimm/region_devs.c 	struct nd_region *nd_region = to_nd_region(dev);
nd_region         447 drivers/nvdimm/region_devs.c 	available = nd_region_allocatable_dpa(nd_region);
nd_region         475 drivers/nvdimm/region_devs.c 	struct nd_region *nd_region = to_nd_region(dev);
nd_region         479 drivers/nvdimm/region_devs.c 	if (nd_region->ns_seed)
nd_region         480 drivers/nvdimm/region_devs.c 		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
nd_region         491 drivers/nvdimm/region_devs.c 	struct nd_region *nd_region = to_nd_region(dev);
nd_region         495 drivers/nvdimm/region_devs.c 	if (nd_region->btt_seed)
nd_region         496 drivers/nvdimm/region_devs.c 		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
nd_region         508 drivers/nvdimm/region_devs.c 	struct nd_region *nd_region = to_nd_region(dev);
nd_region         512 drivers/nvdimm/region_devs.c 	if (nd_region->pfn_seed)
nd_region         513 drivers/nvdimm/region_devs.c 		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
nd_region         525 drivers/nvdimm/region_devs.c 	struct nd_region *nd_region = to_nd_region(dev);
nd_region         529 drivers/nvdimm/region_devs.c 	if (nd_region->dax_seed)
nd_region         530 drivers/nvdimm/region_devs.c 		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
nd_region         542 drivers/nvdimm/region_devs.c 	struct nd_region *nd_region = to_nd_region(dev);
nd_region         544 drivers/nvdimm/region_devs.c 	return sprintf(buf, "%d\n", nd_region->ro);
nd_region         552 drivers/nvdimm/region_devs.c 	struct nd_region *nd_region = to_nd_region(dev);
nd_region         557 drivers/nvdimm/region_devs.c 	nd_region->ro = ro;
nd_region         565 drivers/nvdimm/region_devs.c 	struct nd_region *nd_region = to_nd_region(dev);
nd_region         570 drivers/nvdimm/region_devs.c 		rc = badblocks_show(&nd_region->bb, buf, 0);
nd_region         582 drivers/nvdimm/region_devs.c 	struct nd_region *nd_region = to_nd_region(dev);
nd_region         584 drivers/nvdimm/region_devs.c 	return sprintf(buf, "%#llx\n", nd_region->ndr_start);
nd_region         591 drivers/nvdimm/region_devs.c 	struct nd_region *nd_region = to_nd_region(dev);
nd_region         593 drivers/nvdimm/region_devs.c 	if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags))
nd_region         595 drivers/nvdimm/region_devs.c 	else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags))
nd_region         625 drivers/nvdimm/region_devs.c 	struct nd_region *nd_region = to_nd_region(dev);
nd_region         626 drivers/nvdimm/region_devs.c 	struct nd_interleave_set *nd_set = nd_region->nd_set;
nd_region         627 drivers/nvdimm/region_devs.c 	int type = nd_region_to_nstype(nd_region);
nd_region         646 drivers/nvdimm/region_devs.c 		int has_flush = nvdimm_has_flush(nd_region);
nd_region         657 drivers/nvdimm/region_devs.c 		if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE)
nd_region         683 drivers/nvdimm/region_devs.c u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
nd_region         686 drivers/nvdimm/region_devs.c 	struct nd_interleave_set *nd_set = nd_region->nd_set;
nd_region         697 drivers/nvdimm/region_devs.c u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
nd_region         699 drivers/nvdimm/region_devs.c 	struct nd_interleave_set *nd_set = nd_region->nd_set;
nd_region         721 drivers/nvdimm/region_devs.c void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
nd_region         724 drivers/nvdimm/region_devs.c 	if (nd_region->ns_seed == dev) {
nd_region         725 drivers/nvdimm/region_devs.c 		nd_region_create_ns_seed(nd_region);
nd_region         729 drivers/nvdimm/region_devs.c 		if (nd_region->btt_seed == dev)
nd_region         730 drivers/nvdimm/region_devs.c 			nd_region_create_btt_seed(nd_region);
nd_region         731 drivers/nvdimm/region_devs.c 		if (nd_region->ns_seed == &nd_btt->ndns->dev)
nd_region         732 drivers/nvdimm/region_devs.c 			nd_region_create_ns_seed(nd_region);
nd_region         736 drivers/nvdimm/region_devs.c 		if (nd_region->pfn_seed == dev)
nd_region         737 drivers/nvdimm/region_devs.c 			nd_region_create_pfn_seed(nd_region);
nd_region         738 drivers/nvdimm/region_devs.c 		if (nd_region->ns_seed == &nd_pfn->ndns->dev)
nd_region         739 drivers/nvdimm/region_devs.c 			nd_region_create_ns_seed(nd_region);
nd_region         743 drivers/nvdimm/region_devs.c 		if (nd_region->dax_seed == dev)
nd_region         744 drivers/nvdimm/region_devs.c 			nd_region_create_dax_seed(nd_region);
nd_region         745 drivers/nvdimm/region_devs.c 		if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
nd_region         746 drivers/nvdimm/region_devs.c 			nd_region_create_ns_seed(nd_region);
nd_region         753 drivers/nvdimm/region_devs.c 	struct nd_region *nd_region = to_nd_region(dev);
nd_region         757 drivers/nvdimm/region_devs.c 	if (n >= nd_region->ndr_mappings)
nd_region         759 drivers/nvdimm/region_devs.c 	nd_mapping = &nd_region->mapping[n];
nd_region         815 drivers/nvdimm/region_devs.c 	struct nd_region *nd_region = to_nd_region(dev);
nd_region         817 drivers/nvdimm/region_devs.c 	if (n < nd_region->ndr_mappings)
nd_region         864 drivers/nvdimm/region_devs.c int nd_blk_region_init(struct nd_region *nd_region)
nd_region         866 drivers/nvdimm/region_devs.c 	struct device *dev = &nd_region->dev;
nd_region         872 drivers/nvdimm/region_devs.c 	if (nd_region->ndr_mappings < 1) {
nd_region         897 drivers/nvdimm/region_devs.c unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
nd_region         902 drivers/nvdimm/region_devs.c 	if (nd_region->num_lanes < nr_cpu_ids) {
nd_region         905 drivers/nvdimm/region_devs.c 		lane = cpu % nd_region->num_lanes;
nd_region         906 drivers/nvdimm/region_devs.c 		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
nd_region         907 drivers/nvdimm/region_devs.c 		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
nd_region         917 drivers/nvdimm/region_devs.c void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
nd_region         919 drivers/nvdimm/region_devs.c 	if (nd_region->num_lanes < nr_cpu_ids) {
nd_region         923 drivers/nvdimm/region_devs.c 		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
nd_region         924 drivers/nvdimm/region_devs.c 		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
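
The two helpers excerpted above (region_devs.c:897/917) are a matched pair: callers bracket each transfer with acquire/release, as the nfit test code at the end of this listing does. A minimal sketch of the pattern, assuming a hypothetical transfer function:

	/*
	 * Sketch only: my_rw_bytes() and do_transfer() are hypothetical.
	 * The acquire/release bracketing matches the in-tree callers
	 * indexed in this listing.
	 */
	static int my_rw_bytes(struct nd_region *nd_region, void *buf, size_t len)
	{
		unsigned int lane;
		int rc;

		/* may take a shared spinlock when lanes are fewer than CPUs */
		lane = nd_region_acquire_lane(nd_region);
		rc = do_transfer(nd_region, lane, buf, len);
		nd_region_release_lane(nd_region, lane);
		return rc;
	}
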
nd_region         933 drivers/nvdimm/region_devs.c static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
nd_region         937 drivers/nvdimm/region_devs.c 	struct nd_region *nd_region;
nd_region         974 drivers/nvdimm/region_devs.c 			nd_region = &ndbr->nd_region;
nd_region         980 drivers/nvdimm/region_devs.c 		nd_region = kzalloc(struct_size(nd_region, mapping,
nd_region         983 drivers/nvdimm/region_devs.c 		region_buf = nd_region;
nd_region         988 drivers/nvdimm/region_devs.c 	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
nd_region         989 drivers/nvdimm/region_devs.c 	if (nd_region->id < 0)
nd_region         992 drivers/nvdimm/region_devs.c 	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
nd_region         993 drivers/nvdimm/region_devs.c 	if (!nd_region->lane)
nd_region         999 drivers/nvdimm/region_devs.c 		ndl = per_cpu_ptr(nd_region->lane, i);
nd_region        1008 drivers/nvdimm/region_devs.c 		nd_region->mapping[i].nvdimm = nvdimm;
nd_region        1009 drivers/nvdimm/region_devs.c 		nd_region->mapping[i].start = mapping->start;
nd_region        1010 drivers/nvdimm/region_devs.c 		nd_region->mapping[i].size = mapping->size;
nd_region        1011 drivers/nvdimm/region_devs.c 		nd_region->mapping[i].position = mapping->position;
nd_region        1012 drivers/nvdimm/region_devs.c 		INIT_LIST_HEAD(&nd_region->mapping[i].labels);
nd_region        1013 drivers/nvdimm/region_devs.c 		mutex_init(&nd_region->mapping[i].lock);
nd_region        1017 drivers/nvdimm/region_devs.c 	nd_region->ndr_mappings = ndr_desc->num_mappings;
nd_region        1018 drivers/nvdimm/region_devs.c 	nd_region->provider_data = ndr_desc->provider_data;
nd_region        1019 drivers/nvdimm/region_devs.c 	nd_region->nd_set = ndr_desc->nd_set;
nd_region        1020 drivers/nvdimm/region_devs.c 	nd_region->num_lanes = ndr_desc->num_lanes;
nd_region        1021 drivers/nvdimm/region_devs.c 	nd_region->flags = ndr_desc->flags;
nd_region        1022 drivers/nvdimm/region_devs.c 	nd_region->ro = ro;
nd_region        1023 drivers/nvdimm/region_devs.c 	nd_region->numa_node = ndr_desc->numa_node;
nd_region        1024 drivers/nvdimm/region_devs.c 	nd_region->target_node = ndr_desc->target_node;
nd_region        1025 drivers/nvdimm/region_devs.c 	ida_init(&nd_region->ns_ida);
nd_region        1026 drivers/nvdimm/region_devs.c 	ida_init(&nd_region->btt_ida);
nd_region        1027 drivers/nvdimm/region_devs.c 	ida_init(&nd_region->pfn_ida);
nd_region        1028 drivers/nvdimm/region_devs.c 	ida_init(&nd_region->dax_ida);
nd_region        1029 drivers/nvdimm/region_devs.c 	dev = &nd_region->dev;
nd_region        1030 drivers/nvdimm/region_devs.c 	dev_set_name(dev, "region%d", nd_region->id);
nd_region        1035 drivers/nvdimm/region_devs.c 	nd_region->ndr_size = resource_size(ndr_desc->res);
nd_region        1036 drivers/nvdimm/region_devs.c 	nd_region->ndr_start = ndr_desc->res->start;
nd_region        1038 drivers/nvdimm/region_devs.c 		nd_region->flush = ndr_desc->flush;
nd_region        1040 drivers/nvdimm/region_devs.c 		nd_region->flush = NULL;
nd_region        1044 drivers/nvdimm/region_devs.c 	return nd_region;
nd_region        1047 drivers/nvdimm/region_devs.c 	ida_simple_remove(&region_ida, nd_region->id);
nd_region        1053 drivers/nvdimm/region_devs.c struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
nd_region        1062 drivers/nvdimm/region_devs.c struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
nd_region        1073 drivers/nvdimm/region_devs.c struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
nd_region        1082 drivers/nvdimm/region_devs.c int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
nd_region        1086 drivers/nvdimm/region_devs.c 	if (!nd_region->flush)
nd_region        1087 drivers/nvdimm/region_devs.c 		rc = generic_nvdimm_flush(nd_region);
nd_region        1089 drivers/nvdimm/region_devs.c 		if (nd_region->flush(nd_region, bio))
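
nvdimm_flush() (region_devs.c:1082) dispatches to the provider's flush callback when one was supplied at region-create time (see region_devs.c:1038 below) and otherwise falls back to generic_nvdimm_flush(). A hedged sketch of a synchronous caller, assuming a pmem-style write path:

	/*
	 * Sketch only: write_then_flush() is hypothetical. Passing a NULL
	 * bio requests the synchronous flush path.
	 */
	static int write_then_flush(struct nd_region *nd_region,
			void *dst, const void *src, size_t n)
	{
		memcpy_flushcache(dst, src, n);		/* write through the CPU cache */
		return nvdimm_flush(nd_region, NULL);	/* drain platform write queues */
	}
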
nd_region        1099 drivers/nvdimm/region_devs.c int generic_nvdimm_flush(struct nd_region *nd_region)
nd_region        1101 drivers/nvdimm/region_devs.c 	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
nd_region        1119 drivers/nvdimm/region_devs.c 	for (i = 0; i < nd_region->ndr_mappings; i++)
nd_region        1136 drivers/nvdimm/region_devs.c int nvdimm_has_flush(struct nd_region *nd_region)
nd_region        1141 drivers/nvdimm/region_devs.c 	if (nd_region->ndr_mappings == 0
nd_region        1145 drivers/nvdimm/region_devs.c 	for (i = 0; i < nd_region->ndr_mappings; i++) {
nd_region        1146 drivers/nvdimm/region_devs.c 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
nd_region        1162 drivers/nvdimm/region_devs.c int nvdimm_has_cache(struct nd_region *nd_region)
nd_region        1164 drivers/nvdimm/region_devs.c 	return is_nd_pmem(&nd_region->dev) &&
nd_region        1165 drivers/nvdimm/region_devs.c 		!test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
nd_region        1169 drivers/nvdimm/region_devs.c bool is_nvdimm_sync(struct nd_region *nd_region)
nd_region        1171 drivers/nvdimm/region_devs.c 	if (is_nd_volatile(&nd_region->dev))
nd_region        1174 drivers/nvdimm/region_devs.c 	return is_nd_pmem(&nd_region->dev) &&
nd_region        1175 drivers/nvdimm/region_devs.c 		!test_bit(ND_REGION_ASYNC, &nd_region->flags);
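
nvdimm_has_cache() and is_nvdimm_sync() (region_devs.c:1162/1169) tell a consumer whether CPU caches still need explicit flushing for durability and whether the region can back synchronous (MAP_SYNC) DAX mappings. A sketch of how a pmem-style consumer might apply them; the function name is illustrative:

	/*
	 * Sketch only: setup_my_dax() is hypothetical. dax_write_cache()
	 * enables write-cache flushing unless the platform already
	 * persists CPU caches (ND_REGION_PERSIST_CACHE).
	 */
	static void setup_my_dax(struct nd_region *nd_region,
			struct dax_device *dax_dev)
	{
		dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));

		if (!is_nvdimm_sync(nd_region))
			pr_info("async region: MAP_SYNC will not be honored\n");
	}
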
nd_region        1180 drivers/nvdimm/region_devs.c 	struct nd_region *nd_region;
nd_region        1186 drivers/nvdimm/region_devs.c 	struct nd_region *nd_region;
nd_region        1193 drivers/nvdimm/region_devs.c 	nd_region = to_nd_region(dev);
nd_region        1194 drivers/nvdimm/region_devs.c 	if (nd_region == ctx->nd_region)
nd_region        1198 drivers/nvdimm/region_devs.c 	region_start = nd_region->ndr_start;
nd_region        1199 drivers/nvdimm/region_devs.c 	region_end = region_start + nd_region->ndr_size;
nd_region        1207 drivers/nvdimm/region_devs.c int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
nd_region        1210 drivers/nvdimm/region_devs.c 	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
nd_region        1212 drivers/nvdimm/region_devs.c 		.nd_region = nd_region,
nd_region          36 drivers/nvdimm/virtio_pmem.c 	struct nd_region *nd_region;
nd_region          86 drivers/nvdimm/virtio_pmem.c 	nd_region = nvdimm_pmem_region_create(vpmem->nvdimm_bus, &ndr_desc);
nd_region          87 drivers/nvdimm/virtio_pmem.c 	if (!nd_region) {
nd_region          92 drivers/nvdimm/virtio_pmem.c 	nd_region->provider_data = dev_to_virtio(nd_region->dev.parent->parent);
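
The virtio_pmem probe above is one of the smallest in-tree region creators: populate a struct nd_region_desc and hand it to nvdimm_pmem_region_create(), which returns NULL on failure. A sketch of that flow, with the bus and resource assumed to exist; async_pmem_flush is the virtio callback declared in virtio_pmem.h below:

	#include <linux/libnvdimm.h>

	/*
	 * Sketch only, modeled on the virtio_pmem probe indexed above;
	 * 'bus' and 'res' are assumed to be set up by the caller.
	 */
	static struct nd_region *register_my_region(struct nvdimm_bus *bus,
			struct resource *res)
	{
		struct nd_region_desc ndr_desc = { 0 };

		ndr_desc.res = res;			/* SPA range to manage */
		ndr_desc.numa_node = NUMA_NO_NODE;
		ndr_desc.flush = async_pmem_flush;	/* provider flush callback */
		set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
		set_bit(ND_REGION_ASYNC, &ndr_desc.flags);

		return nvdimm_pmem_region_create(bus, &ndr_desc);
	}
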
nd_region          54 drivers/nvdimm/virtio_pmem.h int async_pmem_flush(struct nd_region *nd_region, struct bio *bio);
nd_region         120 include/linux/libnvdimm.h struct nd_region;
nd_region         133 include/linux/libnvdimm.h 	int (*flush)(struct nd_region *nd_region, struct bio *bio);
nd_region         219 include/linux/libnvdimm.h struct nd_region *to_nd_region(struct device *dev);
nd_region         220 include/linux/libnvdimm.h struct device *nd_region_dev(struct nd_region *nd_region);
nd_region         250 include/linux/libnvdimm.h struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
nd_region         252 include/linux/libnvdimm.h struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
nd_region         254 include/linux/libnvdimm.h struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
nd_region         256 include/linux/libnvdimm.h void *nd_region_provider_data(struct nd_region *nd_region);
nd_region         261 include/linux/libnvdimm.h unsigned int nd_region_acquire_lane(struct nd_region *nd_region);
nd_region         262 include/linux/libnvdimm.h void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane);
nd_region         264 include/linux/libnvdimm.h int nvdimm_flush(struct nd_region *nd_region, struct bio *bio);
nd_region         265 include/linux/libnvdimm.h int generic_nvdimm_flush(struct nd_region *nd_region);
nd_region         266 include/linux/libnvdimm.h int nvdimm_has_flush(struct nd_region *nd_region);
nd_region         267 include/linux/libnvdimm.h int nvdimm_has_cache(struct nd_region *nd_region);
nd_region         269 include/linux/libnvdimm.h bool is_nvdimm_sync(struct nd_region *nd_region);
nd_region         171 include/linux/nd.h struct nd_region;
nd_region         172 include/linux/nd.h void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event);
nd_region         632 tools/testing/nvdimm/test/nfit.c 	struct nd_region *region;
nd_region         643 tools/testing/nvdimm/test/nfit.c 	struct nd_region *nd_region;
nd_region         649 tools/testing/nvdimm/test/nfit.c 	nd_region = to_nd_region(dev);
nd_region         650 tools/testing/nvdimm/test/nfit.c 	ndr_end = nd_region->ndr_start + nd_region->ndr_size;
nd_region         652 tools/testing/nvdimm/test/nfit.c 	if (ctx->addr >= nd_region->ndr_start && ctx->addr < ndr_end) {
nd_region         653 tools/testing/nvdimm/test/nfit.c 		ctx->region = nd_region;
nd_region         664 tools/testing/nvdimm/test/nfit.c 	struct nd_region *nd_region = NULL;
nd_region         679 tools/testing/nvdimm/test/nfit.c 	nd_region = ctx.region;
nd_region         681 tools/testing/nvdimm/test/nfit.c 	dpa = ctx.addr - nd_region->ndr_start;
nd_region         686 tools/testing/nvdimm/test/nfit.c 	nd_mapping = &nd_region->mapping[nd_region->ndr_mappings - 1];
nd_region        2649 tools/testing/nvdimm/test/nfit.c 	struct nd_region *nd_region = &ndbr->nd_region;
nd_region        2652 tools/testing/nvdimm/test/nfit.c 	lane = nd_region_acquire_lane(nd_region);
nd_region        2661 tools/testing/nvdimm/test/nfit.c 	nd_region_release_lane(nd_region, lane);