Lines matching refs:ns (all references to the struct nvme_ns *ns locals and arguments in the NVMe block driver)

524 struct nvme_ns *ns = req->rq_disk->private_data; in nvme_dif_remap() local
530 if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3) in nvme_dif_remap()
541 phys = nvme_block_nr(ns, blk_rq_pos(req)); in nvme_dif_remap()
542 nlb = (blk_rq_bytes(req) >> ns->lba_shift); in nvme_dif_remap()
543 ts = ns->disk->queue->integrity.tuple_size; in nvme_dif_remap()
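
The nvme_dif_remap() hits above (524-543) are the T10 protection-information remap step: when the physical start block differs from the virtual LBA the block layer seeded into the integrity payload, every PI tuple's reference tag must be rewritten before submission (and swapped back on completion). A sketch of the surrounding function, reconstructed from the lines shown; the bio_integrity()/bip_get_seed()/kmap_atomic() plumbing and the dif_swap callback are assumptions based on the 4.x-era block-integrity helpers:

```c
static void nvme_dif_remap(struct request *req,
			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
{
	struct nvme_ns *ns = req->rq_disk->private_data;
	struct bio_integrity_payload *bip;
	struct t10_pi_tuple *pi;
	void *p, *pmap;
	u32 i, nlb, ts, phys, virt;

	if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3)
		return;			/* Type 3 carries no reference tag */

	bip = bio_integrity(req->bio);
	if (!bip)
		return;

	pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;

	p = pmap;
	virt = bip_get_seed(bip);			/* virtual start LBA */
	phys = nvme_block_nr(ns, blk_rq_pos(req));	/* physical start LBA */
	nlb = (blk_rq_bytes(req) >> ns->lba_shift);
	ts = ns->disk->queue->integrity.tuple_size;

	for (i = 0; i < nlb; i++, virt++, phys++) {
		pi = (struct t10_pi_tuple *)p;
		dif_swap(phys, virt, pi);	/* rewrite one tuple's ref tag */
		p += ts;
	}
	kunmap_atomic(pmap);
}
```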
553 static void nvme_init_integrity(struct nvme_ns *ns) in nvme_init_integrity() argument
557 switch (ns->pi_type) { in nvme_init_integrity()
569 integrity.tuple_size = ns->ms; in nvme_init_integrity()
570 blk_integrity_register(ns->disk, &integrity); in nvme_init_integrity()
571 blk_queue_max_integrity_segments(ns->queue, 1); in nvme_init_integrity()
584 static void nvme_init_integrity(struct nvme_ns *ns) in nvme_init_integrity() argument
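
nvme_init_integrity() appears twice (553 and 584) because it is compiled conditionally: the 553 body registers a T10 PI profile with the block layer when CONFIG_BLK_DEV_INTEGRITY is set, and the 584 definition is an empty stub otherwise. A sketch of the real body, assuming the 4.4-era blk_integrity_profile objects (t10_pi_type1_crc, t10_pi_type3_crc); only the tuple_size, register, and max-segments lines come from the listing:

```c
static void nvme_init_integrity(struct nvme_ns *ns)
{
	struct blk_integrity integrity;

	memset(&integrity, 0, sizeof(integrity));
	switch (ns->pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;	/* guard check only */
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;	/* guard + ref tag */
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ns->ms;	/* metadata bytes per logical block */
	blk_integrity_register(ns->disk, &integrity);
	blk_queue_max_integrity_segments(ns->queue, 1);
}
```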
750 static void nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns, in nvme_submit_discard() argument
758 range->nlb = cpu_to_le32(blk_rq_bytes(req) >> ns->lba_shift); in nvme_submit_discard()
759 range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req))); in nvme_submit_discard()
764 cmnd.dsm.nsid = cpu_to_le32(ns->ns_id); in nvme_submit_discard()
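
nvme_submit_discard() (750-764) turns a discard request into a Dataset Management command: a single nvme_dsm_range whose start and length go through the same nvme_block_nr()/lba_shift conversion as the read/write path, with the 0-based range count (nr = 0 means one range). A sketch; the iod/PRP bookkeeping is an assumption from the same driver:

```c
static void nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
		struct request *req, struct nvme_iod *iod)
{
	/* one DMA-able range descriptor, pre-allocated with the iod */
	struct nvme_dsm_range *range =
				(struct nvme_dsm_range *)iod_list(iod)[0];
	struct nvme_command cmnd;

	range->cattr = cpu_to_le32(0);
	range->nlb = cpu_to_le32(blk_rq_bytes(req) >> ns->lba_shift);
	range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));

	memset(&cmnd, 0, sizeof(cmnd));
	cmnd.dsm.opcode = nvme_cmd_dsm;
	cmnd.dsm.command_id = req->tag;
	cmnd.dsm.nsid = cpu_to_le32(ns->ns_id);
	cmnd.dsm.prp1 = cpu_to_le64(iod->first_dma);
	cmnd.dsm.nr = 0;				/* 0-based: one range */
	cmnd.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); /* deallocate */

	__nvme_submit_cmd(nvmeq, &cmnd);
}
```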
772 static void nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns, in nvme_submit_flush() argument
780 cmnd.common.nsid = cpu_to_le32(ns->ns_id); in nvme_submit_flush()
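
nvme_submit_flush() (772-780) is the minimal command-building case: a flush needs nothing beyond the opcode, the command id, and the namespace id. Reconstructed; the memset and opcode lines are inferred from the command layout:

```c
static void nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								int cmdid)
{
	struct nvme_command cmnd;

	memset(&cmnd, 0, sizeof(cmnd));
	cmnd.common.opcode = nvme_cmd_flush;
	cmnd.common.command_id = cmdid;
	cmnd.common.nsid = cpu_to_le32(ns->ns_id);

	__nvme_submit_cmd(nvmeq, &cmnd);
}
```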
786 struct nvme_ns *ns) in nvme_submit_iod() argument
804 cmnd.rw.nsid = cpu_to_le32(ns->ns_id); in nvme_submit_iod()
807 cmnd.rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req))); in nvme_submit_iod()
808 cmnd.rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); in nvme_submit_iod()
810 if (ns->ms) { in nvme_submit_iod()
811 switch (ns->pi_type) { in nvme_submit_iod()
820 nvme_block_nr(ns, blk_rq_pos(req))); in nvme_submit_iod()
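
nvme_submit_iod() (786-820) builds the actual read/write command. Two details deserve a callout: NVMe's NLB field is 0-based, hence the "- 1" at 808, and for Type 1/2 protection the initial reference tag must equal the starting physical LBA, which is why 820 reuses nvme_block_nr(). A trimmed sketch; the PRP fields, iod_get_private(), and the metadata-pointer branch are assumptions from the same era:

```c
static void nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
							struct nvme_ns *ns)
{
	struct request *req = iod_get_private(iod);
	struct nvme_command cmnd;
	u16 control = 0;

	memset(&cmnd, 0, sizeof(cmnd));
	cmnd.rw.opcode = rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read;
	cmnd.rw.command_id = req->tag;
	cmnd.rw.nsid = cpu_to_le32(ns->ns_id);
	cmnd.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
	cmnd.rw.prp2 = cpu_to_le64(iod->first_dma);
	cmnd.rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	/* NLB is 0-based on the wire: a single-block I/O encodes 0 */
	cmnd.rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (ns->ms) {
		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			/* initial ref tag must match the physical start LBA */
			cmnd.rw.reftag = cpu_to_le32(
					nvme_block_nr(ns, blk_rq_pos(req)));
			break;
		}
		if (!blk_integrity_rq(req))
			control |= NVME_RW_PRINFO_PRACT; /* device makes/strips PI */
	}
	cmnd.rw.control = cpu_to_le16(control);

	__nvme_submit_cmd(nvmeq, &cmnd);
}
```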
844 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_queue_rq() local
857 if (ns && ns->ms && !blk_integrity_rq(req)) { in nvme_queue_rq()
858 if (!(ns->pi_type && ns->ms == 8) && in nvme_queue_rq()
929 nvme_submit_discard(nvmeq, ns, req, iod); in nvme_queue_rq()
931 nvme_submit_flush(nvmeq, ns, req->tag); in nvme_queue_rq()
933 nvme_submit_iod(nvmeq, iod, ns); in nvme_queue_rq()
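
nvme_queue_rq() (844-933) is the blk-mq dispatch point. The 857-858 check rejects requests on metadata-formatted namespaces that arrive without a kernel integrity payload, unless the metadata is exactly an 8-byte PI tuple the controller can generate and strip itself (the PRACT case above). The tail then fans out by request type; a sketch of that fan-out, assuming the era's REQ_DISCARD/REQ_FLUSH flags:

```c
	if (req->cmd_flags & REQ_DISCARD)
		nvme_submit_discard(nvmeq, ns, req, iod);	/* line 929 */
	else if (req->cmd_flags & REQ_FLUSH)
		nvme_submit_flush(nvmeq, ns, req->tag);		/* line 931 */
	else
		nvme_submit_iod(nvmeq, iod, ns);		/* line 933 */
```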
1805 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) in nvme_submit_io() argument
1807 struct nvme_dev *dev = ns->dev; in nvme_submit_io()
1828 length = (io.nblocks + 1) << ns->lba_shift; in nvme_submit_io()
1829 meta_len = (io.nblocks + 1) * ns->ms; in nvme_submit_io()
1833 if (ns->ext) { in nvme_submit_io()
1838 if (((io.metadata & 3) || !io.metadata) && !ns->ext) in nvme_submit_io()
1859 c.rw.nsid = cpu_to_le32(ns->ns_id); in nvme_submit_io()
1869 status = __nvme_submit_sync_cmd(ns->queue, &c, NULL, in nvme_submit_io()
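
nvme_submit_io() (1805-1869) services the NVME_IOCTL_SUBMIT_IO path. io.nblocks is 0-based, so both the data length (1828) and metadata length (1829) use nblocks + 1; on extended-LBA formats (ns->ext) the metadata travels interleaved inside the data buffer, while separate-metadata formats require a non-NULL, dword-aligned io.metadata pointer (the 1838 check). A trimmed sketch of the function; the metadata buffer mapping before the sync submit is omitted, and the exact __nvme_submit_sync_cmd() trailing arguments are an assumption:

```c
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;

	length = (io.nblocks + 1) << ns->lba_shift;	/* nblocks is 0-based */
	meta_len = (io.nblocks + 1) * ns->ms;

	if (ns->ext) {		/* extended LBAs: metadata rides in the data buffer */
		length += meta_len;
		meta_len = 0;
	}
	if (meta_len) {
		if (((io.metadata & 3) || !io.metadata) && !ns->ext)
			return -EINVAL;	/* separate metadata: non-NULL, dword-aligned */
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);	/* still 0-based on the wire */

	return __nvme_submit_sync_cmd(ns->queue, &c, NULL,
			(void __user *)(uintptr_t)io.addr, length, NULL, 0);
}
```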
1882 static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns, in nvme_user_cmd() argument
1911 status = __nvme_submit_sync_cmd(ns ? ns->queue : dev->admin_q, &c, in nvme_user_cmd()
1934 struct nvme_ns *ns = bdev->bd_disk->private_data; in nvme_ioctl() local
1939 return ns->ns_id; in nvme_ioctl()
1941 return nvme_user_cmd(ns->dev, NULL, (void __user *)arg); in nvme_ioctl()
1943 return nvme_user_cmd(ns->dev, ns, (void __user *)arg); in nvme_ioctl()
1945 return nvme_submit_io(ns, (void __user *)arg); in nvme_ioctl()
1949 return nvme_sg_io(ns, (void __user *)arg); in nvme_ioctl()
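
nvme_ioctl() (1934-1949) is a plain dispatch table on the block device. Note the asymmetry at 1941 vs 1943: admin passthrough hands nvme_user_cmd() a NULL ns, which the ternary at 1911 turns into a submit on dev->admin_q instead of the namespace's I/O queue. Reconstructed almost verbatim from the lines shown:

```c
static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		/* NULL ns routes the command to dev->admin_q (line 1911) */
		return nvme_user_cmd(ns->dev, NULL, (void __user *)arg);
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->dev, ns, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
	case SG_IO:
		return nvme_sg_io(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}
```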
1972 struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref); in nvme_free_ns() local
1974 if (ns->type == NVME_NS_LIGHTNVM) in nvme_free_ns()
1975 nvme_nvm_unregister(ns->queue, ns->disk->disk_name); in nvme_free_ns()
1978 ns->disk->private_data = NULL; in nvme_free_ns()
1981 kref_put(&ns->dev->kref, nvme_free_dev); in nvme_free_ns()
1982 put_disk(ns->disk); in nvme_free_ns()
1983 kfree(ns); in nvme_free_ns()
1989 struct nvme_ns *ns; in nvme_open() local
1992 ns = bdev->bd_disk->private_data; in nvme_open()
1993 if (!ns) in nvme_open()
1995 else if (!kref_get_unless_zero(&ns->kref)) in nvme_open()
2004 struct nvme_ns *ns = disk->private_data; in nvme_release() local
2005 kref_put(&ns->kref, nvme_free_ns); in nvme_release()
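
The 1972-2005 hits are the namespace lifetime machinery: every opener takes a reference with kref_get_unless_zero(), so an open can never resurrect a namespace whose count already hit zero, and the final put runs nvme_free_ns(), which unregisters LightNVM targets, clears private_data, drops the parent device kref, and frees the disk and the ns. A sketch of the open/release pair; dev_list_lock as the guard around private_data is an assumption from the same driver:

```c
static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	int ret = 0;
	struct nvme_ns *ns;

	spin_lock(&dev_list_lock);		/* assumed driver-global lock */
	ns = bdev->bd_disk->private_data;
	if (!ns)
		ret = -ENXIO;			/* disk already torn down */
	else if (!kref_get_unless_zero(&ns->kref))
		ret = -ENXIO;			/* lost the race with the last put */
	spin_unlock(&dev_list_lock);

	return ret;
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	struct nvme_ns *ns = disk->private_data;

	kref_put(&ns->kref, nvme_free_ns);	/* may free ns, see 1972-1983 */
}
```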
2017 static void nvme_config_discard(struct nvme_ns *ns) in nvme_config_discard() argument
2019 u32 logical_block_size = queue_logical_block_size(ns->queue); in nvme_config_discard()
2020 ns->queue->limits.discard_zeroes_data = 0; in nvme_config_discard()
2021 ns->queue->limits.discard_alignment = logical_block_size; in nvme_config_discard()
2022 ns->queue->limits.discard_granularity = logical_block_size; in nvme_config_discard()
2023 blk_queue_max_discard_sectors(ns->queue, 0xffffffff); in nvme_config_discard()
2024 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); in nvme_config_discard()
2029 struct nvme_ns *ns = disk->private_data; in nvme_revalidate_disk() local
2030 struct nvme_dev *dev = ns->dev; in nvme_revalidate_disk()
2036 if (nvme_identify_ns(dev, ns->ns_id, &id)) { in nvme_revalidate_disk()
2038 dev->instance, ns->ns_id); in nvme_revalidate_disk()
2046 if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) { in nvme_revalidate_disk()
2047 if (nvme_nvm_register(ns->queue, disk->disk_name)) { in nvme_revalidate_disk()
2053 ns->type = NVME_NS_LIGHTNVM; in nvme_revalidate_disk()
2056 old_ms = ns->ms; in nvme_revalidate_disk()
2058 ns->lba_shift = id->lbaf[lbaf].ds; in nvme_revalidate_disk()
2059 ns->ms = le16_to_cpu(id->lbaf[lbaf].ms); in nvme_revalidate_disk()
2060 ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT); in nvme_revalidate_disk()
2066 if (ns->lba_shift == 0) in nvme_revalidate_disk()
2067 ns->lba_shift = 9; in nvme_revalidate_disk()
2068 bs = 1 << ns->lba_shift; in nvme_revalidate_disk()
2071 pi_type = ns->ms == sizeof(struct t10_pi_tuple) ? in nvme_revalidate_disk()
2075 if (blk_get_integrity(disk) && (ns->pi_type != pi_type || in nvme_revalidate_disk()
2076 ns->ms != old_ms || in nvme_revalidate_disk()
2078 (ns->ms && ns->ext))) in nvme_revalidate_disk()
2081 ns->pi_type = pi_type; in nvme_revalidate_disk()
2082 blk_queue_logical_block_size(ns->queue, bs); in nvme_revalidate_disk()
2084 if (ns->ms && !ns->ext) in nvme_revalidate_disk()
2085 nvme_init_integrity(ns); in nvme_revalidate_disk()
2087 if ((ns->ms && !(ns->ms == 8 && ns->pi_type) && in nvme_revalidate_disk()
2089 ns->type == NVME_NS_LIGHTNVM) in nvme_revalidate_disk()
2092 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9)); in nvme_revalidate_disk()
2095 nvme_config_discard(ns); in nvme_revalidate_disk()
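
nvme_revalidate_disk() (2029-2095) re-reads Identify Namespace and rebuilds the block-layer view: LBA size from the current format descriptor, metadata size and the extended-LBA flag, a PI type only when the metadata is exactly one t10_pi_tuple (2071), capacity converted from namespace blocks to 512-byte sectors (2092), and discard limits via nvme_config_discard() (2095, gated on the controller advertising DSM support). The 2075-2078 test unregisters integrity whenever the format changed or metadata became extended, since extended metadata cannot use a separate integrity payload. A sketch of the format extraction, with the lbaf index mask assumed:

```c
	u8 lbaf, pi_type;
	u16 old_ms = ns->ms;
	unsigned short bs;

	lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;	/* current format index */
	ns->lba_shift = id->lbaf[lbaf].ds;		/* LBA size, power of two */
	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);	/* metadata bytes per LBA */
	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);

	if (ns->lba_shift == 0)
		ns->lba_shift = 9;			/* fall back to 512 bytes */
	bs = 1 << ns->lba_shift;

	/* PI only applies when metadata is exactly one 8-byte T10 tuple */
	pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
				id->dps & NVME_NS_DPS_PI_MASK : 0;

	ns->pi_type = pi_type;
	blk_queue_logical_block_size(ns->queue, bs);
	/* nsze is in namespace blocks; gendisk capacity is 512-byte sectors */
	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
```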
2125 struct nvme_ns *ns = bdev->bd_disk->private_data; in nvme_pr_command() local
2134 c.common.nsid = cpu_to_le32(ns->ns_id); in nvme_pr_command()
2137 return nvme_submit_sync_cmd(ns->queue, &c, data, 16); in nvme_pr_command()
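
nvme_pr_command() (2125-2137) backs the block layer's persistent-reservation ops: the two reservation keys go into a 16-byte payload, the PR opcode and cdw10 come from the caller, and the command is submitted synchronously on the namespace queue. A sketch assuming the 4.4-era layout of struct nvme_common_command:

```c
static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;
	struct nvme_command c;
	u8 data[16] = { 0, };

	put_unaligned_le64(key, &data[0]);	/* current reservation key */
	put_unaligned_le64(sa_key, &data[8]);	/* service action key */

	memset(&c, 0, sizeof(c));
	c.common.opcode = op;
	c.common.nsid = cpu_to_le32(ns->ns_id);
	c.common.cdw10[0] = cpu_to_le32(cdw10);

	return nvme_submit_sync_cmd(ns->queue, &c, data, 16);
}
```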
2248 struct nvme_ns *ns; in nvme_alloc_ns() local
2252 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); in nvme_alloc_ns()
2253 if (!ns) in nvme_alloc_ns()
2256 ns->queue = blk_mq_init_queue(&dev->tagset); in nvme_alloc_ns()
2257 if (IS_ERR(ns->queue)) in nvme_alloc_ns()
2259 queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue); in nvme_alloc_ns()
2260 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue); in nvme_alloc_ns()
2261 ns->dev = dev; in nvme_alloc_ns()
2262 ns->queue->queuedata = ns; in nvme_alloc_ns()
2268 kref_init(&ns->kref); in nvme_alloc_ns()
2269 ns->ns_id = nsid; in nvme_alloc_ns()
2270 ns->disk = disk; in nvme_alloc_ns()
2271 ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */ in nvme_alloc_ns()
2272 list_add_tail(&ns->list, &dev->namespaces); in nvme_alloc_ns()
2274 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); in nvme_alloc_ns()
2276 blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); in nvme_alloc_ns()
2277 blk_queue_max_segments(ns->queue, in nvme_alloc_ns()
2281 blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9); in nvme_alloc_ns()
2283 blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA); in nvme_alloc_ns()
2284 blk_queue_virt_boundary(ns->queue, dev->page_size - 1); in nvme_alloc_ns()
2289 disk->private_data = ns; in nvme_alloc_ns()
2290 disk->queue = ns->queue; in nvme_alloc_ns()
2302 if (nvme_revalidate_disk(ns->disk)) in nvme_alloc_ns()
2306 if (ns->type != NVME_NS_LIGHTNVM) { in nvme_alloc_ns()
2307 add_disk(ns->disk); in nvme_alloc_ns()
2308 if (ns->ms) { in nvme_alloc_ns()
2309 struct block_device *bd = bdget_disk(ns->disk, 0); in nvme_alloc_ns()
2323 list_del(&ns->list); in nvme_alloc_ns()
2325 blk_cleanup_queue(ns->queue); in nvme_alloc_ns()
2327 kfree(ns); in nvme_alloc_ns()
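
nvme_alloc_ns() (2248-2327) creates the per-namespace request queue on the controller's shared tag set, applies the controller-wide limits (max sectors, segments, stripe chunking, and the virt boundary that enforces PRP alignment), and only add_disk()s non-LightNVM namespaces after a successful first revalidation; error paths unwind in reverse (2323-2327). A condensed sketch; the gendisk naming, the full limit setup, and put_disk() on the failure path are assumptions:

```c
static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int node = dev_to_node(dev->dev);

	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
		return;

	ns->queue = blk_mq_init_queue(&dev->tagset);	/* shared tag set */
	if (IS_ERR(ns->queue))
		goto out_free_ns;
	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
	ns->dev = dev;
	ns->queue->queuedata = ns;	/* nvme_queue_rq() reads this back */

	disk = alloc_disk_node(0, node);
	if (!disk)
		goto out_free_queue;

	kref_init(&ns->kref);
	ns->ns_id = nsid;
	ns->disk = disk;
	ns->lba_shift = 9;		/* 512-byte default until revalidation */
	list_add_tail(&ns->list, &dev->namespaces);

	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
	blk_queue_virt_boundary(ns->queue, dev->page_size - 1);	/* PRP rule */

	disk->private_data = ns;
	disk->queue = ns->queue;

	if (nvme_revalidate_disk(ns->disk))
		goto out_del_list;
	if (ns->type != NVME_NS_LIGHTNVM)
		add_disk(ns->disk);	/* LightNVM registers its own target */
	return;

 out_del_list:
	list_del(&ns->list);
	put_disk(disk);
 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_free_ns:
	kfree(ns);
}
```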
2522 struct nvme_ns *ns; in nvme_find_ns() local
2524 list_for_each_entry(ns, &dev->namespaces, list) { in nvme_find_ns()
2525 if (ns->ns_id == nsid) in nvme_find_ns()
2526 return ns; in nvme_find_ns()
2527 if (ns->ns_id > nsid) in nvme_find_ns()
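
nvme_find_ns() (2522-2527) exploits the fact that nvme_alloc_ns() appends namespaces in ascending nsid order during the 1..nn scan, so dev->namespaces stays sorted and the walk can stop at the first ns_id past the target. Reconstructed from the lines shown:

```c
static struct nvme_ns *nvme_find_ns(struct nvme_dev *dev, unsigned nsid)
{
	struct nvme_ns *ns;

	list_for_each_entry(ns, &dev->namespaces, list) {
		if (ns->ns_id == nsid)
			return ns;
		if (ns->ns_id > nsid)	/* sorted list: no match beyond here */
			break;
	}
	return NULL;
}
```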
2539 static void nvme_ns_remove(struct nvme_ns *ns) in nvme_ns_remove() argument
2541 bool kill = nvme_io_incapable(ns->dev) && !blk_queue_dying(ns->queue); in nvme_ns_remove()
2544 blk_set_queue_dying(ns->queue); in nvme_ns_remove()
2552 blk_mq_abort_requeue_list(ns->queue); in nvme_ns_remove()
2554 if (ns->disk->flags & GENHD_FL_UP) in nvme_ns_remove()
2555 del_gendisk(ns->disk); in nvme_ns_remove()
2556 if (kill || !blk_queue_dying(ns->queue)) { in nvme_ns_remove()
2557 blk_mq_abort_requeue_list(ns->queue); in nvme_ns_remove()
2558 blk_cleanup_queue(ns->queue); in nvme_ns_remove()
2560 list_del_init(&ns->list); in nvme_ns_remove()
2561 kref_put(&ns->kref, nvme_free_ns); in nvme_ns_remove()
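
nvme_ns_remove() (2539-2561) handles two shutdown shapes: a controller that can still do I/O (normal removal, where outstanding requests drain) and one that cannot (the "kill" case at 2541, where the queue is marked dying and the requeue list aborted so nothing waits forever). Only then are the gendisk deleted and the queue cleaned up, with list_del_init() keeping a second call harmless. Reconstructed from the lines shown:

```c
static void nvme_ns_remove(struct nvme_ns *ns)
{
	bool kill = nvme_io_incapable(ns->dev) &&
			!blk_queue_dying(ns->queue);

	if (kill) {
		blk_set_queue_dying(ns->queue);
		/* controller is dead: fail requeued requests immediately */
		blk_mq_abort_requeue_list(ns->queue);
	}
	if (ns->disk->flags & GENHD_FL_UP)
		del_gendisk(ns->disk);
	if (kill || !blk_queue_dying(ns->queue)) {
		blk_mq_abort_requeue_list(ns->queue);
		blk_cleanup_queue(ns->queue);
	}
	list_del_init(&ns->list);	/* safe against a repeated remove */
	kref_put(&ns->kref, nvme_free_ns);
}
```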
2566 struct nvme_ns *ns, *next; in nvme_scan_namespaces() local
2570 ns = nvme_find_ns(dev, i); in nvme_scan_namespaces()
2571 if (ns) { in nvme_scan_namespaces()
2572 if (revalidate_disk(ns->disk)) in nvme_scan_namespaces()
2573 nvme_ns_remove(ns); in nvme_scan_namespaces()
2577 list_for_each_entry_safe(ns, next, &dev->namespaces, list) { in nvme_scan_namespaces()
2578 if (ns->ns_id > nn) in nvme_scan_namespaces()
2579 nvme_ns_remove(ns); in nvme_scan_namespaces()
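
nvme_scan_namespaces() (2566-2579) walks nsids 1..nn, where nn is the namespace count reported by Identify Controller: existing namespaces are revalidated and removed if revalidation fails, missing ones are allocated, and a final sweep drops anything whose nsid now exceeds the controller's count. Reconstructed around the lines shown; the alloc call in the else branch is inferred:

```c
static void nvme_scan_namespaces(struct nvme_dev *dev, unsigned nn)
{
	struct nvme_ns *ns, *next;
	unsigned i;

	for (i = 1; i <= nn; i++) {		/* nsids are 1-based */
		ns = nvme_find_ns(dev, i);
		if (ns) {
			if (revalidate_disk(ns->disk))
				nvme_ns_remove(ns);
		} else
			nvme_alloc_ns(dev, i);
	}
	/* drop namespaces the controller no longer reports */
	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		if (ns->ns_id > nn)
			nvme_ns_remove(ns);
	}
}
```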
2932 struct nvme_ns *ns; in nvme_freeze_queues() local
2934 list_for_each_entry(ns, &dev->namespaces, list) { in nvme_freeze_queues()
2935 blk_mq_freeze_queue_start(ns->queue); in nvme_freeze_queues()
2937 spin_lock_irq(ns->queue->queue_lock); in nvme_freeze_queues()
2938 queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue); in nvme_freeze_queues()
2939 spin_unlock_irq(ns->queue->queue_lock); in nvme_freeze_queues()
2941 blk_mq_cancel_requeue_work(ns->queue); in nvme_freeze_queues()
2942 blk_mq_stop_hw_queues(ns->queue); in nvme_freeze_queues()
2948 struct nvme_ns *ns; in nvme_unfreeze_queues() local
2950 list_for_each_entry(ns, &dev->namespaces, list) { in nvme_unfreeze_queues()
2951 queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue); in nvme_unfreeze_queues()
2952 blk_mq_unfreeze_queue(ns->queue); in nvme_unfreeze_queues()
2953 blk_mq_start_stopped_hw_queues(ns->queue, true); in nvme_unfreeze_queues()
2954 blk_mq_kick_requeue_list(ns->queue); in nvme_unfreeze_queues()
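
The freeze/unfreeze pair (2932-2954) brackets controller resets: freeze starts each queue's usage-count drain, marks it stopped under queue_lock, and cancels requeue work so nothing sneaks back in while the controller is down; unfreeze reverses every step and kicks the requeue list to restart anything parked. A hypothetical outline of how a reset path would pair them, with the controller re-init elided as a comment:

```c
/* Sketch only: freeze must precede disabling the controller, and
 * unfreeze must wait until its I/O queues are live again, or
 * restarted requests would hit a dead device. */
static void nvme_reset_sketch(struct nvme_dev *dev)
{
	nvme_freeze_queues(dev);
	/* ... disable controller, restore state, re-create I/O queues ... */
	nvme_unfreeze_queues(dev);
}
```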
2987 struct nvme_ns *ns, *next; in nvme_dev_remove() local
2998 list_for_each_entry_safe(ns, next, &dev->namespaces, list) in nvme_dev_remove()
2999 nvme_ns_remove(ns); in nvme_dev_remove()
3105 struct nvme_ns *ns; in nvme_dev_ioctl() local
3113 ns = list_first_entry(&dev->namespaces, struct nvme_ns, list); in nvme_dev_ioctl()
3114 return nvme_user_cmd(dev, ns, (void __user *)arg); in nvme_dev_ioctl()
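
nvme_dev_ioctl() (3105-3114) is the char-device (controller-level) passthrough. Because NVME_IOCTL_IO_CMD needs some namespace queue to submit on, it grabs the first entry of dev->namespaces, which only behaves predictably on single-namespace controllers. A sketch of that branch, with the empty-list guard assumed:

```c
	case NVME_IOCTL_IO_CMD:
		if (list_empty(&dev->namespaces))
			return -ENOTTY;	/* no namespace queue to submit on */
		ns = list_first_entry(&dev->namespaces, struct nvme_ns, list);
		return nvme_user_cmd(dev, ns, (void __user *)arg);
```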