Lines Matching refs:ns (uses of the per-namespace object, struct nvme_ns, in the NVMe block driver)

506 struct nvme_ns *ns = req->rq_disk->private_data; in nvme_dif_remap() local
512 if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3) in nvme_dif_remap()
523 phys = nvme_block_nr(ns, blk_rq_pos(req)); in nvme_dif_remap()
524 nlb = (blk_rq_bytes(req) >> ns->lba_shift); in nvme_dif_remap()
525 ts = ns->disk->integrity->tuple_size; in nvme_dif_remap()
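
The nvme_dif_remap() hits above show only the early-exit check and the setup of the physical start block, block count, and protection-tuple size. A minimal sketch of how those lines fit together follows; the dif_swap() callback parameter and the per-tuple loop described in the closing comment are assumptions about the elided body, not taken from the listing.

static void nvme_dif_remap(struct request *req,
			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
{
	struct nvme_ns *ns = req->rq_disk->private_data;
	u32 phys, nlb, ts;

	/* Type 3 protection carries no reference tag, so nothing to remap. */
	if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3)
		return;

	phys = nvme_block_nr(ns, blk_rq_pos(req));	/* first physical block */
	nlb = blk_rq_bytes(req) >> ns->lba_shift;	/* blocks in the request */
	ts = ns->disk->integrity->tuple_size;		/* bytes per PI tuple */

	/*
	 * Elided: map the request's integrity buffer and call dif_swap()
	 * once per block, advancing phys, the virtual seed, and the buffer
	 * pointer (by ts bytes) on each iteration.
	 */
}
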
551 static void nvme_init_integrity(struct nvme_ns *ns) in nvme_init_integrity() argument
555 switch (ns->pi_type) { in nvme_init_integrity()
567 integrity.tuple_size = ns->ms; in nvme_init_integrity()
568 blk_integrity_register(ns->disk, &integrity); in nvme_init_integrity()
569 blk_queue_max_integrity_segments(ns->queue, 1); in nvme_init_integrity()
582 static void nvme_init_integrity(struct nvme_ns *ns) in nvme_init_integrity() argument
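
nvme_init_integrity() (lines 551-569) registers a blk_integrity profile sized to the namespace's metadata and keyed off ns->pi_type; line 582 is the empty stub built when CONFIG_BLK_DEV_INTEGRITY is off. A hedged reconstruction, with the per-type verify/generate callback selection reduced to comments:

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_init_integrity(struct nvme_ns *ns)
{
	struct blk_integrity integrity = { };

	switch (ns->pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		/* Type 3: guard-tag checking only. */
		break;
	default:
		/* Type 1/2: guard and reference tags are both checked. */
		break;
	}
	integrity.tuple_size = ns->ms;		/* metadata bytes per block */
	blk_integrity_register(ns->disk, &integrity);
	blk_queue_max_integrity_segments(ns->queue, 1);
}
#else
static void nvme_init_integrity(struct nvme_ns *ns)
{
}
#endif
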
720 static void nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns, in nvme_submit_discard() argument
728 range->nlb = cpu_to_le32(blk_rq_bytes(req) >> ns->lba_shift); in nvme_submit_discard()
729 range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req))); in nvme_submit_discard()
734 cmnd->dsm.nsid = cpu_to_le32(ns->ns_id); in nvme_submit_discard()
744 static void nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns, in nvme_submit_flush() argument
752 cmnd->common.nsid = cpu_to_le32(ns->ns_id); in nvme_submit_flush()
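
nvme_submit_discard() fills a single nvme_dsm_range with the block count and starting LBA derived from the request (lines 728-729) and issues a Dataset Management command against ns->ns_id (line 734); nvme_submit_flush() only needs the namespace ID. A sketch of the simpler flush path, assuming the usual submission-queue ring handling in this driver:

static void nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								int cmdid)
{
	/* Next free slot in the submission queue (ring details assumed). */
	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.command_id = cmdid;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);
}
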
760 struct nvme_ns *ns) in nvme_submit_iod() argument
780 cmnd->rw.nsid = cpu_to_le32(ns->ns_id); in nvme_submit_iod()
783 cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req))); in nvme_submit_iod()
784 cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); in nvme_submit_iod()
788 switch (ns->pi_type) { in nvme_submit_iod()
797 nvme_block_nr(ns, blk_rq_pos(req))); in nvme_submit_iod()
800 } else if (ns->ms) in nvme_submit_iod()
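
For reads and writes, nvme_submit_iod() targets ns->ns_id, converts the request's sector to a namespace LBA, and uses a zero-based block count (lines 780-784). With end-to-end protection, the reference tag is seeded from the starting block (line 797); without an integrity payload on a metadata-formatted namespace, the controller is asked to generate and strip the protection information itself (line 800). A fragment of that setup; the NVME_RW_PRINFO_* control bits are the uapi names assumed here:

	u16 control = 0;

	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (blk_integrity_rq(req)) {
		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(
					nvme_block_nr(ns, blk_rq_pos(req)));
			break;
		}
	} else if (ns->ms)
		/* No integrity payload: let the controller generate/strip PI. */
		control |= NVME_RW_PRINFO_PRACT;

	cmnd->rw.control = cpu_to_le16(control);
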
816 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_queue_rq() local
828 if (ns->ms && !blk_integrity_rq(req)) { in nvme_queue_rq()
829 if (!(ns->pi_type && ns->ms == 8)) { in nvme_queue_rq()
836 iod = nvme_alloc_iod(req, ns->dev, GFP_ATOMIC); in nvme_queue_rq()
891 nvme_submit_discard(nvmeq, ns, req, iod); in nvme_queue_rq()
893 nvme_submit_flush(nvmeq, ns, req->tag); in nvme_queue_rq()
895 nvme_submit_iod(nvmeq, iod, ns); in nvme_queue_rq()
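
nvme_queue_rq() pulls the namespace out of the hardware context's queue data (line 816), rejects unprotectable I/O to metadata-formatted namespaces unless the controller can generate the 8-byte PI tuple itself (lines 828-829), allocates an iod (line 836), and finally dispatches to the discard, flush, or read/write helper (lines 891-895). A condensed sketch of that flow; the REQ_* flag tests and return codes are assumptions:

	struct nvme_queue *nvmeq = hctx->driver_data;
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_iod *iod;

	if (ns->ms && !blk_integrity_rq(req)) {
		/* Only 8-byte PI can be generated by the controller. */
		if (!(ns->pi_type && ns->ms == 8))
			return BLK_MQ_RQ_QUEUE_ERROR;
	}

	iod = nvme_alloc_iod(req, ns->dev, GFP_ATOMIC);
	if (!iod)
		return BLK_MQ_RQ_QUEUE_BUSY;

	/* ... PRP list / scatterlist setup elided ... */

	if (req->cmd_flags & REQ_DISCARD)
		nvme_submit_discard(nvmeq, ns, req, iod);
	else if (req->cmd_flags & REQ_FLUSH)
		nvme_submit_flush(nvmeq, ns, req->tag);
	else
		nvme_submit_iod(nvmeq, iod, ns);

	return BLK_MQ_RQ_QUEUE_OK;
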
1086 int nvme_submit_io_cmd(struct nvme_dev *dev, struct nvme_ns *ns, in nvme_submit_io_cmd() argument
1092 req = blk_mq_alloc_request(ns->queue, WRITE, (GFP_KERNEL|__GFP_WAIT), in nvme_submit_io_cmd()
1746 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) in nvme_submit_io() argument
1748 struct nvme_dev *dev = ns->dev; in nvme_submit_io()
1760 length = (io.nblocks + 1) << ns->lba_shift; in nvme_submit_io()
1761 meta_len = (io.nblocks + 1) * ns->ms; in nvme_submit_io()
1763 if (meta_len && ((io.metadata & 3) || !io.metadata) && !ns->ext) in nvme_submit_io()
1765 else if (meta_len && ns->ext) { in nvme_submit_io()
1811 c.rw.nsid = cpu_to_le32(ns->ns_id); in nvme_submit_io()
1822 status = nvme_submit_io_cmd(dev, ns, &c, NULL); in nvme_submit_io()
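
The NVME_IOCTL_SUBMIT_IO path sizes the data and separate-metadata buffers from the zero-based io.nblocks field and the namespace's block and metadata sizes (lines 1760-1761), then validates the metadata pointer: separate metadata must be a non-NULL, 4-byte-aligned user address, while extended-LBA formats carry metadata inline with the data (lines 1763-1765). A sketch of that validation; the inline-metadata length adjustment is an assumption:

	struct nvme_user_io io;
	unsigned length, meta_len;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;

	length = (io.nblocks + 1) << ns->lba_shift;	/* nblocks is 0's based */
	meta_len = (io.nblocks + 1) * ns->ms;

	if (meta_len && ((io.metadata & 3) || !io.metadata) && !ns->ext)
		return -EINVAL;
	else if (meta_len && ns->ext) {
		/* Extended LBAs: metadata travels inline with the data. */
		length += meta_len;
		meta_len = 0;
	}
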
1836 static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns, in nvme_user_cmd() argument
1879 else if (ns) { in nvme_user_cmd()
1882 req = blk_mq_alloc_request(ns->queue, WRITE, in nvme_user_cmd()
1909 struct nvme_ns *ns = bdev->bd_disk->private_data; in nvme_ioctl() local
1914 return ns->ns_id; in nvme_ioctl()
1916 return nvme_user_cmd(ns->dev, NULL, (void __user *)arg); in nvme_ioctl()
1918 return nvme_user_cmd(ns->dev, ns, (void __user *)arg); in nvme_ioctl()
1920 return nvme_submit_io(ns, (void __user *)arg); in nvme_ioctl()
1924 return nvme_sg_io(ns, (void __user *)arg); in nvme_ioctl()
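
The block-device ioctl handler resolves the namespace from the gendisk (line 1909) and dispatches: admin passthrough deliberately passes a NULL namespace (line 1916), while I/O passthrough, submit-io, and SG_IO are bound to this ns. Assembled as a sketch; the NVME_IOCTL_* case labels are the driver's uapi constants and are assumed rather than shown in the listing:

static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
							unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ns->dev, NULL, (void __user *)arg);
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->dev, ns, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
	case SG_IO:
		return nvme_sg_io(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}
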
1947 struct nvme_ns *ns; in nvme_open() local
1950 ns = bdev->bd_disk->private_data; in nvme_open()
1951 if (!ns) in nvme_open()
1953 else if (!kref_get_unless_zero(&ns->dev->kref)) in nvme_open()
1964 struct nvme_ns *ns = disk->private_data; in nvme_release() local
1965 struct nvme_dev *dev = ns->dev; in nvme_release()
1979 static void nvme_config_discard(struct nvme_ns *ns) in nvme_config_discard() argument
1981 u32 logical_block_size = queue_logical_block_size(ns->queue); in nvme_config_discard()
1982 ns->queue->limits.discard_zeroes_data = 0; in nvme_config_discard()
1983 ns->queue->limits.discard_alignment = logical_block_size; in nvme_config_discard()
1984 ns->queue->limits.discard_granularity = logical_block_size; in nvme_config_discard()
1985 ns->queue->limits.max_discard_sectors = 0xffffffff; in nvme_config_discard()
1986 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); in nvme_config_discard()
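
Lines 1979-1986 are essentially the whole discard-configuration helper; assembled and commented:

static void nvme_config_discard(struct nvme_ns *ns)
{
	u32 logical_block_size = queue_logical_block_size(ns->queue);

	/* Deallocated blocks are not guaranteed to read back as zeroes. */
	ns->queue->limits.discard_zeroes_data = 0;
	/* Discards are aligned and granular at the logical block size. */
	ns->queue->limits.discard_alignment = logical_block_size;
	ns->queue->limits.discard_granularity = logical_block_size;
	/* DSM ranges take a 32-bit block count, so advertise the maximum. */
	ns->queue->limits.max_discard_sectors = 0xffffffff;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}
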
1991 struct nvme_ns *ns = disk->private_data; in nvme_revalidate_disk() local
1992 struct nvme_dev *dev = ns->dev; in nvme_revalidate_disk()
2006 if (nvme_identify(dev, ns->ns_id, 0, dma_addr)) { in nvme_revalidate_disk()
2009 ns->ns_id); in nvme_revalidate_disk()
2013 old_ms = ns->ms; in nvme_revalidate_disk()
2015 ns->lba_shift = id->lbaf[lbaf].ds; in nvme_revalidate_disk()
2016 ns->ms = le16_to_cpu(id->lbaf[lbaf].ms); in nvme_revalidate_disk()
2017 ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT); in nvme_revalidate_disk()
2023 if (ns->lba_shift == 0) in nvme_revalidate_disk()
2024 ns->lba_shift = 9; in nvme_revalidate_disk()
2025 bs = 1 << ns->lba_shift; in nvme_revalidate_disk()
2028 pi_type = ns->ms == sizeof(struct t10_pi_tuple) ? in nvme_revalidate_disk()
2031 if (blk_get_integrity(disk) && (ns->pi_type != pi_type || in nvme_revalidate_disk()
2032 ns->ms != old_ms || in nvme_revalidate_disk()
2034 (ns->ms && ns->ext))) in nvme_revalidate_disk()
2037 ns->pi_type = pi_type; in nvme_revalidate_disk()
2038 blk_queue_logical_block_size(ns->queue, bs); in nvme_revalidate_disk()
2040 if (ns->ms && !blk_get_integrity(disk) && (disk->flags & GENHD_FL_UP) && in nvme_revalidate_disk()
2041 !ns->ext) in nvme_revalidate_disk()
2042 nvme_init_integrity(ns); in nvme_revalidate_disk()
2044 if (id->ncap == 0 || (ns->ms && !blk_get_integrity(disk))) in nvme_revalidate_disk()
2047 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9)); in nvme_revalidate_disk()
2050 nvme_config_discard(ns); in nvme_revalidate_disk()
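
nvme_revalidate_disk() re-reads Identify Namespace, recomputes the LBA shift, metadata size, extended-LBA flag, and PI type, drops a stale integrity profile when the format changed, re-registers integrity only for separate (non-extended) metadata, and finally sets capacity and discard support. A condensed sketch; the Identify DMA buffer handling, the lbaf index taken from id->flbas, and the exact unregister condition are assumptions:

	struct nvme_id_ns *id;	/* Identify Namespace data (DMA setup elided) */
	unsigned lbaf;		/* current format index from id->flbas (elided) */
	int bs, pi_type;
	u16 old_ms;

	if (nvme_identify(dev, ns->ns_id, 0, dma_addr))
		return 0;	/* the original also logs the failing ns->ns_id */

	old_ms = ns->ms;
	ns->lba_shift = id->lbaf[lbaf].ds;
	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);

	if (ns->lba_shift == 0)
		ns->lba_shift = 9;	/* default to 512-byte blocks */
	bs = 1 << ns->lba_shift;

	/* Only 8-byte metadata can hold the standard T10 PI tuple. */
	pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
					id->dps & NVME_NS_DPS_PI_MASK : 0;

	/* Unregister a stale profile if the format no longer matches it. */
	if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
					ns->ms != old_ms ||
					(ns->ms && ns->ext)))
		blk_integrity_unregister(disk);

	ns->pi_type = pi_type;
	blk_queue_logical_block_size(ns->queue, bs);

	if (ns->ms && !blk_get_integrity(disk) && (disk->flags & GENHD_FL_UP) &&
								!ns->ext)
		nvme_init_integrity(ns);

	if (id->ncap == 0 || (ns->ms && !blk_get_integrity(disk)))
		set_capacity(disk, 0);
	else
		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	if (dev->oncs & NVME_CTRL_ONCS_DSM)
		nvme_config_discard(ns);
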
2109 struct nvme_ns *ns; in nvme_alloc_ns() local
2113 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); in nvme_alloc_ns()
2114 if (!ns) in nvme_alloc_ns()
2117 ns->queue = blk_mq_init_queue(&dev->tagset); in nvme_alloc_ns()
2118 if (IS_ERR(ns->queue)) in nvme_alloc_ns()
2120 queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue); in nvme_alloc_ns()
2121 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue); in nvme_alloc_ns()
2122 queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, ns->queue); in nvme_alloc_ns()
2123 ns->dev = dev; in nvme_alloc_ns()
2124 ns->queue->queuedata = ns; in nvme_alloc_ns()
2130 ns->ns_id = nsid; in nvme_alloc_ns()
2131 ns->disk = disk; in nvme_alloc_ns()
2132 ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */ in nvme_alloc_ns()
2133 list_add_tail(&ns->list, &dev->namespaces); in nvme_alloc_ns()
2135 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); in nvme_alloc_ns()
2137 blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); in nvme_alloc_ns()
2139 blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9); in nvme_alloc_ns()
2141 blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA); in nvme_alloc_ns()
2146 disk->private_data = ns; in nvme_alloc_ns()
2147 disk->queue = ns->queue; in nvme_alloc_ns()
2159 nvme_revalidate_disk(ns->disk); in nvme_alloc_ns()
2160 add_disk(ns->disk); in nvme_alloc_ns()
2161 if (ns->ms) in nvme_alloc_ns()
2162 revalidate_disk(ns->disk); in nvme_alloc_ns()
2165 blk_cleanup_queue(ns->queue); in nvme_alloc_ns()
2167 kfree(ns); in nvme_alloc_ns()
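
nvme_alloc_ns() allocates the namespace on the submitting NUMA node, builds a blk-mq queue from the device's shared tagset, marks it no-merge, non-rotational, and SG-gap-free, links queue and gendisk back to the nvme_ns, applies the controller's transfer and stripe limits, and, after add_disk(), revalidates metadata-formatted disks a second time so the integrity profile is picked up. A trimmed sketch; the gendisk allocation and naming, and the guards around the optional limits, are assumptions:

	struct nvme_ns *ns;
	struct gendisk *disk;

	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
		return;

	ns->queue = blk_mq_init_queue(&dev->tagset);
	if (IS_ERR(ns->queue))
		goto out_free_ns;
	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
	queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, ns->queue);
	ns->dev = dev;
	ns->queue->queuedata = ns;

	/* disk = alloc_disk_node(...); naming and fops setup elided */
	ns->ns_id = nsid;
	ns->disk = disk;
	ns->lba_shift = 9;	/* 512 bytes until Identify validates the format */
	list_add_tail(&ns->list, &dev->namespaces);

	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
	if (dev->max_hw_sectors)
		blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
	if (dev->stripe_size)
		blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9);
	blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);

	disk->private_data = ns;
	disk->queue = ns->queue;

	nvme_revalidate_disk(ns->disk);
	add_disk(ns->disk);
	if (ns->ms)
		revalidate_disk(ns->disk);	/* second pass picks up integrity */
	return;

 out_free_ns:
	kfree(ns);
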
2588 struct nvme_ns *ns; in nvme_freeze_queues() local
2590 list_for_each_entry(ns, &dev->namespaces, list) { in nvme_freeze_queues()
2591 blk_mq_freeze_queue_start(ns->queue); in nvme_freeze_queues()
2593 spin_lock(ns->queue->queue_lock); in nvme_freeze_queues()
2594 queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue); in nvme_freeze_queues()
2595 spin_unlock(ns->queue->queue_lock); in nvme_freeze_queues()
2597 blk_mq_cancel_requeue_work(ns->queue); in nvme_freeze_queues()
2598 blk_mq_stop_hw_queues(ns->queue); in nvme_freeze_queues()
2604 struct nvme_ns *ns; in nvme_unfreeze_queues() local
2606 list_for_each_entry(ns, &dev->namespaces, list) { in nvme_unfreeze_queues()
2607 queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue); in nvme_unfreeze_queues()
2608 blk_mq_unfreeze_queue(ns->queue); in nvme_unfreeze_queues()
2609 blk_mq_start_stopped_hw_queues(ns->queue, true); in nvme_unfreeze_queues()
2610 blk_mq_kick_requeue_list(ns->queue); in nvme_unfreeze_queues()
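
Suspend and reset freeze every namespace queue, mark it stopped so no new work is queued, and cancel pending requeue work; resume reverses each step. The two helpers assembled from the lines above (the wait for the freeze to complete happens elsewhere and is not shown):

static void nvme_freeze_queues(struct nvme_dev *dev)
{
	struct nvme_ns *ns;

	list_for_each_entry(ns, &dev->namespaces, list) {
		blk_mq_freeze_queue_start(ns->queue);

		spin_lock(ns->queue->queue_lock);
		queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
		spin_unlock(ns->queue->queue_lock);

		blk_mq_cancel_requeue_work(ns->queue);
		blk_mq_stop_hw_queues(ns->queue);
	}
}

static void nvme_unfreeze_queues(struct nvme_dev *dev)
{
	struct nvme_ns *ns;

	list_for_each_entry(ns, &dev->namespaces, list) {
		queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
		blk_mq_unfreeze_queue(ns->queue);
		blk_mq_start_stopped_hw_queues(ns->queue, true);
		blk_mq_kick_requeue_list(ns->queue);
	}
}
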
2643 struct nvme_ns *ns; in nvme_dev_remove() local
2645 list_for_each_entry(ns, &dev->namespaces, list) { in nvme_dev_remove()
2646 if (ns->disk->flags & GENHD_FL_UP) { in nvme_dev_remove()
2647 if (blk_get_integrity(ns->disk)) in nvme_dev_remove()
2648 blk_integrity_unregister(ns->disk); in nvme_dev_remove()
2649 del_gendisk(ns->disk); in nvme_dev_remove()
2651 if (!blk_queue_dying(ns->queue)) { in nvme_dev_remove()
2652 blk_mq_abort_requeue_list(ns->queue); in nvme_dev_remove()
2653 blk_cleanup_queue(ns->queue); in nvme_dev_remove()
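
Device removal walks the namespace list: a live gendisk first loses its integrity profile, then is deleted; the queue is aborted and cleaned up unless it is already dying. Assembled from the lines above:

	struct nvme_ns *ns;

	list_for_each_entry(ns, &dev->namespaces, list) {
		if (ns->disk->flags & GENHD_FL_UP) {
			if (blk_get_integrity(ns->disk))
				blk_integrity_unregister(ns->disk);
			del_gendisk(ns->disk);
		}
		if (!blk_queue_dying(ns->queue)) {
			blk_mq_abort_requeue_list(ns->queue);
			blk_cleanup_queue(ns->queue);
		}
	}
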
2713 struct nvme_ns *ns, *next; in nvme_free_namespaces() local
2715 list_for_each_entry_safe(ns, next, &dev->namespaces, list) { in nvme_free_namespaces()
2716 list_del(&ns->list); in nvme_free_namespaces()
2719 ns->disk->private_data = NULL; in nvme_free_namespaces()
2722 put_disk(ns->disk); in nvme_free_namespaces()
2723 kfree(ns); in nvme_free_namespaces()
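
Final namespace freeing breaks the gendisk's back-pointer before dropping the disk reference, so a racing open or ioctl sees a NULL private_data (as checked in nvme_open(), line 1951) instead of freed memory. Assembled as a sketch; any locking around the pointer clear is not shown in the listing and is elided here:

	struct nvme_ns *ns, *next;

	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);

		/* Break the gendisk -> ns link before freeing the ns. */
		ns->disk->private_data = NULL;

		put_disk(ns->disk);
		kfree(ns);
	}
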
2777 struct nvme_ns *ns; in nvme_dev_ioctl() local
2785 ns = list_first_entry(&dev->namespaces, struct nvme_ns, list); in nvme_dev_ioctl()
2786 return nvme_user_cmd(dev, ns, (void __user *)arg); in nvme_dev_ioctl()
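
The controller character device also accepts I/O passthrough; since no namespace is implied by that node, it simply targets the first entry on dev->namespaces (lines 2785-2786). A sketch of that case; the case label and the empty-list check are assumptions:

	case NVME_IOCTL_IO_CMD:
		if (list_empty(&dev->namespaces))
			return -ENOTTY;	/* assumed: nothing to send I/O to */
		ns = list_first_entry(&dev->namespaces, struct nvme_ns, list);
		return nvme_user_cmd(dev, ns, (void __user *)arg);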