Lines matching refs: ns
523 static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns, in nvme_trans_standard_inquiry_page() argument
527 struct nvme_dev *dev = ns->dev; in nvme_trans_standard_inquiry_page()
538 nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns); in nvme_trans_standard_inquiry_page()
567 static int nvme_trans_supported_vpd_pages(struct nvme_ns *ns, in nvme_trans_supported_vpd_pages() argument
587 static int nvme_trans_unit_serial_page(struct nvme_ns *ns, in nvme_trans_unit_serial_page() argument
591 struct nvme_dev *dev = ns->dev; in nvme_trans_unit_serial_page()
603 static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, in nvme_trans_device_id_page() argument
606 struct nvme_dev *dev = ns->dev; in nvme_trans_device_id_page()
610 __be32 tmp_id = cpu_to_be32(ns->ns_id); in nvme_trans_device_id_page()
619 nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns); in nvme_trans_device_id_page()
669 static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, in nvme_trans_ext_inq_page() argument
675 struct nvme_dev *dev = ns->dev; in nvme_trans_ext_inq_page()
691 nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns); in nvme_trans_ext_inq_page()
734 static int nvme_trans_bdev_limits_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, in nvme_trans_bdev_limits_page() argument
738 nvme_block_nr(ns, queue_max_hw_sectors(ns->queue))); in nvme_trans_bdev_limits_page()
739 __be32 max_discard = cpu_to_be32(ns->queue->limits.max_discard_sectors); in nvme_trans_bdev_limits_page()
754 static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, in nvme_trans_bdev_char_page() argument
784 static int nvme_trans_log_supp_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr, in nvme_trans_log_supp_pages() argument
812 static int nvme_trans_log_info_exceptions(struct nvme_ns *ns, in nvme_trans_log_info_exceptions() argument
818 struct nvme_dev *dev = ns->dev; in nvme_trans_log_info_exceptions()
859 static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr, in nvme_trans_log_temperature() argument
865 struct nvme_dev *dev = ns->dev; in nvme_trans_log_temperature()
946 static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr, in nvme_trans_fill_blk_desc() argument
951 struct nvme_dev *dev = ns->dev; in nvme_trans_fill_blk_desc()
961 nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns); in nvme_trans_fill_blk_desc()
989 static int nvme_trans_fill_control_page(struct nvme_ns *ns, in nvme_trans_fill_control_page() argument
1011 static int nvme_trans_fill_caching_page(struct nvme_ns *ns, in nvme_trans_fill_caching_page() argument
1017 struct nvme_dev *dev = ns->dev; in nvme_trans_fill_caching_page()
1038 static int nvme_trans_fill_pow_cnd_page(struct nvme_ns *ns, in nvme_trans_fill_pow_cnd_page() argument
1052 static int nvme_trans_fill_inf_exc_page(struct nvme_ns *ns, in nvme_trans_fill_inf_exc_page() argument
1067 static int nvme_trans_fill_all_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr, in nvme_trans_fill_all_pages() argument
1078 res = nvme_trans_fill_caching_page(ns, hdr, &resp[mode_pages_offset_1], in nvme_trans_fill_all_pages()
1082 res = nvme_trans_fill_control_page(ns, hdr, &resp[mode_pages_offset_2], in nvme_trans_fill_all_pages()
1086 res = nvme_trans_fill_pow_cnd_page(ns, hdr, &resp[mode_pages_offset_3], in nvme_trans_fill_all_pages()
1090 return nvme_trans_fill_inf_exc_page(ns, hdr, &resp[mode_pages_offset_4], in nvme_trans_fill_all_pages()
1104 static int nvme_trans_mode_page_create(struct nvme_ns *ns, in nvme_trans_mode_page_create() argument
1145 res = nvme_trans_fill_blk_desc(ns, hdr, in nvme_trans_mode_page_create()
1151 res = mode_page_fill_func(ns, hdr, &response[mode_pages_offset_1], in nvme_trans_mode_page_create()
1205 static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr, in nvme_trans_power_state() argument
1210 struct nvme_dev *dev = ns->dev; in nvme_trans_power_state()
1264 static int nvme_trans_send_activate_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr, in nvme_trans_send_activate_fw_cmd() argument
1274 nvme_sc = nvme_submit_sync_cmd(ns->queue, &c, NULL, 0); in nvme_trans_send_activate_fw_cmd()
1278 static int nvme_trans_send_download_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr, in nvme_trans_send_download_fw_cmd() argument
1283 struct nvme_dev *dev = ns->dev; in nvme_trans_send_download_fw_cmd()
1322 static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list, in nvme_trans_modesel_save_bd() argument
1333 ns->mode_select_num_blocks = in nvme_trans_modesel_save_bd()
1338 ns->mode_select_block_len = in nvme_trans_modesel_save_bd()
1344 ns->mode_select_num_blocks = in nvme_trans_modesel_save_bd()
1354 ns->mode_select_block_len = in nvme_trans_modesel_save_bd()
1362 static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr, in nvme_trans_modesel_get_mp() argument
1367 struct nvme_dev *dev = ns->dev; in nvme_trans_modesel_get_mp()
1400 static int nvme_trans_modesel_data(struct nvme_ns *ns, struct sg_io_hdr *hdr, in nvme_trans_modesel_data() argument
1428 nvme_trans_modesel_save_bd(ns, parm_list, index, bd_len, llbaa); in nvme_trans_modesel_data()
1456 res = nvme_trans_modesel_get_mp(ns, hdr, &parm_list[index], in nvme_trans_modesel_data()
1471 static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns, in nvme_trans_fmt_set_blk_size_count() argument
1476 struct nvme_dev *dev = ns->dev; in nvme_trans_fmt_set_blk_size_count()
1486 if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) { in nvme_trans_fmt_set_blk_size_count()
1489 nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns); in nvme_trans_fmt_set_blk_size_count()
1494 if (ns->mode_select_num_blocks == 0) in nvme_trans_fmt_set_blk_size_count()
1495 ns->mode_select_num_blocks = le64_to_cpu(id_ns->ncap); in nvme_trans_fmt_set_blk_size_count()
1496 if (ns->mode_select_block_len == 0) { in nvme_trans_fmt_set_blk_size_count()
1498 ns->mode_select_block_len = in nvme_trans_fmt_set_blk_size_count()
1568 static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr, in nvme_trans_fmt_send_cmd() argument
1573 struct nvme_dev *dev = ns->dev; in nvme_trans_fmt_send_cmd()
1582 nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns); in nvme_trans_fmt_send_cmd()
1591 if (ns->mode_select_block_len == (1 << (id_ns->lbaf[i].ds))) { in nvme_trans_fmt_send_cmd()
1601 if (ns->mode_select_num_blocks != le64_to_cpu(id_ns->ncap)) { in nvme_trans_fmt_send_cmd()
1611 c.format.nsid = cpu_to_le32(ns->ns_id); in nvme_trans_fmt_send_cmd()
1634 static u16 nvme_trans_io_get_control(struct nvme_ns *ns, in nvme_trans_io_get_control() argument
1647 static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, in nvme_trans_do_nvme_io() argument
1661 u32 max_blocks = queue_max_hw_sectors(ns->queue); in nvme_trans_do_nvme_io()
1685 unit_num_blocks = unit_len >> ns->lba_shift; in nvme_trans_do_nvme_io()
1690 unit_len = unit_num_blocks << ns->lba_shift; in nvme_trans_do_nvme_io()
1692 ((1 << ns->lba_shift) * nvme_offset); in nvme_trans_do_nvme_io()
1696 c.rw.nsid = cpu_to_le32(ns->ns_id); in nvme_trans_do_nvme_io()
1699 control = nvme_trans_io_get_control(ns, cdb_info); in nvme_trans_do_nvme_io()
1702 if (get_capacity(ns->disk) - unit_num_blocks < in nvme_trans_do_nvme_io()
1707 nvme_sc = __nvme_submit_sync_cmd(ns->queue, &c, NULL, in nvme_trans_do_nvme_io()
1721 static int nvme_trans_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 is_write, in nvme_trans_io() argument
1744 if (cdb_info.prot_info && !ns->pi_type) { in nvme_trans_io()
1792 if (sgl.iov_len % (1 << ns->lba_shift) != 0) { in nvme_trans_io()
1809 if (xfer_bytes != (cdb_info.xfer_len << ns->lba_shift)) { in nvme_trans_io()
1819 res = nvme_trans_do_nvme_io(ns, hdr, &cdb_info, is_write); in nvme_trans_io()
1827 static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr, in nvme_trans_inquiry() argument
1849 res = nvme_trans_standard_inquiry_page(ns, hdr, in nvme_trans_inquiry()
1861 res = nvme_trans_supported_vpd_pages(ns, hdr, in nvme_trans_inquiry()
1865 res = nvme_trans_unit_serial_page(ns, hdr, inq_response, in nvme_trans_inquiry()
1869 res = nvme_trans_device_id_page(ns, hdr, inq_response, in nvme_trans_inquiry()
1873 res = nvme_trans_ext_inq_page(ns, hdr, alloc_len); in nvme_trans_inquiry()
1876 res = nvme_trans_bdev_limits_page(ns, hdr, inq_response, in nvme_trans_inquiry()
1880 res = nvme_trans_bdev_char_page(ns, hdr, alloc_len); in nvme_trans_inquiry()
1896 static int nvme_trans_log_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr, in nvme_trans_log_sense() argument
1922 res = nvme_trans_log_supp_pages(ns, hdr, alloc_len); in nvme_trans_log_sense()
1925 res = nvme_trans_log_info_exceptions(ns, hdr, alloc_len); in nvme_trans_log_sense()
1928 res = nvme_trans_log_temperature(ns, hdr, alloc_len); in nvme_trans_log_sense()
1941 static int nvme_trans_mode_select(struct nvme_ns *ns, struct sg_io_hdr *hdr, in nvme_trans_mode_select() argument
1964 return nvme_trans_modesel_data(ns, hdr, cmd, parm_list_len, in nvme_trans_mode_select()
1971 static int nvme_trans_mode_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr, in nvme_trans_mode_sense() argument
1995 res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len, in nvme_trans_mode_sense()
2001 res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len, in nvme_trans_mode_sense()
2007 res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len, in nvme_trans_mode_sense()
2013 res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len, in nvme_trans_mode_sense()
2019 res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len, in nvme_trans_mode_sense()
2035 static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr, in nvme_trans_read_capacity() argument
2043 struct nvme_dev *dev = ns->dev; in nvme_trans_read_capacity()
2055 nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns); in nvme_trans_read_capacity()
2076 static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr, in nvme_trans_report_luns() argument
2083 struct nvme_dev *dev = ns->dev; in nvme_trans_report_luns()
2143 static int nvme_trans_request_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr, in nvme_trans_request_sense() argument
2192 static int nvme_trans_security_protocol(struct nvme_ns *ns, in nvme_trans_security_protocol() argument
2201 static int nvme_trans_synchronize_cache(struct nvme_ns *ns, in nvme_trans_synchronize_cache() argument
2209 c.common.nsid = cpu_to_le32(ns->ns_id); in nvme_trans_synchronize_cache()
2211 nvme_sc = nvme_submit_sync_cmd(ns->queue, &c, NULL, 0); in nvme_trans_synchronize_cache()
2215 static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr, in nvme_trans_start_stop() argument
2233 int res = nvme_trans_synchronize_cache(ns, hdr); in nvme_trans_start_stop()
2238 return nvme_trans_power_state(ns, hdr, pc, pcmod, start); in nvme_trans_start_stop()
2242 static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr, in nvme_trans_format_unit() argument
2281 res = nvme_trans_send_activate_fw_cmd(ns, hdr, 0); in nvme_trans_format_unit()
2284 res = nvme_trans_fmt_set_blk_size_count(ns, hdr); in nvme_trans_format_unit()
2288 res = nvme_trans_fmt_send_cmd(ns, hdr, nvme_pf_code); in nvme_trans_format_unit()
2294 static int nvme_trans_test_unit_ready(struct nvme_ns *ns, in nvme_trans_test_unit_ready() argument
2298 struct nvme_dev *dev = ns->dev; in nvme_trans_test_unit_ready()
2308 static int nvme_trans_write_buffer(struct nvme_ns *ns, struct sg_io_hdr *hdr, in nvme_trans_write_buffer() argument
2335 res = nvme_trans_send_download_fw_cmd(ns, hdr, nvme_admin_download_fw, in nvme_trans_write_buffer()
2340 res = nvme_trans_send_activate_fw_cmd(ns, hdr, buffer_id); in nvme_trans_write_buffer()
2343 res = nvme_trans_send_download_fw_cmd(ns, hdr, nvme_admin_download_fw, in nvme_trans_write_buffer()
2348 res = nvme_trans_send_activate_fw_cmd(ns, hdr, buffer_id); in nvme_trans_write_buffer()
2374 static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr, in nvme_trans_unmap() argument
2415 c.dsm.nsid = cpu_to_le32(ns->ns_id); in nvme_trans_unmap()
2419 nvme_sc = nvme_submit_sync_cmd(ns->queue, &c, range, in nvme_trans_unmap()
2429 static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr) in nvme_scsi_translate() argument
2455 retcode = nvme_trans_io(ns, hdr, 0, cmd); in nvme_scsi_translate()
2461 retcode = nvme_trans_io(ns, hdr, 1, cmd); in nvme_scsi_translate()
2464 retcode = nvme_trans_inquiry(ns, hdr, cmd); in nvme_scsi_translate()
2467 retcode = nvme_trans_log_sense(ns, hdr, cmd); in nvme_scsi_translate()
2471 retcode = nvme_trans_mode_select(ns, hdr, cmd); in nvme_scsi_translate()
2475 retcode = nvme_trans_mode_sense(ns, hdr, cmd); in nvme_scsi_translate()
2478 retcode = nvme_trans_read_capacity(ns, hdr, cmd, 0); in nvme_scsi_translate()
2483 retcode = nvme_trans_read_capacity(ns, hdr, cmd, 1); in nvme_scsi_translate()
2490 retcode = nvme_trans_report_luns(ns, hdr, cmd); in nvme_scsi_translate()
2493 retcode = nvme_trans_request_sense(ns, hdr, cmd); in nvme_scsi_translate()
2497 retcode = nvme_trans_security_protocol(ns, hdr, cmd); in nvme_scsi_translate()
2500 retcode = nvme_trans_start_stop(ns, hdr, cmd); in nvme_scsi_translate()
2503 retcode = nvme_trans_synchronize_cache(ns, hdr); in nvme_scsi_translate()
2506 retcode = nvme_trans_format_unit(ns, hdr, cmd); in nvme_scsi_translate()
2509 retcode = nvme_trans_test_unit_ready(ns, hdr, cmd); in nvme_scsi_translate()
2512 retcode = nvme_trans_write_buffer(ns, hdr, cmd); in nvme_scsi_translate()
2515 retcode = nvme_trans_unmap(ns, hdr, cmd); in nvme_scsi_translate()
2527 int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr) in nvme_sg_io() argument
2545 retcode = nvme_scsi_translate(ns, &hdr); in nvme_sg_io()
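
Taken together, the references above outline the driver's SCSI-to-NVMe translation path: nvme_sg_io() copies the sg_io_hdr in from user space and hands it to nvme_scsi_translate(), which dispatches on the SCSI CDB opcode, and each nvme_trans_*() helper then builds the matching NVMe command against ns->ns_id and submits it on ns->queue. The standalone C program below is only a minimal sketch of that dispatch shape; the fake_ns and fake_hdr types, the handler bodies, and the chosen opcode cases are illustrative assumptions, not the driver's actual definitions.

/*
 * Standalone illustration (not driver code): one handler per SCSI command,
 * selected by a switch on the first CDB byte, mirroring the nvme_trans_*()
 * helpers listed above.  Types and values here are simplified stand-ins.
 */
#include <stdio.h>
#include <stdint.h>

struct fake_ns {                       /* stand-in for struct nvme_ns */
	unsigned int ns_id;
};

struct fake_hdr {                      /* stand-in for struct sg_io_hdr */
	uint8_t cmd[16];               /* SCSI CDB; cmd[0] is the opcode */
};

/* Hypothetical handlers mirroring nvme_trans_inquiry()/nvme_trans_io(). */
static int trans_inquiry(struct fake_ns *ns, struct fake_hdr *hdr)
{
	(void)hdr;
	printf("INQUIRY -> Identify Namespace, nsid %u\n", ns->ns_id);
	return 0;
}

static int trans_io(struct fake_ns *ns, struct fake_hdr *hdr, int is_write)
{
	(void)hdr;
	printf("%s -> NVMe %s, nsid %u\n", is_write ? "WRITE(10)" : "READ(10)",
	       is_write ? "Write" : "Read", ns->ns_id);
	return 0;
}

/* Dispatcher shaped like nvme_scsi_translate(): switch on the CDB opcode. */
static int scsi_translate(struct fake_ns *ns, struct fake_hdr *hdr)
{
	switch (hdr->cmd[0]) {
	case 0x12:                      /* INQUIRY */
		return trans_inquiry(ns, hdr);
	case 0x28:                      /* READ(10) */
		return trans_io(ns, hdr, 0);
	case 0x2a:                      /* WRITE(10) */
		return trans_io(ns, hdr, 1);
	default:                        /* unsupported CDB */
		return -1;
	}
}

int main(void)
{
	struct fake_ns ns = { .ns_id = 1 };
	struct fake_hdr hdr = { .cmd = { 0x12 } };   /* INQUIRY */

	return scsi_translate(&ns, &hdr);
}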