Lines matching refs:dev — each entry gives the source line number, the matching code, and the enclosing function; a trailing "local" or "argument" marks the line where dev is declared.

63 struct se_device *dev; in transport_lookup_cmd_lun() local
134 dev = se_lun->lun_se_dev; in transport_lookup_cmd_lun()
135 atomic_long_inc(&dev->num_cmds); in transport_lookup_cmd_lun()
137 atomic_long_add(se_cmd->data_length, &dev->write_bytes); in transport_lookup_cmd_lun()
139 atomic_long_add(se_cmd->data_length, &dev->read_bytes); in transport_lookup_cmd_lun()
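
The four transport_lookup_cmd_lun() hits above are the per-device I/O accounting path: every command bumps num_cmds, and the command's data_length is added to either write_bytes or read_bytes. A minimal sketch of that pattern, assuming the direction is tested via se_cmd->data_direction (the matched lines do not show the branch conditions):

    /* Sketch only: the listing shows the counters, not the branches. */
    static void account_cmd(struct se_device *dev, struct se_cmd *se_cmd)
    {
            atomic_long_inc(&dev->num_cmds);
            if (se_cmd->data_direction == DMA_TO_DEVICE)
                    atomic_long_add(se_cmd->data_length, &dev->write_bytes);
            else if (se_cmd->data_direction == DMA_FROM_DEVICE)
                    atomic_long_add(se_cmd->data_length, &dev->read_bytes);
    }
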
461 static struct se_port *core_alloc_port(struct se_device *dev) in core_alloc_port() argument
476 spin_lock(&dev->se_port_lock); in core_alloc_port()
477 if (dev->dev_port_count == 0x0000ffff) { in core_alloc_port()
480 spin_unlock(&dev->se_port_lock); in core_alloc_port()
496 port->sep_rtpi = dev->dev_rpti_counter++; in core_alloc_port()
500 list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) { in core_alloc_port()
508 spin_unlock(&dev->se_port_lock); in core_alloc_port()
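
core_alloc_port() hands out a Relative Target Port Identifier (RTPI) under se_port_lock: it refuses once dev_port_count reaches the 16-bit ceiling (0x0000ffff), takes the next candidate from dev_rpti_counter, and walks dev_sep_list to reject values already in use. A hedged sketch of that flow; the retry label and the error return are assumptions, since the listing shows only the lock, the ceiling test, the increment, and the walk:

    spin_lock(&dev->se_port_lock);
    if (dev->dev_port_count == 0x0000ffff) {        /* RTPI space exhausted */
            spin_unlock(&dev->se_port_lock);
            kfree(port);
            return ERR_PTR(-ENOSPC);
    }
    again:
    port->sep_rtpi = dev->dev_rpti_counter++;       /* candidate RTPI */
    list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
            if (port_tmp->sep_rtpi == port->sep_rtpi)
                    goto again;                     /* collision: take the next value */
    }
    spin_unlock(&dev->se_port_lock);
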
514 struct se_device *dev, in core_export_port() argument
521 spin_lock(&dev->se_port_lock); in core_export_port()
528 list_add_tail(&port->sep_list, &dev->dev_sep_list); in core_export_port()
529 spin_unlock(&dev->se_port_lock); in core_export_port()
531 if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) && in core_export_port()
532 !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { in core_export_port()
541 dev->t10_alua.default_tg_pt_gp); in core_export_port()
545 dev->transport->name, tpg->se_tpg_tfo->get_fabric_name()); in core_export_port()
548 dev->dev_port_count++; in core_export_port()
555 static void core_release_port(struct se_device *dev, struct se_port *port) in core_release_port() argument
556 __releases(&dev->se_port_lock) __acquires(&dev->se_port_lock) in core_release_port()
562 spin_unlock(&dev->se_port_lock); in core_release_port()
565 spin_lock(&dev->se_port_lock); in core_release_port()
570 dev->dev_port_count--; in core_release_port()
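
The __releases()/__acquires() pair on line 556 is a Sparse lock-context annotation: core_release_port() is entered with se_port_lock held, drops it (line 562), and retakes it before returning (line 565), so the caller's lock/unlock bracket in core_dev_unexport() (lines 611-613) stays balanced. The shape, with the in-between teardown elided:

    static void core_release_port(struct se_device *dev, struct se_port *port)
            __releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
    {
            spin_unlock(&dev->se_port_lock);
            /* work that must not run under the lock (elided) */
            spin_lock(&dev->se_port_lock);
            dev->dev_port_count--;
    }
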
575 struct se_device *dev, in core_dev_export() argument
579 struct se_hba *hba = dev->se_hba; in core_dev_export()
582 port = core_alloc_port(dev); in core_dev_export()
586 lun->lun_se_dev = dev; in core_dev_export()
589 dev->export_count++; in core_dev_export()
592 core_export_port(dev, tpg, port, lun); in core_dev_export()
597 struct se_device *dev, in core_dev_unexport() argument
601 struct se_hba *hba = dev->se_hba; in core_dev_unexport()
611 spin_lock(&dev->se_port_lock); in core_dev_unexport()
612 core_release_port(dev, port); in core_dev_unexport()
613 spin_unlock(&dev->se_port_lock); in core_dev_unexport()
616 dev->export_count--; in core_dev_unexport()
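
core_dev_export() and core_dev_unexport() are mirror images: export allocates a port, binds lun->lun_se_dev, bumps export_count, and publishes the port; unexport unpublishes under se_port_lock and then drops the count. A condensed pairing (the IS_ERR check is an assumption; hba handling is elided):

    /* Export: allocate, bind, count, publish. */
    port = core_alloc_port(dev);
    if (IS_ERR(port))
            return PTR_ERR(port);
    lun->lun_se_dev = dev;
    dev->export_count++;
    core_export_port(dev, tpg, port, lun);

    /* Unexport: unpublish under the lock, then uncount. */
    spin_lock(&dev->se_port_lock);
    core_release_port(dev, port);
    spin_unlock(&dev->se_port_lock);
    dev->export_count--;
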
623 static void se_release_vpd_for_dev(struct se_device *dev) in se_release_vpd_for_dev() argument
627 spin_lock(&dev->t10_wwn.t10_vpd_lock); in se_release_vpd_for_dev()
629 &dev->t10_wwn.t10_vpd_list, vpd_list) { in se_release_vpd_for_dev()
633 spin_unlock(&dev->t10_wwn.t10_vpd_lock); in se_release_vpd_for_dev()
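
se_release_vpd_for_dev() frees every cached VPD descriptor, so it must use the _safe list iterator: entries are unlinked and freed mid-walk. A sketch under the assumption that the entries are struct t10_vpd linked through a vpd_list member (the iterator variable names are hypothetical):

    struct t10_vpd *vpd, *vpd_tmp;

    spin_lock(&dev->t10_wwn.t10_vpd_lock);
    list_for_each_entry_safe(vpd, vpd_tmp,
                    &dev->t10_wwn.t10_vpd_list, vpd_list) {
            list_del(&vpd->vpd_list);
            kfree(vpd);
    }
    spin_unlock(&dev->t10_wwn.t10_vpd_lock);
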
654 bool se_dev_check_wce(struct se_device *dev) in se_dev_check_wce() argument
658 if (dev->transport->get_write_cache) in se_dev_check_wce()
659 wce = dev->transport->get_write_cache(dev); in se_dev_check_wce()
660 else if (dev->dev_attrib.emulate_write_cache > 0) in se_dev_check_wce()
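
Only three se_dev_check_wce() lines match refs:dev, but they pin down the precedence: a backend-supplied get_write_cache() callback wins outright; otherwise the emulate_write_cache attribute decides. Filled out (the unmatched lines are inferred, not quoted):

    bool se_dev_check_wce(struct se_device *dev)
    {
            bool wce = false;

            if (dev->transport->get_write_cache)
                    wce = dev->transport->get_write_cache(dev);  /* ask the backend */
            else if (dev->dev_attrib.emulate_write_cache > 0)
                    wce = true;                                  /* configured emulation */

            return wce;
    }
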
667 struct se_device *dev, in se_dev_set_max_unmap_lba_count() argument
670 dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count; in se_dev_set_max_unmap_lba_count()
672 dev, dev->dev_attrib.max_unmap_lba_count); in se_dev_set_max_unmap_lba_count()
678 struct se_device *dev, in se_dev_set_max_unmap_block_desc_count() argument
681 dev->dev_attrib.max_unmap_block_desc_count = in se_dev_set_max_unmap_block_desc_count()
684 dev, dev->dev_attrib.max_unmap_block_desc_count); in se_dev_set_max_unmap_block_desc_count()
690 struct se_device *dev, in se_dev_set_unmap_granularity() argument
693 dev->dev_attrib.unmap_granularity = unmap_granularity; in se_dev_set_unmap_granularity()
695 dev, dev->dev_attrib.unmap_granularity); in se_dev_set_unmap_granularity()
701 struct se_device *dev, in se_dev_set_unmap_granularity_alignment() argument
704 dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment; in se_dev_set_unmap_granularity_alignment()
706 dev, dev->dev_attrib.unmap_granularity_alignment); in se_dev_set_unmap_granularity_alignment()
712 struct se_device *dev, in se_dev_set_max_write_same_len() argument
715 dev->dev_attrib.max_write_same_len = max_write_same_len; in se_dev_set_max_write_same_len()
717 dev, dev->dev_attrib.max_write_same_len); in se_dev_set_max_write_same_len()
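
The five UNMAP/WRITE SAME limit setters above (lines 667-717) share one shape: store the value into dev_attrib, log it, return success; none of them validates against backend capabilities. A representative sketch (the pr_debug format string is an assumption, since the listing truncates it):

    int se_dev_set_max_unmap_lba_count(struct se_device *dev,
                    u32 max_unmap_lba_count)
    {
            dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
            pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
                     dev, dev->dev_attrib.max_unmap_lba_count);
            return 0;
    }
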
722 static void dev_set_t10_wwn_model_alias(struct se_device *dev) in dev_set_t10_wwn_model_alias() argument
726 configname = config_item_name(&dev->dev_group.cg_item); in dev_set_t10_wwn_model_alias()
729 "INQUIRY_MODEL, truncating to 16 bytes\n", dev, in dev_set_t10_wwn_model_alias()
732 snprintf(&dev->t10_wwn.model[0], 16, "%s", configname); in dev_set_t10_wwn_model_alias()
735 int se_dev_set_emulate_model_alias(struct se_device *dev, int flag) in se_dev_set_emulate_model_alias() argument
737 if (dev->export_count) { in se_dev_set_emulate_model_alias()
740 dev, dev->export_count); in se_dev_set_emulate_model_alias()
750 dev_set_t10_wwn_model_alias(dev); in se_dev_set_emulate_model_alias()
752 strncpy(&dev->t10_wwn.model[0], in se_dev_set_emulate_model_alias()
753 dev->transport->inquiry_prod, 16); in se_dev_set_emulate_model_alias()
755 dev->dev_attrib.emulate_model_alias = flag; in se_dev_set_emulate_model_alias()
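
One nuance on line 732: snprintf() with a size of 16 always NUL-terminates, so at most 15 characters of the configfs item name actually reach t10_wwn.model, even though the warning on line 729 speaks of truncating to 16 bytes. As a worked example, a 16-character name "0123456789abcdef" stored via snprintf(dst, 16, "%s", name) yields "0123456789abcde" plus a terminating NUL; the sixteenth byte is lost to the terminator.
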
761 int se_dev_set_emulate_dpo(struct se_device *dev, int flag) in se_dev_set_emulate_dpo() argument
777 int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) in se_dev_set_emulate_fua_write() argument
784 dev->transport->get_write_cache) { in se_dev_set_emulate_fua_write()
788 if (dev->export_count) { in se_dev_set_emulate_fua_write()
790 " exports: %d\n", dev->export_count); in se_dev_set_emulate_fua_write()
793 dev->dev_attrib.emulate_fua_write = flag; in se_dev_set_emulate_fua_write()
795 dev, dev->dev_attrib.emulate_fua_write); in se_dev_set_emulate_fua_write()
800 int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) in se_dev_set_emulate_fua_read() argument
816 int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) in se_dev_set_emulate_write_cache() argument
823 dev->transport->get_write_cache) { in se_dev_set_emulate_write_cache()
827 if (dev->export_count) { in se_dev_set_emulate_write_cache()
829 " exports: %d\n", dev->export_count); in se_dev_set_emulate_write_cache()
832 dev->dev_attrib.emulate_write_cache = flag; in se_dev_set_emulate_write_cache()
834 dev, dev->dev_attrib.emulate_write_cache); in se_dev_set_emulate_write_cache()
839 int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) in se_dev_set_emulate_ua_intlck_ctrl() argument
846 if (dev->export_count) { in se_dev_set_emulate_ua_intlck_ctrl()
849 dev, dev->export_count); in se_dev_set_emulate_ua_intlck_ctrl()
852 dev->dev_attrib.emulate_ua_intlck_ctrl = flag; in se_dev_set_emulate_ua_intlck_ctrl()
854 dev, dev->dev_attrib.emulate_ua_intlck_ctrl); in se_dev_set_emulate_ua_intlck_ctrl()
860 int se_dev_set_emulate_tas(struct se_device *dev, int flag) in se_dev_set_emulate_tas() argument
867 if (dev->export_count) { in se_dev_set_emulate_tas()
870 dev, dev->export_count); in se_dev_set_emulate_tas()
873 dev->dev_attrib.emulate_tas = flag; in se_dev_set_emulate_tas()
875 dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled"); in se_dev_set_emulate_tas()
881 int se_dev_set_emulate_tpu(struct se_device *dev, int flag) in se_dev_set_emulate_tpu() argument
891 if (flag && !dev->dev_attrib.max_unmap_block_desc_count) { in se_dev_set_emulate_tpu()
896 dev->dev_attrib.emulate_tpu = flag; in se_dev_set_emulate_tpu()
898 dev, flag); in se_dev_set_emulate_tpu()
903 int se_dev_set_emulate_tpws(struct se_device *dev, int flag) in se_dev_set_emulate_tpws() argument
913 if (flag && !dev->dev_attrib.max_unmap_block_desc_count) { in se_dev_set_emulate_tpws()
918 dev->dev_attrib.emulate_tpws = flag; in se_dev_set_emulate_tpws()
920 dev, flag); in se_dev_set_emulate_tpws()
925 int se_dev_set_emulate_caw(struct se_device *dev, int flag) in se_dev_set_emulate_caw() argument
931 dev->dev_attrib.emulate_caw = flag; in se_dev_set_emulate_caw()
933 dev, flag); in se_dev_set_emulate_caw()
939 int se_dev_set_emulate_3pc(struct se_device *dev, int flag) in se_dev_set_emulate_3pc() argument
945 dev->dev_attrib.emulate_3pc = flag; in se_dev_set_emulate_3pc()
947 dev, flag); in se_dev_set_emulate_3pc()
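
The emulate_* setters from line 735 through 947 all follow one guard pattern: reject values other than 0/1, refuse any change while export_count is non-zero (the attribute is live under initiators), then store and log. emulate_tpu and emulate_tpws add one more gate: enabling them requires a non-zero max_unmap_block_desc_count from the backend (lines 891, 913). A representative reconstruction, with assumed message text:

    int se_dev_set_emulate_tas(struct se_device *dev, int flag)
    {
            if (flag != 0 && flag != 1) {
                    pr_err("Illegal value %d\n", flag);
                    return -EINVAL;
            }
            if (dev->export_count) {        /* no changes while exported */
                    pr_err("dev[%p]: unable to change while export_count is %d\n",
                           dev, dev->export_count);
                    return -EINVAL;
            }
            dev->dev_attrib.emulate_tas = flag;
            pr_debug("dev[%p]: emulate_tas: %s\n",
                     dev, flag ? "Enabled" : "Disabled");
            return 0;
    }
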
953 int se_dev_set_pi_prot_type(struct se_device *dev, int flag) in se_dev_set_pi_prot_type() argument
955 int rc, old_prot = dev->dev_attrib.pi_prot_type; in se_dev_set_pi_prot_type()
965 if (dev->dev_attrib.hw_pi_prot_type) { in se_dev_set_pi_prot_type()
970 if (!dev->transport->init_prot || !dev->transport->free_prot) { in se_dev_set_pi_prot_type()
976 dev->transport->name); in se_dev_set_pi_prot_type()
979 if (!(dev->dev_flags & DF_CONFIGURED)) { in se_dev_set_pi_prot_type()
983 if (dev->export_count) { in se_dev_set_pi_prot_type()
985 " export_count is %d\n", dev, dev->export_count); in se_dev_set_pi_prot_type()
989 dev->dev_attrib.pi_prot_type = flag; in se_dev_set_pi_prot_type()
992 rc = dev->transport->init_prot(dev); in se_dev_set_pi_prot_type()
994 dev->dev_attrib.pi_prot_type = old_prot; in se_dev_set_pi_prot_type()
999 dev->transport->free_prot(dev); in se_dev_set_pi_prot_type()
1001 pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag); in se_dev_set_pi_prot_type()
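
se_dev_set_pi_prot_type() is the one setter with a rollback path: it saves the old protection type (line 955), tentatively stores the new one (line 989), and if the backend's init_prot() fails, restores the saved value (lines 992-994); disabling instead routes through free_prot() (line 999). A sketch of that enable/disable flow; the exact predicates are assumptions:

    int rc, old_prot = dev->dev_attrib.pi_prot_type;

    dev->dev_attrib.pi_prot_type = flag;
    if (flag && !old_prot) {
            rc = dev->transport->init_prot(dev);    /* backend enables DIF */
            if (rc) {
                    dev->dev_attrib.pi_prot_type = old_prot;  /* roll back */
                    return rc;
            }
    } else if (old_prot && !flag) {
            dev->transport->free_prot(dev);         /* disabling */
    }
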
1007 int se_dev_set_pi_prot_format(struct se_device *dev, int flag) in se_dev_set_pi_prot_format() argument
1018 if (!dev->transport->format_prot) { in se_dev_set_pi_prot_format()
1020 dev->transport->name); in se_dev_set_pi_prot_format()
1023 if (!(dev->dev_flags & DF_CONFIGURED)) { in se_dev_set_pi_prot_format()
1027 if (dev->export_count) { in se_dev_set_pi_prot_format()
1029 " export_count is %d\n", dev, dev->export_count); in se_dev_set_pi_prot_format()
1033 rc = dev->transport->format_prot(dev); in se_dev_set_pi_prot_format()
1037 pr_debug("dev[%p]: SE Device Protection Format complete\n", dev); in se_dev_set_pi_prot_format()
1043 int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) in se_dev_set_enforce_pr_isids() argument
1049 dev->dev_attrib.enforce_pr_isids = flag; in se_dev_set_enforce_pr_isids()
1050 pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, in se_dev_set_enforce_pr_isids()
1051 (dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled"); in se_dev_set_enforce_pr_isids()
1056 int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag) in se_dev_set_force_pr_aptpl() argument
1062 if (dev->export_count) { in se_dev_set_force_pr_aptpl()
1064 " export_count is %d\n", dev, dev->export_count); in se_dev_set_force_pr_aptpl()
1068 dev->dev_attrib.force_pr_aptpl = flag; in se_dev_set_force_pr_aptpl()
1069 pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag); in se_dev_set_force_pr_aptpl()
1074 int se_dev_set_is_nonrot(struct se_device *dev, int flag) in se_dev_set_is_nonrot() argument
1080 dev->dev_attrib.is_nonrot = flag; in se_dev_set_is_nonrot()
1082 dev, flag); in se_dev_set_is_nonrot()
1087 int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag) in se_dev_set_emulate_rest_reord() argument
1091 " reordering not implemented\n", dev); in se_dev_set_emulate_rest_reord()
1094 dev->dev_attrib.emulate_rest_reord = flag; in se_dev_set_emulate_rest_reord()
1095 pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag); in se_dev_set_emulate_rest_reord()
1103 int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) in se_dev_set_queue_depth() argument
1105 if (dev->export_count) { in se_dev_set_queue_depth()
1108 dev, dev->export_count); in se_dev_set_queue_depth()
1113 "_depth\n", dev); in se_dev_set_queue_depth()
1117 if (queue_depth > dev->dev_attrib.queue_depth) { in se_dev_set_queue_depth()
1118 if (queue_depth > dev->dev_attrib.hw_queue_depth) { in se_dev_set_queue_depth()
1121 " TCQ: %u\n", dev, queue_depth, in se_dev_set_queue_depth()
1122 dev->dev_attrib.hw_queue_depth); in se_dev_set_queue_depth()
1126 dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth; in se_dev_set_queue_depth()
1128 dev, queue_depth); in se_dev_set_queue_depth()
1133 int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) in se_dev_set_optimal_sectors() argument
1135 if (dev->export_count) { in se_dev_set_optimal_sectors()
1138 dev, dev->export_count); in se_dev_set_optimal_sectors()
1141 if (optimal_sectors > dev->dev_attrib.hw_max_sectors) { in se_dev_set_optimal_sectors()
1143 " greater than hw_max_sectors: %u\n", dev, in se_dev_set_optimal_sectors()
1144 optimal_sectors, dev->dev_attrib.hw_max_sectors); in se_dev_set_optimal_sectors()
1148 dev->dev_attrib.optimal_sectors = optimal_sectors; in se_dev_set_optimal_sectors()
1150 dev, optimal_sectors); in se_dev_set_optimal_sectors()
1155 int se_dev_set_block_size(struct se_device *dev, u32 block_size) in se_dev_set_block_size() argument
1157 if (dev->export_count) { in se_dev_set_block_size()
1160 dev, dev->export_count); in se_dev_set_block_size()
1170 dev, block_size); in se_dev_set_block_size()
1174 dev->dev_attrib.block_size = block_size; in se_dev_set_block_size()
1176 dev, block_size); in se_dev_set_block_size()
1178 if (dev->dev_attrib.max_bytes_per_io) in se_dev_set_block_size()
1179 dev->dev_attrib.hw_max_sectors = in se_dev_set_block_size()
1180 dev->dev_attrib.max_bytes_per_io / block_size; in se_dev_set_block_size()
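
Lines 1178-1180 are why block-size changes must be ordered before I/O sizing on byte-capped backends: the per-command sector ceiling is rederived by division. As a worked example with a hypothetical max_bytes_per_io of 8 MiB: at a 512-byte block size, hw_max_sectors becomes 8388608 / 512 = 16384; switching to 4096-byte blocks drops it to 8388608 / 4096 = 2048.
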
1188 struct se_device *dev, in core_dev_add_lun() argument
1199 TRANSPORT_LUNFLAGS_READ_WRITE, dev); in core_dev_add_lun()
1206 tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id); in core_dev_add_lun()
1435 static void scsi_dump_inquiry(struct se_device *dev) in scsi_dump_inquiry() argument
1437 struct t10_wwn *wwn = &dev->t10_wwn; in scsi_dump_inquiry()
1467 device_type = dev->transport->get_device_type(dev); in scsi_dump_inquiry()
1473 struct se_device *dev; in target_alloc_device() local
1476 dev = hba->transport->alloc_device(hba, name); in target_alloc_device()
1477 if (!dev) in target_alloc_device()
1480 dev->dev_link_magic = SE_DEV_LINK_MAGIC; in target_alloc_device()
1481 dev->se_hba = hba; in target_alloc_device()
1482 dev->transport = hba->transport; in target_alloc_device()
1483 dev->prot_length = sizeof(struct se_dif_v1_tuple); in target_alloc_device()
1485 INIT_LIST_HEAD(&dev->dev_list); in target_alloc_device()
1486 INIT_LIST_HEAD(&dev->dev_sep_list); in target_alloc_device()
1487 INIT_LIST_HEAD(&dev->dev_tmr_list); in target_alloc_device()
1488 INIT_LIST_HEAD(&dev->delayed_cmd_list); in target_alloc_device()
1489 INIT_LIST_HEAD(&dev->state_list); in target_alloc_device()
1490 INIT_LIST_HEAD(&dev->qf_cmd_list); in target_alloc_device()
1491 INIT_LIST_HEAD(&dev->g_dev_node); in target_alloc_device()
1492 spin_lock_init(&dev->execute_task_lock); in target_alloc_device()
1493 spin_lock_init(&dev->delayed_cmd_lock); in target_alloc_device()
1494 spin_lock_init(&dev->dev_reservation_lock); in target_alloc_device()
1495 spin_lock_init(&dev->se_port_lock); in target_alloc_device()
1496 spin_lock_init(&dev->se_tmr_lock); in target_alloc_device()
1497 spin_lock_init(&dev->qf_cmd_lock); in target_alloc_device()
1498 sema_init(&dev->caw_sem, 1); in target_alloc_device()
1499 atomic_set(&dev->dev_ordered_id, 0); in target_alloc_device()
1500 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list); in target_alloc_device()
1501 spin_lock_init(&dev->t10_wwn.t10_vpd_lock); in target_alloc_device()
1502 INIT_LIST_HEAD(&dev->t10_pr.registration_list); in target_alloc_device()
1503 INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list); in target_alloc_device()
1504 spin_lock_init(&dev->t10_pr.registration_lock); in target_alloc_device()
1505 spin_lock_init(&dev->t10_pr.aptpl_reg_lock); in target_alloc_device()
1506 INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list); in target_alloc_device()
1507 spin_lock_init(&dev->t10_alua.tg_pt_gps_lock); in target_alloc_device()
1508 INIT_LIST_HEAD(&dev->t10_alua.lba_map_list); in target_alloc_device()
1509 spin_lock_init(&dev->t10_alua.lba_map_lock); in target_alloc_device()
1511 dev->t10_wwn.t10_dev = dev; in target_alloc_device()
1512 dev->t10_alua.t10_dev = dev; in target_alloc_device()
1514 dev->dev_attrib.da_dev = dev; in target_alloc_device()
1515 dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS; in target_alloc_device()
1516 dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO; in target_alloc_device()
1517 dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE; in target_alloc_device()
1518 dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ; in target_alloc_device()
1519 dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE; in target_alloc_device()
1520 dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL; in target_alloc_device()
1521 dev->dev_attrib.emulate_tas = DA_EMULATE_TAS; in target_alloc_device()
1522 dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU; in target_alloc_device()
1523 dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS; in target_alloc_device()
1524 dev->dev_attrib.emulate_caw = DA_EMULATE_CAW; in target_alloc_device()
1525 dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC; in target_alloc_device()
1526 dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT; in target_alloc_device()
1527 dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; in target_alloc_device()
1528 dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL; in target_alloc_device()
1529 dev->dev_attrib.is_nonrot = DA_IS_NONROT; in target_alloc_device()
1530 dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD; in target_alloc_device()
1531 dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; in target_alloc_device()
1532 dev->dev_attrib.max_unmap_block_desc_count = in target_alloc_device()
1534 dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; in target_alloc_device()
1535 dev->dev_attrib.unmap_granularity_alignment = in target_alloc_device()
1537 dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN; in target_alloc_device()
1539 xcopy_lun = &dev->xcopy_lun; in target_alloc_device()
1540 xcopy_lun->lun_se_dev = dev; in target_alloc_device()
1547 return dev; in target_alloc_device()
1550 int target_configure_device(struct se_device *dev) in target_configure_device() argument
1552 struct se_hba *hba = dev->se_hba; in target_configure_device()
1555 if (dev->dev_flags & DF_CONFIGURED) { in target_configure_device()
1561 ret = dev->transport->configure_device(dev); in target_configure_device()
1567 dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size; in target_configure_device()
1568 dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth; in target_configure_device()
1573 dev->dev_attrib.hw_max_sectors = in target_configure_device()
1574 se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors, in target_configure_device()
1575 dev->dev_attrib.hw_block_size); in target_configure_device()
1576 dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors; in target_configure_device()
1578 dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); in target_configure_device()
1579 dev->creation_time = get_jiffies_64(); in target_configure_device()
1581 ret = core_setup_alua(dev); in target_configure_device()
1588 dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1, in target_configure_device()
1589 dev->transport->name); in target_configure_device()
1590 if (!dev->tmr_wq) { in target_configure_device()
1592 dev->transport->name); in target_configure_device()
1600 INIT_WORK(&dev->qf_work_queue, target_qf_do_work); in target_configure_device()
1607 if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) { in target_configure_device()
1608 strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8); in target_configure_device()
1609 strncpy(&dev->t10_wwn.model[0], in target_configure_device()
1610 dev->transport->inquiry_prod, 16); in target_configure_device()
1611 strncpy(&dev->t10_wwn.revision[0], in target_configure_device()
1612 dev->transport->inquiry_rev, 4); in target_configure_device()
1615 scsi_dump_inquiry(dev); in target_configure_device()
1622 list_add_tail(&dev->g_dev_node, &g_device_list); in target_configure_device()
1625 dev->dev_flags |= DF_CONFIGURED; in target_configure_device()
1630 core_alua_free_lu_gp_mem(dev); in target_configure_device()
1632 se_release_vpd_for_dev(dev); in target_configure_device()
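
The target_configure_device() hits trace a fixed bring-up order: the backend's configure_device() fills the hw_* attributes, which are then copied into the active block_size/queue_depth and aligned (lines 1567-1576); ALUA state, the TMR workqueue, and the queue-full worker follow; only non-passthrough backends get the "LIO-ORG" INQUIRY defaults (passthrough devices report their own); and DF_CONFIGURED is set last, after the device joins g_device_list. Note the workqueue flags on line 1588: WQ_MEM_RECLAIM guarantees a rescuer thread so task management can still make progress under memory pressure. A condensed map (error unwinding elided; see lines 1630-1632):

    ret = dev->transport->configure_device(dev);    /* backend probe */
    dev->dev_attrib.block_size  = dev->dev_attrib.hw_block_size;
    dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;
    ret = core_setup_alua(dev);                     /* ALUA groups */
    dev->tmr_wq = alloc_workqueue("tmr-%s",
                    WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
                    dev->transport->name);          /* TMR worker */
    INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
    dev->dev_flags |= DF_CONFIGURED;                /* visible to fabrics */
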
1636 void target_free_device(struct se_device *dev) in target_free_device() argument
1638 struct se_hba *hba = dev->se_hba; in target_free_device()
1640 WARN_ON(!list_empty(&dev->dev_sep_list)); in target_free_device()
1642 if (dev->dev_flags & DF_CONFIGURED) { in target_free_device()
1643 destroy_workqueue(dev->tmr_wq); in target_free_device()
1646 list_del(&dev->g_dev_node); in target_free_device()
1654 core_alua_free_lu_gp_mem(dev); in target_free_device()
1655 core_alua_set_lba_map(dev, NULL, 0, 0); in target_free_device()
1656 core_scsi3_free_all_registrations(dev); in target_free_device()
1657 se_release_vpd_for_dev(dev); in target_free_device()
1659 if (dev->transport->free_prot) in target_free_device()
1660 dev->transport->free_prot(dev); in target_free_device()
1662 dev->transport->free_device(dev); in target_free_device()
1668 struct se_device *dev; in core_dev_setup_virtual_lun0() local
1676 dev = target_alloc_device(hba, "virt_lun0"); in core_dev_setup_virtual_lun0()
1677 if (!dev) { in core_dev_setup_virtual_lun0()
1682 hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf)); in core_dev_setup_virtual_lun0()
1684 ret = target_configure_device(dev); in core_dev_setup_virtual_lun0()
1689 g_lun0_dev = dev; in core_dev_setup_virtual_lun0()
1693 target_free_device(dev); in core_dev_setup_virtual_lun0()
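
The virt_lun0 bring-up at the end is the canonical lifecycle in miniature and doubles as a usage sketch for the APIs above (buffer contents and error values are assumptions):

    struct se_device *dev;
    int ret;

    dev = target_alloc_device(hba, "virt_lun0");    /* lists, locks, defaults */
    if (!dev)
            return -ENOMEM;
    hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));
    ret = target_configure_device(dev);             /* probe + publish */
    if (ret) {
            target_free_device(dev);                /* full unwind */
            return ret;
    }
    g_lun0_dev = dev;
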