Lines matching refs: tgt

132 	struct sbp2_target *tgt;  member
182 static struct fw_device *target_parent_device(struct sbp2_target *tgt) in target_parent_device() argument
184 return fw_parent_device(tgt->unit); in target_parent_device()
187 static const struct device *tgt_dev(const struct sbp2_target *tgt) in tgt_dev() argument
189 return &tgt->unit->device; in tgt_dev()
194 return &lu->tgt->unit->device; in lu_dev()
448 spin_lock_irqsave(&lu->tgt->lock, flags); in sbp2_status_write()
457 spin_unlock_irqrestore(&lu->tgt->lock, flags); in sbp2_status_write()
484 spin_lock_irqsave(&orb->lu->tgt->lock, flags); in complete_transaction()
490 spin_unlock_irqrestore(&orb->lu->tgt->lock, flags); in complete_transaction()
495 spin_unlock_irqrestore(&orb->lu->tgt->lock, flags); in complete_transaction()
504 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_send_orb()
512 spin_lock_irqsave(&lu->tgt->lock, flags); in sbp2_send_orb()
514 spin_unlock_irqrestore(&lu->tgt->lock, flags); in sbp2_send_orb()
526 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_cancel_orbs()
532 spin_lock_irq(&lu->tgt->lock); in sbp2_cancel_orbs()
534 spin_unlock_irq(&lu->tgt->lock); in sbp2_cancel_orbs()
564 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_send_management_orb()
603 timeout = lu->tgt->mgt_orb_timeout; in sbp2_send_management_orb()
618 lu->tgt->management_agent_address); in sbp2_send_management_orb()
660 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_agent_reset()
664 lu->tgt->node_id, lu->generation, device->max_speed, in sbp2_agent_reset()
677 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_agent_reset_no_wait()
686 lu->tgt->node_id, lu->generation, device->max_speed, in sbp2_agent_reset_no_wait()
691 static inline void sbp2_allow_block(struct sbp2_target *tgt) in sbp2_allow_block() argument
693 spin_lock_irq(&tgt->lock); in sbp2_allow_block()
694 --tgt->dont_block; in sbp2_allow_block()
695 spin_unlock_irq(&tgt->lock); in sbp2_allow_block()
710 struct sbp2_target *tgt = lu->tgt; in sbp2_conditionally_block() local
711 struct fw_card *card = target_parent_device(tgt)->card; in sbp2_conditionally_block()
713 container_of((void *)tgt, struct Scsi_Host, hostdata[0]); in sbp2_conditionally_block()
716 spin_lock_irqsave(&tgt->lock, flags); in sbp2_conditionally_block()
717 if (!tgt->dont_block && !lu->blocked && in sbp2_conditionally_block()
720 if (++tgt->blocked == 1) in sbp2_conditionally_block()
723 spin_unlock_irqrestore(&tgt->lock, flags); in sbp2_conditionally_block()
734 struct sbp2_target *tgt = lu->tgt; in sbp2_conditionally_unblock() local
735 struct fw_card *card = target_parent_device(tgt)->card; in sbp2_conditionally_unblock()
737 container_of((void *)tgt, struct Scsi_Host, hostdata[0]); in sbp2_conditionally_unblock()
740 spin_lock_irq(&tgt->lock); in sbp2_conditionally_unblock()
743 unblock = --tgt->blocked == 0; in sbp2_conditionally_unblock()
745 spin_unlock_irq(&tgt->lock); in sbp2_conditionally_unblock()
757 static void sbp2_unblock(struct sbp2_target *tgt) in sbp2_unblock() argument
760 container_of((void *)tgt, struct Scsi_Host, hostdata[0]); in sbp2_unblock()
762 spin_lock_irq(&tgt->lock); in sbp2_unblock()
763 ++tgt->dont_block; in sbp2_unblock()
764 spin_unlock_irq(&tgt->lock); in sbp2_unblock()
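
The sbp2_allow_block(), sbp2_conditionally_block(), sbp2_conditionally_unblock() and sbp2_unblock() hits above all manipulate two counters under tgt->lock: dont_block suppresses blocking while login/removal work is in flight, and blocked counts logical units that currently want the SCSI host blocked, so the host is blocked by the first LU and unblocked by the last. A minimal userspace model of that counting scheme follows (a sketch, not the driver's code: block_host()/unblock_host() stand in for the SCSI host block/unblock calls, and the extra condition that is truncated at line 717 is omitted):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Simplified model of the two counters the hits above revolve around:
 * dont_block suppresses blocking while login/removal work is in flight,
 * blocked counts logical units that currently want the host blocked.
 * block_host()/unblock_host() are stand-ins for the SCSI host calls;
 * the condition truncated at line 717 is omitted.
 */
struct model_target {
	pthread_mutex_t lock;	/* plays the role of tgt->lock */
	int dont_block;		/* > 0: blocking temporarily disallowed */
	int blocked;		/* number of currently blocked LUs */
};

static void block_host(void)   { puts("host blocked"); }
static void unblock_host(void) { puts("host unblocked"); }

static void allow_block(struct model_target *tgt)
{
	pthread_mutex_lock(&tgt->lock);
	--tgt->dont_block;		/* cf. sbp2_allow_block() */
	pthread_mutex_unlock(&tgt->lock);
}

static void conditionally_block(struct model_target *tgt, bool *lu_blocked)
{
	pthread_mutex_lock(&tgt->lock);
	if (!tgt->dont_block && !*lu_blocked) {
		*lu_blocked = true;
		if (++tgt->blocked == 1)	/* first LU to block blocks the host */
			block_host();
	}
	pthread_mutex_unlock(&tgt->lock);
}

static void conditionally_unblock(struct model_target *tgt, bool *lu_blocked)
{
	bool unblock = false;

	pthread_mutex_lock(&tgt->lock);
	if (*lu_blocked) {
		*lu_blocked = false;
		unblock = --tgt->blocked == 0;	/* last LU to unblock unblocks it */
	}
	pthread_mutex_unlock(&tgt->lock);

	if (unblock)
		unblock_host();
}

static void force_unblock(struct model_target *tgt)
{
	pthread_mutex_lock(&tgt->lock);
	++tgt->dont_block;		/* cf. sbp2_unblock(): never block again */
	pthread_mutex_unlock(&tgt->lock);
	unblock_host();
}

int main(void)
{
	/* dont_block starts at 1, matching ++tgt->dont_block at line 1000 */
	struct model_target tgt = {
		.lock = PTHREAD_MUTEX_INITIALIZER, .dont_block = 1,
	};
	bool lu_blocked = false;

	allow_block(&tgt);			/* login work finished */
	conditionally_block(&tgt, &lu_blocked);	/* e.g. bus reset seen */
	conditionally_unblock(&tgt, &lu_blocked);	/* reconnect finished */
	force_unblock(&tgt);			/* device being removed */
	return 0;
}
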
798 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_set_busy_timeout()
802 lu->tgt->node_id, lu->generation, device->max_speed, in sbp2_set_busy_timeout()
812 struct sbp2_target *tgt = lu->tgt; in sbp2_login() local
813 struct fw_device *device = target_parent_device(tgt); in sbp2_login()
837 dev_err(tgt_dev(tgt), "failed to login to LUN %04x\n", in sbp2_login()
840 sbp2_unblock(lu->tgt); in sbp2_login()
845 tgt->node_id = node_id; in sbp2_login()
846 tgt->address_high = local_node_id << 16; in sbp2_login()
855 dev_notice(tgt_dev(tgt), "logged in to LUN %04x (%d retries)\n", in sbp2_login()
872 if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY) in sbp2_login()
875 shost = container_of((void *)tgt, struct Scsi_Host, hostdata[0]); in sbp2_login()
900 sbp2_allow_block(tgt); in sbp2_login()
922 struct sbp2_target *tgt = lu->tgt; in sbp2_reconnect() local
923 struct fw_device *device = target_parent_device(tgt); in sbp2_reconnect()
947 dev_err(tgt_dev(tgt), "failed to reconnect\n"); in sbp2_reconnect()
956 tgt->node_id = node_id; in sbp2_reconnect()
957 tgt->address_high = local_node_id << 16; in sbp2_reconnect()
961 dev_notice(tgt_dev(tgt), "reconnected to LUN %04x (%d retries)\n", in sbp2_reconnect()
976 static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) in sbp2_add_logical_unit() argument
994 lu->tgt = tgt; in sbp2_add_logical_unit()
1000 ++tgt->dont_block; in sbp2_add_logical_unit()
1005 list_add_tail(&lu->link, &tgt->lu_list); in sbp2_add_logical_unit()
1009 static void sbp2_get_unit_unique_id(struct sbp2_target *tgt, in sbp2_get_unit_unique_id() argument
1013 tgt->guid = (u64)leaf[1] << 32 | leaf[2]; in sbp2_get_unit_unique_id()
1016 static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, in sbp2_scan_logical_unit_dir() argument
1025 sbp2_add_logical_unit(tgt, value) < 0) in sbp2_scan_logical_unit_dir()
1030 static int sbp2_scan_unit_dir(struct sbp2_target *tgt, const u32 *directory, in sbp2_scan_unit_dir() argument
1041 tgt->management_agent_address = in sbp2_scan_unit_dir()
1046 tgt->directory_id = value; in sbp2_scan_unit_dir()
1059 tgt->mgt_orb_timeout = (value >> 8 & 0xff) * 500; in sbp2_scan_unit_dir()
1063 if (sbp2_add_logical_unit(tgt, value) < 0) in sbp2_scan_unit_dir()
1068 sbp2_get_unit_unique_id(tgt, ci.p - 1 + value); in sbp2_scan_unit_dir()
1073 if (sbp2_scan_logical_unit_dir(tgt, ci.p - 1 + value) < 0) in sbp2_scan_unit_dir()
1086 static void sbp2_clamp_management_orb_timeout(struct sbp2_target *tgt) in sbp2_clamp_management_orb_timeout() argument
1088 unsigned int timeout = tgt->mgt_orb_timeout; in sbp2_clamp_management_orb_timeout()
1091 dev_notice(tgt_dev(tgt), "%ds mgt_ORB_timeout limited to 40s\n", in sbp2_clamp_management_orb_timeout()
1094 tgt->mgt_orb_timeout = clamp_val(timeout, 5000, 40000); in sbp2_clamp_management_orb_timeout()
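
The clamp hits at lines 1088-1094, together with line 1059 above, give the whole mgt_ORB_timeout computation: bits 8..15 of the Unit_Characteristics value are in 500 ms units, and the result is clamped to a 5 s..40 s window. A standalone restatement of that arithmetic (clamp_ms() is a stand-in for the kernel's clamp_val()):

#include <stdio.h>

/*
 * Decode mgt_ORB_timeout the way lines 1059 and 1094 above do: bits
 * 8..15 of the unit-characteristics value are in 500 ms units, and the
 * result is clamped to 5000..40000 ms.  clamp_ms() stands in for
 * clamp_val().
 */
static unsigned int clamp_ms(unsigned int ms, unsigned int lo, unsigned int hi)
{
	return ms < lo ? lo : ms > hi ? hi : ms;
}

static unsigned int mgt_orb_timeout_ms(unsigned int unit_characteristics)
{
	unsigned int ms = (unit_characteristics >> 8 & 0xff) * 500;

	return clamp_ms(ms, 5000, 40000);
}

int main(void)
{
	/* a value of 0x0a00 encodes 10 * 500 ms = 5000 ms */
	printf("%u ms\n", mgt_orb_timeout_ms(0x0a00));
	return 0;
}
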
1097 static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model, in sbp2_init_workarounds() argument
1104 dev_notice(tgt_dev(tgt), in sbp2_init_workarounds()
1126 dev_notice(tgt_dev(tgt), "workarounds 0x%x " in sbp2_init_workarounds()
1129 tgt->workarounds = w; in sbp2_init_workarounds()
1138 struct sbp2_target *tgt; in sbp2_probe() local
1151 shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt)); in sbp2_probe()
1155 tgt = (struct sbp2_target *)shost->hostdata; in sbp2_probe()
1156 dev_set_drvdata(&unit->device, tgt); in sbp2_probe()
1157 tgt->unit = unit; in sbp2_probe()
1158 INIT_LIST_HEAD(&tgt->lu_list); in sbp2_probe()
1159 spin_lock_init(&tgt->lock); in sbp2_probe()
1160 tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; in sbp2_probe()
1172 tgt->directory_id = ((unit->directory - device->config_rom) * 4 in sbp2_probe()
1178 if (sbp2_scan_unit_dir(tgt, unit->directory, &model, in sbp2_probe()
1182 sbp2_clamp_management_orb_timeout(tgt); in sbp2_probe()
1183 sbp2_init_workarounds(tgt, model, firmware_revision); in sbp2_probe()
1191 tgt->max_payload = min3(device->max_speed + 7, 10U, in sbp2_probe()
1195 list_for_each_entry(lu, &tgt->lu_list, link) in sbp2_probe()
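
Lines 1151-1160, together with the container_of((void *)tgt, struct Scsi_Host, hostdata[0]) hits earlier in the listing, show how the target's private data is embedded in the SCSI host: scsi_host_alloc() is asked for sizeof(*tgt) extra bytes, tgt points at shost->hostdata, and container_of() walks back from tgt to the host. A small userspace sketch of that embedding pattern, using made-up demo_host/demo_target types rather than the kernel structures:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Userspace illustration of the embedding pattern visible above: the
 * target's private data lives in the host structure's hostdata[] tail,
 * so a pointer to the private data can be turned back into the host
 * with container_of().  demo_host/demo_target are made-up stand-ins,
 * not the kernel's Scsi_Host/sbp2_target.
 */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_target {
	int node_id;
};

struct demo_host {
	int host_no;
	unsigned long hostdata[];	/* private data is allocated after the host */
};

int main(void)
{
	/* mirrors scsi_host_alloc(..., sizeof(*tgt)): one allocation for both */
	struct demo_host *shost =
		malloc(sizeof(*shost) + sizeof(struct demo_target));
	if (!shost)
		return 1;

	struct demo_target *tgt = (struct demo_target *)shost->hostdata;

	tgt->node_id = 42;

	/* recover the host from the private data, as the sbp2 lines do */
	struct demo_host *again =
		container_of((void *)tgt, struct demo_host, hostdata[0]);

	printf("same host: %s\n", again == shost ? "yes" : "no");
	free(shost);
	return 0;
}

Keeping both in one allocation ties the target's lifetime to the host's, and is what makes the container_of() conversions at lines 713, 737, 760, 875 and 1233 above possible.
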
1211 struct sbp2_target *tgt = dev_get_drvdata(&unit->device); in sbp2_update() local
1220 list_for_each_entry(lu, &tgt->lu_list, link) { in sbp2_update()
1230 struct sbp2_target *tgt = dev_get_drvdata(&unit->device); in sbp2_remove() local
1233 container_of((void *)tgt, struct Scsi_Host, hostdata[0]); in sbp2_remove()
1237 sbp2_unblock(tgt); in sbp2_remove()
1239 list_for_each_entry_safe(lu, next, &tgt->lu_list, link) { in sbp2_remove()
1356 struct fw_device *device = target_parent_device(base_orb->lu->tgt); in complete_command_orb()
1417 cpu_to_be32(lu->tgt->address_high); in sbp2_map_scatterlist()
1443 orb->request.data_descriptor.high = cpu_to_be32(lu->tgt->address_high); in sbp2_map_scatterlist()
1462 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_scsi_queuecommand()
1476 COMMAND_ORB_MAX_PAYLOAD(lu->tgt->max_payload) | in sbp2_scsi_queuecommand()
1500 sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, generation, in sbp2_scsi_queuecommand()
1524 if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36) in sbp2_scsi_slave_alloc()
1543 lu->tgt->workarounds & SBP2_WORKAROUND_MODE_SENSE_8) in sbp2_scsi_slave_configure()
1546 if (lu->tgt->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) in sbp2_scsi_slave_configure()
1549 if (lu->tgt->workarounds & SBP2_WORKAROUND_POWER_CONDITION) in sbp2_scsi_slave_configure()
1552 if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS) in sbp2_scsi_slave_configure()
1592 (unsigned long long)lu->tgt->guid, in sbp2_sysfs_ieee1394_id_show()
1593 lu->tgt->directory_id, lu->lun); in sbp2_sysfs_ieee1394_id_show()
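
The final hits come from the sysfs ieee1394_id attribute, which is composed from the three identifiers kept on the target and the logical unit: the 64-bit GUID (tgt->guid), the 24-bit directory ID (tgt->directory_id) and the LUN (lu->lun). The lines above only show the arguments, not the format string, so the following is just a plausible sketch assuming a colon-separated layout:

#include <inttypes.h>
#include <stdio.h>

/*
 * Compose a GUID:directory_ID:LUN identifier from the fields the
 * listing shows (tgt->guid, tgt->directory_id, lu->lun).  The field
 * order matches the show routine's arguments; the separator and field
 * widths are an assumption for illustration, not taken from the driver.
 */
static int format_ieee1394_id(char *buf, size_t len, uint64_t guid,
			      uint32_t directory_id, uint16_t lun)
{
	return snprintf(buf, len, "%016" PRIx64 ":%06x:%04x",
			guid, directory_id, lun);
}

int main(void)
{
	char id[64];

	format_ieee1394_id(id, sizeof(id), 0x0001020304050607ULL,
			   0xd00d1e, 0x0001);
	puts(id);
	return 0;
}
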