tgt                78 arch/mips/bcm63xx/irq.c 	unsigned int src, tgt;						\
tgt                86 arch/mips/bcm63xx/irq.c 	for (src = 0, tgt = (width / 32); src < (width / 32); src++) {	\
tgt                91 arch/mips/bcm63xx/irq.c 		pending[--tgt] = val;					\
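
The bcm63xx entries above (arch/mips/bcm63xx/irq.c) come from a macro that walks the IRQ status words and fills pending[] from the top index downward (pending[--tgt] = val). A minimal userspace sketch of that reverse-fill indexing; the register-read stub and the 64-bit width are illustrative assumptions, not the driver's MMIO access:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the MMIO status read in the real macro (assumption). */
static uint32_t read_status_word(unsigned int src)
{
	return 0x11111111u * (src + 1);	/* fake register contents */
}

int main(void)
{
	const unsigned int width = 64;	/* illustrative IRQ width */
	uint32_t pending[64 / 32];
	unsigned int src, tgt;

	/* Same shape as the kernel macro: tgt starts at width/32 and is
	 * pre-decremented, so the first word read lands in the last slot. */
	for (src = 0, tgt = width / 32; src < width / 32; src++) {
		uint32_t val = read_status_word(src);
		pending[--tgt] = val;
	}

	for (tgt = 0; tgt < width / 32; tgt++)
		printf("pending[%u] = 0x%08x\n", tgt, pending[tgt]);
	return 0;
}
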
tgt               159 arch/mips/net/ebpf_jit.c static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
tgt               176 arch/mips/net/ebpf_jit.c 	return (ctx->offsets[tgt] & ~OFFSETS_B_CONV) -
tgt               191 drivers/block/aoe/aoe.h 	struct aoetgt **tgt;	/* target in use when working */
tgt               262 drivers/block/aoe/aoecmd.c 	tt = d->tgt;	/* last used target */
tgt               278 drivers/block/aoe/aoecmd.c 				d->tgt = tt;
tgt               282 drivers/block/aoe/aoecmd.c 		if (tt == d->tgt) {	/* we've looped and found nada */
tgt              1389 drivers/block/aoe/aoecmd.c 	t = *d->tgt;
tgt              1433 drivers/block/aoe/aoecmd.c 	d->tgt = tt + (d->tgt - d->targets);
tgt               481 drivers/block/aoe/aoedev.c 	d->tgt = d->targets;
tgt               101 drivers/block/rsxx/dma.c 	unsigned int tgt;
tgt               103 drivers/block/rsxx/dma.c 	tgt = (addr8 >> card->_stripe.target_shift) & card->_stripe.target_mask;
tgt               105 drivers/block/rsxx/dma.c 	return tgt;
tgt               682 drivers/block/rsxx/dma.c 	int tgt;
tgt               698 drivers/block/rsxx/dma.c 			tgt   = rsxx_get_dma_tgt(card, addr8);
tgt               701 drivers/block/rsxx/dma.c 			st = rsxx_queue_discard(card, &dma_list[tgt], laddr,
tgt               706 drivers/block/rsxx/dma.c 			dma_cnt[tgt]++;
tgt               717 drivers/block/rsxx/dma.c 				tgt   = rsxx_get_dma_tgt(card, addr8);
tgt               723 drivers/block/rsxx/dma.c 				st = rsxx_queue_dma(card, &dma_list[tgt],
tgt               731 drivers/block/rsxx/dma.c 				dma_cnt[tgt]++;
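
The rsxx entries above (drivers/block/rsxx/dma.c) pick the DMA target channel by shifting the 8-sector-aligned address and masking it against the card's stripe geometry, and then index dma_list[tgt]/dma_cnt[tgt] with the result. A standalone sketch of that computation; the shift and mask values are made-up placeholders, not the card's real geometry:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stripe geometry (placeholders, not hardware values). */
struct stripe {
	unsigned int target_shift;
	unsigned int target_mask;
};

static unsigned int get_dma_tgt(const struct stripe *s, uint64_t addr8)
{
	/* Same expression as rsxx_get_dma_tgt(): shift, then mask. */
	return (addr8 >> s->target_shift) & s->target_mask;
}

int main(void)
{
	struct stripe s = { .target_shift = 12, .target_mask = 0x7 };
	uint64_t addr8 = 0x0003a000;

	printf("addr 0x%llx -> target %u\n",
	       (unsigned long long)addr8, get_dma_tgt(&s, addr8));
	return 0;
}
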
tgt              1731 drivers/crypto/n2_core.c 		u64 tgt = mdesc_arc_target(mdesc, arc);
tgt              1732 drivers/crypto/n2_core.c 		const char *name = mdesc_node_name(mdesc, tgt);
tgt              1737 drivers/crypto/n2_core.c 		id = mdesc_get_property(mdesc, tgt, "id", NULL);
tgt               212 drivers/edac/skx_base.c #define SKX_ILV_REMOTE(tgt)	(((tgt) & 8) == 0)
tgt               213 drivers/edac/skx_base.c #define SKX_ILV_TARGET(tgt)	((tgt) & 7)
tgt               219 drivers/edac/skx_base.c 	int i, idx, tgt, lchan, shift;
tgt               262 drivers/edac/skx_base.c 	tgt = GET_BITFIELD(ilv, 4 * idx, 4 * idx + 3);
tgt               265 drivers/edac/skx_base.c 	if (SKX_ILV_REMOTE(tgt)) {
tgt               272 drivers/edac/skx_base.c 			if (d->imc[0].src_id == SKX_ILV_TARGET(tgt))
tgt               275 drivers/edac/skx_base.c 		edac_dbg(0, "Can't find node %d\n", SKX_ILV_TARGET(tgt));
tgt               280 drivers/edac/skx_base.c 		lchan = SKX_ILV_TARGET(tgt);
tgt               311 drivers/edac/skx_base.c 		lchan = (lchan << 1) | (SKX_ILV_TARGET(tgt) & 1);
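
The skx_base.c entries above extract a 4-bit target from the interleave list (4 bits per index); per the SKX_ILV_REMOTE()/SKX_ILV_TARGET() macros, a clear bit 3 selects a remote target and bits 2:0 give the target id. A small sketch of that decode with a made-up interleave register value (the GET_BITFIELD helper here is a local equivalent, not the kernel's):

#include <stdio.h>
#include <stdint.h>

#define SKX_ILV_REMOTE(tgt)	(((tgt) & 8) == 0)
#define SKX_ILV_TARGET(tgt)	((tgt) & 7)

/* Local equivalent of GET_BITFIELD: bits [lo..hi] of v, inclusive. */
static uint32_t get_bitfield(uint32_t v, unsigned int lo, unsigned int hi)
{
	return (v >> lo) & ((1u << (hi - lo + 1)) - 1);
}

int main(void)
{
	uint32_t ilv = 0x87654321;	/* made-up interleave list register */
	int idx;

	for (idx = 0; idx < 8; idx++) {
		int tgt = get_bitfield(ilv, 4 * idx, 4 * idx + 3);

		printf("idx %d: tgt=0x%x %s target %d\n", idx, tgt,
		       SKX_ILV_REMOTE(tgt) ? "remote" : "local",
		       SKX_ILV_TARGET(tgt));
	}
	return 0;
}
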
tgt               119 drivers/firewire/sbp2.c 	struct sbp2_target *tgt;
tgt               169 drivers/firewire/sbp2.c static struct fw_device *target_parent_device(struct sbp2_target *tgt)
tgt               171 drivers/firewire/sbp2.c 	return fw_parent_device(tgt->unit);
tgt               174 drivers/firewire/sbp2.c static const struct device *tgt_dev(const struct sbp2_target *tgt)
tgt               176 drivers/firewire/sbp2.c 	return &tgt->unit->device;
tgt               181 drivers/firewire/sbp2.c 	return &lu->tgt->unit->device;
tgt               435 drivers/firewire/sbp2.c 	spin_lock_irqsave(&lu->tgt->lock, flags);
tgt               444 drivers/firewire/sbp2.c 	spin_unlock_irqrestore(&lu->tgt->lock, flags);
tgt               471 drivers/firewire/sbp2.c 	spin_lock_irqsave(&orb->lu->tgt->lock, flags);
tgt               477 drivers/firewire/sbp2.c 		spin_unlock_irqrestore(&orb->lu->tgt->lock, flags);
tgt               482 drivers/firewire/sbp2.c 		spin_unlock_irqrestore(&orb->lu->tgt->lock, flags);
tgt               491 drivers/firewire/sbp2.c 	struct fw_device *device = target_parent_device(lu->tgt);
tgt               499 drivers/firewire/sbp2.c 	spin_lock_irqsave(&lu->tgt->lock, flags);
tgt               501 drivers/firewire/sbp2.c 	spin_unlock_irqrestore(&lu->tgt->lock, flags);
tgt               513 drivers/firewire/sbp2.c 	struct fw_device *device = target_parent_device(lu->tgt);
tgt               519 drivers/firewire/sbp2.c 	spin_lock_irq(&lu->tgt->lock);
tgt               521 drivers/firewire/sbp2.c 	spin_unlock_irq(&lu->tgt->lock);
tgt               551 drivers/firewire/sbp2.c 	struct fw_device *device = target_parent_device(lu->tgt);
tgt               590 drivers/firewire/sbp2.c 		timeout = lu->tgt->mgt_orb_timeout;
tgt               605 drivers/firewire/sbp2.c 		      lu->tgt->management_agent_address);
tgt               647 drivers/firewire/sbp2.c 	struct fw_device *device = target_parent_device(lu->tgt);
tgt               651 drivers/firewire/sbp2.c 			   lu->tgt->node_id, lu->generation, device->max_speed,
tgt               664 drivers/firewire/sbp2.c 	struct fw_device *device = target_parent_device(lu->tgt);
tgt               673 drivers/firewire/sbp2.c 			lu->tgt->node_id, lu->generation, device->max_speed,
tgt               678 drivers/firewire/sbp2.c static inline void sbp2_allow_block(struct sbp2_target *tgt)
tgt               680 drivers/firewire/sbp2.c 	spin_lock_irq(&tgt->lock);
tgt               681 drivers/firewire/sbp2.c 	--tgt->dont_block;
tgt               682 drivers/firewire/sbp2.c 	spin_unlock_irq(&tgt->lock);
tgt               697 drivers/firewire/sbp2.c 	struct sbp2_target *tgt = lu->tgt;
tgt               698 drivers/firewire/sbp2.c 	struct fw_card *card = target_parent_device(tgt)->card;
tgt               700 drivers/firewire/sbp2.c 		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
tgt               703 drivers/firewire/sbp2.c 	spin_lock_irqsave(&tgt->lock, flags);
tgt               704 drivers/firewire/sbp2.c 	if (!tgt->dont_block && !lu->blocked &&
tgt               707 drivers/firewire/sbp2.c 		if (++tgt->blocked == 1)
tgt               710 drivers/firewire/sbp2.c 	spin_unlock_irqrestore(&tgt->lock, flags);
tgt               721 drivers/firewire/sbp2.c 	struct sbp2_target *tgt = lu->tgt;
tgt               722 drivers/firewire/sbp2.c 	struct fw_card *card = target_parent_device(tgt)->card;
tgt               724 drivers/firewire/sbp2.c 		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
tgt               727 drivers/firewire/sbp2.c 	spin_lock_irq(&tgt->lock);
tgt               730 drivers/firewire/sbp2.c 		unblock = --tgt->blocked == 0;
tgt               732 drivers/firewire/sbp2.c 	spin_unlock_irq(&tgt->lock);
tgt               744 drivers/firewire/sbp2.c static void sbp2_unblock(struct sbp2_target *tgt)
tgt               747 drivers/firewire/sbp2.c 		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
tgt               749 drivers/firewire/sbp2.c 	spin_lock_irq(&tgt->lock);
tgt               750 drivers/firewire/sbp2.c 	++tgt->dont_block;
tgt               751 drivers/firewire/sbp2.c 	spin_unlock_irq(&tgt->lock);
tgt               785 drivers/firewire/sbp2.c 	struct fw_device *device = target_parent_device(lu->tgt);
tgt               789 drivers/firewire/sbp2.c 			   lu->tgt->node_id, lu->generation, device->max_speed,
tgt               799 drivers/firewire/sbp2.c 	struct sbp2_target *tgt = lu->tgt;
tgt               800 drivers/firewire/sbp2.c 	struct fw_device *device = target_parent_device(tgt);
tgt               824 drivers/firewire/sbp2.c 			dev_err(tgt_dev(tgt), "failed to login to LUN %04x\n",
tgt               827 drivers/firewire/sbp2.c 			sbp2_unblock(lu->tgt);
tgt               832 drivers/firewire/sbp2.c 	tgt->node_id	  = node_id;
tgt               833 drivers/firewire/sbp2.c 	tgt->address_high = local_node_id << 16;
tgt               842 drivers/firewire/sbp2.c 	dev_notice(tgt_dev(tgt), "logged in to LUN %04x (%d retries)\n",
tgt               859 drivers/firewire/sbp2.c 	if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY)
tgt               862 drivers/firewire/sbp2.c 	shost = container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
tgt               887 drivers/firewire/sbp2.c 	sbp2_allow_block(tgt);
tgt               909 drivers/firewire/sbp2.c 	struct sbp2_target *tgt = lu->tgt;
tgt               910 drivers/firewire/sbp2.c 	struct fw_device *device = target_parent_device(tgt);
tgt               934 drivers/firewire/sbp2.c 			dev_err(tgt_dev(tgt), "failed to reconnect\n");
tgt               943 drivers/firewire/sbp2.c 	tgt->node_id      = node_id;
tgt               944 drivers/firewire/sbp2.c 	tgt->address_high = local_node_id << 16;
tgt               948 drivers/firewire/sbp2.c 	dev_notice(tgt_dev(tgt), "reconnected to LUN %04x (%d retries)\n",
tgt               963 drivers/firewire/sbp2.c static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
tgt               981 drivers/firewire/sbp2.c 	lu->tgt      = tgt;
tgt               987 drivers/firewire/sbp2.c 	++tgt->dont_block;
tgt               992 drivers/firewire/sbp2.c 	list_add_tail(&lu->link, &tgt->lu_list);
tgt               996 drivers/firewire/sbp2.c static void sbp2_get_unit_unique_id(struct sbp2_target *tgt,
tgt              1000 drivers/firewire/sbp2.c 		tgt->guid = (u64)leaf[1] << 32 | leaf[2];
tgt              1003 drivers/firewire/sbp2.c static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt,
tgt              1012 drivers/firewire/sbp2.c 		    sbp2_add_logical_unit(tgt, value) < 0)
tgt              1017 drivers/firewire/sbp2.c static int sbp2_scan_unit_dir(struct sbp2_target *tgt, const u32 *directory,
tgt              1028 drivers/firewire/sbp2.c 			tgt->management_agent_address =
tgt              1033 drivers/firewire/sbp2.c 			tgt->directory_id = value;
tgt              1046 drivers/firewire/sbp2.c 			tgt->mgt_orb_timeout = (value >> 8 & 0xff) * 500;
tgt              1050 drivers/firewire/sbp2.c 			if (sbp2_add_logical_unit(tgt, value) < 0)
tgt              1055 drivers/firewire/sbp2.c 			sbp2_get_unit_unique_id(tgt, ci.p - 1 + value);
tgt              1060 drivers/firewire/sbp2.c 			if (sbp2_scan_logical_unit_dir(tgt, ci.p - 1 + value) < 0)
tgt              1073 drivers/firewire/sbp2.c static void sbp2_clamp_management_orb_timeout(struct sbp2_target *tgt)
tgt              1075 drivers/firewire/sbp2.c 	unsigned int timeout = tgt->mgt_orb_timeout;
tgt              1078 drivers/firewire/sbp2.c 		dev_notice(tgt_dev(tgt), "%ds mgt_ORB_timeout limited to 40s\n",
tgt              1081 drivers/firewire/sbp2.c 	tgt->mgt_orb_timeout = clamp_val(timeout, 5000, 40000);
tgt              1084 drivers/firewire/sbp2.c static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
tgt              1091 drivers/firewire/sbp2.c 		dev_notice(tgt_dev(tgt),
tgt              1113 drivers/firewire/sbp2.c 		dev_notice(tgt_dev(tgt), "workarounds 0x%x "
tgt              1116 drivers/firewire/sbp2.c 	tgt->workarounds = w;
tgt              1125 drivers/firewire/sbp2.c 	struct sbp2_target *tgt;
tgt              1134 drivers/firewire/sbp2.c 	shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
tgt              1138 drivers/firewire/sbp2.c 	tgt = (struct sbp2_target *)shost->hostdata;
tgt              1139 drivers/firewire/sbp2.c 	dev_set_drvdata(&unit->device, tgt);
tgt              1140 drivers/firewire/sbp2.c 	tgt->unit = unit;
tgt              1141 drivers/firewire/sbp2.c 	INIT_LIST_HEAD(&tgt->lu_list);
tgt              1142 drivers/firewire/sbp2.c 	spin_lock_init(&tgt->lock);
tgt              1143 drivers/firewire/sbp2.c 	tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
tgt              1155 drivers/firewire/sbp2.c 	tgt->directory_id = ((unit->directory - device->config_rom) * 4
tgt              1161 drivers/firewire/sbp2.c 	if (sbp2_scan_unit_dir(tgt, unit->directory, &model,
tgt              1165 drivers/firewire/sbp2.c 	sbp2_clamp_management_orb_timeout(tgt);
tgt              1166 drivers/firewire/sbp2.c 	sbp2_init_workarounds(tgt, model, firmware_revision);
tgt              1174 drivers/firewire/sbp2.c 	tgt->max_payload = min3(device->max_speed + 7, 10U,
tgt              1178 drivers/firewire/sbp2.c 	list_for_each_entry(lu, &tgt->lu_list, link)
tgt              1194 drivers/firewire/sbp2.c 	struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
tgt              1203 drivers/firewire/sbp2.c 	list_for_each_entry(lu, &tgt->lu_list, link) {
tgt              1213 drivers/firewire/sbp2.c 	struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
tgt              1216 drivers/firewire/sbp2.c 		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
tgt              1220 drivers/firewire/sbp2.c 	sbp2_unblock(tgt);
tgt              1222 drivers/firewire/sbp2.c 	list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
tgt              1339 drivers/firewire/sbp2.c 	struct fw_device *device = target_parent_device(base_orb->lu->tgt);
tgt              1400 drivers/firewire/sbp2.c 			cpu_to_be32(lu->tgt->address_high);
tgt              1426 drivers/firewire/sbp2.c 	orb->request.data_descriptor.high = cpu_to_be32(lu->tgt->address_high);
tgt              1445 drivers/firewire/sbp2.c 	struct fw_device *device = target_parent_device(lu->tgt);
tgt              1459 drivers/firewire/sbp2.c 		COMMAND_ORB_MAX_PAYLOAD(lu->tgt->max_payload) |
tgt              1483 drivers/firewire/sbp2.c 	sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, generation,
tgt              1507 drivers/firewire/sbp2.c 	if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
tgt              1526 drivers/firewire/sbp2.c 	    lu->tgt->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
tgt              1529 drivers/firewire/sbp2.c 	if (lu->tgt->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
tgt              1532 drivers/firewire/sbp2.c 	if (lu->tgt->workarounds & SBP2_WORKAROUND_POWER_CONDITION)
tgt              1535 drivers/firewire/sbp2.c 	if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
tgt              1575 drivers/firewire/sbp2.c 			(unsigned long long)lu->tgt->guid,
tgt              1576 drivers/firewire/sbp2.c 			lu->tgt->directory_id, lu->lun);
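
The sbp2.c entries above all funnel through one locking pattern: tgt->lock protects two counters, dont_block (raised while blocking must be avoided, e.g. during login or shutdown) and blocked (how many logical units currently want the Scsi_Host blocked); the host is blocked on the 0 -> 1 transition and unblocked on 1 -> 0. A simplified userspace analogue of that counting scheme, using a pthread mutex in place of the spinlock and printf stubs in place of scsi_block_requests()/scsi_unblock_requests(); the types and the omitted generation check are simplifications, not the driver's code:

#include <stdio.h>
#include <pthread.h>

struct target {
	pthread_mutex_t lock;	/* stands in for tgt->lock */
	int dont_block;		/* blocking temporarily forbidden */
	int blocked;		/* how many units want the host blocked */
};

static void host_block(void)   { printf("host blocked\n"); }
static void host_unblock(void) { printf("host unblocked\n"); }

/* Mirrors sbp2_conditionally_block(): block on the 0 -> 1 transition. */
static void conditionally_block(struct target *tgt, int *unit_blocked)
{
	pthread_mutex_lock(&tgt->lock);
	if (!tgt->dont_block && !*unit_blocked) {
		*unit_blocked = 1;
		if (++tgt->blocked == 1)
			host_block();
	}
	pthread_mutex_unlock(&tgt->lock);
}

/* Mirrors sbp2_conditionally_unblock(): unblock on the 1 -> 0 transition. */
static void conditionally_unblock(struct target *tgt, int *unit_blocked)
{
	int unblock = 0;

	pthread_mutex_lock(&tgt->lock);
	if (*unit_blocked) {
		*unit_blocked = 0;
		unblock = (--tgt->blocked == 0);
	}
	pthread_mutex_unlock(&tgt->lock);
	if (unblock)
		host_unblock();
}

int main(void)
{
	struct target tgt = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };
	int lu_a = 0, lu_b = 0;

	conditionally_block(&tgt, &lu_a);	/* 0 -> 1: host blocked */
	conditionally_block(&tgt, &lu_b);	/* 1 -> 2: no extra block */
	conditionally_unblock(&tgt, &lu_a);
	conditionally_unblock(&tgt, &lu_b);	/* 1 -> 0: host unblocked */
	return 0;
}
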
tgt               177 drivers/firmware/iscsi_ibft.c 		struct ibft_tgt *tgt;
tgt               355 drivers/firmware/iscsi_ibft.c 	struct ibft_tgt *tgt = entry->tgt;
tgt               360 drivers/firmware/iscsi_ibft.c 	if (!tgt)
tgt               365 drivers/firmware/iscsi_ibft.c 		str += sprintf(str, "%d\n", tgt->hdr.index);
tgt               368 drivers/firmware/iscsi_ibft.c 		str += sprintf(str, "%d\n", tgt->hdr.flags);
tgt               371 drivers/firmware/iscsi_ibft.c 		str += sprintf_ipaddr(str, tgt->ip_addr);
tgt               374 drivers/firmware/iscsi_ibft.c 		str += sprintf(str, "%d\n", tgt->port);
tgt               378 drivers/firmware/iscsi_ibft.c 			str += sprintf(str, "%x", (u8)tgt->lun[i]);
tgt               382 drivers/firmware/iscsi_ibft.c 		str += sprintf(str, "%d\n", tgt->nic_assoc);
tgt               385 drivers/firmware/iscsi_ibft.c 		str += sprintf(str, "%d\n", tgt->chap_type);
tgt               388 drivers/firmware/iscsi_ibft.c 		str += sprintf_string(str, tgt->tgt_name_len,
tgt               389 drivers/firmware/iscsi_ibft.c 				      (char *)ibft_loc + tgt->tgt_name_off);
tgt               392 drivers/firmware/iscsi_ibft.c 		str += sprintf_string(str, tgt->chap_name_len,
tgt               393 drivers/firmware/iscsi_ibft.c 				      (char *)ibft_loc + tgt->chap_name_off);
tgt               396 drivers/firmware/iscsi_ibft.c 		str += sprintf_string(str, tgt->chap_secret_len,
tgt               397 drivers/firmware/iscsi_ibft.c 				      (char *)ibft_loc + tgt->chap_secret_off);
tgt               400 drivers/firmware/iscsi_ibft.c 		str += sprintf_string(str, tgt->rev_chap_name_len,
tgt               402 drivers/firmware/iscsi_ibft.c 				      tgt->rev_chap_name_off);
tgt               405 drivers/firmware/iscsi_ibft.c 		str += sprintf_string(str, tgt->rev_chap_secret_len,
tgt               407 drivers/firmware/iscsi_ibft.c 				      tgt->rev_chap_secret_off);
tgt               528 drivers/firmware/iscsi_ibft.c 	struct ibft_tgt *tgt = entry->tgt;
tgt               542 drivers/firmware/iscsi_ibft.c 		if (tgt->tgt_name_len)
tgt               547 drivers/firmware/iscsi_ibft.c 		if (tgt->chap_name_len)
tgt               552 drivers/firmware/iscsi_ibft.c 		if (tgt->rev_chap_name_len)
tgt               677 drivers/firmware/iscsi_ibft.c 				     sizeof(*ibft_kobj->tgt));
tgt                40 drivers/lightnvm/core.c 	struct nvm_target *tgt;
tgt                42 drivers/lightnvm/core.c 	list_for_each_entry(tgt, &dev->targets, list)
tgt                43 drivers/lightnvm/core.c 		if (!strcmp(name, tgt->disk->disk_name))
tgt                44 drivers/lightnvm/core.c 			return tgt;
tgt                52 drivers/lightnvm/core.c 	struct nvm_target *tgt;
tgt                58 drivers/lightnvm/core.c 		list_for_each_entry(tgt, &dev->targets, list) {
tgt                59 drivers/lightnvm/core.c 			if (!strcmp(name, tgt->disk->disk_name)) {
tgt              1274 drivers/lightnvm/core.c 		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];
tgt              1276 drivers/lightnvm/core.c 		tgt->version[0] = tt->version[0];
tgt              1277 drivers/lightnvm/core.c 		tgt->version[1] = tt->version[1];
tgt              1278 drivers/lightnvm/core.c 		tgt->version[2] = tt->version[2];
tgt              1279 drivers/lightnvm/core.c 		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);
tgt               244 drivers/md/dm-table.c 		struct dm_target *tgt = t->targets + i;
tgt               246 drivers/md/dm-table.c 		if (tgt->type->dtr)
tgt               247 drivers/md/dm-table.c 			tgt->type->dtr(tgt);
tgt               249 drivers/md/dm-table.c 		dm_put_target_type(tgt->type);
tgt               713 drivers/md/dm-table.c 	struct dm_target *tgt;
tgt               723 drivers/md/dm-table.c 	tgt = t->targets + t->num_targets;
tgt               724 drivers/md/dm-table.c 	memset(tgt, 0, sizeof(*tgt));
tgt               731 drivers/md/dm-table.c 	tgt->type = dm_get_target_type(type);
tgt               732 drivers/md/dm-table.c 	if (!tgt->type) {
tgt               737 drivers/md/dm-table.c 	if (dm_target_needs_singleton(tgt->type)) {
tgt               739 drivers/md/dm-table.c 			tgt->error = "singleton target type must appear alone in table";
tgt               745 drivers/md/dm-table.c 	if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
tgt               746 drivers/md/dm-table.c 		tgt->error = "target type may not be included in a read-only table";
tgt               751 drivers/md/dm-table.c 		if (t->immutable_target_type != tgt->type) {
tgt               752 drivers/md/dm-table.c 			tgt->error = "immutable target type cannot be mixed with other target types";
tgt               755 drivers/md/dm-table.c 	} else if (dm_target_is_immutable(tgt->type)) {
tgt               757 drivers/md/dm-table.c 			tgt->error = "immutable target type cannot be mixed with other target types";
tgt               760 drivers/md/dm-table.c 		t->immutable_target_type = tgt->type;
tgt               763 drivers/md/dm-table.c 	if (dm_target_has_integrity(tgt->type))
tgt               766 drivers/md/dm-table.c 	tgt->table = t;
tgt               767 drivers/md/dm-table.c 	tgt->begin = start;
tgt               768 drivers/md/dm-table.c 	tgt->len = len;
tgt               769 drivers/md/dm-table.c 	tgt->error = "Unknown error";
tgt               774 drivers/md/dm-table.c 	if (!adjoin(t, tgt)) {
tgt               775 drivers/md/dm-table.c 		tgt->error = "Gap in table";
tgt               781 drivers/md/dm-table.c 		tgt->error = "couldn't split parameters (insufficient memory)";
tgt               785 drivers/md/dm-table.c 	r = tgt->type->ctr(tgt, argc, argv);
tgt               790 drivers/md/dm-table.c 	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
tgt               792 drivers/md/dm-table.c 	if (!tgt->num_discard_bios && tgt->discards_supported)
tgt               799 drivers/md/dm-table.c 	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
tgt               800 drivers/md/dm-table.c 	dm_put_target_type(tgt->type);
tgt               945 drivers/md/dm-table.c 	struct dm_target *tgt;
tgt               962 drivers/md/dm-table.c 		tgt = t->targets + i;
tgt               963 drivers/md/dm-table.c 		if (dm_target_hybrid(tgt))
tgt               965 drivers/md/dm-table.c 		else if (dm_target_request_based(tgt))
tgt               998 drivers/md/dm-table.c 			tgt = dm_table_get_immutable_target(t);
tgt               999 drivers/md/dm-table.c 			if (tgt && !tgt->max_io_len && dm_table_does_not_support_partial_completion(t)) {
tgt              1037 drivers/md/dm-table.c 	tgt = dm_table_get_immutable_target(t);
tgt              1038 drivers/md/dm-table.c 	if (!tgt) {
tgt              1041 drivers/md/dm-table.c 	} else if (tgt->max_io_len) {
tgt              1047 drivers/md/dm-table.c 	if (!tgt->type->iterate_devices ||
tgt              1048 drivers/md/dm-table.c 	    !tgt->type->iterate_devices(tgt, device_is_rq_based, &v)) {
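
The dm-table.c entries above show how a new target is validated in dm_table_add_target(): it must start exactly where the previous target ended (adjoin(), else "Gap in table"), and its inclusive last sector is recorded in t->highs[]. A standalone sketch of that bookkeeping with simplified structures; adjoin()'s body is not shown in the listing, so the contiguity check here (including first-target-starts-at-0) is an assumption:

#include <stdio.h>

typedef unsigned long long sector_t;

struct target {
	sector_t begin;
	sector_t len;
};

struct table {
	unsigned int num_targets;
	struct target targets[8];
	sector_t highs[8];	/* inclusive last sector of each target */
};

/* Assumed contiguity check: the new target must start right where the
 * previous one ended, and the very first one must start at sector 0. */
static int adjoin(const struct table *t, const struct target *tgt)
{
	const struct target *prev;

	if (!t->num_targets)
		return tgt->begin == 0;

	prev = &t->targets[t->num_targets - 1];
	return tgt->begin == prev->begin + prev->len;
}

static int add_target(struct table *t, sector_t start, sector_t len)
{
	struct target *tgt = &t->targets[t->num_targets];

	tgt->begin = start;
	tgt->len = len;

	if (!adjoin(t, tgt)) {
		fprintf(stderr, "Gap in table\n");
		return -1;
	}

	/* Same bookkeeping as dm_table_add_target(). */
	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
	return 0;
}

int main(void)
{
	struct table t = { 0 };

	add_target(&t, 0, 100);
	add_target(&t, 100, 50);	/* adjoins: accepted */
	add_target(&t, 200, 10);	/* gap: rejected */
	printf("targets=%u, last high=%llu\n",
	       t.num_targets, t.highs[t.num_targets - 1]);
	return 0;
}
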
tgt               448 drivers/md/dm.c 	struct dm_target *tgt;
tgt               459 drivers/md/dm.c 	tgt = dm_table_find_target(map, sector);
tgt               460 drivers/md/dm.c 	if (!tgt) {
tgt               471 drivers/md/dm.c 	if (WARN_ON(!tgt->type->report_zones)) {
tgt               482 drivers/md/dm.c 	ret = tgt->type->report_zones(tgt, sector, zones, nr_zones);
tgt               496 drivers/md/dm.c 	struct dm_target *tgt;
tgt               510 drivers/md/dm.c 	tgt = dm_table_get_target(map, 0);
tgt               511 drivers/md/dm.c 	if (!tgt->type->prepare_ioctl)
tgt               517 drivers/md/dm.c 	r = tgt->type->prepare_ioctl(tgt, bdev);
tgt              1362 drivers/md/raid5.c 	struct r5dev *tgt;
tgt              1367 drivers/md/raid5.c 	tgt = &sh->dev[target];
tgt              1368 drivers/md/raid5.c 	set_bit(R5_UPTODATE, &tgt->flags);
tgt              1369 drivers/md/raid5.c 	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
tgt              1370 drivers/md/raid5.c 	clear_bit(R5_Wantcompute, &tgt->flags);
tgt              1410 drivers/md/raid5.c 	struct r5dev *tgt = &sh->dev[target];
tgt              1411 drivers/md/raid5.c 	struct page *xor_dest = tgt->page;
tgt              1421 drivers/md/raid5.c 	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
tgt              1495 drivers/md/raid5.c 	struct r5dev *tgt;
tgt              1512 drivers/md/raid5.c 	tgt = &sh->dev[target];
tgt              1513 drivers/md/raid5.c 	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
tgt              1514 drivers/md/raid5.c 	dest = tgt->page;
tgt              1553 drivers/md/raid5.c 	struct r5dev *tgt = &sh->dev[target];
tgt              1563 drivers/md/raid5.c 	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
tgt                15 drivers/media/dvb-frontends/cxd2880/cxd2880_devio_spi.c 				   enum cxd2880_io_tgt tgt,
tgt                32 drivers/media/dvb-frontends/cxd2880/cxd2880_devio_spi.c 	if (tgt == CXD2880_IO_TGT_SYS)
tgt                63 drivers/media/dvb-frontends/cxd2880/cxd2880_devio_spi.c 				    enum cxd2880_io_tgt tgt,
tgt                83 drivers/media/dvb-frontends/cxd2880/cxd2880_devio_spi.c 	if (tgt == CXD2880_IO_TGT_SYS)
tgt                97 drivers/media/dvb-frontends/cxd2880/cxd2880_devio_spi.c 		if (tgt == CXD2880_IO_TGT_SYS) {
tgt                13 drivers/media/dvb-frontends/cxd2880/cxd2880_io.c 				    enum cxd2880_io_tgt tgt,
tgt                19 drivers/media/dvb-frontends/cxd2880/cxd2880_io.c 	return io->write_regs(io, tgt, sub_address, &data, 1);
tgt                23 drivers/media/dvb-frontends/cxd2880/cxd2880_io.c 			    enum cxd2880_io_tgt tgt,
tgt                37 drivers/media/dvb-frontends/cxd2880/cxd2880_io.c 		ret = io->read_regs(io, tgt, sub_address, &rdata, 1);
tgt                44 drivers/media/dvb-frontends/cxd2880/cxd2880_io.c 	return io->write_reg(io, tgt, sub_address, data);
tgt                48 drivers/media/dvb-frontends/cxd2880/cxd2880_io.c 			     enum cxd2880_io_tgt tgt,
tgt                59 drivers/media/dvb-frontends/cxd2880/cxd2880_io.c 		ret = io->write_reg(io, tgt, reg_value[i].addr,
tgt                27 drivers/media/dvb-frontends/cxd2880/cxd2880_io.h 			 enum cxd2880_io_tgt tgt, u8 sub_address,
tgt                30 drivers/media/dvb-frontends/cxd2880/cxd2880_io.h 			  enum cxd2880_io_tgt tgt, u8 sub_address,
tgt                33 drivers/media/dvb-frontends/cxd2880/cxd2880_io.h 			 enum cxd2880_io_tgt tgt, u8 sub_address,
tgt                43 drivers/media/dvb-frontends/cxd2880/cxd2880_io.h 				    enum cxd2880_io_tgt tgt,
tgt                47 drivers/media/dvb-frontends/cxd2880/cxd2880_io.h 			    enum cxd2880_io_tgt tgt,
tgt                51 drivers/media/dvb-frontends/cxd2880/cxd2880_io.h 				enum cxd2880_io_tgt tgt,
tgt              1858 drivers/media/dvb-frontends/cxd2880/cxd2880_tnrdmd.c 					     tnr_dmd->cfg_mem[i].tgt,
tgt              1864 drivers/media/dvb-frontends/cxd2880/cxd2880_tnrdmd.c 					      tnr_dmd->cfg_mem[i].tgt,
tgt              1876 drivers/media/dvb-frontends/cxd2880/cxd2880_tnrdmd.c 		       enum cxd2880_io_tgt tgt,
tgt              1887 drivers/media/dvb-frontends/cxd2880/cxd2880_tnrdmd.c 		    tnr_dmd->cfg_mem[i].tgt == tgt &&
tgt              1903 drivers/media/dvb-frontends/cxd2880/cxd2880_tnrdmd.c 		tnr_dmd->cfg_mem[tnr_dmd->cfg_mem_last_entry].tgt = tgt;
tgt              3247 drivers/media/dvb-frontends/cxd2880/cxd2880_tnrdmd.c 					 enum cxd2880_io_tgt tgt,
tgt              3260 drivers/media/dvb-frontends/cxd2880/cxd2880_tnrdmd.c 	ret = tnr_dmd->io->write_reg(tnr_dmd->io, tgt, 0x00, bank);
tgt              3265 drivers/media/dvb-frontends/cxd2880/cxd2880_tnrdmd.c 				      tgt, address, value, bit_mask);
tgt              3269 drivers/media/dvb-frontends/cxd2880/cxd2880_tnrdmd.c 	return set_cfg_mem(tnr_dmd, tgt, bank, address, value, bit_mask);
tgt               153 drivers/media/dvb-frontends/cxd2880/cxd2880_tnrdmd.h 	enum cxd2880_io_tgt tgt;
tgt               318 drivers/media/dvb-frontends/cxd2880/cxd2880_tnrdmd.h 					 enum cxd2880_io_tgt tgt,
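
The cxd2880 entries above route every register update through the io->write_reg/read_regs callbacks keyed by the tgt (SYS vs. TDMD) selector, and cxd2880_io_set_reg_bits() reads the current value before writing when only some bits change. A minimal sketch of that masked read-modify-write; the merge expression and the in-memory "register file" are assumptions for illustration, not the driver's exact code:

#include <stdio.h>
#include <stdint.h>

/* Fake register file standing in for the I2C/SPI target (assumption). */
static uint8_t regs[256];

static int read_reg(uint8_t sub_address, uint8_t *data)
{
	*data = regs[sub_address];
	return 0;
}

static int write_reg(uint8_t sub_address, uint8_t data)
{
	regs[sub_address] = data;
	return 0;
}

/* Masked update: only the bits selected by mask take the new value. */
static int set_reg_bits(uint8_t sub_address, uint8_t data, uint8_t mask)
{
	if (mask != 0xff) {
		uint8_t rdata;
		int ret = read_reg(sub_address, &rdata);

		if (ret)
			return ret;
		data = (data & mask) | (rdata & (uint8_t)~mask);
	}
	return write_reg(sub_address, data);
}

int main(void)
{
	regs[0x10] = 0xa5;
	set_reg_bits(0x10, 0x0f, 0x0f);		/* touch low nibble only */
	printf("reg 0x10 = 0x%02x\n", regs[0x10]);	/* expect 0xaf */
	return 0;
}
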
tgt               198 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c 	    int tgt, int act, int tok, u64 offset, size_t size, int width)
tgt               203 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c 	if (tgt >= NFP_CPP_NUM_TARGETS)
tgt               229 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c 		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(tgt);
tgt               244 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c 		newcfg |= NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(tgt);
tgt               295 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c 		int tgt, int act, int tok, u64 offset, size_t size, int width)
tgt               302 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c 			  tgt, act, tok, offset, size, width);
tgt               312 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c static int matching_bar(struct nfp_bar *bar, u32 tgt, u32 act, u32 tok,
tgt               360 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c 	if ((bartgt < 0 || bartgt == tgt) &&
tgt               373 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c 		  u32 tgt, u32 act, u32 tok, u64 offset, size_t size, int width)
tgt               380 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c 		if (matching_bar(bar, tgt, act, tok, offset, size, width))
tgt               390 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c 			int tgt, int act, int tok,
tgt               404 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c 				  tgt, act, tok, offset, size, width);
tgt               415 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c 		 tgt, act, tok, offset, size, width))
tgt               423 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c 			 int tgt, int act, int tok,
tgt               431 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c 	n = find_unused_bar_noblock(nfp, tgt, act, tok, offset, size, width);
tgt               453 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c 		 u32 tgt, u32 act, u32 tok, u64 offset, size_t size, int width)
tgt               456 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c 		(*barnum = find_unused_bar_and_lock(nfp, tgt, act, tok,
tgt               463 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c 	      u32 tgt, u32 act, u32 tok,
tgt               473 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c 	barnum = find_matching_bar(nfp, tgt, act, tok, offset, size, width);
tgt               481 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c 	barnum = find_unused_bar_noblock(nfp, tgt, act, tok,
tgt               492 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c 		retval = nfp_wait_for_bar(nfp, &barnum, tgt, act, tok,
tgt               501 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c 				 tgt, act, tok, offset, size, width);
tgt              1169 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c 	size_t tgt;
tgt              1227 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c 	for (tgt = 0; tgt < ARRAY_SIZE(cpp->imb_cat_table); tgt++) {
tgt              1229 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c 		xpbaddr = 0x000a0000 + (tgt * 4);
tgt              1231 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c 				    &cpp->imb_cat_table[tgt]);
tgt               729 drivers/pci/controller/pci-mvebu.c 			      unsigned int *tgt,
tgt               736 drivers/pci/controller/pci-mvebu.c 	*tgt = -1;
tgt               761 drivers/pci/controller/pci-mvebu.c 			*tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
tgt              1352 drivers/scsi/BusLogic.c 	unsigned char *tgt, ch;
tgt              1362 drivers/scsi/BusLogic.c 		tgt = adapter->model;
tgt              1363 drivers/scsi/BusLogic.c 		*tgt++ = 'B';
tgt              1364 drivers/scsi/BusLogic.c 		*tgt++ = 'T';
tgt              1365 drivers/scsi/BusLogic.c 		*tgt++ = '-';
tgt              1367 drivers/scsi/BusLogic.c 			*tgt++ = fpinfo->model[i];
tgt              1368 drivers/scsi/BusLogic.c 		*tgt++ = '\0';
tgt              1482 drivers/scsi/BusLogic.c 	tgt = adapter->model;
tgt              1483 drivers/scsi/BusLogic.c 	*tgt++ = 'B';
tgt              1484 drivers/scsi/BusLogic.c 	*tgt++ = 'T';
tgt              1485 drivers/scsi/BusLogic.c 	*tgt++ = '-';
tgt              1490 drivers/scsi/BusLogic.c 		*tgt++ = ch;
tgt              1492 drivers/scsi/BusLogic.c 	*tgt++ = '\0';
tgt              1496 drivers/scsi/BusLogic.c 	tgt = adapter->fw_ver;
tgt              1497 drivers/scsi/BusLogic.c 	*tgt++ = id.fw_ver_digit1;
tgt              1498 drivers/scsi/BusLogic.c 	*tgt++ = '.';
tgt              1499 drivers/scsi/BusLogic.c 	*tgt++ = id.fw_ver_digit2;
tgt              1501 drivers/scsi/BusLogic.c 		*tgt++ = fw_ver_digit3;
tgt              1502 drivers/scsi/BusLogic.c 	*tgt = '\0';
tgt              1513 drivers/scsi/BusLogic.c 			*tgt++ = fw_ver_letter;
tgt              1514 drivers/scsi/BusLogic.c 		*tgt = '\0';
tgt              3477 drivers/scsi/BusLogic.c 	int tgt;
tgt              3489 drivers/scsi/BusLogic.c 	for (tgt = 0; tgt < adapter->maxdev; tgt++) {
tgt              3490 drivers/scsi/BusLogic.c 		struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt];
tgt              3493 drivers/scsi/BusLogic.c 		seq_printf(m, "  %2d	%s", tgt, (tgt_flags->tagq_ok ? (tgt_flags->tagq_active ? "    Active" : (adapter->tagq_ok & (1 << tgt)
tgt              3497 drivers/scsi/BusLogic.c 				  "	    %3d       %3u    %9u	%9u\n", adapter->qdepth[tgt], adapter->active_cmds[tgt], tgt_stats[tgt].cmds_tried, tgt_stats[tgt].cmds_complete);
tgt              3502 drivers/scsi/BusLogic.c 	for (tgt = 0; tgt < adapter->maxdev; tgt++) {
tgt              3503 drivers/scsi/BusLogic.c 		struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt];
tgt              3506 drivers/scsi/BusLogic.c 		seq_printf(m, "  %2d	  %9u	 %9u", tgt, tgt_stats[tgt].read_cmds, tgt_stats[tgt].write_cmds);
tgt              3507 drivers/scsi/BusLogic.c 		if (tgt_stats[tgt].bytesread.billions > 0)
tgt              3508 drivers/scsi/BusLogic.c 			seq_printf(m, "     %9u%09u", tgt_stats[tgt].bytesread.billions, tgt_stats[tgt].bytesread.units);
tgt              3510 drivers/scsi/BusLogic.c 			seq_printf(m, "		%9u", tgt_stats[tgt].bytesread.units);
tgt              3511 drivers/scsi/BusLogic.c 		if (tgt_stats[tgt].byteswritten.billions > 0)
tgt              3512 drivers/scsi/BusLogic.c 			seq_printf(m, "   %9u%09u\n", tgt_stats[tgt].byteswritten.billions, tgt_stats[tgt].byteswritten.units);
tgt              3514 drivers/scsi/BusLogic.c 			seq_printf(m, "	     %9u\n", tgt_stats[tgt].byteswritten.units);
tgt              3519 drivers/scsi/BusLogic.c 	for (tgt = 0; tgt < adapter->maxdev; tgt++) {
tgt              3520 drivers/scsi/BusLogic.c 		struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt];
tgt              3524 drivers/scsi/BusLogic.c 			    "  %2d	 Read	 %9u  %9u  %9u  %9u  %9u\n", tgt,
tgt              3525 drivers/scsi/BusLogic.c 			    tgt_stats[tgt].read_sz_buckets[0],
tgt              3526 drivers/scsi/BusLogic.c 			    tgt_stats[tgt].read_sz_buckets[1], tgt_stats[tgt].read_sz_buckets[2], tgt_stats[tgt].read_sz_buckets[3], tgt_stats[tgt].read_sz_buckets[4]);
tgt              3528 drivers/scsi/BusLogic.c 			    "  %2d	 Write	 %9u  %9u  %9u  %9u  %9u\n", tgt,
tgt              3529 drivers/scsi/BusLogic.c 			    tgt_stats[tgt].write_sz_buckets[0],
tgt              3530 drivers/scsi/BusLogic.c 			    tgt_stats[tgt].write_sz_buckets[1], tgt_stats[tgt].write_sz_buckets[2], tgt_stats[tgt].write_sz_buckets[3], tgt_stats[tgt].write_sz_buckets[4]);
tgt              3535 drivers/scsi/BusLogic.c 	for (tgt = 0; tgt < adapter->maxdev; tgt++) {
tgt              3536 drivers/scsi/BusLogic.c 		struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt];
tgt              3540 drivers/scsi/BusLogic.c 			    "  %2d	 Read	 %9u  %9u  %9u  %9u  %9u\n", tgt,
tgt              3541 drivers/scsi/BusLogic.c 			    tgt_stats[tgt].read_sz_buckets[5],
tgt              3542 drivers/scsi/BusLogic.c 			    tgt_stats[tgt].read_sz_buckets[6], tgt_stats[tgt].read_sz_buckets[7], tgt_stats[tgt].read_sz_buckets[8], tgt_stats[tgt].read_sz_buckets[9]);
tgt              3544 drivers/scsi/BusLogic.c 			    "  %2d	 Write	 %9u  %9u  %9u  %9u  %9u\n", tgt,
tgt              3545 drivers/scsi/BusLogic.c 			    tgt_stats[tgt].write_sz_buckets[5],
tgt              3546 drivers/scsi/BusLogic.c 			    tgt_stats[tgt].write_sz_buckets[6], tgt_stats[tgt].write_sz_buckets[7], tgt_stats[tgt].write_sz_buckets[8], tgt_stats[tgt].write_sz_buckets[9]);
tgt              3555 drivers/scsi/BusLogic.c 	for (tgt = 0; tgt < adapter->maxdev; tgt++) {
tgt              3556 drivers/scsi/BusLogic.c 		struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt];
tgt              3560 drivers/scsi/BusLogic.c 			   tgt, tgt_stats[tgt].aborts_request,
tgt              3561 drivers/scsi/BusLogic.c 			   tgt_stats[tgt].aborts_tried,
tgt              3562 drivers/scsi/BusLogic.c 			   tgt_stats[tgt].aborts_done,
tgt              3563 drivers/scsi/BusLogic.c 			   tgt_stats[tgt].bdr_request,
tgt              3564 drivers/scsi/BusLogic.c 			   tgt_stats[tgt].bdr_tried,
tgt              3565 drivers/scsi/BusLogic.c 			   tgt_stats[tgt].bdr_done,
tgt              3566 drivers/scsi/BusLogic.c 			   tgt_stats[tgt].adapter_reset_req,
tgt              3567 drivers/scsi/BusLogic.c 			   tgt_stats[tgt].adapter_reset_attempt,
tgt              3568 drivers/scsi/BusLogic.c 			   tgt_stats[tgt].adapter_reset_done);
tgt               430 drivers/scsi/bnx2fc/bnx2fc.h 	struct bnx2fc_rport *tgt;
tgt               484 drivers/scsi/bnx2fc/bnx2fc.h 	struct bnx2fc_rport *tgt;
tgt               496 drivers/scsi/bnx2fc/bnx2fc.h struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt);
tgt               497 drivers/scsi/bnx2fc/bnx2fc.h struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type);
tgt               503 drivers/scsi/bnx2fc/bnx2fc.h 					struct bnx2fc_rport *tgt);
tgt               505 drivers/scsi/bnx2fc/bnx2fc.h 					struct bnx2fc_rport *tgt);
tgt               507 drivers/scsi/bnx2fc/bnx2fc.h 				    struct bnx2fc_rport *tgt);
tgt               509 drivers/scsi/bnx2fc/bnx2fc.h 					struct bnx2fc_rport *tgt);
tgt               510 drivers/scsi/bnx2fc/bnx2fc.h int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt);
tgt               520 drivers/scsi/bnx2fc/bnx2fc.h char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items);
tgt               521 drivers/scsi/bnx2fc/bnx2fc.h void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items);
tgt               524 drivers/scsi/bnx2fc/bnx2fc.h int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp);
tgt               525 drivers/scsi/bnx2fc/bnx2fc.h int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp);
tgt               526 drivers/scsi/bnx2fc/bnx2fc.h int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp);
tgt               543 drivers/scsi/bnx2fc/bnx2fc.h void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid);
tgt               544 drivers/scsi/bnx2fc/bnx2fc.h void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt);
tgt               571 drivers/scsi/bnx2fc/bnx2fc.h void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt);
tgt               578 drivers/scsi/bnx2fc/bnx2fc.h void bnx2fc_arm_cq(struct bnx2fc_rport *tgt);
tgt               579 drivers/scsi/bnx2fc/bnx2fc.h int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt);
tgt               580 drivers/scsi/bnx2fc/bnx2fc.h void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe);
tgt               583 drivers/scsi/bnx2fc/bnx2fc.h void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
tgt               587 drivers/scsi/bnx2fc/bnx2fc.h int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, struct bnx2fc_cmd *io_req);
tgt                41 drivers/scsi/bnx2fc/bnx2fc_debug.c void BNX2FC_TGT_DBG(const struct bnx2fc_rport *tgt, const char *fmt, ...)
tgt                54 drivers/scsi/bnx2fc/bnx2fc_debug.c 	if (tgt && tgt->port && tgt->port->lport && tgt->port->lport->host &&
tgt                55 drivers/scsi/bnx2fc/bnx2fc_debug.c 	    tgt->rport)
tgt                56 drivers/scsi/bnx2fc/bnx2fc_debug.c 		shost_printk(KERN_INFO, tgt->port->lport->host,
tgt                58 drivers/scsi/bnx2fc/bnx2fc_debug.c 			     tgt->rport->port_id, &vaf);
tgt                43 drivers/scsi/bnx2fc/bnx2fc_debug.h void BNX2FC_TGT_DBG(const struct bnx2fc_rport *tgt, const char *fmt, ...);
tgt                23 drivers/scsi/bnx2fc/bnx2fc_els.c static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
tgt                64 drivers/scsi/bnx2fc/bnx2fc_els.c 	struct bnx2fc_rport *tgt = aborted_io_req->tgt;
tgt                72 drivers/scsi/bnx2fc/bnx2fc_els.c 	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))
tgt                75 drivers/scsi/bnx2fc/bnx2fc_els.c 	lport = tgt->rdata->local_port;
tgt                76 drivers/scsi/bnx2fc/bnx2fc_els.c 	sid = tgt->sid;
tgt                98 drivers/scsi/bnx2fc/bnx2fc_els.c 	rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
tgt               115 drivers/scsi/bnx2fc/bnx2fc_els.c 		spin_lock_bh(&tgt->tgt_lock);
tgt               117 drivers/scsi/bnx2fc/bnx2fc_els.c 		spin_unlock_bh(&tgt->tgt_lock);
tgt               125 drivers/scsi/bnx2fc/bnx2fc_els.c 	struct bnx2fc_rport *tgt;
tgt               153 drivers/scsi/bnx2fc/bnx2fc_els.c 	tgt = els_req->tgt;
tgt               174 drivers/scsi/bnx2fc/bnx2fc_els.c 	bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid);
tgt               182 drivers/scsi/bnx2fc/bnx2fc_els.c int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp)
tgt               187 drivers/scsi/bnx2fc/bnx2fc_els.c 	struct fc_lport *lport = tgt->rdata->local_port;
tgt               203 drivers/scsi/bnx2fc/bnx2fc_els.c 	rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc),
tgt               210 drivers/scsi/bnx2fc/bnx2fc_els.c int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp)
tgt               215 drivers/scsi/bnx2fc/bnx2fc_els.c 	struct fc_lport *lport = tgt->rdata->local_port;
tgt               231 drivers/scsi/bnx2fc/bnx2fc_els.c 	rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo),
tgt               238 drivers/scsi/bnx2fc/bnx2fc_els.c int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
tgt               243 drivers/scsi/bnx2fc/bnx2fc_els.c 	struct fc_lport *lport = tgt->rdata->local_port;
tgt               258 drivers/scsi/bnx2fc/bnx2fc_els.c 	rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls),
tgt               299 drivers/scsi/bnx2fc/bnx2fc_els.c 			struct bnx2fc_rport *tgt = orig_io_req->tgt;
tgt               300 drivers/scsi/bnx2fc/bnx2fc_els.c 			spin_unlock_bh(&tgt->tgt_lock);
tgt               304 drivers/scsi/bnx2fc/bnx2fc_els.c 			spin_lock_bh(&tgt->tgt_lock);
tgt               383 drivers/scsi/bnx2fc/bnx2fc_els.c 	struct bnx2fc_rport *tgt;
tgt               402 drivers/scsi/bnx2fc/bnx2fc_els.c 	tgt = orig_io_req->tgt;
tgt               419 drivers/scsi/bnx2fc/bnx2fc_els.c 			spin_unlock_bh(&tgt->tgt_lock);
tgt               421 drivers/scsi/bnx2fc/bnx2fc_els.c 			spin_lock_bh(&tgt->tgt_lock);
tgt               481 drivers/scsi/bnx2fc/bnx2fc_els.c 			new_io_req = bnx2fc_cmd_alloc(tgt);
tgt               491 drivers/scsi/bnx2fc/bnx2fc_els.c 			rc = bnx2fc_post_io_req(tgt, new_io_req);
tgt               563 drivers/scsi/bnx2fc/bnx2fc_els.c 			spin_unlock_bh(&tgt->tgt_lock);
tgt               565 drivers/scsi/bnx2fc/bnx2fc_els.c 			spin_lock_bh(&tgt->tgt_lock);
tgt               585 drivers/scsi/bnx2fc/bnx2fc_els.c 	struct bnx2fc_rport *tgt = orig_io_req->tgt;
tgt               586 drivers/scsi/bnx2fc/bnx2fc_els.c 	struct fc_lport *lport = tgt->rdata->local_port;
tgt               588 drivers/scsi/bnx2fc/bnx2fc_els.c 	u32 sid = tgt->sid;
tgt               610 drivers/scsi/bnx2fc/bnx2fc_els.c 	rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec),
tgt               615 drivers/scsi/bnx2fc/bnx2fc_els.c 		spin_lock_bh(&tgt->tgt_lock);
tgt               617 drivers/scsi/bnx2fc/bnx2fc_els.c 		spin_unlock_bh(&tgt->tgt_lock);
tgt               627 drivers/scsi/bnx2fc/bnx2fc_els.c 	struct bnx2fc_rport *tgt = orig_io_req->tgt;
tgt               628 drivers/scsi/bnx2fc/bnx2fc_els.c 	struct fc_lport *lport = tgt->rdata->local_port;
tgt               654 drivers/scsi/bnx2fc/bnx2fc_els.c 	rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr),
tgt               659 drivers/scsi/bnx2fc/bnx2fc_els.c 		spin_lock_bh(&tgt->tgt_lock);
tgt               661 drivers/scsi/bnx2fc/bnx2fc_els.c 		spin_unlock_bh(&tgt->tgt_lock);
tgt               670 drivers/scsi/bnx2fc/bnx2fc_els.c static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
tgt               675 drivers/scsi/bnx2fc/bnx2fc_els.c 	struct fcoe_port *port = tgt->port;
tgt               677 drivers/scsi/bnx2fc/bnx2fc_els.c 	struct fc_rport *rport = tgt->rport;
tgt               700 drivers/scsi/bnx2fc/bnx2fc_els.c 	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
tgt               705 drivers/scsi/bnx2fc/bnx2fc_els.c 	els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS);
tgt               713 drivers/scsi/bnx2fc/bnx2fc_els.c 	els_req->tgt = tgt;
tgt               723 drivers/scsi/bnx2fc/bnx2fc_els.c 		spin_lock_bh(&tgt->tgt_lock);
tgt               725 drivers/scsi/bnx2fc/bnx2fc_els.c 		spin_unlock_bh(&tgt->tgt_lock);
tgt               744 drivers/scsi/bnx2fc/bnx2fc_els.c 		spin_lock_bh(&tgt->tgt_lock);
tgt               746 drivers/scsi/bnx2fc/bnx2fc_els.c 		spin_unlock_bh(&tgt->tgt_lock);
tgt               756 drivers/scsi/bnx2fc/bnx2fc_els.c 	did = tgt->rport->port_id;
tgt               757 drivers/scsi/bnx2fc/bnx2fc_els.c 	sid = tgt->sid;
tgt               779 drivers/scsi/bnx2fc/bnx2fc_els.c 	spin_lock_bh(&tgt->tgt_lock);
tgt               781 drivers/scsi/bnx2fc/bnx2fc_els.c 	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
tgt               786 drivers/scsi/bnx2fc/bnx2fc_els.c 		spin_unlock_bh(&tgt->tgt_lock);
tgt               792 drivers/scsi/bnx2fc/bnx2fc_els.c 	bnx2fc_add_2_sq(tgt, xid);
tgt               795 drivers/scsi/bnx2fc/bnx2fc_els.c 	list_add_tail(&els_req->link, &tgt->els_queue);
tgt               798 drivers/scsi/bnx2fc/bnx2fc_els.c 	bnx2fc_ring_doorbell(tgt);
tgt               799 drivers/scsi/bnx2fc/bnx2fc_els.c 	spin_unlock_bh(&tgt->tgt_lock);
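
bnx2fc_initiate_els() above follows a common submit pattern: BNX2FC_FLAG_SESSION_READY is tested once up front, the request is built, and then, under tgt->tgt_lock, the flag is re-tested before the command is added to the send queue, linked onto tgt->els_queue, and the doorbell is rung. A simplified userspace sketch of that check/lock/re-check ordering, with a mutex in place of the spinlock and printf stubs for the queueing and doorbell; all names here are illustrative, not the driver's types:

#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>

struct rport {
	pthread_mutex_t lock;	/* stands in for tgt->tgt_lock */
	bool session_ready;	/* stands in for BNX2FC_FLAG_SESSION_READY */
};

static void add_to_sq(int xid)  { printf("queued xid 0x%x\n", xid); }
static void ring_doorbell(void) { printf("doorbell rung\n"); }

static int initiate_els(struct rport *tgt, int xid)
{
	/* Cheap early check before doing any allocation or frame setup. */
	if (!tgt->session_ready) {
		fprintf(stderr, "session not ready\n");
		return -1;
	}

	/* ... request allocation and frame construction would go here ... */

	pthread_mutex_lock(&tgt->lock);
	/* Re-check under the lock: the session may have been torn down
	 * between the early check and the actual submit. */
	if (!tgt->session_ready) {
		pthread_mutex_unlock(&tgt->lock);
		fprintf(stderr, "session dropped before submit\n");
		return -1;
	}
	add_to_sq(xid);
	ring_doorbell();
	pthread_mutex_unlock(&tgt->lock);
	return 0;
}

int main(void)
{
	struct rport tgt = { PTHREAD_MUTEX_INITIALIZER, true };

	initiate_els(&tgt, 0x10);
	tgt.session_ready = false;
	initiate_els(&tgt, 0x11);	/* rejected by the early check */
	return 0;
}
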
tgt               192 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	struct bnx2fc_rport *tgt;
tgt               199 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		tgt = hba->tgt_ofld_list[i];
tgt               200 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		if (tgt) {
tgt               202 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 			if (tgt->port == port) {
tgt               204 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 				BNX2FC_TGT_DBG(tgt, "flush/cleanup\n");
tgt               205 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 				bnx2fc_flush_active_ios(tgt);
tgt               214 drivers/scsi/bnx2fc/bnx2fc_fcoe.c static int bnx2fc_xmit_l2_frame(struct bnx2fc_rport *tgt,
tgt               217 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	struct fc_rport_priv *rdata = tgt->rdata;
tgt               222 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	BNX2FC_TGT_DBG(tgt, "Xmit L2 frame rport = 0x%x, oxid = 0x%x, "
tgt               230 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 			rc = bnx2fc_send_adisc(tgt, fp);
tgt               233 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 			rc = bnx2fc_send_logo(tgt, fp);
tgt               236 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 			rc = bnx2fc_send_rls(tgt, fp);
tgt               243 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		BNX2FC_TGT_DBG(tgt, "ABTS frame\n");
tgt               245 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		BNX2FC_TGT_DBG(tgt, "Send L2 frame type 0x%x "
tgt               273 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	struct bnx2fc_rport	*tgt;
tgt               318 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	tgt = bnx2fc_tgt_lookup(port, ntoh24(fh->fh_d_id));
tgt               319 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 	if (tgt && (test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
tgt               324 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		rc = bnx2fc_xmit_l2_frame(tgt, fp);
tgt               663 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 				bnx2fc_process_cq_compl(work->tgt, work->wqe);
tgt              2658 drivers/scsi/bnx2fc/bnx2fc_fcoe.c 		bnx2fc_process_cq_compl(work->tgt, work->wqe);
tgt               172 drivers/scsi/bnx2fc/bnx2fc_hwi.c 					struct bnx2fc_rport *tgt)
tgt               183 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	struct fc_rport_priv *rdata = tgt->rdata;
tgt               184 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	struct fc_rport *rport = tgt->rport;
tgt               198 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	conn_id = (u16)tgt->fcoe_conn_id;
tgt               202 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
tgt               203 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);
tgt               205 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
tgt               206 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);
tgt               208 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
tgt               210 drivers/scsi/bnx2fc/bnx2fc_hwi.c 				(u32)((u64) tgt->rq_dma >> 32);
tgt               223 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
tgt               224 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);
tgt               226 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
tgt               227 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);
tgt               229 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
tgt               230 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);
tgt               254 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	tgt->sid = port_id;
tgt               273 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
tgt               274 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);
tgt               293 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	if (tgt->dev_type == TYPE_TAPE) {
tgt               332 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
tgt               333 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
tgt               335 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
tgt               337 drivers/scsi/bnx2fc/bnx2fc_hwi.c 					(u32)((u64) tgt->confq_pbl_dma >> 32);
tgt               357 drivers/scsi/bnx2fc/bnx2fc_hwi.c 					struct bnx2fc_rport *tgt)
tgt               365 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	struct fc_rport *rport = tgt->rport;
tgt               383 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
tgt               393 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	if (port_id != tgt->sid) {
tgt               395 drivers/scsi/bnx2fc/bnx2fc_hwi.c 				"sid = 0x%x\n", port_id, tgt->sid);
tgt               396 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		port_id = tgt->sid;
tgt               410 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	enbl_req.context_id = tgt->context_id;
tgt               411 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	enbl_req.conn_id = tgt->fcoe_conn_id;
tgt               427 drivers/scsi/bnx2fc/bnx2fc_hwi.c 				    struct bnx2fc_rport *tgt)
tgt               434 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	struct fc_rport *rport = tgt->rport;
tgt               445 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	disable_req.src_mac_addr_lo[0] =  tgt->src_addr[5];
tgt               446 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	disable_req.src_mac_addr_lo[1] =  tgt->src_addr[4];
tgt               447 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	disable_req.src_mac_addr_mid[0] =  tgt->src_addr[3];
tgt               448 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	disable_req.src_mac_addr_mid[1] =  tgt->src_addr[2];
tgt               449 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	disable_req.src_mac_addr_hi[0] =  tgt->src_addr[1];
tgt               450 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	disable_req.src_mac_addr_hi[1] =  tgt->src_addr[0];
tgt               459 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	port_id = tgt->sid;
tgt               469 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	disable_req.context_id = tgt->context_id;
tgt               470 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	disable_req.conn_id = tgt->fcoe_conn_id;
tgt               492 drivers/scsi/bnx2fc/bnx2fc_hwi.c 					struct bnx2fc_rport *tgt)
tgt               504 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	destroy_req.context_id = tgt->context_id;
tgt               505 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	destroy_req.conn_id = tgt->fcoe_conn_id;
tgt               548 drivers/scsi/bnx2fc/bnx2fc_hwi.c void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
tgt               552 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	struct fcoe_port *port = tgt->port;
tgt               566 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
tgt               570 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
tgt               626 drivers/scsi/bnx2fc/bnx2fc_hwi.c static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
tgt               636 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	struct bnx2fc_interface *interface = tgt->port->priv;
tgt               644 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
tgt               652 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		spin_lock_bh(&tgt->tgt_lock);
tgt               653 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
tgt               654 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		spin_unlock_bh(&tgt->tgt_lock);
tgt               663 drivers/scsi/bnx2fc/bnx2fc_hwi.c 				BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
tgt               668 drivers/scsi/bnx2fc/bnx2fc_hwi.c 				spin_lock_bh(&tgt->tgt_lock);
tgt               670 drivers/scsi/bnx2fc/bnx2fc_hwi.c 					   bnx2fc_get_next_rqe(tgt, 1);
tgt               671 drivers/scsi/bnx2fc/bnx2fc_hwi.c 				spin_unlock_bh(&tgt->tgt_lock);
tgt               677 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
tgt               682 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		spin_lock_bh(&tgt->tgt_lock);
tgt               683 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		bnx2fc_return_rqe(tgt, num_rq);
tgt               684 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		spin_unlock_bh(&tgt->tgt_lock);
tgt               692 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		spin_lock_bh(&tgt->tgt_lock);
tgt               695 drivers/scsi/bnx2fc/bnx2fc_hwi.c 			     bnx2fc_get_next_rqe(tgt, 1);
tgt               697 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
tgt               698 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
tgt               701 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
tgt               706 drivers/scsi/bnx2fc/bnx2fc_hwi.c 			BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
tgt               752 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn);
tgt               753 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		if (tgt->dev_type != TYPE_TAPE)
tgt               762 drivers/scsi/bnx2fc/bnx2fc_hwi.c 			BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
tgt               768 drivers/scsi/bnx2fc/bnx2fc_hwi.c 				spin_unlock_bh(&tgt->tgt_lock);
tgt               770 drivers/scsi/bnx2fc/bnx2fc_hwi.c 				spin_lock_bh(&tgt->tgt_lock);
tgt               799 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		bnx2fc_return_rqe(tgt, 1);
tgt               800 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		spin_unlock_bh(&tgt->tgt_lock);
tgt               808 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		spin_lock_bh(&tgt->tgt_lock);
tgt               811 drivers/scsi/bnx2fc/bnx2fc_hwi.c 			     bnx2fc_get_next_rqe(tgt, 1);
tgt               813 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
tgt               814 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
tgt               817 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
tgt               821 drivers/scsi/bnx2fc/bnx2fc_hwi.c 			BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
tgt               834 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn);
tgt               854 drivers/scsi/bnx2fc/bnx2fc_hwi.c 			BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n");
tgt               856 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		bnx2fc_return_rqe(tgt, 1);
tgt               857 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		spin_unlock_bh(&tgt->tgt_lock);
tgt               866 drivers/scsi/bnx2fc/bnx2fc_hwi.c void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
tgt               870 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	struct fcoe_port *port = tgt->port;
tgt               880 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	spin_lock_bh(&tgt->tgt_lock);
tgt               884 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		spin_unlock_bh(&tgt->tgt_lock);
tgt               900 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		spin_unlock_bh(&tgt->tgt_lock);
tgt               916 drivers/scsi/bnx2fc/bnx2fc_hwi.c 			spin_unlock_bh(&tgt->tgt_lock);
tgt               974 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	spin_unlock_bh(&tgt->tgt_lock);
tgt               977 drivers/scsi/bnx2fc/bnx2fc_hwi.c void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
tgt               979 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
tgt               983 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit <<
tgt               986 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	writel(cpu_to_le32(msg), tgt->ctx_base);
tgt               990 drivers/scsi/bnx2fc/bnx2fc_hwi.c static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
tgt               998 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	work->tgt = tgt;
tgt              1004 drivers/scsi/bnx2fc/bnx2fc_hwi.c static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
tgt              1013 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		work = bnx2fc_alloc_work(tgt, wqe);
tgt              1022 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	bnx2fc_process_cq_compl(tgt, wqe);
tgt              1025 drivers/scsi/bnx2fc/bnx2fc_hwi.c int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
tgt              1039 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	spin_lock_bh(&tgt->cq_lock);
tgt              1041 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	if (!tgt->cq) {
tgt              1043 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		spin_unlock_bh(&tgt->cq_lock);
tgt              1046 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	cq = tgt->cq;
tgt              1047 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	cq_cons = tgt->cq_cons_idx;
tgt              1051 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	       (tgt->cq_curr_toggle_bit <<
tgt              1057 drivers/scsi/bnx2fc/bnx2fc_hwi.c 			bnx2fc_process_unsol_compl(tgt, wqe);
tgt              1059 drivers/scsi/bnx2fc/bnx2fc_hwi.c 			bnx2fc_pending_work(tgt, wqe);
tgt              1063 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		tgt->cq_cons_idx++;
tgt              1066 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
tgt              1067 drivers/scsi/bnx2fc/bnx2fc_hwi.c 			tgt->cq_cons_idx = 0;
tgt              1069 drivers/scsi/bnx2fc/bnx2fc_hwi.c 			tgt->cq_curr_toggle_bit =
tgt              1070 drivers/scsi/bnx2fc/bnx2fc_hwi.c 				1 - tgt->cq_curr_toggle_bit;
tgt              1075 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		if (tgt->ctx_base)
tgt              1076 drivers/scsi/bnx2fc/bnx2fc_hwi.c 			bnx2fc_arm_cq(tgt);
tgt              1077 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		atomic_add(num_free_sqes, &tgt->free_sqes);
tgt              1079 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	spin_unlock_bh(&tgt->cq_lock);
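The bnx2fc_process_new_cqes() excerpts above (cq_cons_idx wrapping at BNX2FC_CQ_WQES_MAX, the cq_curr_toggle_bit flip, and the conditional bnx2fc_arm_cq() call) follow the usual toggle-bit convention for a completion ring. Below is a self-contained toy model of that convention only; it is not driver code, and every type and name in it is hypothetical.

#include <stddef.h>
#include <stdint.h>

/*
 * Toy model of a toggle-bit completion ring: an entry is accepted only
 * while its toggle bit equals the consumer's expected value, and the
 * expectation flips each time the consumer index wraps
 * (cf. cq_cons_idx / cq_curr_toggle_bit / BNX2FC_CQ_WQES_MAX above).
 */
struct toy_cq {
	const uint16_t *ring;	/* entries; bit 15 carries the toggle bit */
	size_t size;		/* ring size in entries */
	size_t cons_idx;	/* consumer index */
	unsigned int toggle;	/* expected toggle value, 0 or 1 */
};

static size_t toy_cq_drain(struct toy_cq *cq, void (*handle)(uint16_t wqe))
{
	size_t handled = 0;

	while (((cq->ring[cq->cons_idx] >> 15) & 1) == cq->toggle) {
		handle(cq->ring[cq->cons_idx] & 0x7fff);	/* low bits: the work id */
		handled++;
		if (++cq->cons_idx == cq->size) {
			cq->cons_idx = 0;
			cq->toggle ^= 1;	/* wrap: flip the expectation */
		}
	}
	return handled;		/* the caller would re-arm the ring here */
}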
tgt              1095 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];
tgt              1097 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	if (!tgt) {
tgt              1102 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	bnx2fc_process_new_cqes(tgt);
tgt              1117 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	struct bnx2fc_rport		*tgt;
tgt              1124 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	tgt = hba->tgt_ofld_list[conn_id];
tgt              1125 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	if (!tgt) {
tgt              1129 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
tgt              1131 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	interface = tgt->port->priv;
tgt              1140 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	tgt->context_id = context_id;
tgt              1146 drivers/scsi/bnx2fc/bnx2fc_hwi.c 			set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
tgt              1150 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
tgt              1153 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
tgt              1154 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	wake_up_interruptible(&tgt->ofld_wait);
tgt              1169 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	struct bnx2fc_rport		*tgt;
tgt              1176 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	tgt = hba->tgt_ofld_list[conn_id];
tgt              1177 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	if (!tgt) {
tgt              1182 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
tgt              1189 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	if (tgt->context_id != context_id) {
tgt              1193 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	interface = tgt->port->priv;
tgt              1200 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		set_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
tgt              1203 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
tgt              1204 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	wake_up_interruptible(&tgt->ofld_wait);
tgt              1211 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	struct bnx2fc_rport		*tgt;
tgt              1215 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	tgt = hba->tgt_ofld_list[conn_id];
tgt              1216 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	if (!tgt) {
tgt              1221 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);
tgt              1226 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		set_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags);
tgt              1227 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
tgt              1228 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		wake_up_interruptible(&tgt->upld_wait);
tgt              1231 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		BNX2FC_TGT_DBG(tgt, "disable successful\n");
tgt              1232 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
tgt              1233 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
tgt              1234 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
tgt              1235 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
tgt              1236 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		wake_up_interruptible(&tgt->upld_wait);
tgt              1243 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	struct bnx2fc_rport		*tgt;
tgt              1247 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	tgt = hba->tgt_ofld_list[conn_id];
tgt              1248 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	if (!tgt) {
tgt              1253 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);
tgt              1261 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		BNX2FC_TGT_DBG(tgt, "upload successful\n");
tgt              1262 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
tgt              1263 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
tgt              1264 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
tgt              1265 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		wake_up_interruptible(&tgt->upld_wait);
tgt              1375 drivers/scsi/bnx2fc/bnx2fc_hwi.c void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
tgt              1379 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	sqe = &tgt->sq[tgt->sq_prod_idx];
tgt              1383 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;
tgt              1386 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
tgt              1387 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		tgt->sq_prod_idx = 0;
tgt              1388 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
tgt              1392 drivers/scsi/bnx2fc/bnx2fc_hwi.c void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
tgt              1394 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
tgt              1398 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	sq_db->prod = tgt->sq_prod_idx |
tgt              1399 drivers/scsi/bnx2fc/bnx2fc_hwi.c 				(tgt->sq_curr_toggle_bit << 15);
tgt              1401 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	writel(cpu_to_le32(msg), tgt->ctx_base);
tgt              1405 drivers/scsi/bnx2fc/bnx2fc_hwi.c int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
tgt              1407 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	u32 context_id = tgt->context_id;
tgt              1408 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	struct fcoe_port *port = tgt->port;
tgt              1417 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
tgt              1418 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	if (!tgt->ctx_base)
tgt              1423 drivers/scsi/bnx2fc/bnx2fc_hwi.c char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
tgt              1425 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);
tgt              1427 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
tgt              1430 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	tgt->rq_cons_idx += num_items;
tgt              1432 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
tgt              1433 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;
tgt              1438 drivers/scsi/bnx2fc/bnx2fc_hwi.c void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
tgt              1441 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	u32 next_prod_idx = tgt->rq_prod_idx + num_items;
tgt              1446 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	tgt->rq_prod_idx = next_prod_idx;
tgt              1447 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	tgt->conn_db->rq_prod = tgt->rq_prod_idx;
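The unsolicited-completion paths indexed above always pair bnx2fc_get_next_rqe(tgt, 1) with a matching bnx2fc_return_rqe(tgt, 1) while tgt_lock is held, so the RQ producer index written to conn_db->rq_prod keeps pace with the consumer. A condensed, non-verbatim illustration using only names visible in the excerpts, with the frame-decoding step elided:

	spin_lock_bh(&tgt->tgt_lock);
	rq_data = bnx2fc_get_next_rqe(tgt, 1);	/* borrow one RQ buffer */
	/* ... decode the unsolicited error/warning entry here ... */
	bnx2fc_return_rqe(tgt, 1);		/* give the buffer back to the firmware */
	spin_unlock_bh(&tgt->tgt_lock);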
tgt              1456 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
tgt              1462 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	u32 context_id = tgt->context_id;
tgt              1538 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	struct bnx2fc_rport *tgt = io_req->tgt;
tgt              1539 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	u32 context_id = tgt->context_id;
tgt              1549 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	if (tgt->dev_type == TYPE_TAPE)
tgt              1575 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	struct bnx2fc_rport *tgt = io_req->tgt;
tgt              1614 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	if (tgt->dev_type == TYPE_TAPE)
tgt              1636 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	context_id = tgt->context_id;
tgt              1673 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	struct bnx2fc_rport *tgt = io_req->tgt;
tgt              1676 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	int dev_type = tgt->dev_type;
tgt              1761 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	context_id = tgt->context_id;
tgt                45 drivers/scsi/bnx2fc/bnx2fc_io.c 	struct bnx2fc_rport *tgt = io_req->tgt;
tgt                51 drivers/scsi/bnx2fc/bnx2fc_io.c 	spin_lock_bh(&tgt->tgt_lock);
tgt                58 drivers/scsi/bnx2fc/bnx2fc_io.c 		spin_unlock_bh(&tgt->tgt_lock);
tgt                87 drivers/scsi/bnx2fc/bnx2fc_io.c 				spin_unlock_bh(&tgt->tgt_lock);
tgt               108 drivers/scsi/bnx2fc/bnx2fc_io.c 				spin_unlock_bh(&tgt->tgt_lock);
tgt               125 drivers/scsi/bnx2fc/bnx2fc_io.c 				spin_unlock_bh(&tgt->tgt_lock);
tgt               159 drivers/scsi/bnx2fc/bnx2fc_io.c 	spin_unlock_bh(&tgt->tgt_lock);
tgt               394 drivers/scsi/bnx2fc/bnx2fc_io.c struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
tgt               396 drivers/scsi/bnx2fc/bnx2fc_io.c 	struct fcoe_port *port = tgt->port;
tgt               407 drivers/scsi/bnx2fc/bnx2fc_io.c 	max_sqes = tgt->max_sqes;
tgt               424 drivers/scsi/bnx2fc/bnx2fc_io.c 	free_sqes = atomic_read(&tgt->free_sqes);
tgt               426 drivers/scsi/bnx2fc/bnx2fc_io.c 	    (tgt->num_active_ios.counter  >= max_sqes) ||
tgt               428 drivers/scsi/bnx2fc/bnx2fc_io.c 		BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
tgt               430 drivers/scsi/bnx2fc/bnx2fc_io.c 			tgt->num_active_ios.counter, tgt->max_sqes);
tgt               443 drivers/scsi/bnx2fc/bnx2fc_io.c 	atomic_inc(&tgt->num_active_ios);
tgt               444 drivers/scsi/bnx2fc/bnx2fc_io.c 	atomic_dec(&tgt->free_sqes);
tgt               464 drivers/scsi/bnx2fc/bnx2fc_io.c struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
tgt               466 drivers/scsi/bnx2fc/bnx2fc_io.c 	struct fcoe_port *port = tgt->port;
tgt               483 drivers/scsi/bnx2fc/bnx2fc_io.c 	free_sqes = atomic_read(&tgt->free_sqes);
tgt               485 drivers/scsi/bnx2fc/bnx2fc_io.c 	    (tgt->num_active_ios.counter  >= max_sqes) ||
tgt               498 drivers/scsi/bnx2fc/bnx2fc_io.c 	atomic_inc(&tgt->num_active_ios);
tgt               499 drivers/scsi/bnx2fc/bnx2fc_io.c 	atomic_dec(&tgt->free_sqes);
tgt               541 drivers/scsi/bnx2fc/bnx2fc_io.c 	atomic_dec(&io_req->tgt->num_active_ios);
tgt               668 drivers/scsi/bnx2fc/bnx2fc_io.c 	struct bnx2fc_rport *tgt;
tgt               704 drivers/scsi/bnx2fc/bnx2fc_io.c 	tgt = (struct bnx2fc_rport *)&rp[1];
tgt               706 drivers/scsi/bnx2fc/bnx2fc_io.c 	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
tgt               712 drivers/scsi/bnx2fc/bnx2fc_io.c 	io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD);
tgt               725 drivers/scsi/bnx2fc/bnx2fc_io.c 	io_req->tgt = tgt;
tgt               732 drivers/scsi/bnx2fc/bnx2fc_io.c 		spin_lock_bh(&tgt->tgt_lock);
tgt               734 drivers/scsi/bnx2fc/bnx2fc_io.c 		spin_unlock_bh(&tgt->tgt_lock);
tgt               750 drivers/scsi/bnx2fc/bnx2fc_io.c 	sid = tgt->sid;
tgt               758 drivers/scsi/bnx2fc/bnx2fc_io.c 	BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid);
tgt               771 drivers/scsi/bnx2fc/bnx2fc_io.c 	spin_lock_bh(&tgt->tgt_lock);
tgt               772 drivers/scsi/bnx2fc/bnx2fc_io.c 	bnx2fc_add_2_sq(tgt, xid);
tgt               776 drivers/scsi/bnx2fc/bnx2fc_io.c 	list_add_tail(&io_req->link, &tgt->active_tm_queue);
tgt               782 drivers/scsi/bnx2fc/bnx2fc_io.c 	bnx2fc_ring_doorbell(tgt);
tgt               783 drivers/scsi/bnx2fc/bnx2fc_io.c 	spin_unlock_bh(&tgt->tgt_lock);
tgt               787 drivers/scsi/bnx2fc/bnx2fc_io.c 	spin_lock_bh(&tgt->tgt_lock);
tgt               799 drivers/scsi/bnx2fc/bnx2fc_io.c 		spin_unlock_bh(&tgt->tgt_lock);
tgt               802 drivers/scsi/bnx2fc/bnx2fc_io.c 		spin_lock_bh(&tgt->tgt_lock);
tgt               808 drivers/scsi/bnx2fc/bnx2fc_io.c 	spin_unlock_bh(&tgt->tgt_lock);
tgt               811 drivers/scsi/bnx2fc/bnx2fc_io.c 		BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n");
tgt               814 drivers/scsi/bnx2fc/bnx2fc_io.c 		BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n");
tgt               824 drivers/scsi/bnx2fc/bnx2fc_io.c 	struct bnx2fc_rport *tgt = io_req->tgt;
tgt               825 drivers/scsi/bnx2fc/bnx2fc_io.c 	struct fc_rport *rport = tgt->rport;
tgt               826 drivers/scsi/bnx2fc/bnx2fc_io.c 	struct fc_rport_priv *rdata = tgt->rdata;
tgt               847 drivers/scsi/bnx2fc/bnx2fc_io.c 	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
tgt               865 drivers/scsi/bnx2fc/bnx2fc_io.c 	abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
tgt               875 drivers/scsi/bnx2fc/bnx2fc_io.c 	abts_io_req->tgt = tgt;
tgt               888 drivers/scsi/bnx2fc/bnx2fc_io.c 	sid = tgt->sid;
tgt               921 drivers/scsi/bnx2fc/bnx2fc_io.c 	bnx2fc_add_2_sq(tgt, xid);
tgt               924 drivers/scsi/bnx2fc/bnx2fc_io.c 	bnx2fc_ring_doorbell(tgt);
tgt               933 drivers/scsi/bnx2fc/bnx2fc_io.c 	struct bnx2fc_rport *tgt = orig_io_req->tgt;
tgt               958 drivers/scsi/bnx2fc/bnx2fc_io.c 	seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
tgt               968 drivers/scsi/bnx2fc/bnx2fc_io.c 	seq_clnp_req->tgt = tgt;
tgt               990 drivers/scsi/bnx2fc/bnx2fc_io.c 	bnx2fc_add_2_sq(tgt, xid);
tgt               993 drivers/scsi/bnx2fc/bnx2fc_io.c 	bnx2fc_ring_doorbell(tgt);
tgt              1000 drivers/scsi/bnx2fc/bnx2fc_io.c 	struct bnx2fc_rport *tgt = io_req->tgt;
tgt              1016 drivers/scsi/bnx2fc/bnx2fc_io.c 	cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
tgt              1026 drivers/scsi/bnx2fc/bnx2fc_io.c 	cleanup_io_req->tgt = tgt;
tgt              1045 drivers/scsi/bnx2fc/bnx2fc_io.c 	bnx2fc_add_2_sq(tgt, xid);
tgt              1051 drivers/scsi/bnx2fc/bnx2fc_io.c 	bnx2fc_ring_doorbell(tgt);
tgt              1085 drivers/scsi/bnx2fc/bnx2fc_io.c 	struct bnx2fc_rport *tgt = io_req->tgt;
tgt              1092 drivers/scsi/bnx2fc/bnx2fc_io.c 	spin_unlock_bh(&tgt->tgt_lock);
tgt              1111 drivers/scsi/bnx2fc/bnx2fc_io.c 	spin_lock_bh(&tgt->tgt_lock);
tgt              1130 drivers/scsi/bnx2fc/bnx2fc_io.c 	struct bnx2fc_rport *tgt;
tgt              1144 drivers/scsi/bnx2fc/bnx2fc_io.c 	tgt = (struct bnx2fc_rport *)&rp[1];
tgt              1146 drivers/scsi/bnx2fc/bnx2fc_io.c 	BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");
tgt              1148 drivers/scsi/bnx2fc/bnx2fc_io.c 	spin_lock_bh(&tgt->tgt_lock);
tgt              1153 drivers/scsi/bnx2fc/bnx2fc_io.c 		spin_unlock_bh(&tgt->tgt_lock);
tgt              1162 drivers/scsi/bnx2fc/bnx2fc_io.c 	BUG_ON(tgt != io_req->tgt);
tgt              1170 drivers/scsi/bnx2fc/bnx2fc_io.c 	if (tgt->flush_in_prog) {
tgt              1174 drivers/scsi/bnx2fc/bnx2fc_io.c 		spin_unlock_bh(&tgt->tgt_lock);
tgt              1185 drivers/scsi/bnx2fc/bnx2fc_io.c 		spin_unlock_bh(&tgt->tgt_lock);
tgt              1199 drivers/scsi/bnx2fc/bnx2fc_io.c 	list_add_tail(&io_req->link, &tgt->io_retire_queue);
tgt              1235 drivers/scsi/bnx2fc/bnx2fc_io.c 		spin_unlock_bh(&tgt->tgt_lock);
tgt              1237 drivers/scsi/bnx2fc/bnx2fc_io.c 		spin_lock_bh(&tgt->tgt_lock);
tgt              1241 drivers/scsi/bnx2fc/bnx2fc_io.c 	spin_unlock_bh(&tgt->tgt_lock);
tgt              1250 drivers/scsi/bnx2fc/bnx2fc_io.c 	spin_lock_bh(&tgt->tgt_lock);
tgt              1280 drivers/scsi/bnx2fc/bnx2fc_io.c 	spin_unlock_bh(&tgt->tgt_lock);
tgt              1293 drivers/scsi/bnx2fc/bnx2fc_io.c 	struct bnx2fc_rport *tgt = orig_io_req->tgt;
tgt              1305 drivers/scsi/bnx2fc/bnx2fc_io.c 	spin_unlock_bh(&tgt->tgt_lock);
tgt              1307 drivers/scsi/bnx2fc/bnx2fc_io.c 	spin_lock_bh(&tgt->tgt_lock);
tgt              1358 drivers/scsi/bnx2fc/bnx2fc_io.c 	struct bnx2fc_rport *tgt = io_req->tgt;
tgt              1444 drivers/scsi/bnx2fc/bnx2fc_io.c 			list_add_tail(&io_req->link, &tgt->io_retire_queue);
tgt              1454 drivers/scsi/bnx2fc/bnx2fc_io.c 	struct bnx2fc_rport *tgt = io_req->tgt;
tgt              1466 drivers/scsi/bnx2fc/bnx2fc_io.c 	list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
tgt              1467 drivers/scsi/bnx2fc/bnx2fc_io.c 		BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
tgt              1491 drivers/scsi/bnx2fc/bnx2fc_io.c 	struct bnx2fc_rport *tgt = io_req->tgt;
tgt              1501 drivers/scsi/bnx2fc/bnx2fc_io.c 	list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
tgt              1502 drivers/scsi/bnx2fc/bnx2fc_io.c 		BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
tgt              1761 drivers/scsi/bnx2fc/bnx2fc_io.c 	struct bnx2fc_rport *tgt = io_req->tgt;
tgt              1812 drivers/scsi/bnx2fc/bnx2fc_io.c 		rq_data = bnx2fc_get_next_rqe(tgt, 1);
tgt              1817 drivers/scsi/bnx2fc/bnx2fc_io.c 				dummy = bnx2fc_get_next_rqe(tgt, 1);
tgt              1842 drivers/scsi/bnx2fc/bnx2fc_io.c 			bnx2fc_return_rqe(tgt, 1);
tgt              1860 drivers/scsi/bnx2fc/bnx2fc_io.c 	struct bnx2fc_rport *tgt;
tgt              1878 drivers/scsi/bnx2fc/bnx2fc_io.c 	tgt = (struct bnx2fc_rport *)&rp[1];
tgt              1880 drivers/scsi/bnx2fc/bnx2fc_io.c 	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
tgt              1888 drivers/scsi/bnx2fc/bnx2fc_io.c 	if (tgt->retry_delay_timestamp) {
tgt              1889 drivers/scsi/bnx2fc/bnx2fc_io.c 		if (time_after(jiffies, tgt->retry_delay_timestamp)) {
tgt              1890 drivers/scsi/bnx2fc/bnx2fc_io.c 			tgt->retry_delay_timestamp = 0;
tgt              1898 drivers/scsi/bnx2fc/bnx2fc_io.c 	spin_lock_bh(&tgt->tgt_lock);
tgt              1900 drivers/scsi/bnx2fc/bnx2fc_io.c 	io_req = bnx2fc_cmd_alloc(tgt);
tgt              1907 drivers/scsi/bnx2fc/bnx2fc_io.c 	if (bnx2fc_post_io_req(tgt, io_req)) {
tgt              1914 drivers/scsi/bnx2fc/bnx2fc_io.c 	spin_unlock_bh(&tgt->tgt_lock);
tgt              1924 drivers/scsi/bnx2fc/bnx2fc_io.c 	struct bnx2fc_rport *tgt = io_req->tgt;
tgt              1964 drivers/scsi/bnx2fc/bnx2fc_io.c 		list_add_tail(&io_req->link, &tgt->io_retire_queue);
tgt              2014 drivers/scsi/bnx2fc/bnx2fc_io.c 					tgt->retry_delay_timestamp = jiffies +
tgt              2032 drivers/scsi/bnx2fc/bnx2fc_io.c int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
tgt              2038 drivers/scsi/bnx2fc/bnx2fc_io.c 	struct fcoe_port *port = tgt->port;
tgt              2051 drivers/scsi/bnx2fc/bnx2fc_io.c 	io_req->tgt = tgt;
tgt              2087 drivers/scsi/bnx2fc/bnx2fc_io.c 	if (tgt->flush_in_prog) {
tgt              2093 drivers/scsi/bnx2fc/bnx2fc_io.c 	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
tgt              2100 drivers/scsi/bnx2fc/bnx2fc_io.c 	if (tgt->io_timeout)
tgt              2103 drivers/scsi/bnx2fc/bnx2fc_io.c 	bnx2fc_add_2_sq(tgt, xid);
tgt              2109 drivers/scsi/bnx2fc/bnx2fc_io.c 	list_add_tail(&io_req->link, &tgt->active_cmd_queue);
tgt              2112 drivers/scsi/bnx2fc/bnx2fc_io.c 	bnx2fc_ring_doorbell(tgt);
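The bnx2fc_initiate_tmf() and bnx2fc_post_io_req() excerpts above share one submission discipline: everything between placing the WQE and ringing the doorbell happens under tgt_lock. A condensed, non-verbatim sketch of that sequence, using only helpers and fields shown above:

	spin_lock_bh(&tgt->tgt_lock);
	bnx2fc_add_2_sq(tgt, xid);		/* place the WQE on the send queue */
	list_add_tail(&io_req->link, &tgt->active_cmd_queue);	/* track the outstanding request */
	bnx2fc_ring_doorbell(tgt);		/* tell the firmware new work is queued */
	spin_unlock_bh(&tgt->tgt_lock);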
tgt                19 drivers/scsi/bnx2fc/bnx2fc_tgt.c static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
tgt                23 drivers/scsi/bnx2fc/bnx2fc_tgt.c 				struct bnx2fc_rport *tgt);
tgt                25 drivers/scsi/bnx2fc/bnx2fc_tgt.c 			      struct bnx2fc_rport *tgt);
tgt                27 drivers/scsi/bnx2fc/bnx2fc_tgt.c 			      struct bnx2fc_rport *tgt);
tgt                33 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	struct bnx2fc_rport *tgt = from_timer(tgt, t, upld_timer);
tgt                35 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
tgt                37 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
tgt                38 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
tgt                39 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
tgt                40 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	wake_up_interruptible(&tgt->upld_wait);
tgt                46 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	struct bnx2fc_rport *tgt = from_timer(tgt, t, ofld_timer);
tgt                48 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n");
tgt                60 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
tgt                61 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
tgt                62 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
tgt                63 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	wake_up_interruptible(&tgt->ofld_wait);
tgt                66 drivers/scsi/bnx2fc/bnx2fc_tgt.c static void bnx2fc_ofld_wait(struct bnx2fc_rport *tgt)
tgt                68 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	timer_setup(&tgt->ofld_timer, bnx2fc_ofld_timer, 0);
tgt                69 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
tgt                71 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	wait_event_interruptible(tgt->ofld_wait,
tgt                74 drivers/scsi/bnx2fc/bnx2fc_tgt.c 				  &tgt->flags)));
tgt                77 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	del_timer_sync(&tgt->ofld_timer);
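bnx2fc_ofld_wait() and bnx2fc_upld_wait() above use the same bounded-wait idiom: a one-shot timer whose callback sets the very completion flag the caller sleeps on, so either the firmware completion or the BNX2FC_FW_TIMEOUT expiry wakes the waiter. Condensed (non-verbatim) from the offload variant; the condition inside wait_event_interruptible() is truncated in the excerpt and is assumed here to test BNX2FC_FLAG_OFLD_REQ_CMPL, consistent with the timer callback above:

	timer_setup(&tgt->ofld_timer, bnx2fc_ofld_timer, 0);
	mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
	wait_event_interruptible(tgt->ofld_wait,
				 test_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags));
	del_timer_sync(&tgt->ofld_timer);	/* whichever path won, stop the timer */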
tgt                81 drivers/scsi/bnx2fc/bnx2fc_tgt.c 					struct bnx2fc_rport *tgt,
tgt                92 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	rval = bnx2fc_init_tgt(tgt, port, rdata);
tgt               100 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	rval = bnx2fc_alloc_session_resc(hba, tgt);
tgt               112 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
tgt               113 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	rval = bnx2fc_send_session_ofld_req(port, tgt);
tgt               123 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	bnx2fc_ofld_wait(tgt);
tgt               125 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
tgt               127 drivers/scsi/bnx2fc/bnx2fc_tgt.c 				       &tgt->flags)) {
tgt               128 drivers/scsi/bnx2fc/bnx2fc_tgt.c 			BNX2FC_TGT_DBG(tgt, "ctx_alloc_failure, "
tgt               139 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (bnx2fc_map_doorbell(tgt)) {
tgt               143 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
tgt               144 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	rval = bnx2fc_send_session_enable_req(port, tgt);
tgt               149 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	bnx2fc_ofld_wait(tgt);
tgt               150 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)))
tgt               156 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n");
tgt               157 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
tgt               159 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	bnx2fc_free_session_resc(hba, tgt);
tgt               161 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (tgt->fcoe_conn_id != -1)
tgt               162 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
tgt               166 drivers/scsi/bnx2fc/bnx2fc_tgt.c void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
tgt               172 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	BNX2FC_TGT_DBG(tgt, "Entered flush_active_ios - %d\n",
tgt               173 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		       tgt->num_active_ios.counter);
tgt               175 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	spin_lock_bh(&tgt->tgt_lock);
tgt               176 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->flush_in_prog = 1;
tgt               178 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	list_for_each_entry_safe(io_req, tmp, &tgt->active_cmd_queue, link) {
tgt               200 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags))
tgt               208 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	list_for_each_entry_safe(io_req, tmp, &tgt->active_tm_queue, link) {
tgt               217 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	list_for_each_entry_safe(io_req, tmp, &tgt->els_queue, link) {
tgt               234 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags))
tgt               242 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	list_for_each_entry_safe(io_req, tmp, &tgt->io_retire_queue, link) {
tgt               263 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	BNX2FC_TGT_DBG(tgt, "IOs flushed = %d\n", i);
tgt               265 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	spin_unlock_bh(&tgt->tgt_lock);
tgt               267 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	while ((tgt->num_active_ios.counter != 0) && (i++ < BNX2FC_WAIT_CNT))
tgt               269 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (tgt->num_active_ios.counter != 0)
tgt               272 drivers/scsi/bnx2fc/bnx2fc_tgt.c 			tgt->rdata->ids.port_id, tgt->num_active_ios.counter);
tgt               273 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	spin_lock_bh(&tgt->tgt_lock);
tgt               274 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->flush_in_prog = 0;
tgt               275 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	spin_unlock_bh(&tgt->tgt_lock);
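The flush protocol indexed above is cooperative: bnx2fc_flush_active_ios() raises flush_in_prog under tgt_lock while it walks the per-target queues, and the submission and abort paths (see the flush_in_prog checks in the bnx2fc_io.c excerpts) bail out while it is set. A non-verbatim skeleton, with the queue walks and the bounded num_active_ios drain elided:

	spin_lock_bh(&tgt->tgt_lock);
	tgt->flush_in_prog = 1;
	/* ... walk active_cmd_queue, active_tm_queue, els_queue, io_retire_queue ... */
	spin_unlock_bh(&tgt->tgt_lock);
	/* ... wait, bounded by BNX2FC_WAIT_CNT, for num_active_ios to reach zero ... */
	spin_lock_bh(&tgt->tgt_lock);
	tgt->flush_in_prog = 0;
	spin_unlock_bh(&tgt->tgt_lock);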
tgt               278 drivers/scsi/bnx2fc/bnx2fc_tgt.c static void bnx2fc_upld_wait(struct bnx2fc_rport *tgt)
tgt               280 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	timer_setup(&tgt->upld_timer, bnx2fc_upld_timer, 0);
tgt               281 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
tgt               282 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	wait_event_interruptible(tgt->upld_wait,
tgt               285 drivers/scsi/bnx2fc/bnx2fc_tgt.c 				  &tgt->flags)));
tgt               288 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	del_timer_sync(&tgt->upld_timer);
tgt               292 drivers/scsi/bnx2fc/bnx2fc_tgt.c 					struct bnx2fc_rport *tgt)
tgt               297 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n",
tgt               298 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		tgt->num_active_ios.counter);
tgt               304 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
tgt               305 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	bnx2fc_send_session_disable_req(port, tgt);
tgt               311 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	BNX2FC_TGT_DBG(tgt, "waiting for disable compl\n");
tgt               312 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	bnx2fc_upld_wait(tgt);
tgt               318 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	BNX2FC_TGT_DBG(tgt, "flush/upload - disable wait flags = 0x%lx\n",
tgt               319 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		       tgt->flags);
tgt               320 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	bnx2fc_flush_active_ios(tgt);
tgt               323 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (test_bit(BNX2FC_FLAG_DISABLED, &tgt->flags)) {
tgt               324 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		BNX2FC_TGT_DBG(tgt, "send destroy req\n");
tgt               325 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
tgt               326 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		bnx2fc_send_session_destroy_req(hba, tgt);
tgt               329 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		bnx2fc_upld_wait(tgt);
tgt               331 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		if (!(test_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags)))
tgt               334 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		BNX2FC_TGT_DBG(tgt, "destroy wait complete flags = 0x%lx\n",
tgt               335 drivers/scsi/bnx2fc/bnx2fc_tgt.c 			tgt->flags);
tgt               337 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	} else if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) {
tgt               346 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	bnx2fc_free_session_resc(hba, tgt);
tgt               347 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
tgt               350 drivers/scsi/bnx2fc/bnx2fc_tgt.c static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
tgt               358 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
tgt               359 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
tgt               361 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->rport = rport;
tgt               362 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->rdata = rdata;
tgt               363 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->port = port;
tgt               366 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		BNX2FC_TGT_DBG(tgt, "exceeded max sessions. logoff this tgt\n");
tgt               367 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		tgt->fcoe_conn_id = -1;
tgt               371 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->fcoe_conn_id = bnx2fc_alloc_conn_id(hba, tgt);
tgt               372 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (tgt->fcoe_conn_id == -1)
tgt               375 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	BNX2FC_TGT_DBG(tgt, "init_tgt - conn_id = 0x%x\n", tgt->fcoe_conn_id);
tgt               377 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->max_sqes = BNX2FC_SQ_WQES_MAX;
tgt               378 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->max_rqes = BNX2FC_RQ_WQES_MAX;
tgt               379 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->max_cqes = BNX2FC_CQ_WQES_MAX;
tgt               380 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	atomic_set(&tgt->free_sqes, BNX2FC_SQ_WQES_MAX);
tgt               383 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->sq_curr_toggle_bit = 1;
tgt               384 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->cq_curr_toggle_bit = 1;
tgt               385 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->sq_prod_idx = 0;
tgt               386 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->cq_cons_idx = 0;
tgt               387 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->rq_prod_idx = 0x8000;
tgt               388 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->rq_cons_idx = 0;
tgt               389 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	atomic_set(&tgt->num_active_ios, 0);
tgt               390 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->retry_delay_timestamp = 0;
tgt               395 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		tgt->dev_type = TYPE_TAPE;
tgt               396 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		tgt->io_timeout = 0; /* use default ULP timeout */
tgt               398 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		tgt->dev_type = TYPE_DISK;
tgt               399 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		tgt->io_timeout = BNX2FC_IO_TIMEOUT;
tgt               414 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	spin_lock_init(&tgt->tgt_lock);
tgt               415 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	spin_lock_init(&tgt->cq_lock);
tgt               418 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	INIT_LIST_HEAD(&tgt->active_cmd_queue);
tgt               421 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	INIT_LIST_HEAD(&tgt->io_retire_queue);
tgt               423 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	INIT_LIST_HEAD(&tgt->els_queue);
tgt               426 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	INIT_LIST_HEAD(&tgt->active_tm_queue);
tgt               428 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	init_waitqueue_head(&tgt->ofld_wait);
tgt               429 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	init_waitqueue_head(&tgt->upld_wait);
tgt               448 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	struct bnx2fc_rport *tgt;
tgt               489 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		tgt = (struct bnx2fc_rport *)&rp[1];
tgt               492 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) {
tgt               493 drivers/scsi/bnx2fc/bnx2fc_tgt.c 			BNX2FC_TGT_DBG(tgt, "already offloaded\n");
tgt               502 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		bnx2fc_offload_session(port, tgt, rdata);
tgt               504 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		BNX2FC_TGT_DBG(tgt, "OFFLOAD num_ofld_sess = %d\n",
tgt               507 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) {
tgt               509 drivers/scsi/bnx2fc/bnx2fc_tgt.c 			BNX2FC_TGT_DBG(tgt, "sess offloaded\n");
tgt               513 drivers/scsi/bnx2fc/bnx2fc_tgt.c 			set_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
tgt               520 drivers/scsi/bnx2fc/bnx2fc_tgt.c 			BNX2FC_TGT_DBG(tgt, "Port is being logged off as "
tgt               543 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		tgt = (struct bnx2fc_rport *)&rp[1];
tgt               545 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags))) {
tgt               549 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
tgt               551 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		bnx2fc_upload_session(port, tgt);
tgt               553 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		BNX2FC_TGT_DBG(tgt, "UPLOAD num_ofld_sess = %d\n",
tgt               583 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	struct bnx2fc_rport *tgt;
tgt               588 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		tgt = hba->tgt_ofld_list[i];
tgt               589 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		if ((tgt) && (tgt->port == port)) {
tgt               590 drivers/scsi/bnx2fc/bnx2fc_tgt.c 			rdata = tgt->rdata;
tgt               593 drivers/scsi/bnx2fc/bnx2fc_tgt.c 					BNX2FC_TGT_DBG(tgt, "rport "
tgt               595 drivers/scsi/bnx2fc/bnx2fc_tgt.c 					return tgt;
tgt               597 drivers/scsi/bnx2fc/bnx2fc_tgt.c 					BNX2FC_TGT_DBG(tgt, "rport 0x%x "
tgt               616 drivers/scsi/bnx2fc/bnx2fc_tgt.c 				struct bnx2fc_rport *tgt)
tgt               645 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	hba->tgt_ofld_list[conn_id] = tgt;
tgt               646 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->fcoe_conn_id = conn_id;
tgt               664 drivers/scsi/bnx2fc/bnx2fc_tgt.c 					struct bnx2fc_rport *tgt)
tgt               671 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE;
tgt               672 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
tgt               675 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
tgt               676 drivers/scsi/bnx2fc/bnx2fc_tgt.c 				     &tgt->sq_dma, GFP_KERNEL);
tgt               677 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (!tgt->sq) {
tgt               679 drivers/scsi/bnx2fc/bnx2fc_tgt.c 			tgt->sq_mem_size);
tgt               684 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
tgt               685 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
tgt               688 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
tgt               689 drivers/scsi/bnx2fc/bnx2fc_tgt.c 				     &tgt->cq_dma, GFP_KERNEL);
tgt               690 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (!tgt->cq) {
tgt               692 drivers/scsi/bnx2fc/bnx2fc_tgt.c 			tgt->cq_mem_size);
tgt               697 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
tgt               698 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
tgt               701 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
tgt               702 drivers/scsi/bnx2fc/bnx2fc_tgt.c 				     &tgt->rq_dma, GFP_KERNEL);
tgt               703 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (!tgt->rq) {
tgt               705 drivers/scsi/bnx2fc/bnx2fc_tgt.c 			tgt->rq_mem_size);
tgt               709 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
tgt               710 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
tgt               713 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
tgt               714 drivers/scsi/bnx2fc/bnx2fc_tgt.c 					 &tgt->rq_pbl_dma, GFP_KERNEL);
tgt               715 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (!tgt->rq_pbl) {
tgt               717 drivers/scsi/bnx2fc/bnx2fc_tgt.c 			tgt->rq_pbl_size);
tgt               721 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE;
tgt               722 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	page = tgt->rq_dma;
tgt               723 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	pbl = (u32 *)tgt->rq_pbl;
tgt               734 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE;
tgt               735 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
tgt               738 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev,
tgt               739 drivers/scsi/bnx2fc/bnx2fc_tgt.c 					tgt->xferq_mem_size, &tgt->xferq_dma,
tgt               741 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (!tgt->xferq) {
tgt               743 drivers/scsi/bnx2fc/bnx2fc_tgt.c 			tgt->xferq_mem_size);
tgt               748 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
tgt               749 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
tgt               752 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->confq = dma_alloc_coherent(&hba->pcidev->dev,
tgt               753 drivers/scsi/bnx2fc/bnx2fc_tgt.c 					tgt->confq_mem_size, &tgt->confq_dma,
tgt               755 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (!tgt->confq) {
tgt               757 drivers/scsi/bnx2fc/bnx2fc_tgt.c 			tgt->confq_mem_size);
tgt               761 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->confq_pbl_size =
tgt               762 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		(tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
tgt               763 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->confq_pbl_size =
tgt               764 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		(tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
tgt               766 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
tgt               767 drivers/scsi/bnx2fc/bnx2fc_tgt.c 					    tgt->confq_pbl_size,
tgt               768 drivers/scsi/bnx2fc/bnx2fc_tgt.c 					    &tgt->confq_pbl_dma, GFP_KERNEL);
tgt               769 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (!tgt->confq_pbl) {
tgt               771 drivers/scsi/bnx2fc/bnx2fc_tgt.c 			tgt->confq_pbl_size);
tgt               775 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE;
tgt               776 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	page = tgt->confq_dma;
tgt               777 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	pbl = (u32 *)tgt->confq_pbl;
tgt               788 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);
tgt               790 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
tgt               791 drivers/scsi/bnx2fc/bnx2fc_tgt.c 					  tgt->conn_db_mem_size,
tgt               792 drivers/scsi/bnx2fc/bnx2fc_tgt.c 					  &tgt->conn_db_dma, GFP_KERNEL);
tgt               793 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (!tgt->conn_db) {
tgt               795 drivers/scsi/bnx2fc/bnx2fc_tgt.c 						tgt->conn_db_mem_size);
tgt               801 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE;
tgt               802 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
tgt               805 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
tgt               806 drivers/scsi/bnx2fc/bnx2fc_tgt.c 				      &tgt->lcq_dma, GFP_KERNEL);
tgt               808 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (!tgt->lcq) {
tgt               810 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		       tgt->lcq_mem_size);
tgt               814 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->conn_db->rq_prod = 0x8000;
tgt               831 drivers/scsi/bnx2fc/bnx2fc_tgt.c 						struct bnx2fc_rport *tgt)
tgt               835 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n");
tgt               837 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	spin_lock_bh(&tgt->cq_lock);
tgt               838 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	ctx_base_ptr = tgt->ctx_base;
tgt               839 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->ctx_base = NULL;
tgt               842 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (tgt->lcq) {
tgt               843 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
tgt               844 drivers/scsi/bnx2fc/bnx2fc_tgt.c 				    tgt->lcq, tgt->lcq_dma);
tgt               845 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		tgt->lcq = NULL;
tgt               848 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (tgt->conn_db) {
tgt               849 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		dma_free_coherent(&hba->pcidev->dev, tgt->conn_db_mem_size,
tgt               850 drivers/scsi/bnx2fc/bnx2fc_tgt.c 				    tgt->conn_db, tgt->conn_db_dma);
tgt               851 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		tgt->conn_db = NULL;
tgt               854 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (tgt->confq_pbl) {
tgt               855 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		dma_free_coherent(&hba->pcidev->dev, tgt->confq_pbl_size,
tgt               856 drivers/scsi/bnx2fc/bnx2fc_tgt.c 				    tgt->confq_pbl, tgt->confq_pbl_dma);
tgt               857 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		tgt->confq_pbl = NULL;
tgt               859 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (tgt->confq) {
tgt               860 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		dma_free_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
tgt               861 drivers/scsi/bnx2fc/bnx2fc_tgt.c 				    tgt->confq, tgt->confq_dma);
tgt               862 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		tgt->confq = NULL;
tgt               865 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (tgt->xferq) {
tgt               866 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		dma_free_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
tgt               867 drivers/scsi/bnx2fc/bnx2fc_tgt.c 				    tgt->xferq, tgt->xferq_dma);
tgt               868 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		tgt->xferq = NULL;
tgt               871 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (tgt->rq_pbl) {
tgt               872 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		dma_free_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
tgt               873 drivers/scsi/bnx2fc/bnx2fc_tgt.c 				    tgt->rq_pbl, tgt->rq_pbl_dma);
tgt               874 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		tgt->rq_pbl = NULL;
tgt               876 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (tgt->rq) {
tgt               877 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		dma_free_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
tgt               878 drivers/scsi/bnx2fc/bnx2fc_tgt.c 				    tgt->rq, tgt->rq_dma);
tgt               879 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		tgt->rq = NULL;
tgt               882 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (tgt->cq) {
tgt               883 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
tgt               884 drivers/scsi/bnx2fc/bnx2fc_tgt.c 				    tgt->cq, tgt->cq_dma);
tgt               885 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		tgt->cq = NULL;
tgt               888 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (tgt->sq) {
tgt               889 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
tgt               890 drivers/scsi/bnx2fc/bnx2fc_tgt.c 				    tgt->sq, tgt->sq_dma);
tgt               891 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		tgt->sq = NULL;
tgt               893 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	spin_unlock_bh(&tgt->cq_lock);
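Every queue set up in bnx2fc_alloc_session_resc() above first rounds its size up with the same (len + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK idiom before calling dma_alloc_coherent(), and teardown frees it symmetrically with dma_free_coherent(). Assuming CNIC_PAGE_MASK is the usual complement mask of a power-of-two page size, the idiom is plain power-of-two round-up; a self-contained illustration with hypothetical names:

#include <stddef.h>

/* Round len up to the next multiple of page; page must be a power of
 * two for the mask arithmetic to be exact. */
static size_t round_up_pow2(size_t len, size_t page)
{
	return (len + (page - 1)) & ~(page - 1);
}

/* e.g. round_up_pow2(100, 4096) == 4096, round_up_pow2(4096, 4096) == 4096 */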
tgt               301 drivers/scsi/csiostor/csio_mb.c 		    bool wr, bool init, bool tgt, bool cofld,
tgt               325 drivers/scsi/csiostor/csio_mb.c 	if (tgt)
tgt               170 drivers/scsi/dc395x.c #define MK_RES(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt))
tgt               171 drivers/scsi/dc395x.c #define MK_RES_LNX(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt)<<1)
tgt               173 drivers/scsi/dc395x.c #define SET_RES_TARGET(who,tgt) { who &= ~RES_TARGET; who |= (int)(tgt); }
tgt               174 drivers/scsi/dc395x.c #define SET_RES_TARGET_LNX(who,tgt) { who &= ~RES_TARGET_LNX; who |= (int)(tgt) << 1; }
tgt               492 drivers/scsi/esp_scsi.c static void esp_write_tgt_config3(struct esp *esp, int tgt)
tgt               495 drivers/scsi/esp_scsi.c 		u8 val = esp->target[tgt].esp_config3;
tgt               504 drivers/scsi/esp_scsi.c static void esp_write_tgt_sync(struct esp *esp, int tgt)
tgt               506 drivers/scsi/esp_scsi.c 	u8 off = esp->target[tgt].esp_offset;
tgt               507 drivers/scsi/esp_scsi.c 	u8 per = esp->target[tgt].esp_period;
tgt               658 drivers/scsi/esp_scsi.c 	int tgt, lun;
tgt               661 drivers/scsi/esp_scsi.c 	tgt = dev->id;
tgt               667 drivers/scsi/esp_scsi.c 				  tgt, lun);
tgt               688 drivers/scsi/esp_scsi.c 	val = tgt;
tgt               693 drivers/scsi/esp_scsi.c 	esp_write_tgt_sync(esp, tgt);
tgt               694 drivers/scsi/esp_scsi.c 	esp_write_tgt_config3(esp, tgt);
tgt               739 drivers/scsi/esp_scsi.c 	int tgt, lun, i;
tgt               758 drivers/scsi/esp_scsi.c 	tgt = dev->id;
tgt               760 drivers/scsi/esp_scsi.c 	tp = &esp->target[tgt];
tgt               855 drivers/scsi/esp_scsi.c 	val = tgt;
tgt               860 drivers/scsi/esp_scsi.c 	esp_write_tgt_sync(esp, tgt);
tgt               861 drivers/scsi/esp_scsi.c 	esp_write_tgt_config3(esp, tgt);
tgt               866 drivers/scsi/esp_scsi.c 		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
tgt               899 drivers/scsi/esp_scsi.c 	int tgt = dev->id;
tgt               929 drivers/scsi/esp_scsi.c 			       esp->host->unique_id, tgt, lun);
tgt              1459 drivers/scsi/esp_scsi.c 	int tgt;
tgt              1461 drivers/scsi/esp_scsi.c 	tgt = cmd->device->id;
tgt              1462 drivers/scsi/esp_scsi.c 	tp = &esp->target[tgt];
tgt              1599 drivers/scsi/esp_scsi.c 	int tgt = cmd->device->id;
tgt              1601 drivers/scsi/esp_scsi.c 	tp = &esp->target[tgt];
tgt               417 drivers/scsi/ibmvscsi/ibmvfc.c static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
tgt               420 drivers/scsi/ibmvscsi/ibmvfc.c 	switch (tgt->action) {
tgt               423 drivers/scsi/ibmvscsi/ibmvfc.c 			tgt->action = action;
tgt               428 drivers/scsi/ibmvscsi/ibmvfc.c 			tgt->add_rport = 0;
tgt               429 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt->action = action;
tgt               548 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_target *tgt;
tgt               552 drivers/scsi/ibmvscsi/ibmvfc.c 	list_for_each_entry(tgt, &vhost->targets, queue)
tgt               553 drivers/scsi/ibmvscsi/ibmvfc.c 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt               570 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_target *tgt;
tgt               585 drivers/scsi/ibmvscsi/ibmvfc.c 		list_for_each_entry(tgt, &vhost->targets, queue)
tgt               586 drivers/scsi/ibmvscsi/ibmvfc.c 			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt               906 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_target *tgt;
tgt               908 drivers/scsi/ibmvscsi/ibmvfc.c 	list_for_each_entry(tgt, &vhost->targets, queue)
tgt               909 drivers/scsi/ibmvscsi/ibmvfc.c 		if (tgt->target_id == starget->id) {
tgt               910 drivers/scsi/ibmvscsi/ibmvfc.c 			kref_get(&tgt->kref);
tgt               911 drivers/scsi/ibmvscsi/ibmvfc.c 			return tgt;
tgt               926 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_target *tgt;
tgt               930 drivers/scsi/ibmvscsi/ibmvfc.c 	tgt = __ibmvfc_get_target(starget);
tgt               932 drivers/scsi/ibmvscsi/ibmvfc.c 	return tgt;
tgt              1041 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
tgt              1042 drivers/scsi/ibmvscsi/ibmvfc.c 	kfree(tgt);
tgt              1054 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
tgt              1055 drivers/scsi/ibmvscsi/ibmvfc.c 	fc_starget_port_name(starget) = tgt ? tgt->ids.node_name : 0;
tgt              1056 drivers/scsi/ibmvscsi/ibmvfc.c 	if (tgt)
tgt              1057 drivers/scsi/ibmvscsi/ibmvfc.c 		kref_put(&tgt->kref, ibmvfc_release_tgt);
tgt              1069 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
tgt              1070 drivers/scsi/ibmvscsi/ibmvfc.c 	fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
tgt              1071 drivers/scsi/ibmvscsi/ibmvfc.c 	if (tgt)
tgt              1072 drivers/scsi/ibmvscsi/ibmvfc.c 		kref_put(&tgt->kref, ibmvfc_release_tgt);
tgt              1084 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
tgt              1085 drivers/scsi/ibmvscsi/ibmvfc.c 	fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
tgt              1086 drivers/scsi/ibmvscsi/ibmvfc.c 	if (tgt)
tgt              1087 drivers/scsi/ibmvscsi/ibmvfc.c 		kref_put(&tgt->kref, ibmvfc_release_tgt);
tgt              1499 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_target *tgt;
tgt              1501 drivers/scsi/ibmvscsi/ibmvfc.c 	list_for_each_entry(tgt, &vhost->targets, queue) {
tgt              1502 drivers/scsi/ibmvscsi/ibmvfc.c 		if (rport == tgt->rport) {
tgt              1503 drivers/scsi/ibmvscsi/ibmvfc.c 			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt              1752 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_target *tgt;
tgt              1760 drivers/scsi/ibmvscsi/ibmvfc.c 	list_for_each_entry(tgt, &vhost->targets, queue) {
tgt              1761 drivers/scsi/ibmvscsi/ibmvfc.c 		if (tgt->scsi_id == port_id) {
tgt              2630 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_target *tgt;
tgt              2677 drivers/scsi/ibmvscsi/ibmvfc.c 		list_for_each_entry(tgt, &vhost->targets, queue) {
tgt              2680 drivers/scsi/ibmvscsi/ibmvfc.c 			if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id)
tgt              2682 drivers/scsi/ibmvscsi/ibmvfc.c 			if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn)
tgt              2684 drivers/scsi/ibmvscsi/ibmvfc.c 			if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name)
tgt              2686 drivers/scsi/ibmvscsi/ibmvfc.c 			if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO)
tgt              2687 drivers/scsi/ibmvscsi/ibmvfc.c 				tgt->logo_rcvd = 1;
tgt              2688 drivers/scsi/ibmvscsi/ibmvfc.c 			if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) {
tgt              2689 drivers/scsi/ibmvscsi/ibmvfc.c 				ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt              3220 drivers/scsi/ibmvscsi/ibmvfc.c static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
tgt              3223 drivers/scsi/ibmvscsi/ibmvfc.c 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT);
tgt              3224 drivers/scsi/ibmvscsi/ibmvfc.c 	tgt->job_step = job_step;
tgt              3225 drivers/scsi/ibmvscsi/ibmvfc.c 	wake_up(&tgt->vhost->work_wait_q);
tgt              3236 drivers/scsi/ibmvscsi/ibmvfc.c static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
tgt              3239 drivers/scsi/ibmvscsi/ibmvfc.c 	if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
tgt              3240 drivers/scsi/ibmvscsi/ibmvfc.c 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt              3241 drivers/scsi/ibmvscsi/ibmvfc.c 		wake_up(&tgt->vhost->work_wait_q);
tgt              3244 drivers/scsi/ibmvscsi/ibmvfc.c 		ibmvfc_init_tgt(tgt, job_step);
tgt              3289 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_target *tgt = evt->tgt;
tgt              3297 drivers/scsi/ibmvscsi/ibmvfc.c 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
tgt              3300 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt_dbg(tgt, "Process Login succeeded: %X %02X %04X\n",
tgt              3307 drivers/scsi/ibmvscsi/ibmvfc.c 					tgt->need_login = 0;
tgt              3308 drivers/scsi/ibmvscsi/ibmvfc.c 					tgt->ids.roles = 0;
tgt              3310 drivers/scsi/ibmvscsi/ibmvfc.c 						tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
tgt              3312 drivers/scsi/ibmvscsi/ibmvfc.c 						tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
tgt              3313 drivers/scsi/ibmvscsi/ibmvfc.c 					tgt->add_rport = 1;
tgt              3315 drivers/scsi/ibmvscsi/ibmvfc.c 					ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt              3317 drivers/scsi/ibmvscsi/ibmvfc.c 				ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
tgt              3319 drivers/scsi/ibmvscsi/ibmvfc.c 				ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt              3321 drivers/scsi/ibmvscsi/ibmvfc.c 			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt              3326 drivers/scsi/ibmvscsi/ibmvfc.c 		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
tgt              3332 drivers/scsi/ibmvscsi/ibmvfc.c 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
tgt              3333 drivers/scsi/ibmvscsi/ibmvfc.c 		else if (tgt->logo_rcvd)
tgt              3334 drivers/scsi/ibmvscsi/ibmvfc.c 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
tgt              3336 drivers/scsi/ibmvscsi/ibmvfc.c 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
tgt              3338 drivers/scsi/ibmvscsi/ibmvfc.c 			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt              3340 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
tgt              3346 drivers/scsi/ibmvscsi/ibmvfc.c 	kref_put(&tgt->kref, ibmvfc_release_tgt);
tgt              3356 drivers/scsi/ibmvscsi/ibmvfc.c static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
tgt              3359 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_host *vhost = tgt->vhost;
tgt              3365 drivers/scsi/ibmvscsi/ibmvfc.c 	kref_get(&tgt->kref);
tgt              3369 drivers/scsi/ibmvscsi/ibmvfc.c 	evt->tgt = tgt;
tgt              3375 drivers/scsi/ibmvscsi/ibmvfc.c 	prli->scsi_id = cpu_to_be64(tgt->scsi_id);
tgt              3385 drivers/scsi/ibmvscsi/ibmvfc.c 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
tgt              3388 drivers/scsi/ibmvscsi/ibmvfc.c 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
tgt              3389 drivers/scsi/ibmvscsi/ibmvfc.c 		kref_put(&tgt->kref, ibmvfc_release_tgt);
tgt              3391 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt_dbg(tgt, "Sent process login\n");
tgt              3401 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_target *tgt = evt->tgt;
tgt              3408 drivers/scsi/ibmvscsi/ibmvfc.c 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
tgt              3411 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt_dbg(tgt, "Port Login succeeded\n");
tgt              3412 drivers/scsi/ibmvscsi/ibmvfc.c 		if (tgt->ids.port_name &&
tgt              3413 drivers/scsi/ibmvscsi/ibmvfc.c 		    tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
tgt              3415 drivers/scsi/ibmvscsi/ibmvfc.c 			tgt_dbg(tgt, "Port re-init required\n");
tgt              3418 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
tgt              3419 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
tgt              3420 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt->ids.port_id = tgt->scsi_id;
tgt              3421 drivers/scsi/ibmvscsi/ibmvfc.c 		memcpy(&tgt->service_parms, &rsp->service_parms,
tgt              3422 drivers/scsi/ibmvscsi/ibmvfc.c 		       sizeof(tgt->service_parms));
tgt              3423 drivers/scsi/ibmvscsi/ibmvfc.c 		memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
tgt              3424 drivers/scsi/ibmvscsi/ibmvfc.c 		       sizeof(tgt->service_parms_change));
tgt              3425 drivers/scsi/ibmvscsi/ibmvfc.c 		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
tgt              3430 drivers/scsi/ibmvscsi/ibmvfc.c 		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
tgt              3435 drivers/scsi/ibmvscsi/ibmvfc.c 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
tgt              3437 drivers/scsi/ibmvscsi/ibmvfc.c 			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt              3439 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
tgt              3447 drivers/scsi/ibmvscsi/ibmvfc.c 	kref_put(&tgt->kref, ibmvfc_release_tgt);
tgt              3457 drivers/scsi/ibmvscsi/ibmvfc.c static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
tgt              3460 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_host *vhost = tgt->vhost;
tgt              3466 drivers/scsi/ibmvscsi/ibmvfc.c 	kref_get(&tgt->kref);
tgt              3467 drivers/scsi/ibmvscsi/ibmvfc.c 	tgt->logo_rcvd = 0;
tgt              3470 drivers/scsi/ibmvscsi/ibmvfc.c 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
tgt              3472 drivers/scsi/ibmvscsi/ibmvfc.c 	evt->tgt = tgt;
tgt              3478 drivers/scsi/ibmvscsi/ibmvfc.c 	plogi->scsi_id = cpu_to_be64(tgt->scsi_id);
tgt              3482 drivers/scsi/ibmvscsi/ibmvfc.c 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
tgt              3483 drivers/scsi/ibmvscsi/ibmvfc.c 		kref_put(&tgt->kref, ibmvfc_release_tgt);
tgt              3485 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt_dbg(tgt, "Sent port login\n");
tgt              3495 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_target *tgt = evt->tgt;
tgt              3502 drivers/scsi/ibmvscsi/ibmvfc.c 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
tgt              3506 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt_dbg(tgt, "Implicit Logout succeeded\n");
tgt              3509 drivers/scsi/ibmvscsi/ibmvfc.c 		kref_put(&tgt->kref, ibmvfc_release_tgt);
tgt              3514 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
tgt              3519 drivers/scsi/ibmvscsi/ibmvfc.c 		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
tgt              3521 drivers/scsi/ibmvscsi/ibmvfc.c 		 tgt->scsi_id != tgt->new_scsi_id)
tgt              3522 drivers/scsi/ibmvscsi/ibmvfc.c 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt              3523 drivers/scsi/ibmvscsi/ibmvfc.c 	kref_put(&tgt->kref, ibmvfc_release_tgt);
tgt              3532 drivers/scsi/ibmvscsi/ibmvfc.c static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
tgt              3535 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_host *vhost = tgt->vhost;
tgt              3541 drivers/scsi/ibmvscsi/ibmvfc.c 	kref_get(&tgt->kref);
tgt              3545 drivers/scsi/ibmvscsi/ibmvfc.c 	evt->tgt = tgt;
tgt              3551 drivers/scsi/ibmvscsi/ibmvfc.c 	mad->old_scsi_id = cpu_to_be64(tgt->scsi_id);
tgt              3553 drivers/scsi/ibmvscsi/ibmvfc.c 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
tgt              3556 drivers/scsi/ibmvscsi/ibmvfc.c 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
tgt              3557 drivers/scsi/ibmvscsi/ibmvfc.c 		kref_put(&tgt->kref, ibmvfc_release_tgt);
tgt              3559 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt_dbg(tgt, "Sent Implicit Logout\n");
tgt              3571 drivers/scsi/ibmvscsi/ibmvfc.c 				    struct ibmvfc_target *tgt)
tgt              3573 drivers/scsi/ibmvscsi/ibmvfc.c 	if (wwn_to_u64((u8 *)&mad->fc_iu.response[2]) != tgt->ids.port_name)
tgt              3575 drivers/scsi/ibmvscsi/ibmvfc.c 	if (wwn_to_u64((u8 *)&mad->fc_iu.response[4]) != tgt->ids.node_name)
tgt              3577 drivers/scsi/ibmvscsi/ibmvfc.c 	if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id)
tgt              3589 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_target *tgt = evt->tgt;
tgt              3596 drivers/scsi/ibmvscsi/ibmvfc.c 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
tgt              3597 drivers/scsi/ibmvscsi/ibmvfc.c 	del_timer(&tgt->timer);
tgt              3601 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt_dbg(tgt, "ADISC succeeded\n");
tgt              3602 drivers/scsi/ibmvscsi/ibmvfc.c 		if (ibmvfc_adisc_needs_plogi(mad, tgt))
tgt              3603 drivers/scsi/ibmvscsi/ibmvfc.c 			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt              3609 drivers/scsi/ibmvscsi/ibmvfc.c 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt              3612 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
tgt              3620 drivers/scsi/ibmvscsi/ibmvfc.c 	kref_put(&tgt->kref, ibmvfc_release_tgt);
tgt              3666 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_target *tgt = evt->tgt;
tgt              3668 drivers/scsi/ibmvscsi/ibmvfc.c 	tgt_dbg(tgt, "ADISC cancel complete\n");
tgt              3671 drivers/scsi/ibmvscsi/ibmvfc.c 	kref_put(&tgt->kref, ibmvfc_release_tgt);
tgt              3685 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_target *tgt = from_timer(tgt, t, timer);
tgt              3686 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_host *vhost = tgt->vhost;
tgt              3692 drivers/scsi/ibmvscsi/ibmvfc.c 	tgt_dbg(tgt, "ADISC timeout\n");
tgt              3695 drivers/scsi/ibmvscsi/ibmvfc.c 	    tgt->action != IBMVFC_TGT_ACTION_INIT_WAIT ||
tgt              3703 drivers/scsi/ibmvscsi/ibmvfc.c 	kref_get(&tgt->kref);
tgt              3707 drivers/scsi/ibmvscsi/ibmvfc.c 	evt->tgt = tgt;
tgt              3713 drivers/scsi/ibmvscsi/ibmvfc.c 	tmf->scsi_id = cpu_to_be64(tgt->scsi_id);
tgt              3714 drivers/scsi/ibmvscsi/ibmvfc.c 	tmf->cancel_key = cpu_to_be32(tgt->cancel_key);
tgt              3719 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc);
tgt              3721 drivers/scsi/ibmvscsi/ibmvfc.c 		kref_put(&tgt->kref, ibmvfc_release_tgt);
tgt              3724 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt_dbg(tgt, "Attempting to cancel ADISC\n");
tgt              3739 drivers/scsi/ibmvscsi/ibmvfc.c static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
tgt              3742 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_host *vhost = tgt->vhost;
tgt              3748 drivers/scsi/ibmvscsi/ibmvfc.c 	kref_get(&tgt->kref);
tgt              3752 drivers/scsi/ibmvscsi/ibmvfc.c 	evt->tgt = tgt;
tgt              3757 drivers/scsi/ibmvscsi/ibmvfc.c 	mad->iu.scsi_id = cpu_to_be64(tgt->scsi_id);
tgt              3758 drivers/scsi/ibmvscsi/ibmvfc.c 	mad->iu.cancel_key = cpu_to_be32(tgt->cancel_key);
tgt              3767 drivers/scsi/ibmvscsi/ibmvfc.c 	if (timer_pending(&tgt->timer))
tgt              3768 drivers/scsi/ibmvscsi/ibmvfc.c 		mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ));
tgt              3770 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ);
tgt              3771 drivers/scsi/ibmvscsi/ibmvfc.c 		add_timer(&tgt->timer);
tgt              3774 drivers/scsi/ibmvscsi/ibmvfc.c 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
tgt              3777 drivers/scsi/ibmvscsi/ibmvfc.c 		del_timer(&tgt->timer);
tgt              3778 drivers/scsi/ibmvscsi/ibmvfc.c 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
tgt              3779 drivers/scsi/ibmvscsi/ibmvfc.c 		kref_put(&tgt->kref, ibmvfc_release_tgt);
tgt              3781 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt_dbg(tgt, "Sent ADISC\n");
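
The ibmvfc ADISC excerpts above arm a per-target timer before issuing the ELS (mod_timer if already pending, add_timer otherwise) and delete it again on send failure or completion. Below is a minimal sketch of that arm/cancel idiom only; all my_* names and the 45-second value are invented for illustration, this is not the driver's code.

#include <linux/timer.h>
#include <linux/jiffies.h>

#define MY_ADISC_TIMEOUT 45	/* seconds; illustrative value */

struct my_tgt {
	struct timer_list timer;
	/* ... */
};

/* timer callback, runs in softirq context */
static void my_adisc_timeout(struct timer_list *t)
{
	struct my_tgt *tgt = from_timer(tgt, t, timer);

	/* kick off cancellation of the outstanding request here */
}

/* call once when the target object is created; cancel on teardown
 * with del_timer_sync() */
static void my_tgt_timer_init(struct my_tgt *tgt)
{
	timer_setup(&tgt->timer, my_adisc_timeout, 0);
}

static void my_arm_adisc_timer(struct my_tgt *tgt)
{
	if (timer_pending(&tgt->timer)) {
		mod_timer(&tgt->timer, jiffies + MY_ADISC_TIMEOUT * HZ);
	} else {
		tgt->timer.expires = jiffies + MY_ADISC_TIMEOUT * HZ;
		add_timer(&tgt->timer);
	}
}

Note that mod_timer() on a non-pending timer also activates it, so the explicit timer_pending() check is not strictly required; the split above simply mirrors the excerpted code.
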
tgt              3791 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_target *tgt = evt->tgt;
tgt              3798 drivers/scsi/ibmvscsi/ibmvfc.c 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
tgt              3801 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt_dbg(tgt, "Query Target succeeded\n");
tgt              3802 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt->new_scsi_id = be64_to_cpu(rsp->scsi_id);
tgt              3803 drivers/scsi/ibmvscsi/ibmvfc.c 		if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id)
tgt              3804 drivers/scsi/ibmvscsi/ibmvfc.c 			ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
tgt              3806 drivers/scsi/ibmvscsi/ibmvfc.c 			ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
tgt              3811 drivers/scsi/ibmvscsi/ibmvfc.c 		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
tgt              3818 drivers/scsi/ibmvscsi/ibmvfc.c 			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt              3820 drivers/scsi/ibmvscsi/ibmvfc.c 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
tgt              3822 drivers/scsi/ibmvscsi/ibmvfc.c 			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt              3824 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
tgt              3833 drivers/scsi/ibmvscsi/ibmvfc.c 	kref_put(&tgt->kref, ibmvfc_release_tgt);
tgt              3843 drivers/scsi/ibmvscsi/ibmvfc.c static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
tgt              3846 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_host *vhost = tgt->vhost;
tgt              3852 drivers/scsi/ibmvscsi/ibmvfc.c 	kref_get(&tgt->kref);
tgt              3855 drivers/scsi/ibmvscsi/ibmvfc.c 	evt->tgt = tgt;
tgt              3862 drivers/scsi/ibmvscsi/ibmvfc.c 	query_tgt->wwpn = cpu_to_be64(tgt->ids.port_name);
tgt              3864 drivers/scsi/ibmvscsi/ibmvfc.c 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
tgt              3867 drivers/scsi/ibmvscsi/ibmvfc.c 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
tgt              3868 drivers/scsi/ibmvscsi/ibmvfc.c 		kref_put(&tgt->kref, ibmvfc_release_tgt);
tgt              3870 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt_dbg(tgt, "Sent Query Target\n");
tgt              3883 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_target *tgt;
tgt              3887 drivers/scsi/ibmvscsi/ibmvfc.c 	list_for_each_entry(tgt, &vhost->targets, queue) {
tgt              3888 drivers/scsi/ibmvscsi/ibmvfc.c 		if (tgt->scsi_id == scsi_id) {
tgt              3889 drivers/scsi/ibmvscsi/ibmvfc.c 			if (tgt->need_login)
tgt              3890 drivers/scsi/ibmvscsi/ibmvfc.c 				ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
tgt              3896 drivers/scsi/ibmvscsi/ibmvfc.c 	tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
tgt              3897 drivers/scsi/ibmvscsi/ibmvfc.c 	memset(tgt, 0, sizeof(*tgt));
tgt              3898 drivers/scsi/ibmvscsi/ibmvfc.c 	tgt->scsi_id = scsi_id;
tgt              3899 drivers/scsi/ibmvscsi/ibmvfc.c 	tgt->new_scsi_id = scsi_id;
tgt              3900 drivers/scsi/ibmvscsi/ibmvfc.c 	tgt->vhost = vhost;
tgt              3901 drivers/scsi/ibmvscsi/ibmvfc.c 	tgt->need_login = 1;
tgt              3902 drivers/scsi/ibmvscsi/ibmvfc.c 	tgt->cancel_key = vhost->task_set++;
tgt              3903 drivers/scsi/ibmvscsi/ibmvfc.c 	timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0);
tgt              3904 drivers/scsi/ibmvscsi/ibmvfc.c 	kref_init(&tgt->kref);
tgt              3905 drivers/scsi/ibmvscsi/ibmvfc.c 	ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
tgt              3907 drivers/scsi/ibmvscsi/ibmvfc.c 	list_add_tail(&tgt->queue, &vhost->targets);
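
The target-allocation excerpts above draw a target from a mempool, zero it, take the initial reference with kref_init(), set up its timer, and chain it onto the host's target list. The sketch below shows the same allocate/reference/teardown lifecycle in reduced form, using kzalloc() instead of a mempool for simplicity; all my_* names are placeholders, not the driver's API.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_host {
	spinlock_t lock;
	struct list_head targets;
};

struct my_tgt {
	struct kref kref;
	struct list_head queue;
	u64 scsi_id;
	struct my_host *host;
};

/* called when the last reference is dropped */
static void my_release_tgt(struct kref *kref)
{
	struct my_tgt *tgt = container_of(kref, struct my_tgt, kref);

	kfree(tgt);
}

static struct my_tgt *my_alloc_target(struct my_host *host, u64 scsi_id)
{
	struct my_tgt *tgt;
	unsigned long flags;

	tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
	if (!tgt)
		return NULL;

	tgt->scsi_id = scsi_id;
	tgt->host = host;
	kref_init(&tgt->kref);		/* refcount starts at 1 */

	spin_lock_irqsave(&host->lock, flags);
	list_add_tail(&tgt->queue, &host->targets);
	spin_unlock_irqrestore(&host->lock, flags);
	return tgt;
}

Each asynchronous operation then does kref_get() before issuing its command and kref_put(&tgt->kref, my_release_tgt) in the completion or failure path, which is the pairing visible throughout the excerpts above.
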
tgt              4180 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_target *tgt;
tgt              4182 drivers/scsi/ibmvscsi/ibmvfc.c 	list_for_each_entry(tgt, &vhost->targets, queue) {
tgt              4183 drivers/scsi/ibmvscsi/ibmvfc.c 		if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
tgt              4184 drivers/scsi/ibmvscsi/ibmvfc.c 		    tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
tgt              4200 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_target *tgt;
tgt              4213 drivers/scsi/ibmvscsi/ibmvfc.c 		list_for_each_entry(tgt, &vhost->targets, queue)
tgt              4214 drivers/scsi/ibmvscsi/ibmvfc.c 			if (tgt->action == IBMVFC_TGT_ACTION_INIT)
tgt              4216 drivers/scsi/ibmvscsi/ibmvfc.c 		list_for_each_entry(tgt, &vhost->targets, queue)
tgt              4217 drivers/scsi/ibmvscsi/ibmvfc.c 			if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
tgt              4276 drivers/scsi/ibmvscsi/ibmvfc.c static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
tgt              4278 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_host *vhost = tgt->vhost;
tgt              4282 drivers/scsi/ibmvscsi/ibmvfc.c 	tgt_dbg(tgt, "Adding rport\n");
tgt              4283 drivers/scsi/ibmvscsi/ibmvfc.c 	rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
tgt              4286 drivers/scsi/ibmvscsi/ibmvfc.c 	if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
tgt              4287 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt_dbg(tgt, "Deleting rport\n");
tgt              4288 drivers/scsi/ibmvscsi/ibmvfc.c 		list_del(&tgt->queue);
tgt              4289 drivers/scsi/ibmvscsi/ibmvfc.c 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
tgt              4292 drivers/scsi/ibmvscsi/ibmvfc.c 		del_timer_sync(&tgt->timer);
tgt              4293 drivers/scsi/ibmvscsi/ibmvfc.c 		kref_put(&tgt->kref, ibmvfc_release_tgt);
tgt              4295 drivers/scsi/ibmvscsi/ibmvfc.c 	} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
tgt              4301 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt_dbg(tgt, "rport add succeeded\n");
tgt              4302 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt->rport = rport;
tgt              4303 drivers/scsi/ibmvscsi/ibmvfc.c 		rport->maxframe_size = be16_to_cpu(tgt->service_parms.common.bb_rcv_sz) & 0x0fff;
tgt              4305 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt->target_id = rport->scsi_target_id;
tgt              4306 drivers/scsi/ibmvscsi/ibmvfc.c 		if (be32_to_cpu(tgt->service_parms.class1_parms[0]) & 0x80000000)
tgt              4308 drivers/scsi/ibmvscsi/ibmvfc.c 		if (be32_to_cpu(tgt->service_parms.class2_parms[0]) & 0x80000000)
tgt              4310 drivers/scsi/ibmvscsi/ibmvfc.c 		if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000)
tgt              4315 drivers/scsi/ibmvscsi/ibmvfc.c 		tgt_dbg(tgt, "rport add failed\n");
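
The rport-add excerpts register a discovered target with the FC transport class and later adjust its roles. A hedged sketch of those transport-class calls follows; the function and parameter names are invented, and the real driver fills the identifiers from its negotiated service parameters.

#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

/* register one discovered target with the FC transport class */
static struct fc_rport *my_add_rport(struct Scsi_Host *shost,
				     u64 node_name, u64 port_name,
				     u32 port_id, u32 roles)
{
	struct fc_rport_identifiers ids = {
		.node_name = node_name,
		.port_name = port_name,
		.port_id   = port_id,
		.roles     = FC_PORT_ROLE_UNKNOWN,
	};
	struct fc_rport *rport;

	rport = fc_remote_port_add(shost, 0, &ids);
	if (!rport)
		return NULL;

	/* announce FCP target/initiator capability once it is known */
	fc_remote_port_rolechg(rport, roles);
	return rport;
}
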
tgt              4326 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_target *tgt;
tgt              4376 drivers/scsi/ibmvscsi/ibmvfc.c 		list_for_each_entry(tgt, &vhost->targets, queue)
tgt              4377 drivers/scsi/ibmvscsi/ibmvfc.c 			ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
tgt              4381 drivers/scsi/ibmvscsi/ibmvfc.c 		list_for_each_entry(tgt, &vhost->targets, queue) {
tgt              4382 drivers/scsi/ibmvscsi/ibmvfc.c 			if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
tgt              4383 drivers/scsi/ibmvscsi/ibmvfc.c 				tgt->job_step(tgt);
tgt              4393 drivers/scsi/ibmvscsi/ibmvfc.c 		list_for_each_entry(tgt, &vhost->targets, queue) {
tgt              4394 drivers/scsi/ibmvscsi/ibmvfc.c 			if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
tgt              4395 drivers/scsi/ibmvscsi/ibmvfc.c 				tgt_dbg(tgt, "Deleting rport\n");
tgt              4396 drivers/scsi/ibmvscsi/ibmvfc.c 				rport = tgt->rport;
tgt              4397 drivers/scsi/ibmvscsi/ibmvfc.c 				tgt->rport = NULL;
tgt              4398 drivers/scsi/ibmvscsi/ibmvfc.c 				list_del(&tgt->queue);
tgt              4399 drivers/scsi/ibmvscsi/ibmvfc.c 				ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
tgt              4403 drivers/scsi/ibmvscsi/ibmvfc.c 				del_timer_sync(&tgt->timer);
tgt              4404 drivers/scsi/ibmvscsi/ibmvfc.c 				kref_put(&tgt->kref, ibmvfc_release_tgt);
tgt              4446 drivers/scsi/ibmvscsi/ibmvfc.c 		list_for_each_entry(tgt, &vhost->targets, queue) {
tgt              4447 drivers/scsi/ibmvscsi/ibmvfc.c 			if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
tgt              4448 drivers/scsi/ibmvscsi/ibmvfc.c 				tgt->job_step(tgt);
tgt              4693 drivers/scsi/ibmvscsi/ibmvfc.c 	struct ibmvfc_target *tgt;
tgt              4705 drivers/scsi/ibmvscsi/ibmvfc.c 		list_for_each_entry(tgt, &vhost->targets, queue) {
tgt              4706 drivers/scsi/ibmvscsi/ibmvfc.c 			if (tgt->add_rport) {
tgt              4708 drivers/scsi/ibmvscsi/ibmvfc.c 				tgt->add_rport = 0;
tgt              4709 drivers/scsi/ibmvscsi/ibmvfc.c 				kref_get(&tgt->kref);
tgt              4710 drivers/scsi/ibmvscsi/ibmvfc.c 				rport = tgt->rport;
tgt              4713 drivers/scsi/ibmvscsi/ibmvfc.c 					ibmvfc_tgt_add_rport(tgt);
tgt              4716 drivers/scsi/ibmvscsi/ibmvfc.c 					tgt_dbg(tgt, "Setting rport roles\n");
tgt              4717 drivers/scsi/ibmvscsi/ibmvfc.c 					fc_remote_port_rolechg(rport, tgt->ids.roles);
tgt              4723 drivers/scsi/ibmvscsi/ibmvfc.c 				kref_put(&tgt->kref, ibmvfc_release_tgt);
tgt               628 drivers/scsi/ibmvscsi/ibmvfc.h 	struct ibmvfc_target *tgt;
tgt               457 drivers/scsi/megaraid.c 	int		tgt;
tgt               460 drivers/scsi/megaraid.c 	tgt = cmd->device->id;
tgt               462 drivers/scsi/megaraid.c 	if ( tgt > adapter->this_id )
tgt               463 drivers/scsi/megaraid.c 		tgt--;	/* we do not get inquires for initiator id */
tgt               465 drivers/scsi/megaraid.c 	ldrv_num = (channel * 15) + tgt;
tgt              2336 drivers/scsi/megaraid.c 	int	tgt;
tgt              2373 drivers/scsi/megaraid.c 	for( tgt = 0; tgt <= MAX_TARGET; tgt++ ) {
tgt              2375 drivers/scsi/megaraid.c 		i = channel*16 + tgt;
tgt              2381 drivers/scsi/megaraid.c 				   channel, tgt);
tgt              2386 drivers/scsi/megaraid.c 				   channel, tgt);
tgt              2391 drivers/scsi/megaraid.c 				   channel, tgt);
tgt              2396 drivers/scsi/megaraid.c 				   channel, tgt);
tgt              2401 drivers/scsi/megaraid.c 				   channel, tgt);
tgt              2411 drivers/scsi/megaraid.c 		if( mega_internal_dev_inquiry(adapter, channel, tgt,
tgt              4004 drivers/scsi/megaraid.c mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt,
tgt              4035 drivers/scsi/megaraid.c 	pthru->target = (adapter->flag & BOARD_40LD) ? (ch << 4)|tgt : tgt;
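
The megaraid excerpts map a SCSI (channel, target) pair to a logical drive number and, on 40-logical-drive firmware, pack channel and target into one byte of the passthrough frame. A small illustrative helper is below; the names are invented and the constants are taken as the excerpts suggest, so treat it as a reading aid rather than the driver's code.

#include <linux/types.h>

/* Targets above the initiator ID are shifted down because the
 * controller never answers inquiries for its own ID; each channel
 * then contributes 15 usable IDs. */
static inline int my_ldrv_num(int channel, int tgt, int initiator_id)
{
	if (tgt > initiator_id)
		tgt--;
	return channel * 15 + tgt;
}

/* 40LD firmware packs the channel into the high nibble and the
 * target into the low nibble; older firmware takes the raw target. */
static inline u8 my_pthru_target(u8 ch, u8 tgt, bool is_40ld)
{
	return is_40ld ? (u8)((ch << 4) | tgt) : tgt;
}
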
tgt                82 drivers/scsi/mesh.c #define ALLOW_SYNC(tgt)		((sync_targets >> (tgt)) & 1)
tgt                83 drivers/scsi/mesh.c #define ALLOW_RESEL(tgt)	((resel_targets >> (tgt)) & 1)
tgt                84 drivers/scsi/mesh.c #define ALLOW_DEBUG(tgt)	((debug_targets >> (tgt)) & 1)
tgt                99 drivers/scsi/mesh.c 	u8	tgt;
tgt               225 drivers/scsi/mesh.c 	tlp->tgt = ms->conn_tgt;
tgt               278 drivers/scsi/mesh.c 		       lp->bs1, lp->bs0, lp->phase, lp->tgt);
tgt               293 drivers/scsi/mesh.c static inline void dumplog(struct mesh_state *ms, int tgt)
tgt               988 drivers/scsi/mesh.c 	int tgt;
tgt               993 drivers/scsi/mesh.c 	for (tgt = 0; tgt < 8; ++tgt) {
tgt               994 drivers/scsi/mesh.c 		tp = &ms->tgts[tgt];
tgt              1000 drivers/scsi/mesh.c 		ms->tgts[tgt].sdtr_state = do_sdtr;
tgt              1001 drivers/scsi/mesh.c 		ms->tgts[tgt].sync_params = ASYNC_PARAMS;
tgt              1849 drivers/scsi/mesh.c 	int tgt, minper;
tgt              1932 drivers/scsi/mesh.c        	for (tgt = 0; tgt < 8; ++tgt) {
tgt              1933 drivers/scsi/mesh.c 	       	ms->tgts[tgt].sdtr_state = do_sdtr;
tgt              1934 drivers/scsi/mesh.c 	       	ms->tgts[tgt].sync_params = ASYNC_PARAMS;
tgt              1935 drivers/scsi/mesh.c 	       	ms->tgts[tgt].current_req = NULL;
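
mesh.c gates synchronous transfers, reselection, and debug output per target with bitmasks in which bit N corresponds to SCSI ID N. A tiny sketch of the same test as a module parameter, with hypothetical names:

#include <linux/module.h>
#include <linux/types.h>

static int my_sync_targets = 0xff;	/* bit N set => allow sync for ID N */
module_param(my_sync_targets, int, 0644);

static inline bool my_allow_sync(unsigned int tgt)
{
	return (my_sync_targets >> tgt) & 1;
}
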
tgt               916 drivers/scsi/qedi/qedi_main.c 				   struct qedi_boot_target *tgt, u8 index)
tgt               923 drivers/scsi/qedi/qedi_main.c 	snprintf(tgt->iscsi_name, sizeof(tgt->iscsi_name), "%s\n",
tgt               926 drivers/scsi/qedi/qedi_main.c 	tgt->ipv6_en = ipv6_en;
tgt               929 drivers/scsi/qedi/qedi_main.c 		snprintf(tgt->ip_addr, IPV6_LEN, "%pI6\n",
tgt               932 drivers/scsi/qedi/qedi_main.c 		snprintf(tgt->ip_addr, IPV4_LEN, "%pI4\n",
tgt               556 drivers/scsi/qla2xxx/qla_dbg.c 	if (!ha->tgt.atio_ring)
tgt               561 drivers/scsi/qla2xxx/qla_dbg.c 	aqp->length = ha->tgt.atio_q_length;
tgt               562 drivers/scsi/qla2xxx/qla_dbg.c 	aqp->ring = ha->tgt.atio_ring;
tgt               914 drivers/scsi/qla2xxx/qla_def.h #define ISP_ATIO_Q_IN(vha) (vha->hw->tgt.atio_q_in)
tgt               915 drivers/scsi/qla2xxx/qla_def.h #define ISP_ATIO_Q_OUT(vha) (vha->hw->tgt.atio_q_out)
tgt              2426 drivers/scsi/qla2xxx/qla_def.h 	struct qla_tgt *tgt;
tgt              3897 drivers/scsi/qla2xxx/qla_def.h #define IS_TGT_MODE_CAPABLE(ha)	(ha->tgt.atio_q_length)
tgt              4272 drivers/scsi/qla2xxx/qla_def.h 	struct qlt_hw_data tgt;
tgt                22 drivers/scsi/qla2xxx/qla_dfs.c 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
tgt                25 drivers/scsi/qla2xxx/qla_dfs.c 	if (tgt) {
tgt                28 drivers/scsi/qla2xxx/qla_dfs.c 		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
tgt                34 drivers/scsi/qla2xxx/qla_dfs.c 		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
tgt                65 drivers/scsi/qla2xxx/qla_dfs.c 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
tgt                68 drivers/scsi/qla2xxx/qla_dfs.c 	if (tgt) {
tgt               366 drivers/scsi/qla2xxx/qla_dfs.c 	seq_printf(s, "%d\n", ha->tgt.num_act_qpairs);
tgt               416 drivers/scsi/qla2xxx/qla_dfs.c 	if (num_act_qp != ha->tgt.num_act_qpairs) {
tgt               417 drivers/scsi/qla2xxx/qla_dfs.c 		ha->tgt.num_act_qpairs = num_act_qp;
tgt               468 drivers/scsi/qla2xxx/qla_dfs.c 	ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
tgt               474 drivers/scsi/qla2xxx/qla_dfs.c 	ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
tgt               478 drivers/scsi/qla2xxx/qla_dfs.c 		ha->tgt.dfs_naqp = debugfs_create_file("naqp",
tgt               489 drivers/scsi/qla2xxx/qla_dfs.c 	if (ha->tgt.dfs_naqp) {
tgt               490 drivers/scsi/qla2xxx/qla_dfs.c 		debugfs_remove(ha->tgt.dfs_naqp);
tgt               491 drivers/scsi/qla2xxx/qla_dfs.c 		ha->tgt.dfs_naqp = NULL;
tgt               494 drivers/scsi/qla2xxx/qla_dfs.c 	if (ha->tgt.dfs_tgt_sess) {
tgt               495 drivers/scsi/qla2xxx/qla_dfs.c 		debugfs_remove(ha->tgt.dfs_tgt_sess);
tgt               496 drivers/scsi/qla2xxx/qla_dfs.c 		ha->tgt.dfs_tgt_sess = NULL;
tgt               499 drivers/scsi/qla2xxx/qla_dfs.c 	if (ha->tgt.dfs_tgt_port_database) {
tgt               500 drivers/scsi/qla2xxx/qla_dfs.c 		debugfs_remove(ha->tgt.dfs_tgt_port_database);
tgt               501 drivers/scsi/qla2xxx/qla_dfs.c 		ha->tgt.dfs_tgt_port_database = NULL;
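
The qla_dfs excerpts create per-host debugfs files for target-mode state and remove them on teardown, NULLing the cached dentries so removal is idempotent. A minimal sketch of that create/remove pairing is below; the file name, fops, and struct are placeholders.

#include <linux/debugfs.h>

struct my_hw {
	struct dentry *dfs_dir;
	struct dentry *dfs_naqp;
};

static const struct file_operations my_naqp_fops;	/* open/read/write defined elsewhere */

static void my_dfs_setup(struct my_hw *ha)
{
	ha->dfs_dir = debugfs_create_dir("my_host", NULL);
	ha->dfs_naqp = debugfs_create_file("naqp", 0400, ha->dfs_dir,
					   ha, &my_naqp_fops);
}

static void my_dfs_remove(struct my_hw *ha)
{
	if (ha->dfs_naqp) {
		debugfs_remove(ha->dfs_naqp);
		ha->dfs_naqp = NULL;
	}
	if (ha->dfs_dir) {
		debugfs_remove(ha->dfs_dir);
		ha->dfs_dir = NULL;
	}
}
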
tgt              3277 drivers/scsi/qla2xxx/qla_gs.c 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
tgt              3279 drivers/scsi/qla2xxx/qla_gs.c 	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt              3341 drivers/scsi/qla2xxx/qla_gs.c 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
tgt              3345 drivers/scsi/qla2xxx/qla_gs.c 			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt              3351 drivers/scsi/qla2xxx/qla_gs.c 	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt              1012 drivers/scsi/qla2xxx/qla_init.c 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
tgt              1018 drivers/scsi/qla2xxx/qla_init.c 	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt              1022 drivers/scsi/qla2xxx/qla_init.c 		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
tgt              1024 drivers/scsi/qla2xxx/qla_init.c 		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt              1062 drivers/scsi/qla2xxx/qla_init.c 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
tgt              1064 drivers/scsi/qla2xxx/qla_init.c 	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt              1083 drivers/scsi/qla2xxx/qla_init.c 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
tgt              1091 drivers/scsi/qla2xxx/qla_init.c 		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt              1095 drivers/scsi/qla2xxx/qla_init.c 	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt              1378 drivers/scsi/qla2xxx/qla_init.c 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
tgt              1387 drivers/scsi/qla2xxx/qla_init.c 		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt              1389 drivers/scsi/qla2xxx/qla_init.c 		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
tgt              1401 drivers/scsi/qla2xxx/qla_init.c 	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt              2003 drivers/scsi/qla2xxx/qla_init.c 			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
tgt              2008 drivers/scsi/qla2xxx/qla_init.c 			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt              3240 drivers/scsi/qla2xxx/qla_init.c 		if (ha->tgt.atio_ring)
tgt              3241 drivers/scsi/qla2xxx/qla_init.c 			mq_size += ha->tgt.atio_q_length * sizeof(request_t);
tgt              3974 drivers/scsi/qla2xxx/qla_init.c 	icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
tgt              3975 drivers/scsi/qla2xxx/qla_init.c 	put_unaligned_le64(ha->tgt.atio_dma, &icb->atio_q_address);
tgt              4089 drivers/scsi/qla2xxx/qla_init.c 	ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
tgt              4090 drivers/scsi/qla2xxx/qla_init.c 	ha->tgt.atio_ring_index = 0;
tgt              4993 drivers/scsi/qla2xxx/qla_init.c 				spin_lock_irqsave(&ha->tgt.atio_lock, flags);
tgt              4995 drivers/scsi/qla2xxx/qla_init.c 				spin_unlock_irqrestore(&ha->tgt.atio_lock,
tgt              5184 drivers/scsi/qla2xxx/qla_init.c 		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
tgt              5211 drivers/scsi/qla2xxx/qla_init.c 			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt              5221 drivers/scsi/qla2xxx/qla_init.c 			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
tgt              5225 drivers/scsi/qla2xxx/qla_init.c 		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt              5807 drivers/scsi/qla2xxx/qla_init.c 		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
tgt              5881 drivers/scsi/qla2xxx/qla_init.c 			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt              5888 drivers/scsi/qla2xxx/qla_init.c 		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
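
Throughout the qla_init excerpts, fcport/session state is touched only with tgt.sess_lock held IRQ-safe, and the lock is dropped around calls that may sleep or re-enter before being retaken. The sketch below shows only the basic IRQ-safe walk with invented names; the drop/retake dance around sleepable work is left as a comment because doing it safely needs extra per-entry state, as the driver's own code illustrates.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_sess {
	struct list_head list;
	bool needs_work;
};

struct my_hw {
	spinlock_t sess_lock;
	struct list_head sess_list;
};

static void my_scan_sessions(struct my_hw *ha)
{
	struct my_sess *sess;
	unsigned long flags;

	/* IRQ-safe: the same lock is also taken from interrupt context */
	spin_lock_irqsave(&ha->sess_lock, flags);
	list_for_each_entry(sess, &ha->sess_list, list)
		sess->needs_work = true;
	/* anything that can sleep must run after dropping the lock */
	spin_unlock_irqrestore(&ha->sess_lock, flags);
}
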
tgt               282 drivers/scsi/qla2xxx/qla_inline.h qla_qpair_to_hint(struct qla_tgt *tgt, struct qla_qpair *qpair)
tgt               287 drivers/scsi/qla2xxx/qla_inline.h 	for (i = 0; i < tgt->ha->max_qpairs + 1; i++) {
tgt               288 drivers/scsi/qla2xxx/qla_inline.h 		h = &tgt->qphints[i];
tgt              3265 drivers/scsi/qla2xxx/qla_isr.c 		spin_lock_irqsave(&ha->tgt.atio_lock, flags);
tgt              3267 drivers/scsi/qla2xxx/qla_isr.c 		spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
tgt              3391 drivers/scsi/qla2xxx/qla_isr.c 		spin_lock_irqsave(&ha->tgt.atio_lock, flags);
tgt              3393 drivers/scsi/qla2xxx/qla_isr.c 		spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
tgt              3886 drivers/scsi/qla2xxx/qla_mbx.c 			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
tgt              3894 drivers/scsi/qla2xxx/qla_mbx.c 			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt              1094 drivers/scsi/qla2xxx/qla_os.c 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
tgt              1099 drivers/scsi/qla2xxx/qla_os.c 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
tgt              1758 drivers/scsi/qla2xxx/qla_os.c 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
tgt              1773 drivers/scsi/qla2xxx/qla_os.c 				if (!vha->hw->tgt.tgt_ops || !tgt ||
tgt              2820 drivers/scsi/qla2xxx/qla_os.c 	INIT_LIST_HEAD(&ha->tgt.q_full_list);
tgt              2821 drivers/scsi/qla2xxx/qla_os.c 	spin_lock_init(&ha->tgt.q_full_lock);
tgt              2822 drivers/scsi/qla2xxx/qla_os.c 	spin_lock_init(&ha->tgt.sess_lock);
tgt              2823 drivers/scsi/qla2xxx/qla_os.c 	spin_lock_init(&ha->tgt.atio_lock);
tgt              2898 drivers/scsi/qla2xxx/qla_os.c 		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
tgt              2914 drivers/scsi/qla2xxx/qla_os.c 		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
tgt              2930 drivers/scsi/qla2xxx/qla_os.c 		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
tgt              2977 drivers/scsi/qla2xxx/qla_os.c 		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
tgt              3008 drivers/scsi/qla2xxx/qla_os.c 		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
tgt              3025 drivers/scsi/qla2xxx/qla_os.c 		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
tgt              5035 drivers/scsi/qla2xxx/qla_os.c 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
tgt              5051 drivers/scsi/qla2xxx/qla_os.c 		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt              5083 drivers/scsi/qla2xxx/qla_os.c 		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
tgt              5105 drivers/scsi/qla2xxx/qla_os.c 	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt              5139 drivers/scsi/qla2xxx/qla_os.c 			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
tgt              5165 drivers/scsi/qla2xxx/qla_os.c 			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt               121 drivers/scsi/qla2xxx/qla_target.c static void qlt_clear_tgt_db(struct qla_tgt *tgt);
tgt               203 drivers/scsi/qla2xxx/qla_target.c 	host = btree_lookup32(&vha->hw->tgt.host_map, key);
tgt               220 drivers/scsi/qla2xxx/qla_target.c 	BUG_ON(ha->tgt.tgt_vp_map == NULL);
tgt               222 drivers/scsi/qla2xxx/qla_target.c 		return ha->tgt.tgt_vp_map[vp_idx].vha;
tgt               231 drivers/scsi/qla2xxx/qla_target.c 	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
tgt               233 drivers/scsi/qla2xxx/qla_target.c 	vha->hw->tgt.num_pend_cmds++;
tgt               234 drivers/scsi/qla2xxx/qla_target.c 	if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
tgt               236 drivers/scsi/qla2xxx/qla_target.c 			vha->hw->tgt.num_pend_cmds;
tgt               237 drivers/scsi/qla2xxx/qla_target.c 	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
tgt               243 drivers/scsi/qla2xxx/qla_target.c 	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
tgt               244 drivers/scsi/qla2xxx/qla_target.c 	vha->hw->tgt.num_pend_cmds--;
tgt               245 drivers/scsi/qla2xxx/qla_target.c 	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
tgt               253 drivers/scsi/qla2xxx/qla_target.c 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
tgt               256 drivers/scsi/qla2xxx/qla_target.c 	if (tgt->tgt_stop) {
tgt               290 drivers/scsi/qla2xxx/qla_target.c 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
tgt               309 drivers/scsi/qla2xxx/qla_target.c 		} else if (tgt->tgt_stop) {
tgt               571 drivers/scsi/qla2xxx/qla_target.c 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
tgt               594 drivers/scsi/qla2xxx/qla_target.c 			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt               596 drivers/scsi/qla2xxx/qla_target.c 			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
tgt               611 drivers/scsi/qla2xxx/qla_target.c 	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt               686 drivers/scsi/qla2xxx/qla_target.c 			vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
tgt               700 drivers/scsi/qla2xxx/qla_target.c 		ha->tgt.tgt_ops->shutdown_sess(fcport);
tgt               701 drivers/scsi/qla2xxx/qla_target.c 		ha->tgt.tgt_ops->put_sess(fcport);
tgt               713 drivers/scsi/qla2xxx/qla_target.c 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
tgt               717 drivers/scsi/qla2xxx/qla_target.c 	if (!vha->hw->tgt.tgt_ops)
tgt               720 drivers/scsi/qla2xxx/qla_target.c 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
tgt               721 drivers/scsi/qla2xxx/qla_target.c 	if (tgt->tgt_stop) {
tgt               722 drivers/scsi/qla2xxx/qla_target.c 		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
tgt               727 drivers/scsi/qla2xxx/qla_target.c 		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
tgt               732 drivers/scsi/qla2xxx/qla_target.c 		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
tgt               738 drivers/scsi/qla2xxx/qla_target.c 		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
tgt               741 drivers/scsi/qla2xxx/qla_target.c 			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
tgt               749 drivers/scsi/qla2xxx/qla_target.c 			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
tgt               761 drivers/scsi/qla2xxx/qla_target.c 		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
tgt               773 drivers/scsi/qla2xxx/qla_target.c 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
tgt               775 drivers/scsi/qla2xxx/qla_target.c 	ha->tgt.tgt_ops->put_sess(sess);
tgt               951 drivers/scsi/qla2xxx/qla_target.c 	struct qla_tgt *tgt = sess->tgt;
tgt              1019 drivers/scsi/qla2xxx/qla_target.c 		ha->tgt.tgt_ops->free_session(sess);
tgt              1048 drivers/scsi/qla2xxx/qla_target.c 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
tgt              1051 drivers/scsi/qla2xxx/qla_target.c 		if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
tgt              1052 drivers/scsi/qla2xxx/qla_target.c 			tgt->sess_count--;
tgt              1108 drivers/scsi/qla2xxx/qla_target.c 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
tgt              1115 drivers/scsi/qla2xxx/qla_target.c 	if (tgt && (tgt->sess_count == 0))
tgt              1116 drivers/scsi/qla2xxx/qla_target.c 		wake_up_all(&tgt->waitQ);
tgt              1120 drivers/scsi/qla2xxx/qla_target.c 	    (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
tgt              1157 drivers/scsi/qla2xxx/qla_target.c 		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
tgt              1181 drivers/scsi/qla2xxx/qla_target.c 		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
tgt              1183 drivers/scsi/qla2xxx/qla_target.c 		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
tgt              1185 drivers/scsi/qla2xxx/qla_target.c 		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
tgt              1186 drivers/scsi/qla2xxx/qla_target.c 		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
tgt              1187 drivers/scsi/qla2xxx/qla_target.c 		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
tgt              1216 drivers/scsi/qla2xxx/qla_target.c 	struct qla_tgt *tgt = sess->tgt;
tgt              1224 drivers/scsi/qla2xxx/qla_target.c 		if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
tgt              1225 drivers/scsi/qla2xxx/qla_target.c 			wake_up_all(&tgt->waitQ);
tgt              1272 drivers/scsi/qla2xxx/qla_target.c static void qlt_clear_tgt_db(struct qla_tgt *tgt)
tgt              1275 drivers/scsi/qla2xxx/qla_target.c 	scsi_qla_host_t *vha = tgt->vha;
tgt              1357 drivers/scsi/qla2xxx/qla_target.c 	sess->tgt = vha->vha_tgt.qla_tgt;
tgt              1370 drivers/scsi/qla2xxx/qla_target.c 	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
tgt              1389 drivers/scsi/qla2xxx/qla_target.c 		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
tgt              1394 drivers/scsi/qla2xxx/qla_target.c 		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
tgt              1419 drivers/scsi/qla2xxx/qla_target.c 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
tgt              1423 drivers/scsi/qla2xxx/qla_target.c 	if (!vha->hw->tgt.tgt_ops)
tgt              1426 drivers/scsi/qla2xxx/qla_target.c 	if (!tgt)
tgt              1429 drivers/scsi/qla2xxx/qla_target.c 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
tgt              1430 drivers/scsi/qla2xxx/qla_target.c 	if (tgt->tgt_stop) {
tgt              1431 drivers/scsi/qla2xxx/qla_target.c 		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt              1435 drivers/scsi/qla2xxx/qla_target.c 		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt              1440 drivers/scsi/qla2xxx/qla_target.c 		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt              1452 drivers/scsi/qla2xxx/qla_target.c 	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt              1456 drivers/scsi/qla2xxx/qla_target.c static inline int test_tgt_sess_count(struct qla_tgt *tgt)
tgt              1458 drivers/scsi/qla2xxx/qla_target.c 	struct qla_hw_data *ha = tgt->ha;
tgt              1465 drivers/scsi/qla2xxx/qla_target.c 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
tgt              1466 drivers/scsi/qla2xxx/qla_target.c 	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
tgt              1468 drivers/scsi/qla2xxx/qla_target.c 	    tgt, tgt->sess_count);
tgt              1469 drivers/scsi/qla2xxx/qla_target.c 	res = (tgt->sess_count == 0);
tgt              1470 drivers/scsi/qla2xxx/qla_target.c 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
tgt              1476 drivers/scsi/qla2xxx/qla_target.c int qlt_stop_phase1(struct qla_tgt *tgt)
tgt              1478 drivers/scsi/qla2xxx/qla_target.c 	struct scsi_qla_host *vha = tgt->vha;
tgt              1479 drivers/scsi/qla2xxx/qla_target.c 	struct qla_hw_data *ha = tgt->ha;
tgt              1485 drivers/scsi/qla2xxx/qla_target.c 	if (tgt->tgt_stop || tgt->tgt_stopped) {
tgt              1500 drivers/scsi/qla2xxx/qla_target.c 	tgt->tgt_stop = 1;
tgt              1501 drivers/scsi/qla2xxx/qla_target.c 	qlt_clear_tgt_db(tgt);
tgt              1506 drivers/scsi/qla2xxx/qla_target.c 	    "Waiting for sess works (tgt %p)", tgt);
tgt              1507 drivers/scsi/qla2xxx/qla_target.c 	spin_lock_irqsave(&tgt->sess_work_lock, flags);
tgt              1508 drivers/scsi/qla2xxx/qla_target.c 	while (!list_empty(&tgt->sess_works_list)) {
tgt              1509 drivers/scsi/qla2xxx/qla_target.c 		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
tgt              1511 drivers/scsi/qla2xxx/qla_target.c 		spin_lock_irqsave(&tgt->sess_work_lock, flags);
tgt              1513 drivers/scsi/qla2xxx/qla_target.c 	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
tgt              1516 drivers/scsi/qla2xxx/qla_target.c 	    "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);
tgt              1518 drivers/scsi/qla2xxx/qla_target.c 	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
tgt              1526 drivers/scsi/qla2xxx/qla_target.c 	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
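
qlt_stop_phase1 in the excerpts marks the target as stopping, flushes queued session work, and then waits a bounded time for the session count to reach zero, with wakeups driven from the session-free path. A sketch of that drain idiom, with invented names; tgt->waitq is assumed initialized with init_waitqueue_head() and tgt->lock with spin_lock_init().

#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

struct my_tgt {
	spinlock_t lock;
	int sess_count;
	wait_queue_head_t waitq;
};

static bool my_sess_drained(struct my_tgt *tgt)
{
	unsigned long flags;
	bool empty;

	spin_lock_irqsave(&tgt->lock, flags);
	empty = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&tgt->lock, flags);
	return empty;
}

/* teardown side: wait up to 10 seconds for sessions to go away */
static void my_wait_for_sessions(struct my_tgt *tgt)
{
	wait_event_timeout(tgt->waitq, my_sess_drained(tgt), 10 * HZ);
}

/* session-free side: drop the count and kick any waiter */
static void my_sess_put(struct my_tgt *tgt)
{
	unsigned long flags;

	spin_lock_irqsave(&tgt->lock, flags);
	if (--tgt->sess_count == 0)
		wake_up_all(&tgt->waitq);
	spin_unlock_irqrestore(&tgt->lock, flags);
}
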
tgt              1534 drivers/scsi/qla2xxx/qla_target.c void qlt_stop_phase2(struct qla_tgt *tgt)
tgt              1536 drivers/scsi/qla2xxx/qla_target.c 	scsi_qla_host_t *vha = tgt->vha;
tgt              1538 drivers/scsi/qla2xxx/qla_target.c 	if (tgt->tgt_stopped) {
tgt              1544 drivers/scsi/qla2xxx/qla_target.c 	if (!tgt->tgt_stop) {
tgt              1552 drivers/scsi/qla2xxx/qla_target.c 	tgt->tgt_stop = 0;
tgt              1553 drivers/scsi/qla2xxx/qla_target.c 	tgt->tgt_stopped = 1;
tgt              1557 drivers/scsi/qla2xxx/qla_target.c 	    tgt);
tgt              1571 drivers/scsi/qla2xxx/qla_target.c static void qlt_release(struct qla_tgt *tgt)
tgt              1573 drivers/scsi/qla2xxx/qla_target.c 	scsi_qla_host_t *vha = tgt->vha;
tgt              1580 drivers/scsi/qla2xxx/qla_target.c 	if (!tgt->tgt_stop && !tgt->tgt_stopped)
tgt              1581 drivers/scsi/qla2xxx/qla_target.c 		qlt_stop_phase1(tgt);
tgt              1583 drivers/scsi/qla2xxx/qla_target.c 	if (!tgt->tgt_stopped)
tgt              1584 drivers/scsi/qla2xxx/qla_target.c 		qlt_stop_phase2(tgt);
tgt              1589 drivers/scsi/qla2xxx/qla_target.c 		h = &tgt->qphints[i];
tgt              1597 drivers/scsi/qla2xxx/qla_target.c 	kfree(tgt->qphints);
tgt              1602 drivers/scsi/qla2xxx/qla_target.c 	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
tgt              1603 drivers/scsi/qla2xxx/qla_target.c 		btree_remove64(&tgt->lun_qpair_map, key);
tgt              1605 drivers/scsi/qla2xxx/qla_target.c 	btree_destroy64(&tgt->lun_qpair_map);
tgt              1608 drivers/scsi/qla2xxx/qla_target.c 		if (ha->tgt.tgt_ops &&
tgt              1609 drivers/scsi/qla2xxx/qla_target.c 		    ha->tgt.tgt_ops->remove_target &&
tgt              1611 drivers/scsi/qla2xxx/qla_target.c 			ha->tgt.tgt_ops->remove_target(vha);
tgt              1616 drivers/scsi/qla2xxx/qla_target.c 	    "Release of tgt %p finished\n", tgt);
tgt              1618 drivers/scsi/qla2xxx/qla_target.c 	kfree(tgt);
tgt              1622 drivers/scsi/qla2xxx/qla_target.c static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
tgt              1630 drivers/scsi/qla2xxx/qla_target.c 		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
tgt              1636 drivers/scsi/qla2xxx/qla_target.c 	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
tgt              1639 drivers/scsi/qla2xxx/qla_target.c 	    type, prm, param, param_size, tgt);
tgt              1644 drivers/scsi/qla2xxx/qla_target.c 	spin_lock_irqsave(&tgt->sess_work_lock, flags);
tgt              1645 drivers/scsi/qla2xxx/qla_target.c 	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
tgt              1646 drivers/scsi/qla2xxx/qla_target.c 	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
tgt              1648 drivers/scsi/qla2xxx/qla_target.c 	schedule_work(&tgt->sess_work);
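
qlt_sched_sess_work queues a small descriptor of the deferred operation onto sess_works_list under sess_work_lock and schedules the target's work_struct; the worker later pops entries one at a time with the lock dropped while each is processed. A hedged sketch of that producer/consumer pairing, with invented names (the work_struct is assumed to have been set up with INIT_WORK(&tgt->work, my_work_fn)):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct my_tgt {
	spinlock_t work_lock;
	struct list_head works_list;
	struct work_struct work;
};

struct my_work_item {
	struct list_head entry;
	int type;
};

static int my_sched_work(struct my_tgt *tgt, int type)
{
	struct my_work_item *item;
	unsigned long flags;

	item = kzalloc(sizeof(*item), GFP_ATOMIC);	/* may run from an IRQ path */
	if (!item)
		return -ENOMEM;
	item->type = type;

	spin_lock_irqsave(&tgt->work_lock, flags);
	list_add_tail(&item->entry, &tgt->works_list);
	spin_unlock_irqrestore(&tgt->work_lock, flags);

	schedule_work(&tgt->work);
	return 0;
}

static void my_work_fn(struct work_struct *work)
{
	struct my_tgt *tgt = container_of(work, struct my_tgt, work);
	struct my_work_item *item;
	unsigned long flags;

	spin_lock_irqsave(&tgt->work_lock, flags);
	while (!list_empty(&tgt->works_list)) {
		item = list_first_entry(&tgt->works_list,
					struct my_work_item, entry);
		list_del(&item->entry);
		spin_unlock_irqrestore(&tgt->work_lock, flags);

		/* handle item->type (e.g. abort vs. task management) */
		kfree(item);

		spin_lock_irqsave(&tgt->work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->work_lock, flags);
}
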
tgt              2000 drivers/scsi/qla2xxx/qla_target.c 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
tgt              2004 drivers/scsi/qla2xxx/qla_target.c 		h = btree_lookup64(&tgt->lun_qpair_map, unpacked_lun);
tgt              2006 drivers/scsi/qla2xxx/qla_target.c 			h = &tgt->qphints[0];
tgt              2008 drivers/scsi/qla2xxx/qla_target.c 		h = &tgt->qphints[0];
tgt              2032 drivers/scsi/qla2xxx/qla_target.c 	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, mcmd->unpacked_lun,
tgt              2101 drivers/scsi/qla2xxx/qla_target.c 	if (ha->tgt.tgt_ops->find_cmd_by_tag) {
tgt              2104 drivers/scsi/qla2xxx/qla_target.c 		abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
tgt              2159 drivers/scsi/qla2xxx/qla_target.c 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
tgt              2160 drivers/scsi/qla2xxx/qla_target.c 	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
tgt              2165 drivers/scsi/qla2xxx/qla_target.c 		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
tgt              2171 drivers/scsi/qla2xxx/qla_target.c 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
tgt              2339 drivers/scsi/qla2xxx/qla_target.c 		ha->tgt.tgt_ops->free_mcmd(mcmd);
tgt              2377 drivers/scsi/qla2xxx/qla_target.c 		ha->tgt.tgt_ops->free_mcmd(mcmd);
tgt              2729 drivers/scsi/qla2xxx/qla_target.c 	prm->tgt = cmd->tgt;
tgt              2903 drivers/scsi/qla2xxx/qla_target.c 	scsi_qla_host_t *vha = cmd->tgt->vha;
tgt              2924 drivers/scsi/qla2xxx/qla_target.c 	t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);
tgt              2947 drivers/scsi/qla2xxx/qla_target.c 		!(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
tgt              2964 drivers/scsi/qla2xxx/qla_target.c 		!(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
tgt              3347 drivers/scsi/qla2xxx/qla_target.c 	vha->hw->tgt.tgt_ops->free_cmd(cmd);
tgt              3356 drivers/scsi/qla2xxx/qla_target.c 	struct qla_tgt *tgt = cmd->tgt;
tgt              3364 drivers/scsi/qla2xxx/qla_target.c 	prm.tgt = tgt;
tgt              3381 drivers/scsi/qla2xxx/qla_target.c 		vha->hw->tgt.tgt_ops->handle_data(cmd);
tgt              3516 drivers/scsi/qla2xxx/qla_target.c 		vha->hw->tgt.tgt_ops->handle_data(cmd);
tgt              3522 drivers/scsi/qla2xxx/qla_target.c 			vha->hw->tgt.tgt_ops->free_cmd(cmd);
tgt              3532 drivers/scsi/qla2xxx/qla_target.c 		vha->hw->tgt.tgt_ops->free_cmd(cmd);
tgt              3691 drivers/scsi/qla2xxx/qla_target.c 		vha->hw->tgt.tgt_ops->free_cmd(cmd);
tgt              3705 drivers/scsi/qla2xxx/qla_target.c 	vha->hw->tgt.leak_exchg_thresh_hold =
tgt              3709 drivers/scsi/qla2xxx/qla_target.c 	if (!list_empty(&vha->hw->tgt.q_full_list)) {
tgt              3711 drivers/scsi/qla2xxx/qla_target.c 		list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
tgt              3719 drivers/scsi/qla2xxx/qla_target.c 			vha->hw->tgt.num_qfull_cmds_alloc--;
tgt              3722 drivers/scsi/qla2xxx/qla_target.c 	vha->hw->tgt.num_qfull_cmds_dropped = 0;
tgt              3729 drivers/scsi/qla2xxx/qla_target.c 	total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
tgt              3731 drivers/scsi/qla2xxx/qla_target.c 	if (vha->hw->tgt.leak_exchg_thresh_hold &&
tgt              3732 drivers/scsi/qla2xxx/qla_target.c 	    (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
tgt              3749 drivers/scsi/qla2xxx/qla_target.c 	struct qla_tgt *tgt = cmd->tgt;
tgt              3750 drivers/scsi/qla2xxx/qla_target.c 	struct scsi_qla_host *vha = tgt->vha;
tgt              4024 drivers/scsi/qla2xxx/qla_target.c 		ha->tgt.tgt_ops->handle_data(cmd);
tgt              4043 drivers/scsi/qla2xxx/qla_target.c 	ha->tgt.tgt_ops->free_cmd(cmd);
tgt              4122 drivers/scsi/qla2xxx/qla_target.c 	ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
tgt              4129 drivers/scsi/qla2xxx/qla_target.c 	ha->tgt.tgt_ops->put_sess(sess);
tgt              4146 drivers/scsi/qla2xxx/qla_target.c 	ha->tgt.tgt_ops->put_sess(sess);
tgt              4166 drivers/scsi/qla2xxx/qla_target.c 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
tgt              4172 drivers/scsi/qla2xxx/qla_target.c 	    ha->tgt.num_act_qpairs);
tgt              4174 drivers/scsi/qla2xxx/qla_target.c 	spin_lock_irqsave(&ha->tgt.atio_lock, flags);
tgt              4176 drivers/scsi/qla2xxx/qla_target.c 	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
tgt              4177 drivers/scsi/qla2xxx/qla_target.c 		btree_remove64(&tgt->lun_qpair_map, key);
tgt              4184 drivers/scsi/qla2xxx/qla_target.c 	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
tgt              4191 drivers/scsi/qla2xxx/qla_target.c 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
tgt              4195 drivers/scsi/qla2xxx/qla_target.c 		h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun);
tgt              4205 drivers/scsi/qla2xxx/qla_target.c 				h = qla_qpair_to_hint(tgt, qpair);
tgt              4207 drivers/scsi/qla2xxx/qla_target.c 				rc = btree_insert64(&tgt->lun_qpair_map,
tgt              4225 drivers/scsi/qla2xxx/qla_target.c 					h = qla_qpair_to_hint(tgt, qp);
tgt              4227 drivers/scsi/qla2xxx/qla_target.c 					rc = btree_insert64(&tgt->lun_qpair_map,
tgt              4247 drivers/scsi/qla2xxx/qla_target.c 			h = qla_qpair_to_hint(tgt, qpair);
tgt              4249 drivers/scsi/qla2xxx/qla_target.c 			rc = btree_insert64(&tgt->lun_qpair_map,
tgt              4259 drivers/scsi/qla2xxx/qla_target.c 		h = &tgt->qphints[0];
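
The qla_target excerpts cache the LUN-to-qpair choice in a 64-bit btree from lib/btree, keyed by the unpacked LUN: lookups on the command hot path, an insert the first time a LUN is seen, and bulk removal plus btree_destroy64() at teardown. A minimal sketch of that lib/btree usage with placeholder names and values; GFP_ATOMIC is used because insertion happens in the submission path.

#include <linux/btree.h>
#include <linux/gfp.h>
#include <linux/types.h>

struct my_hint { int qpair_id; };

static struct btree_head64 my_lun_map;

static int my_map_init(void)
{
	return btree_init64(&my_lun_map);
}

static struct my_hint *my_lookup_or_insert(u64 lun, struct my_hint *h)
{
	struct my_hint *cur = btree_lookup64(&my_lun_map, lun);

	if (cur)
		return cur;
	/* first command for this LUN: remember the chosen hint */
	if (btree_insert64(&my_lun_map, lun, h, GFP_ATOMIC))
		return NULL;	/* caller falls back to a default hint */
	return h;
}

static void my_map_destroy(void)
{
	u64 key;
	void *val;

	/* drop all entries, then free the tree's internal nodes */
	btree_for_each_safe64(&my_lun_map, key, val)
		btree_remove64(&my_lun_map, key);
	btree_destroy64(&my_lun_map);
}
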
tgt              4283 drivers/scsi/qla2xxx/qla_target.c 	cmd->tgt = vha->vha_tgt.qla_tgt;
tgt              4309 drivers/scsi/qla2xxx/qla_target.c 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
tgt              4315 drivers/scsi/qla2xxx/qla_target.c 	if (unlikely(tgt->tgt_stop)) {
tgt              4317 drivers/scsi/qla2xxx/qla_target.c 		    "New command while device %p is shutting down\n", tgt);
tgt              4325 drivers/scsi/qla2xxx/qla_target.c 	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
tgt              4353 drivers/scsi/qla2xxx/qla_target.c 		ha->tgt.tgt_ops->put_sess(sess);
tgt              4454 drivers/scsi/qla2xxx/qla_target.c 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
tgt              4455 drivers/scsi/qla2xxx/qla_target.c 	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
tgt              4457 drivers/scsi/qla2xxx/qla_target.c 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
tgt              4497 drivers/scsi/qla2xxx/qla_target.c 	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
tgt              4521 drivers/scsi/qla2xxx/qla_target.c 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
tgt              4522 drivers/scsi/qla2xxx/qla_target.c 	sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
tgt              4523 drivers/scsi/qla2xxx/qla_target.c 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
tgt              4698 drivers/scsi/qla2xxx/qla_target.c 		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
tgt              4701 drivers/scsi/qla2xxx/qla_target.c 		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt              4851 drivers/scsi/qla2xxx/qla_target.c 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
tgt              4913 drivers/scsi/qla2xxx/qla_target.c 			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
tgt              4916 drivers/scsi/qla2xxx/qla_target.c 			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
tgt              4940 drivers/scsi/qla2xxx/qla_target.c 			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
tgt              4953 drivers/scsi/qla2xxx/qla_target.c 				spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
tgt              4979 drivers/scsi/qla2xxx/qla_target.c 				spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
tgt              5015 drivers/scsi/qla2xxx/qla_target.c 			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
tgt              5056 drivers/scsi/qla2xxx/qla_target.c 		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
tgt              5058 drivers/scsi/qla2xxx/qla_target.c 		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
tgt              5091 drivers/scsi/qla2xxx/qla_target.c 		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
tgt              5093 drivers/scsi/qla2xxx/qla_target.c 		if (tgt->link_reinit_iocb_pending) {
tgt              5095 drivers/scsi/qla2xxx/qla_target.c 			    &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
tgt              5096 drivers/scsi/qla2xxx/qla_target.c 			tgt->link_reinit_iocb_pending = 0;
tgt              5157 drivers/scsi/qla2xxx/qla_target.c 		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
tgt              5164 drivers/scsi/qla2xxx/qla_target.c 		if (tgt->link_reinit_iocb_pending) {
tgt              5166 drivers/scsi/qla2xxx/qla_target.c 			    &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
tgt              5168 drivers/scsi/qla2xxx/qla_target.c 		memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
tgt              5169 drivers/scsi/qla2xxx/qla_target.c 		tgt->link_reinit_iocb_pending = 1;
tgt              5280 drivers/scsi/qla2xxx/qla_target.c 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
tgt              5282 drivers/scsi/qla2xxx/qla_target.c 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
tgt              5342 drivers/scsi/qla2xxx/qla_target.c 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
tgt              5350 drivers/scsi/qla2xxx/qla_target.c 	if (unlikely(tgt->tgt_stop)) {
tgt              5352 drivers/scsi/qla2xxx/qla_target.c 			"New command while device %p is shutting down\n", tgt);
tgt              5356 drivers/scsi/qla2xxx/qla_target.c 	if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
tgt              5357 drivers/scsi/qla2xxx/qla_target.c 		vha->hw->tgt.num_qfull_cmds_dropped++;
tgt              5358 drivers/scsi/qla2xxx/qla_target.c 		if (vha->hw->tgt.num_qfull_cmds_dropped >
tgt              5361 drivers/scsi/qla2xxx/qla_target.c 				vha->hw->tgt.num_qfull_cmds_dropped;
tgt              5366 drivers/scsi/qla2xxx/qla_target.c 			vha->hw->tgt.num_qfull_cmds_dropped);
tgt              5372 drivers/scsi/qla2xxx/qla_target.c 	sess = ha->tgt.tgt_ops->find_sess_by_s_id
tgt              5385 drivers/scsi/qla2xxx/qla_target.c 		vha->hw->tgt.num_qfull_cmds_dropped++;
tgt              5386 drivers/scsi/qla2xxx/qla_target.c 		if (vha->hw->tgt.num_qfull_cmds_dropped >
tgt              5389 drivers/scsi/qla2xxx/qla_target.c 				vha->hw->tgt.num_qfull_cmds_dropped;
tgt              5402 drivers/scsi/qla2xxx/qla_target.c 	cmd->tgt = vha->vha_tgt.qla_tgt;
tgt              5416 drivers/scsi/qla2xxx/qla_target.c 	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
tgt              5417 drivers/scsi/qla2xxx/qla_target.c 	list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
tgt              5419 drivers/scsi/qla2xxx/qla_target.c 	vha->hw->tgt.num_qfull_cmds_alloc++;
tgt              5420 drivers/scsi/qla2xxx/qla_target.c 	if (vha->hw->tgt.num_qfull_cmds_alloc >
tgt              5423 drivers/scsi/qla2xxx/qla_target.c 			vha->hw->tgt.num_qfull_cmds_alloc;
tgt              5424 drivers/scsi/qla2xxx/qla_target.c 	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
tgt              5437 drivers/scsi/qla2xxx/qla_target.c 	if (list_empty(&ha->tgt.q_full_list))
tgt              5443 drivers/scsi/qla2xxx/qla_target.c 	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
tgt              5444 drivers/scsi/qla2xxx/qla_target.c 	if (list_empty(&ha->tgt.q_full_list)) {
tgt              5445 drivers/scsi/qla2xxx/qla_target.c 		spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
tgt              5449 drivers/scsi/qla2xxx/qla_target.c 	list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list);
tgt              5450 drivers/scsi/qla2xxx/qla_target.c 	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
tgt              5480 drivers/scsi/qla2xxx/qla_target.c 		vha->hw->tgt.num_qfull_cmds_alloc--;
tgt              5495 drivers/scsi/qla2xxx/qla_target.c 		spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
tgt              5496 drivers/scsi/qla2xxx/qla_target.c 		list_splice(&q_full_list, &vha->hw->tgt.q_full_list);
tgt              5497 drivers/scsi/qla2xxx/qla_target.c 		spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
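
The queue-full excerpts park commands that could not be started on tgt.q_full_list, then splice the whole list onto a local head under q_full_lock so the entries can be re-issued without holding the lock, splicing back anything that still cannot be sent. A hedged sketch of that splice-and-drain idiom with invented names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_cmd { struct list_head cmd_list; };

struct my_hw {
	spinlock_t q_full_lock;
	struct list_head q_full_list;
};

static void my_drain_q_full(struct my_hw *hw)
{
	struct my_cmd *cmd, *tmp;
	unsigned long flags;
	LIST_HEAD(local);

	spin_lock_irqsave(&hw->q_full_lock, flags);
	if (list_empty(&hw->q_full_list)) {
		spin_unlock_irqrestore(&hw->q_full_lock, flags);
		return;
	}
	/* take ownership of all pending commands in one move */
	list_splice_init(&hw->q_full_list, &local);
	spin_unlock_irqrestore(&hw->q_full_lock, flags);

	list_for_each_entry_safe(cmd, tmp, &local, cmd_list) {
		list_del(&cmd->cmd_list);
		/* try to (re)issue cmd here; on failure it could be
		 * spliced back onto q_full_list under the lock */
	}
}
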
tgt              5522 drivers/scsi/qla2xxx/qla_target.c 	if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
tgt              5540 drivers/scsi/qla2xxx/qla_target.c 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
tgt              5544 drivers/scsi/qla2xxx/qla_target.c 	if (unlikely(tgt == NULL)) {
tgt              5554 drivers/scsi/qla2xxx/qla_target.c 	tgt->atio_irq_cmd_count++;
tgt              5577 drivers/scsi/qla2xxx/qla_target.c 				tgt->atio_irq_cmd_count--;
tgt              5646 drivers/scsi/qla2xxx/qla_target.c 	tgt->atio_irq_cmd_count--;
tgt              5725 drivers/scsi/qla2xxx/qla_target.c 				ha->tgt.tgt_ops->free_mcmd(mcmd);
tgt              5736 drivers/scsi/qla2xxx/qla_target.c 			ha->tgt.tgt_ops->free_mcmd(mcmd);
tgt              5739 drivers/scsi/qla2xxx/qla_target.c 		ha->tgt.tgt_ops->free_mcmd(mcmd);
tgt              5748 drivers/scsi/qla2xxx/qla_target.c 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
tgt              5750 drivers/scsi/qla2xxx/qla_target.c 	if (unlikely(tgt == NULL)) {
tgt              5850 drivers/scsi/qla2xxx/qla_target.c 		if (tgt->notify_ack_expected > 0) {
tgt              5857 drivers/scsi/qla2xxx/qla_target.c 			tgt->notify_ack_expected--;
tgt              5879 drivers/scsi/qla2xxx/qla_target.c 		if (tgt->abts_resp_expected > 0) {
tgt              5904 drivers/scsi/qla2xxx/qla_target.c 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
tgt              5907 drivers/scsi/qla2xxx/qla_target.c 	if (!tgt || tgt->tgt_stop || tgt->tgt_stopped)
tgt              5939 drivers/scsi/qla2xxx/qla_target.c 		if (tgt->link_reinit_iocb_pending) {
tgt              5941 drivers/scsi/qla2xxx/qla_target.c 			    (void *)&tgt->link_reinit_iocb,
tgt              5943 drivers/scsi/qla2xxx/qla_target.c 			tgt->link_reinit_iocb_pending = 0;
tgt              6037 drivers/scsi/qla2xxx/qla_target.c 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
tgt              6063 drivers/scsi/qla2xxx/qla_target.c 	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt              6160 drivers/scsi/qla2xxx/qla_target.c static void qlt_abort_work(struct qla_tgt *tgt,
tgt              6163 drivers/scsi/qla2xxx/qla_target.c 	struct scsi_qla_host *vha = tgt->vha;
tgt              6170 drivers/scsi/qla2xxx/qla_target.c 	spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
tgt              6172 drivers/scsi/qla2xxx/qla_target.c 	if (tgt->tgt_stop)
tgt              6177 drivers/scsi/qla2xxx/qla_target.c 	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
tgt              6179 drivers/scsi/qla2xxx/qla_target.c 		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
tgt              6184 drivers/scsi/qla2xxx/qla_target.c 		spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
tgt              6203 drivers/scsi/qla2xxx/qla_target.c 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
tgt              6205 drivers/scsi/qla2xxx/qla_target.c 	ha->tgt.tgt_ops->put_sess(sess);
tgt              6212 drivers/scsi/qla2xxx/qla_target.c 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
tgt              6221 drivers/scsi/qla2xxx/qla_target.c static void qlt_tmr_work(struct qla_tgt *tgt,
tgt              6225 drivers/scsi/qla2xxx/qla_target.c 	struct scsi_qla_host *vha = tgt->vha;
tgt              6235 drivers/scsi/qla2xxx/qla_target.c 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
tgt              6237 drivers/scsi/qla2xxx/qla_target.c 	if (tgt->tgt_stop)
tgt              6241 drivers/scsi/qla2xxx/qla_target.c 	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
tgt              6243 drivers/scsi/qla2xxx/qla_target.c 		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
tgt              6248 drivers/scsi/qla2xxx/qla_target.c 		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
tgt              6270 drivers/scsi/qla2xxx/qla_target.c 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
tgt              6272 drivers/scsi/qla2xxx/qla_target.c 	ha->tgt.tgt_ops->put_sess(sess);
tgt              6279 drivers/scsi/qla2xxx/qla_target.c 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
tgt              6286 drivers/scsi/qla2xxx/qla_target.c 	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
tgt              6287 drivers/scsi/qla2xxx/qla_target.c 	struct scsi_qla_host *vha = tgt->vha;
tgt              6290 drivers/scsi/qla2xxx/qla_target.c 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
tgt              6292 drivers/scsi/qla2xxx/qla_target.c 	spin_lock_irqsave(&tgt->sess_work_lock, flags);
tgt              6293 drivers/scsi/qla2xxx/qla_target.c 	while (!list_empty(&tgt->sess_works_list)) {
tgt              6295 drivers/scsi/qla2xxx/qla_target.c 		    tgt->sess_works_list.next, typeof(*prm),
tgt              6304 drivers/scsi/qla2xxx/qla_target.c 		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
tgt              6308 drivers/scsi/qla2xxx/qla_target.c 			qlt_abort_work(tgt, prm);
tgt              6311 drivers/scsi/qla2xxx/qla_target.c 			qlt_tmr_work(tgt, prm);
tgt              6318 drivers/scsi/qla2xxx/qla_target.c 		spin_lock_irqsave(&tgt->sess_work_lock, flags);
tgt              6322 drivers/scsi/qla2xxx/qla_target.c 	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
tgt              6328 drivers/scsi/qla2xxx/qla_target.c 	struct qla_tgt *tgt;
tgt              6346 drivers/scsi/qla2xxx/qla_target.c 	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
tgt              6347 drivers/scsi/qla2xxx/qla_target.c 	if (!tgt) {
tgt              6353 drivers/scsi/qla2xxx/qla_target.c 	tgt->qphints = kcalloc(ha->max_qpairs + 1,
tgt              6356 drivers/scsi/qla2xxx/qla_target.c 	if (!tgt->qphints) {
tgt              6357 drivers/scsi/qla2xxx/qla_target.c 		kfree(tgt);
tgt              6366 drivers/scsi/qla2xxx/qla_target.c 	rc = btree_init64(&tgt->lun_qpair_map);
tgt              6368 drivers/scsi/qla2xxx/qla_target.c 		kfree(tgt->qphints);
tgt              6369 drivers/scsi/qla2xxx/qla_target.c 		kfree(tgt);
tgt              6374 drivers/scsi/qla2xxx/qla_target.c 	h = &tgt->qphints[0];
tgt              6385 drivers/scsi/qla2xxx/qla_target.c 		h = &tgt->qphints[i + 1];
tgt              6396 drivers/scsi/qla2xxx/qla_target.c 	tgt->ha = ha;
tgt              6397 drivers/scsi/qla2xxx/qla_target.c 	tgt->vha = base_vha;
tgt              6398 drivers/scsi/qla2xxx/qla_target.c 	init_waitqueue_head(&tgt->waitQ);
tgt              6399 drivers/scsi/qla2xxx/qla_target.c 	INIT_LIST_HEAD(&tgt->del_sess_list);
tgt              6400 drivers/scsi/qla2xxx/qla_target.c 	spin_lock_init(&tgt->sess_work_lock);
tgt              6401 drivers/scsi/qla2xxx/qla_target.c 	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
tgt              6402 drivers/scsi/qla2xxx/qla_target.c 	INIT_LIST_HEAD(&tgt->sess_works_list);
tgt              6403 drivers/scsi/qla2xxx/qla_target.c 	atomic_set(&tgt->tgt_global_resets_count, 0);
tgt              6405 drivers/scsi/qla2xxx/qla_target.c 	base_vha->vha_tgt.qla_tgt = tgt;
tgt              6411 drivers/scsi/qla2xxx/qla_target.c 	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
tgt              6414 drivers/scsi/qla2xxx/qla_target.c 	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
tgt              6417 drivers/scsi/qla2xxx/qla_target.c 	if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
tgt              6418 drivers/scsi/qla2xxx/qla_target.c 		ha->tgt.tgt_ops->add_target(base_vha);
tgt              6449 drivers/scsi/qla2xxx/qla_target.c 	btree_for_each_safe32(&ha->tgt.host_map, key, node)
tgt              6450 drivers/scsi/qla2xxx/qla_target.c 		btree_remove32(&ha->tgt.host_map, key);
tgt              6452 drivers/scsi/qla2xxx/qla_target.c 	btree_destroy32(&ha->tgt.host_map);
tgt              6477 drivers/scsi/qla2xxx/qla_target.c 	struct qla_tgt *tgt;
tgt              6486 drivers/scsi/qla2xxx/qla_target.c 	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
tgt              6487 drivers/scsi/qla2xxx/qla_target.c 		vha = tgt->vha;
tgt              6507 drivers/scsi/qla2xxx/qla_target.c 		if (tgt->tgt_stop) {
tgt              6553 drivers/scsi/qla2xxx/qla_target.c 	ha->tgt.tgt_ops = NULL;
tgt              6608 drivers/scsi/qla2xxx/qla_target.c 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
tgt              6612 drivers/scsi/qla2xxx/qla_target.c 	if (!tgt) {
tgt              6622 drivers/scsi/qla2xxx/qla_target.c 	if (ha->tgt.num_act_qpairs > ha->max_qpairs)
tgt              6623 drivers/scsi/qla2xxx/qla_target.c 		ha->tgt.num_act_qpairs = ha->max_qpairs;
tgt              6625 drivers/scsi/qla2xxx/qla_target.c 	tgt->tgt_stopped = 0;
tgt              6653 drivers/scsi/qla2xxx/qla_target.c 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
tgt              6656 drivers/scsi/qla2xxx/qla_target.c 	if (!tgt) {
tgt              6696 drivers/scsi/qla2xxx/qla_target.c 	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
tgt              6732 drivers/scsi/qla2xxx/qla_target.c 	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
tgt              6737 drivers/scsi/qla2xxx/qla_target.c 	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
tgt              6758 drivers/scsi/qla2xxx/qla_target.c 	while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
tgt              6759 drivers/scsi/qla2xxx/qla_target.c 	    fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
tgt              6760 drivers/scsi/qla2xxx/qla_target.c 		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
tgt              6763 drivers/scsi/qla2xxx/qla_target.c 		if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
tgt              6784 drivers/scsi/qla2xxx/qla_target.c 			ha->tgt.atio_ring_index++;
tgt              6785 drivers/scsi/qla2xxx/qla_target.c 			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
tgt              6786 drivers/scsi/qla2xxx/qla_target.c 				ha->tgt.atio_ring_index = 0;
tgt              6787 drivers/scsi/qla2xxx/qla_target.c 				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
tgt              6789 drivers/scsi/qla2xxx/qla_target.c 				ha->tgt.atio_ring_ptr++;
tgt              6792 drivers/scsi/qla2xxx/qla_target.c 			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
tgt              6798 drivers/scsi/qla2xxx/qla_target.c 	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
tgt              6850 drivers/scsi/qla2xxx/qla_target.c 		if (!ha->tgt.saved_set) {
tgt              6852 drivers/scsi/qla2xxx/qla_target.c 			ha->tgt.saved_exchange_count = nv->exchange_count;
tgt              6853 drivers/scsi/qla2xxx/qla_target.c 			ha->tgt.saved_firmware_options_1 =
tgt              6855 drivers/scsi/qla2xxx/qla_target.c 			ha->tgt.saved_firmware_options_2 =
tgt              6857 drivers/scsi/qla2xxx/qla_target.c 			ha->tgt.saved_firmware_options_3 =
tgt              6859 drivers/scsi/qla2xxx/qla_target.c 			ha->tgt.saved_set = 1;
tgt              6905 drivers/scsi/qla2xxx/qla_target.c 		if (ha->tgt.saved_set) {
tgt              6906 drivers/scsi/qla2xxx/qla_target.c 			nv->exchange_count = ha->tgt.saved_exchange_count;
tgt              6908 drivers/scsi/qla2xxx/qla_target.c 			    ha->tgt.saved_firmware_options_1;
tgt              6910 drivers/scsi/qla2xxx/qla_target.c 			    ha->tgt.saved_firmware_options_2;
tgt              6912 drivers/scsi/qla2xxx/qla_target.c 			    ha->tgt.saved_firmware_options_3;
tgt              6940 drivers/scsi/qla2xxx/qla_target.c 	if (ha->tgt.node_name_set) {
tgt              6941 drivers/scsi/qla2xxx/qla_target.c 		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
tgt              6956 drivers/scsi/qla2xxx/qla_target.c 		if (!ha->tgt.saved_set) {
tgt              6958 drivers/scsi/qla2xxx/qla_target.c 			ha->tgt.saved_exchange_count = nv->exchange_count;
tgt              6959 drivers/scsi/qla2xxx/qla_target.c 			ha->tgt.saved_firmware_options_1 =
tgt              6961 drivers/scsi/qla2xxx/qla_target.c 			ha->tgt.saved_firmware_options_2 =
tgt              6963 drivers/scsi/qla2xxx/qla_target.c 			ha->tgt.saved_firmware_options_3 =
tgt              6965 drivers/scsi/qla2xxx/qla_target.c 			ha->tgt.saved_set = 1;
tgt              7008 drivers/scsi/qla2xxx/qla_target.c 		if (ha->tgt.saved_set) {
tgt              7009 drivers/scsi/qla2xxx/qla_target.c 			nv->exchange_count = ha->tgt.saved_exchange_count;
tgt              7011 drivers/scsi/qla2xxx/qla_target.c 			    ha->tgt.saved_firmware_options_1;
tgt              7013 drivers/scsi/qla2xxx/qla_target.c 			    ha->tgt.saved_firmware_options_2;
tgt              7015 drivers/scsi/qla2xxx/qla_target.c 			    ha->tgt.saved_firmware_options_3;
tgt              7043 drivers/scsi/qla2xxx/qla_target.c 	if (ha->tgt.node_name_set) {
tgt              7044 drivers/scsi/qla2xxx/qla_target.c 		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
tgt              7098 drivers/scsi/qla2xxx/qla_target.c 	rc = btree_init32(&ha->tgt.host_map);
tgt              7118 drivers/scsi/qla2xxx/qla_target.c 	spin_lock_irqsave(&ha->tgt.atio_lock, flags);
tgt              7122 drivers/scsi/qla2xxx/qla_target.c 	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
tgt              7140 drivers/scsi/qla2xxx/qla_target.c 	spin_lock_irqsave(&ha->tgt.atio_lock, flags);
tgt              7142 drivers/scsi/qla2xxx/qla_target.c 	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
tgt              7182 drivers/scsi/qla2xxx/qla_target.c 	ha->tgt.tgt_vp_map = kcalloc(MAX_MULTI_ID_FABRIC,
tgt              7185 drivers/scsi/qla2xxx/qla_target.c 	if (!ha->tgt.tgt_vp_map)
tgt              7188 drivers/scsi/qla2xxx/qla_target.c 	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
tgt              7189 drivers/scsi/qla2xxx/qla_target.c 	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
tgt              7190 drivers/scsi/qla2xxx/qla_target.c 	    &ha->tgt.atio_dma, GFP_KERNEL);
tgt              7191 drivers/scsi/qla2xxx/qla_target.c 	if (!ha->tgt.atio_ring) {
tgt              7192 drivers/scsi/qla2xxx/qla_target.c 		kfree(ha->tgt.tgt_vp_map);
tgt              7204 drivers/scsi/qla2xxx/qla_target.c 	if (ha->tgt.atio_ring) {
tgt              7205 drivers/scsi/qla2xxx/qla_target.c 		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
tgt              7206 drivers/scsi/qla2xxx/qla_target.c 		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
tgt              7207 drivers/scsi/qla2xxx/qla_target.c 		    ha->tgt.atio_dma);
tgt              7209 drivers/scsi/qla2xxx/qla_target.c 	ha->tgt.atio_ring = NULL;
tgt              7210 drivers/scsi/qla2xxx/qla_target.c 	ha->tgt.atio_dma = 0;
tgt              7211 drivers/scsi/qla2xxx/qla_target.c 	kfree(ha->tgt.tgt_vp_map);
tgt              7212 drivers/scsi/qla2xxx/qla_target.c 	ha->tgt.tgt_vp_map = NULL;
tgt              7230 drivers/scsi/qla2xxx/qla_target.c 		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
tgt              7233 drivers/scsi/qla2xxx/qla_target.c 		slot = btree_lookup32(&vha->hw->tgt.host_map, key);
tgt              7237 drivers/scsi/qla2xxx/qla_target.c 			rc = btree_insert32(&vha->hw->tgt.host_map,
tgt              7247 drivers/scsi/qla2xxx/qla_target.c 		btree_update32(&vha->hw->tgt.host_map, key, vha);
tgt              7250 drivers/scsi/qla2xxx/qla_target.c 		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
tgt              7255 drivers/scsi/qla2xxx/qla_target.c 		slot = btree_lookup32(&vha->hw->tgt.host_map, key);
tgt              7257 drivers/scsi/qla2xxx/qla_target.c 			btree_remove32(&vha->hw->tgt.host_map, key);
tgt               895 drivers/scsi/qla2xxx/qla_target.h 	struct qla_tgt *tgt;	/* to save extra sess dereferences */
tgt               968 drivers/scsi/qla2xxx/qla_target.h 	struct qla_tgt *tgt;
tgt               360 drivers/scsi/qla2xxx/qla_tmpl.c 		struct atio *atr = ha->tgt.atio_ring;
tgt               363 drivers/scsi/qla2xxx/qla_tmpl.c 			length = ha->tgt.atio_q_length;
tgt               654 drivers/scsi/qla2xxx/qla_tmpl.c 		struct atio *atr = ha->tgt.atio_ring_ptr;
tgt               659 drivers/scsi/qla2xxx/qla_tmpl.c 			qla27xx_insert32(ha->tgt.atio_q_in ?
tgt               660 drivers/scsi/qla2xxx/qla_tmpl.c 			    readl(ha->tgt.atio_q_in) : 0, buf, len);
tgt               349 drivers/scsi/qla2xxx/tcm_qla2xxx.c 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
tgt               351 drivers/scsi/qla2xxx/tcm_qla2xxx.c 	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
tgt               639 drivers/scsi/qla2xxx/tcm_qla2xxx.c 		vha->hw->tgt.tgt_ops->free_cmd(cmd);
tgt               681 drivers/scsi/qla2xxx/tcm_qla2xxx.c 		vha->hw->tgt.tgt_ops->free_cmd(cmd);
tgt              1366 drivers/scsi/qla2xxx/tcm_qla2xxx.c 	struct qla_tgt *tgt = sess->tgt;
tgt              1367 drivers/scsi/qla2xxx/tcm_qla2xxx.c 	struct qla_hw_data *ha = tgt->ha;
tgt              1410 drivers/scsi/qla2xxx/tcm_qla2xxx.c 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
tgt              1415 drivers/scsi/qla2xxx/tcm_qla2xxx.c 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
tgt              1474 drivers/scsi/qla2xxx/tcm_qla2xxx.c 	struct qla_tgt *tgt = sess->tgt;
tgt              1475 drivers/scsi/qla2xxx/tcm_qla2xxx.c 	struct qla_hw_data *ha = tgt->ha;
tgt              1600 drivers/scsi/qla2xxx/tcm_qla2xxx.c 	ha->tgt.tgt_ops = &tcm_qla2xxx_template;
tgt               980 drivers/scsi/qlogicpti.c 	int tgt = sdev->id;
tgt               986 drivers/scsi/qlogicpti.c 		qpti->dev_param[tgt].device_flags |= 0x10;
tgt               988 drivers/scsi/qlogicpti.c 		qpti->dev_param[tgt].synchronous_offset = 0;
tgt               989 drivers/scsi/qlogicpti.c 		qpti->dev_param[tgt].synchronous_period = 0;
tgt               993 drivers/scsi/qlogicpti.c 		qpti->dev_param[tgt].device_flags |= 0x20;
tgt               996 drivers/scsi/qlogicpti.c 	param[1] = (tgt << 8);
tgt               997 drivers/scsi/qlogicpti.c 	param[2] = (qpti->dev_param[tgt].device_flags << 8);
tgt               998 drivers/scsi/qlogicpti.c 	if (qpti->dev_param[tgt].device_flags & 0x10) {
tgt               999 drivers/scsi/qlogicpti.c 		param[3] = (qpti->dev_param[tgt].synchronous_offset << 8) |
tgt              1000 drivers/scsi/qlogicpti.c 			qpti->dev_param[tgt].synchronous_period;
tgt               166 drivers/scsi/snic/snic_disc.c 	struct snic_tgt *tgt = container_of(work, struct snic_tgt, scan_work);
tgt               167 drivers/scsi/snic/snic_disc.c 	struct Scsi_Host *shost = dev_to_shost(&tgt->dev);
tgt               170 drivers/scsi/snic/snic_disc.c 	SNIC_HOST_INFO(shost, "Scanning Target id 0x%x\n", tgt->id);
tgt               171 drivers/scsi/snic/snic_disc.c 	scsi_scan_target(&tgt->dev,
tgt               172 drivers/scsi/snic/snic_disc.c 			 tgt->channel,
tgt               173 drivers/scsi/snic/snic_disc.c 			 tgt->scsi_tgt_id,
tgt               178 drivers/scsi/snic/snic_disc.c 	tgt->flags &= ~SNIC_TGT_SCAN_PENDING;
tgt               189 drivers/scsi/snic/snic_disc.c 	struct snic_tgt *tgt = NULL;
tgt               192 drivers/scsi/snic/snic_disc.c 		tgt = list_entry(cur, struct snic_tgt, list);
tgt               193 drivers/scsi/snic/snic_disc.c 		if (tgt->id == le32_to_cpu(tgtid->tgt_id))
tgt               194 drivers/scsi/snic/snic_disc.c 			return tgt;
tgt               195 drivers/scsi/snic/snic_disc.c 		tgt = NULL;
tgt               198 drivers/scsi/snic/snic_disc.c 	return tgt;
tgt               207 drivers/scsi/snic/snic_disc.c 	struct snic_tgt *tgt = dev_to_tgt(dev);
tgt               209 drivers/scsi/snic/snic_disc.c 	SNIC_HOST_INFO(snic_tgt_to_shost(tgt),
tgt               211 drivers/scsi/snic/snic_disc.c 		       tgt->id,
tgt               214 drivers/scsi/snic/snic_disc.c 	SNIC_BUG_ON(!list_empty(&tgt->list));
tgt               215 drivers/scsi/snic/snic_disc.c 	kfree(tgt);
tgt               224 drivers/scsi/snic/snic_disc.c 	struct snic_tgt *tgt = container_of(work, struct snic_tgt, del_work);
tgt               225 drivers/scsi/snic/snic_disc.c 	struct Scsi_Host *shost = snic_tgt_to_shost(tgt);
tgt               227 drivers/scsi/snic/snic_disc.c 	if (tgt->flags & SNIC_TGT_SCAN_PENDING)
tgt               231 drivers/scsi/snic/snic_disc.c 	scsi_target_block(&tgt->dev);
tgt               234 drivers/scsi/snic/snic_disc.c 	snic_tgt_scsi_abort_io(tgt);
tgt               237 drivers/scsi/snic/snic_disc.c 	scsi_target_unblock(&tgt->dev, SDEV_TRANSPORT_OFFLINE);
tgt               240 drivers/scsi/snic/snic_disc.c 	scsi_remove_target(&tgt->dev);  /* ?? */
tgt               241 drivers/scsi/snic/snic_disc.c 	device_del(&tgt->dev);
tgt               242 drivers/scsi/snic/snic_disc.c 	put_device(&tgt->dev);
tgt               251 drivers/scsi/snic/snic_disc.c 	struct snic_tgt *tgt = NULL;
tgt               255 drivers/scsi/snic/snic_disc.c 	tgt = snic_tgt_lookup(snic, tgtid);
tgt               256 drivers/scsi/snic/snic_disc.c 	if (tgt) {
tgt               258 drivers/scsi/snic/snic_disc.c 		return tgt;
tgt               261 drivers/scsi/snic/snic_disc.c 	tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
tgt               262 drivers/scsi/snic/snic_disc.c 	if (!tgt) {
tgt               266 drivers/scsi/snic/snic_disc.c 		return tgt;
tgt               269 drivers/scsi/snic/snic_disc.c 	INIT_LIST_HEAD(&tgt->list);
tgt               270 drivers/scsi/snic/snic_disc.c 	tgt->id = le32_to_cpu(tgtid->tgt_id);
tgt               271 drivers/scsi/snic/snic_disc.c 	tgt->channel = 0;
tgt               274 drivers/scsi/snic/snic_disc.c 	tgt->tdata.typ = le16_to_cpu(tgtid->tgt_type);
tgt               279 drivers/scsi/snic/snic_disc.c 	tgt->tdata.disc_id = 0;
tgt               280 drivers/scsi/snic/snic_disc.c 	tgt->state = SNIC_TGT_STAT_INIT;
tgt               281 drivers/scsi/snic/snic_disc.c 	device_initialize(&tgt->dev);
tgt               282 drivers/scsi/snic/snic_disc.c 	tgt->dev.parent = get_device(&snic->shost->shost_gendev);
tgt               283 drivers/scsi/snic/snic_disc.c 	tgt->dev.release = snic_tgt_dev_release;
tgt               284 drivers/scsi/snic/snic_disc.c 	INIT_WORK(&tgt->scan_work, snic_scsi_scan_tgt);
tgt               285 drivers/scsi/snic/snic_disc.c 	INIT_WORK(&tgt->del_work, snic_tgt_del);
tgt               286 drivers/scsi/snic/snic_disc.c 	switch (tgt->tdata.typ) {
tgt               288 drivers/scsi/snic/snic_disc.c 		dev_set_name(&tgt->dev, "snic_das_tgt:%d:%d-%d",
tgt               289 drivers/scsi/snic/snic_disc.c 			     snic->shost->host_no, tgt->channel, tgt->id);
tgt               293 drivers/scsi/snic/snic_disc.c 		dev_set_name(&tgt->dev, "snic_san_tgt:%d:%d-%d",
tgt               294 drivers/scsi/snic/snic_disc.c 			     snic->shost->host_no, tgt->channel, tgt->id);
tgt               299 drivers/scsi/snic/snic_disc.c 		dev_set_name(&tgt->dev, "snic_das_tgt:%d:%d-%d",
tgt               300 drivers/scsi/snic/snic_disc.c 			     snic->shost->host_no, tgt->channel, tgt->id);
tgt               305 drivers/scsi/snic/snic_disc.c 	list_add_tail(&tgt->list, &snic->disc.tgt_list);
tgt               306 drivers/scsi/snic/snic_disc.c 	tgt->scsi_tgt_id = snic->disc.nxt_tgt_id++;
tgt               307 drivers/scsi/snic/snic_disc.c 	tgt->state = SNIC_TGT_STAT_ONLINE;
tgt               312 drivers/scsi/snic/snic_disc.c 		       tgt->id, snic_tgt_type_to_str(tgt->tdata.typ));
tgt               314 drivers/scsi/snic/snic_disc.c 	ret = device_add(&tgt->dev);
tgt               321 drivers/scsi/snic/snic_disc.c 		kfree(tgt);
tgt               322 drivers/scsi/snic/snic_disc.c 		tgt = NULL;
tgt               324 drivers/scsi/snic/snic_disc.c 		return tgt;
tgt               327 drivers/scsi/snic/snic_disc.c 	SNIC_HOST_INFO(snic->shost, "Scanning %s.\n", dev_name(&tgt->dev));
tgt               329 drivers/scsi/snic/snic_disc.c 	scsi_queue_work(snic->shost, &tgt->scan_work);
tgt               331 drivers/scsi/snic/snic_disc.c 	return tgt;
tgt               340 drivers/scsi/snic/snic_disc.c 	struct snic_tgt *tgt = NULL;
tgt               374 drivers/scsi/snic/snic_disc.c 		tgt = snic_tgt_create(snic, &tgtid[i]);
tgt               375 drivers/scsi/snic/snic_disc.c 		if (!tgt) {
tgt               544 drivers/scsi/snic/snic_disc.c 	struct snic_tgt *tgt = NULL;
tgt               554 drivers/scsi/snic/snic_disc.c 		tgt = list_entry(cur, struct snic_tgt, list);
tgt               555 drivers/scsi/snic/snic_disc.c 		tgt->state = SNIC_TGT_STAT_DEL;
tgt               556 drivers/scsi/snic/snic_disc.c 		list_del_init(&tgt->list);
tgt               557 drivers/scsi/snic/snic_disc.c 		SNIC_HOST_INFO(snic->shost, "Tgt %d q'ing for del\n", tgt->id);
tgt               558 drivers/scsi/snic/snic_disc.c 		queue_work(snic_glob->event_q, &tgt->del_work);
tgt               559 drivers/scsi/snic/snic_disc.c 		tgt = NULL;
tgt               114 drivers/scsi/snic/snic_disc.h snic_tgt_chkready(struct snic_tgt *tgt)
tgt               116 drivers/scsi/snic/snic_disc.h 	if (tgt->state == SNIC_TGT_STAT_ONLINE)
tgt                65 drivers/scsi/snic/snic_main.c 	struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));
tgt                67 drivers/scsi/snic/snic_main.c 	if (!tgt || snic_tgt_chkready(tgt))
tgt               241 drivers/scsi/snic/snic_scsi.c 		      struct snic_tgt *tgt,
tgt               273 drivers/scsi/snic/snic_scsi.c 	rqi->tgt_id = tgt->id;
tgt               335 drivers/scsi/snic/snic_scsi.c 	struct snic_tgt *tgt = NULL;
tgt               339 drivers/scsi/snic/snic_scsi.c 	tgt = starget_to_tgt(scsi_target(sc->device));
tgt               340 drivers/scsi/snic/snic_scsi.c 	ret = snic_tgt_chkready(tgt);
tgt               342 drivers/scsi/snic/snic_scsi.c 		SNIC_HOST_ERR(shost, "Tgt %p id %d Not Ready.\n", tgt, tgt->id);
tgt               361 drivers/scsi/snic/snic_scsi.c 	ret = snic_issue_scsi_req(snic, tgt, sc);
tgt              1514 drivers/scsi/snic/snic_scsi.c 	struct snic_tgt *tgt = NULL;
tgt              1520 drivers/scsi/snic/snic_scsi.c 	tgt = starget_to_tgt(scsi_target(sc->device));
tgt              1521 drivers/scsi/snic/snic_scsi.c 	if ((snic_tgt_chkready(tgt) != 0) && (tgt->tdata.typ == SNIC_TGT_SAN))
tgt              1734 drivers/scsi/snic/snic_scsi.c 	struct snic_tgt *tgt = NULL;
tgt              1799 drivers/scsi/snic/snic_scsi.c 	tgt = starget_to_tgt(scsi_target(sc->device));
tgt              1800 drivers/scsi/snic/snic_scsi.c 	if ((snic_tgt_chkready(tgt) != 0) && (tgt->tdata.typ == SNIC_TGT_SAN))
tgt              2107 drivers/scsi/snic/snic_scsi.c 	struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));
tgt              2109 drivers/scsi/snic/snic_scsi.c 	if (tgt->tdata.typ == SNIC_TGT_DAS)
tgt              2616 drivers/scsi/snic/snic_scsi.c snic_tgt_scsi_abort_io(struct snic_tgt *tgt)
tgt              2625 drivers/scsi/snic/snic_scsi.c 	if (!tgt)
tgt              2628 drivers/scsi/snic/snic_scsi.c 	snic = shost_priv(snic_tgt_to_shost(tgt));
tgt              2631 drivers/scsi/snic/snic_scsi.c 	if (tgt->tdata.typ == SNIC_TGT_DAS)
tgt              2648 drivers/scsi/snic/snic_scsi.c 		if (sc_tgt != tgt) {
tgt               976 drivers/scsi/xen-scsifront.c 	unsigned int hst, chn, tgt, lun;
tgt               998 drivers/scsi/xen-scsifront.c 				   "%u:%u:%u:%u", &hst, &chn, &tgt, &lun);
tgt              1015 drivers/scsi/xen-scsifront.c 			if (scsi_add_device(info->host, chn, tgt, lun)) {
tgt              1029 drivers/scsi/xen-scsifront.c 			sdev = scsi_device_lookup(info->host, chn, tgt, lun);
tgt               181 drivers/vfio/pci/vfio_pci_nvlink2.c 		.tgt = data->gpu_tgt
tgt               214 drivers/vfio/pci/vfio_pci_nvlink2.c 	u64 tgt = 0;
tgt               245 drivers/vfio/pci/vfio_pci_nvlink2.c 	if (of_property_read_u64(npu_node, "ibm,device-tgt-addr", &tgt)) {
tgt               255 drivers/vfio/pci/vfio_pci_nvlink2.c 	data->gpu_tgt = tgt;
tgt               368 drivers/vfio/pci/vfio_pci_nvlink2.c 		.tgt = data->gpu_tgt
tgt               401 drivers/vfio/pci/vfio_pci_nvlink2.c 	u64 tgt = 0;
tgt               429 drivers/vfio/pci/vfio_pci_nvlink2.c 	if (of_property_read_u64(npu_node, "ibm,device-tgt-addr", &tgt)) {
tgt               444 drivers/vfio/pci/vfio_pci_nvlink2.c 	data->gpu_tgt = tgt;
tgt                75 drivers/xen/xen-scsiback.c 	unsigned int tgt;		/* target  */
tgt               637 drivers/xen/xen-scsiback.c 		    (entry->v.tgt == v->tgt) &&
tgt               700 drivers/xen/xen-scsiback.c 	vir.tgt = ring_req->id;
tgt               706 drivers/xen/xen-scsiback.c 			 vir.chn, vir.tgt, vir.lun);
tgt               882 drivers/xen/xen-scsiback.c 		    (entry->v.tgt == v->tgt) &&
tgt              1088 drivers/xen/xen-scsiback.c 			   &vir.hst, &vir.chn, &vir.tgt, &vir.lun);
tgt               162 fs/cifs/cifssmb.c 		const char *tgt = dfs_cache_get_tgt_name(it);
tgt               164 fs/cifs/cifssmb.c 		extract_unc_hostname(tgt, &dfs_host, &dfs_host_len);
tgt               175 fs/cifs/cifssmb.c 		scnprintf(tree, MAX_TREE_SIZE, "\\%s", tgt);
tgt              4467 fs/cifs/connect.c 	const char *tgt = dfs_cache_get_tgt_name(tgt_it);
tgt              4468 fs/cifs/connect.c 	int len = strlen(tgt) + 2;
tgt              4474 fs/cifs/connect.c 	scnprintf(new_unc, len, "\\%s", tgt);
tgt               727 fs/cifs/dfs_cache.c 		     struct dfs_info3_param *ref, const char *tgt)
tgt               741 fs/cifs/dfs_cache.c 	ref->node_name = kstrndup(tgt, strlen(tgt), GFP_KERNEL);
tgt               196 fs/cifs/smb2pdu.c 		const char *tgt = dfs_cache_get_tgt_name(it);
tgt               198 fs/cifs/smb2pdu.c 		extract_unc_hostname(tgt, &dfs_host, &dfs_host_len);
tgt               209 fs/cifs/smb2pdu.c 		scnprintf(tree, MAX_TREE_SIZE, "\\%s", tgt);
tgt              3523 include/linux/skbuff.h int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
tgt                46 include/uapi/linux/netfilter_arp/arp_tables.h 	struct in_addr src, tgt;
tgt               401 include/uapi/linux/vfio.h 	__u64 tgt;
tgt              1563 kernel/locking/lockdep.c 			     struct held_lock *tgt,
tgt              1567 kernel/locking/lockdep.c 	struct lock_class *target = hlock_class(tgt);
tgt              3299 net/core/skbuff.c int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
tgt              3308 net/core/skbuff.c 	if (skb_zcopy(tgt) || skb_zcopy(skb))
tgt              3313 net/core/skbuff.c 	to = skb_shinfo(tgt)->nr_frags;
tgt              3320 net/core/skbuff.c 	    !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
tgt              3329 net/core/skbuff.c 			    skb_prepare_for_shift(tgt))
tgt              3334 net/core/skbuff.c 			fragto = &skb_shinfo(tgt)->frags[merge];
tgt              3351 net/core/skbuff.c 	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
tgt              3359 net/core/skbuff.c 		fragto = &skb_shinfo(tgt)->frags[to];
tgt              3383 net/core/skbuff.c 	skb_shinfo(tgt)->nr_frags = to;
tgt              3387 net/core/skbuff.c 		fragto = &skb_shinfo(tgt)->frags[merge];
tgt              3405 net/core/skbuff.c 	tgt->ip_summed = CHECKSUM_PARTIAL;
tgt              3412 net/core/skbuff.c 	tgt->len += shiftlen;
tgt              3413 net/core/skbuff.c 	tgt->data_len += shiftlen;
tgt              3414 net/core/skbuff.c 	tgt->truesize += shiftlen;
tgt               128 net/ipv4/netfilter/arp_tables.c 		    (tgt_ipaddr & arpinfo->tmsk.s_addr) != arpinfo->tgt.s_addr))
tgt               118 net/smc/smc_cdc.h static inline void smc_curs_copy(union smc_host_cursor *tgt,
tgt               126 net/smc/smc_cdc.h 	tgt->acurs = src->acurs;
tgt               129 net/smc/smc_cdc.h 	atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
tgt               133 net/smc/smc_cdc.h static inline void smc_curs_copy_net(union smc_cdc_cursor *tgt,
tgt               141 net/smc/smc_cdc.h 	tgt->acurs = src->acurs;
tgt               144 net/smc/smc_cdc.h 	atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
tgt               148 net/smc/smc_cdc.h static inline void smcd_curs_copy(union smcd_cdc_cursor *tgt,
tgt               156 net/smc/smc_cdc.h 	tgt->acurs = src->acurs;
tgt               159 net/smc/smc_cdc.h 	atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
tgt              1602 tools/include/nolibc/nolibc.h int sys_mount(const char *src, const char *tgt, const char *fst,
tgt              1605 tools/include/nolibc/nolibc.h 	return my_syscall5(__NR_mount, src, tgt, fst, flags, data);
tgt              2024 tools/include/nolibc/nolibc.h int mount(const char *src, const char *tgt,
tgt              2028 tools/include/nolibc/nolibc.h 	int ret = sys_mount(src, tgt, fst, flags, data);
tgt                75 tools/perf/util/probe-file.h static inline struct probe_cache *probe_cache__new(const char *tgt __maybe_unused, struct nsinfo *nsi __maybe_unused)