lgr               668 fs/nfs/blocklayout/blocklayout.c bl_alloc_lseg(struct pnfs_layout_hdr *lo, struct nfs4_layoutget_res *lgr,
lgr               672 fs/nfs/blocklayout/blocklayout.c 		.mode = lgr->range.iomode,
lgr               673 fs/nfs/blocklayout/blocklayout.c 		.start = lgr->range.offset >> SECTOR_SHIFT,
lgr               674 fs/nfs/blocklayout/blocklayout.c 		.inval = lgr->range.offset >> SECTOR_SHIFT,
lgr               675 fs/nfs/blocklayout/blocklayout.c 		.cowread = lgr->range.offset >> SECTOR_SHIFT,
lgr               699 fs/nfs/blocklayout/blocklayout.c 			lgr->layoutp->pages, lgr->layoutp->len);
lgr               720 fs/nfs/blocklayout/blocklayout.c 	if (lgr->range.offset + lgr->range.length !=
lgr               600 fs/nfs/filelayout/filelayout.c 			struct nfs4_layoutget_res *lgr,
lgr               608 fs/nfs/filelayout/filelayout.c 	if (lgr->range.offset != 0 ||
lgr               609 fs/nfs/filelayout/filelayout.c 	    lgr->range.length != NFS4_MAX_UINT64) {
lgr               615 fs/nfs/filelayout/filelayout.c 	if (fl->pattern_offset > lgr->range.offset) {
lgr               651 fs/nfs/filelayout/filelayout.c 			 struct nfs4_layoutget_res *lgr,
lgr               667 fs/nfs/filelayout/filelayout.c 	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages, lgr->layoutp->len);
lgr               821 fs/nfs/filelayout/filelayout.c 		      struct nfs4_layoutget_res *lgr,
lgr               832 fs/nfs/filelayout/filelayout.c 	rc = filelayout_decode_layout(layoutid, fl, lgr, gfp_flags);
lgr               833 fs/nfs/filelayout/filelayout.c 	if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, gfp_flags)) {
lgr               265 fs/nfs/flexfilelayout/flexfilelayout.c static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
lgr               272 fs/nfs/flexfilelayout/flexfilelayout.c 	if (lgr->range.offset != 0 ||
lgr               273 fs/nfs/flexfilelayout/flexfilelayout.c 	    lgr->range.length != NFS4_MAX_UINT64) {
lgr               365 fs/nfs/flexfilelayout/flexfilelayout.c 		     struct nfs4_layoutget_res *lgr,
lgr               383 fs/nfs/flexfilelayout/flexfilelayout.c 	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
lgr               384 fs/nfs/flexfilelayout/flexfilelayout.c 			      lgr->layoutp->len);
lgr               510 fs/nfs/flexfilelayout/flexfilelayout.c 		if (lgr->range.iomode == IOMODE_READ)
lgr               518 fs/nfs/flexfilelayout/flexfilelayout.c 			if (lgr->range.iomode == IOMODE_READ) {
lgr               530 fs/nfs/flexfilelayout/flexfilelayout.c 			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
lgr               548 fs/nfs/flexfilelayout/flexfilelayout.c 	rc = ff_layout_check_layout(lgr);
lgr               137 fs/nfs/pnfs.h  	struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr, gfp_t gfp_flags);
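
Note: in the fs/nfs entries above, `lgr` is the LAYOUTGET result (`struct nfs4_layoutget_res *`) handed to each pNFS layout driver's `alloc_lseg` hook, which validates `lgr->range` and decodes the opaque layout body from `lgr->layoutp->pages`. The following is only a minimal sketch of that shared pattern, not any driver's actual decode routine; `decode_layout_example` is a hypothetical name.

	/* Illustrative sketch only, not kernel code: the lgr->range check and
	 * xdr decode setup that the file/flexfile layout entries above share.
	 */
	static int decode_layout_example(struct nfs4_layoutget_res *lgr)
	{
		struct xdr_stream stream;
		struct xdr_buf buf;
		__be32 *p;

		/* whole-file layouts only, as the checks above require */
		if (lgr->range.offset != 0 ||
		    lgr->range.length != NFS4_MAX_UINT64)
			return -EINVAL;

		/* wrap the opaque layout body returned by LAYOUTGET */
		xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
				      lgr->layoutp->len);

		p = xdr_inline_decode(&stream, 4);	/* first 32-bit field */
		if (!p)
			return -EIO;
		return 0;
	}
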
lgr               362 net/smc/af_smc.c 	struct smc_link_group *lgr = smc->conn.lgr;
lgr               367 net/smc/af_smc.c 	link = &lgr->lnk[SMC_SINGLE_LINK];
lgr               448 net/smc/af_smc.c 	if (smc->conn.lgr->is_smcd)
lgr               513 net/smc/af_smc.c 		smc_lgr_forget(smc->conn.lgr);
lgr               514 net/smc/af_smc.c 	if (smc->conn.lgr->is_smcd)
lgr               607 net/smc/af_smc.c 	link = &smc->conn.lgr->lnk[SMC_SINGLE_LINK];
lgr               997 net/smc/af_smc.c 	struct smc_link_group *lgr = smc->conn.lgr;
lgr              1002 net/smc/af_smc.c 	link = &lgr->lnk[SMC_SINGLE_LINK];
lgr              1100 net/smc/af_smc.c 		smc_lgr_forget(new_smc->conn.lgr);
lgr              1164 net/smc/af_smc.c 	if (smc_ism_cantalk(new_smc->conn.lgr->peer_gid,
lgr              1165 net/smc/af_smc.c 			    new_smc->conn.lgr->vlan_id,
lgr              1166 net/smc/af_smc.c 			    new_smc->conn.lgr->smcd)) {
lgr              1168 net/smc/af_smc.c 			smc_lgr_forget(new_smc->conn.lgr);
lgr              1176 net/smc/af_smc.c 			smc_lgr_forget(new_smc->conn.lgr);
lgr              1187 net/smc/af_smc.c 	struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];
lgr              1203 net/smc/af_smc.c 	struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];
lgr               123 net/smc/smc.h  	struct smc_link_group	*lgr;		/* link group of connection */
lgr                60 net/smc/smc_cdc.c 	struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
lgr                98 net/smc/smc_cdc.c 	link = &conn->lgr->lnk[SMC_SINGLE_LINK];
lgr               134 net/smc/smc_cdc.c 	if (conn->lgr->is_smcd) {
lgr               165 net/smc/smc_cdc.c 	struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
lgr               356 net/smc/smc_cdc.c 	struct smc_link_group *lgr;
lgr               365 net/smc/smc_cdc.c 	lgr = smc_get_lgr(link);
lgr               366 net/smc/smc_cdc.c 	read_lock_bh(&lgr->conns_lock);
lgr               367 net/smc/smc_cdc.c 	conn = smc_lgr_find_conn(ntohl(cdc->token), lgr);
lgr               368 net/smc/smc_cdc.c 	read_unlock_bh(&lgr->conns_lock);
lgr               293 net/smc/smc_cdc.h 	if (conn->lgr->is_smcd)
lgr               351 net/smc/smc_clc.c 			smc->conn.lgr->sync_err = 1;
lgr               352 net/smc/smc_clc.c 			smc_lgr_terminate(smc->conn.lgr);
lgr               375 net/smc/smc_clc.c 	if (smc->conn.lgr && !smc->conn.lgr->is_smcd)
lgr               483 net/smc/smc_clc.c 	if (smc->conn.lgr->is_smcd) {
lgr               489 net/smc/smc_clc.c 		cclc.gid = conn->lgr->smcd->local_gid;
lgr               493 net/smc/smc_clc.c 		memcpy(&cclc.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);
lgr               498 net/smc/smc_clc.c 		link = &conn->lgr->lnk[SMC_SINGLE_LINK];
lgr               555 net/smc/smc_clc.c 	if (new_smc->conn.lgr->is_smcd) {
lgr               561 net/smc/smc_clc.c 		aclc.gid = conn->lgr->smcd->local_gid;
lgr               565 net/smc/smc_clc.c 		memcpy(&aclc.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);
lgr               574 net/smc/smc_clc.c 		link = &conn->lgr->lnk[SMC_SINGLE_LINK];
lgr                42 net/smc/smc_core.c static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
lgr                45 net/smc/smc_core.c static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
lgr                51 net/smc/smc_core.c 	mod_delayed_work(system_wq, &lgr->free_work,
lgr                52 net/smc/smc_core.c 			 (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
lgr                56 net/smc/smc_core.c void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr)
lgr                58 net/smc/smc_core.c 	mod_delayed_work(system_wq, &lgr->free_work, SMC_LGR_FREE_DELAY_FAST);
lgr                72 net/smc/smc_core.c 	link = &conn->lgr->conns_all.rb_node;
lgr                85 net/smc/smc_core.c 	rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);
lgr               104 net/smc/smc_core.c 		if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr))
lgr               108 net/smc/smc_core.c 	conn->lgr->conns_num++;
lgr               116 net/smc/smc_core.c 	struct smc_link_group *lgr = conn->lgr;
lgr               118 net/smc/smc_core.c 	rb_erase(&conn->alert_node, &lgr->conns_all);
lgr               119 net/smc/smc_core.c 	lgr->conns_num--;
lgr               128 net/smc/smc_core.c 	struct smc_link_group *lgr = conn->lgr;
lgr               130 net/smc/smc_core.c 	if (!lgr)
lgr               132 net/smc/smc_core.c 	write_lock_bh(&lgr->conns_lock);
lgr               136 net/smc/smc_core.c 	write_unlock_bh(&lgr->conns_lock);
lgr               153 net/smc/smc_core.c static void smc_lgr_free(struct smc_link_group *lgr);
lgr               157 net/smc/smc_core.c 	struct smc_link_group *lgr = container_of(to_delayed_work(work),
lgr               163 net/smc/smc_core.c 	read_lock_bh(&lgr->conns_lock);
lgr               164 net/smc/smc_core.c 	conns = RB_EMPTY_ROOT(&lgr->conns_all);
lgr               165 net/smc/smc_core.c 	read_unlock_bh(&lgr->conns_lock);
lgr               170 net/smc/smc_core.c 	if (!list_empty(&lgr->list))
lgr               171 net/smc/smc_core.c 		list_del_init(&lgr->list); /* remove from smc_lgr_list */
lgr               174 net/smc/smc_core.c 	if (!lgr->is_smcd && !lgr->terminating)	{
lgr               175 net/smc/smc_core.c 		struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
lgr               181 net/smc/smc_core.c 			smc_lgr_schedule_free_work(lgr);
lgr               186 net/smc/smc_core.c 	if (!delayed_work_pending(&lgr->free_work)) {
lgr               187 net/smc/smc_core.c 		struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
lgr               189 net/smc/smc_core.c 		if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
lgr               191 net/smc/smc_core.c 		if (lgr->is_smcd)
lgr               192 net/smc/smc_core.c 			smc_ism_signal_shutdown(lgr);
lgr               193 net/smc/smc_core.c 		smc_lgr_free(lgr);
lgr               200 net/smc/smc_core.c 	struct smc_link_group *lgr;
lgr               213 net/smc/smc_core.c 	lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
lgr               214 net/smc/smc_core.c 	if (!lgr) {
lgr               218 net/smc/smc_core.c 	lgr->is_smcd = ini->is_smcd;
lgr               219 net/smc/smc_core.c 	lgr->sync_err = 0;
lgr               220 net/smc/smc_core.c 	lgr->vlan_id = ini->vlan_id;
lgr               221 net/smc/smc_core.c 	rwlock_init(&lgr->sndbufs_lock);
lgr               222 net/smc/smc_core.c 	rwlock_init(&lgr->rmbs_lock);
lgr               223 net/smc/smc_core.c 	rwlock_init(&lgr->conns_lock);
lgr               225 net/smc/smc_core.c 		INIT_LIST_HEAD(&lgr->sndbufs[i]);
lgr               226 net/smc/smc_core.c 		INIT_LIST_HEAD(&lgr->rmbs[i]);
lgr               229 net/smc/smc_core.c 	memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
lgr               230 net/smc/smc_core.c 	INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
lgr               231 net/smc/smc_core.c 	lgr->conns_all = RB_ROOT;
lgr               235 net/smc/smc_core.c 		lgr->peer_gid = ini->ism_gid;
lgr               236 net/smc/smc_core.c 		lgr->smcd = ini->ism_dev;
lgr               240 net/smc/smc_core.c 		lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
lgr               241 net/smc/smc_core.c 		memcpy(lgr->peer_systemid, ini->ib_lcl->id_for_peer,
lgr               244 net/smc/smc_core.c 		lnk = &lgr->lnk[SMC_SINGLE_LINK];
lgr               278 net/smc/smc_core.c 	smc->conn.lgr = lgr;
lgr               280 net/smc/smc_core.c 	list_add(&lgr->list, &smc_lgr_list.list);
lgr               293 net/smc/smc_core.c 	kfree(lgr);
lgr               308 net/smc/smc_core.c 			  struct smc_link_group *lgr)
lgr               314 net/smc/smc_core.c 			if (!lgr->is_smcd) {
lgr               317 net/smc/smc_core.c 						&lgr->lnk[SMC_SINGLE_LINK],
lgr               323 net/smc/smc_core.c 			write_lock_bh(&lgr->rmbs_lock);
lgr               325 net/smc/smc_core.c 			write_unlock_bh(&lgr->rmbs_lock);
lgr               327 net/smc/smc_core.c 			smc_buf_free(lgr, true, conn->rmb_desc);
lgr               335 net/smc/smc_core.c 	struct smc_link_group *lgr = conn->lgr;
lgr               337 net/smc/smc_core.c 	if (!lgr)
lgr               339 net/smc/smc_core.c 	if (lgr->is_smcd) {
lgr               346 net/smc/smc_core.c 	smc_buf_unuse(conn, lgr);		/* allow buffer reuse */
lgr               347 net/smc/smc_core.c 	conn->lgr = NULL;
lgr               349 net/smc/smc_core.c 	if (!lgr->conns_num)
lgr               350 net/smc/smc_core.c 		smc_lgr_schedule_free_work(lgr);
lgr               364 net/smc/smc_core.c static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
lgr               367 net/smc/smc_core.c 	struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
lgr               385 net/smc/smc_core.c static void smcd_buf_free(struct smc_link_group *lgr, bool is_dmb,
lgr               391 net/smc/smc_core.c 		smc_ism_unregister_dmb(lgr->smcd, buf_desc);
lgr               398 net/smc/smc_core.c static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
lgr               401 net/smc/smc_core.c 	if (lgr->is_smcd)
lgr               402 net/smc/smc_core.c 		smcd_buf_free(lgr, is_rmb, buf_desc);
lgr               404 net/smc/smc_core.c 		smcr_buf_free(lgr, is_rmb, buf_desc);
lgr               407 net/smc/smc_core.c static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb)
lgr               415 net/smc/smc_core.c 			buf_list = &lgr->rmbs[i];
lgr               417 net/smc/smc_core.c 			buf_list = &lgr->sndbufs[i];
lgr               421 net/smc/smc_core.c 			smc_buf_free(lgr, is_rmb, buf_desc);
lgr               426 net/smc/smc_core.c static void smc_lgr_free_bufs(struct smc_link_group *lgr)
lgr               429 net/smc/smc_core.c 	__smc_lgr_free_bufs(lgr, false);
lgr               431 net/smc/smc_core.c 	__smc_lgr_free_bufs(lgr, true);
lgr               435 net/smc/smc_core.c static void smc_lgr_free(struct smc_link_group *lgr)
lgr               437 net/smc/smc_core.c 	smc_lgr_free_bufs(lgr);
lgr               438 net/smc/smc_core.c 	if (lgr->is_smcd) {
lgr               439 net/smc/smc_core.c 		smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
lgr               440 net/smc/smc_core.c 		put_device(&lgr->smcd->dev);
lgr               442 net/smc/smc_core.c 		smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]);
lgr               443 net/smc/smc_core.c 		put_device(&lgr->lnk[SMC_SINGLE_LINK].smcibdev->ibdev->dev);
lgr               445 net/smc/smc_core.c 	kfree(lgr);
lgr               448 net/smc/smc_core.c void smc_lgr_forget(struct smc_link_group *lgr)
lgr               452 net/smc/smc_core.c 	if (!list_empty(&lgr->list))
lgr               453 net/smc/smc_core.c 		list_del_init(&lgr->list);
lgr               458 net/smc/smc_core.c static void __smc_lgr_terminate(struct smc_link_group *lgr)
lgr               464 net/smc/smc_core.c 	if (lgr->terminating)
lgr               466 net/smc/smc_core.c 	lgr->terminating = 1;
lgr               467 net/smc/smc_core.c 	if (!list_empty(&lgr->list)) /* forget lgr */
lgr               468 net/smc/smc_core.c 		list_del_init(&lgr->list);
lgr               469 net/smc/smc_core.c 	if (!lgr->is_smcd)
lgr               470 net/smc/smc_core.c 		smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);
lgr               472 net/smc/smc_core.c 	write_lock_bh(&lgr->conns_lock);
lgr               473 net/smc/smc_core.c 	node = rb_first(&lgr->conns_all);
lgr               480 net/smc/smc_core.c 		conn->lgr = NULL;
lgr               481 net/smc/smc_core.c 		write_unlock_bh(&lgr->conns_lock);
lgr               484 net/smc/smc_core.c 		write_lock_bh(&lgr->conns_lock);
lgr               485 net/smc/smc_core.c 		node = rb_first(&lgr->conns_all);
lgr               487 net/smc/smc_core.c 	write_unlock_bh(&lgr->conns_lock);
lgr               488 net/smc/smc_core.c 	if (!lgr->is_smcd)
lgr               489 net/smc/smc_core.c 		wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait);
lgr               490 net/smc/smc_core.c 	smc_lgr_schedule_free_work(lgr);
lgr               493 net/smc/smc_core.c void smc_lgr_terminate(struct smc_link_group *lgr)
lgr               496 net/smc/smc_core.c 	__smc_lgr_terminate(lgr);
lgr               503 net/smc/smc_core.c 	struct smc_link_group *lgr, *l;
lgr               506 net/smc/smc_core.c 	list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
lgr               507 net/smc/smc_core.c 		if (!lgr->is_smcd &&
lgr               508 net/smc/smc_core.c 		    lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev &&
lgr               509 net/smc/smc_core.c 		    lgr->lnk[SMC_SINGLE_LINK].ibport == ibport)
lgr               510 net/smc/smc_core.c 			__smc_lgr_terminate(lgr);
lgr               518 net/smc/smc_core.c 	struct smc_link_group *lgr, *l;
lgr               523 net/smc/smc_core.c 	list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
lgr               524 net/smc/smc_core.c 		if (lgr->is_smcd && lgr->smcd == dev &&
lgr               525 net/smc/smc_core.c 		    (!peer_gid || lgr->peer_gid == peer_gid) &&
lgr               526 net/smc/smc_core.c 		    (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
lgr               527 net/smc/smc_core.c 			__smc_lgr_terminate(lgr);
lgr               528 net/smc/smc_core.c 			list_move(&lgr->list, &lgr_free_list);
lgr               534 net/smc/smc_core.c 	list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
lgr               535 net/smc/smc_core.c 		list_del_init(&lgr->list);
lgr               536 net/smc/smc_core.c 		cancel_delayed_work_sync(&lgr->free_work);
lgr               538 net/smc/smc_core.c 			smc_ism_signal_shutdown(lgr);
lgr               539 net/smc/smc_core.c 		smc_lgr_free(lgr);
lgr               590 net/smc/smc_core.c static bool smcr_lgr_match(struct smc_link_group *lgr,
lgr               594 net/smc/smc_core.c 	return !memcmp(lgr->peer_systemid, lcl->id_for_peer,
lgr               596 net/smc/smc_core.c 		!memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_gid, &lcl->gid,
lgr               598 net/smc/smc_core.c 		!memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
lgr               600 net/smc/smc_core.c 		lgr->role == role &&
lgr               601 net/smc/smc_core.c 		(lgr->role == SMC_SERV ||
lgr               602 net/smc/smc_core.c 		 lgr->lnk[SMC_SINGLE_LINK].peer_qpn == clcqpn);
lgr               605 net/smc/smc_core.c static bool smcd_lgr_match(struct smc_link_group *lgr,
lgr               608 net/smc/smc_core.c 	return lgr->peer_gid == peer_gid && lgr->smcd == smcismdev;
lgr               615 net/smc/smc_core.c 	struct smc_link_group *lgr;
lgr               627 net/smc/smc_core.c 	list_for_each_entry(lgr, &smc_lgr_list.list, list) {
lgr               628 net/smc/smc_core.c 		write_lock_bh(&lgr->conns_lock);
lgr               630 net/smc/smc_core.c 		     smcd_lgr_match(lgr, ini->ism_dev, ini->ism_gid) :
lgr               631 net/smc/smc_core.c 		     smcr_lgr_match(lgr, ini->ib_lcl, role, ini->ib_clcqpn)) &&
lgr               632 net/smc/smc_core.c 		    !lgr->sync_err &&
lgr               633 net/smc/smc_core.c 		    lgr->vlan_id == ini->vlan_id &&
lgr               635 net/smc/smc_core.c 		     lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) {
lgr               638 net/smc/smc_core.c 			conn->lgr = lgr;
lgr               640 net/smc/smc_core.c 			if (delayed_work_pending(&lgr->free_work))
lgr               641 net/smc/smc_core.c 				cancel_delayed_work(&lgr->free_work);
lgr               642 net/smc/smc_core.c 			write_unlock_bh(&lgr->conns_lock);
lgr               645 net/smc/smc_core.c 		write_unlock_bh(&lgr->conns_lock);
lgr               663 net/smc/smc_core.c 		lgr = conn->lgr;
lgr               664 net/smc/smc_core.c 		write_lock_bh(&lgr->conns_lock);
lgr               666 net/smc/smc_core.c 		write_unlock_bh(&lgr->conns_lock);
lgr               739 net/smc/smc_core.c static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
lgr               763 net/smc/smc_core.c 	lnk = &lgr->lnk[SMC_SINGLE_LINK];
lgr               767 net/smc/smc_core.c 		smc_buf_free(lgr, is_rmb, buf_desc);
lgr               778 net/smc/smc_core.c 		smc_buf_free(lgr, is_rmb, buf_desc);
lgr               789 net/smc/smc_core.c 			smc_buf_free(lgr, is_rmb, buf_desc);
lgr               800 net/smc/smc_core.c static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
lgr               814 net/smc/smc_core.c 		rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
lgr               839 net/smc/smc_core.c 	struct smc_link_group *lgr = conn->lgr;
lgr               856 net/smc/smc_core.c 			lock = &lgr->rmbs_lock;
lgr               857 net/smc/smc_core.c 			buf_list = &lgr->rmbs[bufsize_short];
lgr               859 net/smc/smc_core.c 			lock = &lgr->sndbufs_lock;
lgr               860 net/smc/smc_core.c 			buf_list = &lgr->sndbufs[bufsize_short];
lgr               874 net/smc/smc_core.c 			buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize);
lgr               876 net/smc/smc_core.c 			buf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize);
lgr               912 net/smc/smc_core.c 	struct smc_link_group *lgr = conn->lgr;
lgr               914 net/smc/smc_core.c 	if (!conn->lgr || conn->lgr->is_smcd)
lgr               916 net/smc/smc_core.c 	smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
lgr               922 net/smc/smc_core.c 	struct smc_link_group *lgr = conn->lgr;
lgr               924 net/smc/smc_core.c 	if (!conn->lgr || conn->lgr->is_smcd)
lgr               926 net/smc/smc_core.c 	smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
lgr               932 net/smc/smc_core.c 	struct smc_link_group *lgr = conn->lgr;
lgr               934 net/smc/smc_core.c 	if (!conn->lgr || conn->lgr->is_smcd)
lgr               936 net/smc/smc_core.c 	smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
lgr               942 net/smc/smc_core.c 	struct smc_link_group *lgr = conn->lgr;
lgr               944 net/smc/smc_core.c 	if (!conn->lgr || conn->lgr->is_smcd)
lgr               946 net/smc/smc_core.c 	smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
lgr               967 net/smc/smc_core.c 		smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
lgr               971 net/smc/smc_core.c static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
lgr               975 net/smc/smc_core.c 	for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) {
lgr               976 net/smc/smc_core.c 		if (!test_and_set_bit(i, lgr->rtokens_used_mask))
lgr               983 net/smc/smc_core.c int smc_rtoken_add(struct smc_link_group *lgr, __be64 nw_vaddr, __be32 nw_rkey)
lgr               990 net/smc/smc_core.c 		if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) &&
lgr               991 net/smc/smc_core.c 		    (lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr == dma_addr) &&
lgr               992 net/smc/smc_core.c 		    test_bit(i, lgr->rtokens_used_mask)) {
lgr               997 net/smc/smc_core.c 	i = smc_rmb_reserve_rtoken_idx(lgr);
lgr              1000 net/smc/smc_core.c 	lgr->rtokens[i][SMC_SINGLE_LINK].rkey = rkey;
lgr              1001 net/smc/smc_core.c 	lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = dma_addr;
lgr              1006 net/smc/smc_core.c int smc_rtoken_delete(struct smc_link_group *lgr, __be32 nw_rkey)
lgr              1012 net/smc/smc_core.c 		if (lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey &&
lgr              1013 net/smc/smc_core.c 		    test_bit(i, lgr->rtokens_used_mask)) {
lgr              1014 net/smc/smc_core.c 			lgr->rtokens[i][SMC_SINGLE_LINK].rkey = 0;
lgr              1015 net/smc/smc_core.c 			lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = 0;
lgr              1017 net/smc/smc_core.c 			clear_bit(i, lgr->rtokens_used_mask);
lgr              1028 net/smc/smc_core.c 	conn->rtoken_idx = smc_rtoken_add(conn->lgr, clc->rmb_dma_addr,
lgr              1038 net/smc/smc_core.c 	struct smc_link_group *lgr, *lg;
lgr              1045 net/smc/smc_core.c 	list_for_each_entry_safe(lgr, lg, &lgr_freeing_list, list) {
lgr              1046 net/smc/smc_core.c 		list_del_init(&lgr->list);
lgr              1047 net/smc/smc_core.c 		if (!lgr->is_smcd) {
lgr              1048 net/smc/smc_core.c 			struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
lgr              1055 net/smc/smc_core.c 		cancel_delayed_work_sync(&lgr->free_work);
lgr              1056 net/smc/smc_core.c 		if (lgr->is_smcd)
lgr              1057 net/smc/smc_core.c 			smc_ism_signal_shutdown(lgr);
lgr              1058 net/smc/smc_core.c 		smc_lgr_free(lgr); /* free link group */
lgr               258 net/smc/smc_core.h 	u32 token, struct smc_link_group *lgr)
lgr               263 net/smc/smc_core.h 	node = lgr->conns_all.rb_node;
lgr               287 net/smc/smc_core.h void smc_lgr_forget(struct smc_link_group *lgr);
lgr               288 net/smc/smc_core.h void smc_lgr_terminate(struct smc_link_group *lgr);
lgr               296 net/smc/smc_core.h int smc_rtoken_add(struct smc_link_group *lgr, __be64 nw_vaddr, __be32 nw_rkey);
lgr               297 net/smc/smc_core.h int smc_rtoken_delete(struct smc_link_group *lgr, __be32 nw_rkey);
lgr               307 net/smc/smc_core.h void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr);
lgr                96 net/smc/smc_diag.c 	else if (smc->conn.lgr && smc->conn.lgr->is_smcd)
lgr               149 net/smc/smc_diag.c 	if (smc->conn.lgr && !smc->conn.lgr->is_smcd &&
lgr               151 net/smc/smc_diag.c 	    !list_empty(&smc->conn.lgr->list)) {
lgr               153 net/smc/smc_diag.c 			.role = smc->conn.lgr->role,
lgr               154 net/smc/smc_diag.c 			.lnk[0].ibport = smc->conn.lgr->lnk[0].ibport,
lgr               155 net/smc/smc_diag.c 			.lnk[0].link_id = smc->conn.lgr->lnk[0].link_id,
lgr               159 net/smc/smc_diag.c 		       smc->conn.lgr->lnk[0].smcibdev->ibdev->name,
lgr               160 net/smc/smc_diag.c 		       sizeof(smc->conn.lgr->lnk[0].smcibdev->ibdev->name));
lgr               162 net/smc/smc_diag.c 				     smc->conn.lgr->lnk[0].gid);
lgr               164 net/smc/smc_diag.c 				     smc->conn.lgr->lnk[0].peer_gid);
lgr               169 net/smc/smc_diag.c 	if (smc->conn.lgr && smc->conn.lgr->is_smcd &&
lgr               171 net/smc/smc_diag.c 	    !list_empty(&smc->conn.lgr->list)) {
lgr               174 net/smc/smc_diag.c 			.linkid = *((u32 *)conn->lgr->id),
lgr               175 net/smc/smc_diag.c 			.peer_gid = conn->lgr->peer_gid,
lgr               176 net/smc/smc_diag.c 			.my_gid = conn->lgr->smcd->local_gid,
lgr               116 net/smc/smc_ib.c 	struct smc_link_group *lgr = smc_get_lgr(lnk);
lgr               136 net/smc/smc_ib.c 	if (lgr->role == SMC_SERV) {
lgr                46 net/smc/smc_ism.c 	spin_lock_irqsave(&conn->lgr->smcd->lock, flags);
lgr                47 net/smc/smc_ism.c 	conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = conn;
lgr                48 net/smc/smc_ism.c 	spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags);
lgr                59 net/smc/smc_ism.c 	spin_lock_irqsave(&conn->lgr->smcd->lock, flags);
lgr                60 net/smc/smc_ism.c 	conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = NULL;
lgr                61 net/smc/smc_ism.c 	spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags);
lgr               159 net/smc/smc_ism.c int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len,
lgr               168 net/smc/smc_ism.c 	dmb.vlan_id = lgr->vlan_id;
lgr               169 net/smc/smc_ism.c 	dmb.rgid = lgr->peer_gid;
lgr               170 net/smc/smc_ism.c 	rc = lgr->smcd->ops->register_dmb(lgr->smcd, &dmb);
lgr               224 net/smc/smc_ism.c int smc_ism_signal_shutdown(struct smc_link_group *lgr)
lgr               229 net/smc/smc_ism.c 	memcpy(ev_info.uid, lgr->id, SMC_LGR_ID_SIZE);
lgr               230 net/smc/smc_ism.c 	ev_info.vlan_id = lgr->vlan_id;
lgr               232 net/smc/smc_ism.c 	rc = lgr->smcd->ops->signal_event(lgr->smcd, lgr->peer_gid,
lgr                43 net/smc/smc_ism.h int smc_ism_register_dmb(struct smc_link_group *lgr, int buf_size,
lgr                48 net/smc/smc_ism.h int smc_ism_signal_shutdown(struct smc_link_group *lgr);
lgr               189 net/smc/smc_llc.c 	struct smc_link_group *lgr = smc_get_lgr(link);
lgr               210 net/smc/smc_llc.c 	memcpy(confllc->link_uid, lgr->id, SMC_LGR_ID_SIZE);
lgr               406 net/smc/smc_llc.c 	struct smc_link_group *lgr = smc_get_lgr(link);
lgr               416 net/smc/smc_llc.c 		if (lgr->role == SMC_SERV &&
lgr               422 net/smc/smc_llc.c 		if (lgr->role == SMC_CLNT &&
lgr               434 net/smc/smc_llc.c 	struct smc_link_group *lgr = smc_get_lgr(link);
lgr               445 net/smc/smc_llc.c 		if (lgr->role == SMC_SERV) {
lgr               462 net/smc/smc_llc.c 	struct smc_link_group *lgr = smc_get_lgr(link);
lgr               465 net/smc/smc_llc.c 		if (lgr->role == SMC_SERV)
lgr               466 net/smc/smc_llc.c 			smc_lgr_schedule_free_work_fast(lgr);
lgr               468 net/smc/smc_llc.c 		smc_lgr_forget(lgr);
lgr               470 net/smc/smc_llc.c 		if (lgr->role == SMC_SERV) {
lgr               478 net/smc/smc_llc.c 		smc_lgr_schedule_free_work_fast(lgr);
lgr               628 net/smc/smc_llc.c 	struct smc_link_group *lgr = smc_get_lgr(link);
lgr               630 net/smc/smc_llc.c 					       *((u32 *)lgr->id),
lgr               260 net/smc/smc_tx.c 	rc = smc_ism_write(conn->lgr->smcd, &pos, data, len);
lgr               270 net/smc/smc_tx.c 	struct smc_link_group *lgr = conn->lgr;
lgr               274 net/smc/smc_tx.c 	link = &lgr->lnk[SMC_SINGLE_LINK];
lgr               278 net/smc/smc_tx.c 		lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr +
lgr               283 net/smc/smc_tx.c 	rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
lgr               287 net/smc/smc_tx.c 		smc_lgr_terminate(lgr);
lgr               459 net/smc/smc_tx.c 	if (conn->lgr->is_smcd)
lgr               510 net/smc/smc_tx.c 			smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK],
lgr               550 net/smc/smc_tx.c 	if (conn->lgr->is_smcd)
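
Note: in the net/smc entries, `lgr` is the SMC link group (`struct smc_link_group`) reached via `conn->lgr`; the recurring pattern is to branch on `lgr->is_smcd` (SMC-D uses `lgr->smcd`) and otherwise use the single RoCE link `&lgr->lnk[SMC_SINGLE_LINK]`. The sketch below only illustrates that accessor pattern; `smc_conn_link_example` is a hypothetical helper, not an existing kernel function.

	/* Illustrative sketch only, not kernel code: the conn->lgr access
	 * pattern repeated throughout the net/smc lines above.
	 */
	static struct smc_link *smc_conn_link_example(struct smc_connection *conn)
	{
		struct smc_link_group *lgr = conn->lgr;

		if (!lgr || lgr->is_smcd)
			return NULL;	/* SMC-D groups use lgr->smcd, not links */

		return &lgr->lnk[SMC_SINGLE_LINK];	/* one link per group here */
	}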