rnet              405 drivers/infiniband/core/core_priv.h int rdma_nl_net_init(struct rdma_dev_net *rnet);
rnet              406 drivers/infiniband/core/core_priv.h void rdma_nl_net_exit(struct rdma_dev_net *rnet);
rnet              861 drivers/infiniband/core/device.c 			      struct rdma_dev_net *rnet)
rnet              874 drivers/infiniband/core/device.c 	if (net_eq(read_pnet(&rnet->net),
rnet              884 drivers/infiniband/core/device.c 	cdev = xa_load(&device->compat_devs, rnet->id);
rnet              889 drivers/infiniband/core/device.c 	ret = xa_reserve(&device->compat_devs, rnet->id, GFP_KERNEL);
rnet              900 drivers/infiniband/core/device.c 	rdma_init_coredev(cdev, device, read_pnet(&rnet->net));
rnet              913 drivers/infiniband/core/device.c 	ret = xa_err(xa_store(&device->compat_devs, rnet->id,
rnet              928 drivers/infiniband/core/device.c 	xa_release(&device->compat_devs, rnet->id);
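
Note: the device.c hits above (861-928) sit in add_one_compat_dev() and follow the common XArray reserve/store/release idiom, keyed by rnet->id. Below is a minimal sketch of that idiom with the same error handling shape; make_entry()/destroy_entry() are hypothetical placeholders for the compat-device setup, not functions from the listing.

#include <linux/xarray.h>
#include <linux/types.h>

void *make_entry(u32 id);               /* hypothetical constructor */
void destroy_entry(void *entry);        /* hypothetical destructor */

static int publish_compat_entry(struct xarray *xa, u32 id)
{
        void *entry;
        int ret;

        if (xa_load(xa, id))                    /* already populated (cf. device.c:884) */
                return 0;

        ret = xa_reserve(xa, id, GFP_KERNEL);   /* cf. device.c:889 */
        if (ret)
                return ret;

        entry = make_entry(id);                 /* build the object outside the xarray */
        if (!entry) {
                xa_release(xa, id);             /* drop the reservation (cf. device.c:928) */
                return -ENOMEM;
        }

        ret = xa_err(xa_store(xa, id, entry, GFP_KERNEL));      /* cf. device.c:913 */
        if (ret) {
                destroy_entry(entry);
                xa_release(xa, id);
        }
        return ret;
}
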
rnet              959 drivers/infiniband/core/device.c 	struct rdma_dev_net *rnet;
rnet              966 drivers/infiniband/core/device.c 	xa_for_each (&rdma_nets, index, rnet) {
rnet              967 drivers/infiniband/core/device.c 		ret = add_one_compat_dev(device, rnet);
rnet              998 drivers/infiniband/core/device.c 	struct rdma_dev_net *rnet;
rnet             1011 drivers/infiniband/core/device.c 		xa_for_each (&rdma_nets, net_index, rnet) {
rnet             1012 drivers/infiniband/core/device.c 			ret = add_one_compat_dev(dev, rnet);
rnet             1026 drivers/infiniband/core/device.c 	struct rdma_dev_net *rnet;
rnet             1039 drivers/infiniband/core/device.c 	xa_for_each (&rdma_nets, index, rnet) {
rnet             1058 drivers/infiniband/core/device.c 	struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
rnet             1067 drivers/infiniband/core/device.c 	ret = xa_err(xa_store(&rdma_nets, rnet->id, NULL, GFP_KERNEL));
rnet             1080 drivers/infiniband/core/device.c 		remove_one_compat_dev(dev, rnet->id);
rnet             1092 drivers/infiniband/core/device.c 	rdma_nl_net_exit(rnet);
rnet             1093 drivers/infiniband/core/device.c 	xa_erase(&rdma_nets, rnet->id);
rnet             1098 drivers/infiniband/core/device.c 	struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
rnet             1103 drivers/infiniband/core/device.c 	write_pnet(&rnet->net, net);
rnet             1105 drivers/infiniband/core/device.c 	ret = rdma_nl_net_init(rnet);
rnet             1113 drivers/infiniband/core/device.c 	ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL);
rnet             1115 drivers/infiniband/core/device.c 		rdma_nl_net_exit(rnet);
rnet             1125 drivers/infiniband/core/device.c 		ret = add_one_compat_dev(dev, rnet);
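
Note: the device.c entries end with the per-network-namespace init/exit hooks (1058-1128). The sketch below shows how such hooks are conventionally wired up through pernet_operations and net_generic(); the struct layout is inferred from the rnet dereferences in this listing (device.c above, netlink.c below), the ops/hook names are illustrative, and the rdma_nets xarray plus compat-device handling is omitted.

#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 * Layout implied by this listing; the authoritative definition is in
 * core_priv.h and may carry additional members.
 */
struct rdma_dev_net {
        struct sock *nl_sock;   /* per-namespace NETLINK_RDMA socket (netlink.c:322) */
        possible_net_t net;     /* owning namespace (device.c:1103) */
        u32 id;                 /* index into the rdma_nets xarray (device.c:1113) */
};

int rdma_nl_net_init(struct rdma_dev_net *rnet);        /* core_priv.h:405 */
void rdma_nl_net_exit(struct rdma_dev_net *rnet);       /* core_priv.h:406 */

static unsigned int rdma_dev_net_id;    /* generic-netns slot; name assumed */

static inline struct rdma_dev_net *rdma_net_to_dev_net(struct net *net)
{
        return net_generic(net, rdma_dev_net_id);       /* as used at device.c:1058/1098 */
}

/* illustrative hook names; only the quoted lines are certain */
static __net_init int rdma_dev_init_net(struct net *net)
{
        struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);

        write_pnet(&rnet->net, net);    /* device.c:1103 */
        return rdma_nl_net_init(rnet);  /* device.c:1105 */
}

static __net_exit void rdma_dev_exit_net(struct net *net)
{
        rdma_nl_net_exit(rdma_net_to_dev_net(net));     /* device.c:1092 */
}

static struct pernet_operations rdma_dev_net_ops = {
        .init = rdma_dev_init_net,
        .exit = rdma_dev_exit_net,
        .id   = &rdma_dev_net_id,
        .size = sizeof(struct rdma_dev_net),
};

/* registered once at module load, e.g. register_pernet_device(&rdma_dev_net_ops) */
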
rnet               55 drivers/infiniband/core/netlink.c 	struct rdma_dev_net *rnet = rdma_net_to_dev_net(&init_net);
rnet               57 drivers/infiniband/core/netlink.c 	return netlink_has_listeners(rnet->nl_sock, group);
rnet              264 drivers/infiniband/core/netlink.c 	struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
rnet              267 drivers/infiniband/core/netlink.c 	err = netlink_unicast(rnet->nl_sock, skb, pid, MSG_DONTWAIT);
rnet              274 drivers/infiniband/core/netlink.c 	struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
rnet              277 drivers/infiniband/core/netlink.c 	err = netlink_unicast(rnet->nl_sock, skb, pid, 0);
rnet              285 drivers/infiniband/core/netlink.c 	struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
rnet              287 drivers/infiniband/core/netlink.c 	return nlmsg_multicast(rnet->nl_sock, skb, 0, group, flags);
rnet              309 drivers/infiniband/core/netlink.c int rdma_nl_net_init(struct rdma_dev_net *rnet)
rnet              311 drivers/infiniband/core/netlink.c 	struct net *net = read_pnet(&rnet->net);
rnet              322 drivers/infiniband/core/netlink.c 	rnet->nl_sock = nls;
rnet              326 drivers/infiniband/core/netlink.c void rdma_nl_net_exit(struct rdma_dev_net *rnet)
rnet              328 drivers/infiniband/core/netlink.c 	netlink_kernel_release(rnet->nl_sock);
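
Note: rdma_nl_net_init()/rdma_nl_net_exit() (netlink.c:309/326) give each namespace its own NETLINK_RDMA kernel socket. The reconstruction below is a hedged sketch built around the quoted lines, reusing struct rdma_dev_net as sketched after the device.c entries; only the lines shown in the listing are certain, and the receive handler is deliberately left out.

#include <linux/netlink.h>
#include <net/net_namespace.h>

int rdma_nl_net_init(struct rdma_dev_net *rnet)
{
        struct net *net = read_pnet(&rnet->net);        /* netlink.c:311 */
        struct netlink_kernel_cfg cfg = {
                /* .input = the core's netlink receive handler (not shown in this listing) */
        };
        struct sock *nls;

        nls = netlink_kernel_create(net, NETLINK_RDMA, &cfg);
        if (!nls)
                return -ENOMEM;

        rnet->nl_sock = nls;                            /* netlink.c:322 */
        return 0;
}

void rdma_nl_net_exit(struct rdma_dev_net *rnet)
{
        netlink_kernel_release(rnet->nl_sock);          /* netlink.c:328 */
}
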
rnet               96 drivers/net/rionet.c 	struct rionet_private *rnet = netdev_priv(ndev);
rnet               99 drivers/net/rionet.c 	i = rnet->rx_slot;
rnet              102 drivers/net/rionet.c 		if (!rnet->rx_skb[i])
rnet              105 drivers/net/rionet.c 		if (!(data = rio_get_inb_message(rnet->mport, RIONET_MAILBOX)))
rnet              108 drivers/net/rionet.c 		rnet->rx_skb[i]->data = data;
rnet              109 drivers/net/rionet.c 		skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
rnet              110 drivers/net/rionet.c 		rnet->rx_skb[i]->protocol =
rnet              111 drivers/net/rionet.c 		    eth_type_trans(rnet->rx_skb[i], ndev);
rnet              112 drivers/net/rionet.c 		error = netif_rx(rnet->rx_skb[i]);
rnet              121 drivers/net/rionet.c 	} while ((i = (i + 1) % RIONET_RX_RING_SIZE) != rnet->rx_slot);
rnet              129 drivers/net/rionet.c 	struct rionet_private *rnet = netdev_priv(ndev);
rnet              131 drivers/net/rionet.c 	i = rnet->rx_slot;
rnet              133 drivers/net/rionet.c 		rnet->rx_skb[i] = dev_alloc_skb(RIO_MAX_MSG_SIZE);
rnet              135 drivers/net/rionet.c 		if (!rnet->rx_skb[i])
rnet              138 drivers/net/rionet.c 		rio_add_inb_buffer(rnet->mport, RIONET_MAILBOX,
rnet              139 drivers/net/rionet.c 				   rnet->rx_skb[i]->data);
rnet              142 drivers/net/rionet.c 	rnet->rx_slot = i;
rnet              148 drivers/net/rionet.c 	struct rionet_private *rnet = netdev_priv(ndev);
rnet              150 drivers/net/rionet.c 	rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len);
rnet              151 drivers/net/rionet.c 	rnet->tx_skb[rnet->tx_slot] = skb;
rnet              156 drivers/net/rionet.c 	if (++rnet->tx_cnt == RIONET_TX_RING_SIZE)
rnet              159 drivers/net/rionet.c 	++rnet->tx_slot;
rnet              160 drivers/net/rionet.c 	rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1);
rnet              162 drivers/net/rionet.c 	if (netif_msg_tx_queued(rnet))
rnet              172 drivers/net/rionet.c 	struct rionet_private *rnet = netdev_priv(ndev);
rnet              178 drivers/net/rionet.c 	spin_lock_irqsave(&rnet->tx_lock, flags);
rnet              181 drivers/net/rionet.c 		add_num = nets[rnet->mport->id].nact;
rnet              183 drivers/net/rionet.c 	if ((rnet->tx_cnt + add_num) > RIONET_TX_RING_SIZE) {
rnet              185 drivers/net/rionet.c 		spin_unlock_irqrestore(&rnet->tx_lock, flags);
rnet              194 drivers/net/rionet.c 		for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
rnet              196 drivers/net/rionet.c 			if (nets[rnet->mport->id].active[i]) {
rnet              198 drivers/net/rionet.c 					nets[rnet->mport->id].active[i]);
rnet              205 drivers/net/rionet.c 		if (nets[rnet->mport->id].active[destid])
rnet              207 drivers/net/rionet.c 					nets[rnet->mport->id].active[destid]);
rnet              221 drivers/net/rionet.c 	spin_unlock_irqrestore(&rnet->tx_lock, flags);
rnet              230 drivers/net/rionet.c 	struct rionet_private *rnet = netdev_priv(ndev);
rnet              232 drivers/net/rionet.c 	unsigned char netid = rnet->mport->id;
rnet              234 drivers/net/rionet.c 	if (netif_msg_intr(rnet))
rnet              259 drivers/net/rionet.c 		if (netif_msg_intr(rnet))
rnet              269 drivers/net/rionet.c 	struct rionet_private *rnet = netdev_priv(ndev);
rnet              271 drivers/net/rionet.c 	if (netif_msg_intr(rnet))
rnet              275 drivers/net/rionet.c 	spin_lock(&rnet->lock);
rnet              276 drivers/net/rionet.c 	if ((n = rionet_rx_clean(ndev)) != rnet->rx_slot)
rnet              278 drivers/net/rionet.c 	spin_unlock(&rnet->lock);
rnet              284 drivers/net/rionet.c 	struct rionet_private *rnet = netdev_priv(ndev);
rnet              286 drivers/net/rionet.c 	spin_lock(&rnet->tx_lock);
rnet              288 drivers/net/rionet.c 	if (netif_msg_intr(rnet))
rnet              293 drivers/net/rionet.c 	while (rnet->tx_cnt && (rnet->ack_slot != slot)) {
rnet              295 drivers/net/rionet.c 		dev_kfree_skb_irq(rnet->tx_skb[rnet->ack_slot]);
rnet              296 drivers/net/rionet.c 		rnet->tx_skb[rnet->ack_slot] = NULL;
rnet              297 drivers/net/rionet.c 		++rnet->ack_slot;
rnet              298 drivers/net/rionet.c 		rnet->ack_slot &= (RIONET_TX_RING_SIZE - 1);
rnet              299 drivers/net/rionet.c 		rnet->tx_cnt--;
rnet              302 drivers/net/rionet.c 	if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
rnet              305 drivers/net/rionet.c 	spin_unlock(&rnet->tx_lock);
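
Note: the rionet TX bookkeeping above works because RIONET_TX_RING_SIZE is a power of two, so slot indices wrap with a mask instead of a modulo (rionet.c:159-160 in the xmit path, 297-298 in the ack handler). Below is a standalone illustration of that idiom; the naming is mine, not the driver's.

#include <stdbool.h>

#define TX_RING_SIZE 64u        /* stand-in for RIONET_TX_RING_SIZE; must be a power of two */

struct tx_ring {
        unsigned int tx_slot;   /* next slot to fill    (rionet.c:159-160) */
        unsigned int ack_slot;  /* next slot to reclaim (rionet.c:297-298) */
        unsigned int tx_cnt;    /* buffers in flight    (rionet.c:156, 299) */
};

static bool tx_ring_full(const struct tx_ring *r)
{
        return r->tx_cnt == TX_RING_SIZE;       /* driver refuses new transmits (cf. rionet.c:183) */
}

static void tx_ring_queue(struct tx_ring *r)
{
        r->tx_cnt++;
        r->tx_slot = (r->tx_slot + 1) & (TX_RING_SIZE - 1);     /* wrap by mask */
}

static void tx_ring_ack(struct tx_ring *r)
{
        r->ack_slot = (r->ack_slot + 1) & (TX_RING_SIZE - 1);
        r->tx_cnt--;            /* room again; the driver rewakes the queue (cf. rionet.c:302) */
}
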
rnet              312 drivers/net/rionet.c 	struct rionet_private *rnet = netdev_priv(ndev);
rnet              313 drivers/net/rionet.c 	unsigned char netid = rnet->mport->id;
rnet              316 drivers/net/rionet.c 	if (netif_msg_ifup(rnet))
rnet              319 drivers/net/rionet.c 	if ((rc = rio_request_inb_dbell(rnet->mport,
rnet              326 drivers/net/rionet.c 	if ((rc = rio_request_inb_mbox(rnet->mport,
rnet              333 drivers/net/rionet.c 	if ((rc = rio_request_outb_mbox(rnet->mport,
rnet              342 drivers/net/rionet.c 		rnet->rx_skb[i] = NULL;
rnet              343 drivers/net/rionet.c 	rnet->rx_slot = 0;
rnet              346 drivers/net/rionet.c 	rnet->tx_slot = 0;
rnet              347 drivers/net/rionet.c 	rnet->tx_cnt = 0;
rnet              348 drivers/net/rionet.c 	rnet->ack_slot = 0;
rnet              359 drivers/net/rionet.c 	rnet->open = true;
rnet              367 drivers/net/rionet.c 	struct rionet_private *rnet = netdev_priv(ndev);
rnet              369 drivers/net/rionet.c 	unsigned char netid = rnet->mport->id;
rnet              373 drivers/net/rionet.c 	if (netif_msg_ifup(rnet))
rnet              378 drivers/net/rionet.c 	rnet->open = false;
rnet              381 drivers/net/rionet.c 		kfree_skb(rnet->rx_skb[i]);
rnet              394 drivers/net/rionet.c 	rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN,
rnet              396 drivers/net/rionet.c 	rio_release_inb_mbox(rnet->mport, RIONET_MAILBOX);
rnet              397 drivers/net/rionet.c 	rio_release_outb_mbox(rnet->mport, RIONET_MAILBOX);
rnet              443 drivers/net/rionet.c 	struct rionet_private *rnet = netdev_priv(ndev);
rnet              448 drivers/net/rionet.c 	strlcpy(info->bus_info, rnet->mport->name, sizeof(info->bus_info));
rnet              453 drivers/net/rionet.c 	struct rionet_private *rnet = netdev_priv(ndev);
rnet              455 drivers/net/rionet.c 	return rnet->msg_enable;
rnet              460 drivers/net/rionet.c 	struct rionet_private *rnet = netdev_priv(ndev);
rnet              462 drivers/net/rionet.c 	rnet->msg_enable = value;
rnet              483 drivers/net/rionet.c 	struct rionet_private *rnet;
rnet              497 drivers/net/rionet.c 	rnet = netdev_priv(ndev);
rnet              498 drivers/net/rionet.c 	rnet->mport = mport;
rnet              499 drivers/net/rionet.c 	rnet->open = false;
rnet              519 drivers/net/rionet.c 	spin_lock_init(&rnet->lock);
rnet              520 drivers/net/rionet.c 	spin_lock_init(&rnet->tx_lock);
rnet              522 drivers/net/rionet.c 	rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL;
rnet              598 drivers/net/rionet.c 		struct rionet_private *rnet;
rnet              601 drivers/net/rionet.c 		rnet = netdev_priv(nets[netid].ndev);
rnet              626 drivers/net/rionet.c 		if (rnet->open)
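
Note: taken together, the rionet.c dereferences above imply roughly the following layout for struct rionet_private. This is reconstructed from the listing alone; the authoritative definition lives in the driver, and field types, order, and ring sizes may differ.

#include <linux/rio.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* ring sizes are driver-private constants; placeholder values for this sketch only */
#ifndef RIONET_RX_RING_SIZE
#define RIONET_RX_RING_SIZE 128
#endif
#ifndef RIONET_TX_RING_SIZE
#define RIONET_TX_RING_SIZE 128
#endif

struct rionet_private {
        struct rio_mport *mport;                        /* host port (rnet->mport throughout) */
        struct sk_buff *rx_skb[RIONET_RX_RING_SIZE];    /* inbound ring (rionet.c:102-112, 133) */
        struct sk_buff *tx_skb[RIONET_TX_RING_SIZE];    /* outbound ring (rionet.c:151, 295) */
        int rx_slot;                                    /* next RX slot to refill (rionet.c:99, 142) */
        int tx_slot;                                    /* next TX slot to use (rionet.c:159-160) */
        int tx_cnt;                                     /* TX buffers in flight (rionet.c:156) */
        int ack_slot;                                   /* oldest unacknowledged TX slot (rionet.c:297) */
        spinlock_t lock;                                /* RX path (rionet.c:275) */
        spinlock_t tx_lock;                             /* TX path (rionet.c:178, 286) */
        u32 msg_enable;                                 /* netif_msg_* log mask (rionet.c:455, 462) */
        bool open;                                      /* interface up (rionet.c:359, 378, 626) */
};
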