This source file includes the following definitions:
- scif_invalidate_ep
- scif_free_qp
- scif_cleanup_qp
- scif_send_acks
- scif_cleanup_scifdev
- scif_handle_remove_node
- scif_send_rmnode_msg
- scif_disconnect_node
- scif_get_node_info
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * Intel SCIF driver.
 */
#include "scif_peer_bus.h"

#include "scif_main.h"
#include "scif_map.h"

/**
 * scif_invalidate_ep() - Set state for all connected endpoints
 * to disconnected and wake up all send/recv waitqueues
 */
static void scif_invalidate_ep(int node)
{
	struct scif_endpt *ep;
	struct list_head *pos, *tmpq;

	flush_work(&scif_info.conn_work);
	mutex_lock(&scif_info.connlock);
	list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
		ep = list_entry(pos, struct scif_endpt, list);
		if (ep->remote_dev->node == node) {
			scif_unmap_all_windows(ep);
			spin_lock(&ep->lock);
			scif_cleanup_ep_qp(ep);
			spin_unlock(&ep->lock);
		}
	}
	list_for_each_safe(pos, tmpq, &scif_info.connected) {
		ep = list_entry(pos, struct scif_endpt, list);
		if (ep->remote_dev->node == node) {
			list_del(pos);
			spin_lock(&ep->lock);
			ep->state = SCIFEP_DISCONNECTED;
			list_add_tail(&ep->list, &scif_info.disconnected);
			scif_cleanup_ep_qp(ep);
			wake_up_interruptible(&ep->sendwq);
			wake_up_interruptible(&ep->recvwq);
			spin_unlock(&ep->lock);
			scif_unmap_all_windows(ep);
		}
	}
	mutex_unlock(&scif_info.connlock);
}
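
/*
 * Note on the locking above: scif_info.connlock is always taken before the
 * per-endpoint ep->lock, and scif_unmap_all_windows() runs outside ep->lock
 * in both loops (an assumption here: window teardown can block, so it must
 * not be called under the spinlock).
 */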

void scif_free_qp(struct scif_dev *scifdev)
{
	struct scif_qp *qp = scifdev->qpairs;

	if (!qp)
		return;
	scif_unmap_single(qp->local_buf, scifdev, qp->inbound_q.size);
	kfree(qp->inbound_q.rb_base);
	scif_unmap_single(qp->local_qp, scifdev, sizeof(struct scif_qp));
	kfree(scifdev->qpairs);
	scifdev->qpairs = NULL;
}

static void scif_cleanup_qp(struct scif_dev *dev)
{
	struct scif_qp *qp = dev->qpairs;

	if (!qp)
		return;
	scif_iounmap((void *)qp->remote_qp, sizeof(struct scif_qp), dev);
	scif_iounmap((void *)qp->outbound_q.rb_base,
		     sizeof(struct scif_qp), dev);
	qp->remote_qp = NULL;
	qp->local_write = 0;
	qp->inbound_q.current_write_offset = 0;
	qp->inbound_q.current_read_offset = 0;
	if (scifdev_is_p2p(dev))
		scif_free_qp(dev);
}
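
/*
 * Note on scif_cleanup_qp() above: the queue pair memory is freed only for
 * P2P devices. An assumption based on this code: the QP backing the
 * mgmt-node link is kept allocated so the link can be re-established
 * without reallocating it, while P2P queue pairs are rebuilt from scratch.
 */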

void scif_send_acks(struct scif_dev *dev)
{
	struct scifmsg msg;

	if (dev->node_remove_ack_pending) {
		msg.uop = SCIF_NODE_REMOVE_ACK;
		msg.src.node = scif_info.nodeid;
		msg.dst.node = SCIF_MGMT_NODE;
		msg.payload[0] = dev->node;
		scif_nodeqp_send(&scif_dev[SCIF_MGMT_NODE], &msg);
		dev->node_remove_ack_pending = false;
	}
	if (dev->exit_ack_pending) {
		msg.uop = SCIF_EXIT_ACK;
		msg.src.node = scif_info.nodeid;
		msg.dst.node = dev->node;
		scif_nodeqp_send(dev, &msg);
		dev->exit_ack_pending = false;
	}
}
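
/*
 * Illustrative message flow (inferred from the uops above, not part of the
 * original file): when the mgmt node broadcasts SCIF_NODE_REMOVE for node N,
 * each peer cleans up its state for N and replies with SCIF_NODE_REMOVE_ACK
 * carrying N in payload[0]; scif_disconnect_node() below counts those acks
 * via scifdev->disconn_rescnt before proceeding.
 */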

/**
 * scif_cleanup_scifdev - Uninitialize SCIF data structures for remote
 *			  SCIF device.
 * @dev: Remote SCIF device.
 */
void scif_cleanup_scifdev(struct scif_dev *dev)
{
	struct scif_hw_dev *sdev = dev->sdev;

	if (!dev->sdev)
		return;
	if (scifdev_is_p2p(dev)) {
		if (dev->cookie) {
			sdev->hw_ops->free_irq(sdev, dev->cookie, dev);
			dev->cookie = NULL;
		}
		scif_destroy_intr_wq(dev);
	}
	flush_work(&scif_info.misc_work);
	scif_destroy_p2p(dev);
	scif_invalidate_ep(dev->node);
	scif_zap_mmaps(dev->node);
	scif_cleanup_rma_for_zombies(dev->node);
	flush_work(&scif_info.misc_work);
	scif_send_acks(dev);
	if (!dev->node && scif_info.card_initiated_exit) {
		/*
		 * Send an SCIF_EXIT message which is the only defined message
		 * when the card has initiated a host shutdown.
		 */
		scif_send_exit(dev);
		scif_info.card_initiated_exit = false;
	}
	scif_cleanup_qp(dev);
}
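
/*
 * Note: scif_info.misc_work is flushed twice above; once before tearing
 * down the p2p state and endpoints, and again before sending acks,
 * presumably so that any cleanup work queued by the endpoint invalidation
 * itself has completed before the remote node is told the teardown is done.
 */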

/**
 * scif_handle_remove_node() - Handle SCIF_NODE_REMOVE for a peer node
 *
 * @node: Node to remove
 */
void scif_handle_remove_node(int node)
{
	struct scif_dev *scifdev = &scif_dev[node];

	if (scif_peer_unregister_device(scifdev))
		scif_send_acks(scifdev);
}

static int scif_send_rmnode_msg(int node, int remove_node)
{
	struct scifmsg notif_msg;
	struct scif_dev *dev = &scif_dev[node];

	notif_msg.uop = SCIF_NODE_REMOVE;
	notif_msg.src.node = scif_info.nodeid;
	notif_msg.dst.node = node;
	notif_msg.payload[0] = remove_node;
	return scif_nodeqp_send(dev, &notif_msg);
}

/**
 * scif_disconnect_node()
 *
 * @node_id: source node id [in]
 * @mgmt_initiated: Disconnection initiated from the mgmt node
 *
 * Disconnect a node from the SCIF network.
 */
void scif_disconnect_node(u32 node_id, bool mgmt_initiated)
{
	int ret;
	int msg_cnt = 0;
	u32 i = 0;
	struct scif_dev *scifdev = &scif_dev[node_id];

	if (!node_id)
		return;

	atomic_set(&scifdev->disconn_rescnt, 0);

	/* Destroy p2p network */
	for (i = 1; i <= scif_info.maxid; i++) {
		if (i == node_id)
			continue;
		ret = scif_send_rmnode_msg(i, node_id);
		if (!ret)
			msg_cnt++;
	}
	/* Wait for the remote nodes to respond with SCIF_NODE_REMOVE_ACK */
	ret = wait_event_timeout(scifdev->disconn_wq,
				 (atomic_read(&scifdev->disconn_rescnt)
				 == msg_cnt), SCIF_NODE_ALIVE_TIMEOUT);
	/* Tell the card to clean up */
	if (mgmt_initiated && _scifdev_alive(scifdev))
		/*
		 * Send an SCIF_EXIT message which is the last message from
		 * the MGMT node, though it is possible that this message
		 * will never be sent.
		 */
		scif_send_exit(scifdev);
	atomic_set(&scifdev->disconn_rescnt, 0);
	/* Tell the mgmt node to clean up */
	ret = scif_send_rmnode_msg(SCIF_MGMT_NODE, node_id);
	if (!ret)
		/* Wait for mgmt node to respond with SCIF_NODE_REMOVE_ACK */
		wait_event_timeout(scifdev->disconn_wq,
				   (atomic_read(&scifdev->disconn_rescnt) == 1),
				   SCIF_NODE_ALIVE_TIMEOUT);
}
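
/*
 * Usage sketch (illustrative only, not code from this driver): on the mgmt
 * node, tearing down peer node 2 after a lost-node event might look like:
 *
 *	scif_disconnect_node(2, true);
 *	scif_handle_remove_node(2);
 *
 * The real callers live elsewhere in the driver (e.g. the node QP message
 * handlers), so the ordering above is an assumption for illustration.
 */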

void scif_get_node_info(void)
{
	struct scifmsg msg;
	DECLARE_COMPLETION_ONSTACK(node_info);

	msg.uop = SCIF_GET_NODE_INFO;
	msg.src.node = scif_info.nodeid;
	msg.dst.node = SCIF_MGMT_NODE;
	msg.payload[3] = (u64)&node_info;

	if (scif_nodeqp_send(&scif_dev[SCIF_MGMT_NODE], &msg))
		return;

	/* Wait for a response with SCIF_GET_NODE_INFO */
	wait_for_completion(&node_info);
}
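
/*
 * Illustrative sketch (an assumption, not code from this file): the
 * SCIF_GET_NODE_INFO reply handler is expected to recover the completion
 * pointer stashed in payload[3] and complete it, roughly:
 *
 *	struct completion *node_info =
 *		(struct completion *)msg->payload[3];
 *	complete(node_info);
 *
 * which is what unblocks the wait_for_completion() above.
 */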