/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/ratelimit.h>

#include "rds.h"
#include "iw.h"

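/*
 * Translate an IB work completion status into the RDS notification
 * status reported to the application through rds_rdma_send_complete().
 * Flush errors are deliberately ignored; they are expected while the
 * QP drains during connection shutdown.
 */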
static void rds_iw_send_rdma_complete(struct rds_message *rm,
				      int wc_status)
{
	int notify_status;

	switch (wc_status) {
	case IB_WC_WR_FLUSH_ERR:
		return;

	case IB_WC_SUCCESS:
		notify_status = RDS_RDMA_SUCCESS;
		break;

	case IB_WC_REM_ACCESS_ERR:
		notify_status = RDS_RDMA_REMOTE_ERROR;
		break;

	default:
		notify_status = RDS_RDMA_OTHER_ERROR;
		break;
	}
	rds_rdma_send_complete(rm, notify_status);
}

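/*
 * Unmap an RDMA op's scatterlist, using the same DMA direction the
 * mapping was created with (write -> DMA_TO_DEVICE, read ->
 * DMA_FROM_DEVICE).
 */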
static void rds_iw_send_unmap_rdma(struct rds_iw_connection *ic,
				   struct rm_rdma_op *op)
{
	if (op->op_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device,
			op->op_sg, op->op_nents,
			op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		op->op_mapped = 0;
	}
}

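/*
 * Tear down a send ring entry whose message has completed (or been
 * flushed): unmap the data scatterlist, complete and account any
 * attached RDMA op, wake up anyone waiting on the message, and drop
 * our reference to it.
 */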
static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic,
			  struct rds_iw_send_work *send,
			  int wc_status)
{
	struct rds_message *rm = send->s_rm;

	rdsdebug("ic %p send %p rm %p\n", ic, send, rm);

	ib_dma_unmap_sg(ic->i_cm_id->device,
		     rm->data.op_sg, rm->data.op_nents,
		     DMA_TO_DEVICE);

	if (rm->rdma.op_active) {
		rds_iw_send_unmap_rdma(ic, &rm->rdma);

		/* If the user asked for a completion notification on this
		 * message, we can implement three different semantics:
		 *  1.	Notify when we received the ACK on the RDS message
		 *	that was queued with the RDMA. This provides reliable
		 *	notification of RDMA status at the expense of a one-way
		 *	packet delay.
		 *  2.	Notify when the IB stack gives us the completion event for
		 *	the RDMA operation.
		 *  3.	Notify when the IB stack gives us the completion event for
		 *	the accompanying RDS message.
		 * Here, we implement approach #3. To implement approach #2,
		 * call rds_rdma_send_complete from the cq_handler. To implement #1,
		 * don't call rds_rdma_send_complete at all, and fall back to the notify
		 * handling in the ACK processing code.
		 *
		 * Note: There's no need to explicitly sync any RDMA buffers using
		 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
		 * operation itself unmapped the RDMA buffers, which takes care
		 * of syncing.
		 */
		rds_iw_send_rdma_complete(rm, wc_status);

		if (rm->rdma.op_write)
			rds_stats_add(s_send_rdma_bytes, rm->rdma.op_bytes);
		else
			rds_stats_add(s_recv_rdma_bytes, rm->rdma.op_bytes);
	}

	/* If anyone waited for this message to get flushed out, wake
	 * them up now */
	rds_message_unmapped(rm);

	rds_message_put(rm);
	send->s_rm = NULL;
}

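/*
 * Initialize each send ring entry: point the work request at the
 * entry's SGE array and at its slot in the DMA-mapped header array,
 * and allocate the fastreg MR later used for RDMA transfers.
 */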
void rds_iw_send_init_ring(struct rds_iw_connection *ic)
{
	struct rds_iw_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		struct ib_sge *sge;

		send->s_rm = NULL;
		send->s_op = NULL;
		send->s_mapping = NULL;

		send->s_send_wr.next = NULL;
		send->s_send_wr.wr_id = i;
		send->s_send_wr.sg_list = send->s_sge;
		send->s_send_wr.num_sge = 1;
		send->s_send_wr.opcode = IB_WR_SEND;
		send->s_send_wr.send_flags = 0;
		send->s_send_wr.ex.imm_data = 0;

		sge = rds_iw_data_sge(ic, send->s_sge);
		sge->lkey = 0;

		sge = rds_iw_header_sge(ic, send->s_sge);
		sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = 0;

		send->s_mr = ib_alloc_mr(ic->i_pd, IB_MR_TYPE_MEM_REG,
					 fastreg_message_size);
		if (IS_ERR(send->s_mr)) {
			printk(KERN_WARNING "RDS/IW: ib_alloc_mr failed\n");
			break;
		}
	}
}

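/*
 * Release everything rds_iw_send_init_ring() set up. Entries whose
 * opcode was poisoned to 0xdead have already been cleaned up by the
 * completion handler; anything still live is flushed here.
 */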
void rds_iw_send_clear_ring(struct rds_iw_connection *ic)
{
	struct rds_iw_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		BUG_ON(!send->s_mr);
		ib_dereg_mr(send->s_mr);
		if (send->s_send_wr.opcode == 0xdead)
			continue;
		if (send->s_rm)
			rds_iw_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR);
		if (send->s_op)
			rds_iw_send_unmap_rdma(ic, send->s_op);
	}
}

/*
 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
 * operations performed in the send path.  As the sender allocs and potentially
 * unallocs the next free entry in the ring, it doesn't alter which entry is
 * the next to be freed, which is all this function is concerned with.
 */
void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct ib_wc wc;
	struct rds_iw_send_work *send;
	u32 completed;
	u32 oldest;
	u32 i;
	int ret;

	rdsdebug("cq %p conn %p\n", cq, conn);
	rds_iw_stats_inc(s_iw_tx_cq_call);
	ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	if (ret)
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_iw_stats_inc(s_iw_tx_cq_event);

		if (wc.status != IB_WC_SUCCESS) {
			printk(KERN_ERR "WC Error:  status = %d opcode = %d\n", wc.status, wc.opcode);
			break;
		}

		if (wc.opcode == IB_WC_LOCAL_INV && wc.wr_id == RDS_IW_LOCAL_INV_WR_ID) {
			ic->i_fastreg_posted = 0;
			continue;
		}

		if (wc.opcode == IB_WC_REG_MR && wc.wr_id == RDS_IW_REG_WR_ID) {
			ic->i_fastreg_posted = 1;
			continue;
		}

		if (wc.wr_id == RDS_IW_ACK_WR_ID) {
			if (time_after(jiffies, ic->i_ack_queued + HZ/2))
				rds_iw_stats_inc(s_iw_tx_stalled);
			rds_iw_ack_send_complete(ic);
			continue;
		}

		oldest = rds_iw_ring_oldest(&ic->i_send_ring);

		completed = rds_iw_ring_completed(&ic->i_send_ring, wc.wr_id, oldest);

		for (i = 0; i < completed; i++) {
			send = &ic->i_sends[oldest];

			/* In the error case, wc.opcode sometimes contains garbage */
			switch (send->s_send_wr.opcode) {
			case IB_WR_SEND:
				if (send->s_rm)
					rds_iw_send_unmap_rm(ic, send, wc.status);
				break;
			case IB_WR_REG_MR:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_READ_WITH_INV:
				/* Nothing to be done - the SG list will be unmapped
				 * when the SEND completes. */
				break;
			default:
				printk_ratelimited(KERN_NOTICE
						"RDS/IW: %s: unexpected opcode 0x%x in WR!\n",
						__func__, send->s_send_wr.opcode);
				break;
			}

			send->s_send_wr.opcode = 0xdead;
			send->s_send_wr.num_sge = 1;
			if (time_after(jiffies, send->s_queued + HZ/2))
				rds_iw_stats_inc(s_iw_tx_stalled);

			/* If an RDMA operation produced an error, signal this right
			 * away. If we don't, the subsequent SEND that goes with this
			 * RDMA will be canceled with IB_WC_WR_FLUSH_ERR, and the
			 * application will never learn that the RDMA failed. */
			if (unlikely(wc.status == IB_WC_REM_ACCESS_ERR && send->s_op)) {
				struct rds_message *rm;

				rm = rds_send_get_message(conn, send->s_op);
				if (rm)
					rds_iw_send_rdma_complete(rm, wc.status);
			}

			oldest = (oldest + 1) % ic->i_send_ring.w_nr;
		}

		rds_iw_ring_free(&ic->i_send_ring, completed);

		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
		    test_bit(0, &conn->c_map_queued))
			queue_delayed_work(rds_wq, &conn->c_send_w, 0);

		/* We expect errors as the qp is drained during shutdown */
		if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) {
			rds_iw_conn_error(conn,
				"send completion on %pI4 "
				"had status %u, disconnecting and reconnecting\n",
				&conn->c_faddr, wc.status);
		}
	}
}

/*
 * This is the main function for allocating credits when sending
 * messages.
 *
 * Conceptually, we have two counters:
 *  -	send credits: this tells us how many WRs we're allowed
 *	to submit without overrunning the receiver's queue. For
 *	each SEND WR we post, we decrement this by one.
 *
 *  -	posted credits: this tells us how many WRs we recently
 *	posted to the receive queue. This value is transferred
 *	to the peer as a "credit update" in an RDS header field.
 *	Every time we transmit credits to the peer, we subtract
 *	the amount of transferred credits from this counter.
 *
 * It is essential that we avoid situations where both sides have
 * exhausted their send credits, and are unable to send new credits
 * to the peer. We achieve this by requiring that we send at least
 * one credit update to the peer before exhausting our credits.
 * When new credits arrive, we subtract one credit that is withheld
 * until we've posted new buffers and are ready to transmit these
 * credits (see rds_iw_send_add_credits below).
 *
 * The RDS send code is essentially single-threaded; rds_send_xmit
 * grabs c_send_lock to ensure exclusive access to the send ring.
 * However, the ACK sending code is independent and can race with
 * message SENDs.
 *
 * In the send path, we need to update the counters for send credits
 * and the counter of posted buffers atomically - when we use the
 * last available credit, we cannot allow another thread to race us
 * and grab the posted credits counter.  Hence, we have to use a
 * spinlock to protect the credit counter, or use atomics.
 *
 * Spinlocks shared between the send and the receive path are bad,
 * because they create unnecessary delays. An early implementation
 * using a spinlock showed a 5% degradation in throughput at some
 * loads.
 *
 * This implementation avoids spinlocks completely, packing both
 * counters into a single atomic: the receive path adds fresh credits
 * with atomic_add, while the send path debits both counters at once
 * with atomic_cmpxchg.
 */
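/*
 * As an illustrative sketch (the authoritative definitions live in
 * iw.h), the two counters are packed with the send credits in the
 * low 16 bits and the posted credits in the high 16 bits, roughly:
 *
 *	#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
 *	#define IB_SET_POST_CREDITS(v)	((v) << 16)
 *	#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
 *	#define IB_GET_POST_CREDITS(v)	((v) >> 16)
 *
 * which is what allows the single atomic_cmpxchg() below to debit
 * both counters in one step.
 */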
int rds_iw_send_grab_credits(struct rds_iw_connection *ic,
			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
{
	unsigned int avail, posted, got = 0, advertise;
	long oldval, newval;

	*adv_credits = 0;
	if (!ic->i_flowctl)
		return wanted;

try_again:
	advertise = 0;
	oldval = newval = atomic_read(&ic->i_credits);
	posted = IB_GET_POST_CREDITS(oldval);
	avail = IB_GET_SEND_CREDITS(oldval);

	rdsdebug("wanted=%u credits=%u posted=%u\n",
			wanted, avail, posted);

	/* The last credit must be used to send a credit update. */
	if (avail && !posted)
		avail--;

	if (avail < wanted) {
		struct rds_connection *conn = ic->i_cm_id->context;

		/* Oops, there aren't that many credits left! */
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		got = avail;
	} else {
		/* Sometimes you get what you want, lalala. */
		got = wanted;
	}
	newval -= IB_SET_SEND_CREDITS(got);

	/*
	 * If need_posted is non-zero, then the caller wants the posted
	 * credits advertised regardless of whether any send credits are
	 * available.
	 */
	if (posted && (got || need_posted)) {
		advertise = min_t(unsigned int, posted, max_posted);
		newval -= IB_SET_POST_CREDITS(advertise);
	}

	/* Finally bill everything */
	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
		goto try_again;

	*adv_credits = advertise;
	return got;
}

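/*
 * Called from the receive path when the peer sends us a credit update:
 * make the new send credits available and, if the send path had
 * stalled for lack of credits, kick the send worker.
 */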
void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits)
{
	struct rds_iw_connection *ic = conn->c_transport_data;

	if (credits == 0)
		return;

	rdsdebug("credits=%u current=%u%s\n",
			credits,
			IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
			test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");

	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);

	rds_iw_stats_inc(s_iw_rx_credit_updates);
}

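/*
 * Account freshly posted receive buffers as posted credits, and decide
 * whether it is time to request an ACK so the peer hears about them.
 */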
void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted)
{
	struct rds_iw_connection *ic = conn->c_transport_data;

	if (posted == 0)
		return;

	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);

	/* Decide whether to send an update to the peer now.
	 * If we would send a credit update for every single buffer we
	 * post, we would end up with an ACK storm (ACK arrives,
	 * consumes buffer, we refill the ring, send ACK to remote
	 * advertising the newly posted buffer... ad inf)
	 *
	 * Performance pretty much depends on how often we send
	 * credit updates - too frequent updates mean lots of ACKs.
	 * Too infrequent updates, and the peer will run out of
	 * credits and have to throttle.
	 * For the time being, 16 seems to be a good compromise.
	 */
	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}

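/*
 * Fill in a single send work request with up to two SGEs: the data
 * fragment (if any) first, then the pre-mapped header buffer for this
 * ring slot, so the payload arrives aligned at the receiver.
 */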
static inline void
rds_iw_xmit_populate_wr(struct rds_iw_connection *ic,
		struct rds_iw_send_work *send, unsigned int pos,
		unsigned long buffer, unsigned int length,
		int send_flags)
{
	struct ib_sge *sge;

	WARN_ON(pos != send - ic->i_sends);

	send->s_send_wr.send_flags = send_flags;
	send->s_send_wr.opcode = IB_WR_SEND;
	send->s_send_wr.num_sge = 2;
	send->s_send_wr.next = NULL;
	send->s_queued = jiffies;
	send->s_op = NULL;

	if (length != 0) {
		sge = rds_iw_data_sge(ic, send->s_sge);
		sge->addr = buffer;
		sge->length = length;
		sge->lkey = rds_iw_local_dma_lkey(ic);

		sge = rds_iw_header_sge(ic, send->s_sge);
	} else {
		/* We're sending a packet with no payload. There is only
		 * one SGE */
		send->s_send_wr.num_sge = 1;
		sge = &send->s_sge[0];
	}

	sge->addr = ic->i_send_hdrs_dma + (pos * sizeof(struct rds_header));
	sge->length = sizeof(struct rds_header);
	sge->lkey = rds_iw_local_dma_lkey(ic);
}

/*
 * This can be called multiple times for a given message.  The first time
 * we see a message we map its scatterlist into the IB device so that
 * we can provide that mapped address to the IB scatter gather entries
 * in the IB work requests.  We translate the scatterlist into a series
 * of work requests that fragment the message.  These work requests complete
 * in order so we pass ownership of the message to the completion handler
 * once we send the final fragment.
 *
 * The RDS core uses the c_send_lock to only enter this function once
 * per connection.  This makes sure that the tx ring alloc/unalloc pairs
 * don't get out of sync and confuse the ring.
 */
int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct rds_iw_send_work *send = NULL;
	struct rds_iw_send_work *first;
	struct rds_iw_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	u32 pos;
	u32 i;
	u32 work_alloc;
	u32 credit_alloc;
	u32 posted;
	u32 adv_credits = 0;
	int send_flags = 0;
	int sent;
	int ret;
	int flow_controlled = 0;

	BUG_ON(off % RDS_FRAG_SIZE);
	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));

	/* Fastreg support */
	if (rds_rdma_cookie_key(rm->m_rdma_cookie) && !ic->i_fastreg_posted) {
		ret = -EAGAIN;
		goto out;
	}

	/* FIXME we may overallocate here */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
		i = 1;
	else
		i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);

	work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc == 0) {
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		rds_iw_stats_inc(s_iw_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}
	credit_alloc = work_alloc;
	if (ic->i_flowctl) {
		credit_alloc = rds_iw_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
		adv_credits += posted;
		if (credit_alloc < work_alloc) {
			rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
			work_alloc = credit_alloc;
			flow_controlled++;
		}
		if (work_alloc == 0) {
			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
			rds_iw_stats_inc(s_iw_tx_throttle);
			ret = -ENOMEM;
			goto out;
		}
	}

	/* map the message the first time we see it */
	if (!ic->i_rm) {
		/*
		printk(KERN_NOTICE "rds_iw_xmit prep msg dport=%u flags=0x%x len=%d\n",
				be16_to_cpu(rm->m_inc.i_hdr.h_dport),
				rm->m_inc.i_hdr.h_flags,
				be32_to_cpu(rm->m_inc.i_hdr.h_len));
		   */
		if (rm->data.op_nents) {
			rm->data.op_count = ib_dma_map_sg(dev,
							  rm->data.op_sg,
							  rm->data.op_nents,
							  DMA_TO_DEVICE);
			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
			if (rm->data.op_count == 0) {
				rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
				rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
				ret = -ENOMEM; /* XXX ? */
				goto out;
			}
		} else {
			rm->data.op_count = 0;
		}

		ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
		ic->i_unsignaled_bytes = rds_iw_sysctl_max_unsig_bytes;
		rds_message_addref(rm);
		rm->data.op_dmasg = 0;
		rm->data.op_dmaoff = 0;
		ic->i_rm = rm;

		/* Finalize the header */
		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
		if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;

		/* If it has an RDMA op, tell the peer we did it. This is
		 * used by the peer to release use-once RDMA MRs. */
		if (rm->rdma.op_active) {
			struct rds_ext_header_rdma ext_hdr;

			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
			rds_message_add_extension(&rm->m_inc.i_hdr,
					RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
		}
		if (rm->m_rdma_cookie) {
			rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
					rds_rdma_cookie_key(rm->m_rdma_cookie),
					rds_rdma_cookie_offset(rm->m_rdma_cookie));
		}

		/* Note - rds_iw_piggyb_ack clears the ACK_REQUIRED bit, so
		 * we should not do this unless we have a chance of at least
		 * sticking the header into the send ring. Which is why we
		 * should call rds_iw_ring_alloc first. */
		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_iw_piggyb_ack(ic));
		rds_message_make_checksum(&rm->m_inc.i_hdr);

		/*
		 * Update adv_credits since we reset the ACK_REQUIRED bit.
		 */
		rds_iw_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
		adv_credits += posted;
		BUG_ON(adv_credits > 255);
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &rm->data.op_sg[rm->data.op_dmasg];
	sent = 0;
	i = 0;

	/* Sometimes you want to put a fence between an RDMA
	 * READ and the following SEND.
	 * We could either do this all the time
	 * or when requested by the user. Right now, we let
	 * the application choose.
	 */
	if (rm->rdma.op_active && rm->rdma.op_fence)
		send_flags = IB_SEND_FENCE;

	/*
	 * We could be copying the header into the unused tail of the page.
	 * That would need to be changed in the future when those pages might
	 * be mapped userspace pages or page cache pages.  So instead we always
	 * use a second sge and our long-lived ring of mapped headers.  We send
	 * the header after the data so that the data payload can be aligned on
	 * the receiver.
	 */

	/* handle a 0-len message */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) {
		rds_iw_xmit_populate_wr(ic, send, pos, 0, 0, send_flags);
		goto add_header;
	}

	/* if there's data reference it with a chain of work reqs */
	for (; i < work_alloc && scat != &rm->data.op_sg[rm->data.op_count]; i++) {
		unsigned int len;

		send = &ic->i_sends[pos];

		len = min(RDS_FRAG_SIZE,
			  ib_sg_dma_len(dev, scat) - rm->data.op_dmaoff);
		rds_iw_xmit_populate_wr(ic, send, pos,
			ib_sg_dma_address(dev, scat) + rm->data.op_dmaoff, len,
			send_flags);

		/*
		 * We want to delay signaling completions just enough to get
		 * the batching benefits but not so much that we create dead time
		 * on the wire.
		 */
		if (ic->i_unsignaled_wrs-- == 0) {
			ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
			send->s_send_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		}

		ic->i_unsignaled_bytes -= len;
		if (ic->i_unsignaled_bytes <= 0) {
			ic->i_unsignaled_bytes = rds_iw_sysctl_max_unsig_bytes;
			send->s_send_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		}

		/*
		 * Always signal the last one if we're stopping due to flow control.
		 */
		if (flow_controlled && i == (work_alloc-1))
			send->s_send_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_send_wr, send->s_send_wr.num_sge, send->s_send_wr.next);

		sent += len;
		rm->data.op_dmaoff += len;
		if (rm->data.op_dmaoff == ib_sg_dma_len(dev, scat)) {
			scat++;
			rm->data.op_dmaoff = 0;
			rm->data.op_dmasg++;
		}

add_header:
		/* Tack on the header after the data. The header SGE should already
		 * have been set up to point to the right header buffer. */
		memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));

		if (0) {
			struct rds_header *hdr = &ic->i_send_hdrs[pos];

			printk(KERN_NOTICE "send WR dport=%u flags=0x%x len=%d\n",
				be16_to_cpu(hdr->h_dport),
				hdr->h_flags,
				be32_to_cpu(hdr->h_len));
		}
		if (adv_credits) {
			struct rds_header *hdr = &ic->i_send_hdrs[pos];

			/* add credit and redo the header checksum */
			hdr->h_credit = adv_credits;
			rds_message_make_checksum(hdr);
			adv_credits = 0;
			rds_iw_stats_inc(s_iw_tx_credit_updates);
		}

		if (prev)
			prev->s_send_wr.next = &send->s_send_wr;
		prev = send;

		pos = (pos + 1) % ic->i_send_ring.w_nr;
	}

	/* Account the RDS header in the number of bytes we sent, but just once.
	 * The caller has no concept of fragmentation. */
	if (hdr_off == 0)
		sent += sizeof(struct rds_header);

	/* if we finished the message then send completion owns it */
	if (scat == &rm->data.op_sg[rm->data.op_count]) {
		prev->s_rm = ic->i_rm;
		prev->s_send_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		ic->i_rm = NULL;
	}

	if (i < work_alloc) {
		rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}
	if (ic->i_flowctl && i < credit_alloc)
		rds_iw_send_add_credits(conn, credit_alloc - i);

	/* XXX need to worry about failed_wr and partial sends. */
	failed_wr = &first->s_send_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_send_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_send_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_send_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IW: ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
		if (prev->s_rm) {
			ic->i_rm = prev->s_rm;
			prev->s_rm = NULL;
		}
		goto out;
	}

	ret = sent;
out:
	BUG_ON(adv_credits);
	return ret;
}

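/*
 * Build the IB_WR_REG_MR work request that registers the send ring's
 * fastreg MR over the scatterlist of an RDMA READ; the key is bumped
 * on every reuse so each registration carries a fresh rkey.
 */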
static int rds_iw_build_send_reg(struct rds_iw_send_work *send,
				 struct scatterlist *sg,
				 int sg_nents)
{
	int n;

	n = ib_map_mr_sg(send->s_mr, sg, sg_nents, PAGE_SIZE);
	if (unlikely(n != sg_nents))
		return n < 0 ? n : -EINVAL;

	send->s_reg_wr.wr.opcode = IB_WR_REG_MR;
	send->s_reg_wr.wr.wr_id = 0;
	send->s_reg_wr.wr.num_sge = 0;
	send->s_reg_wr.mr = send->s_mr;
	send->s_reg_wr.key = send->s_mr->rkey;
	send->s_reg_wr.access = IB_ACCESS_REMOTE_WRITE;

	ib_update_fast_reg_key(send->s_mr, send->s_remap_count++);

	return 0;
}

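/*
 * Transmit the RDMA portion of a message: split the mapped scatterlist
 * into chained work requests of at most max_sge entries each and post
 * the whole chain (preceded by a fastreg WR for reads) with a single
 * ib_post_send() call.
 */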
int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct rds_iw_send_work *send = NULL;
	struct rds_iw_send_work *first;
	struct rds_iw_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct rds_iw_device *rds_iwdev;
	struct scatterlist *scat;
	unsigned long len;
	u64 remote_addr = op->op_remote_addr;
	u32 pos, fr_pos;
	u32 work_alloc;
	u32 i;
	u32 j;
	int sent;
	int ret;
	int num_sge;
	int sg_nents;

	rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);

	/* map the message the first time we see it */
	if (!op->op_mapped) {
		op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
					     op->op_sg, op->op_nents, (op->op_write) ?
					     DMA_TO_DEVICE : DMA_FROM_DEVICE);
		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
		if (op->op_count == 0) {
			rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
			ret = -ENOMEM; /* XXX ? */
			goto out;
		}

		op->op_mapped = 1;
	}

	if (!op->op_write) {
		/* Alloc space on the send queue for the fastreg */
		work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, 1, &fr_pos);
		if (work_alloc != 1) {
			rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
			rds_iw_stats_inc(s_iw_tx_ring_full);
			ret = -ENOMEM;
			goto out;
		}
	}

	/*
	 * Instead of knowing how to return a partial rdma read/write we insist that there
	 * be enough work requests to send the entire message.
	 */
	i = ceil(op->op_count, rds_iwdev->max_sge);

	work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc != i) {
		rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_iw_stats_inc(s_iw_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	send = &ic->i_sends[pos];
	if (!op->op_write) {
		first = prev = &ic->i_sends[fr_pos];
	} else {
		first = send;
		prev = NULL;
	}
	scat = &op->op_sg[0];
	sent = 0;
	num_sge = op->op_count;
	sg_nents = 0;

	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
		send->s_rdma_wr.wr.send_flags = 0;
		send->s_queued = jiffies;

		/*
		 * We want to delay signaling completions just enough to get
		 * the batching benefits but not so much that we create dead time on the wire.
		 */
		if (ic->i_unsignaled_wrs-- == 0) {
			ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
			send->s_rdma_wr.wr.send_flags = IB_SEND_SIGNALED;
		}

		/* Rather than adding the plumbing to invalidate the fastreg MR
		 * used for local access once RDS is finished with it, we use
		 * IB_WR_RDMA_READ_WITH_INV, which invalidates it after the
		 * read has completed.
		 */
		if (op->op_write)
			send->s_rdma_wr.wr.opcode = IB_WR_RDMA_WRITE;
		else
			send->s_rdma_wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;

		send->s_rdma_wr.remote_addr = remote_addr;
		send->s_rdma_wr.rkey = op->op_rkey;
		send->s_op = op;

		if (num_sge > rds_iwdev->max_sge) {
			send->s_rdma_wr.wr.num_sge = rds_iwdev->max_sge;
			num_sge -= rds_iwdev->max_sge;
		} else
			send->s_rdma_wr.wr.num_sge = num_sge;

		send->s_rdma_wr.wr.next = NULL;

		if (prev)
			prev->s_send_wr.next = &send->s_rdma_wr.wr;

		for (j = 0; j < send->s_rdma_wr.wr.num_sge &&
		     scat != &op->op_sg[op->op_count]; j++) {
			len = ib_sg_dma_len(ic->i_cm_id->device, scat);

			if (send->s_rdma_wr.wr.opcode == IB_WR_RDMA_READ_WITH_INV)
				sg_nents++;
			else {
				send->s_sge[j].addr = ib_sg_dma_address(ic->i_cm_id->device, scat);
				send->s_sge[j].length = len;
				send->s_sge[j].lkey = rds_iw_local_dma_lkey(ic);
			}

			sent += len;
			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);
			remote_addr += len;

			scat++;
		}

		if (send->s_rdma_wr.wr.opcode == IB_WR_RDMA_READ_WITH_INV) {
			send->s_rdma_wr.wr.num_sge = 1;
			send->s_sge[0].addr = conn->c_xmit_rm->m_rs->rs_user_addr;
			send->s_sge[0].length = conn->c_xmit_rm->m_rs->rs_user_bytes;
			send->s_sge[0].lkey = ic->i_sends[fr_pos].s_mr->lkey;
		}

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			&send->s_rdma_wr,
			send->s_rdma_wr.wr.num_sge,
			send->s_rdma_wr.wr.next);

		prev = send;
		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
			send = ic->i_sends;
	}

	/* if we finished the message then send completion owns it */
	if (scat == &op->op_sg[op->op_count])
		first->s_rdma_wr.wr.send_flags = IB_SEND_SIGNALED;

	if (i < work_alloc) {
		rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}

	/* On iWARP, local memory access by a remote system (i.e., RDMA READ)
	 * is not recommended.  Putting the lkey on the wire is a security
	 * hole, as it can allow for memory access to all of memory on the
	 * remote system.  Some adapters do not allow using the lkey for this
	 * at all.  To bypass this, use a fastreg_mr (or possibly a dma_mr).
	 */
	if (!op->op_write) {
		ret = rds_iw_build_send_reg(&ic->i_sends[fr_pos],
					    &op->op_sg[0], sg_nents);
		if (ret) {
			printk(KERN_WARNING "RDS/IW: failed to reg send mem\n");
			goto out;
		}
		work_alloc++;
	}

	failed_wr = &first->s_rdma_wr.wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_rdma_wr.wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_rdma_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_rdma_wr.wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IW: rdma ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
		goto out;
	}

out:
	return ret;
}

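/*
 * Hook invoked by the RDS core when a transmit pass completes; use the
 * opportunity to retry any ACK that flow control previously forced us
 * to hold back.
 */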
void rds_iw_xmit_complete(struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;

	/* We may have a pending ACK or window update we were unable
	 * to send previously (due to flow control). Try again. */
	rds_iw_attempt_ack(ic);
}