/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <rdma/rdma_cm.h>

#include "rds.h"
#include "iw.h"

static struct kmem_cache *rds_iw_incoming_slab;
static struct kmem_cache *rds_iw_frag_slab;
static atomic_t	rds_iw_allocation = ATOMIC_INIT(0);

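/*
 * Frag teardown is split in two: rds_iw_frag_drop_page() releases the frag's
 * reference on its page, and rds_iw_frag_free() returns the frag itself to
 * the slab.  The page must already have been dropped before a frag is freed.
 */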
static void rds_iw_frag_drop_page(struct rds_page_frag *frag)
{
	rdsdebug("frag %p page %p\n", frag, frag->f_page);
	__free_page(frag->f_page);
	frag->f_page = NULL;
}

static void rds_iw_frag_free(struct rds_page_frag *frag)
{
	rdsdebug("frag %p page %p\n", frag, frag->f_page);
	BUG_ON(frag->f_page);
	kmem_cache_free(rds_iw_frag_slab, frag);
}

/*
 * We map a page at a time.  Its fragments are posted in order.  This
 * is called in fragment order as the fragments get receive completion
 * events.  Only the last frag in the page performs the unmapping.
 *
 * It's OK for ring cleanup to call this in whatever order it likes because
 * DMA is not in flight and so we can unmap while other ring entries still
 * hold page references in their frags.
 */
static void rds_iw_recv_unmap_page(struct rds_iw_connection *ic,
				   struct rds_iw_recv_work *recv)
{
	struct rds_page_frag *frag = recv->r_frag;

	rdsdebug("recv %p frag %p page %p\n", recv, frag, frag->f_page);
	if (frag->f_mapped)
		ib_dma_unmap_page(ic->i_cm_id->device,
			       frag->f_mapped,
			       RDS_FRAG_SIZE, DMA_FROM_DEVICE);
	frag->f_mapped = 0;
}

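/*
 * Set up the parts of each recv work request that never change: the wr_id,
 * the scatter list, and the header sge, which always points at this entry's
 * slot in the DMA-mapped header array.  The data sge address is filled in
 * later, when rds_iw_recv_refill_one() maps a page fragment for it.
 */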
void rds_iw_recv_init_ring(struct rds_iw_connection *ic)
{
	struct rds_iw_recv_work *recv;
	u32 i;

	for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
		struct ib_sge *sge;

		recv->r_iwinc = NULL;
		recv->r_frag = NULL;

		recv->r_wr.next = NULL;
		recv->r_wr.wr_id = i;
		recv->r_wr.sg_list = recv->r_sge;
		recv->r_wr.num_sge = RDS_IW_RECV_SGE;

		sge = rds_iw_data_sge(ic, recv->r_sge);
		sge->addr = 0;
		sge->length = RDS_FRAG_SIZE;
		sge->lkey = 0;

		sge = rds_iw_header_sge(ic, recv->r_sge);
		sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = 0;
	}
}

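/*
 * Release whatever a single ring entry still holds: a reference on a
 * partially assembled rds_iw_incoming and a (possibly mapped) page frag.
 */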
static void rds_iw_recv_clear_one(struct rds_iw_connection *ic,
				  struct rds_iw_recv_work *recv)
{
	if (recv->r_iwinc) {
		rds_inc_put(&recv->r_iwinc->ii_inc);
		recv->r_iwinc = NULL;
	}
	if (recv->r_frag) {
		rds_iw_recv_unmap_page(ic, recv);
		if (recv->r_frag->f_page)
			rds_iw_frag_drop_page(recv->r_frag);
		rds_iw_frag_free(recv->r_frag);
		recv->r_frag = NULL;
	}
}

void rds_iw_recv_clear_ring(struct rds_iw_connection *ic)
{
	u32 i;

	for (i = 0; i < ic->i_recv_ring.w_nr; i++)
		rds_iw_recv_clear_one(ic, &ic->i_recvs[i]);

	if (ic->i_frag.f_page)
		rds_iw_frag_drop_page(&ic->i_frag);
}

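/*
 * Give one ring entry everything it needs to receive a fragment: an
 * incoming struct (charged against rds_iw_allocation), a frag, and a
 * DMA-mapped RDS_FRAG_SIZE slice of the connection's current recv page.
 * Returns 0 on success and -ENOMEM if an allocation or the mapping fails.
 */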
static int rds_iw_recv_refill_one(struct rds_connection *conn,
				  struct rds_iw_recv_work *recv,
				  gfp_t kptr_gfp, gfp_t page_gfp)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	dma_addr_t dma_addr;
	struct ib_sge *sge;
	int ret = -ENOMEM;

	if (!recv->r_iwinc) {
		if (!atomic_add_unless(&rds_iw_allocation, 1, rds_iw_sysctl_max_recv_allocation)) {
			rds_iw_stats_inc(s_iw_rx_alloc_limit);
			goto out;
		}
		recv->r_iwinc = kmem_cache_alloc(rds_iw_incoming_slab,
						 kptr_gfp);
		if (!recv->r_iwinc) {
			atomic_dec(&rds_iw_allocation);
			goto out;
		}
		INIT_LIST_HEAD(&recv->r_iwinc->ii_frags);
		rds_inc_init(&recv->r_iwinc->ii_inc, conn, conn->c_faddr);
	}

	if (!recv->r_frag) {
		recv->r_frag = kmem_cache_alloc(rds_iw_frag_slab, kptr_gfp);
		if (!recv->r_frag)
			goto out;
		INIT_LIST_HEAD(&recv->r_frag->f_item);
		recv->r_frag->f_page = NULL;
	}

	if (!ic->i_frag.f_page) {
		ic->i_frag.f_page = alloc_page(page_gfp);
		if (!ic->i_frag.f_page)
			goto out;
		ic->i_frag.f_offset = 0;
	}

	dma_addr = ib_dma_map_page(ic->i_cm_id->device,
				  ic->i_frag.f_page,
				  ic->i_frag.f_offset,
				  RDS_FRAG_SIZE,
				  DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ic->i_cm_id->device, dma_addr))
		goto out;

	/*
	 * Once we get the RDS_PAGE_LAST_OFF frag then rds_iw_recv_unmap_page()
	 * must be called on this recv.  This happens as completions hit
	 * in order or on connection shutdown.
	 */
	recv->r_frag->f_page = ic->i_frag.f_page;
	recv->r_frag->f_offset = ic->i_frag.f_offset;
	recv->r_frag->f_mapped = dma_addr;

	sge = rds_iw_data_sge(ic, recv->r_sge);
	sge->addr = dma_addr;
	sge->length = RDS_FRAG_SIZE;

	sge = rds_iw_header_sge(ic, recv->r_sge);
	sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
	sge->length = sizeof(struct rds_header);

	get_page(recv->r_frag->f_page);

	if (ic->i_frag.f_offset < RDS_PAGE_LAST_OFF) {
		ic->i_frag.f_offset += RDS_FRAG_SIZE;
	} else {
		put_page(ic->i_frag.f_page);
		ic->i_frag.f_page = NULL;
		ic->i_frag.f_offset = 0;
	}

	ret = 0;
out:
	return ret;
}

/*
 * This tries to allocate and post unused work requests after making sure that
 * they have all the allocations they need to queue received fragments into
 * sockets.  The i_recv_mutex is held here so that ring_alloc and _unalloc
 * pairs don't go unmatched.
 *
 * -1 is returned if posting fails due to temporary resource exhaustion.
 */
int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
		       gfp_t page_gfp, int prefill)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct rds_iw_recv_work *recv;
	struct ib_recv_wr *failed_wr;
	unsigned int posted = 0;
	int ret = 0;
	u32 pos;

	while ((prefill || rds_conn_up(conn)) &&
	       rds_iw_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
		if (pos >= ic->i_recv_ring.w_nr) {
			printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
					pos);
			ret = -EINVAL;
			break;
		}

		recv = &ic->i_recvs[pos];
		ret = rds_iw_recv_refill_one(conn, recv, kptr_gfp, page_gfp);
		if (ret) {
			ret = -1;
			break;
		}

		/* XXX when can this fail? */
		ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
		rdsdebug("recv %p iwinc %p page %p addr %lu ret %d\n", recv,
			 recv->r_iwinc, recv->r_frag->f_page,
			 (long) recv->r_frag->f_mapped, ret);
		if (ret) {
			rds_iw_conn_error(conn, "recv post on "
			       "%pI4 returned %d, disconnecting and "
			       "reconnecting\n", &conn->c_faddr,
			       ret);
			ret = -1;
			break;
		}

		posted++;
	}

	/* We're doing flow control - update the window. */
	if (ic->i_flowctl && posted)
		rds_iw_advertise_credits(conn, posted);

	if (ret)
		rds_iw_ring_unalloc(&ic->i_recv_ring, 1);
	return ret;
}

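/*
 * Drop every fragment still queued on an incoming message, releasing both
 * the page references and the frag structures themselves.
 */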
static void rds_iw_inc_purge(struct rds_incoming *inc)
{
	struct rds_iw_incoming *iwinc;
	struct rds_page_frag *frag;
	struct rds_page_frag *pos;

	iwinc = container_of(inc, struct rds_iw_incoming, ii_inc);
	rdsdebug("purging iwinc %p inc %p\n", iwinc, inc);

	list_for_each_entry_safe(frag, pos, &iwinc->ii_frags, f_item) {
		list_del_init(&frag->f_item);
		rds_iw_frag_drop_page(frag);
		rds_iw_frag_free(frag);
	}
}

void rds_iw_inc_free(struct rds_incoming *inc)
{
	struct rds_iw_incoming *iwinc;

	iwinc = container_of(inc, struct rds_iw_incoming, ii_inc);

	rds_iw_inc_purge(inc);
	rdsdebug("freeing iwinc %p inc %p\n", iwinc, inc);
	BUG_ON(!list_empty(&iwinc->ii_frags));
	kmem_cache_free(rds_iw_incoming_slab, iwinc);
	atomic_dec(&rds_iw_allocation);
	BUG_ON(atomic_read(&rds_iw_allocation) < 0);
}

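/*
 * Copy the payload of an incoming message into the caller's iov_iter,
 * walking the frag list RDS_FRAG_SIZE bytes at a time.  Returns the number
 * of bytes copied, or -EFAULT if copying to user memory fails.
 */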
int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
	struct rds_iw_incoming *iwinc;
	struct rds_page_frag *frag;
	unsigned long to_copy;
	unsigned long frag_off = 0;
	int copied = 0;
	int ret;
	u32 len;

	iwinc = container_of(inc, struct rds_iw_incoming, ii_inc);
	frag = list_entry(iwinc->ii_frags.next, struct rds_page_frag, f_item);
	len = be32_to_cpu(inc->i_hdr.h_len);

	while (iov_iter_count(to) && copied < len) {
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
		to_copy = min_t(unsigned long, iov_iter_count(to),
				RDS_FRAG_SIZE - frag_off);
		to_copy = min_t(unsigned long, to_copy, len - copied);

		/* XXX needs + offset for multiple recvs per page */
		rds_stats_add(s_copy_to_user, to_copy);
		ret = copy_page_to_iter(frag->f_page,
					frag->f_offset + frag_off,
					to_copy,
					to);
		if (ret != to_copy)
			return -EFAULT;

		frag_off += to_copy;
		copied += to_copy;
	}

	return copied;
}

/* ic starts out kzalloc()ed */
void rds_iw_recv_init_ack(struct rds_iw_connection *ic)
{
	struct ib_send_wr *wr = &ic->i_ack_wr;
	struct ib_sge *sge = &ic->i_ack_sge;

	sge->addr = ic->i_ack_dma;
	sge->length = sizeof(struct rds_header);
	sge->lkey = rds_iw_local_dma_lkey(ic);

	wr->sg_list = sge;
	wr->num_sge = 1;
	wr->opcode = IB_WR_SEND;
	wr->wr_id = RDS_IW_ACK_WR_ID;
	wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
}

/*
 * You'd think that with reliable IB connections you wouldn't need to ack
 * messages that have been received.  The problem is that IB hardware generates
 * an ack message before it has DMAed the message into memory.  This creates a
 * potential message loss if the HCA is disabled for any reason between when it
 * sends the ack and before the message is DMAed and processed.  This is only a
 * potential issue if another HCA is available for fail-over.
 *
 * When the remote host receives our ack they'll free the sent message from
 * their send queue.  To decrease the latency of this we always send an ack
 * immediately after we've received messages.
 *
 * For simplicity, we only have one ack in flight at a time.  This puts
 * pressure on senders to have deep enough send queues to absorb the latency of
 * a single ack frame being in flight.  This might not be good enough.
 *
 * This is implemented by having a long-lived send_wr and sge which point to a
 * statically allocated ack frame.  This ack wr does not fall under the ring
 * accounting that the tx and rx wrs do.  The QP attribute specifically makes
 * room for it beyond the ring size.  Send completion notices its special
 * wr_id and avoids working with the ring in that case.
 */
#ifndef KERNEL_HAS_ATOMIC64
static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
				int ack_required)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	ic->i_ack_next = seq;
	if (ack_required)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
}

static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
{
	unsigned long flags;
	u64 seq;

	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	seq = ic->i_ack_next;
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);

	return seq;
}
#else
static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
				int ack_required)
{
	atomic64_set(&ic->i_ack_next, seq);
	if (ack_required) {
		smp_mb__before_atomic();
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	}
}

static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
{
	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	smp_mb__after_atomic();

	return atomic64_read(&ic->i_ack_next);
}
#endif


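/*
 * Build and post the single ACK work request.  The ack header lives in the
 * long-lived buffer that rds_iw_recv_init_ack() pointed the WR's sge at, so
 * all we do here is fill it in, checksum it and post the pre-built WR.
 */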
static void rds_iw_send_ack(struct rds_iw_connection *ic, unsigned int adv_credits)
{
	struct rds_header *hdr = ic->i_ack;
	struct ib_send_wr *failed_wr;
	u64 seq;
	int ret;

	seq = rds_iw_get_ack(ic);

	rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
	rds_message_populate_header(hdr, 0, 0, 0);
	hdr->h_ack = cpu_to_be64(seq);
	hdr->h_credit = adv_credits;
	rds_message_make_checksum(hdr);
	ic->i_ack_queued = jiffies;

	ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr);
	if (unlikely(ret)) {
		/* Failed to send. Release the WR, and
		 * force another ACK.
		 */
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

		rds_iw_stats_inc(s_iw_ack_send_failure);

		rds_iw_conn_error(ic->conn, "sending ack failed\n");
	} else
		rds_iw_stats_inc(s_iw_ack_sent);
}

/*
 * There are 3 ways of getting acknowledgements to the peer:
 *  1.	We call rds_iw_attempt_ack from the recv completion handler
 *	to send an ACK-only frame.
 *	However, there can be only one such frame in the send queue
 *	at any time, so we may have to postpone it.
 *  2.	When another (data) packet is transmitted while there's
 *	an ACK in the queue, we piggyback the ACK sequence number
 *	on the data packet.
 *  3.	If the ACK WR is done sending, we get called from the
 *	send queue completion handler, and check whether there's
 *	another ACK pending (postponed because the WR was on the
 *	queue). If so, we transmit it.
 *
 * We maintain 2 variables:
 *  -	i_ack_flags, which keeps track of whether the ACK WR
 *	is currently in the send queue or not (IB_ACK_IN_FLIGHT)
 *  -	i_ack_next, which is the last sequence number we received
 *
 * Potentially, send queue and receive queue handlers can run concurrently.
 * It would be nice to not have to use a spinlock to synchronize things,
 * but the one problem that rules this out is that 64bit updates are
 * not atomic on all platforms. Things would be a lot simpler if
 * we had atomic64 or maybe cmpxchg64 everywhere.
 *
 * Reconnecting complicates this picture just slightly. When we
 * reconnect, we may be seeing duplicate packets. The peer
 * is retransmitting them, because it hasn't seen an ACK for
 * them. It is important that we ACK these.
 *
 * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
 * this flag set *MUST* be acknowledged immediately.
 */

/*
 * When we get here, we're called from the recv queue handler.
 * Check whether we ought to transmit an ACK.
 */
void rds_iw_attempt_ack(struct rds_iw_connection *ic)
{
	unsigned int adv_credits;

	if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
		return;

	if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
		rds_iw_stats_inc(s_iw_ack_send_delayed);
		return;
	}

	/* Can we get a send credit? */
	if (!rds_iw_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
		rds_iw_stats_inc(s_iw_tx_throttle);
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		return;
	}

	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	rds_iw_send_ack(ic, adv_credits);
}

/*
 * We get here from the send completion handler, when the
 * adapter tells us the ACK frame was sent.
 */
void rds_iw_ack_send_complete(struct rds_iw_connection *ic)
{
	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
	rds_iw_attempt_ack(ic);
}

/*
 * This is called by the regular xmit code when it wants to piggyback
 * an ACK on an outgoing frame.
 */
u64 rds_iw_piggyb_ack(struct rds_iw_connection *ic)
{
	if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
		rds_iw_stats_inc(s_iw_ack_send_piggybacked);
	return rds_iw_get_ack(ic);
}

/*
 * It's kind of lame that we're copying from the posted receive pages into
 * long-lived bitmaps.  We could have posted the bitmaps and rdma written into
 * them.  But receiving new congestion bitmaps should be a *rare* event, so
 * hopefully we won't need to invest that complexity in making it more
 * efficient.  By copying we can share a simpler core with TCP which has to
 * copy.
 */
static void rds_iw_cong_recv(struct rds_connection *conn,
			      struct rds_iw_incoming *iwinc)
{
	struct rds_cong_map *map;
	unsigned int map_off;
	unsigned int map_page;
	struct rds_page_frag *frag;
	unsigned long frag_off;
	unsigned long to_copy;
	unsigned long copied;
	uint64_t uncongested = 0;
	void *addr;

	/* catch completely corrupt packets */
	if (be32_to_cpu(iwinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
		return;

	map = conn->c_fcong;
	map_page = 0;
	map_off = 0;

	frag = list_entry(iwinc->ii_frags.next, struct rds_page_frag, f_item);
	frag_off = 0;

	copied = 0;

	while (copied < RDS_CONG_MAP_BYTES) {
		uint64_t *src, *dst;
		unsigned int k;

		to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
		BUG_ON(to_copy & 7); /* Must be 64bit aligned. */

		addr = kmap_atomic(frag->f_page);

		src = addr + frag_off;
		dst = (void *)map->m_page_addrs[map_page] + map_off;
		for (k = 0; k < to_copy; k += 8) {
			/* Record ports that became uncongested, ie
			 * bits that changed from 0 to 1. */
			uncongested |= ~(*src) & *dst;
			*dst++ = *src++;
		}
		kunmap_atomic(addr);

		copied += to_copy;

		map_off += to_copy;
		if (map_off == PAGE_SIZE) {
			map_off = 0;
			map_page++;
		}

		frag_off += to_copy;
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
	}

	/* the congestion map is in little endian order */
	uncongested = le64_to_cpu(uncongested);

	rds_cong_map_updated(map, uncongested);
}

/*
 * Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8 byte fragment from an ACK frame.
 */
struct rds_iw_ack_state {
	u64		ack_next;
	u64		ack_recv;
	unsigned int	ack_required:1;
	unsigned int	ack_next_valid:1;
	unsigned int	ack_recv_valid:1;
};

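/*
 * Handle one receive completion: validate the header, note the piggybacked
 * ACK and credit update, and either queue the fragment on the message being
 * assembled or, on the last fragment, hand the message up to the RDS core.
 */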
static void rds_iw_process_recv(struct rds_connection *conn,
				struct rds_iw_recv_work *recv, u32 byte_len,
				struct rds_iw_ack_state *state)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct rds_iw_incoming *iwinc = ic->i_iwinc;
	struct rds_header *ihdr, *hdr;

	/* XXX shut down the connection if port 0,0 are seen? */

	rdsdebug("ic %p iwinc %p recv %p byte len %u\n", ic, iwinc, recv,
		 byte_len);

	if (byte_len < sizeof(struct rds_header)) {
		rds_iw_conn_error(conn, "incoming message "
		       "from %pI4 didn't include a "
		       "header, disconnecting and "
		       "reconnecting\n",
		       &conn->c_faddr);
		return;
	}
	byte_len -= sizeof(struct rds_header);

	ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];

	/* Validate the checksum. */
	if (!rds_message_verify_checksum(ihdr)) {
		rds_iw_conn_error(conn, "incoming message "
		       "from %pI4 has corrupted header - "
		       "forcing a reconnect\n",
		       &conn->c_faddr);
		rds_stats_inc(s_recv_drop_bad_checksum);
		return;
	}

	/* Process the ACK sequence which comes with every packet */
	state->ack_recv = be64_to_cpu(ihdr->h_ack);
	state->ack_recv_valid = 1;

	/* Process the credits update if there was one */
	if (ihdr->h_credit)
		rds_iw_send_add_credits(conn, ihdr->h_credit);

	if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && byte_len == 0) {
		/* This is an ACK-only packet. The reason it gets
		 * special treatment here is that historically, ACKs
		 * were rather special beasts.
		 */
		rds_iw_stats_inc(s_iw_ack_received);

		/*
		 * Usually the frags make their way on to incs and are then freed as
		 * the inc is freed.  We don't go that route, so we have to drop the
		 * page ref ourselves.  We can't just leave the page on the recv
		 * because that confuses the dma mapping of pages and each recv's use
		 * of a partial page.  We can leave the frag, though, it will be
		 * reused.
		 *
		 * FIXME: Fold this into the code path below.
		 */
		rds_iw_frag_drop_page(recv->r_frag);
		return;
	}

	/*
	 * If we don't already have an inc on the connection then this
	 * fragment has a header and starts a message; copy its header
	 * into the inc and save the inc so we can hang upcoming fragments
	 * off its list.
	 */
	if (!iwinc) {
		iwinc = recv->r_iwinc;
		recv->r_iwinc = NULL;
		ic->i_iwinc = iwinc;

		hdr = &iwinc->ii_inc.i_hdr;
		memcpy(hdr, ihdr, sizeof(*hdr));
		ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);

		rdsdebug("ic %p iwinc %p rem %u flag 0x%x\n", ic, iwinc,
			 ic->i_recv_data_rem, hdr->h_flags);
	} else {
		hdr = &iwinc->ii_inc.i_hdr;
		/* We can't just use memcmp here; fragments of a
		 * single message may carry different ACKs */
		if (hdr->h_sequence != ihdr->h_sequence ||
		    hdr->h_len != ihdr->h_len ||
		    hdr->h_sport != ihdr->h_sport ||
		    hdr->h_dport != ihdr->h_dport) {
			rds_iw_conn_error(conn,
				"fragment header mismatch; forcing reconnect\n");
			return;
		}
	}

	list_add_tail(&recv->r_frag->f_item, &iwinc->ii_frags);
	recv->r_frag = NULL;

	if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
		ic->i_recv_data_rem -= RDS_FRAG_SIZE;
	else {
		ic->i_recv_data_rem = 0;
		ic->i_iwinc = NULL;

		if (iwinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
			rds_iw_cong_recv(conn, iwinc);
		else {
			rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
					  &iwinc->ii_inc, GFP_ATOMIC);
			state->ack_next = be64_to_cpu(hdr->h_sequence);
			state->ack_next_valid = 1;
		}

		/* Evaluate the ACK_REQUIRED flag *after* we received
		 * the complete frame, and after bumping the next_rx
		 * sequence. */
		if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
			rds_stats_inc(s_recv_ack_required);
			state->ack_required = 1;
		}

		rds_inc_put(&iwinc->ii_inc);
	}
}

/*
 * Plucking the oldest entry from the ring can be done concurrently with
 * the thread refilling the ring.  Each ring operation is protected by
 * spinlocks and the transient state of refilling doesn't change the
 * recording of which entry is oldest.
 *
 * This relies on IB only calling one cq comp_handler for each cq so that
 * there will only be one caller of rds_recv_incoming() per RDS connection.
 */
void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_iw_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_iw_stats_inc(s_iw_rx_cq_call);

	tasklet_schedule(&ic->i_recv_tasklet);
}

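/*
 * Drain the recv CQ.  Recv WRs complete in the order they were posted, so
 * each completion corresponds to the oldest outstanding ring entry, which
 * we unmap, process and then return to the ring.
 */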
static inline void rds_poll_cq(struct rds_iw_connection *ic,
			       struct rds_iw_ack_state *state)
{
	struct rds_connection *conn = ic->conn;
	struct ib_wc wc;
	struct rds_iw_recv_work *recv;

	while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_iw_stats_inc(s_iw_rx_cq_event);

		recv = &ic->i_recvs[rds_iw_ring_oldest(&ic->i_recv_ring)];

		rds_iw_recv_unmap_page(ic, recv);

		/*
		 * Also process recvs in connecting state because it is possible
		 * to get a recv completion _before_ the rdmacm ESTABLISHED
		 * event is processed.
		 */
		if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
			/* We expect errors as the qp is drained during shutdown */
			if (wc.status == IB_WC_SUCCESS) {
				rds_iw_process_recv(conn, recv, wc.byte_len, state);
			} else {
				rds_iw_conn_error(conn, "recv completion on "
				       "%pI4 had status %u, disconnecting and "
				       "reconnecting\n", &conn->c_faddr,
				       wc.status);
			}
		}

		rds_iw_ring_free(&ic->i_recv_ring, 1);
	}
}

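/*
 * Tasklet bottom half for recv completions: poll the CQ, re-arm the
 * completion notification and poll again to close the race with events
 * that arrive in between, then act on the ACK state gathered while polling.
 */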
void rds_iw_recv_tasklet_fn(unsigned long data)
{
	struct rds_iw_connection *ic = (struct rds_iw_connection *) data;
	struct rds_connection *conn = ic->conn;
	struct rds_iw_ack_state state = { 0, };

	rds_poll_cq(ic, &state);
	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	rds_poll_cq(ic, &state);

	if (state.ack_next_valid)
		rds_iw_set_ack(ic, state.ack_next, state.ack_required);
	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
		rds_send_drop_acked(conn, state.ack_recv, NULL);
		ic->i_ack_recv = state.ack_recv;
	}
	if (rds_conn_up(conn))
		rds_iw_attempt_ack(ic);

	/* If we ever end up with a really empty receive ring, we're
	 * in deep trouble, as the sender will definitely see RNR
	 * timeouts. */
	if (rds_iw_ring_empty(&ic->i_recv_ring))
		rds_iw_stats_inc(s_iw_rx_ring_empty);

	/*
	 * If the ring is running low, then schedule the thread to refill.
	 */
	if (rds_iw_ring_low(&ic->i_recv_ring))
		queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
}

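/*
 * Transport recv hook, run from process context, so we can take the recv
 * mutex and refill the ring with blocking GFP_KERNEL/GFP_HIGHUSER
 * allocations here.
 */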
int rds_iw_recv(struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	int ret = 0;

	rdsdebug("conn %p\n", conn);

	/*
	 * If we get a temporary posting failure in this context then
	 * we're really low and we want the caller to back off for a bit.
	 */
	mutex_lock(&ic->i_recv_mutex);
	if (rds_iw_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 0))
		ret = -ENOMEM;
	else
		rds_iw_stats_inc(s_iw_rx_refill_from_thread);
	mutex_unlock(&ic->i_recv_mutex);

	if (rds_conn_up(conn))
		rds_iw_attempt_ack(ic);

	return ret;
}

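/*
 * Module init/exit for the receive path: size the receive memory limit from
 * available RAM and create the slab caches for incoming messages and frags.
 */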
int rds_iw_recv_init(void)
{
	struct sysinfo si;
	int ret = -ENOMEM;

	/* Default to 30% of all available RAM for recv memory */
	si_meminfo(&si);
	rds_iw_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;

	rds_iw_incoming_slab = kmem_cache_create("rds_iw_incoming",
					sizeof(struct rds_iw_incoming),
					0, 0, NULL);
	if (!rds_iw_incoming_slab)
		goto out;

	rds_iw_frag_slab = kmem_cache_create("rds_iw_frag",
					sizeof(struct rds_page_frag),
					0, 0, NULL);
	if (!rds_iw_frag_slab)
		kmem_cache_destroy(rds_iw_incoming_slab);
	else
		ret = 0;
out:
	return ret;
}

void rds_iw_recv_exit(void)
{
	kmem_cache_destroy(rds_iw_incoming_slab);
	kmem_cache_destroy(rds_iw_frag_slab);
}