#ifndef _RDS_RDS_H
#define _RDS_RDS_H

#include <net/sock.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <rdma/rdma_cm.h>
#include <linux/mutex.h>
#include <linux/rds.h>
#include <linux/rhashtable.h>

#include "info.h"

/*
 * RDS Network protocol version
 */
#define RDS_PROTOCOL_3_0	0x0300
#define RDS_PROTOCOL_3_1	0x0301
#define RDS_PROTOCOL_VERSION	RDS_PROTOCOL_3_1
#define RDS_PROTOCOL_MAJOR(v)	((v) >> 8)
#define RDS_PROTOCOL_MINOR(v)	((v) & 255)
#define RDS_PROTOCOL(maj, min)	(((maj) << 8) | (min))
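/*
 * Worked example, added for illustration and not part of the original
 * header: the macros above pack major/minor into one 16-bit value and
 * round-trip cleanly, e.g. RDS_PROTOCOL(3, 1) == 0x0301,
 * RDS_PROTOCOL_MAJOR(0x0301) == 3 and RDS_PROTOCOL_MINOR(0x0301) == 1.
 */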

/*
 * XXX randomly chosen, but at least seems to be unused:
 * #               18464-18768 Unassigned
 * We should do better.  We want a reserved port to discourage unpriv'ed
 * userspace from listening.
 */
#define RDS_PORT	18634

#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif

#ifdef DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
static inline __printf(1, 2)
void rdsdebug(char *fmt, ...)
{
}
#endif

/* XXX is there one of these somewhere? */
#define ceil(x, y) \
	({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })

#define RDS_FRAG_SHIFT	12
#define RDS_FRAG_SIZE	((unsigned int)(1 << RDS_FRAG_SHIFT))
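/*
 * Illustrative sketch, not part of the original header: ceil() is plain
 * round-up integer division, so the number of RDS_FRAG_SIZE (4KB)
 * fragments a payload needs is ceil(len, RDS_FRAG_SIZE), e.g.
 * ceil(10000, 4096) == 3.
 */
static inline unsigned long rds_frag_count_example(unsigned long len)
{
	return ceil(len, RDS_FRAG_SIZE);
}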

#define RDS_CONG_MAP_BYTES	(65536 / 8)
#define RDS_CONG_MAP_PAGES	(PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
#define RDS_CONG_MAP_PAGE_BITS	(PAGE_SIZE * 8)

struct rds_cong_map {
	struct rb_node		m_rb_node;
	__be32			m_addr;
	wait_queue_head_t	m_waitq;
	struct list_head	m_conn_list;
	unsigned long		m_page_addrs[RDS_CONG_MAP_PAGES];
};
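/*
 * Illustrative sketch, an assumption drawn from the constants above rather
 * than a copy of the real cong.c helpers: the map carries one bit per
 * 16-bit port (65536 bits == 8KB) spread across RDS_CONG_MAP_PAGES pages,
 * so a port number splits into a page index and a bit offset within that
 * page.
 */
static inline unsigned long *rds_cong_example_page(struct rds_cong_map *map,
						   u16 port,
						   unsigned long *bit_off)
{
	unsigned long i = port / RDS_CONG_MAP_PAGE_BITS;

	*bit_off = port % RDS_CONG_MAP_PAGE_BITS;
	return (unsigned long *)map->m_page_addrs[i];
}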


/*
 * This is how we will track the connection state:
 * A connection is always in one of the following
 * states. Updates to the state are atomic and imply
 * a memory barrier.
 */
enum {
	RDS_CONN_DOWN = 0,
	RDS_CONN_CONNECTING,
	RDS_CONN_DISCONNECTING,
	RDS_CONN_UP,
	RDS_CONN_ERROR,
};

/* Bits for c_flags */
#define RDS_LL_SEND_FULL	0
#define RDS_RECONNECT_PENDING	1
#define RDS_IN_XMIT		2
#define RDS_RECV_REFILL		3

struct rds_connection {
	struct hlist_node	c_hash_node;
	__be32			c_laddr;
	__be32			c_faddr;
	unsigned int		c_loopback:1,
				c_outgoing:1,
				c_pad_to_32:30;
	struct rds_connection	*c_passive;

	struct rds_cong_map	*c_lcong;
	struct rds_cong_map	*c_fcong;

	struct rds_message	*c_xmit_rm;
	unsigned long		c_xmit_sg;
	unsigned int		c_xmit_hdr_off;
	unsigned int		c_xmit_data_off;
	unsigned int		c_xmit_atomic_sent;
	unsigned int		c_xmit_rdma_sent;
	unsigned int		c_xmit_data_sent;

	spinlock_t		c_lock;		/* protect msg queues */
	u64			c_next_tx_seq;
	struct list_head	c_send_queue;
	struct list_head	c_retrans;

	u64			c_next_rx_seq;

	struct rds_transport	*c_trans;
	void			*c_transport_data;

	atomic_t		c_state;
	unsigned long		c_send_gen;
	unsigned long		c_flags;
	unsigned long		c_reconnect_jiffies;
	struct delayed_work	c_send_w;
	struct delayed_work	c_recv_w;
	struct delayed_work	c_conn_w;
	struct work_struct	c_down_w;
	struct mutex		c_cm_lock;	/* protect conn state & cm */
	wait_queue_head_t	c_waitq;

	struct list_head	c_map_item;
	unsigned long		c_map_queued;

	unsigned int		c_unacked_packets;
	unsigned int		c_unacked_bytes;

	/* Protocol version */
	unsigned int		c_version;
	possible_net_t		c_net;
};

static inline
struct net *rds_conn_net(struct rds_connection *conn)
{
	return read_pnet(&conn->c_net);
}

static inline
void rds_conn_net_set(struct rds_connection *conn, struct net *net)
{
	write_pnet(&conn->c_net, net);
}

#define RDS_FLAG_CONG_BITMAP	0x01
#define RDS_FLAG_ACK_REQUIRED	0x02
#define RDS_FLAG_RETRANSMITTED	0x04
#define RDS_MAX_ADV_CREDIT	255

/*
 * Maximum space available for extension headers.
 */
#define RDS_HEADER_EXT_SPACE	16

struct rds_header {
	__be64	h_sequence;
	__be64	h_ack;
	__be32	h_len;
	__be16	h_sport;
	__be16	h_dport;
	u8	h_flags;
	u8	h_credit;
	u8	h_padding[4];
	__sum16	h_csum;

	u8	h_exthdr[RDS_HEADER_EXT_SPACE];
};

/*
 * Reserved - indicates end of extensions
 */
#define RDS_EXTHDR_NONE		0

/*
 * This extension header is included in the very
 * first message that is sent on a new connection,
 * and identifies the protocol level. This will help
 * rolling updates if a future change requires breaking
 * the protocol.
 * NB: This is no longer true for IB, where we do a version
 * negotiation during the connection setup phase (protocol
 * version information is included in the RDMA CM private data).
 */
#define RDS_EXTHDR_VERSION	1
struct rds_ext_header_version {
	__be32			h_version;
};

/*
 * This extension header is included in the RDS message
 * chasing an RDMA operation.
 */
#define RDS_EXTHDR_RDMA		2
struct rds_ext_header_rdma {
	__be32			h_rdma_rkey;
};

/*
 * This extension header tells the peer about the
 * destination <R_Key,offset> of the requested RDMA
 * operation.
 */
#define RDS_EXTHDR_RDMA_DEST	3
struct rds_ext_header_rdma_dest {
	__be32			h_rdma_rkey;
	__be32			h_rdma_offset;
};

#define __RDS_EXTHDR_MAX	16 /* for now */
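/*
 * Illustrative sketch, an assumption drawn from the definitions above and
 * not a copy of the real message.c parser: each extension in h_exthdr is a
 * one-byte type followed by its fixed-size payload, with RDS_EXTHDR_NONE
 * terminating the list.  The hypothetical helper below maps a type to the
 * payload size implied by the structs above.
 */
static inline unsigned int rds_exthdr_example_len(unsigned int type)
{
	switch (type) {
	case RDS_EXTHDR_VERSION:
		return sizeof(struct rds_ext_header_version);
	case RDS_EXTHDR_RDMA:
		return sizeof(struct rds_ext_header_rdma);
	case RDS_EXTHDR_RDMA_DEST:
		return sizeof(struct rds_ext_header_rdma_dest);
	default:
		return 0;	/* RDS_EXTHDR_NONE: end of extensions */
	}
}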

struct rds_incoming {
	atomic_t		i_refcount;
	struct list_head	i_item;
	struct rds_connection	*i_conn;
	struct rds_header	i_hdr;
	unsigned long		i_rx_jiffies;
	__be32			i_saddr;

	rds_rdma_cookie_t	i_rdma_cookie;
};

struct rds_mr {
	struct rb_node		r_rb_node;
	atomic_t		r_refcount;
	u32			r_key;

	/* A copy of the creation flags */
	unsigned int		r_use_once:1;
	unsigned int		r_invalidate:1;
	unsigned int		r_write:1;

	/* This is for RDS_MR_DEAD.
	 * It would be nice & consistent to make this part of the above
	 * bit field here, but we need to use test_and_set_bit.
	 */
	unsigned long		r_state;
	struct rds_sock		*r_sock; /* back pointer to the socket that owns us */
	struct rds_transport	*r_trans;
	void			*r_trans_private;
};

/* Flags for mr->r_state */
#define RDS_MR_DEAD		0

static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
{
	return r_key | (((u64) offset) << 32);
}

static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
{
	return cookie;
}

static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
{
	return cookie >> 32;
}
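/*
 * Worked example, added for illustration and not part of the original
 * header: the cookie helpers above pack r_key into the low 32 bits and
 * the offset into the high 32 bits, so they round-trip.
 */
static inline int rds_rdma_cookie_example(void)
{
	rds_rdma_cookie_t cookie = rds_rdma_make_cookie(0x1234, 100);

	return rds_rdma_cookie_key(cookie) == 0x1234 &&
	       rds_rdma_cookie_offset(cookie) == 100;	/* both true */
}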

/* atomic operation types */
#define RDS_ATOMIC_TYPE_CSWP		0
#define RDS_ATOMIC_TYPE_FADD		1

/*
 * m_sock_item and m_conn_item are on lists that are serialized under
 * conn->c_lock.  m_sock_item has additional meaning in that once it is empty
 * the message will not be put back on the retransmit list after being sent.
 * Messages that are canceled while being sent rely on this.
 *
 * m_inc is used by loopback so that it can pass an incoming message straight
 * back up into the rx path.  It embeds a wire header which is also used by
 * the send path, which is kind of awkward.
 *
 * m_sock_item indicates the message's presence on a socket's send or receive
 * queue.  m_rs will point to that socket.
 *
 * m_daddr is used by cancellation to prune messages to a given destination.
 *
 * The RDS_MSG_ON_SOCK and RDS_MSG_ON_CONN flags are used to avoid lock
 * nesting.  As paths iterate over messages on a sock, or conn, they must
 * also lock the conn, or sock, to remove the message from those lists too.
 * Testing the flag to determine if the message is still on the lists lets
 * us avoid testing the list_head directly.  That means each path can use
 * the message's list_head to keep it on a local list while juggling locks
 * without confusing the other path.
 *
 * m_ack_seq is an optional field set by transports that need a different
 * sequence number range to invalidate.  They can use this in a callback
 * that they pass to rds_send_drop_acked() to see if each message has been
 * acked.  The HAS_ACK_SEQ flag can be used to detect messages which haven't
 * had ack_seq set yet.
 */
#define RDS_MSG_ON_SOCK		1
#define RDS_MSG_ON_CONN		2
#define RDS_MSG_HAS_ACK_SEQ	3
#define RDS_MSG_ACK_REQUIRED	4
#define RDS_MSG_RETRANSMITTED	5
#define RDS_MSG_MAPPED		6
#define RDS_MSG_PAGEVEC		7

struct rds_message {
	atomic_t		m_refcount;
	struct list_head	m_sock_item;
	struct list_head	m_conn_item;
	struct rds_incoming	m_inc;
	u64			m_ack_seq;
	__be32			m_daddr;
	unsigned long		m_flags;

	/* Never access m_rs without holding m_rs_lock.
	 * Lock nesting is
	 *  rm->m_rs_lock
	 *   -> rs->rs_lock
	 */
	spinlock_t		m_rs_lock;
	wait_queue_head_t	m_flush_wait;

	struct rds_sock		*m_rs;

	/* cookie to send to remote, in rds header */
	rds_rdma_cookie_t	m_rdma_cookie;

	unsigned int		m_used_sgs;
	unsigned int		m_total_sgs;

	void			*m_final_op;

	struct {
		struct rm_atomic_op {
			int			op_type;
			union {
				struct {
					uint64_t	compare;
					uint64_t	swap;
					uint64_t	compare_mask;
					uint64_t	swap_mask;
				} op_m_cswp;
				struct {
					uint64_t	add;
					uint64_t	nocarry_mask;
				} op_m_fadd;
			};

			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} atomic;
		struct rm_rdma_op {
			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_write:1;
			unsigned int		op_fence:1;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			unsigned int		op_bytes;
			unsigned int		op_nents;
			unsigned int		op_count;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} rdma;
		struct rm_data_op {
			unsigned int		op_active:1;
			unsigned int		op_nents;
			unsigned int		op_count;
			unsigned int		op_dmasg;
			unsigned int		op_dmaoff;
			struct scatterlist	*op_sg;
		} data;
	};
};

/*
 * The RDS notifier is used (optionally) to tell the application about
 * completed RDMA operations. Rather than keeping the whole rds message
 * around on the queue, we allocate a small notifier that is put on the
 * socket's notifier_list. Notifications are delivered to the application
 * through control messages.
 */
struct rds_notifier {
	struct list_head	n_list;
	uint64_t		n_user_token;
	int			n_status;
};
/**
 * struct rds_transport - transport-specific behavioural hooks
 *
 * @xmit: .xmit is called by rds_send_xmit() to tell the transport to send
 *        part of a message.  The caller serializes on the send_sem so this
 *        doesn't need to be reentrant for a given conn.  The header must be
 *        sent before the data payload.  .xmit must be prepared to send a
 *        message with no data payload.  .xmit should return the number of
 *        bytes that were sent down the connection, including header bytes.
 *        Returning 0 tells the caller that it doesn't need to perform any
 *        additional work now.  This is usually the case when the transport has
 *        filled the sending queue for its connection and will handle
 *        triggering the rds thread to continue the send when space becomes
 *        available.  Returning -EAGAIN tells the caller to retry the send
 *        immediately.  Returning -ENOMEM tells the caller to retry the send at
 *        some point in the future.  (An illustrative sketch of this return
 *        convention follows the struct below.)
 *
 * @conn_shutdown: conn_shutdown stops traffic on the given connection.  Once
 *                 it returns, the connection cannot call rds_recv_incoming().
 *                 This will only be called once after conn_connect returns
 *                 non-zero success.  The caller serializes this with the
 *                 send and connecting paths (xmit_* and conn_*).  The
 *                 transport is responsible for other serialization, including
 *                 rds_recv_incoming().  This is called in process context but
 *                 should try hard not to block.
 */

struct rds_transport {
	char			t_name[TRANSNAMSIZ];
	struct list_head	t_item;
	struct module		*t_owner;
	unsigned int		t_prefer_loopback:1;
	unsigned int		t_type;

	int (*laddr_check)(struct net *net, __be32 addr);
	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
	void (*conn_free)(void *data);
	int (*conn_connect)(struct rds_connection *conn);
	void (*conn_shutdown)(struct rds_connection *conn);
	void (*xmit_prepare)(struct rds_connection *conn);
	void (*xmit_complete)(struct rds_connection *conn);
	int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
		    unsigned int hdr_off, unsigned int sg, unsigned int off);
	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
	int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
	int (*recv)(struct rds_connection *conn);
	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
	void (*inc_free)(struct rds_incoming *inc);

	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event);
	int (*cm_initiate_connect)(struct rdma_cm_id *cm_id);
	void (*cm_connect_complete)(struct rds_connection *conn,
				    struct rdma_cm_event *event);

	unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
					unsigned int avail);
	void (*exit)(void);
	void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
			struct rds_sock *rs, u32 *key_ret);
	void (*sync_mr)(void *trans_private, int direction);
	void (*free_mr)(void *trans_private, int invalidate);
	void (*flush_mrs)(void);
};
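/*
 * Illustrative sketch, an assumption and not the real send loop in send.c:
 * how a caller might act on the .xmit return convention documented above.
 */
static inline int rds_xmit_example(struct rds_connection *conn,
				   struct rds_message *rm)
{
	int ret = conn->c_trans->xmit(conn, rm, 0, 0, 0);

	if (ret == 0)		/* transport will reschedule us itself */
		return 0;
	if (ret == -EAGAIN)	/* retry the send immediately */
		return 1;
	if (ret == -ENOMEM)	/* retry at some point in the future */
		return 0;
	return ret;		/* > 0: bytes sent, including header bytes */
}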

struct rds_sock {
	struct sock		rs_sk;

	u64			rs_user_addr;
	u64			rs_user_bytes;

	/*
	 * bound_addr used for both incoming and outgoing, no INADDR_ANY
	 * support.
	 */
	struct rhash_head	rs_bound_node;
	u64			rs_bound_key;
	__be32			rs_bound_addr;
	__be32			rs_conn_addr;
	__be16			rs_bound_port;
	__be16			rs_conn_port;
	struct rds_transport    *rs_transport;

	/*
	 * rds_sendmsg caches the conn it used the last time around.
	 * This helps avoid costly lookups.
	 */
	struct rds_connection	*rs_conn;

	/* flag indicating we were congested or not */
	int			rs_congested;
	/* seen congestion (ENOBUFS) when sending? */
	int			rs_seen_congestion;

	/* rs_lock protects all these adjacent members before the newline */
	spinlock_t		rs_lock;
	struct list_head	rs_send_queue;
	u32			rs_snd_bytes;
	int			rs_rcv_bytes;
	struct list_head	rs_notify_queue;	/* currently used for failed RDMAs */

	/* Congestion wake_up. If rs_cong_monitor is set, we use cong_mask
	 * to decide whether the application should be woken up.
	 * If not set, we use rs_cong_track to find out whether a cong map
	 * update arrived.
	 */
	uint64_t		rs_cong_mask;
	uint64_t		rs_cong_notify;
	struct list_head	rs_cong_list;
	unsigned long		rs_cong_track;

	/*
	 * rs_recv_lock protects the receive queue, and is
	 * used to serialize with rds_release.
	 */
	rwlock_t		rs_recv_lock;
	struct list_head	rs_recv_queue;

	/* just for stats reporting */
	struct list_head	rs_item;

	/* these have their own lock */
	spinlock_t		rs_rdma_lock;
	struct rb_root		rs_rdma_keys;

	/* Socket options - in case there will be more */
	unsigned char		rs_recverr,
				rs_cong_monitor;
};

static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
{
	return container_of(sk, struct rds_sock, rs_sk);
}
static inline struct sock *rds_rs_to_sk(struct rds_sock *rs)
{
	return &rs->rs_sk;
}

/*
 * The core networking stack sets sk_sndbuf and sk_rcvbuf to twice the
 * value requested via setsockopt() to account for bookkeeping overhead.
 * RDS carries no such per-message overhead, so halving here recovers the
 * number of payload bytes the user asked for: e.g. an SO_SNDBUF of 64KB
 * yields sk_sndbuf == 128KB and rds_sk_sndbuf() == 64KB.
 */
static inline int rds_sk_sndbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_sndbuf / 2;
}
static inline int rds_sk_rcvbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
}

struct rds_statistics {
	uint64_t	s_conn_reset;
	uint64_t	s_recv_drop_bad_checksum;
	uint64_t	s_recv_drop_old_seq;
	uint64_t	s_recv_drop_no_sock;
	uint64_t	s_recv_drop_dead_sock;
	uint64_t	s_recv_deliver_raced;
	uint64_t	s_recv_delivered;
	uint64_t	s_recv_queued;
	uint64_t	s_recv_immediate_retry;
	uint64_t	s_recv_delayed_retry;
	uint64_t	s_recv_ack_required;
	uint64_t	s_recv_rdma_bytes;
	uint64_t	s_recv_ping;
	uint64_t	s_send_queue_empty;
	uint64_t	s_send_queue_full;
	uint64_t	s_send_lock_contention;
	uint64_t	s_send_lock_queue_raced;
	uint64_t	s_send_immediate_retry;
	uint64_t	s_send_delayed_retry;
	uint64_t	s_send_drop_acked;
	uint64_t	s_send_ack_required;
	uint64_t	s_send_queued;
	uint64_t	s_send_rdma;
	uint64_t	s_send_rdma_bytes;
	uint64_t	s_send_pong;
	uint64_t	s_page_remainder_hit;
	uint64_t	s_page_remainder_miss;
	uint64_t	s_copy_to_user;
	uint64_t	s_copy_from_user;
	uint64_t	s_cong_update_queued;
	uint64_t	s_cong_update_received;
	uint64_t	s_cong_send_error;
	uint64_t	s_cong_send_blocked;
};

/* af_rds.c */
void rds_sock_addref(struct rds_sock *rs);
void rds_sock_put(struct rds_sock *rs);
void rds_wake_sk_sleep(struct rds_sock *rs);
static inline void __rds_wake_sk_sleep(struct sock *sk)
{
	wait_queue_head_t *waitq = sk_sleep(sk);

	if (!sock_flag(sk, SOCK_DEAD) && waitq)
		wake_up(waitq);
}
extern wait_queue_head_t rds_poll_waitq;

/* bind.c */
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
void rds_remove_bound(struct rds_sock *rs);
struct rds_sock *rds_find_bound(__be32 addr, __be16 port);
int rds_bind_lock_init(void);
void rds_bind_lock_destroy(void);

/* cong.c */
int rds_cong_get_maps(struct rds_connection *conn);
void rds_cong_add_conn(struct rds_connection *conn);
void rds_cong_remove_conn(struct rds_connection *conn);
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock, struct rds_sock *rs);
void rds_cong_queue_updates(struct rds_cong_map *map);
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t);
int rds_cong_updated_since(unsigned long *recent);
void rds_cong_add_socket(struct rds_sock *);
void rds_cong_remove_socket(struct rds_sock *);
void rds_cong_exit(void);
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);

/* conn.c */
int rds_conn_init(void);
void rds_conn_exit(void);
struct rds_connection *rds_conn_create(struct net *net,
				       __be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp);
struct rds_connection *rds_conn_create_outgoing(struct net *net,
						__be32 laddr, __be32 faddr,
						struct rds_transport *trans,
						gfp_t gfp);
void rds_conn_shutdown(struct rds_connection *conn);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
void rds_conn_connect_if_down(struct rds_connection *conn);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    size_t item_len);
__printf(2, 3)
void __rds_conn_error(struct rds_connection *conn, const char *, ...);
#define rds_conn_error(conn, fmt...) \
	__rds_conn_error(conn, KERN_WARNING "RDS: " fmt)

static inline int
rds_conn_transition(struct rds_connection *conn, int old, int new)
{
	return atomic_cmpxchg(&conn->c_state, old, new) == old;
}
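/*
 * Illustrative sketch, an assumption and not copied from conn.c: a typical
 * caller claims the connecting slot with one atomic transition and backs
 * off if another path won the race.
 */
static inline int rds_conn_example_start_connect(struct rds_connection *conn)
{
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING))
		return 0;	/* another path is already connecting */
	return 1;		/* we own the DOWN -> CONNECTING transition */
}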

static inline int
rds_conn_state(struct rds_connection *conn)
{
	return atomic_read(&conn->c_state);
}

static inline int
rds_conn_up(struct rds_connection *conn)
{
	return atomic_read(&conn->c_state) == RDS_CONN_UP;
}

static inline int
rds_conn_connecting(struct rds_connection *conn)
{
	return atomic_read(&conn->c_state) == RDS_CONN_CONNECTING;
}

/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq);
int rds_message_add_extension(struct rds_header *hdr,
			      unsigned int type, const void *data, unsigned int len);
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf, unsigned int *buflen);
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
void rds_message_wait(struct rds_message *rm);
void rds_message_unmapped(struct rds_message *rm);

static inline void rds_message_make_checksum(struct rds_header *hdr)
{
	hdr->h_csum = 0;
	hdr->h_csum = ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2);
}

static inline int rds_message_verify_checksum(const struct rds_header *hdr)
{
	return !hdr->h_csum || ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2) == 0;
}
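/*
 * Illustrative sketch, an assumption and not the real send/recv paths: a
 * header is checksummed just before transmit and verified on receive.  The
 * verify helper treats a zero h_csum as "no checksum", so this round-trip
 * always returns 1.
 */
static inline int rds_header_csum_example(struct rds_header *hdr)
{
	rds_message_make_checksum(hdr);
	return rds_message_verify_checksum(hdr);
}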

/* page.c */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp);
int rds_page_copy_user(struct page *page, unsigned long offset,
		       void __user *ptr, unsigned long bytes,
		       int to_user);
#define rds_page_copy_to_user(page, offset, ptr, bytes) \
	rds_page_copy_user(page, offset, ptr, bytes, 1)
#define rds_page_copy_from_user(page, offset, ptr, bytes) \
	rds_page_copy_user(page, offset, ptr, bytes, 0)
void rds_page_exit(void);

/* recv.c */
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  __be32 saddr);
void rds_inc_put(struct rds_incoming *inc);
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
		       struct rds_incoming *inc, gfp_t gfp);
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags);
void rds_clear_recv_queue(struct rds_sock *rs);
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip);

/* send.c */
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
void rds_send_reset(struct rds_connection *conn);
int rds_send_xmit(struct rds_connection *conn);
struct sockaddr_in;
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked);
int rds_send_pong(struct rds_connection *conn, __be16 dport);
struct rds_message *rds_send_get_message(struct rds_connection *,
					 struct rm_rdma_op *);

/* rdma.c */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
void rds_rdma_drop_keys(struct rds_sock *rs);
int rds_rdma_extra_size(struct rds_rdma_args *args);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
		      struct cmsghdr *cmsg);
void rds_rdma_free_op(struct rm_rdma_op *ro);
void rds_atomic_free_op(struct rm_atomic_op *ao);
void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg);

void __rds_put_mr_final(struct rds_mr *mr);
static inline void rds_mr_put(struct rds_mr *mr)
{
	if (atomic_dec_and_test(&mr->r_refcount))
		__rds_put_mr_final(mr);
}

/* stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
#define rds_stats_inc_which(which, member) do {		\
	per_cpu(which, get_cpu()).member++;		\
	put_cpu();					\
} while (0)
#define rds_stats_inc(member) rds_stats_inc_which(rds_stats, member)
#define rds_stats_add_which(which, member, count) do {		\
	per_cpu(which, get_cpu()).member += count;	\
	put_cpu();					\
} while (0)
#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
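/*
 * Usage example, added for illustration and not part of the original
 * header: the macros above bump a per-CPU counter with preemption
 * disabled, e.g.:
 *
 *	rds_stats_inc(s_recv_delivered);
 *	rds_stats_add(s_send_rdma_bytes, len);
 */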
int rds_stats_init(void);
void rds_stats_exit(void);
void rds_stats_info_copy(struct rds_info_iterator *iter,
			 uint64_t *values, const char *const *names,
			 size_t nr);

/* sysctl.c */
int rds_sysctl_init(void);
void rds_sysctl_exit(void);
extern unsigned long rds_sysctl_sndbuf_min;
extern unsigned long rds_sysctl_sndbuf_default;
extern unsigned long rds_sysctl_sndbuf_max;
extern unsigned long rds_sysctl_reconnect_min_jiffies;
extern unsigned long rds_sysctl_reconnect_max_jiffies;
extern unsigned int  rds_sysctl_max_unacked_packets;
extern unsigned int  rds_sysctl_max_unacked_bytes;
extern unsigned int  rds_sysctl_ping_enable;
extern unsigned long rds_sysctl_trace_flags;
extern unsigned int  rds_sysctl_trace_level;

/* threads.c */
int rds_threads_init(void);
void rds_threads_exit(void);
extern struct workqueue_struct *rds_wq;
void rds_queue_reconnect(struct rds_connection *conn);
void rds_connect_worker(struct work_struct *);
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
void rds_connect_complete(struct rds_connection *conn);

/* transport.c */
int rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr);
void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
				       unsigned int avail);
struct rds_transport *rds_trans_get(int t_type);
int rds_trans_init(void);
void rds_trans_exit(void);

#endif