This source file includes the following definitions:
- cplhdr
- cxgbit_get_cdev
- cxgbit_put_cdev
- cxgbit_get_csk
- cxgbit_put_csk
- cxgbit_get_cnp
- cxgbit_put_cnp
- cxgbit_sock_reset_wr_list
- cxgbit_sock_peek_wr
- cxgbit_sock_enqueue_wr
- cxgbit_sock_dequeue_wr
- cdev2ppm
1
2
3
4
5
6 #ifndef __CXGBIT_H__
7 #define __CXGBIT_H__
8
9 #include <linux/mutex.h>
10 #include <linux/list.h>
11 #include <linux/spinlock.h>
12 #include <linux/idr.h>
13 #include <linux/completion.h>
14 #include <linux/netdevice.h>
15 #include <linux/sched.h>
16 #include <linux/pci.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/inet.h>
19 #include <linux/wait.h>
20 #include <linux/kref.h>
21 #include <linux/timer.h>
22 #include <linux/io.h>
23
24 #include <asm/byteorder.h>
25
26 #include <net/net_namespace.h>
27
28 #include <target/iscsi/iscsi_transport.h>
29 #include <iscsi_target_parameters.h>
30 #include <iscsi_target_login.h>
31
32 #include "t4_regs.h"
33 #include "t4_msg.h"
34 #include "cxgb4.h"
35 #include "cxgb4_uld.h"
36 #include "l2t.h"
37 #include "libcxgb_ppm.h"
38 #include "cxgbit_lro.h"
39
40 extern struct mutex cdev_list_lock;
41 extern struct list_head cdev_list_head;
42 struct cxgbit_np;
43
44 struct cxgbit_sock;
45
/* Per-iSCSI-command DDP (direct data placement) state. */
struct cxgbit_cmd {
	struct scatterlist sg;
	struct cxgbi_task_tag_info ttinfo;	/* page-pod task-tag info used for DDP */
	bool setup_ddp;		/* DDP mapping was set up for this command */
	bool release;		/* NOTE(review): presumably "tag needs releasing" — confirm */
};
52
/*
 * Upper bound on an ISO (multi-PDU segmentation offload) payload: limited
 * both by what fits in one skb's page frags and by a 16-bit length (65535).
 */
#define CXGBIT_MAX_ISO_PAYLOAD \
	min_t(u32, MAX_SKB_FRAGS * PAGE_SIZE, 65535)
55
/* Parameters describing one ISO (multi-PDU) transmit burst. */
struct cxgbit_iso_info {
	u8 flags;
	u32 mpdu;	/* assumed: payload bytes per PDU in the burst — TODO confirm */
	u32 len;
	u32 burst_len;
};
62
/* Bit values stored in cxgbit_skcb_flags(skb). */
enum cxgbit_skcb_flags {
	SKCBF_TX_NEED_HDR	= (1 << 0),	/* TX path */
	SKCBF_TX_FLAG_COMPL	= (1 << 1),	/* TX path */
	SKCBF_TX_ISO		= (1 << 2),	/* TX path: skb is an ISO burst */
	SKCBF_RX_LRO		= (1 << 3),	/* RX path: skb came in via LRO */
};
69
/* RX-side per-skb private state (see union cxgbit_skb_cb). */
struct cxgbit_skb_rx_cb {
	u8 opcode;	/* message opcode — presumably the CPL opcode; confirm */
	void *pdu_cb;
	/* deferred handler invoked when the skb is replayed from the backlog */
	void (*backlog_fn)(struct cxgbit_sock *, struct sk_buff *);
};
75
/* TX-side per-skb private state (see union cxgbit_skb_cb). */
struct cxgbit_skb_tx_cb {
	u8 submode;	/* digest submode bits (CXGBIT_SUBMODE_*) */
	u32 extra_len;
};
80
/*
 * Private per-skb state kept in skb->cb.  The two anonymous structs are
 * overlapping views: the flags/tx/rx view is used by the iSCSI TX/RX
 * paths, while the l2t/wr_next view is used when the skb is sent as a
 * work request (wr_next links it on the csk's wr_pending list).
 */
union cxgbit_skb_cb {
	struct {
		u8 flags;
		union {
			struct cxgbit_skb_tx_cb tx;
			struct cxgbit_skb_rx_cb rx;
		};
	};

	struct {
		/* This member must be first for the l2t code's benefit. */
		struct l2t_skb_cb l2t;
		struct sk_buff *wr_next;
	};
};
96
/* Accessors for the private control block stored in skb->cb. */
#define CXGBIT_SKB_CB(skb)	((union cxgbit_skb_cb *)&((skb)->cb[0]))
#define cxgbit_skcb_flags(skb)		(CXGBIT_SKB_CB(skb)->flags)
#define cxgbit_skcb_submode(skb)	(CXGBIT_SKB_CB(skb)->tx.submode)
#define cxgbit_skcb_tx_wr_next(skb)	(CXGBIT_SKB_CB(skb)->wr_next)
#define cxgbit_skcb_tx_extralen(skb)	(CXGBIT_SKB_CB(skb)->tx.extra_len)
#define cxgbit_skcb_rx_opcode(skb)	(CXGBIT_SKB_CB(skb)->rx.opcode)
#define cxgbit_skcb_rx_backlog_fn(skb)	(CXGBIT_SKB_CB(skb)->rx.backlog_fn)
#define cxgbit_rx_pdu_cb(skb)		(CXGBIT_SKB_CB(skb)->rx.pdu_cb)
105
106 static inline void *cplhdr(struct sk_buff *skb)
107 {
108 return skb->data;
109 }
110
/* Bit numbers for cxgbit_device->flags. */
enum cxgbit_cdev_flags {
	CDEV_STATE_UP = 0,	/* adapter is up and usable */
	CDEV_ISO_ENABLE,	/* ISO transmit offload available */
	CDEV_DDP_ENABLE,	/* direct data placement available */
};
116
/* Number of buckets in cxgbit_device->np_hash_tab. */
#define NP_INFO_HASH_SIZE 32

/* Hash-table node mapping a hardware server tid (stid) to its cnp. */
struct np_info {
	struct np_info *next;		/* hash-chain link */
	struct cxgbit_np *cnp;
	unsigned int stid;
};
124
/* A list_head bundled with the spinlock that protects it. */
struct cxgbit_list_head {
	struct list_head list;

	/* protects list */
	spinlock_t lock;
};
130
/* Per-adapter state; instances are linked on the global cdev_list_head. */
struct cxgbit_device {
	struct list_head list;		/* linkage on cdev_list_head */
	struct cxgb4_lld_info lldi;	/* info handed up by the cxgb4 LLD */
	struct np_info *np_hash_tab[NP_INFO_HASH_SIZE];	/* stid -> cnp lookup */

	/* protects np_hash_tab */
	spinlock_t np_lock;
	u8 selectq[MAX_NPORTS][2];
	struct cxgbit_list_head cskq;	/* connections on this adapter — TODO confirm */
	u32 mdsl;	/* assumed: max data segment length — confirm against users */
	struct kref kref;
	unsigned long flags;		/* enum cxgbit_cdev_flags bits */
};
143
/* Synchronous wait context for a firmware work-request reply. */
struct cxgbit_wr_wait {
	struct completion completion;
	int ret;	/* result code — presumably filled in by the reply handler */
};
148
/* Connection life-cycle states, loosely mirroring the TCP state machine. */
enum cxgbit_csk_state {
	CSK_STATE_IDLE = 0,
	CSK_STATE_LISTEN,
	CSK_STATE_CONNECTING,
	CSK_STATE_ESTABLISHED,
	CSK_STATE_ABORTING,
	CSK_STATE_CLOSING,
	CSK_STATE_MORIBUND,
	CSK_STATE_DEAD,
};
159
/* Bit numbers for a connection's cxgbit_sock_common->flags. */
enum cxgbit_csk_flags {
	CSK_TX_DATA_SENT = 0,
	CSK_LOGIN_PDU_DONE,
	CSK_LOGIN_DONE,
	CSK_DDP_ENABLE,
	CSK_ABORT_RPL_WAIT,
};
167
/* State shared by listening (cxgbit_np) and connected (cxgbit_sock) endpoints. */
struct cxgbit_sock_common {
	struct cxgbit_device *cdev;		/* owning adapter */
	struct sockaddr_storage local_addr;
	struct sockaddr_storage remote_addr;
	struct cxgbit_wr_wait wr_wait;		/* for synchronous WR replies */
	enum cxgbit_csk_state state;
	unsigned long flags;			/* enum cxgbit_csk_flags bits */
};
176
/* Listening endpoint (iSCSI network portal) state. */
struct cxgbit_np {
	struct cxgbit_sock_common com;
	wait_queue_head_t accept_wait;
	struct iscsi_np *np;			/* core iscsit portal we back */
	struct completion accept_comp;
	struct list_head np_accept_list;	/* connections awaiting accept — TODO confirm */

	/* protects np_accept_list */
	spinlock_t np_accept_lock;
	struct kref kref;
	unsigned int stid;			/* hardware server tid */
};
188
/* Per-connection state for one offloaded iSCSI TCP connection. */
struct cxgbit_sock {
	struct cxgbit_sock_common com;
	struct cxgbit_np *cnp;		/* listening endpoint we arrived on */
	struct iscsi_conn *conn;	/* core iscsit connection */
	struct l2t_entry *l2t;		/* L2 (neighbour) table entry */
	struct dst_entry *dst;		/* route cache entry */
	struct list_head list;
	struct sk_buff_head rxq;
	struct sk_buff_head txq;
	struct sk_buff_head ppodq;
	struct sk_buff_head backlogq;
	struct sk_buff_head skbq;
	/*
	 * Singly linked list (via cxgbit_skcb_tx_wr_next) of sent work
	 * requests; managed by cxgbit_sock_{enqueue,dequeue,peek}_wr().
	 */
	struct sk_buff *wr_pending_head;
	struct sk_buff *wr_pending_tail;
	struct sk_buff *skb;
	struct sk_buff *lro_skb;	/* LRO aggregation skbs */
	struct sk_buff *lro_hskb;
	struct list_head accept_node;	/* presumably linkage on cnp->np_accept_list */

	spinlock_t lock;
	wait_queue_head_t waitq;
	wait_queue_head_t ack_waitq;
	bool lock_owner;
	struct kref kref;
	u32 max_iso_npdu;	/* max PDUs per ISO burst — TODO confirm */
	/* TX work-request credit accounting */
	u32 wr_cred;
	u32 wr_una_cred;
	u32 wr_max_cred;
	/* TCP sequence bookkeeping */
	u32 snd_una;
	u32 tid;		/* hardware connection tid */
	u32 snd_nxt;
	u32 rcv_nxt;
	u32 smac_idx;
	u32 tx_chan;
	u32 mtu;
	u32 write_seq;
	u32 rx_credits;
	u32 snd_win;
	u32 rcv_win;
	u16 mss;
	u16 emss;		/* effective MSS — TODO confirm */
	u16 plen;
	u16 rss_qid;
	u16 txq_idx;
	u16 ctrlq_idx;
	u8 tos;
	u8 port_id;
/* digest-offload submode bits */
#define CXGBIT_SUBMODE_HCRC 0x1	/* header digest */
#define CXGBIT_SUBMODE_DCRC 0x2	/* data digest */
	u8 submode;
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 dcb_priority;	/* only with data-center-bridging support */
#endif
	u8 snd_wscale;
};
244
/* kref release callbacks used by the cxgbit_put_*() helpers below. */
void _cxgbit_free_cdev(struct kref *kref);
void _cxgbit_free_csk(struct kref *kref);
void _cxgbit_free_cnp(struct kref *kref);
248
/* Take a reference on the adapter. */
static inline void cxgbit_get_cdev(struct cxgbit_device *cdev)
{
	kref_get(&cdev->kref);
}
253
/* Drop a reference; the last put frees the adapter via _cxgbit_free_cdev(). */
static inline void cxgbit_put_cdev(struct cxgbit_device *cdev)
{
	kref_put(&cdev->kref, _cxgbit_free_cdev);
}
258
/* Take a reference on the connection. */
static inline void cxgbit_get_csk(struct cxgbit_sock *csk)
{
	kref_get(&csk->kref);
}
263
/* Drop a reference; the last put frees the connection via _cxgbit_free_csk(). */
static inline void cxgbit_put_csk(struct cxgbit_sock *csk)
{
	kref_put(&csk->kref, _cxgbit_free_csk);
}
268
/* Take a reference on the listening endpoint. */
static inline void cxgbit_get_cnp(struct cxgbit_np *cnp)
{
	kref_get(&cnp->kref);
}
273
/* Drop a reference; the last put frees the endpoint via _cxgbit_free_cnp(). */
static inline void cxgbit_put_cnp(struct cxgbit_np *cnp)
{
	kref_put(&cnp->kref, _cxgbit_free_cnp);
}
278
279 static inline void cxgbit_sock_reset_wr_list(struct cxgbit_sock *csk)
280 {
281 csk->wr_pending_tail = NULL;
282 csk->wr_pending_head = NULL;
283 }
284
/* Return the oldest pending WR skb without removing it (NULL if none). */
static inline struct sk_buff *cxgbit_sock_peek_wr(const struct cxgbit_sock *csk)
{
	return csk->wr_pending_head;
}
289
290 static inline void
291 cxgbit_sock_enqueue_wr(struct cxgbit_sock *csk, struct sk_buff *skb)
292 {
293 cxgbit_skcb_tx_wr_next(skb) = NULL;
294
295 skb_get(skb);
296
297 if (!csk->wr_pending_head)
298 csk->wr_pending_head = skb;
299 else
300 cxgbit_skcb_tx_wr_next(csk->wr_pending_tail) = skb;
301 csk->wr_pending_tail = skb;
302 }
303
304 static inline struct sk_buff *cxgbit_sock_dequeue_wr(struct cxgbit_sock *csk)
305 {
306 struct sk_buff *skb = csk->wr_pending_head;
307
308 if (likely(skb)) {
309 csk->wr_pending_head = cxgbit_skcb_tx_wr_next(skb);
310 cxgbit_skcb_tx_wr_next(skb) = NULL;
311 }
312 return skb;
313 }
314
/* Handler type for CPL messages received from the adapter. */
typedef void (*cxgbit_cplhandler_func)(struct cxgbit_device *,
				       struct sk_buff *);

/* Connection setup/teardown and data-path entry points. */
int cxgbit_setup_np(struct iscsi_np *, struct sockaddr_storage *);
int cxgbit_setup_conn_digest(struct cxgbit_sock *);
int cxgbit_accept_np(struct iscsi_np *, struct iscsi_conn *);
void cxgbit_free_np(struct iscsi_np *);
void cxgbit_abort_conn(struct cxgbit_sock *csk);
void cxgbit_free_conn(struct iscsi_conn *);
/* Dispatch table indexed by CPL opcode. */
extern cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS];
int cxgbit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
int cxgbit_rx_data_ack(struct cxgbit_sock *);
int cxgbit_l2t_send(struct cxgbit_device *, struct sk_buff *,
		    struct l2t_entry *);
void cxgbit_push_tx_frames(struct cxgbit_sock *);
int cxgbit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
int cxgbit_xmit_pdu(struct iscsi_conn *, struct iscsi_cmd *,
		    struct iscsi_datain_req *, const void *, u32);
void cxgbit_get_r2t_ttt(struct iscsi_conn *, struct iscsi_cmd *,
			struct iscsi_r2t *);
u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *);
int cxgbit_ofld_send(struct cxgbit_device *, struct sk_buff *);
void cxgbit_get_rx_pdu(struct iscsi_conn *);
int cxgbit_validate_params(struct iscsi_conn *);
struct cxgbit_device *cxgbit_find_device(struct net_device *, u8 *);

/* DDP (direct data placement) support. */
int cxgbit_ddp_init(struct cxgbit_device *);
int cxgbit_setup_conn_pgidx(struct cxgbit_sock *, u32);
int cxgbit_reserve_ttt(struct cxgbit_sock *, struct iscsi_cmd *);
void cxgbit_unmap_cmd(struct iscsi_conn *, struct iscsi_cmd *);
346
347 static inline
348 struct cxgbi_ppm *cdev2ppm(struct cxgbit_device *cdev)
349 {
350 return (struct cxgbi_ppm *)(*cdev->lldi.iscsi_ppm);
351 }
352 #endif